Dataset schema — each row below lists its fields in this column order:
  repo_name   : string (length 8 to 75)
  hexsha      : string (length 40)
  code        : string (length 447 to 163k)
  apis        : sequence (list of API names used in the code)
  file_path   : string (length 7 to 127)
  api_extract : string (length 346 to 104k)
ysatapathy23/TomoEncoders
6f3f8c6dd088e4df968337e33a034a42d1f6c799
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
class implementations for real-time 3D feature extraction
"""
import pandas as pd
import os
import glob
import numpy as np

from tomo_encoders import *
from tensorflow import keras
from tomo_encoders import Patches
import tensorflow as tf
from tensorflow.keras.models import load_model
from multiprocessing import Pool, cpu_count
import functools
import h5py
import abc
import time

# from tensorflow import RunOptions
from tensorflow.keras.backend import random_normal
from tensorflow import map_fn, constant, reduce_max, reduce_min
from tensorflow.keras import layers as L

# tensorflow configs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'


def insert_activation(tensor_in, activation):
    """
    Returns
    -------
    tensor of rank 2 (FC layer), 4 (image) or 5 (volume)
        (batch_size, nz, ny, nx, n_channels)

    Parameters
    ----------
    tensor_in : tensor
        input tensor
    activation : str or tf.Keras.layers.Activation
        name of custom activation or Keras activation layer
    """
    if activation is None:
        return tensor_in
    if activation == 'lrelu':
        tensor_out = L.LeakyReLU(alpha = 0.2)(tensor_in)
    else:
        tensor_out = L.Activation(activation)(tensor_in)
    return tensor_out


def hidden_layer(tensor_in, n_hidden, activation = None, batch_norm = False):
    """
    Define a fully-connected layer with batch normalization, dropout and
    custom activations.

    Returns
    -------
    tensor of rank 2 (batch_size, n_hidden)

    Parameters
    ----------
    tensor_in : tensor
        input tensor
    n_hidden : int
        number of units in the dense layer (this is the output shape)
    activation : str or tf.Keras.layers.Activation
        name of custom activation or Keras activation layer
    batch_norm : bool
        True to insert a BN layer
    """
    tensor_out = L.Dense(n_hidden, activation = None)(tensor_in)
    if batch_norm:
        tensor_out = L.BatchNormalization(momentum = 0.9, epsilon = 1e-5)(tensor_out)
    tensor_out = insert_activation(tensor_out, activation)
    return tensor_out


def stdize_vol(vol):
    eps = constant(1e-12, dtype = 'float32')
    max_ = reduce_max(vol)
    min_ = reduce_min(vol)
    vol = (vol - min_) / (max_ - min_ + eps)
    return vol


def standardize(vols):
    return map_fn(stdize_vol, vols)


def custom_Conv3D(tensor_in, n_filters, kern_size, activation = None, batch_norm = False):
    """
    Define a custom 3D convolutional layer with batch normalization and
    custom activation function (includes lrelu).

    This is the order chosen in our implementation:
    -> CONV/FC -> BatchNorm -> ReLu(or other activation) -> Dropout -> CONV/FC ->
    See supmat in: https://dmitryulyanov.github.io/deep_image_prior

    Returns
    -------
    tensor of rank 5 (batch_size, nz, ny, nx, n_channels)

    Parameters
    ----------
    tensor_in : tensor
        input tensor
    n_filters : int
        number of filters in the first convolutional layer
    kern_size : tuple
        kernel size, e.g. (3,3,3)
    activation : str or tf.Keras.layers.Activation
        name of custom activation or Keras activation layer
    batch_norm : bool
        True to insert a BN layer
    """
    tensor_out = L.Conv3D(n_filters, kern_size, activation = None, padding = "same")(tensor_in)
    if batch_norm:
        tensor_out = L.BatchNormalization(momentum = 0.9, epsilon = 1e-5)(tensor_out)
    tensor_out = insert_activation(tensor_out, activation)
    return tensor_out


##############
# Contributed by Audrey Bartlett (Berkeley)

def analysis_block_small(tensor_in, n_filters, pool_size, \
                         kern_size = None, \
                         activation = None, \
                         batch_norm = False):
    """
    Define a block of 2 3D convolutional layers followed by a 3D max-pooling layer.

    Returns
    -------
    output tensor of rank 5 (batch_size, nz, ny, nx, n_channels)

    Parameters
    ----------
    tensor_in : tensor
        input tensor
    n_filters : int
        number of filters in the first convolutional layer
    pool_size : tuple
        max pooling e.g. (2,2,2)
    kern_size : tuple
        kernel size, e.g. (3,3,3)
    activation : str or tf.Keras.layers.Activation
        name of custom activation or Keras activation layer
    kern_init : str
        kernel initialization method
    batch_norm : bool
        True to insert a BN layer
    """
    # layer # 1
    tensor_out = custom_Conv3D(tensor_in, n_filters, kern_size, \
                               activation = activation, \
                               batch_norm = batch_norm)
    # layer # 2; 2x filters
    tensor_out = custom_Conv3D(tensor_out, 2*n_filters, kern_size, \
                               activation = activation, \
                               batch_norm = batch_norm)
    # MaxPool3D
    return L.MaxPool3D(pool_size = pool_size, padding = "same")(tensor_out)


def synthesis_block_small(tensor_in, n_filters, pool_size, \
                          activation = None, \
                          kern_size = 3, \
                          kern_size_upconv = 2, \
                          batch_norm = False):
    """
    Define a 3D upsample block (with no concatenation/skip connections).

    Returns
    -------
    tensor of rank 5 (batch_size, nz, ny, nx, n_channels)

    Parameters
    ----------
    tensor_in : tensor
        input tensor
    concat_tensor : tensor
        this will be concatenated to the output of the upconvolutional layer
    n_filters : int
        number of filters in each convolutional layer after the transpose conv.
    pool_size : tuple
        reverse the max pooling e.g. (2,2,2) with these many strides for transpose conv.
    kern_size : int
        kernel size for conv, e.g. 3
    kern_size_upconv : int
        kernel size for upconv, e.g. 2
    activation : str or tf.Keras.layers.Activation
        name of custom activation or Keras activation layer
    batch_norm : bool
        True to insert a BN layer
    concat_flag : bool
        True to concatenate layers (add skip connections)
    """
    # transpose convolution
    n_filters_upconv = tensor_in.shape[-1]
    tensor_out = L.Conv3DTranspose(n_filters_upconv, \
                                   kern_size_upconv, \
                                   padding = "same", \
                                   activation = None, \
                                   strides = pool_size)(tensor_in)
    tensor_out = insert_activation(tensor_out, activation)
    # layer # 1
    tensor_out = custom_Conv3D(tensor_out, n_filters, kern_size, \
                               activation = activation, \
                               batch_norm = batch_norm)
    # layer # 2
    tensor_out = custom_Conv3D(tensor_out, n_filters, kern_size, \
                               activation = activation, \
                               batch_norm = batch_norm)
    return tensor_out


def build_encoder_r(input_shape, n_filters = [32, 64], \
                    n_blocks = 2, activation = 'lrelu', \
                    batch_norm = True, kern_size = 3, kern_size_upconv = 2, \
                    hidden_units = [128, 32, 2], pool_size = 2, POOL_FLAG = True):
    """
    @arshadzahangirchowdhury
    Define the encoder of a 3D convolutional autoencoder, based on the
    arguments provided.

    Returns
    -------
    tf.Keras.model
        keras model(s) for the encoder of a 3D autoencoder-decoder architecture.
    flatten_shape
    preflatten_shape

    Parameters
    ----------
    input_shape : tuple
        input volume shape (nz,ny,nx,1)
    n_filters : list
        a list of the number of filters in the convolutional layers for each
        block. Length must equal the number of blocks.
    n_blocks : int
        number of repeating blocks in the convolutional part
    activation : str or tf.Keras.layers.Activation
        name of custom activation or Keras activation layer
    batch_norm : bool
        True to insert BN layer after the convolutional layers
    kern_size : tuple
        kernel size for conv. layers in downsampling block, e.g. (3,3,3)
    kern_size_upconv : tuple
        kernel size for conv. layers in upsampling block, e.g. (2,2,2)
    hidden_units : list
        list of number of hidden layer units. last value is the code.
    pool_size : int or list
        if list, list length must be equal to number of blocks
    """
    inp = L.Input(input_shape)
    if type(pool_size) is int:
        pool_size = [pool_size]*n_blocks
    elif len(pool_size) != n_blocks:
        raise ValueError("list length must be equal to number of blocks")

    # downsampling path. e.g. n_blocks = 3, n_filters = [16,32,64], input volume is 64^3
    for ii in range(n_blocks):  # iterations
        if ii == 0:
            code = inp
        code = analysis_block_small(code, \
                                    n_filters[ii], \
                                    pool_size[ii], \
                                    kern_size = kern_size, \
                                    activation = activation, \
                                    batch_norm = batch_norm)

    if POOL_FLAG:
        # pool a second time before flattening
        code = L.MaxPool3D(pool_size = 2, padding = "same")(code)

    for ic, n_hidden in enumerate(hidden_units):
        if ic == len(hidden_units) - 1:  # ic = 2 (last unit is the code)
            break
        elif ic == 0:  # ic = 0 --> n_hidden = 128;
            # first hidden layer takes flattened vector as input
            preflatten_shape = tuple(code.shape[1:])
            code = L.Flatten()(code)
            flatten_shape = code.shape[-1]
            code = hidden_layer(code, n_hidden, \
                                activation = activation, \
                                batch_norm = batch_norm)
        else:  # ic = 1 --> n_hidden = 32;
            code = hidden_layer(code, n_hidden, \
                                activation = activation, \
                                batch_norm = batch_norm)

    z = hidden_layer(code, hidden_units[-1], \
                     activation = activation, \
                     batch_norm = True)
    print('inp:', inp)
    encoder = keras.models.Model(inp, z, name = "encoder")
    print('encoder:', encoder)
    return encoder, flatten_shape, preflatten_shape


def build_decoder_r(flatten_shape, preflatten_shape, n_filters = [32, 64], \
                    n_blocks = 2, activation = 'lrelu', \
                    batch_norm = True, kern_size = 3, kern_size_upconv = 2, \
                    hidden_units = [128, 32, 2], pool_size = 2, POOL_FLAG = True):
    """
    @arshadzahangirchowdhury
    Define the decoder of a 3D convolutional autoencoder, based on the
    arguments provided. 2 layers and no skip connections.
    Version of _2 with no skip connections.
    NOTE: borrowed from build_CAE_3D_4()
    to-do: Integrate build_CAE_3D_3 from change_encoders and build_CAE_3D_r

    Returns
    -------
    tf.Keras.model
        keras model(s) for the decoder of a 3D autoencoder-decoder architecture.

    Parameters
    ----------
    flatten_shape : int
        length of the flattened vector fed into the first dense layer
    preflatten_shape : tuple
        feature-map shape (nz, ny, nx, n_channels) before flattening in the encoder
    n_filters : list
        a list of the number of filters in the convolutional layers for each
        block. Length must equal the number of blocks.
    n_blocks : int
        number of repeating blocks in the convolutional part
    activation : str or tf.Keras.layers.Activation
        name of custom activation or Keras activation layer
    batch_norm : bool
        True to insert BN layer after the convolutional layers
    kern_size : tuple
        kernel size for conv. layers in downsampling block, e.g. (3,3,3)
    kern_size_upconv : tuple
        kernel size for conv. layers in upsampling block, e.g. (2,2,2)
    hidden_units : list
        list of number of hidden layer units. last value is the code.
    pool_size : int or list
        if list, list length must be equal to number of blocks
    """
    decoder_input = L.Input((hidden_units[-1],), name = "decoder_input")

    for ic, n_hidden in enumerate(hidden_units[::-1]):  # iterate as e.g. [16,32,128]
        if ic == 0:  # skip n_hidden = 16 as we already implemented that in the previous loop
            decoded = decoder_input
        else:
            # ic = 1 --> n_hidden = 32
            # ic = 2 --> n_hidden = 128
            decoded = hidden_layer(decoded, n_hidden, activation = activation, batch_norm = batch_norm)

    # n_hidden = flattened shape
    decoded = hidden_layer(decoded, flatten_shape, activation = activation, batch_norm = batch_norm)
    # reshape to convolutional feature maps
    decoded = L.Reshape(preflatten_shape)(decoded)

    if POOL_FLAG:
        # upsample once before synthesis block
        n_filters_upconv = decoded.shape[-1]
        decoded = L.Conv3DTranspose(n_filters_upconv, \
                                    kern_size_upconv, \
                                    padding = "same", \
                                    activation = None, \
                                    strides = 2)(decoded)
        decoded = insert_activation(decoded, activation)

    # upsampling path. e.g. n_blocks = 3
    for ii in range(n_blocks - 1, -1, -1):
        decoded = synthesis_block_small(decoded, \
                                        n_filters[ii], \
                                        pool_size[ii], \
                                        activation = activation, \
                                        kern_size = kern_size, \
                                        kern_size_upconv = kern_size_upconv, \
                                        batch_norm = batch_norm)

    decoded = L.Conv3D(1, (1, 1, 1), activation = 'sigmoid', padding = "same")(decoded)
    decoder = keras.models.Model(decoder_input, decoded, name = "decoder")
    decoder.summary()
    return decoder


class RegularizedAutoencoder(keras.Model):
    """
    Modifies the keras.Model to implement custom loss functions and train step.

    Parameters
    ----------
    encoder : tf.keras.Model
        the encoder model
    decoder : tf.keras.Model
        the decoder model
    weight : float
        strength of the regularization loss (L1 or KL)
    regularization_type : str
        type of regularization of model loss. 'kl': Kullback-Leibler
        divergence loss. 'L1': L1 loss.
    """
    def __init__(self, encoder, decoder, weight = 1/250.0, regularization_type = 'kl', **kwargs):
        super(RegularizedAutoencoder, self).__init__(**kwargs)

        if len(encoder.output_shape[1:]) != 1:
            print('WARNING: Encoder output is not a vector.')
        assert encoder.input_shape == decoder.output_shape, \
            'Encoder input shape and decoder output shape must match.'

        self.encoder = encoder
        self.decoder = decoder
        self.weight = float(weight)
        self.regularization_type = regularization_type
        self.total_loss_tracker = keras.metrics.Mean(name = "total_loss")
        self.pixel_mse_loss_tracker = keras.metrics.Mean(name = "pixel_mse_loss")
        self.regularization_loss_tracker = keras.metrics.Mean(name = "regularization_loss")

    @property
    def metrics(self):
        return [
            self.total_loss_tracker,
            self.pixel_mse_loss_tracker,
            self.regularization_loss_tracker,
        ]

    def train_step(self, data):
        with tf.GradientTape() as tape:
            z = self.encoder(data)
            decoded = self.decoder(z)
            pixel_mse_loss = tf.reduce_mean(keras.losses.mean_squared_error(data, decoded))
            # to-do: Try lambda function or tensorflow map
            if self.regularization_type == 'L1':
                regularization_loss = tf.reduce_mean(tf.abs(z))
            elif self.regularization_type == 'kl':
                regularization_loss = tf.reduce_mean(keras.losses.kl_divergence(data, decoded))
            else:
                raise ValueError("Regularization loss must be either 'L1' or 'kl'")
            total_loss = pixel_mse_loss + self.weight*regularization_loss
        grads = tape.gradient(total_loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        self.total_loss_tracker.update_state(total_loss)
        self.pixel_mse_loss_tracker.update_state(pixel_mse_loss)
        self.regularization_loss_tracker.update_state(regularization_loss)
        return {
            "loss": self.total_loss_tracker.result(),
            "pixel_mse_loss": self.pixel_mse_loss_tracker.result(),
            "regularization_loss": self.regularization_loss_tracker.result(),
        }


from tomo_encoders.neural_nets.keras_processor import EmbeddingLearner


class SelfSupervisedCAE(EmbeddingLearner):
    def __init__(self, **kwargs):
        '''
        models : dict of tf.keras.Models.model
            dict contains some models with model keys as string descriptors of them.
        '''
        self.model_keys = ["encoder", "decoder", "autoencoder"]
        super().__init__(**kwargs)
        return

    def save_models(self, model_path):
        for model_key in self.models.keys():
            if model_key == "autoencoder":
                continue
            filepath = os.path.join(model_path, "%s_%s.hdf5" % (model_key, self.model_tag))
            self.models[model_key].save(filepath, include_optimizer = False)
        return

    def _build_models(self, model_size = (64, 64, 64), descriptor_tag = "misc", **model_params):
        '''
        Parameters
        ----------
        model_keys : list
            list of strings describing the model, e.g., ["encoder", "decoder"], etc.
        model_params : dict
            for passing any number of model hyperparameters necessary to define the model(s)
        '''
        if model_params is None:
            raise ValueError("Need model hyperparameters or instance of model. Neither were provided")
        else:
            self.models = {}

        # insert your model building code here. The models variable must be a
        # dictionary of models with str descriptors as keys
        self.model_size = model_size
        self.model_tag = "%s" % descriptor_tag

        for key in self.model_keys:
            self.models.update({key: None})

        self.models["encoder"], _flatten_shape, _preflatten_shape = build_encoder_r(self.model_size + (1,), **model_params)
        self.models["decoder"] = build_decoder_r(_flatten_shape, _preflatten_shape, **model_params)
        self.models["autoencoder"] = None
        return

    def _load_models(self, model_tag = None, model_size = (64, 64, 64), model_path = 'some-path'):
        '''
        Parameters
        ----------
        model_names : dict
            example {"model_key" : tf.keras.Model, ...}
        model_path : str
            example "some/path"
        '''
        assert model_tag is not None, "need model_tag"
        self.models = {}  # clears any existing models linked to this class!!!!
        for model_key in self.model_keys:
            if model_key == "autoencoder":
                self.models.update({model_key: None})
            else:
                filepath = os.path.join(model_path, "%s_%s.hdf5" % (model_key, model_tag))
                self.models.update({model_key: load_model(filepath)})
        # insert assignment of model_size here
        self.model_size = self.models["encoder"].input_shape[1:-1]
        self.model_tag = model_tag
        return

    def data_generator(self, Xs, batch_size, sampling_method, \
                       max_stride = 1, \
                       random_rotate = False, add_noise = 0.1):
        '''
        Parameters
        ----------
        Xs : list of np.array
            volumes from which patches are extracted
        batch_size : int
            size of the batch generated at every iteration
        sampling_method : str
            possible methods include "random", "random-fixed-width", "grid"
        max_stride : int
            if method is "random" or "multiple-grids", then max_stride is required
        '''
        while True:
            n_vols = len(Xs)
            # sample volumes
            # use _get_xy
            idx_vols = np.repeat(np.arange(0, n_vols), int(np.ceil(batch_size/n_vols)))
            idx_vols = idx_vols[:batch_size]
            x = []
            for ivol in range(n_vols):
                patches = self.get_patches(Xs[ivol].shape, sampling_method, np.sum(idx_vols == ivol), max_stride = max_stride)
                x.append(self.extract_training_sub_volumes(Xs[ivol], patches, add_noise, random_rotate))
            yield np.concatenate(x, axis = 0, dtype = 'float32')

    def extract_training_sub_volumes(self, X, patches, add_noise, random_rotate):
        '''
        Extract training pairs x and y from a given volume X, Y pair.
        '''
        batch_size = len(patches)
        x = patches.extract(X, self.model_size)[..., np.newaxis]

        if random_rotate:
            nrots = np.random.randint(0, 4, batch_size)
            for ii in range(batch_size):
                axes = tuple(np.random.choice([0, 1, 2], size = 2, replace = False))
                x[ii, ..., 0] = np.rot90(x[ii, ..., 0], k = nrots[ii], axes = axes)
        return x

    def get_patches(self, vol_shape, sampling_method, batch_size, max_stride = None):
        if sampling_method in ["grid", 'regular-grid', "random-fixed-width"]:
            patches = Patches(vol_shape, initialize_by = sampling_method, \
                              patch_size = self.model_size, \
                              n_points = batch_size)
        elif sampling_method in ["random"]:
            patches = Patches(vol_shape, initialize_by = sampling_method, \
                              min_patch_size = self.model_size, \
                              max_stride = max_stride, \
                              n_points = batch_size)
        else:
            raise ValueError("sampling method not supported")
        return patches

    def train(self, vols, batch_size = 10, \
              sampling_method = 'random-fixed-width', \
              n_epochs = 10, \
              random_rotate = True, \
              add_noise = 0.1, \
              max_stride = 1, \
              normalize_sampling_factor = 2):
        '''
        '''
        # to-do: IMPORTANT! Go make data_loader.py, make sure normalize volume is done there.
        # instantiate data generator for use in training.
        dg = self.data_generator(vols, batch_size, sampling_method, \
                                 max_stride = max_stride, \
                                 random_rotate = random_rotate, \
                                 add_noise = add_noise)

        tot_steps = 500
        val_split = 0.2
        steps_per_epoch = int((1 - val_split)*tot_steps//batch_size)
        validation_steps = int(val_split*tot_steps//batch_size)

        t0 = time.time()
        self.models["autoencoder"] = RegularizedAutoencoder(self.models['encoder'], \
                                                            self.models['decoder'], \
                                                            weight = 1/250.0, \
                                                            regularization_type = 'kl')
        self.models["autoencoder"].compile(optimizer = 'adam')
        self.models["autoencoder"].fit(x = dg, epochs = n_epochs, \
                                       steps_per_epoch = steps_per_epoch, \
                                       validation_steps = validation_steps, verbose = 1)
        self.models["encoder"] = self.models["autoencoder"].encoder
        self.models["decoder"] = self.models["autoencoder"].decoder
        t1 = time.time()
        training_time = (t1 - t0)
        print("training time = %.2f seconds" % training_time)
        return

    def random_data_generator(self, batch_size):
        while True:
            x_shape = tuple([batch_size] + list(self.input_size) + [1])
            x = np.random.uniform(0, 1, x_shape)  # .astype(np.float32)
            x[x == 0] = 1.0e-12
            yield x

    def predict_embeddings(self, x, chunk_size, min_max = None, TIMEIT = False):
        '''
        Predicts on sub_vols. This is a wrapper around keras.model.predict()
        that speeds up inference on input lengths that are not factors of 2.
        Use this function to do multiprocessing if necessary.
        '''
        assert x.ndim == 5, "x must be 5-dimensional (batch_size, nz, ny, nx, 1)."
        t0 = time.time()
        print("call to keras predict, len(x) = %i, shape = %s, chunk_size = %i" % (len(x), str(x.shape[1:-1]), chunk_size))
        nb = len(x)
        nchunks = int(np.ceil(nb/chunk_size))
        nb_padded = nchunks*chunk_size
        padding = nb_padded - nb

        out_arr = np.zeros((nb, self.models["encoder"].output_shape[-1]), dtype = np.float32)  # use numpy since return from predict is numpy

        for k in range(nchunks):
            sb = slice(k*chunk_size, min((k + 1)*chunk_size, nb))
            x_in = x[sb, ...]

            if min_max is not None:
                min_val, max_val = min_max
                x_in = _rescale_data(x_in, float(min_val), float(max_val))

            if padding != 0:
                if k == nchunks - 1:
                    x_in = np.pad(x_in, \
                                  ((0, padding), (0, 0), \
                                   (0, 0), (0, 0), (0, 0)), mode = 'edge')
                x_out = self.models["encoder"].predict(x_in)
                if k == nchunks - 1:
                    x_out = x_out[:-padding, ...]
            else:
                x_out = self.models["encoder"].predict(x_in)

            out_arr[sb, ...] = x_out

        print("shape of output array: ", out_arr.shape)
        t_unit = (time.time() - t0)*1000.0/nb

        if TIMEIT:
            print("inf. time p. input patch size %s = %.2f ms, nb = %i" % (str(x[0, ..., 0].shape), t_unit, nb))
            print("\n")
            return out_arr, t_unit
        else:
            return out_arr

    # def detect_changes(self, vol_prev, vol_curr, patches):
    #     '''
    #     '''
    #     t0 = time.time()
    #     sub_vols_prev = patches.extract(self._normalize_volume(vol_prev), self.model_size)
    #     sub_vols_curr = patches.extract(self._normalize_volume(vol_curr), self.model_size)
    #     h_prev = self.models["encoder"].predict(sub_vols_prev[..., np.newaxis])
    #     h_curr = self.models["encoder"].predict(sub_vols_curr[..., np.newaxis])
    #     h_delta = (h_curr - h_prev)**2
    #     h_delta = np.mean(h_delta, axis = 1)
    #     h_delta = np.sqrt(h_delta)
    #     patches.add_features(h_delta.reshape(-1, 1), names = ["h_delta"])
    #     t1 = time.time()
    #     tot_time_fe = t1 - t0
    #     print("total time for change detector = %.2f seconds" % tot_time_fe)
    #     mse = np.mean(np.power(sub_vols_curr - sub_vols_prev, 2), axis = (1, 2, 3))
    #     patches.add_features(mse.reshape(-1, 1), names = ["mse"])
    #     return patches


if __name__ == "__main__":
    print('just a bunch of functions')
[ "tensorflow.keras.models.load_model", "numpy.concatenate", "tensorflow.map_fn", "numpy.random.randint", "tensorflow.keras.layers.LeakyReLU", "numpy.pad", "numpy.arange", "tensorflow.keras.layers.Conv3D", "numpy.ceil", "numpy.zeros", "tensorflow.keras.layers.Flatten", "tensorflow.keras.metrics.Mean", "numpy.rot90", "tensorflow.keras.models.Model", "numpy.random.choice", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv3DTranspose", "tensorflow.keras.losses.kl_divergence", "tensorflow.keras.layers.Reshape", "tensorflow.keras.layers.MaxPool3D", "numpy.sum", "tensorflow.GradientTape", "tensorflow.reduce_max", "tensorflow.constant", "tensorflow.keras.layers.Activation", "tensorflow.reduce_min", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.losses.mean_squared_error", "numpy.random.uniform", "tensorflow.abs", "tensorflow.keras.layers.Input" ]
tomo_encoders/neural_nets/autoencoders.py
[(95, 'tensorflow.constant', 'constant', (['(1e-12)'], {'dtype': '"""float32"""'}), False, 'from tensorflow import map_fn, constant, reduce_max, reduce_min\n'), (96, 'tensorflow.reduce_max', 'reduce_max', (['vol'], {}), False, 'from tensorflow import map_fn, constant, reduce_max, reduce_min\n'), (97, 'tensorflow.reduce_min', 'reduce_min', (['vol'], {}), False, 'from tensorflow import map_fn, constant, reduce_max, reduce_min\n'), (102, 'tensorflow.map_fn', 'map_fn', (['stdize_vol', 'vols'], {}), False, 'from tensorflow import map_fn, constant, reduce_max, reduce_min\n'), (297, 'tensorflow.keras.layers.Input', 'L.Input', (['input_shape'], {}), True, 'from tensorflow.keras import layers as L\n'), (344, 'tensorflow.keras.models.Model', 'keras.models.Model', (['inp', 'z'], {'name': '"""encoder"""'}), False, 'from tensorflow import keras\n'), (406, 'tensorflow.keras.layers.Input', 'L.Input', (['(hidden_units[-1],)'], {'name': '"""decoder_input"""'}), True, 'from tensorflow.keras import layers as L\n'), (445, 'tensorflow.keras.models.Model', 'keras.models.Model', (['decoder_input', 'decoded'], {'name': '"""decoder"""'}), False, 'from tensorflow import keras\n'), (83, 'tensorflow.keras.layers.Dense', 'L.Dense', (['n_hidden'], {'activation': 'None'}), True, 'from tensorflow.keras import layers as L\n'), (135, 'tensorflow.keras.layers.Conv3D', 'L.Conv3D', (['n_filters', 'kern_size'], {'activation': 'None', 'padding': '"""same"""'}), True, 'from tensorflow.keras import layers as L\n'), (189, 'tensorflow.keras.layers.MaxPool3D', 'L.MaxPool3D', ([], {'pool_size': 'pool_size', 'padding': '"""same"""'}), True, 'from tensorflow.keras import layers as L\n'), (230, 'tensorflow.keras.layers.Conv3DTranspose', 'L.Conv3DTranspose', (['n_filters_upconv', 'kern_size_upconv'], {'padding': '"""same"""', 'activation': 'None', 'strides': 'pool_size'}), True, 'from tensorflow.keras import layers as L\n'), (420, 'tensorflow.keras.layers.Reshape', 'L.Reshape', (['preflatten_shape'], {}), True, 'from tensorflow.keras import layers as L\n'), (443, 'tensorflow.keras.layers.Conv3D', 'L.Conv3D', (['(1)', '(1, 1, 1)'], {'activation': '"""sigmoid"""', 'padding': '"""same"""'}), True, 'from tensorflow.keras import layers as L\n'), (484, 'tensorflow.keras.metrics.Mean', 'keras.metrics.Mean', ([], {'name': '"""total_loss"""'}), False, 'from tensorflow import keras\n'), (485, 'tensorflow.keras.metrics.Mean', 'keras.metrics.Mean', ([], {'name': '"""pixel_mse_loss"""'}), False, 'from tensorflow import keras\n'), (488, 'tensorflow.keras.metrics.Mean', 'keras.metrics.Mean', ([], {'name': '"""regularization_loss"""'}), False, 'from tensorflow import keras\n'), (698, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (714, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (737, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (744, 'numpy.zeros', 'np.zeros', (["(nb, self.models['encoder'].output_shape[-1])"], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (54, 'tensorflow.keras.layers.LeakyReLU', 'L.LeakyReLU', ([], {'alpha': '(0.2)'}), True, 'from tensorflow.keras import layers as L\n'), (56, 'tensorflow.keras.layers.Activation', 'L.Activation', (['activation'], {}), True, 'from tensorflow.keras import layers as L\n'), (86, 'tensorflow.keras.layers.BatchNormalization', 'L.BatchNormalization', ([], {'momentum': '(0.9)', 'epsilon': '(1e-05)'}), True, 'from tensorflow.keras import layers as L\n'), (138, 'tensorflow.keras.layers.BatchNormalization', 'L.BatchNormalization', ([], {'momentum': 
'(0.9)', 'epsilon': '(1e-05)'}), True, 'from tensorflow.keras import layers as L\n'), (319, 'tensorflow.keras.layers.MaxPool3D', 'L.MaxPool3D', ([], {'pool_size': '(2)', 'padding': '"""same"""'}), True, 'from tensorflow.keras import layers as L\n'), (425, 'tensorflow.keras.layers.Conv3DTranspose', 'L.Conv3DTranspose', (['n_filters_upconv', 'kern_size_upconv'], {'padding': '"""same"""', 'activation': 'None', 'strides': '(2)'}), True, 'from tensorflow.keras import layers as L\n'), (499, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (547, 'os.path.join', 'os.path.join', (['model_path', "('%s_%s.hdf5' % (model_key, self.model_tag))"], {}), False, 'import os\n'), (649, 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)', 'batch_size'], {}), True, 'import numpy as np\n'), (660, 'tomo_encoders.Patches', 'Patches', (['vol_shape'], {'initialize_by': 'sampling_method', 'patch_size': 'self.model_size', 'n_points': 'batch_size'}), False, 'from tomo_encoders import Patches\n'), (725, 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'x_shape'], {}), True, 'import numpy as np\n'), (740, 'numpy.ceil', 'np.ceil', (['(nb / chunk_size)'], {}), True, 'import numpy as np\n'), (503, 'tensorflow.keras.losses.mean_squared_error', 'keras.losses.mean_squared_error', (['data', 'decoded'], {}), False, 'from tensorflow import keras\n'), (597, 'os.path.join', 'os.path.join', (['model_path', "('%s_%s.hdf5' % (model_key, model_tag))"], {}), False, 'import os\n'), (630, 'numpy.arange', 'np.arange', (['(0)', 'n_vols'], {}), True, 'import numpy as np\n'), (638, 'numpy.concatenate', 'np.concatenate', (['x'], {'axis': '(0)', 'dtype': '"""float32"""'}), True, 'import numpy as np\n'), (652, 'numpy.rot90', 'np.rot90', (['x[ii, ..., 0]'], {'k': 'nrots[ii]', 'axes': 'axes'}), True, 'import numpy as np\n'), (665, 'tomo_encoders.Patches', 'Patches', (['vol_shape'], {'initialize_by': 'sampling_method', 'min_patch_size': 'self.model_size', 'max_stride': 'max_stride', 'n_points': 'batch_size'}), False, 'from tomo_encoders import Patches\n'), (328, 'tensorflow.keras.layers.Flatten', 'L.Flatten', ([], {}), True, 'from tensorflow.keras import layers as L\n'), (507, 'tensorflow.abs', 'tf.abs', (['z'], {}), True, 'import tensorflow as tf\n'), (630, 'numpy.ceil', 'np.ceil', (['(batch_size / n_vols)'], {}), True, 'import numpy as np\n'), (635, 'numpy.sum', 'np.sum', (['(idx_vols == ivol)'], {}), True, 'import numpy as np\n'), (651, 'numpy.random.choice', 'np.random.choice', (['[0, 1, 2]'], {'size': '(2)', 'replace': '(False)'}), True, 'import numpy as np\n'), (757, 'numpy.pad', 'np.pad', (['x_in', '((0, padding), (0, 0), (0, 0), (0, 0), (0, 0))'], {'mode': '"""edge"""'}), True, 'import numpy as np\n'), (770, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (510, 'tensorflow.keras.losses.kl_divergence', 'keras.losses.kl_divergence', (['data', 'decoded'], {}), False, 'from tensorflow import keras\n'), (598, 'tensorflow.keras.models.load_model', 'load_model', (['filepath'], {}), False, 'from tensorflow.keras.models import load_model\n')]
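Note: a minimal usage sketch for the autoencoders.py row above — added for illustration, not part of the dataset. Shapes, hyperparameters, and variable names are mine; pool_size is passed as a list because build_decoder_r indexes it per block without the int-to-list conversion the encoder performs.

import numpy as np

# Build a matching encoder/decoder pair for 64^3 patches.
encoder, flat_n, preflat_shape = build_encoder_r(
    (64, 64, 64, 1), n_filters = [32, 64], n_blocks = 2,
    hidden_units = [128, 32, 2], pool_size = [2, 2])
decoder = build_decoder_r(
    flat_n, preflat_shape, n_filters = [32, 64], n_blocks = 2,
    hidden_units = [128, 32, 2], pool_size = [2, 2])

# Wrap both in the regularized train step and fit on random volumes.
ae = RegularizedAutoencoder(encoder, decoder, weight = 1/250.0,
                            regularization_type = 'L1')
ae.compile(optimizer = 'adam')
x = np.random.uniform(0, 1, (4, 64, 64, 64, 1)).astype('float32')
ae.fit(x, epochs = 1, batch_size = 2, verbose = 0)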
exc4l/MRR-NN
941692db34ef8c7583bae6734e14c3ac9ade6966
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import regularizers, initializers
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras import constraints
import numpy as np
from scipy.optimize import root_scalar, fsolve


class MinMaxClipping(tf.keras.constraints.Constraint):
    def __init__(self, minval, maxval):
        self.min_val = minval
        self.max_val = maxval

    def __call__(self, w):
        return tf.clip_by_value(w, self.min_val, self.max_val)


class Lorentzian(keras.layers.Layer):
    def __init__(
        self,
        units,
        gamma=5,
        use_bias=False,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        train_scale_output=None,
        output_splitter=None,
        kernel_constraint=None,
        bias_constraint=None,
        sca_out_constraint=MinMaxClipping(0.1, 1),
        **kwargs,
    ):
        super(Lorentzian, self).__init__(
            activity_regularizer=activity_regularizer, **kwargs
        )
        self.units = int(units) if not isinstance(units, int) else units
        self.kernel_initializer = keras.initializers.get(kernel_initializer)
        self.bias_initializer = keras.initializers.get(bias_initializer)
        self.sca_out_initializer = keras.initializers.get("ones")
        self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)
        self.bias_regularizer = keras.regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.sca_out_constraint = constraints.get(sca_out_constraint)
        self.use_bias = use_bias
        self.train_scale_output = train_scale_output
        self.output_splitter = output_splitter
        self.gamma = tf.Variable(gamma, trainable=False, dtype="float32")
        self.intensity = tf.Variable(1, trainable=False, dtype="float32")
        self.offset = tf.Variable(0, trainable=False, dtype="float32")
        self.input_spec = InputSpec(min_ndim=2)

    def build(self, input_shape):
        dtype = "float32"
        # if not (dtype.is_floating or dtype.is_complex):
        #     raise TypeError('Unable to build `Dense` layer with non-floating point '
        #                     'dtype %s' % (dtype,))
        input_shape = tensor_shape.TensorShape(input_shape)
        last_dim = tensor_shape.dimension_value(input_shape[-1])
        if last_dim is None:
            raise ValueError(
                "The last dimension of the inputs to `Lorentzian` "
                "should be defined. Found `None`."
            )
        self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
        self.kernel = self.add_weight(
            "kernel",
            shape=[last_dim, self.units],
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            dtype=self.dtype,
            trainable=True,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                "bias",
                shape=[
                    self.units,
                ],
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                dtype=self.dtype,
                trainable=True,
            )
        if self.train_scale_output:
            self.sca_out = self.add_weight(
                "sca_out",
                shape=[
                    self.units,
                ],
                initializer=self.sca_out_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.sca_out_constraint,
                dtype=self.dtype,
                trainable=True,
            )
        self.built = True

    def call(self, inputs):
        # a = tf.matmul(inputs, self.kernel) + self.bias
        # gsq = tf.square(self.gamma)
        # top = self.intensity * gsq
        # # bottom = (tf.matmul(inputs, self.kernel) + self.bias) + gsq
        # bottom = a + gsq
        # return tf.divide(top, bottom)
        # return tf.matmul(inputs, self.kernel) + self.bias
        gsq = tf.square(self.gamma)
        top = self.intensity * gsq
        a = tf.square(self.kernel)
        bottom = a + gsq
        intens = self.intensity
        if self.output_splitter:
            intens = self.intensity / self.output_splitter
        if self.train_scale_output:
            intens = tf.divide(intens, self.sca_out)
            return intens * tf.matmul(inputs, tf.divide(top, bottom))
        return intens * tf.matmul(inputs, tf.divide(top, bottom))


class MMRelu(keras.layers.Layer):
    def __init__(
        self,
        units,
        use_bias=False,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        train_scale_output=True,
        output_splitter=None,
        kernel_constraint=MinMaxClipping(0.01, 1),
        bias_constraint=None,
        sca_out_constraint=MinMaxClipping(0.01, 1),
        **kwargs,
    ):
        super(MMRelu, self).__init__(
            activity_regularizer=activity_regularizer, **kwargs
        )
        self.units = int(units) if not isinstance(units, int) else units
        self.kernel_initializer = keras.initializers.get(kernel_initializer)
        self.bias_initializer = keras.initializers.get(bias_initializer)
        self.sca_out_initializer = keras.initializers.get("ones")
        self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)
        self.bias_regularizer = keras.regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.sca_out_constraint = constraints.get(sca_out_constraint)
        self.use_bias = use_bias
        self.train_scale_output = train_scale_output
        self.output_splitter = output_splitter
        self.input_spec = InputSpec(min_ndim=2)

    def build(self, input_shape):
        dtype = "float32"
        # if not (dtype.is_floating or dtype.is_complex):
        #     raise TypeError('Unable to build `Dense` layer with non-floating point '
        #                     'dtype %s' % (dtype,))
        input_shape = tensor_shape.TensorShape(input_shape)
        last_dim = tensor_shape.dimension_value(input_shape[-1])
        if last_dim is None:
            raise ValueError(
                "The last dimension of the inputs to `Lorentzian` "
                "should be defined. Found `None`."
            )
        self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
        self.kernel = self.add_weight(
            "kernel",
            shape=[last_dim, self.units],
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            dtype=self.dtype,
            trainable=True,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                "bias",
                shape=[
                    self.units,
                ],
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                dtype=self.dtype,
                trainable=True,
            )
        if self.train_scale_output:
            self.sca_out = self.add_weight(
                "sca_out",
                shape=[
                    self.units,
                ],
                initializer=self.sca_out_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.sca_out_constraint,
                dtype=self.dtype,
                trainable=True,
            )
        self.built = True

    def call(self, inputs):
        intens = tf.constant([1], dtype="float32")
        if self.output_splitter:
            intens = intens / self.output_splitter
        if self.train_scale_output:
            intens = tf.divide(intens, self.sca_out)
            return intens * tf.keras.activations.relu(tf.matmul(inputs, self.kernel))
        return intens * tf.keras.activations.relu(tf.matmul(inputs, self.kernel))


def lorz(x, gamma=5):
    I = 1
    gamma = gamma
    x0 = 0
    gsq = np.square(gamma)
    top = np.multiply(I, gsq)
    lbt = np.square(x - x0)
    bottom = lbt + gsq
    y = np.divide(top, bottom)
    return y


def get_lorzkernel(kernel, gamma):
    lorzkernel = np.zeros(kernel.shape, dtype="float32")
    for x in np.ndindex(kernel.shape):
        ele = kernel[x]
        if ele == 0:
            lorzkernel[x] = 100000
        else:
            sol = root_scalar(
                lambda x: lorz(x, gamma) - ele, method="brenth", bracket=[0, 150]
            )
            lorzkernel[x] = sol.root
    return lorzkernel


def create_lorentz_from_mrrelu(model, gamma=5):
    nmodel = tf.keras.Sequential()
    for idx in range(len(model.layers)):
        layer = model.layers[idx]
        if "flatten" in layer.name:
            inp_sh = tuple(c for c in layer.input_shape if c)
            nmodel.add(tf.keras.layers.Flatten(input_shape=inp_sh))
        if "mm_relu" in layer.name:
            if len(nmodel.layers) == 0:
                inp_sh = tuple(c for c in layer.input_shape if c)
                nmodel.add(tf.keras.Input(shape=inp_sh))
            if len(layer.get_weights()) > 1:
                nmodel.add(
                    Lorentzian(
                        layer.get_weights()[0].shape[1],
                        gamma=gamma,
                        train_scale_output=True,
                    )
                )
                old_weights = nmodel.layers[idx].get_weights()
                old_weights[0] = get_lorzkernel(layer.get_weights()[0], gamma)
                old_weights[1] = layer.get_weights()[1]
            else:
                nmodel.add(
                    Lorentzian(
                        layer.get_weights()[0].shape[1],
                        gamma=gamma,
                        train_scale_output=None,
                    )
                )
                old_weights = nmodel.layers[idx].get_weights()
                old_weights[0] = get_lorzkernel(layer.get_weights()[0], gamma)
            nmodel.layers[idx].set_weights(old_weights)
    return nmodel


def norm_to_wavelength(wvl, offset):
    if wvl > 10:
        return offset * 1e9
    if wvl < 10:
        if wvl > 1:
            return offset * 1e6
        if wvl < 1:
            return offset * 1e3


def offset_to_wavelength(wvl, noff):
    if noff > 100:
        return 0
    else:
        return wvl - noff


def get_ring_specs(model, wvl, gamma, sensitivity=1e-9 / 5e-3, filename="layer.txt"):
    """Only Lorentzian Layers work"""
    # sensitivity means how much the ring shifts per mW of heating power.
    # In my bachelor's thesis this was about 1 nm shift per 5 mW.
    with open(filename, "w", encoding="utf-8") as wr:
        for idx in range(len(model.layers)):
            layer = model.layers[idx]
            if "flatten" in layer.name:
                wr.write("\n\n Flatten \n\n")
                continue
            if "lorentzian" in layer.name:
                wr.write(f"\n\n Layer Lorentzian {idx}: \n\n")
                gam_old = model.layers[idx].get_weights()[2]
                gam_new = gamma
                offset = model.layers[idx].get_weights()[0] * gam_new / gam_old
                # np.savetxt("weight.csv", offset, fmt="%s", delimiter=",")
                # with open("layer.txt", "w", encoding="utf-8") as wr:
                for nidx in np.ndindex(offset.shape):
                    noff = norm_to_wavelength(wvl, offset[nidx])
                    heat = noff / norm_to_wavelength(wvl, sensitivity) * 1e3
                    wr.write(
                        f"{nidx}, {wvl}nm, {offset_to_wavelength(wvl,noff):.6f}nm, {gamma}, {noff:.6f}nm, {heat:.6f}mW\n"
                    )
[ "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.keras.Sequential", "numpy.divide", "numpy.square", "tensorflow.keras.Input", "tensorflow.Variable", "tensorflow.divide", "tensorflow.python.framework.tensor_shape.dimension_value", "tensorflow.square", "tensorflow.keras.layers.Flatten", "numpy.zeros", "tensorflow.keras.initializers.get", "tensorflow.python.keras.constraints.get", "tensorflow.matmul", "numpy.multiply", "tensorflow.clip_by_value", "tensorflow.constant", "tensorflow.keras.regularizers.get", "tensorflow.python.keras.engine.input_spec.InputSpec", "numpy.ndindex" ]
lorentzian.py
[(228, 'numpy.square', 'np.square', (['gamma'], {}), True, 'import numpy as np\n'), (229, 'numpy.multiply', 'np.multiply', (['I', 'gsq'], {}), True, 'import numpy as np\n'), (230, 'numpy.square', 'np.square', (['(x - x0)'], {}), True, 'import numpy as np\n'), (232, 'numpy.divide', 'np.divide', (['top', 'bottom'], {}), True, 'import numpy as np\n'), (237, 'numpy.zeros', 'np.zeros', (['kernel.shape'], {'dtype': '"""float32"""'}), True, 'import numpy as np\n'), (238, 'numpy.ndindex', 'np.ndindex', (['kernel.shape'], {}), True, 'import numpy as np\n'), (251, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (17, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['w', 'self.min_val', 'self.max_val'], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.keras.initializers.get', 'keras.initializers.get', (['kernel_initializer'], {}), False, 'from tensorflow import keras\n'), (43, 'tensorflow.keras.initializers.get', 'keras.initializers.get', (['bias_initializer'], {}), False, 'from tensorflow import keras\n'), (44, 'tensorflow.keras.initializers.get', 'keras.initializers.get', (['"""ones"""'], {}), False, 'from tensorflow import keras\n'), (45, 'tensorflow.keras.regularizers.get', 'keras.regularizers.get', (['kernel_regularizer'], {}), False, 'from tensorflow import keras\n'), (46, 'tensorflow.keras.regularizers.get', 'keras.regularizers.get', (['bias_regularizer'], {}), False, 'from tensorflow import keras\n'), (47, 'tensorflow.python.keras.constraints.get', 'constraints.get', (['kernel_constraint'], {}), False, 'from tensorflow.python.keras import constraints\n'), (48, 'tensorflow.python.keras.constraints.get', 'constraints.get', (['bias_constraint'], {}), False, 'from tensorflow.python.keras import constraints\n'), (49, 'tensorflow.python.keras.constraints.get', 'constraints.get', (['sca_out_constraint'], {}), False, 'from tensorflow.python.keras import constraints\n'), (54, 'tensorflow.Variable', 'tf.Variable', (['gamma'], {'trainable': '(False)', 'dtype': '"""float32"""'}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.Variable', 'tf.Variable', (['(1)'], {'trainable': '(False)', 'dtype': '"""float32"""'}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'dtype': '"""float32"""'}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.python.keras.engine.input_spec.InputSpec', 'InputSpec', ([], {'min_ndim': '(2)'}), False, 'from tensorflow.python.keras.engine.input_spec import InputSpec\n'), (66, 'tensorflow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', (['input_shape'], {}), False, 'from tensorflow.python.framework import tensor_shape\n'), (67, 'tensorflow.python.framework.tensor_shape.dimension_value', 'tensor_shape.dimension_value', (['input_shape[-1]'], {}), False, 'from tensorflow.python.framework import tensor_shape\n'), (73, 'tensorflow.python.keras.engine.input_spec.InputSpec', 'InputSpec', ([], {'min_ndim': '(2)', 'axes': '{(-1): last_dim}'}), False, 'from tensorflow.python.keras.engine.input_spec import InputSpec\n'), (117, 'tensorflow.square', 'tf.square', (['self.gamma'], {}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.square', 'tf.square', (['self.kernel'], {}), True, 'import tensorflow as tf\n'), (151, 'tensorflow.keras.initializers.get', 'keras.initializers.get', (['kernel_initializer'], {}), False, 'from tensorflow import keras\n'), (152, 'tensorflow.keras.initializers.get', 'keras.initializers.get', 
(['bias_initializer'], {}), False, 'from tensorflow import keras\n'), (153, 'tensorflow.keras.initializers.get', 'keras.initializers.get', (['"""ones"""'], {}), False, 'from tensorflow import keras\n'), (154, 'tensorflow.keras.regularizers.get', 'keras.regularizers.get', (['kernel_regularizer'], {}), False, 'from tensorflow import keras\n'), (155, 'tensorflow.keras.regularizers.get', 'keras.regularizers.get', (['bias_regularizer'], {}), False, 'from tensorflow import keras\n'), (156, 'tensorflow.python.keras.constraints.get', 'constraints.get', (['kernel_constraint'], {}), False, 'from tensorflow.python.keras import constraints\n'), (157, 'tensorflow.python.keras.constraints.get', 'constraints.get', (['bias_constraint'], {}), False, 'from tensorflow.python.keras import constraints\n'), (158, 'tensorflow.python.keras.constraints.get', 'constraints.get', (['sca_out_constraint'], {}), False, 'from tensorflow.python.keras import constraints\n'), (163, 'tensorflow.python.keras.engine.input_spec.InputSpec', 'InputSpec', ([], {'min_ndim': '(2)'}), False, 'from tensorflow.python.keras.engine.input_spec import InputSpec\n'), (171, 'tensorflow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', (['input_shape'], {}), False, 'from tensorflow.python.framework import tensor_shape\n'), (172, 'tensorflow.python.framework.tensor_shape.dimension_value', 'tensor_shape.dimension_value', (['input_shape[-1]'], {}), False, 'from tensorflow.python.framework import tensor_shape\n'), (178, 'tensorflow.python.keras.engine.input_spec.InputSpec', 'InputSpec', ([], {'min_ndim': '(2)', 'axes': '{(-1): last_dim}'}), False, 'from tensorflow.python.keras.engine.input_spec import InputSpec\n'), (215, 'tensorflow.constant', 'tf.constant', (['[1]'], {'dtype': '"""float32"""'}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.divide', 'tf.divide', (['intens', 'self.sca_out'], {}), True, 'import tensorflow as tf\n'), (219, 'tensorflow.divide', 'tf.divide', (['intens', 'self.sca_out'], {}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.divide', 'tf.divide', (['top', 'bottom'], {}), True, 'import tensorflow as tf\n'), (221, 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.kernel'], {}), True, 'import tensorflow as tf\n'), (256, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'input_shape': 'inp_sh'}), True, 'import tensorflow as tf\n'), (319, 'numpy.ndindex', 'np.ndindex', (['offset.shape'], {}), True, 'import numpy as np\n'), (126, 'tensorflow.divide', 'tf.divide', (['top', 'bottom'], {}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.kernel'], {}), True, 'import tensorflow as tf\n'), (260, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'inp_sh'}), True, 'import tensorflow as tf\n')]
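Note: a hypothetical usage sketch for the lorentzian.py row above — added for illustration, not part of the dataset. It assumes MNIST-like 28x28 inputs; the default Keras layer names ("flatten", "mm_relu", ...) are what create_lorentz_from_mrrelu matches on, and the kernel initializer is confined to (0, 1) so that get_lorzkernel can bracket a root even on an untrained model.

import tensorflow as tf

# Transmission-valued kernels: lorz() only takes values in (0, 1], so keep
# the initial weights inside that range.
init = tf.keras.initializers.RandomUniform(minval=0.05, maxval=0.95)
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    MMRelu(32, kernel_initializer=init),
    MMRelu(10, kernel_initializer=init),
])
model.compile(optimizer="adam",
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
# ... model.fit(x_train, y_train) would go here ...

# Map each transmission weight onto the ring detuning that produces it.
lorentz_model = create_lorentz_from_mrrelu(model, gamma=5)
lorentz_model.summary()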
SachsLab/indl
531d2e0c2ee765004aedc553af40e258262f86cb
import tensorflow as tf
import numpy as np
import tensorflow.keras.layers as tfkl
from tensorflow.keras import backend as K
import tensorflow_probability as tfp

tfd = tfp.distributions
tfpl = tfp.layers
tfb = tfp.bijectors

scale_shift = np.log(np.exp(1) - 1).astype(np.float32)


def test_make_mvn_prior():
    from indl.model.tfp import make_mvn_prior

    def _test(latent_size=5, init_std=0.1, trainable_mean=True, trainable_var=True, offdiag=False):
        prior = make_mvn_prior(latent_size, init_std=init_std,
                               trainable_mean=trainable_mean, trainable_var=trainable_var,
                               offdiag=offdiag)
        assert (isinstance(prior.loc, tf.Variable) == trainable_mean)
        if offdiag:
            assert (hasattr(prior.scale_tril, 'trainable_variables') == trainable_var)
        else:
            assert ((len(prior.scale.trainable_variables) > 0) == trainable_var)
        if not trainable_var:
            assert np.all(prior.stddev().numpy() == init_std)
        if not trainable_mean:
            assert np.all(prior.mean().numpy() == 0.0)

    for _mean in True, False:
        for _var in True, False:
            for _offd in True, False:
                _test(trainable_mean=_mean, trainable_var=_var, offdiag=_offd)


def _run_assertions_on_qdist(q_dist, inputs, input_dim, dist_dim, batch_size):
    assert isinstance(q_dist, tfd.MultivariateNormalDiag)
    # Test in a model with a data tensor
    model = tf.keras.Model(inputs=inputs, outputs=q_dist)
    dummy_inputs = tf.random.uniform((batch_size, input_dim))
    dummy_q = model(dummy_inputs)
    assert isinstance(dummy_q, tfd.MultivariateNormalDiag)
    assert dummy_q.stddev().shape.as_list() == [batch_size, dist_dim]
    assert np.all(dummy_q.stddev().numpy() > 0)
    assert dummy_q.sample().shape.as_list() == [batch_size, dist_dim]
    assert ~np.any(np.isnan(dummy_q.sample().numpy()))


def test_make_mvn_dist_fn():
    from indl.model.tfp import make_mvn_dist_fn

    input_dim = 4
    dist_dim = 3
    batch_size = 8

    # Test with placeholder
    inputs = tfkl.Input(shape=(input_dim,))
    # First the callable
    make_dist_fn, dist_params = make_mvn_dist_fn(inputs, dist_dim, shift_std=0.1)
    assert hasattr(make_dist_fn, '__call__')
    assert isinstance(dist_params[0], tf.Tensor)
    assert isinstance(dist_params[1], tf.Tensor)
    # Then test using it to make a distribution
    q_dist = tfpl.DistributionLambda(make_distribution_fn=make_dist_fn,
                                     # convert_to_tensor_fn=lambda s: s.sample(n_samples),
                                     )(dist_params)
    _run_assertions_on_qdist(q_dist, inputs, input_dim, dist_dim, batch_size)


def test_make_variational():
    from indl.model.tfp import make_variational

    input_dim = 4
    dist_dim = 3
    batch_size = 8

    # Test making a placeholder variational.
    inputs = tfkl.Input(shape=(input_dim,))
    q_dist = make_variational(inputs, dist_dim, init_std=0.1)
    _run_assertions_on_qdist(q_dist, inputs, input_dim, dist_dim, batch_size)
[ "numpy.exp", "tensorflow.random.uniform", "tensorflow.keras.Model", "tensorflow.keras.layers.Input" ]
tests/model/test_tfp.py
[(39, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'q_dist'}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.random.uniform', 'tf.random.uniform', (['(batch_size, input_dim)'], {}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.keras.layers.Input', 'tfkl.Input', ([], {'shape': '(input_dim,)'}), True, 'import tensorflow.keras.layers as tfkl\n'), (59, 'indl.model.tfp.make_mvn_dist_fn', 'make_mvn_dist_fn', (['inputs', 'dist_dim'], {'shift_std': '(0.1)'}), False, 'from indl.model.tfp import make_mvn_dist_fn\n'), (78, 'tensorflow.keras.layers.Input', 'tfkl.Input', ([], {'shape': '(input_dim,)'}), True, 'import tensorflow.keras.layers as tfkl\n'), (79, 'indl.model.tfp.make_variational', 'make_variational', (['inputs', 'dist_dim'], {'init_std': '(0.1)'}), False, 'from indl.model.tfp import make_variational\n'), (16, 'indl.model.tfp.make_mvn_prior', 'make_mvn_prior', (['latent_size'], {'init_std': 'init_std', 'trainable_mean': 'trainable_mean', 'trainable_var': 'trainable_var', 'offdiag': 'offdiag'}), False, 'from indl.model.tfp import make_mvn_prior\n'), (9, 'numpy.exp', 'np.exp', (['(1)'], {}), True, 'import numpy as np\n')]
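Note: the indl helpers exercised by this row are not included in the dump; the following self-contained sketch (mine, standard TensorFlow Probability only) shows the pattern the tests assert on — a Dense head parameterizing a MultivariateNormalDiag through a DistributionLambda.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
tfpl = tfp.layers

dist_dim = 3
inputs = tf.keras.layers.Input(shape=(4,))
# One half of the outputs becomes the locs, the other (softplus-ed) the scales.
params = tf.keras.layers.Dense(2 * dist_dim)(inputs)
q_dist = tfpl.DistributionLambda(
    make_distribution_fn=lambda t: tfd.MultivariateNormalDiag(
        loc=t[..., :dist_dim],
        scale_diag=tf.nn.softplus(t[..., dist_dim:]) + 1e-5))(params)
model = tf.keras.Model(inputs=inputs, outputs=q_dist)

q = model(tf.random.uniform((8, 4)))   # a distribution, not a tensor
assert q.sample().shape == (8, dist_dim)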
sgwhat/BigDL
25b402666fbb26b0bc18fc8100e9a00469844778
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from unittest import TestCase

import tensorflow as tf
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2
import numpy as np

from bigdl.nano.tf.keras import Model


class TestModelQuantize(TestCase):
    def test_model_quantize_ptq(self):
        model = MobileNetV2(weights=None, input_shape=[40, 40, 3], classes=10)
        model = Model(inputs=model.inputs, outputs=model.outputs)
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
                      loss=tf.keras.losses.BinaryCrossentropy(),
                      metrics=[tf.keras.metrics.CategoricalAccuracy()],)
        train_examples = np.random.random((100, 40, 40, 3))
        train_labels = np.random.randint(0, 10, size=(100,))
        train_dataset = tf.data.Dataset.from_tensor_slices((train_examples, train_labels))

        # Case 1: Default
        q_model = model.quantize(calib_dataset=train_dataset)
        assert q_model
        output = q_model(train_examples[0:10])
        assert output.shape == (10, 10)

        # Case 2: Override by arguments
        q_model = model.quantize(calib_dataset=train_dataset,
                                 metric=tf.keras.metrics.CategoricalAccuracy(),
                                 tuning_strategy='basic',
                                 accuracy_criterion={'relative': 0.99,
                                                     'higher_is_better': True})
        assert q_model
        output = q_model(train_examples[0:10])
        assert output.shape == (10, 10)

        # Case 3: Invalid approach, dynamic or qat is not supported
        invalid_approach = 'dynamic'
        with pytest.raises(RuntimeError, match="post_training_dynamic_quant is invalid."):
            model.quantize(approach=invalid_approach)
[ "numpy.random.random", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.keras.applications.mobilenet_v2.MobileNetV2", "tensorflow.keras.losses.BinaryCrossentropy", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.metrics.CategoricalAccuracy", "numpy.random.randint" ]
python/nano/test/inc/tf/test_keras_quantize.py
[(28, 'tensorflow.keras.applications.mobilenet_v2.MobileNetV2', 'MobileNetV2', ([], {'weights': 'None', 'input_shape': '[40, 40, 3]', 'classes': '(10)'}), False, 'from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2\n'), (29, 'bigdl.nano.tf.keras.Model', 'Model', ([], {'inputs': 'model.inputs', 'outputs': 'model.outputs'}), False, 'from bigdl.nano.tf.keras import Model\n'), (33, 'numpy.random.random', 'np.random.random', (['(100, 40, 40, 3)'], {}), True, 'import numpy as np\n'), (34, 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)'], {'size': '(100,)'}), True, 'import numpy as np\n'), (35, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(train_examples, train_labels)'], {}), True, 'import tensorflow as tf\n'), (54, 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""post_training_dynamic_quant is invalid."""'}), False, 'import pytest\n'), (30, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), True, 'import tensorflow as tf\n'), (31, 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {}), True, 'import tensorflow as tf\n')]
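Note: to reproduce this row's test, the usual invocation (assuming a BigDL checkout with bigdl-nano and its Intel Neural Compressor dependency installed) would be:

python -m pytest python/nano/test/inc/tf/test_keras_quantize.py::TestModelQuantize::test_model_quantize_ptq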
Jurph/image-super-resolution
50e20c62e1a8af512f63094df44cbeeb01a18b72
import tensorflow as tf
from tensorflow.keras.initializers import RandomUniform
from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda
from tensorflow.keras.models import Model

from ISR.models.imagemodel import ImageModel

WEIGHTS_URLS = {
    'gans': {
        'arch_params': {'C': 4, 'D': 3, 'G': 32, 'G0': 32, 'x': 4, 'T': 10},
        'url': 'https://public-asai-dl-models.s3.eu-central-1.amazonaws.com/ISR/rrdn-C4-D3-G32-G032-T10-x4-GANS/rrdn-C4-D3-G32-G032-T10-x4_epoch299.hdf5',
        'name': 'rrdn-C4-D3-G32-G032-T10-x4_epoch299.hdf5',
    },
}


def make_model(arch_params, patch_size):
    """ Returns the model.

    Used to select the model.
    """
    return RRDN(arch_params, patch_size)


def get_network(weights):
    if weights in WEIGHTS_URLS.keys():
        arch_params = WEIGHTS_URLS[weights]['arch_params']
        url = WEIGHTS_URLS[weights]['url']
        name = WEIGHTS_URLS[weights]['name']
    else:
        raise ValueError('Available RRDN network weights: {}'.format(list(WEIGHTS_URLS.keys())))
    c_dim = 3
    kernel_size = 3
    return arch_params, c_dim, kernel_size, url, name


class RRDN(ImageModel):
    """Implementation of the Residual in Residual Dense Network for image super-scaling.

    The network is the one described in https://arxiv.org/abs/1809.00219 (Wang et al. 2018).

    Args:
        arch_params: dictionary, contains the network parameters C, D, G, G0, T, x.
        patch_size: integer or None, determines the input size. Only needed at
            training time; for prediction it is set to None.
        beta: float <= 1, scaling parameter for the residual connections.
        c_dim: integer, number of channels of the input image.
        kernel_size: integer, common kernel size for convolutions.
        upscaling: string, 'ups' or 'shuffle', determines which implementation
            of the upscaling layer to use.
        init_val: extreme values for the RandomUniform initializer.
        weights: string, if not empty, download and load pre-trained weights.
            Overrides other parameters.

    Attributes:
        C: integer, number of conv layers inside each residual dense block (RDB).
        D: integer, number of RDBs inside each Residual in Residual Dense Block (RRDB).
        T: integer, number of RRDBs.
        G: integer, number of convolution output filters inside the RDBs.
        G0: integer, number of output filters of each RDB.
        x: integer, the scaling factor.
        model: Keras model of the RRDN.
        name: name used to identify what upscaling network is used during training.
        model._name: identifies this network as the generator network in the
            compound model built by the trainer class.
    """

    def __init__(
        self, arch_params={}, patch_size=None, beta=0.2, c_dim=3, kernel_size=3, init_val=0.05, weights=''
    ):
        if weights:
            arch_params, c_dim, kernel_size, url, fname = get_network(weights)
        self.params = arch_params
        self.beta = beta
        self.c_dim = c_dim
        self.C = self.params['C']
        self.D = self.params['D']
        self.G = self.params['G']
        self.G0 = self.params['G0']
        self.T = self.params['T']
        self.scale = self.params['x']
        self.initializer = RandomUniform(minval=-init_val, maxval=init_val, seed=None)
        self.kernel_size = kernel_size
        self.patch_size = patch_size
        self.model = self._build_rdn()
        self.model._name = 'generator'
        self.name = 'rrdn'
        if weights:
            weights_path = tf.keras.utils.get_file(fname=fname, origin=url)
            self.model.load_weights(weights_path)

    def _dense_block(self, input_layer, d, t):
        """
        Implementation of the (Residual) Dense Block as in the paper
        Residual Dense Network for Image Super-Resolution (Zhang et al. 2018).

        Residuals are incorporated in the RRDB.
        d is an integer only used for naming. (d-th block)
        """
        x = input_layer
        for c in range(1, self.C + 1):
            F_dc = Conv2D(
                self.G,
                kernel_size=self.kernel_size,
                padding='same',
                kernel_initializer=self.initializer,
                name='F_%d_%d_%d' % (t, d, c),
            )(x)
            F_dc = Activation('relu', name='F_%d_%d_%d_Relu' % (t, d, c))(F_dc)
            x = concatenate([x, F_dc], axis=3, name='RDB_Concat_%d_%d_%d' % (t, d, c))
        # DIFFERENCE: in RDN a kernel size of 1 instead of 3 is used here
        x = Conv2D(
            self.G0,
            kernel_size=3,
            padding='same',
            kernel_initializer=self.initializer,
            name='LFF_%d_%d' % (t, d),
        )(x)
        return x

    def _RRDB(self, input_layer, t):
        """Residual in Residual Dense Block.

        t is integer, for naming of RRDB.
        beta is scalar.
        """
        # SUGGESTION: MAKE BETA LEARNABLE
        x = input_layer
        for d in range(1, self.D + 1):
            LFF = self._dense_block(x, d, t)
            LFF_beta = Lambda(lambda x: x * self.beta)(LFF)
            x = Add(name='LRL_%d_%d' % (t, d))([x, LFF_beta])
        x = Lambda(lambda x: x * self.beta)(x)
        x = Add(name='RRDB_%d_out' % (t))([input_layer, x])
        return x

    def _pixel_shuffle(self, input_layer):
        """ PixelShuffle implementation of the upscaling part. """
        x = Conv2D(
            self.c_dim * self.scale ** 2,
            kernel_size=3,
            padding='same',
            kernel_initializer=self.initializer,
            name='PreShuffle',
        )(input_layer)
        return Lambda(
            lambda x: tf.nn.depth_to_space(x, block_size=self.scale, data_format='NHWC'),
            name='PixelShuffle',
        )(x)

    def _build_rdn(self):
        LR_input = Input(shape=(self.patch_size, self.patch_size, 3), name='LR_input')
        pre_blocks = Conv2D(
            self.G0,
            kernel_size=self.kernel_size,
            padding='same',
            kernel_initializer=self.initializer,
            name='Pre_blocks_conv',
        )(LR_input)
        # DIFFERENCE: in RDN an extra convolution is present here
        for t in range(1, self.T + 1):
            if t == 1:
                x = self._RRDB(pre_blocks, t)
            else:
                x = self._RRDB(x, t)
        # DIFFERENCE: in RDN a conv with kernel size of 1 after a concat operation is used here
        post_blocks = Conv2D(
            self.G0,
            kernel_size=3,
            padding='same',
            kernel_initializer=self.initializer,
            name='post_blocks_conv',
        )(x)
        # Global Residual Learning
        GRL = Add(name='GRL')([post_blocks, pre_blocks])
        # Upscaling
        PS = self._pixel_shuffle(GRL)
        # Compose SR image
        SR = Conv2D(
            self.c_dim,
            kernel_size=self.kernel_size,
            padding='same',
            kernel_initializer=self.initializer,
            name='SR',
        )(PS)
        return Model(inputs=LR_input, outputs=SR)
[ "tensorflow.nn.depth_to_space", "tensorflow.keras.layers.Activation", "tensorflow.keras.initializers.RandomUniform", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Lambda", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.concatenate", "tensorflow.keras.utils.get_file", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.Input" ]
ISR/models/rrdn.py
[(84, 'tensorflow.keras.initializers.RandomUniform', 'RandomUniform', ([], {'minval': '(-init_val)', 'maxval': 'init_val', 'seed': 'None'}), False, 'from tensorflow.keras.initializers import RandomUniform\n'), (159, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(self.patch_size, self.patch_size, 3)', 'name': '"""LR_input"""'}), False, 'from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda\n'), (193, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'LR_input', 'outputs': 'SR'}), False, 'from tensorflow.keras.models import Model\n'), (91, 'tensorflow.keras.utils.get_file', 'tf.keras.utils.get_file', ([], {'fname': 'fname', 'origin': 'url'}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.keras.layers.concatenate', 'concatenate', (['[x, F_dc]'], {'axis': '(3)', 'name': "('RDB_Concat_%d_%d_%d' % (t, d, c))"}), False, 'from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda\n'), (116, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['self.G0'], {'kernel_size': '(3)', 'padding': '"""same"""', 'kernel_initializer': 'self.initializer', 'name': "('LFF_%d_%d' % (t, d))"}), False, 'from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda\n'), (139, 'tensorflow.keras.layers.Lambda', 'Lambda', (['(lambda x: x * self.beta)'], {}), False, 'from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda\n'), (140, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': "('RRDB_%d_out' % t)"}), False, 'from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda\n'), (146, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(self.c_dim * self.scale ** 2)'], {'kernel_size': '(3)', 'padding': '"""same"""', 'kernel_initializer': 'self.initializer', 'name': '"""PreShuffle"""'}), False, 'from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda\n'), (160, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['self.G0'], {'kernel_size': 'self.kernel_size', 'padding': '"""same"""', 'kernel_initializer': 'self.initializer', 'name': '"""Pre_blocks_conv"""'}), False, 'from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda\n'), (174, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['self.G0'], {'kernel_size': '(3)', 'padding': '"""same"""', 'kernel_initializer': 'self.initializer', 'name': '"""post_blocks_conv"""'}), False, 'from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda\n'), (182, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': '"""GRL"""'}), False, 'from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda\n'), (186, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['self.c_dim'], {'kernel_size': 'self.kernel_size', 'padding': '"""same"""', 'kernel_initializer': 'self.initializer', 'name': '"""SR"""'}), False, 'from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda\n'), (105, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['self.G'], {'kernel_size': 'self.kernel_size', 'padding': '"""same"""', 'kernel_initializer': 'self.initializer', 'name': "('F_%d_%d_%d' % (t, d, c))"}), False, 'from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda\n'), (112, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "('F_%d_%d_%d_Relu' % (t, d, c))"}), False, 'from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda\n'), (137, 
'tensorflow.keras.layers.Lambda', 'Lambda', (['(lambda x: x * self.beta)'], {}), False, 'from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda\n'), (138, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': "('LRL_%d_%d' % (t, d))"}), False, 'from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda\n'), (154, 'tensorflow.nn.depth_to_space', 'tf.nn.depth_to_space', (['x'], {'block_size': 'self.scale', 'data_format': '"""NHWC"""'}), True, 'import tensorflow as tf\n')]
tonyyang-svail/elasticdl
f7d266cf600c7205d68f59447abd55eff222ac2b
import abc

import numpy as np
import tensorflow as tf
from tensorflow.python.feature_column import feature_column_v2 as fc_lib

from elasticdl.python.common.constants import DistributionStrategy
from elasticdl.python.common.log_utils import default_logger as logger
from elasticdl.python.common.save_utils import CheckpointSaver
from elasticdl.python.elasticdl.feature_column.feature_column import (
    EmbeddingColumn,
    embedding_column,
)
from elasticdl.python.elasticdl.layers.embedding import Embedding
from elasticdl.python.keras.layers import SparseEmbedding
from elasticdl.python.ps.embedding_table import EmbeddingTable


def _get_trained_params_from_checkpoint(checkpoint_dir):
    """Get parameters from a checkpoint directory saved by ElasticDL"""
    parameters = CheckpointSaver.restore_params_from_checkpoint(
        checkpoint_dir, 0, 1
    )

    trained_params = parameters.non_embedding_params
    for name, table in parameters.embedding_params.items():
        trained_params[name] = table
    return trained_params


def _convert_embedding_table_to_numpy_array(embedding_table, embedding_shape):
    """Convert an embedding table to a np.ndarray which can be assigned
    to trainable weights in keras embedding layers.

    Args:
        embedding_table: An `EmbeddingTable` instance.
        embedding_shape: a tuple with two elements

    Returns:
        A np.ndarray
    """
    embedding_ids = list(embedding_table.embedding_vectors.keys())
    embedding_values = list(embedding_table.embedding_vectors.values())
    embedding_weights = np.zeros(embedding_shape)
    embedding_weights[embedding_ids] = embedding_values
    return embedding_weights


def _get_embedding_column_input_dim(embedding_column):
    if type(embedding_column) != fc_lib.EmbeddingColumn:
        raise Exception("The input should be EmbeddingColumn type.")

    default_num_buckets = (
        embedding_column.categorical_column.num_buckets
        if embedding_column._is_v2_column
        else embedding_column.categorical_column._num_buckets
    )  # pylint: disable=protected-access
    num_buckets = getattr(
        embedding_column.categorical_column, "num_buckets", default_num_buckets
    )
    return num_buckets


def _need_partition_embedding(embedding_object):
    """The embedding layer will be partitioned on multiple PS instances
    if the memory of the layer's trainable weights is bigger than 2MB.
""" if isinstance(embedding_object, tf.keras.layers.Layer): return _need_partition_embedding_from_shape_info( embedding_object.input_dim, embedding_object.output_dim ) elif isinstance(embedding_object, fc_lib.EmbeddingColumn): return _need_partition_embedding_from_shape_info( _get_embedding_column_input_dim(embedding_object), embedding_object.dimension, ) else: raise Exception( "Unsupported type {} for embedding".format(type(embedding_object)) ) def _need_partition_embedding_from_shape_info(input_dim, output_dim): EMBEDDING_SIZE_THRESHOLD_FOR_PARTITION = 2 * 1024 * 1024 # 2MB FLOAT32_BYTES = 4 weights_memory = input_dim * output_dim * FLOAT32_BYTES return weights_memory > EMBEDDING_SIZE_THRESHOLD_FOR_PARTITION def _replace_tf_embedding_column_with_edl(dense_features_layer): new_feature_columns = [] for column in dense_features_layer._feature_columns: if isinstance( column, fc_lib.EmbeddingColumn ) and _need_partition_embedding(column): logger.info( "Replace embedding_column {} from TensorFlow " "version to ElasticDL version".format(column.name) ) new_column = embedding_column( column.categorical_column, dimension=column.dimension ) new_column.set_dense_features_layer_name(dense_features_layer.name) new_feature_columns.append(new_column) else: new_feature_columns.append(column) return tf.keras.layers.DenseFeatures( feature_columns=new_feature_columns, name=dense_features_layer.name ) def _replace_edl_embedding_column_with_tf(dense_features_layer): new_feature_columns = [] for column in dense_features_layer._feature_columns: if isinstance(column, EmbeddingColumn): logger.info( "Replace embedding_column {} from ElasticDL " "version to TF version".format(column.name) ) new_column = fc_lib.embedding_column( column.categorical_column, dimension=column.dimension ) new_feature_columns.append(new_column) else: new_feature_columns.append(column) return tf.keras.layers.DenseFeatures( feature_columns=new_feature_columns, name=dense_features_layer.name ) class ModelHandler(metaclass=abc.ABCMeta): """Generate the model to train in ElasticDL for different distributed strategies and export trained model in ElasticDL to SavedModel. """ @abc.abstractmethod def get_model_to_train(self, model): """Generate a model to train in ElasticDL. Args: model: A native keras model instance. Returns: A keras model instance for ElasticDL training. """ @abc.abstractmethod def get_model_to_export(self, model, dataset): """Get the model which can be exported a SavedModel by tf.saved_model.save. Args: model: A keras model instance trained by ElasticDL and it may contains `elasticdl.layers.Embedding` layers. dataset: A `tf.data.Dataset` instance which has the same outputs as the training dataset. Returns: A keras model instance trained by ElasticDL. """ @classmethod def get_model_handler( cls, distribution_strategy=None, checkpoint_dir=None ): """Create a model handler to process the model for the distributed strategy. Args: distribution_strategy (string): distribution strategy name checkpoint_dir: Checkpoint directory to save model parametes during training. Return: ModelHandler subclass instance. """ if distribution_strategy == DistributionStrategy.PARAMETER_SERVER: return ParameterServerModelHandler(checkpoint_dir=checkpoint_dir) elif distribution_strategy == DistributionStrategy.ALLREDUCE: logger.warning( "Allreduce distribution strategy is not supported yet. " "Switching to use the default distribution strategy." 
            )
        return DefaultModelHandler()


class DefaultModelHandler(ModelHandler):
    """Return the origin model to train and export."""

    def get_model_to_train(self, model):
        return model

    def get_model_to_export(self, model, dataset):
        """
        Get model with inputs and trained parameters to export.
        """
        if not model.inputs:
            model._build_model_with_inputs(inputs=dataset, targets=None)
        return model


class ParameterServerModelHandler(ModelHandler):
    """Model handler for the parameter server strategy.
    For training, the handler replaces `tf.keras.layers.Embedding`
    layers with `elasticdl.layers.Embedding` layers.
    For saving the model, the handler restores the Keras model definition
    and pulls the trained parameters from the parameter server(s).
    """

    def __init__(self, checkpoint_dir=None):
        """
        Arguments:
            checkpoint_dir: A checkpoint directory to save all model
                parameters during training.
        """
        self._checkpoint_dir = checkpoint_dir

    def get_model_to_train(self, model):
        """Replace the tf.keras.layers.Embedding layer in the model with
        an elasticdl.layers.Embedding layer in ParameterServerStrategy.
        """
        # clear keras model session to avoid clutter from old models/layers.
        tf.keras.backend.clear_session()
        if type(model) == tf.keras.Sequential or model._is_graph_network:
            model = self._clone_model_with_edl_embedding(model)
        else:
            model = self._replace_attr_with_edl_embedding(model)
        return model

    def get_model_to_export(self, model, dataset):
        """Get the model which can be exported to a SavedModel
        by `tf.saved_model.save`.
        """
        model = self._restore_keras_model_def(model)
        if not model.inputs:
            # build model to add inputs and outputs that
            # can be consumed by tf-serving
            model._build_model_with_inputs(inputs=dataset, targets=None)

        checkpoint_dir = CheckpointSaver.get_valid_lastest_version_dir(
            self._checkpoint_dir
        )
        if checkpoint_dir is None:
            logger.warning("No available checkpoint to export model")
            return model

        trained_params = _get_trained_params_from_checkpoint(checkpoint_dir)
        for var in model.trainable_variables:
            if isinstance(trained_params[var.name], EmbeddingTable):
                embedding_params = _convert_embedding_table_to_numpy_array(
                    trained_params[var.name], var.shape
                )
                var.assign(embedding_params)
            else:
                var.assign(trained_params[var.name].numpy())
        return model

    def _restore_keras_model_def(self, model):
        """Restore the Keras model definition by replacing
        `elasticdl.layers.Embedding` layers with
        `tf.keras.layers.Embedding` layers.
        """
        # clear keras model session to avoid clutter from old models/layers.
        tf.keras.backend.clear_session()
        if (
            isinstance(model, tf.keras.models.Model)
            and not model._is_graph_network
        ):
            model = self._replace_attr_with_keras_embedding(model)
        else:
            model = self._clone_model_with_keras_embedding(model)
        return model

    @staticmethod
    def _clone_model_with_edl_embedding(model):
        """Clone a new model and replace keras embedding layers, including
        `tf.keras.layers.Embedding` and `SparseEmbedding`, with
        `elasticdl.layers.Embedding`
        """

        def _clone_function(layer):
            if type(layer) in [
                tf.keras.layers.Embedding,
                SparseEmbedding,
            ] and _need_partition_embedding(layer):
                logger.debug(
                    "Replace {} with {}".format(layer.name, Embedding)
                )
                # ElasticDL embedding only accepts a string-type initializer
                init = tf.keras.initializers.serialize(
                    layer.embeddings_initializer
                )["class_name"]
                if type(layer) == tf.keras.layers.Embedding:
                    embedding_layer = Embedding(
                        output_dim=layer.output_dim,
                        input_dim=layer.input_dim,
                        embeddings_initializer=init,
                        mask_zero=layer.mask_zero,
                        input_length=layer.input_length,
                        name=layer.name,
                    )
                else:
                    embedding_layer = Embedding(
                        output_dim=layer.output_dim,
                        input_dim=layer.input_dim,
                        embeddings_initializer=init,
                        name=layer.name,
                        combiner=layer.combiner,
                    )
                embedding_layer.set_embedding_weight_name(
                    layer.trainable_weights[0].name
                )
                return embedding_layer
            elif type(layer) == tf.keras.layers.DenseFeatures:
                return _replace_tf_embedding_column_with_edl(layer)
            return layer

        return tf.keras.models.clone_model(
            model, clone_function=_clone_function
        )

    @staticmethod
    def _clone_model_with_keras_embedding(model):
        """Clone a new model and replace the `elasticdl.layers.Embedding`
        layers with `tf.keras.layers.Embedding` or `SparseEmbedding` layers
        """

        def _clone_function(layer):
            if type(layer) == Embedding:
                logger.info(
                    "Replace elasticdl.layers.Embedding with a "
                    "keras embedding layer"
                )
                # The combiner is not None only for SparseEmbedding.
                if layer.combiner is not None:
                    embedding_layer = SparseEmbedding(
                        output_dim=layer.output_dim,
                        input_dim=layer.input_dim,
                        embeddings_initializer=layer.embeddings_initializer,
                        name=layer.name,
                        combiner=layer.combiner,
                    )
                else:
                    embedding_layer = tf.keras.layers.Embedding(
                        output_dim=layer.output_dim,
                        input_dim=layer.input_dim,
                        embeddings_initializer=layer.embeddings_initializer,
                        mask_zero=layer.mask_zero,
                        input_length=layer.input_length,
                        name=layer.name,
                    )
                return embedding_layer
            elif type(layer) == tf.keras.layers.DenseFeatures:
                return _replace_edl_embedding_column_with_tf(layer)
            return layer

        return tf.keras.models.clone_model(
            model, clone_function=_clone_function
        )

    @staticmethod
    def _replace_attr_with_edl_embedding(model):
        """Replace the keras embedding attributes in the model with
        `elasticdl.layers.Embedding` layers.
        """
        for name, value in model.__dict__.items():
            if type(
                value
            ) == tf.keras.layers.Embedding and _need_partition_embedding(
                value
            ):
                logger.info(
                    "Replace {} layer with "
                    "elasticdl.layers.Embedding".format(value)
                )
                initializer_name = tf.keras.initializers.serialize(
                    value.embeddings_initializer
                )["class_name"]
                embedding_layer = Embedding(
                    output_dim=value.output_dim,
                    input_dim=value.input_dim,
                    embeddings_initializer=initializer_name,
                    mask_zero=value.mask_zero,
                    input_length=value.input_length,
                    name=value.name,
                )
                # The weights of a subclass model are None, so we need to
                # create the weight name, which is "{layer_name}/embeddings:0"
                # in tf.keras.layers.Embedding.
                embedding_layer.set_embedding_weight_name(
                    value.name + "/embeddings:0"
                )
                setattr(model, name, embedding_layer)
            elif type(value) == SparseEmbedding and _need_partition_embedding(
                value
            ):
                logger.info(
                    "Replace {} layer with "
                    "elasticdl.layers.Embedding".format(value)
                )
                # Serialize the initializer in this branch too; otherwise
                # `initializer_name` would be undefined when a SparseEmbedding
                # attribute is encountered before any keras Embedding.
                initializer_name = tf.keras.initializers.serialize(
                    value.embeddings_initializer
                )["class_name"]
                embedding_layer = Embedding(
                    output_dim=value.output_dim,
                    input_dim=value.input_dim,
                    embeddings_initializer=initializer_name,
                    combiner=value.combiner,
                    name=value.name,
                )
                embedding_layer.set_embedding_weight_name(
                    value.name + "/embeddings:0"
                )
                setattr(model, name, embedding_layer)
            elif type(value) == tf.keras.layers.DenseFeatures:
                feature_layer = _replace_tf_embedding_column_with_edl(value)
                setattr(model, name, feature_layer)
        return model

    @staticmethod
    def _replace_attr_with_keras_embedding(model):
        """Replace the elasticdl.layers.Embedding attributes in the model
        with `tf.keras.layers.Embedding` or `SparseEmbedding` layers.
        """
        for name, value in model.__dict__.items():
            if type(value) == Embedding:
                # The combiner is not None only for SparseEmbedding.
                if value.combiner is not None:
                    logger.info("Replace elasticdl with SparseEmbedding")
                    embedding_layer = SparseEmbedding(
                        output_dim=value.output_dim,
                        input_dim=value.input_dim,
                        embeddings_initializer=value.embeddings_initializer,
                        combiner=value.combiner,
                    )
                else:
                    logger.info(
                        "Replace elasticdl with tf.keras.layers.Embedding"
                    )
                    embedding_layer = tf.keras.layers.Embedding(
                        output_dim=value.output_dim,
                        input_dim=value.input_dim,
                        embeddings_initializer=value.embeddings_initializer,
                        mask_zero=value.mask_zero,
                        input_length=value.input_length,
                    )
                setattr(model, name, embedding_layer)
            elif type(value) == tf.keras.layers.DenseFeatures:
                feature_layer = _replace_edl_embedding_column_with_tf(value)
                setattr(model, name, feature_layer)
        return model
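
# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustrative addition, not part of the original
# module), assuming a Sequential Keras model whose embedding is large enough
# (input_dim * output_dim * 4 bytes > 2MB) to be partitioned.
if __name__ == "__main__":
    seq_model = tf.keras.Sequential(
        [
            tf.keras.Input(shape=(10,)),
            # 100000 * 64 * 4 bytes is about 25.6MB > 2MB, so this layer
            # should be swapped for an elasticdl.layers.Embedding.
            tf.keras.layers.Embedding(input_dim=100000, output_dim=64),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(1),
        ]
    )
    handler = ModelHandler.get_model_handler(
        distribution_strategy=DistributionStrategy.PARAMETER_SERVER,
        checkpoint_dir="/tmp/ckpt",
    )
    train_model = handler.get_model_to_train(seq_model)
    print([type(layer).__name__ for layer in train_model.layers])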
[ "tensorflow.keras.layers.DenseFeatures", "tensorflow.keras.models.clone_model", "tensorflow.keras.layers.Embedding", "tensorflow.python.feature_column.feature_column_v2.embedding_column", "tensorflow.keras.initializers.serialize", "tensorflow.keras.backend.clear_session", "numpy.zeros" ]
elasticdl/python/common/model_handler.py
[(21, 'elasticdl.python.common.save_utils.CheckpointSaver.restore_params_from_checkpoint', 'CheckpointSaver.restore_params_from_checkpoint', (['checkpoint_dir', '(0)', '(1)'], {}), False, 'from elasticdl.python.common.save_utils import CheckpointSaver\n'), (44, 'numpy.zeros', 'np.zeros', (['embedding_shape'], {}), True, 'import numpy as np\n'), (110, 'tensorflow.keras.layers.DenseFeatures', 'tf.keras.layers.DenseFeatures', ([], {'feature_columns': 'new_feature_columns', 'name': 'dense_features_layer.name'}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.keras.layers.DenseFeatures', 'tf.keras.layers.DenseFeatures', ([], {'feature_columns': 'new_feature_columns', 'name': 'dense_features_layer.name'}), True, 'import tensorflow as tf\n'), (227, 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), True, 'import tensorflow as tf\n'), (244, 'elasticdl.python.common.save_utils.CheckpointSaver.get_valid_lastest_version_dir', 'CheckpointSaver.get_valid_lastest_version_dir', (['self._checkpoint_dir'], {}), False, 'from elasticdl.python.common.save_utils import CheckpointSaver\n'), (268, 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), True, 'import tensorflow as tf\n'), (323, 'tensorflow.keras.models.clone_model', 'tf.keras.models.clone_model', (['model'], {'clone_function': '_clone_function'}), True, 'import tensorflow as tf\n'), (362, 'tensorflow.keras.models.clone_model', 'tf.keras.models.clone_model', (['model'], {'clone_function': '_clone_function'}), True, 'import tensorflow as tf\n'), (102, 'elasticdl.python.elasticdl.feature_column.feature_column.embedding_column', 'embedding_column', (['column.categorical_column'], {'dimension': 'column.dimension'}), False, 'from elasticdl.python.elasticdl.feature_column.feature_column import EmbeddingColumn, embedding_column\n'), (123, 'tensorflow.python.feature_column.feature_column_v2.embedding_column', 'fc_lib.embedding_column', (['column.categorical_column'], {'dimension': 'column.dimension'}), True, 'from tensorflow.python.feature_column import feature_column_v2 as fc_lib\n'), (248, 'elasticdl.python.common.log_utils.default_logger.warning', 'logger.warning', (['"""No available checkpoint to export model"""'], {}), True, 'from elasticdl.python.common.log_utils import default_logger as logger\n'), (184, 'elasticdl.python.common.log_utils.default_logger.warning', 'logger.warning', (['"""Allreduce distribution strategy is not supported yet. 
Switching to use the default distribution strategy."""'], {}), True, 'from elasticdl.python.common.log_utils import default_logger as logger\n'), (335, 'elasticdl.python.common.log_utils.default_logger.info', 'logger.info', (['"""Replace embedding layer with elasticdl.layers.Embedding"""'], {}), True, 'from elasticdl.python.common.log_utils import default_logger as logger\n'), (384, 'elasticdl.python.elasticdl.layers.embedding.Embedding', 'Embedding', ([], {'output_dim': 'value.output_dim', 'input_dim': 'value.input_dim', 'embeddings_initializer': 'initializer_name', 'mask_zero': 'value.mask_zero', 'input_length': 'value.input_length', 'name': 'value.name'}), False, 'from elasticdl.python.elasticdl.layers.embedding import Embedding\n'), (294, 'tensorflow.keras.initializers.serialize', 'tf.keras.initializers.serialize', (['layer.embeddings_initializer'], {}), True, 'import tensorflow as tf\n'), (299, 'elasticdl.python.elasticdl.layers.embedding.Embedding', 'Embedding', ([], {'output_dim': 'layer.output_dim', 'input_dim': 'layer.input_dim', 'embeddings_initializer': 'init', 'mask_zero': 'layer.mask_zero', 'input_length': 'layer.input_length', 'name': 'layer.name'}), False, 'from elasticdl.python.elasticdl.layers.embedding import Embedding\n'), (308, 'elasticdl.python.elasticdl.layers.embedding.Embedding', 'Embedding', ([], {'output_dim': 'layer.output_dim', 'input_dim': 'layer.input_dim', 'embeddings_initializer': 'init', 'name': 'layer.name', 'combiner': 'layer.combiner'}), False, 'from elasticdl.python.elasticdl.layers.embedding import Embedding\n'), (341, 'elasticdl.python.keras.layers.SparseEmbedding', 'SparseEmbedding', ([], {'output_dim': 'layer.output_dim', 'input_dim': 'layer.input_dim', 'embeddings_initializer': 'layer.embeddings_initializer', 'name': 'layer.name', 'combiner': 'layer.combiner'}), False, 'from elasticdl.python.keras.layers import SparseEmbedding\n'), (349, 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', ([], {'output_dim': 'layer.output_dim', 'input_dim': 'layer.input_dim', 'embeddings_initializer': 'layer.embeddings_initializer', 'mask_zero': 'layer.mask_zero', 'input_length': 'layer.input_length', 'name': 'layer.name'}), True, 'import tensorflow as tf\n'), (381, 'tensorflow.keras.initializers.serialize', 'tf.keras.initializers.serialize', (['value.embeddings_initializer'], {}), True, 'import tensorflow as tf\n'), (406, 'elasticdl.python.elasticdl.layers.embedding.Embedding', 'Embedding', ([], {'output_dim': 'value.output_dim', 'input_dim': 'value.input_dim', 'embeddings_initializer': 'initializer_name', 'combiner': 'value.combiner', 'name': 'value.name'}), False, 'from elasticdl.python.elasticdl.layers.embedding import Embedding\n'), (431, 'elasticdl.python.common.log_utils.default_logger.info', 'logger.info', (['"""Replace elasticdl with SparseEmbedding"""'], {}), True, 'from elasticdl.python.common.log_utils import default_logger as logger\n'), (432, 'elasticdl.python.keras.layers.SparseEmbedding', 'SparseEmbedding', ([], {'output_dim': 'value.output_dim', 'input_dim': 'value.input_dim', 'embeddings_initializer': 'value.embeddings_initializer', 'combiner': 'value.combiner'}), False, 'from elasticdl.python.keras.layers import SparseEmbedding\n'), (439, 'elasticdl.python.common.log_utils.default_logger.info', 'logger.info', (['"""Replace elasticdl with tf.kerasl.layers.Embedding"""'], {}), True, 'from elasticdl.python.common.log_utils import default_logger as logger\n'), (442, 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', ([], 
{'output_dim': 'value.output_dim', 'input_dim': 'value.input_dim', 'embeddings_initializer': 'value.embeddings_initializer', 'mask_zero': 'value.mask_zero', 'input_length': 'value.input_length'}), True, 'import tensorflow as tf\n')]
jamesellis1999/qml
33c9d66712b36861dc098f9c789ba2c3ab897fdb
r""" Learning to learn with quantum neural networks ============================================== .. meta:: :property="og:description": Use a classical recurrent neural network to initilize the parameters of a variational quatum algorithm. :property="og:image": ../demonstrations/learning2learn/thumbnail.png .. related:: tutorial_qaoa_intro QAOA tutorial_qaoa_maxcut QAOA for MaxCut problem *Author: Stefano Mangini ([email protected]). Posted: 2 March 2021. Last updated: 15 Sep 2021.* In this demo we recreate the architecture proposed in *Learning to learn with quantum neural networks via classical neural networks* [#l2l]_, using **PennyLane** and **TensorFlow**. We use classical recurrent neural networks to assist the optimization of variational quantum algorithms. We start with a brief theoretical overview explaining the problem and the setup used to solve it. After that, we deep dive into the code to build a fully functioning model, ready to be further developed or customized for your own needs. Without further ado, let’s begin! Problem: Optimization of Variational Quantum Algorithms ------------------------------------------------------- Recently, a big effort by the quantum computing community has been devoted to the study of variational quantum algorithms (VQAs) which leverage quantum circuits with fixed shape and tunable parameters. The idea is similar to classical neural networks, where the weights of the network are optimized during training. Similarly, once the shape of the variational quantum circuit is chosen — something that is very difficult and sensitive to the particular task at hand — its tunable parameters are optimized iteratively by minimizing a cost (or loss) function, which measures how good the quantum algorithm is performing (see [#vqas]_ for a thorough overview on VQAs). A major challenge for VQAs relates to the optimization of tunable parameters, which was shown to be a very hard task [#barren]_, [#vqas]_ . Parameter initialization plays a key role in this scenario, since initializing the parameters in the proximity of an optimal solution leads to faster convergence and better results. Thus, a good initialization strategy is crucial to promote the convergence of local optimizers to local extrema and to select reasonably good local minima. By local optimizer, we mean a procedure that moves from one solution to another by small (local) changes in parameter space. These are opposed to global search methods, which take into account large sections of parameter space to propose a new solution. One such strategy could come from the classical machine learning literature. Solution: Classical Recurrent Neural Networks ------------------------------------------------------------------ By building on results from the *meta-learning* literature in machine learning, authors in [#l2l]_ propose to use a Recurrent Neural Network (RNN) as a black-box controller to optimize the parameters of variational quantum algorithms, as shown in the figure below. The cost function used is the expectation value :math:`\langle H \rangle_{\boldsymbol{\theta}} = \langle \psi_{\boldsymbol{\theta}} | H | \psi_{\boldsymbol{\theta}}\rangle` of a Hamiltonian :math:`H` with respect to the parametrized state :math:`|\psi_\boldsymbol{\theta}\rangle` evolved by applying the variational quantum circuit to the zero state :math:`|00\cdots0\rangle`. .. 
figure:: ../demonstrations/learning2learn/HybridLSTM.png :align: center :width: 100% Given parameters :math:`\boldsymbol{\theta}_{t-1}` of the variational quantum circuit, the cost function :math:`y_{t-1}`, and the hidden state of the classical network :math:`\boldsymbol{h}_{t-1}` at the previous time step, the recurrent neural network proposes a new guess for the parameters :math:`\boldsymbol{\theta}_t`, which are then fed into the quantum computer to evaluate the cost function :math:`y_t`. By repeating this cycle a few times, and by training the weights of the recurrent neural network to minimize the loss function :math:`y_t`, a good initialization heuristic is found for the parameters :math:`\boldsymbol{\theta}` of the variational quantum circuit. At a given iteration, the RNN receives as input the previous cost function :math:`y_t` evaluated on the quantum computer, where :math:`y_t` is the estimate of :math:`\langle H\rangle_{t}`, as well as the parameters :math:`\boldsymbol{\theta}_t` for which the variational circuit was evaluated. The RNN at this time step also receives information stored in its internal hidden state from the previous time step :math:`\boldsymbol{h}_t`. The RNN itself has trainable parameters :math:`\phi`, and hence it applies the parametrized mapping: .. math:: \boldsymbol{h}_{t+1}, \boldsymbol{\theta}_{t+1} = \text{RNN}_{\phi}(\boldsymbol{h}_{t}, \boldsymbol{\theta}_{t}, y_{t}), which generates a new suggestion for the variational parameters as well as a new internal state. Upon training the weights :math:`\phi`, the RNN eventually learns a good heuristic to suggest optimal parameters for the quantum circuit. Thus, by training on a dataset of graphs, the RNN can subsequently be used to provide suggestions for starting points on new graphs! We are not directly optimizing the variational parameters of the quantum circuit, but instead, we let the RNN figure out how to do that. In this sense, we are learning (training the RNN) how to learn (how to optimize a variational quantum circuit). **VQAs in focus: QAOA for MaxCut** There are multiple VQAs for which this hybrid training routine could be used, some of them directly analyzed in [#l2l]_. In the following, we focus on one such example, the Quantum Approximate Optimization Algorithm (QAOA) for solving the MaxCut problem [#maxcutwiki]_. Thus, referring to the picture above, the shape of the variational circuit is the one dictated by the QAOA ansatz, and such a quantum circuit is used to evaluate the cost Hamiltonian :math:`H` of the MaxCut problem. Check out this great tutorial on how to use QAOA for solving graph problems: https://pennylane.ai/qml/demos/tutorial_qaoa_intro.html .. note:: Running the tutorial (excluding the Appendix) requires approx. ~13m. """ ###################################################################### # **Importing the required packages** # # # During this tutorial, we will use # **PennyLane** for executing quantum circuits and for integrating # seamlessly with **TensorFlow**, which will be used for creating the RNN. 
#

# Quantum Machine Learning
import pennylane as qml
from pennylane import qaoa

# Classical Machine Learning
import tensorflow as tf

# Generation of graphs
import networkx as nx

# Standard Python libraries
import numpy as np
import matplotlib.pyplot as plt
import random

# Fix the seed for reproducibility, which affects all random functions in this demo
random.seed(42)
np.random.seed(42)
tf.random.set_seed(42)

######################################################################
# Generation of training data: graphs
# -----------------------------------
#
# The first step is to gather or
# create a good dataset that will be used to train the model
# and test its performance. In our case, we are analyzing MaxCut,
# which deals with the problem of finding a good binary partition
# of nodes in a graph such that the number of edges *cut* by such a
# separation is maximized. We start by generating some
# random graphs :math:`G_{n,p}` where:
#
# * :math:`n` is the number of nodes in each graph,
# * :math:`p` is the probability of having an edge between two nodes.
#


def generate_graphs(n_graphs, n_nodes, p_edge):
    """Generate a list containing random graphs generated by Networkx."""

    datapoints = []
    for _ in range(n_graphs):
        random_graph = nx.gnp_random_graph(n_nodes, p=p_edge)
        datapoints.append(random_graph)
    return datapoints


######################################################################
# An example of a random graph generated using the function
# ``generate_graphs`` just defined:
#

# Define parameters of the graphs
n_graphs = 20
n_nodes = 7
p_edge = 3.0 / n_nodes

graphs = generate_graphs(n_graphs, n_nodes, p_edge)

nx.draw(graphs[0])

######################################################################
# .. figure:: ../demonstrations/learning2learn/rendered_Graph0.png
#     :align: center
#     :width: 70%
#     :target: javascript:void(0);
#

######################################################################
# Variational Quantum Circuit: QAOA
# ---------------------------------
#
# Now that we have a dataset, we move on to creating the QAOA quantum
# circuits using PennyLane’s built-in sub-packages. In particular, using
# PennyLane’s ``qaoa`` module, we will be able to create fully functioning
# quantum circuits for the MaxCut problem, with very few lines of code.
#


def qaoa_from_graph(graph, n_layers=1):
    """Uses QAOA to create a cost Hamiltonian for the MaxCut problem."""

    # Number of qubits (wires) equal to the number of nodes in the graph
    wires = range(len(graph.nodes))

    # Define the structure of the cost and mixer subcircuits for the MaxCut problem
    cost_h, mixer_h = qaoa.maxcut(graph)

    # Defines a layer of the QAOA ansatz from the cost and mixer Hamiltonians
    def qaoa_layer(gamma, alpha):
        qaoa.cost_layer(gamma, cost_h)
        qaoa.mixer_layer(alpha, mixer_h)

    # Creates the actual quantum circuit for the QAOA algorithm
    def circuit(params, **kwargs):
        for w in wires:
            qml.Hadamard(wires=w)
        qml.layer(qaoa_layer, n_layers, params[0], params[1])
        return qml.expval(cost_h)

    # Evaluates the cost Hamiltonian
    def hamiltonian(params, **kwargs):
        """Evaluate the cost Hamiltonian, given the angles and the graph."""

        # We set the default.qubit.tf device for seamless integration with TensorFlow
        dev = qml.device("default.qubit.tf", wires=len(graph.nodes))

        # This qnode evaluates the expectation value of the cost hamiltonian operator
        cost = qml.QNode(circuit, dev, interface="tf", diff_method="backprop")

        return cost(params)

    return hamiltonian


######################################################################
# Before continuing, let’s see how to use these functions.
#

# Create an instance of a QAOA circuit given a graph.
cost = qaoa_from_graph(graph=graphs[0], n_layers=1)

# Since we use only one layer in QAOA, params have the shape 2 x 1,
# in the form [[gamma], [alpha]].
x = tf.Variable([[0.5], [0.5]], dtype=tf.float32)

# Evaluate the QAOA instance just created with some angles.
print(cost(x))

##############################################################################
# .. rst-class:: sphx-glr-script-out
#
#  Out:
#
#  .. code-block:: none
#
#     <tf.Tensor: shape=(1,), dtype=float64, numpy=array([-3.19326796])>
#

######################################################################
# Recurrent Neural Network: LSTM
# ------------------------------
#
# So far, we have defined the machinery which lets us build the QAOA
# algorithm for solving the MaxCut problem.
# Now we wish to implement the Recurrent Neural Network architecture
# explained previously. As proposed in the original
# paper, we will build a custom model of a Long Short-Term
# Memory (LSTM) network, capable of handling the hybrid data passing between
# classical and quantum procedures. For this task, we will use ``Keras``
# and ``TensorFlow``.
#

######################################################################
# First of all, let’s define the elemental building block of the model,
# an LSTM cell (see `TensorFlow
# documentation <https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTMCell>`__
# for further details).
#

# Set the number of layers in the QAOA ansatz.
# The higher the better in terms of performance, but it also gets more
# computationally expensive. For simplicity, we stick to the single-layer case.
n_layers = 1

# Define a single LSTM cell.
# The cell has two units per layer since each layer in the QAOA ansatz
# makes use of two parameters.
cell = tf.keras.layers.LSTMCell(2 * n_layers)


######################################################################
# Using the ``qaoa_from_graph`` function, we create a list
# ``graph_cost_list`` containing the cost functions of a set of graphs.
# You can see this as a preprocessing step of the data.
#

# We create the QAOA MaxCut cost functions of some graphs
graph_cost_list = [qaoa_from_graph(g) for g in graphs]


######################################################################
# At this stage, we seek to reproduce the recurrent behavior depicted in
# the picture above, outlining the functioning of an RNN as a black-box
# optimizer. We do so by defining two functions:
#
# * ``rnn_iteration``: accounts for the computations happening on a single time step in the figure.
#   It performs the calculation on the CPU and evaluates the quantum circuit on the QPU to obtain
#   the loss function for the current parameters.
#
# * ``recurrent_loop``: as the name suggests, it accounts for the creation of the recurrent loop
#   of the model. In particular, it makes consecutive calls to the ``rnn_iteration`` function,
#   where the outputs of a previous call are fed as inputs of the next call.
#


def rnn_iteration(inputs, graph_cost, n_layers=1):
    """Perform a single time step in the computational graph of the custom RNN."""

    # Unpack the input list containing the previous cost, parameters,
    # and hidden states (denoted as 'h' and 'c').
    prev_cost = inputs[0]
    prev_params = inputs[1]
    prev_h = inputs[2]
    prev_c = inputs[3]

    # Concatenate the previous parameters and previous cost to create new input
    new_input = tf.keras.layers.concatenate([prev_cost, prev_params])

    # Call the LSTM cell, which outputs new values for the parameters along
    # with new internal states h and c
    new_params, [new_h, new_c] = cell(new_input, states=[prev_h, prev_c])

    # Reshape the parameters to correctly match those expected by PennyLane
    _params = tf.reshape(new_params, shape=(2, n_layers))

    # Evaluate the cost using new angles
    _cost = graph_cost(_params)

    # Reshape to be consistent with other tensors
    new_cost = tf.reshape(tf.cast(_cost, dtype=tf.float32), shape=(1, 1))

    return [new_cost, new_params, new_h, new_c]


def recurrent_loop(graph_cost, n_layers=1, intermediate_steps=False):
    """Creates the recurrent loop for the Recurrent Neural Network."""

    # Initialize all inputs (cost, parameters, hidden states) as zeros.
    initial_cost = tf.zeros(shape=(1, 1))
    initial_params = tf.zeros(shape=(1, 2 * n_layers))
    initial_h = tf.zeros(shape=(1, 2 * n_layers))
    initial_c = tf.zeros(shape=(1, 2 * n_layers))

    # We perform five consecutive calls to 'rnn_iteration', thus creating the
    # recurrent loop. More iterations lead to better results, at the cost of
    # more computationally intensive simulations.
    out0 = rnn_iteration([initial_cost, initial_params, initial_h, initial_c], graph_cost)
    out1 = rnn_iteration(out0, graph_cost)
    out2 = rnn_iteration(out1, graph_cost)
    out3 = rnn_iteration(out2, graph_cost)
    out4 = rnn_iteration(out3, graph_cost)

    # This cost function takes into account the cost from all iterations,
    # but using different weights.
    loss = tf.keras.layers.average(
        [0.1 * out0[0], 0.2 * out1[0], 0.3 * out2[0], 0.4 * out3[0], 0.5 * out4[0]]
    )

    if intermediate_steps:
        return [out0[1], out1[1], out2[1], out3[1], out4[1], loss]
    else:
        return loss


######################################################################
# **The cost function**
#
#
# A key part in the ``recurrent_loop`` function is given by the
# definition of the variable ``loss``. In order to drive the learning
# procedure of the weights in the LSTM cell, a cost function is needed.
# While in the original paper the authors suggest using a measure called
# *observed improvement*, for simplicity here we use an easier cost
# function :math:`\cal{L}(\phi)` defined as:
#
# .. math:: \cal{L}(\phi) = {\bf w} \cdot {\bf y}_t(\phi),
#
# where :math:`{\bf y}_t(\phi) = (y_1, \cdots, y_5)` contains the
# Hamiltonian cost functions from all iterations, and :math:`{\bf w}` are
# just some coefficients weighting the different steps in the recurrent
# loop. In this case, we used :math:`{\bf w}=\frac{1}{5} (0.1, 0.2, 0.3, 0.4, 0.5)`,
# to give more importance to the last steps rather than the initial steps.
# Intuitively, in this way the RNN is freer (low coefficient) to
# explore a larger portion of parameter space during the first steps of
# optimization, while it is constrained (high coefficient) to select an
# optimal solution towards the end of the procedure. Note that one could
# also use just the final cost function from the last iteration to drive
# the training procedure of the RNN. However, using values also from
# intermediate steps allows for a smoother suggestion routine, since even
# non-optimal parameter suggestions from early steps are penalized using
# :math:`\cal{L}(\phi)`.
#

######################################################################
# **Training**
#
#
# Now all the cards are on the table and we just need to prepare a
# training routine and then run it!
#
# First of all, let’s wrap a single gradient descent step inside a custom
# function ``train_step``.
#


def train_step(graph_cost):
    """Single optimization step in the training procedure."""

    with tf.GradientTape() as tape:
        # Evaluates the cost function
        loss = recurrent_loop(graph_cost)

    # Evaluates gradients, cell is the LSTM cell defined previously
    grads = tape.gradient(loss, cell.trainable_weights)

    # Apply gradients and update the weights of the LSTM cell
    opt.apply_gradients(zip(grads, cell.trainable_weights))
    return loss


######################################################################
# We are now ready to start the training. In particular, we will perform
# stochastic gradient descent in the parameter space of the weights of the
# LSTM cell. For each graph in the training set, we evaluate gradients and
# update the weights accordingly. Then, we repeat this procedure multiple
# times (epochs).
#
# .. note::
#     Be careful when using bigger datasets or training for more
#     epochs; this may take a while to execute.
#

# Select an optimizer
opt = tf.keras.optimizers.Adam(learning_rate=0.1)

# Set the number of training epochs
epochs = 5

for epoch in range(epochs):
    print(f"Epoch {epoch+1}")
    total_loss = np.array([])
    for i, graph_cost in enumerate(graph_cost_list):
        loss = train_step(graph_cost)
        total_loss = np.append(total_loss, loss.numpy())
        # Log every 5 batches.
        if i % 5 == 0:
            print(f" > Graph {i+1}/{len(graph_cost_list)} - Loss: {loss[0][0]}")
    print(f" >> Mean Loss during epoch: {np.mean(total_loss)}")

##############################################################################
# .. rst-class:: sphx-glr-script-out
#
#  Out:
#
#  ..
code-block:: none # # Epoch 1 # > Graph 1/20 - Loss: -1.6641689538955688 # > Graph 6/20 - Loss: -1.4186843633651733 # > Graph 11/20 - Loss: -1.3757232427597046 # > Graph 16/20 - Loss: -1.294339656829834 # >> Mean Loss during epoch: -1.7352586269378663 # Epoch 2 # > Graph 1/20 - Loss: -2.119091749191284 # > Graph 6/20 - Loss: -1.4789190292358398 # > Graph 11/20 - Loss: -1.3779840469360352 # > Graph 16/20 - Loss: -1.2963457107543945 # >> Mean Loss during epoch: -1.8252217948436738 # Epoch 3 # > Graph 1/20 - Loss: -2.1322619915008545 # > Graph 6/20 - Loss: -1.459418535232544 # > Graph 11/20 - Loss: -1.390620470046997 # > Graph 16/20 - Loss: -1.3165746927261353 # >> Mean Loss during epoch: -1.8328069806098939 # Epoch 4 # > Graph 1/20 - Loss: -2.1432175636291504 # > Graph 6/20 - Loss: -1.476362943649292 # > Graph 11/20 - Loss: -1.3938289880752563 # > Graph 16/20 - Loss: -1.3140206336975098 # >> Mean Loss during epoch: -1.8369774043560028 # Epoch 5 # > Graph 1/20 - Loss: -2.1429405212402344 # > Graph 6/20 - Loss: -1.477513074874878 # > Graph 11/20 - Loss: -1.3909202814102173 # > Graph 16/20 - Loss: -1.315887689590454 # >> Mean Loss during epoch: -1.8371947884559632 # ###################################################################### # As you can see, the Loss for each graph keeps decreasing across epochs, # indicating that the training routine is working correctly. # ###################################################################### # Results # -------------------- # # Let’s see how to use the optimized RNN as an initializer for the angles # in the QAOA algorithm. # # First, we pick a new graph, not present in the training dataset: # new_graph = nx.gnp_random_graph(7, p=3 / 7) new_cost = qaoa_from_graph(new_graph) nx.draw(new_graph) ###################################################################### # .. figure:: ../demonstrations/learning2learn/rendered_Graph1.png # :align: center # :width: 70% # :target: javascript:void(0); # ###################################################################### # Then we apply the trained RNN to this new graph, saving intermediate # results coming from all the recurrent iterations in the network. # # Apply the RNN (be sure that training was performed) res = recurrent_loop(new_cost, intermediate_steps=True) # Extract all angle suggestions start_zeros = tf.zeros(shape=(2 * n_layers, 1)) guess_0 = res[0] guess_1 = res[1] guess_2 = res[2] guess_3 = res[3] guess_4 = res[4] final_loss = res[5] # Wrap them into a list guesses = [start_zeros, guess_0, guess_1, guess_2, guess_3, guess_4] # Losses from the hybrid LSTM model lstm_losses = [new_cost(tf.reshape(guess, shape=(2, n_layers))) for guess in guesses] ###################################################################### # **Plot of the loss function** # # # We can plot these losses to see how well the RNN proposes new guesses for # the parameters. # fig, ax = plt.subplots() plt.plot(lstm_losses, color="blue", lw=3, ls="-.", label="LSTM") plt.grid(ls="--", lw=2, alpha=0.25) plt.ylabel("Cost function", fontsize=12) plt.xlabel("Iteration", fontsize=12) plt.legend() ax.set_xticks([0, 5, 10, 15, 20]); plt.show() ###################################################################### # .. figure:: ../demonstrations/learning2learn/rendered_LossLSTM.png # :align: center # :width: 70% # :target: javascript:void(0); # # That’s remarkable! The RNN learned to propose new parameters such that # the MaxCut cost is minimized very rapidly: in just a few iterations the # loss reaches a minimum. 
Actually, it takes just a single step for the LSTM
# to find a very good minimum. In fact, due to the recurrent loop, the loss
# in each time step is directly dependent on the previous ones, with the first
# iteration thus having a lot of influence on the loss function defined above.
# Changing the loss function, for example giving less importance to initial
# steps and just focusing on the last one, leads to different optimization
# behaviors, but with the same final results.
#

######################################################################
# **Comparison with standard Stochastic Gradient Descent (SGD)**
#
# How well does this method compare with
# standard optimization techniques, for example, leveraging Stochastic
# Gradient Descent (SGD) to optimize the parameters in the QAOA?
#
# Let’s check it out.
#

# Parameters are randomly initialized
x = tf.Variable(np.random.rand(2, 1))

# We set the optimizer to be a Stochastic Gradient Descent
opt = tf.keras.optimizers.SGD(learning_rate=0.01)
step = 15

# Training process
steps = []
sgd_losses = []
for _ in range(step):
    with tf.GradientTape() as tape:
        loss = new_cost(x)

    steps.append(x)
    sgd_losses.append(loss)

    gradients = tape.gradient(loss, [x])
    opt.apply_gradients(zip(gradients, [x]))
    print(f"Step {_+1} - Loss = {loss}")

print(f"Final cost function: {new_cost(x).numpy()}\nOptimized angles: {x.numpy()}")

##############################################################################
# .. rst-class:: sphx-glr-script-out
#
#  Out:
#
#  .. code-block:: none
#
#      Step 1 - Loss = [-4.1700805]
#      Step 2 - Loss = [-4.67503588]
#      Step 3 - Loss = [-5.09949909]
#      Step 4 - Loss = [-5.40388533]
#      Step 5 - Loss = [-5.59529203]
#      Step 6 - Loss = [-5.70495197]
#      Step 7 - Loss = [-5.7642561]
#      Step 8 - Loss = [-5.79533198]
#      Step 9 - Loss = [-5.81138752]
#      Step 10 - Loss = [-5.81966529]
#      Step 11 - Loss = [-5.82396722]
#      Step 12 - Loss = [-5.82624537]
#      Step 13 - Loss = [-5.82749126]
#      Step 14 - Loss = [-5.82820626]
#      Step 15 - Loss = [-5.82864379]
#      Final cost function: -5.828932361904984
#      Optimized angles: [[ 0.5865477 ]
#       [-0.3228858]]
#

fig, ax = plt.subplots()

plt.plot(sgd_losses, color="orange", lw=3, label="SGD")
plt.plot(lstm_losses, color="blue", lw=3, ls="-.", label="LSTM")

plt.grid(ls="--", lw=2, alpha=0.25)
plt.legend()
plt.ylabel("Cost function", fontsize=12)
plt.xlabel("Iteration", fontsize=12)
ax.set_xticks([0, 5, 10, 15, 20])
plt.show()

######################################################################
# .. figure:: ../demonstrations/learning2learn/rendered_LossConfrontation.png
#     :align: center
#     :width: 70%
#     :target: javascript:void(0);
#

######################################################################
# *Hurray!* 🎉🎉
#
# As is clear from the picture, the RNN reaches a better minimum in
# fewer iterations than the standard SGD.
# Thus, as the authors suggest, the trained RNN can
# be used for a few iterations at the start of the training procedure to
# initialize the parameters of the quantum circuit close to an optimal
# solution. Then, a standard optimizer like SGD can be used to
# fine-tune the proposed parameters and reach even better solutions.
# While in this small-scale example the benefits of using an LSTM to
# initialize parameters may seem modest, on more complicated instances
# and problems it can make a big difference, since, with a random
# initialization of the parameters, standard local optimizers may
# encounter problems finding a good minimization direction (for further
# details, see [#l2l]_, [#vqas]_).
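#

######################################################################
# As a final illustration (a sketch added here, not part of the original
# demo), we can make this hybrid strategy explicit: warm-start SGD from
# the last parameters proposed by the trained LSTM (``guess_4`` from
# above) and fine-tune from there.
#

# Start from the final LSTM suggestion instead of a random initialization
x = tf.Variable(tf.reshape(guess_4, shape=(2, n_layers)))

opt = tf.keras.optimizers.SGD(learning_rate=0.01)

for i in range(10):
    with tf.GradientTape() as tape:
        loss = new_cost(x)
    gradients = tape.gradient(loss, [x])
    opt.apply_gradients(zip(gradients, [x]))

print(f"Fine-tuned cost function: {new_cost(x).numpy()}")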
#

######################################################################
# Final remarks
# -----------------
#
# In this demo, we saw how to use a recurrent neural network
# as a black-box optimizer to initialize the parameters in
# a variational quantum circuit close to an optimal solution.
# We connected MaxCut QAOA quantum circuits in PennyLane
# with an LSTM built with TensorFlow, and we used a custom hybrid training
# routine to optimize the whole network.
#
# This architecture proved itself to be a good candidate for the
# initialization problem of Variational Quantum Algorithms, since it
# reaches good optimal solutions in very few iterations. Besides, the
# architecture is quite general since the same machinery can be used for
# graphs having a generic number of nodes (see "Generalization Performances"
# in the Appendix).
#
# **What’s next?**
#
#
# But the story does not end here. There are multiple ways this work could
# be improved. Here are a few:
#
# * Use the proposed architecture for VQAs other than QAOA for MaxCut.
#   You can check the paper [#l2l]_ to get some inspiration.
# * Scale up the simulation, using bigger graphs and longer recurrent
#   loops.
# * While working correctly, the training routine is quite basic and it
#   could be improved, for example, by implementing batch learning or a
#   stopping criterion. Also, one could implement the
#   *observed improvement* loss function, as used in the original paper
#   [#l2l]_.
# * Depending on the problem, you may wish to transform the functions
#   ``rnn_iteration`` and ``recurrent_loop`` to actual ``Keras Layers``
#   and ``Models``. This way, by compiling the model before the training
#   takes place, ``TensorFlow`` can create the computational graph of the
#   model and train more efficiently. You can find
#   some ideas below to start working on it.
#
# If you're interested, in the Appendix below you can find some more details
# and insights about this model. Go check it out!
#
# If you have any doubts, or wish to discuss the project, don’t
# hesitate to contact me; I’ll be very happy to help you as much as I can
# 😁
#
# Have a great quantum day!
#

######################################################################
# References
# ----------
#
# .. [#l2l]
#
#     Verdon G., Broughton M., McClean J. R., Sung K. J., Babbush R.,
#     Jiang Z., Neven H. and Mohseni M. "Learning to learn with quantum neural networks via classical neural
#     networks", `arXiv:1907.05415 <https://arxiv.org/abs/1907.05415>`__ (2019).
#
# .. [#vqas]
#
#     Cerezo M., Arrasmith A., Babbush R., Benjamin S. C., Endo S.,
#     Fujii K., McClean J. R., Mitarai K., Yuan X., Cincio L. and Coles P.
#     J. "Variational Quantum Algorithms", `arXiv:2012.09265 <https://arxiv.org/abs/2012.09265>`__ (2020).
#
# .. [#barren]
#
#     McClean J. R., Boixo S., Smelyanskiy V. N. et al.
#     "Barren plateaus in quantum neural network training landscapes",
#     `Nat Commun 9, 4812 <https://www.nature.com/articles/s41467-018-07090-4>`__ (2018).
#
# .. [#maxcutwiki]
#
#     MaxCut problem: https://en.wikipedia.org/wiki/Maximum_cut.
#

######################################################################
# Appendix
# -----------------
#
# In this appendix you can find further details about the Learning to Learn approach
# introduced in this tutorial.
#
# Generalization performances
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# A very interesting feature of this model is that it can be
# straightforwardly applied to graphs having a different number of nodes.
# In fact, until now our analysis focused only on graphs with the same number
# of nodes for ease of explanation, but there is no actual restriction in
# this respect. The same machinery works fine for any graph, since the
# number of QAOA parameters is only dependent on the number of layers in
# the ansatz, and not on the number of qubits (equal to the number of
# nodes in the graph) in the quantum circuit.
#
# Thus, we might want to challenge our model to learn a good
# initialization heuristic for a non-specific graph, with an arbitrary
# number of nodes. For this purpose, let’s create a training dataset
# containing graphs with a different number of nodes :math:`n`, taken in
# the interval :math:`n \in [7,9]` (that is, our dataset now contains
# graphs having either 7, 8, or 9 nodes).
#

cell = tf.keras.layers.LSTMCell(2 * n_layers)

g7 = generate_graphs(5, 7, 3 / 7)
g8 = generate_graphs(5, 8, 3 / 7)
g9 = generate_graphs(5, 9, 3 / 7)

gs = g7 + g8 + g9
gs_cost_list = [qaoa_from_graph(g) for g in gs]

# Shuffle the dataset
import random

random.seed(1234)
random.shuffle(gs_cost_list)


######################################################################
# So far, we have created an equally balanced dataset that contains graphs with
# a different number of nodes. We now use this dataset to train the LSTM.
#

# Select an optimizer
opt = tf.keras.optimizers.Adam(learning_rate=0.1)

# Set the number of training epochs
epochs = 3

for epoch in range(epochs):
    print(f"Epoch {epoch+1}")
    total_loss = np.array([])
    for i, graph_cost in enumerate(gs_cost_list):
        loss = train_step(graph_cost)
        total_loss = np.append(total_loss, loss.numpy())
        # Log every 5 batches.
        if i % 5 == 0:
            print(f" > Graph {i+1}/{len(gs_cost_list)} - Loss: {loss}")
    print(f" >> Mean Loss during epoch: {np.mean(total_loss)}")

##############################################################################
# .. rst-class:: sphx-glr-script-out
#
#  Out:
#
#  .. code-block:: none
#
#      Epoch 1
#       > Graph 1/15 - Loss: [[-1.4876363]]
#       > Graph 6/15 - Loss: [[-1.8590403]]
#       > Graph 11/15 - Loss: [[-1.7644017]]
#       >> Mean Loss during epoch: -1.9704322338104248
#      Epoch 2
#       > Graph 1/15 - Loss: [[-1.8650053]]
#       > Graph 6/15 - Loss: [[-1.9578737]]
#       > Graph 11/15 - Loss: [[-1.8377447]]
#       >> Mean Loss during epoch: -2.092947308222453
#      Epoch 3
#       > Graph 1/15 - Loss: [[-1.9009062]]
#       > Graph 6/15 - Loss: [[-1.9726204]]
#       > Graph 11/15 - Loss: [[-1.8668792]]
#       >> Mean Loss during epoch: -2.1162660201390584
#

######################################################################
# Let’s check if this hybrid model eventually learned a good heuristic to
# propose new updates for the parameters in the QAOA ansatz of the MaxCut
# problem.
#
# To do so, we consider a new graph. In particular, we can take a
# graph with 10 nodes, which is something that the recurrent network has
# not seen before.
#

new_graph = nx.gnp_random_graph(10, p=3 / 7)
new_cost = qaoa_from_graph(new_graph)

nx.draw(new_graph)

######################################################################
# .. figure:: ../demonstrations/learning2learn/rendered_Graph10.png
#     :align: center
#     :width: 70%
#     :target: javascript:void(0);
#

######################################################################
# We call the trained recurrent LSTM on this graph, saving not only the
# last, but all intermediate guesses for the parameters.
#

res = recurrent_loop(new_cost, intermediate_steps=True)

# Extract all angle suggestions
start_zeros = tf.zeros(shape=(2 * n_layers, 1))
guess_0 = res[0]
guess_1 = res[1]
guess_2 = res[2]
guess_3 = res[3]
guess_4 = res[4]
final_loss = res[5]

# Wrap them into a list
guesses = [start_zeros, guess_0, guess_1, guess_2, guess_3, guess_4]

# Losses from the hybrid LSTM model
lstm_losses = [new_cost(tf.reshape(guess, shape=(2, n_layers))) for guess in guesses]

fig, ax = plt.subplots()

plt.plot(lstm_losses, color="blue", lw=3, ls="-.", label="LSTM")

plt.grid(ls="--", lw=2, alpha=0.25)
plt.legend()
plt.ylabel("Cost function", fontsize=12)
plt.xlabel("Iteration", fontsize=12)
ax.set_xticks([0, 5, 10, 15, 20])
plt.show()

######################################################################
# .. figure:: ../demonstrations/learning2learn/rendered_LossGeneralization.png
#     :align: center
#     :width: 70%
#     :target: javascript:void(0);

######################################################################
# Again, we can confirm that the custom optimizer based on the LSTM quickly reaches a good
# value of the loss function. It also generalizes well, since it is able to
# initialize the parameters even for graphs not present in the training set.
#
# .. note::
#     To get the optimized weights of the LSTM use: ``optimized_weights = cell.get_weights()``.
#     To set initial weights for the LSTM cell, use instead: ``cell.set_weights(optimized_weights)``.
#
#
# Loss landscape in parameter space
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# It may be interesting to plot the path suggested by the RNN in the space
# of the parameters. Note that this is possible only if one layer is used
# in the QAOA ansatz, since in that case only two angles are needed and
# they can be plotted on a 2D plane. Of course, if more layers are used,
# you can always select a pair of them to reproduce a similar plot.
#
# .. note::
#     This cell takes approximately one minute to run with an 11 by 11 grid.
#

# Evaluate the cost function on a grid in parameter space
dx = dy = np.linspace(-1.0, 1.0, 11)
dz = np.array([new_cost([[xx], [yy]]).numpy() for yy in dy for xx in dx])
Z = dz.reshape((11, 11))

# Plot cost landscape
plt.contourf(dx, dy, Z)
plt.colorbar()

# Extract optimizer steps
params_x = [0.0] + [res[i].numpy()[0, 0] for i in range(len(res[:-1]))]
params_y = [0.0] + [res[i].numpy()[0, 1] for i in range(len(res[:-1]))]

# Plot steps
plt.plot(params_x, params_y, linestyle="--", color="red", marker="x")

plt.yticks(np.linspace(-1, 1, 5))
plt.xticks(np.linspace(-1, 1, 5))
plt.xlabel(r"$\alpha$", fontsize=12)
plt.ylabel(r"$\gamma$", fontsize=12)
plt.title("Loss Landscape", fontsize=12)
plt.show()

######################################################################
# .. figure:: ../demonstrations/learning2learn/rendered_LossLandscape.png
#     :align: center
#     :width: 70%
#     :target: javascript:void(0);
#
#
#
#
# Ideas for creating a Keras Layer and Keras Model
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Definition of a ``Keras Layer`` containing a single pass through the
# LSTM and the Quantum Circuit. That’s equivalent to the function
# ``rnn_iteration`` from before.
# class QRNN(tf.keras.layers.Layer): def __init__(self, p=1, graph=None): super(QRNN, self).__init__() # p is the number of layers in the QAOA ansatz self.cell = tf.keras.layers.LSTMCell(2 * p) self.expectation = qaoa_from_graph(graph, n_layers=p) self.qaoa_p = p def call(self, inputs): prev_cost = inputs[0] prev_params = inputs[1] prev_h = inputs[2] prev_c = inputs[3] # Concatenate the previous parameters and previous cost to create new input new_input = tf.keras.layers.concatenate([prev_cost, prev_params]) # New parameters obtained by the LSTM cell, along with new internal states h and c new_params, [new_h, new_c] = self.cell(new_input, states=[prev_h, prev_c]) # This part is used to feed the parameters to the PennyLane function _params = tf.reshape(new_params, shape=(2, self.qaoa_p)) # Cost evaluation, and reshaping to be consistent with other Keras tensors new_cost = tf.reshape(tf.cast(self.expectation(_params), dtype=tf.float32), shape=(1, 1)) return [new_cost, new_params, new_h, new_c] ###################################################################### # Code for creating an actual ``Keras Model`` starting from the previous # layer definition. # _graph = nx.gnp_random_graph(7, p=3 / 7) # Instantiate the LSTM cells rnn0 = QRNN(graph=_graph) # Create some input layers to feed the data inp_cost = tf.keras.layers.Input(shape=(1,)) inp_params = tf.keras.layers.Input(shape=(2,)) inp_h = tf.keras.layers.Input(shape=(2,)) inp_c = tf.keras.layers.Input(shape=(2,)) # Manually creating the recurrent loops. In this case just three iterations are used. out0 = rnn0([inp_cost, inp_params, inp_h, inp_c]) out1 = rnn0(out0) out2 = rnn0(out1) # Definition of a loss function driving the training of the LSTM loss = tf.keras.layers.average([0.15 * out0[0], 0.35 * out1[0], 0.5 * out2[0]]) # Definition of a Keras Model model = tf.keras.Model( inputs=[inp_cost, inp_params, inp_h, inp_c], outputs=[out0[1], out1[1], out2[1], loss] ) model.summary() ############################################################################## # .. rst-class:: sphx-glr-script-out # # Out: # # .. 
code-block:: none # # Model: "functional_1" # __________________________________________________________________________________________________ # Layer (type) Output Shape Param # Connected to # ================================================================================================== # input_1 (InputLayer) [(None, 1)] 0 # __________________________________________________________________________________________________ # input_2 (InputLayer) [(None, 2)] 0 # __________________________________________________________________________________________________ # input_3 (InputLayer) [(None, 2)] 0 # __________________________________________________________________________________________________ # input_4 (InputLayer) [(None, 2)] 0 # __________________________________________________________________________________________________ # qrnn (QRNN) [(1, 1), (None, 2), 48 input_1[0][0] # input_2[0][0] # input_3[0][0] # input_4[0][0] # qrnn[0][0] # qrnn[0][1] # qrnn[0][2] # qrnn[0][3] # qrnn[1][0] # qrnn[1][1] # qrnn[1][2] # qrnn[1][3] # __________________________________________________________________________________________________ # tf.math.multiply (TFOpLambda) (1, 1) 0 qrnn[0][0] # __________________________________________________________________________________________________ # tf.math.multiply_1 (TFOpLambda) (1, 1) 0 qrnn[1][0] # __________________________________________________________________________________________________ # tf.math.multiply_2 (TFOpLambda) (1, 1) 0 qrnn[2][0] # __________________________________________________________________________________________________ # average_147 (Average) (1, 1) 0 tf.math.multiply[0][0] # tf.math.multiply_1[0][0] # tf.math.multiply_2[0][0] # ================================================================================================== # Total params: 48 # Trainable params: 48 # Non-trainable params: 0 # ###################################################################### # A basic training routine for the ``Keras Model`` just created: # p = 1 inp_costA = tf.zeros(shape=(1, 1)) inp_paramsA = tf.zeros(shape=(1, 2 * p)) inp_hA = tf.zeros(shape=(1, 2 * p)) inp_cA = tf.zeros(shape=(1, 2 * p)) inputs = [inp_costA, inp_paramsA, inp_hA, inp_cA] opt = tf.keras.optimizers.Adam(learning_rate=0.01) step = 5 for _ in range(step): with tf.GradientTape() as tape: pred = model(inputs) loss = pred[3] gradients = tape.gradient(loss, model.trainable_variables) opt.apply_gradients(zip(gradients, model.trainable_variables)) print( f"Step {_+1} - Loss = {loss} - Cost = {qaoa_from_graph(_graph, n_layers=p)(np.reshape(pred[2].numpy(),(2, p)))}" ) print("Final Loss:", loss.numpy()) print("Final Outs:") for t, s in zip(pred, ["out0", "out1", "out2", "Loss"]): print(f" >{s}: {t.numpy()}") ############################################################################## # .. rst-class:: sphx-glr-script-out # # Out: # # .. code-block:: none # # Step 1 - Loss = [[-1.5563084]] - Cost = -4.762684301954701 # Step 2 - Loss = [[-1.5649065]] - Cost = -4.799981173473755 # Step 3 - Loss = [[-1.5741502]] - Cost = -4.840036354736862 # Step 4 - Loss = [[-1.5841404]] - Cost = -4.883246647056216 # Step 5 - Loss = [[-1.5948243]] - Cost = -4.929228976649736 # Final Loss: [[-1.5948243]] # Final Outs: # >out0: [[-0.01041588 0.01016874]] # >out1: [[-0.04530389 0.38148248]] # >out2: [[-0.10258182 0.4134117 ]] # >Loss: [[-1.5948243]] # ###################################################################### # .. 
note::
#     This code works only for a single graph at a time, since a graph was
#     needed to create the ``QRNN`` ``Keras Layer`` named ``rnn0``. Thus, in
#     order to actually train the RNN for multiple graphs, the above
#     training routine must be modified; a sketch of one possible approach
#     follows below. Alternatively, you could find a way to define the model
#     so that it accepts a whole dataset of graphs as input, and not just a
#     single one. Still, this might prove particularly hard, since
#     TensorFlow deals with tensors and is not able to directly manage
#     other data structures, like graphs or functions that take graphs as
#     input, such as ``qaoa_from_graph``.
#
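######################################################################
# Below is a minimal, hedged sketch of one way to handle multiple graphs
# with the ``QRNN`` layer above: rebuild the small Keras model for each
# graph, and carry the LSTM-cell weights over between graphs so that the
# recurrent unit keeps learning across the whole dataset. The helper
# ``build_qrnn_model`` is hypothetical (it is not part of the tutorial);
# it simply repeats the model-construction code shown earlier, and the
# input shapes below assume ``p = 1``.
#

def build_qrnn_model(graph, p=1):
    """Assemble the 3-iteration QRNN model for a single graph."""
    rnn = QRNN(p=p, graph=graph)
    inp_cost = tf.keras.layers.Input(shape=(1,))
    inp_params = tf.keras.layers.Input(shape=(2 * p,))
    inp_h = tf.keras.layers.Input(shape=(2 * p,))
    inp_c = tf.keras.layers.Input(shape=(2 * p,))
    out0 = rnn([inp_cost, inp_params, inp_h, inp_c])
    out1 = rnn(out0)
    out2 = rnn(out1)
    loss = tf.keras.layers.average([0.15 * out0[0], 0.35 * out1[0], 0.5 * out2[0]])
    model = tf.keras.Model(
        inputs=[inp_cost, inp_params, inp_h, inp_c],
        outputs=[out0[1], out1[1], out2[1], loss],
    )
    return model, rnn


# Train over a (hypothetical) list of graphs, transferring the LSTM-cell
# weights from one per-graph model to the next.
shared_weights = None
opt_multi = tf.keras.optimizers.Adam(learning_rate=0.01)

for g in [nx.gnp_random_graph(7, p=3 / 7) for _ in range(3)]:
    model_g, rnn_g = build_qrnn_model(g, p=1)
    if shared_weights is not None:
        rnn_g.cell.set_weights(shared_weights)  # reuse what previous graphs taught the LSTM
    zeros = [tf.zeros(shape=(1, 1))] + [tf.zeros(shape=(1, 2))] * 3
    for _ in range(5):
        with tf.GradientTape() as tape:
            loss_g = model_g(zeros)[3]
        grads = tape.gradient(loss_g, model_g.trainable_variables)
        opt_multi.apply_gradients(zip(grads, model_g.trainable_variables))
    shared_weights = rnn_g.cell.get_weights()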
[ "matplotlib.pyplot.legend", "matplotlib.pyplot.contourf", "tensorflow.keras.layers.average", "numpy.linspace", "tensorflow.zeros", "tensorflow.cast", "matplotlib.pyplot.plot", "numpy.mean", "tensorflow.random.set_seed", "tensorflow.keras.optimizers.SGD", "tensorflow.Variable", "tensorflow.keras.layers.LSTMCell", "matplotlib.pyplot.title", "tensorflow.keras.Model", "numpy.random.rand", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.ylabel", "tensorflow.GradientTape", "numpy.random.seed", "tensorflow.reshape", "matplotlib.pyplot.subplots", "tensorflow.keras.layers.concatenate", "matplotlib.pyplot.colorbar", "tensorflow.keras.optimizers.Adam", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "tensorflow.keras.layers.Input" ]
demonstrations/learning2learn.py
[(152, 'random.seed', 'random.seed', (['(42)'], {}), False, 'import random\n'), (153, 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), True, 'import numpy as np\n'), (154, 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), True, 'import tensorflow as tf\n'), (195, 'networkx.draw', 'nx.draw', (['graphs[0]'], {}), True, 'import networkx as nx\n'), (260, 'tensorflow.Variable', 'tf.Variable', (['[[0.5], [0.5]]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (306, 'tensorflow.keras.layers.LSTMCell', 'tf.keras.layers.LSTMCell', (['(2 * n_layers)'], {}), True, 'import tensorflow as tf\n'), (463, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.1)'}), True, 'import tensorflow as tf\n'), (535, 'networkx.gnp_random_graph', 'nx.gnp_random_graph', (['(7)'], {'p': '(3 / 7)'}), True, 'import networkx as nx\n'), (538, 'networkx.draw', 'nx.draw', (['new_graph'], {}), True, 'import networkx as nx\n'), (557, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(2 * n_layers, 1)'}), True, 'import tensorflow as tf\n'), (580, 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (582, 'matplotlib.pyplot.plot', 'plt.plot', (['lstm_losses'], {'color': '"""blue"""', 'lw': '(3)', 'ls': '"""-."""', 'label': '"""LSTM"""'}), True, 'import matplotlib.pyplot as plt\n'), (584, 'matplotlib.pyplot.grid', 'plt.grid', ([], {'ls': '"""--"""', 'lw': '(2)', 'alpha': '(0.25)'}), True, 'import matplotlib.pyplot as plt\n'), (585, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cost function"""'], {'fontsize': '(12)'}), True, 'import matplotlib.pyplot as plt\n'), (586, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {'fontsize': '(12)'}), True, 'import matplotlib.pyplot as plt\n'), (587, 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (589, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (623, 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(0.01)'}), True, 'import tensorflow as tf\n'), (669, 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (671, 'matplotlib.pyplot.plot', 'plt.plot', (['sdg_losses'], {'color': '"""orange"""', 'lw': '(3)', 'label': '"""SGD"""'}), True, 'import matplotlib.pyplot as plt\n'), (673, 'matplotlib.pyplot.plot', 'plt.plot', (['lstm_losses'], {'color': '"""blue"""', 'lw': '(3)', 'ls': '"""-."""', 'label': '"""LSTM"""'}), True, 'import matplotlib.pyplot as plt\n'), (675, 'matplotlib.pyplot.grid', 'plt.grid', ([], {'ls': '"""--"""', 'lw': '(2)', 'alpha': '(0.25)'}), True, 'import matplotlib.pyplot as plt\n'), (676, 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (677, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cost function"""'], {'fontsize': '(12)'}), True, 'import matplotlib.pyplot as plt\n'), (678, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {'fontsize': '(12)'}), True, 'import matplotlib.pyplot as plt\n'), (680, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (818, 'tensorflow.keras.layers.LSTMCell', 'tf.keras.layers.LSTMCell', (['(2 * n_layers)'], {}), True, 'import tensorflow as tf\n'), (829, 'random.seed', 'random.seed', (['(1234)'], {}), False, 'import random\n'), (830, 'random.shuffle', 'random.shuffle', (['gs_cost_list'], {}), False, 'import random\n'), (839, 
'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.1)'}), True, 'import tensorflow as tf\n'), (890, 'networkx.gnp_random_graph', 'nx.gnp_random_graph', (['(10)'], {'p': '(3 / 7)'}), True, 'import networkx as nx\n'), (893, 'networkx.draw', 'nx.draw', (['new_graph'], {}), True, 'import networkx as nx\n'), (911, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(2 * n_layers, 1)'}), True, 'import tensorflow as tf\n'), (925, 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (927, 'matplotlib.pyplot.plot', 'plt.plot', (['lstm_losses'], {'color': '"""blue"""', 'lw': '(3)', 'ls': '"""-."""', 'label': '"""LSTM"""'}), True, 'import matplotlib.pyplot as plt\n'), (929, 'matplotlib.pyplot.grid', 'plt.grid', ([], {'ls': '"""--"""', 'lw': '(2)', 'alpha': '(0.25)'}), True, 'import matplotlib.pyplot as plt\n'), (930, 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (931, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cost function"""'], {'fontsize': '(12)'}), True, 'import matplotlib.pyplot as plt\n'), (932, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {'fontsize': '(12)'}), True, 'import matplotlib.pyplot as plt\n'), (934, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (968, 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', '(11)'], {}), True, 'import numpy as np\n'), (973, 'matplotlib.pyplot.contourf', 'plt.contourf', (['dx', 'dy', 'Z'], {}), True, 'import matplotlib.pyplot as plt\n'), (974, 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (981, 'matplotlib.pyplot.plot', 'plt.plot', (['params_x', 'params_y'], {'linestyle': '"""--"""', 'color': '"""red"""', 'marker': '"""x"""'}), True, 'import matplotlib.pyplot as plt\n'), (985, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\alpha$"""'], {'fontsize': '(12)'}), True, 'import matplotlib.pyplot as plt\n'), (986, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\gamma$"""'], {'fontsize': '(12)'}), True, 'import matplotlib.pyplot as plt\n'), (987, 'matplotlib.pyplot.title', 'plt.title', (['"""Loss Landscape"""'], {'fontsize': '(12)'}), True, 'import matplotlib.pyplot as plt\n'), (988, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (1042, 'networkx.gnp_random_graph', 'nx.gnp_random_graph', (['(7)'], {'p': '(3 / 7)'}), True, 'import networkx as nx\n'), (1048, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(1,)'}), True, 'import tensorflow as tf\n'), (1049, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(2,)'}), True, 'import tensorflow as tf\n'), (1050, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(2,)'}), True, 'import tensorflow as tf\n'), (1051, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(2,)'}), True, 'import tensorflow as tf\n'), (1059, 'tensorflow.keras.layers.average', 'tf.keras.layers.average', (['[0.15 * out0[0], 0.35 * out1[0], 0.5 * out2[0]]'], {}), True, 'import tensorflow as tf\n'), (1062, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[inp_cost, inp_params, inp_h, inp_c]', 'outputs': '[out0[1], out1[1], out2[1], loss]'}), True, 'import tensorflow as tf\n'), (1122, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(1, 1)'}), True, 'import tensorflow as tf\n'), (1123, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(1, 2 * 
p)'}), True, 'import tensorflow as tf\n'), (1124, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(1, 2 * p)'}), True, 'import tensorflow as tf\n'), (1125, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(1, 2 * p)'}), True, 'import tensorflow as tf\n'), (1129, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.01)'}), True, 'import tensorflow as tf\n'), (222, 'pennylane.qaoa.maxcut', 'qaoa.maxcut', (['graph'], {}), False, 'from pennylane import qaoa\n'), (345, 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[prev_cost, prev_params]'], {}), True, 'import tensorflow as tf\n'), (352, 'tensorflow.reshape', 'tf.reshape', (['new_params'], {'shape': '(2, n_layers)'}), True, 'import tensorflow as tf\n'), (367, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(1, 1)'}), True, 'import tensorflow as tf\n'), (368, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(1, 2 * n_layers)'}), True, 'import tensorflow as tf\n'), (369, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(1, 2 * n_layers)'}), True, 'import tensorflow as tf\n'), (370, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(1, 2 * n_layers)'}), True, 'import tensorflow as tf\n'), (383, 'tensorflow.keras.layers.average', 'tf.keras.layers.average', (['[0.1 * out0[0], 0.2 * out1[0], 0.3 * out2[0], 0.4 * out3[0], 0.5 * out4[0]]'], {}), True, 'import tensorflow as tf\n'), (470, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (620, 'numpy.random.rand', 'np.random.rand', (['(2)', '(1)'], {}), True, 'import numpy as np\n'), (846, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (983, 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(5)'], {}), True, 'import numpy as np\n'), (984, 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(5)'], {}), True, 'import numpy as np\n'), (179, 'networkx.gnp_random_graph', 'nx.gnp_random_graph', (['n_nodes'], {'p': 'p_edge'}), True, 'import networkx as nx\n'), (226, 'pennylane.qaoa.cost_layer', 'qaoa.cost_layer', (['gamma', 'cost_h'], {}), False, 'from pennylane import qaoa\n'), (227, 'pennylane.qaoa.mixer_layer', 'qaoa.mixer_layer', (['alpha', 'mixer_h'], {}), False, 'from pennylane import qaoa\n'), (233, 'pennylane.layer', 'qml.layer', (['qaoa_layer', 'n_layers', 'params[0]', 'params[1]'], {}), True, 'import pennylane as qml\n'), (234, 'pennylane.expval', 'qml.expval', (['cost_h'], {}), True, 'import pennylane as qml\n'), (244, 'pennylane.QNode', 'qml.QNode', (['circuit', 'dev'], {'interface': '"""tf"""', 'diff_method': '"""backprop"""'}), True, 'import pennylane as qml\n'), (358, 'tensorflow.cast', 'tf.cast', (['_cost'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (438, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (569, 'tensorflow.reshape', 'tf.reshape', (['guess'], {'shape': '(2, n_layers)'}), True, 'import tensorflow as tf\n'), (630, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (923, 'tensorflow.reshape', 'tf.reshape', (['guess'], {'shape': '(2, n_layers)'}), True, 'import tensorflow as tf\n'), (1012, 'tensorflow.keras.layers.LSTMCell', 'tf.keras.layers.LSTMCell', (['(2 * p)'], {}), True, 'import tensorflow as tf\n'), (1023, 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[prev_cost, prev_params]'], {}), True, 'import tensorflow as tf\n'), (1029, 'tensorflow.reshape', 'tf.reshape', (['new_params'], {'shape': '(2, self.qaoa_p)'}), True, 'import tensorflow as tf\n'), 
(1133, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (232, 'pennylane.Hadamard', 'qml.Hadamard', ([], {'wires': 'w'}), True, 'import pennylane as qml\n'), (477, 'numpy.mean', 'np.mean', (['total_loss'], {}), True, 'import numpy as np\n'), (853, 'numpy.mean', 'np.mean', (['total_loss'], {}), True, 'import numpy as np\n')]
the16thpythonist/gcnn_keras
27d794095b684333d93149c825d84b85df8c30ff
import tensorflow as tf
import tensorflow.keras as ks


@tf.keras.utils.register_keras_serializable(package='kgcnn', name='ScaledMeanAbsoluteError')
class ScaledMeanAbsoluteError(tf.keras.metrics.MeanAbsoluteError):
    """Metric for a scaled mean absolute error (MAE), which can undo a pre-scaling of the targets.
    Only intended as a metric, this allows reporting the MAE with correct units or absolute
    values during fit."""

    def __init__(self, scaling_shape=(), name='mean_absolute_error', **kwargs):
        super(ScaledMeanAbsoluteError, self).__init__(name=name, **kwargs)
        self.scale = self.add_weight(shape=scaling_shape,
                                     initializer=tf.keras.initializers.Ones(), name='kgcnn_scale_mae',
                                     dtype=tf.keras.backend.floatx())
        self.scaling_shape = scaling_shape

    def reset_state(self):
        # Reset all state variables except the scale weight itself.
        ks.backend.batch_set_value([(v, 0) for v in self.variables if 'kgcnn_scale_mae' not in v.name])
        # Or set them explicitly.
        # ks.backend.set_value(self.total, 0)
        # ks.backend.set_value(self.count, 0)

    def reset_states(self):
        # Kept for compatibility with older TensorFlow versions that call reset_states().
        ks.backend.batch_set_value([(v, 0) for v in self.variables if 'kgcnn_scale_mae' not in v.name])
        # Or set them explicitly.
        # ks.backend.set_value(self.total, 0)
        # ks.backend.set_value(self.count, 0)

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_true = self.scale * y_true
        y_pred = self.scale * y_pred
        return super(ScaledMeanAbsoluteError, self).update_state(y_true, y_pred, sample_weight=sample_weight)

    def get_config(self):
        """Returns the serializable config of the metric."""
        conf = super(ScaledMeanAbsoluteError, self).get_config()
        conf.update({"scaling_shape": self.scaling_shape})
        return conf

    def set_scale(self, scale):
        """Set the scale from a numpy array. Usually used with broadcasting."""
        ks.backend.set_value(self.scale, scale)


@tf.keras.utils.register_keras_serializable(package='kgcnn', name='ScaledRootMeanSquaredError')
class ScaledRootMeanSquaredError(tf.keras.metrics.RootMeanSquaredError):
    """Metric for a scaled root mean squared error (RMSE), which can undo a pre-scaling of the targets.
    Only intended as a metric, this allows reporting the RMSE with correct units or absolute
    values during fit."""

    def __init__(self, scaling_shape=(), name='root_mean_squared_error', **kwargs):
        super(ScaledRootMeanSquaredError, self).__init__(name=name, **kwargs)
        self.scale = self.add_weight(shape=scaling_shape,
                                     initializer=tf.keras.initializers.Ones(), name='kgcnn_scale_rmse',
                                     dtype=tf.keras.backend.floatx())
        self.scaling_shape = scaling_shape

    def reset_state(self):
        # Reset all state variables except the scale weight itself.
        ks.backend.batch_set_value([(v, 0) for v in self.variables if 'kgcnn_scale_rmse' not in v.name])
        # Or set them explicitly.
        # ks.backend.set_value(self.total, 0)
        # ks.backend.set_value(self.count, 0)

    def reset_states(self):
        # Kept for compatibility with older TensorFlow versions that call reset_states().
        ks.backend.batch_set_value([(v, 0) for v in self.variables if 'kgcnn_scale_rmse' not in v.name])
        # Or set them explicitly.
        # ks.backend.set_value(self.total, 0)
        # ks.backend.set_value(self.count, 0)

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_true = self.scale * y_true
        y_pred = self.scale * y_pred
        return super(ScaledRootMeanSquaredError, self).update_state(y_true, y_pred, sample_weight=sample_weight)

    def get_config(self):
        """Returns the serializable config of the metric."""
        conf = super(ScaledRootMeanSquaredError, self).get_config()
        conf.update({"scaling_shape": self.scaling_shape})
        return conf

    def set_scale(self, scale):
        """Set the scale from a numpy array. Usually used with broadcasting."""
        ks.backend.set_value(self.scale, scale)
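
# ----------------------------------------------------------------------
# A minimal usage sketch (not part of the original module): if the targets
# were standardized before training, setting the metric's scale to the
# original standard deviation makes the logged MAE appear in the original
# units. The toy model and the assumed std of 2.5 are hypothetical and only
# illustrate the wiring.
if __name__ == "__main__":
    import numpy as np

    # Toy regression data; pretend the targets were divided by std = 2.5.
    x_demo = np.random.rand(32, 4).astype("float32")
    y_demo = (np.random.rand(32, 1).astype("float32") - 0.5)

    metric = ScaledMeanAbsoluteError(scaling_shape=(1, 1))
    metric.set_scale(np.array([[2.5]], dtype="float32"))  # undo the target scaling

    demo_model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    demo_model.compile(optimizer="adam", loss="mean_squared_error", metrics=[metric])
    demo_model.fit(x_demo, y_demo, epochs=1, verbose=0)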
[ "tensorflow.keras.backend.floatx", "tensorflow.keras.backend.set_value", "tensorflow.keras.backend.batch_set_value", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.keras.initializers.Ones" ]
kgcnn/utils/loss.py
[(5, 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""kgcnn"""', 'name': '"""ScaledMeanAbsoluteError"""'}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""kgcnn"""', 'name': '"""ScaledRootMeanSquaredError"""'}), True, 'import tensorflow as tf\n'), (18, 'tensorflow.keras.backend.batch_set_value', 'ks.backend.batch_set_value', (["[(v, 0) for v in self.variables if 'kgcnn_scale_mae' not in v.name]"], {}), True, 'import tensorflow.keras as ks\n'), (24, 'tensorflow.keras.backend.batch_set_value', 'ks.backend.batch_set_value', (["[(v, 0) for v in self.variables if 'kgcnn_scale_mae' not in v.name]"], {}), True, 'import tensorflow.keras as ks\n'), (42, 'tensorflow.keras.backend.set_value', 'ks.backend.set_value', (['self.scale', 'scale'], {}), True, 'import tensorflow.keras as ks\n'), (58, 'tensorflow.keras.backend.batch_set_value', 'ks.backend.batch_set_value', (["[(v, 0) for v in self.variables if 'kgcnn_scale_rmse' not in v.name]"], {}), True, 'import tensorflow.keras as ks\n'), (64, 'tensorflow.keras.backend.batch_set_value', 'ks.backend.batch_set_value', (["[(v, 0) for v in self.variables if 'kgcnn_scale_rmse' not in v.name]"], {}), True, 'import tensorflow.keras as ks\n'), (82, 'tensorflow.keras.backend.set_value', 'ks.backend.set_value', (['self.scale', 'scale'], {}), True, 'import tensorflow.keras as ks\n'), (13, 'tensorflow.keras.initializers.Ones', 'tf.keras.initializers.Ones', ([], {}), True, 'import tensorflow as tf\n'), (14, 'tensorflow.keras.backend.floatx', 'tf.keras.backend.floatx', ([], {}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.keras.initializers.Ones', 'tf.keras.initializers.Ones', ([], {}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.keras.backend.floatx', 'tf.keras.backend.floatx', ([], {}), True, 'import tensorflow as tf\n')]
blafabregue/TimeSeriesDeepClustering
85f4ab2fd45bda3296c6b0861ee11e6c7a77c594
""" Based on Keras implementation https://github.com/XifengGuo/IDEC: and article : Xifeng Guo, Long Gao, Xinwang Liu, Jianping Yin. Improved Deep Embedded Clustering with Local Structure Preservation. IJCAI 2017. Original Author: Xifeng Guo. 2017.1.30 Author: Baptiste Lafabregue 2019.25.04 """ import numpy as np import tensorflow as tf import tensorflow.keras.backend as K from sklearn import metrics from tensorflow.keras.layers import Layer, InputSpec from tensorflow.keras.models import Model from sklearn.cluster import KMeans from networks.trainer import Trainer class ClusteringLayer(Layer): """ Clustering layer converts input sample (feature) to soft label, i.e. a vector that represents the probability of the sample belonging to each cluster. The probability is calculated with student's t-distribution. # Example ``` model.add(ClusteringLayer(n_clusters=10)) ``` # Arguments n_clusters: number of clusters. weights: list of Numpy array with shape `(n_clusters, n_features)` witch represents the initial cluster centers. alpha: parameter in Student's t-distribution. Default to 1.0. # Input shape 2D tensor with shape: `(n_samples, n_features)`. # Output shape 2D tensor with shape: `(n_samples, n_clusters)`. """ def __init__(self, n_clusters, weights=None, alpha=1.0, **kwargs): if 'input_shape' not in kwargs and 'input_dim' in kwargs: kwargs['input_shape'] = (kwargs.pop('input_dim'),) super(ClusteringLayer, self).__init__(**kwargs) self.n_clusters = n_clusters self.alpha = alpha self.initial_weights = weights self.input_spec = InputSpec(ndim=2) def build(self, input_shape): assert len(input_shape) == 2 input_dim = input_shape[1] self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_dim)) self.clusters = self.add_weight(shape=(self.n_clusters, input_dim), initializer='glorot_uniform', name='clustering') if self.initial_weights is not None: self.set_weights(self.initial_weights) del self.initial_weights self.built = True def call(self, inputs, **kwargs): """ student t-distribution, as same as used in t-SNE algorithm. q_ij = 1/(1+dist(x_i, u_j)^2), then normalize it. Arguments: inputs: the variable containing data, shape=(n_samples, n_features) Return: q: student's t-distribution, or soft labels for each sample. 
               shape=(n_samples, n_clusters)
        """
        q = 1.0 / (1.0 + (K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters), axis=2) / self.alpha))
        q **= (self.alpha + 1.0) / 2.0
        q = K.transpose(K.transpose(q) / K.sum(q, axis=1))
        return q

    def compute_output_shape(self, input_shape):
        assert input_shape and len(input_shape) == 2
        return input_shape[0], self.n_clusters

    def get_config(self):
        config = {'n_clusters': self.n_clusters}
        base_config = super(ClusteringLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class IDEC(Trainer):
    def __init__(self,
                 dataset_name,
                 classifier_name,
                 encoder_model,
                 keep_both_losses=True,
                 gamma=0.1,
                 n_clusters=10,
                 alpha=1.0,
                 batch_size=10,
                 tol=1e-3,
                 update_interval=5,
                 optimizer=None):
        super(IDEC, self).__init__(dataset_name, classifier_name, encoder_model, batch_size, n_clusters, optimizer)
        self.keep_both_losses = keep_both_losses
        self.gamma = gamma
        self.alpha = alpha
        self.tol = tol
        self.update_interval = update_interval
        self.dec_model = None
        self.dec_loss = None

    def initialize_model(self, x, y, ae_weights=None):
        """
        Initialize the model for training
        :param x: training data (not used here, kept for interface compatibility)
        :param y: training labels (not used here, kept for interface compatibility)
        :param ae_weights: arguments to let the encoder load its weights, None to pre-train the encoder
        """
        if ae_weights is not None:
            self.encoder_model.load_weights(ae_weights)
            print('Pretrained AE weights are loaded successfully.')
            self.pretrain_model = False
        else:
            self.pretrain_model = True

        if self.optimizer is None:
            self.optimizer = tf.keras.optimizers.Adam()

        clustering_layer = ClusteringLayer(self.n_clusters, name='clustering')(self.encoder.output)
        self.dec_model = Model(inputs=self.encoder.input, outputs=clustering_layer)
        self.dec_loss = tf.keras.losses.KLDivergence()

    def load_weights(self, weights_path):
        """
        Load weights of IDEC model
        :param weights_path: path to load weights from
        """
        self.dec_model.load_weights(weights_path + '.tf')

    def save_weights(self, weights_path):
        """
        Save weights of IDEC model
        :param weights_path: path to save weights to
        """
        self.dec_model.save_weights(weights_path + '.tf')

    def get_trainer_name(self):
        """
        Return the name of the training method used
        :return: method name
        """
        if self.gamma == 0.0:
            return 'DEC'
        return self.__class__.__name__

    def predict_clusters(self, x, seeds=None):
        """
        Predict cluster labels using the output of the clustering layer
        :param x: the data to evaluate
        :param seeds: seeds to initialize the K-Means if needed
        :return: the predicted cluster labels and the cluster centers
        """
        q = self.dec_model.predict(x, verbose=0)
        return q.argmax(1), self.dec_model.get_layer(name='clustering').get_weights()

    @staticmethod
    def target_distribution(q):
        """
        Target distribution P which enhances the discrimination of soft label Q
        :param q: the Q tensor
        :return: the P tensor
        """
        weight = q ** 2 / q.sum(0)
        return (weight.T / weight.sum(1)).T

    def _run_training(self, x, y, x_test, y_test, nb_steps, seeds, verbose, log_writer, dist_matrix=None):
        if seeds is not None:
            seeds_enc = self.extract_features(seeds)
            kmeans = KMeans(n_clusters=self.n_clusters, n_init=20, init=seeds_enc)
        else:
            kmeans = KMeans(n_clusters=self.n_clusters, n_init=20)
        x_pred = self.extract_features(x)
        y_pred = kmeans.fit_predict(x_pred)
        self.dec_model.get_layer(name='clustering').set_weights([kmeans.cluster_centers_])

        if y is not None:
            ari = np.round(metrics.adjusted_rand_score(y, y_pred), 5)
            if verbose:
                print('ari kmeans: ', str(ari))

            self.log_stats(x, y, x_test, y_test, [0, 0, 0], 0, log_writer, 'init')

        q = self.dec_model.predict(x, verbose=0)
        p = self.target_distribution(q)  # update the auxiliary target distribution p

        # evaluate the clustering performance
        y_pred = q.argmax(1)
        y_pred_last = y_pred

        i = 0  # Number of performed optimization steps
        epoch = 0  # Number of performed epochs

        # define the train metrics
        train_enc_loss = tf.keras.metrics.Mean(name='encoder train_loss')
        dec_enc_loss = tf.keras.metrics.Mean(name='dec train_loss')
        idec_enc_loss = tf.keras.metrics.Mean(name='idec train_loss')

        @tf.function
        def train_step(x_batch, p_batch):
            with tf.GradientTape() as tape:
                encoder_loss = self.encoder_model.loss.compute_loss(x_batch, training=True)
                encoding_x = self.dec_model(x_batch, training=True)
                dec_loss = tf.keras.losses.KLD(p_batch, encoding_x)
                loss = (1 - self.gamma) * encoder_loss + self.gamma * dec_loss
            gradients = tape.gradient(loss, self.encoder_model.get_trainable_variables() +
                                      self.dec_model.trainable_variables)
            self.optimizer.apply_gradients(
                zip(gradients, self.encoder_model.get_trainable_variables() +
                    self.dec_model.trainable_variables))
            train_enc_loss(encoder_loss)
            # track the clustering (KL) term on its own; the original code
            # accumulated the combined loss here as well
            dec_enc_loss(dec_loss)
            idec_enc_loss(loss)

        if verbose:
            print('start training')

        # idec training
        while i < nb_steps:
            train_enc_loss.reset_states()
            dec_enc_loss.reset_states()
            idec_enc_loss.reset_states()

            # recompute the target distribution P every update_interval epochs
            if epoch % self.update_interval == 0:
                q = self.dec_model.predict(x, verbose=0)
                p = self.target_distribution(q)  # update the auxiliary target distribution p

                # evaluate the clustering performance
                y_pred = q.argmax(1)
                delta_label = np.sum(y_pred != y_pred_last).astype(np.float32) / y_pred.shape[0]
                y_pred_last = y_pred

                # check stop criterion
                if epoch > 0 and delta_label < self.tol:
                    if verbose:
                        print('delta_label ', delta_label, '< tol ', self.tol)
                        print('Reached tolerance threshold. Stopping training.')
                    self.log_stats(x, y, x_test, y_test, [0, 0, 0], epoch, log_writer, 'reached_stop_criterion')
                    break

            # shuffle the train set and iterate over mini-batches
            train_ds = tf.data.Dataset.from_tensor_slices((x, p)) \
                .shuffle(x.shape[0], reshuffle_each_iteration=True) \
                .batch(self.batch_size).as_numpy_iterator()

            for x_batch, p_batch in train_ds:
                train_step(x_batch, p_batch)
                i += 1
                if i >= nb_steps:
                    break

            if verbose:
                template = 'Epoch {}, Loss: {}'
                print(template.format(epoch + 1, idec_enc_loss.result()))
            epoch += 1

        y_pred = self.log_stats(x, y, x_test, y_test,
                                [idec_enc_loss.result(), dec_enc_loss.result(), train_enc_loss.result()],
                                epoch, log_writer, 'train')

        return epoch
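
# ----------------------------------------------------------------------
# A short sketch (not part of the original file, runnable inside the
# repository since the module imports networks.trainer) showing how the
# auxiliary target distribution P sharpens the soft assignments Q, which is
# the core of the DEC/IDEC self-training signal. The printed values below
# are approximate.
if __name__ == "__main__":
    # Toy soft assignments for two samples and two clusters
    q_demo = np.array([[0.6, 0.4],
                       [0.4, 0.6]])
    p_demo = IDEC.target_distribution(q_demo)
    # Squaring and renormalizing by cluster frequency sharpens each row:
    # [[0.692, 0.308], [0.308, 0.692]]
    print(p_demo)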
[ "tensorflow.keras.backend.floatx", "numpy.sum", "sklearn.cluster.KMeans", "tensorflow.keras.models.Model", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.keras.backend.sum", "tensorflow.keras.backend.transpose", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.losses.KLD", "tensorflow.keras.backend.expand_dims", "sklearn.metrics.adjusted_rand_score", "tensorflow.keras.layers.InputSpec", "tensorflow.keras.losses.KLDivergence", "tensorflow.keras.metrics.Mean", "tensorflow.GradientTape" ]
networks/IDEC.py
[(50, 'tensorflow.keras.layers.InputSpec', 'InputSpec', ([], {'ndim': '(2)'}), False, 'from tensorflow.keras.layers import Layer, InputSpec\n'), (126, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'self.encoder.input', 'outputs': 'clustering_layer'}), False, 'from tensorflow.keras.models import Model\n'), (127, 'tensorflow.keras.losses.KLDivergence', 'tf.keras.losses.KLDivergence', ([], {}), True, 'import tensorflow as tf\n'), (201, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""encoder train_loss"""'}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""dec train_loss"""'}), True, 'import tensorflow as tf\n'), (203, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""idec train_loss"""'}), True, 'import tensorflow as tf\n'), (123, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), True, 'import tensorflow as tf\n'), (176, 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'self.n_clusters', 'n_init': '(20)', 'init': 'seeds_enc'}), False, 'from sklearn.cluster import KMeans\n'), (178, 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'self.n_clusters', 'n_init': '(20)'}), False, 'from sklearn.cluster import KMeans\n'), (55, 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), True, 'import tensorflow.keras.backend as K\n'), (73, 'tensorflow.keras.backend.transpose', 'K.transpose', (['q'], {}), True, 'import tensorflow.keras.backend as K\n'), (73, 'tensorflow.keras.backend.sum', 'K.sum', (['q'], {'axis': '(1)'}), True, 'import tensorflow.keras.backend as K\n'), (185, 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['y', 'y_pred'], {}), False, 'from sklearn import metrics\n'), (207, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (210, 'tensorflow.keras.losses.KLD', 'tf.keras.losses.KLD', (['p_batch', 'encoding_x'], {}), True, 'import tensorflow as tf\n'), (237, 'numpy.sum', 'np.sum', (['(y_pred != y_pred_last)'], {}), True, 'import numpy as np\n'), (71, 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['inputs'], {'axis': '(1)'}), True, 'import tensorflow.keras.backend as K\n'), (249, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x, p)'], {}), True, 'import tensorflow as tf\n')]
artitw/BERT_QA
947e64f9d33a03ee942e3914bed4b4a354236bad
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based transformer block layer."""

from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function

import tensorflow as tf

from . import attention
from . import dense_einsum


class Transformer(tf.keras.layers.Layer):
  """Transformer layer.

  This layer implements the Transformer from "Attention Is All You Need"
  (https://arxiv.org/abs/1706.03762).

  Attributes:
    num_attention_heads: Number of attention heads.
    intermediate_size: Size of the intermediate layer.
    intermediate_activation: Activation for the intermediate layer.
    dropout_rate: Dropout probability for the post-attention and output
      dropout.
    attention_dropout_rate: Dropout probability for within the attention
      layer.
    kernel_initializer: Initializer for dense layer kernels.
    bias_initializer: Initializer for dense layer biases.
    kernel_regularizer: Regularizer for dense layer kernels.
    bias_regularizer: Regularizer for dense layer biases.
    activity_regularizer: Regularizer for dense layer activity.
    kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
""" def __init__(self, num_attention_heads, intermediate_size, intermediate_activation, dropout_rate=0.0, attention_dropout_rate=0.0, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(Transformer, self).__init__(**kwargs) self._num_heads = num_attention_heads self._intermediate_size = intermediate_size self._intermediate_activation = intermediate_activation self._attention_dropout_rate = attention_dropout_rate self._dropout_rate = dropout_rate self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) self._bias_initializer = tf.keras.initializers.get(bias_initializer) self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) self._bias_constraint = tf.keras.constraints.get(bias_constraint) def build(self, input_shape): input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape input_tensor_shape = tf.TensorShape(input_tensor) if len(input_tensor_shape) != 3: raise ValueError("TransformerLayer expects a three-dimensional input of " "shape [batch, sequence, width].") batch_size, sequence_length, hidden_size = input_tensor_shape if len(input_shape) == 2: mask_tensor_shape = tf.TensorShape(input_shape[1]) expected_mask_tensor_shape = tf.TensorShape( [batch_size, sequence_length, sequence_length]) if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape): raise ValueError("When passing a mask tensor to TransformerLayer, the " "mask tensor must be of shape [batch, " "sequence_length, sequence_length] (here %s). Got a " "mask tensor of shape %s." 
                         % (expected_mask_tensor_shape, mask_tensor_shape))

    if hidden_size % self._num_heads != 0:
      raise ValueError(
          "The input size (%d) is not a multiple of the number of attention "
          "heads (%d)" % (hidden_size, self._num_heads))
    self._attention_head_size = int(hidden_size // self._num_heads)

    self._attention_layer = attention.Attention(
        num_heads=self._num_heads,
        head_size=self._attention_head_size,
        dropout_rate=self._attention_dropout_rate,
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint,
        name="self_attention")
    self._attention_output_dense = dense_einsum.DenseEinsum(
        output_shape=hidden_size,
        num_summed_dimensions=2,
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint,
        name="self_attention_output")
    self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
    self._attention_layer_norm = (
        tf.keras.layers.LayerNormalization(
            name="self_attention_layer_norm", axis=-1, epsilon=1e-12,
            dtype=tf.float32))
    self._intermediate_dense = dense_einsum.DenseEinsum(
        output_shape=self._intermediate_size,
        activation=None,
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint,
        name="intermediate")
    self._intermediate_activation_layer = tf.keras.layers.Activation(
        self._intermediate_activation)
    self._output_dense = dense_einsum.DenseEinsum(
        output_shape=hidden_size,
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint,
        name="output")
    self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
    self._output_layer_norm = tf.keras.layers.LayerNormalization(
        name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32)

    super(Transformer, self).build(input_shape)

  def compute_output_shape(self, input_shape):
    data_tensor_shape = tf.TensorShape(input_shape[0])
    batch = data_tensor_shape[0]
    sequence_length = data_tensor_shape[1]
    # The block preserves the hidden size of its input (the original code
    # referenced an undefined self._output_einsum_shape here).
    return tf.TensorShape((batch, sequence_length, data_tensor_shape[2]))

  def get_config(self):
    config = {
        "num_attention_heads":
            self._num_heads,
        "intermediate_size":
            self._intermediate_size,
        "intermediate_activation":
            self._intermediate_activation,
        "dropout_rate":
            self._dropout_rate,
        "attention_dropout_rate":
            self._attention_dropout_rate,
        "kernel_initializer":
            tf.keras.initializers.serialize(self._kernel_initializer),
        "bias_initializer":
            tf.keras.initializers.serialize(self._bias_initializer),
        "kernel_regularizer":
            tf.keras.regularizers.serialize(self._kernel_regularizer),
        "bias_regularizer":
            tf.keras.regularizers.serialize(self._bias_regularizer),
        "activity_regularizer":
            tf.keras.regularizers.serialize(self._activity_regularizer),
        "kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint), "bias_constraint": tf.keras.constraints.serialize(self._bias_constraint) } base_config = super(Transformer, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): if isinstance(inputs, (list, tuple)) and len(inputs) == 2: input_tensor, attention_mask = inputs else: input_tensor, attention_mask = (inputs, None) attention_inputs = [input_tensor, input_tensor] if attention_mask is not None: attention_inputs.append(attention_mask) attention_output = self._attention_layer(attention_inputs) attention_output = self._attention_output_dense(attention_output) attention_output = self._attention_dropout(attention_output) # Use float32 in keras layer norm and the gelu activation in the # intermediate dense layer for numeric stability if self.dtype == tf.float16: input_tensor = tf.cast(input_tensor, tf.float32) attention_output = tf.cast(attention_output, tf.float32) attention_output = self._attention_layer_norm(input_tensor + attention_output) intermediate_output = self._intermediate_dense(attention_output) if self.dtype == tf.float16: # Casts to float32 so that activation is done in float32. intermediate_output = tf.cast(intermediate_output, tf.float32) intermediate_output = self._intermediate_activation_layer( intermediate_output) intermediate_output = tf.cast(intermediate_output, tf.float16) else: intermediate_output = self._intermediate_activation_layer( intermediate_output) layer_output = self._output_dense(intermediate_output) layer_output = self._output_dropout(layer_output) # Use float32 in keras layer norm for numeric stability if self.dtype == tf.float16: layer_output = tf.cast(layer_output, tf.float32) layer_output = self._output_layer_norm(layer_output + attention_output) if self.dtype == tf.float16: layer_output = tf.cast(layer_output, tf.float16) return layer_output
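
# ----------------------------------------------------------------------
# A minimal usage sketch (not part of the original file), assuming the
# companion ``attention`` and ``dense_einsum`` modules of this package are
# importable (e.g. as ``bert_qa``). The shapes are chosen so that the hidden
# width (64) is a multiple of the number of heads (4).
#
#   import tensorflow as tf
#   from bert_qa.transformer import Transformer
#
#   block = Transformer(
#       num_attention_heads=4,
#       intermediate_size=256,
#       intermediate_activation="relu")
#   data = tf.random.uniform(shape=(2, 16, 64))   # [batch, seq, width]
#   mask = tf.ones(shape=(2, 16, 16))             # [batch, seq, seq]
#   output = block([data, mask])                  # same shape as data
#   print(output.shape)                           # (2, 16, 64)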
[ "tensorflow.keras.layers.LayerNormalization", "tensorflow.TensorShape", "tensorflow.keras.layers.Activation", "tensorflow.keras.constraints.get", "tensorflow.keras.constraints.serialize", "tensorflow.keras.regularizers.get", "tensorflow.keras.initializers.serialize", "tensorflow.keras.regularizers.serialize", "tensorflow.cast", "tensorflow.keras.layers.Dropout", "tensorflow.keras.initializers.get" ]
bert_qa/transformer.py
[(70, 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', (['kernel_initializer'], {}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', (['bias_initializer'], {}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.keras.regularizers.get', 'tf.keras.regularizers.get', (['kernel_regularizer'], {}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.keras.regularizers.get', 'tf.keras.regularizers.get', (['bias_regularizer'], {}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.keras.constraints.get', 'tf.keras.constraints.get', (['kernel_constraint'], {}), True, 'import tensorflow as tf\n'), (75, 'tensorflow.keras.constraints.get', 'tf.keras.constraints.get', (['bias_constraint'], {}), True, 'import tensorflow as tf\n'), (79, 'tensorflow.TensorShape', 'tf.TensorShape', (['input_tensor'], {}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'self._dropout_rate'}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'name': '"""self_attention_layer_norm"""', 'axis': '(-1)', 'epsilon': '(1e-12)', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['self._intermediate_activation'], {}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'self._dropout_rate'}), True, 'import tensorflow as tf\n'), (155, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'name': '"""output_layer_norm"""', 'axis': '(-1)', 'epsilon': '(1e-12)', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.TensorShape', 'tf.TensorShape', (['input_shape[0]'], {}), True, 'import tensorflow as tf\n'), (165, 'tensorflow.TensorShape', 'tf.TensorShape', (['(batch, sequence_length, self._output_einsum_shape)'], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.TensorShape', 'tf.TensorShape', (['input_shape[1]'], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.TensorShape', 'tf.TensorShape', (['[batch_size, sequence_length, sequence_length]'], {}), True, 'import tensorflow as tf\n'), (180, 'tensorflow.keras.initializers.serialize', 'tf.keras.initializers.serialize', (['self._kernel_initializer'], {}), True, 'import tensorflow as tf\n'), (182, 'tensorflow.keras.initializers.serialize', 'tf.keras.initializers.serialize', (['self._bias_initializer'], {}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.keras.regularizers.serialize', 'tf.keras.regularizers.serialize', (['self._kernel_regularizer'], {}), True, 'import tensorflow as tf\n'), (186, 'tensorflow.keras.regularizers.serialize', 'tf.keras.regularizers.serialize', (['self._bias_regularizer'], {}), True, 'import tensorflow as tf\n'), (188, 'tensorflow.keras.regularizers.serialize', 'tf.keras.regularizers.serialize', (['self._activity_regularizer'], {}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.keras.constraints.serialize', 'tf.keras.constraints.serialize', (['self._kernel_constraint'], {}), True, 'import tensorflow as tf\n'), (192, 'tensorflow.keras.constraints.serialize', 'tf.keras.constraints.serialize', (['self._bias_constraint'], {}), True, 'import tensorflow as tf\n'), (214, 'tensorflow.cast', 'tf.cast', (['input_tensor', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (215, 'tensorflow.cast', 
'tf.cast', (['attention_output', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (221, 'tensorflow.cast', 'tf.cast', (['intermediate_output', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (224, 'tensorflow.cast', 'tf.cast', (['intermediate_output', 'tf.float16'], {}), True, 'import tensorflow as tf\n'), (232, 'tensorflow.cast', 'tf.cast', (['layer_output', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (235, 'tensorflow.cast', 'tf.cast', (['layer_output', 'tf.float16'], {}), True, 'import tensorflow as tf\n')]
hpssjellis/tfQuantumJs
5235c682b0d2de2eaab8d52b84e295c9f4abf4e5
## from https://pennylane.ai/qml/demos/tutorial_qnn_module_tf.html
## from https://github.com/hpssjellis/my-examples-for-quantum-computing/blob/main/pennylaneai/qml-demos/tf-tutorial_qnn_module_tf.py

import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
import pennylane as qml

X, y = make_moons(n_samples=200, noise=0.1)

print("X")
print(X)
print()
print("y")
print(y)

y_hot = tf.keras.utils.to_categorical(y, num_classes=2)  # one-hot encoded labels

print()
print("y_hot")
print(y_hot)

c = ["#1f77b4" if y_ == 0 else "#ff7f0e" for y_ in y]  # colours for each class
plt.axis("off")
plt.scatter(X[:, 0], X[:, 1], c=c)
# plt.show()
plt.draw()
plt.pause(0.001)
input("Open Ports --> Open Preview or Browser --> push enter to continue")

n_qubits = 2
dev = qml.device("default.qubit", wires=n_qubits)

@qml.qnode(dev)
def qnode(inputs, weights):
    qml.templates.AngleEmbedding(inputs, wires=range(n_qubits))
    qml.templates.BasicEntanglerLayers(weights, wires=range(n_qubits))
    return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]

n_layers = 6
weight_shapes = {"weights": (n_layers, n_qubits)}

qlayer = qml.qnn.KerasLayer(qnode, weight_shapes, output_dim=n_qubits)

clayer_1 = tf.keras.layers.Dense(2)
clayer_2 = tf.keras.layers.Dense(2, activation="softmax")
model = tf.keras.models.Sequential([clayer_1, qlayer, clayer_2])

opt = tf.keras.optimizers.SGD(learning_rate=0.2)
model.compile(opt, loss="mae", metrics=["accuracy"])

X = X.astype("float32")
y_hot = y_hot.astype("float32")

fitting = model.fit(X, y_hot, epochs=3, batch_size=5, validation_split=0.25, verbose=2)

model.summary()

qnn_results = model.evaluate(X, y_hot, verbose=2)

print("qnn_results")
print(qnn_results)

# tf.keras.models.save_model(model, './model11.h5', overwrite=True, include_optimizer=True, save_format=None, signatures=None, options=None)
tf.keras.models.save_model(model, './model12.h5')

# model.save_weights('./model_weights12')
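
# A small follow-up sketch (not in the original script): once trained, the
# hybrid quantum-classical model can be used for inference like any other
# Keras model. Reading the class off the second softmax output is an
# illustrative choice for this two-class demo.
predictions = model.predict(X[:5])
predicted_classes = (predictions[:, 1] > 0.5).astype(int)
print("softmax outputs:", predictions)
print("predicted classes:", predicted_classes)
print("true classes:", y[:5])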
[ "matplotlib.pyplot.pause", "matplotlib.pyplot.scatter", "sklearn.datasets.make_moons", "tensorflow.keras.layers.Dense", "tensorflow.keras.models.save_model", "matplotlib.pyplot.draw", "matplotlib.pyplot.axis", "tensorflow.keras.models.Sequential", "tensorflow.keras.utils.to_categorical", "tensorflow.keras.optimizers.SGD" ]
rocksetta/qnn03-rocksetta.py
[(11, 'sklearn.datasets.make_moons', 'make_moons', ([], {'n_samples': '(200)', 'noise': '(0.1)'}), False, 'from sklearn.datasets import make_moons\n'), (19, 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['y'], {'num_classes': '(2)'}), True, 'import tensorflow as tf\n'), (27, 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (28, 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, (0)]', 'X[:, (1)]'], {'c': 'c'}), True, 'import matplotlib.pyplot as plt\n'), (30, 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (31, 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), True, 'import matplotlib.pyplot as plt\n'), (35, 'pennylane.device', 'qml.device', (['"""default.qubit"""'], {'wires': 'n_qubits'}), True, 'import pennylane as qml\n'), (37, 'pennylane.qnode', 'qml.qnode', (['dev'], {}), True, 'import pennylane as qml\n'), (46, 'pennylane.qnn.KerasLayer', 'qml.qnn.KerasLayer', (['qnode', 'weight_shapes'], {'output_dim': 'n_qubits'}), True, 'import pennylane as qml\n'), (48, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'activation': '"""softmax"""'}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', (['[clayer_1, qlayer, clayer_2]'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(0.2)'}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.keras.models.save_model', 'tf.keras.models.save_model', (['model', '"""./model12.h5"""'], {}), True, 'import tensorflow as tf\n'), (41, 'pennylane.PauliZ', 'qml.PauliZ', ([], {'wires': 'i'}), True, 'import pennylane as qml\n')]
guochunhe/Vitis-AI
e86b6efae11f8703ee647e4a99004dc980b84989
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import random
import warnings
import numpy as np
#import pandas as pd

import tensorflow
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.layers import Input
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model, load_model

from lib.build_data import get_train_val_data
from PIL import Image
import argparse


def get_arguments():
    """Parse all the arguments.

    Returns:
      A list of parsed arguments.
    """
    parser = argparse.ArgumentParser(description="TF2 Semantic Segmentation")
    parser.add_argument("--input_size", type=str, default='128,128',
                        help="Input shape: [H, W]")
    # data config
    parser.add_argument("--img_path", type=str, default='./data/nuclei_data',
                        help="Path to the directory containing the cityscapes validation images.")
    parser.add_argument("--num_classes", type=int, default=2,
                        help="Number of classes to predict.")
    # model config
    parser.add_argument("--weight_file", type=str, default='float/weights.h5',
                        help="Path to the final best weights.")
    # others
    parser.add_argument("--gpu", type=str, default='0',
                        help="choose gpu device.")
    parser.add_argument("--save_path", type=str, default='./results_visulization/',
                        help="where to save the vis results.")
    # note: argparse's type=bool treats any non-empty string as True,
    # so rely on the defaults or pass an empty string for False
    parser.add_argument("--return_seg", type=bool, default=True,
                        help="return gray prediction")
    # quantization config
    parser.add_argument("--quantize", type=bool, default=False,
                        help="whether do quantize or not.")

    return parser.parse_args()


def main():
    args = get_arguments()
    for key, val in args._get_kwargs():
        print(key + ' : ' + str(val))

    # Set data parameters
    NUM_CLASS = args.num_classes
    IMG_WIDTH, IMG_HEIGHT = map(int, args.input_size.split(','))
    IMG_CHANNELS = 3
    TRAIN_PATH = os.path.join(args.img_path, 'stage1_train/')
    TRAIN_VAL_SPLIT = 0.1

    # set model parameters
    seed = 42
    ckpt_file = args.weight_file

    # save prediction
    save_pred = args.return_seg
    output_path = args.save_path
    dir_path = ''

    warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
    # seed the random generators (the original assigned over the seed
    # functions instead of calling them, which had no effect)
    random.seed(seed)
    np.random.seed(seed)

    # load model
    if args.quantize:
        from tensorflow_model_optimization.quantization.keras import vitis_quantize
        with vitis_quantize.quantize_scope():
            model = load_model(ckpt_file)
    else:
        model = load_model(ckpt_file)

    # load data
    X_train, Y_train = get_train_val_data(dir_path, TRAIN_PATH, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)
    index = int(X_train.shape[0] * float(1 - TRAIN_VAL_SPLIT))
    X_val = X_train[index:]
    Y_val = Y_train[index:]

    model.compile(metrics=['accuracy', tensorflow.keras.metrics.MeanIoU(num_classes=2)])
    results = model.evaluate(X_val, Y_val, batch_size=1)

    if save_pred:
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        preds_val = model.predict(X_val, verbose=1)
        preds_val_t = (preds_val > 0.5).astype(np.uint8)
        for ix in range(len(preds_val_t)):
            #image = Image.fromarray(X_train[int(X_train.shape[0]*0.9):][ix])
            #image.save(output_path + '/img_'+str(ix)+'.png')
            #gt = np.squeeze(Y_train[int(X_train.shape[0]*0.9):][ix])
            #gt = Image.fromarray(gt)
            #gt.save(output_path+'/gt_'+str(ix)+'.png')
            pred_val = np.squeeze(preds_val_t[ix])
            pred_val[pred_val == 1] = 255
            pred_val = Image.fromarray(pred_val)
            pred_val.save(output_path + '/pred_' + str(ix) + '.png')


if __name__ == '__main__':
    main()
[ "tensorflow.keras.models.load_model", "numpy.squeeze", "tensorflow.keras.metrics.MeanIoU" ]
models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/TensorFlow/tf2_medical-seg_nuclei_128_128_5.31G_1.3/code/test.py
[(39, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""TF2 Semantic Segmentation"""'}), False, 'import argparse\n'), (73, 'os.path.join', 'os.path.join', (['args.img_path', '"""stage1_train/"""'], {}), False, 'import os\n'), (85, 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning', 'module': '"""skimage"""'}), False, 'import warnings\n'), (98, 'lib.build_data.get_train_val_data', 'get_train_val_data', (['dir_path', 'TRAIN_PATH', 'IMG_HEIGHT', 'IMG_WIDTH', 'IMG_CHANNELS'], {}), False, 'from lib.build_data import get_train_val_data\n'), (95, 'tensorflow.keras.models.load_model', 'load_model', (['ckpt_file'], {}), False, 'from tensorflow.keras.models import Model, load_model\n'), (92, 'tensorflow_model_optimization.quantization.keras.vitis_quantize.quantize_scope', 'vitis_quantize.quantize_scope', ([], {}), False, 'from tensorflow_model_optimization.quantization.keras import vitis_quantize\n'), (93, 'tensorflow.keras.models.load_model', 'load_model', (['ckpt_file'], {}), False, 'from tensorflow.keras.models import Model, load_model\n'), (107, 'os.path.exists', 'os.path.exists', (['output_path'], {}), False, 'import os\n'), (108, 'os.makedirs', 'os.makedirs', (['output_path'], {}), False, 'import os\n'), (120, 'numpy.squeeze', 'np.squeeze', (['preds_val_t[ix]'], {}), True, 'import numpy as np\n'), (122, 'PIL.Image.fromarray', 'Image.fromarray', (['pred_val'], {}), False, 'from PIL import Image\n'), (103, 'tensorflow.keras.metrics.MeanIoU', 'tensorflow.keras.metrics.MeanIoU', ([], {'num_classes': '(2)'}), False, 'import tensorflow\n')]
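The saving loop in this script reduces to threshold, rescale to 8-bit, write PNG. A minimal self-contained sketch of that step on dummy predictions (the array shape and file names are illustrative assumptions, not from the record):

import numpy as np
from PIL import Image

preds = np.random.rand(4, 128, 128, 1)        # stand-in for model.predict output
preds_t = (preds > 0.5).astype(np.uint8)      # binarize at 0.5, as above
for ix in range(len(preds_t)):
    mask = np.squeeze(preds_t[ix])            # (128, 128) array of {0, 1}
    mask[mask == 1] = 255                     # stretch to the full 8-bit range
    Image.fromarray(mask).save(f'pred_{ix}.png')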
quentinverlhac/music-emotion-recognition
c57b934574931fbb4cf31406eddce9d77d8824e4
from utils import load_model, load_dump, test_model
import tensorflow as tf
import argparse

import config


def test(model):
    print(f"======================== evaluation on test data =========================")
    # import data and labels
    test_spectrograms = load_dump(config.TEST_DATA_PATH)
    test_labels = load_dump(config.TEST_LABELS_PATH)

    # define metrics
    test_loss = tf.keras.metrics.Mean(name='test_loss')
    test_accuracy = tf.keras.metrics.BinaryAccuracy(name='test_accuracy')

    test_model(model, test_spectrograms, test_labels, test_loss, test_accuracy, is_test=True)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("model_path", help="path of the model to evaluate")
    args = parser.parse_args()
    model = load_model(args.model_path)
    test(model)
[ "tensorflow.keras.metrics.BinaryAccuracy", "tensorflow.keras.metrics.Mean" ]
test.py
[(11, 'utils.load_dump', 'load_dump', (['config.TEST_DATA_PATH'], {}), False, 'from utils import load_model, load_dump, test_model\n'), (12, 'utils.load_dump', 'load_dump', (['config.TEST_LABELS_PATH'], {}), False, 'from utils import load_model, load_dump, test_model\n'), (15, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""test_loss"""'}), True, 'import tensorflow as tf\n'), (16, 'tensorflow.keras.metrics.BinaryAccuracy', 'tf.keras.metrics.BinaryAccuracy', ([], {'name': '"""test_accuracy"""'}), True, 'import tensorflow as tf\n'), (18, 'utils.test_model', 'test_model', (['model', 'test_spectrograms', 'test_labels', 'test_loss', 'test_accuracy'], {'is_test': '(True)'}), False, 'from utils import load_model, load_dump, test_model\n'), (22, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), False, 'import argparse\n'), (25, 'utils.load_model', 'load_model', (['args.model_path'], {}), False, 'from utils import load_model, load_dump, test_model\n')]
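The `test_model` helper is imported from `utils` and not shown here; presumably it feeds batches through the two stateful metrics created above. A self-contained illustration of that accumulate-then-read pattern, with made-up labels and probabilities:

import tensorflow as tf

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.BinaryAccuracy(name='test_accuracy')

for labels, probs in [([1.0, 0.0], [0.9, 0.2]), ([1.0, 1.0], [0.4, 0.8])]:
    test_loss.update_state(tf.keras.losses.binary_crossentropy(labels, probs))
    test_accuracy.update_state(labels, probs)

print(test_loss.result().numpy())      # mean loss over both batches
print(test_accuracy.result().numpy())  # 0.75: three of four thresholded correctly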
LongmaoTeamTf/deep_recommenders
168dabe4ef3a38cc582d019766cf3de576bc8af1
#!/usr/bin/python3
# -*- coding: utf-8 -*-

import tensorflow as tf


@tf.keras.utils.register_keras_serializable()
class FM(tf.keras.layers.Layer):
    """Factorization Machine"""

    def __init__(self, **kwargs):
        super(FM, self).__init__(**kwargs)

    def build(self, input_shape):
        self._linear = tf.keras.layers.Dense(
            units=1,
            kernel_initializer="zeros",
            name="linear"
        )
        self.built = True

    def call(self, sparse_inputs, embedding_inputs=None, **kwargs):
        if embedding_inputs is None:
            return self._linear(sparse_inputs)

        x_sum = tf.reduce_sum(embedding_inputs, axis=1)
        x_square_sum = tf.reduce_sum(tf.pow(embedding_inputs, 2), axis=1)

        interaction = 0.5 * tf.reduce_sum(
            tf.subtract(
                tf.pow(x_sum, 2),
                x_square_sum
            ), axis=1, keepdims=True)

        return self._linear(sparse_inputs) + interaction


class FactorizationMachine(tf.keras.Model):

    def __init__(self, indicator_columns, embedding_columns, **kwargs):
        super(FactorizationMachine, self).__init__(**kwargs)

        self._indicator_columns = indicator_columns
        self._embedding_columns = embedding_columns

        self._sparse_features_layer = tf.keras.layers.DenseFeatures(self._indicator_columns)
        self._embedding_features_layer = {
            c.categorical_column.key: tf.keras.layers.DenseFeatures(c)
            for c in self._embedding_columns
        }
        self._kernel = FM()

    def call(self, inputs, training=None, mask=None):
        sparse_features = self._sparse_features_layer(inputs)
        embeddings = []
        for column_name, column_input in inputs.items():
            dense_features = self._embedding_features_layer.get(column_name)
            if dense_features is not None:
                embedding = dense_features({column_name: column_input})
                embeddings.append(embedding)
        stack_embeddings = tf.stack(embeddings, axis=1)
        outputs = self._kernel(sparse_features, stack_embeddings)
        return tf.nn.sigmoid(outputs)

    def get_config(self):
        config = {
            "indicator_columns": self._indicator_columns,
            "embedding_columns": self._embedding_columns
        }
        base_config = super(FactorizationMachine, self).get_config()
        return {**base_config, **config}
[ "tensorflow.keras.layers.DenseFeatures", "tensorflow.nn.sigmoid", "tensorflow.pow", "tensorflow.keras.layers.Dense", "tensorflow.stack", "tensorflow.reduce_sum", "tensorflow.keras.utils.register_keras_serializable" ]
deep_recommenders/keras/models/ranking/fm.py
[(7, 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {}), True, 'import tensorflow as tf\n'), (16, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)', 'kernel_initializer': '"""zeros"""', 'name': '"""linear"""'}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['embedding_inputs'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.keras.layers.DenseFeatures', 'tf.keras.layers.DenseFeatures', (['self._indicator_columns'], {}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.stack', 'tf.stack', (['embeddings'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['outputs'], {}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.pow', 'tf.pow', (['embedding_inputs', '(2)'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.keras.layers.DenseFeatures', 'tf.keras.layers.DenseFeatures', (['c'], {}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.pow', 'tf.pow', (['x_sum', '(2)'], {}), True, 'import tensorflow as tf\n')]
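`FM.call` uses the standard factorization-machine identity sum_{i<j} <v_i, v_j> = 0.5 * (||sum_i v_i||^2 - sum_i ||v_i||^2), which replaces the quadratic pairwise sum with a linear-time computation. A quick numerical check (tensor sizes here are arbitrary):

import itertools
import tensorflow as tf

emb = tf.random.normal([2, 4, 3])  # (batch, fields, factors)
x_sum = tf.reduce_sum(emb, axis=1)
# the O(n) form used by FM.call
fast = 0.5 * tf.reduce_sum(tf.pow(x_sum, 2) - tf.reduce_sum(tf.pow(emb, 2), axis=1), axis=1)
# the naive O(n^2) pairwise sum
slow = tf.add_n([tf.reduce_sum(emb[:, i] * emb[:, j], axis=1)
                 for i, j in itertools.combinations(range(4), 2)])
print(tf.reduce_max(tf.abs(fast - slow)).numpy())  # ~0 up to float error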
LongmaoTeamTf/deep_recommenders
168dabe4ef3a38cc582d019766cf3de576bc8af1
#!/usr/bin/python3
# -*- coding: utf-8 -*-

import tensorflow as tf

from deep_recommenders.keras.models.nlp import Transformer


def load_dataset(vocab_size, max_len):
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.imdb.load_data(maxlen=max_len, num_words=vocab_size)
    x_train = tf.keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_len)
    x_test = tf.keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_len)
    x_train_masks = tf.equal(x_train, 0)
    x_test_masks = tf.equal(x_test, 0)
    y_train = tf.keras.utils.to_categorical(y_train)
    y_test = tf.keras.utils.to_categorical(y_test)
    return (x_train, x_train_masks, y_train), (x_test, x_test_masks, y_test)


def build_model(vocab_size, max_len, model_dim=8, n_heads=2, encoder_stack=2, decoder_stack=2, ff_size=50):
    encoder_inputs = tf.keras.Input(shape=(max_len,), name='encoder_inputs')
    decoder_inputs = tf.keras.Input(shape=(max_len,), name='decoder_inputs')
    outputs = Transformer(
        vocab_size,
        model_dim,
        n_heads=n_heads,
        encoder_stack=encoder_stack,
        decoder_stack=decoder_stack,
        feed_forward_size=ff_size
    )(encoder_inputs, decoder_inputs)
    outputs = tf.keras.layers.GlobalAveragePooling1D()(outputs)
    outputs = tf.keras.layers.Dense(2, activation='softmax')(outputs)
    return tf.keras.Model(inputs=[encoder_inputs, decoder_inputs], outputs=outputs)


def train_model(vocab_size=5000, max_len=128, batch_size=128, epochs=10):
    train, test = load_dataset(vocab_size, max_len)
    x_train, x_train_masks, y_train = train
    x_test, x_test_masks, y_test = test

    model = build_model(vocab_size, max_len)
    model.compile(optimizer=tf.keras.optimizers.Adam(beta_1=0.9, beta_2=0.98, epsilon=1e-9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    es = tf.keras.callbacks.EarlyStopping(patience=3)
    model.fit([x_train, x_train_masks], y_train,
              batch_size=batch_size, epochs=epochs, validation_split=0.2, callbacks=[es])

    test_metrics = model.evaluate([x_test, x_test_masks], y_test, batch_size=batch_size, verbose=0)
    print("loss on Test: %.4f" % test_metrics[0])
    print("accu on Test: %.4f" % test_metrics[1])


if __name__ == '__main__':
    train_model()
[ "tensorflow.keras.Input", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.GlobalAveragePooling1D", "tensorflow.keras.datasets.imdb.load_data", "tensorflow.equal", "tensorflow.keras.Model", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.utils.to_categorical", "tensorflow.keras.preprocessing.sequence.pad_sequences" ]
examples/train_transformer_on_imdb_keras.py
[(10, 'tensorflow.keras.datasets.imdb.load_data', 'tf.keras.datasets.imdb.load_data', ([], {'maxlen': 'max_len', 'num_words': 'vocab_size'}), True, 'import tensorflow as tf\n'), (11, 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'tf.keras.preprocessing.sequence.pad_sequences', (['x_train'], {'maxlen': 'max_len'}), True, 'import tensorflow as tf\n'), (12, 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'tf.keras.preprocessing.sequence.pad_sequences', (['x_test'], {'maxlen': 'max_len'}), True, 'import tensorflow as tf\n'), (13, 'tensorflow.equal', 'tf.equal', (['x_train', '(0)'], {}), True, 'import tensorflow as tf\n'), (14, 'tensorflow.equal', 'tf.equal', (['x_test', '(0)'], {}), True, 'import tensorflow as tf\n'), (15, 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['y_train'], {}), True, 'import tensorflow as tf\n'), (16, 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['y_test'], {}), True, 'import tensorflow as tf\n'), (21, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(max_len,)', 'name': '"""encoder_inputs"""'}), True, 'import tensorflow as tf\n'), (22, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(max_len,)', 'name': '"""decoder_inputs"""'}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[encoder_inputs, decoder_inputs]', 'outputs': 'outputs'}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'patience': '(3)'}), True, 'import tensorflow as tf\n'), (23, 'deep_recommenders.keras.models.nlp.Transformer', 'Transformer', (['vocab_size', 'model_dim'], {'n_heads': 'n_heads', 'encoder_stack': 'encoder_stack', 'decoder_stack': 'decoder_stack', 'feed_forward_size': 'ff_size'}), False, 'from deep_recommenders.keras.models.nlp import Transformer\n'), (31, 'tensorflow.keras.layers.GlobalAveragePooling1D', 'tf.keras.layers.GlobalAveragePooling1D', ([], {}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'activation': '"""softmax"""'}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'beta_1': '(0.9)', 'beta_2': '(0.98)', 'epsilon': '(1e-09)'}), True, 'import tensorflow as tf\n')]
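`load_dataset` builds boolean padding masks with `tf.equal(x, 0)`; with Keras' default pre-padding and pre-truncation the masks flag exactly the leading zeros. A toy demonstration (the sequences and maxlen are arbitrary):

import tensorflow as tf

batch = tf.keras.preprocessing.sequence.pad_sequences([[5, 9], [3, 1, 4, 1, 5]], maxlen=4)
print(batch)               # [[0 0 5 9], [1 4 1 5]] -- padded/truncated from the front
print(tf.equal(batch, 0))  # True exactly at the padded positions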
mattiasu96/recsys-challenge-2021-twitter
80b78050739a93165cbaaf256bd13932582a8930
from sklearn.metrics import average_precision_score, log_loss
from sklearn.model_selection import train_test_split
import dask.dataframe as dd
import os, sys
import time
import RootPath
from Scripts.utilities import start_cluster
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, Embedding, LSTM, Concatenate, Input, Flatten, BatchNormalization
from tensorflow.keras.optimizers import *
from tensorflow.keras.callbacks import *
from tensorflow.keras import regularizers
from tensorflow.keras.losses import *
import numpy as np
import pandas as pd
from tensorflow.keras.layers.experimental import preprocessing
import gc


def buildModel(layer, inputSize, depth=3, firstHidden=256, dropout=0, reduction_factor=2,
               loss=BinaryCrossentropy(from_logits=False), useNormalization=True, optimizer=Adam, lr=0.0003):
    model = Sequential()
    shape = (inputSize,)
    size = firstHidden
    model.add(layer)
    for i in range(depth):
        model.add(Dense(size, input_shape=shape, activation="relu"))
        model.add(Dropout(dropout))
        if useNormalization:
            model.add(BatchNormalization())
        size = size // reduction_factor
    model.add(Dense(1, activation="sigmoid"))
    model.compile(loss=loss,
                  metrics=[tf.keras.metrics.AUC(name="PRAUC", curve='PR'), "accuracy"],
                  optimizer=optimizer(learning_rate=lr))
    return model


def calculate_ctr(gt):
    positive = len([x for x in gt if x == 1])
    ctr = positive / float(len(gt))
    return ctr


def rce(y_true, y_pred):
    cross_entropy = log_loss(y_true, y_pred)
    data_ctr = calculate_ctr(y_true)
    strawman_cross_entropy = log_loss(y_true, [data_ctr for _ in range(len(y_true))])
    return (1.0 - cross_entropy / strawman_cross_entropy) * 100.0


def ap(y_true, y_pred):
    return average_precision_score(y_true, y_pred)


if __name__ == '__main__':
    print('Python %s on %s' % (sys.version, sys.platform))
    # code to automatically choose aws or local runtime
    if RootPath.is_aws():
        print("Detected running on AWS!")
        # set so that memory_limit * n_workers <= max available RAM, and avoid memory_limit < 16GB
        c = start_cluster(n_workers=16, threads_per_worker=1, memory_limit="48GB", processes=True)
    else:
        print("Running on local")

    dataset_volume_path = '/home/ubuntu/new'
    print(f"Dataset folder used: {RootPath.get_dataset_path()}")

    # change to modify the fraction of data used for train-validation-test (1 = 100%)
    frac = 1
    # choose the interaction (index in the engCols array of engagement columns)
    idx = 3
    engCols = ['engagement_reply_timestamp', 'engagement_comment_timestamp',
               'engagement_retweet_timestamp', 'engagement_like_timestamp']
    print(engCols[idx])

    parquet_dataset_path = os.path.join(dataset_volume_path, "train")
    parquet_dataset_Test_path = os.path.join(dataset_volume_path, "test")

    cols = [
        'creator_follower_count', 'creator_following_count', 'creator_is_verified',
        'creator_creation_timestamp', 'engager_follower_count', 'engager_following_count',
        'engager_is_verified', 'engager_creation_timestamp', 'engagement_creator_follows_engager',
        'engagement_reply_timestamp', 'engagement_retweet_timestamp',
        'engagement_comment_timestamp', 'engagement_like_timestamp',
        'is_from_official_val', 'number_of_photo', 'number_of_gif', 'number_of_video',
        'tweet_links_count', 'tweet_domains_count', 'tweet_hashtags_count',
        'tweet_hashtags_unique_count', 'mapped_language_id', 'mapped_tweet_type',
        'tweet_timestamp_hour_sin', 'tweet_timestamp_hour_cos', 'tweet_timestamp_day',
        'tweet_timestamp_weekday', 'tweet_timestamp_hour_bin',
        'tweet_timestamp_creator_account_age_bin',
        'text_is_reply', 'text_tokens_count', 'text_unknown_count', 'text_special_tokens_count',
        'text_questions_count', 'text_semantic_separation', 'text_newline_count',
        'text_separated_count', 'text_char_count', 'text_asking_like', 'text_asking_reply',
        'text_comment_related_count', 'text_no_comment_related_count', 'text_asking_retweet',
        'text_nsfw_count', 'text_kpop_count', 'text_covid_count', 'text_sports_count',
        'text_japanesetrending_count', 'text_anime_count', 'text_vtuber_count',
        'text_news_count', 'text_myanmar_count', 'text_genshin_count', 'text_crypto_count',
        'text_trending_count', 'text_love_count', 'text_slang_count', 'text_mention_count',
        'engager_follower_quantile', 'creator_follower_quantile',
        'creator_follower_ratio', 'engager_follower_ratio',
        'creator_vs_engager_follower_ratio', 'creator_vs_engager_following_ratio',
        'CE_language__timestamp_hour_bin',
        'CE_language__timestamp_hour_bin__timestamp_weekday',
        'CE_language__type', 'CE_language__engager_follower_quantile',
        'CE_type__timestamp_weekday', 'CE_type__timestamp_hour_bin',
        'CE_timestamp_creator_account_age_bin__engager_follower_quantile__creator_follower_quantile',
        'CE_language__presence_of_photo__presence_of_gif__presence_of_video',
        'TE_mapped_engager_id_engagement_reply', 'TE_number_of_photo_engagement_reply',
        'TE_number_of_gif_engagement_reply', 'TE_number_of_video_engagement_reply',
        'TE_mapped_tweet_type_engagement_reply', 'TE_mapped_language_id_engagement_reply',
        'TE_mapped_creator_id_engagement_reply', 'TE_mapped_tweet_links_id_1_engagement_reply',
        'TE_mapped_tweet_links_id_2_engagement_reply',
        'TE_mapped_tweet_hashtags_id_1_engagement_reply',
        'TE_mapped_tweet_hashtags_id_2_engagement_reply',
        'TE_mapped_domains_id_1_engagement_reply', 'TE_mapped_domains_id_2_engagement_reply',
        "TE_('mapped_domains_id_1', 'mapped_language_id', 'engagement_creator_follows_engager', 'mapped_tweet_type', 'number_of_photo', 'creator_is_verified')_engagement_reply",
        'TE_tweet_links_count_engagement_reply', 'TE_tweet_domains_count_engagement_reply',
        'TE_tweet_hashtags_count_engagement_reply',
        'TE_tweet_hashtags_unique_count_engagement_reply',
        'TE_mapped_engager_id_engagement_retweet', 'TE_number_of_photo_engagement_retweet',
        'TE_number_of_gif_engagement_retweet', 'TE_number_of_video_engagement_retweet',
        'TE_mapped_tweet_type_engagement_retweet', 'TE_mapped_language_id_engagement_retweet',
        'TE_mapped_creator_id_engagement_retweet', 'TE_mapped_tweet_links_id_1_engagement_retweet',
        'TE_mapped_tweet_links_id_2_engagement_retweet',
        'TE_mapped_tweet_hashtags_id_1_engagement_retweet',
        'TE_mapped_tweet_hashtags_id_2_engagement_retweet',
        'TE_mapped_domains_id_1_engagement_retweet', 'TE_mapped_domains_id_2_engagement_retweet',
        "TE_('mapped_domains_id_1', 'mapped_language_id', 'engagement_creator_follows_engager', 'mapped_tweet_type', 'number_of_photo', 'creator_is_verified')_engagement_retweet",
        'TE_tweet_links_count_engagement_retweet', 'TE_tweet_domains_count_engagement_retweet',
        'TE_tweet_hashtags_count_engagement_retweet',
        'TE_tweet_hashtags_unique_count_engagement_retweet',
        'TE_mapped_engager_id_engagement_comment', 'TE_number_of_photo_engagement_comment',
        'TE_number_of_gif_engagement_comment', 'TE_number_of_video_engagement_comment',
        'TE_mapped_tweet_type_engagement_comment', 'TE_mapped_language_id_engagement_comment',
        'TE_mapped_creator_id_engagement_comment', 'TE_mapped_tweet_links_id_1_engagement_comment',
        'TE_mapped_tweet_links_id_2_engagement_comment',
        'TE_mapped_tweet_hashtags_id_1_engagement_comment',
        'TE_mapped_tweet_hashtags_id_2_engagement_comment',
        'TE_mapped_domains_id_1_engagement_comment', 'TE_mapped_domains_id_2_engagement_comment',
        "TE_('mapped_domains_id_1', 'mapped_language_id', 'engagement_creator_follows_engager', 'mapped_tweet_type', 'number_of_photo', 'creator_is_verified')_engagement_comment",
        'TE_tweet_links_count_engagement_comment', 'TE_tweet_domains_count_engagement_comment',
        'TE_tweet_hashtags_count_engagement_comment',
        'TE_tweet_hashtags_unique_count_engagement_comment',
        'TE_mapped_engager_id_engagement_like', 'TE_number_of_photo_engagement_like',
        'TE_number_of_gif_engagement_like', 'TE_number_of_video_engagement_like',
        'TE_mapped_tweet_type_engagement_like', 'TE_mapped_language_id_engagement_like',
        'TE_mapped_creator_id_engagement_like', 'TE_mapped_tweet_links_id_1_engagement_like',
        'TE_mapped_tweet_links_id_2_engagement_like',
        'TE_mapped_tweet_hashtags_id_1_engagement_like',
        'TE_mapped_tweet_hashtags_id_2_engagement_like',
        'TE_mapped_domains_id_1_engagement_like', 'TE_mapped_domains_id_2_engagement_like',
        "TE_('mapped_domains_id_1', 'mapped_language_id', 'engagement_creator_follows_engager', 'mapped_tweet_type', 'number_of_photo', 'creator_is_verified')_engagement_like",
        'TE_tweet_links_count_engagement_like', 'TE_tweet_domains_count_engagement_like',
        'TE_tweet_hashtags_count_engagement_like',
        'TE_tweet_hashtags_unique_count_engagement_like',
    ]

    # load datasets
    print('Start reading \n')
    df = dd.read_parquet(parquet_dataset_path, engine='pyarrow', columns=cols)
    dfTest = dd.read_parquet(parquet_dataset_Test_path, engine='pyarrow', columns=cols)

    # choose fraction of dataset to use
    df = df.sample(frac=frac)

    chosen = engCols[idx]
    rest = [c for c in engCols if c != chosen]

    # Drop other engagements
    df = df.drop(columns=rest)
    dfTest = dfTest.drop(columns=rest)

    # prepare output
    df[chosen] = df[chosen].mask(df[chosen] < 0, 0)
    df[chosen] = df[chosen].mask(df[chosen] > 0, 1)

    dfTest[chosen] = dfTest[chosen].mask(dfTest[chosen] < 0, 0)
    dfTest[chosen] = dfTest[chosen].mask(dfTest[chosen] > 0, 1)

    # prepare output and drop it from the dataset
    yTest = dfTest[chosen]
    dfTest = dfTest.drop(columns=[chosen])
    y = df[chosen]
    df = df.drop(columns=[chosen])

    print('Start compute \n')
    # From Dask to Pandas: train
    df = df.astype(np.float32)
    df = df.compute()
    y = y.compute()

    print('Start compute \n')
    # From Dask to Pandas: validation
    dfTest = dfTest.astype(np.float32)
    dfTest = dfTest.compute()
    yTest = yTest.compute()

    # save the list of columns and their order for inference time
    np.save("cols.npy", df.columns)

    yTest = yTest.to_numpy(copy=False)
    gc.collect()

    # Prepare Normalization layer to normalize NN inputs
    layer = preprocessing.Normalization()
    layer.adapt(df)
    print('Columns name:', df.columns)

    # rename to easier names
    X_train = df
    y_train = y

    # build model using the normalization layer
    model = buildModel(layer, len(df.columns))
    del df, y

    BS = 4096
    # prepare input and output as numpy arrays
    trainIn = X_train.to_numpy(copy=False)
    trainOut = y_train.to_numpy(copy=False)

    best = 0
    # iteratively train one epoch at a time, evaluating metrics on the
    # validation set at each step; the model is saved only when RCE improves
    for i in range(30):
        model.fit(trainIn, trainOut, epochs=i + 1, initial_epoch=i, batch_size=BS)
        preds = model.predict(dfTest.to_numpy(copy=False), batch_size=4096)
        # avoid exact 0 or 1 predictions, which in case of a mistake can lead to -infinite RCE
        preds = np.clip(preds, np.finfo(float).eps, 0.9999999)
        rce_score = rce(yTest, preds)
        ap_score = ap(yTest, preds)
        with open(f"perf_{chosen.replace('engagement_', '').replace('_timestamp', '')}.txt", "a+") as f:
            f.write(f'The model scored a TEST RCE of: {rce_score}\n')
            f.write(f'The model scored a TEST AP of: {ap_score}\n')
        if rce_score > best:
            model.save(f"{chosen.replace('engagement_', '').replace('_timestamp', '')}_epoch_{i}")
            best = rce_score
[ "tensorflow.keras.layers.Dense", "tensorflow.keras.metrics.AUC", "numpy.save", "numpy.finfo", "sklearn.metrics.log_loss", "tensorflow.keras.layers.BatchNormalization", "sklearn.metrics.average_precision_score", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.experimental.preprocessing.Normalization", "tensorflow.keras.models.Sequential" ]
train_NN_parametric_class.py
[(21, 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras.models import Sequential\n'), (43, 'sklearn.metrics.log_loss', 'log_loss', (['y_true', 'y_pred'], {}), False, 'from sklearn.metrics import average_precision_score, log_loss\n'), (50, 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y_true', 'y_pred'], {}), False, 'from sklearn.metrics import average_precision_score, log_loss\n'), (58, 'RootPath.is_aws', 'RootPath.is_aws', ([], {}), False, 'import RootPath\n'), (78, 'os.path.join', 'os.path.join', (['dataset_volume_path', '"""train"""'], {}), False, 'import os, sys\n'), (79, 'os.path.join', 'os.path.join', (['dataset_volume_path', '"""test"""'], {}), False, 'import os, sys\n'), (231, 'dask.dataframe.read_parquet', 'dd.read_parquet', (['parquet_dataset_path'], {'engine': '"""pyarrow"""', 'columns': 'cols'}), True, 'import dask.dataframe as dd\n'), (232, 'dask.dataframe.read_parquet', 'dd.read_parquet', (['parquet_dataset_Test_path'], {'engine': '"""pyarrow"""', 'columns': 'cols'}), True, 'import dask.dataframe as dd\n'), (269, 'numpy.save', 'np.save', (['"""cols.npy"""', 'df.columns'], {}), True, 'import numpy as np\n'), (272, 'gc.collect', 'gc.collect', ([], {}), False, 'import gc\n'), (275, 'tensorflow.keras.layers.experimental.preprocessing.Normalization', 'preprocessing.Normalization', ([], {}), False, 'from tensorflow.keras.layers.experimental import preprocessing\n'), (32, 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, Embedding, LSTM, Concatenate, Input, Flatten, BatchNormalization\n'), (61, 'Scripts.utilities.start_cluster', 'start_cluster', ([], {'n_workers': '(16)', 'threads_per_worker': '(1)', 'memory_limit': '"""48GB"""', 'processes': '(True)'}), False, 'from Scripts.utilities import start_cluster\n'), (27, 'tensorflow.keras.layers.Dense', 'Dense', (['size'], {'input_shape': 'shape', 'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, Embedding, LSTM, Concatenate, Input, Flatten, BatchNormalization\n'), (28, 'tensorflow.keras.layers.Dropout', 'Dropout', (['dropout'], {}), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, Embedding, LSTM, Concatenate, Input, Flatten, BatchNormalization\n'), (30, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, Embedding, LSTM, Concatenate, Input, Flatten, BatchNormalization\n'), (33, 'tensorflow.keras.metrics.AUC', 'tf.keras.metrics.AUC', ([], {'name': '"""PRAUC"""', 'curve': '"""PR"""'}), True, 'import tensorflow as tf\n'), (68, 'RootPath.get_dataset_path', 'RootPath.get_dataset_path', ([], {}), False, 'import RootPath\n'), (301, 'numpy.finfo', 'np.finfo', (['float'], {}), True, 'import numpy as np\n')]
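A small sanity check of the `rce` helper defined above (assuming the functions from this script are in scope): a constant predictor equal to the positive rate matches the strawman and scores about 0, while a better-than-chance predictor scores positive.

import numpy as np

y_true = np.array([1, 0, 0, 1, 0, 0, 0, 1])
ctr = y_true.mean()
print(rce(y_true, np.full(len(y_true), ctr)))                # ~0.0
print(rce(y_true, np.clip(y_true * 0.6 + 0.2, 0.01, 0.99)))  # clearly > 0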
siavash-khodadadeh/MetaLearning-TF2.0
de852bd3b2ff46f8d390cebf561add3a166ee855
import tensorflow as tf

from models.sml.sml import SML
from networks.maml_umtra_networks import MiniImagenetModel
from databases import CelebADatabase, LFWDatabase


def run_celeba():
    celeba_database = CelebADatabase()
    base_model = tf.keras.applications.VGG19(weights='imagenet')
    feature_model = tf.keras.models.Model(inputs=base_model.input, outputs=base_model.layers[24].output)

    sml = SML(
        database=celeba_database,
        target_database=LFWDatabase(),
        network_cls=MiniImagenetModel,
        n=5,
        k=1,
        k_val_ml=5,
        k_val_val=15,
        k_val_test=15,
        k_test=15,
        meta_batch_size=4,
        num_steps_ml=5,
        lr_inner_ml=0.05,
        num_steps_validation=5,
        save_after_iterations=15000,
        meta_learning_rate=0.001,
        n_clusters=500,
        feature_model=feature_model,
        # feature_size=288,
        feature_size=4096,
        input_shape=(224, 224, 3),
        preprocess_function=tf.keras.applications.vgg19.preprocess_input,
        log_train_images_after_iteration=1000,
        number_of_tasks_val=100,
        number_of_tasks_test=1000,
        clip_gradients=True,
        report_validation_frequency=250,
        experiment_name='cactus_celeba_original3'
    )
    sml.train(iterations=60000)
    sml.evaluate(iterations=50, seed=42)


if __name__ == '__main__':
    run_celeba()
[ "tensorflow.keras.applications.VGG19", "tensorflow.keras.models.Model" ]
models/sml/sml_celeba_cactus.py
[(9, 'databases.CelebADatabase', 'CelebADatabase', ([], {}), False, 'from databases import CelebADatabase, LFWDatabase\n'), (10, 'tensorflow.keras.applications.VGG19', 'tf.keras.applications.VGG19', ([], {'weights': '"""imagenet"""'}), True, 'import tensorflow as tf\n'), (11, 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'base_model.input', 'outputs': 'base_model.layers[24].output'}), True, 'import tensorflow as tf\n'), (15, 'databases.LFWDatabase', 'LFWDatabase', ([], {}), False, 'from databases import CelebADatabase, LFWDatabase\n')]
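The script slices the stock Keras VGG19 at `layers[24]`; assuming the standard layer ordering, that is the `fc2` dense layer, whose 4096-dimensional output matches the `feature_size=4096` argument. A quick check that avoids downloading ImageNet weights:

import tensorflow as tf

base = tf.keras.applications.VGG19(weights=None)
print(base.layers[24].name, base.layers[24].output.shape)  # fc2 (None, 4096)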
asuiconlab/psiz
4f05348cf43d2d53ff9cc6dee633de385df883e3
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example that infers a shared embedding for three groups.

Fake data is generated from a ground truth model for three different
groups. In this example, these groups represent groups of agents with
varying levels of skill: novices, intermediates, and experts. Each group
has a different set of attention weights. An embedding model is
inferred from the simulated data and compared to the ground truth
model.

Results are saved in the directory specified by `fp_example`. By
default, a `psiz_examples` directory is created in your home directory.

Example output:

    Restart Summary
    n_valid_restart 1 | total_duration: 2104 s
    best | n_epoch: 999 | val_loss: 3.0700
    mean ±stddev | n_epoch: 999 ±0 | val_loss: 3.0700 ±0.0000 | 2088 ±0 s | 2090 ±0 ms/epoch

    Model Comparison (R^2)
    ================================
      True  |        Inferred
            | Novice  Interm  Expert
    --------+-----------------------
     Novice |   0.94    0.67    0.06
     Interm |   0.55    0.87    0.35
     Expert |   0.09    0.42    0.85

"""

import copy
import os
from pathlib import Path
import shutil

import imageio
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

import psiz

# Uncomment the following line to force eager execution.
# tf.config.experimental_run_functions_eagerly(True)

# Modify the following to control GPU visibility.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"


def main():
    """Run script."""
    # Settings.
    fp_example = Path.home() / Path('psiz_examples', 'rank', 'vi_3ge')
    fp_board = fp_example / Path('logs', 'fit')
    n_stimuli = 30
    n_dim = 3
    n_group = 3
    n_trial = 2000
    epochs = 1000
    n_restart = 1
    batch_size = 128
    n_frame = 1

    # Directory preparation.
    fp_example.mkdir(parents=True, exist_ok=True)
    # Remove existing TensorBoard logs.
    if fp_board.exists():
        shutil.rmtree(fp_board)

    # Plot settings.
    small_size = 6
    medium_size = 8
    large_size = 10
    plt.rc('font', size=small_size)  # controls default text sizes
    plt.rc('axes', titlesize=medium_size)
    plt.rc('axes', labelsize=small_size)
    plt.rc('xtick', labelsize=small_size)
    plt.rc('ytick', labelsize=small_size)
    plt.rc('legend', fontsize=small_size)
    plt.rc('figure', titlesize=large_size)

    model_true = ground_truth(n_stimuli, n_group)
    proxy_true = psiz.models.Proxy(model=model_true)

    # Generate a random docket of trials to show each group.
    generator = psiz.generators.RandomRank(
        n_stimuli, n_reference=8, n_select=2
    )
    docket = generator.generate(n_trial)

    # Create virtual agents for each group.
    agent_novice = psiz.agents.RankAgent(proxy_true.model, group_id=0)
    agent_interm = psiz.agents.RankAgent(proxy_true.model, group_id=1)
    agent_expert = psiz.agents.RankAgent(proxy_true.model, group_id=2)

    # Simulate similarity judgments for each group.
    obs_novice = agent_novice.simulate(docket)
    obs_interm = agent_interm.simulate(docket)
    obs_expert = agent_expert.simulate(docket)
    obs = psiz.trials.stack((obs_novice, obs_interm, obs_expert))

    # Compute ground truth similarity matrices.
    def truth_sim_func0(z_q, z_ref):
        return proxy_true.similarity(z_q, z_ref, group_id=0)

    def truth_sim_func1(z_q, z_ref):
        return proxy_true.similarity(z_q, z_ref, group_id=1)

    def truth_sim_func2(z_q, z_ref):
        return proxy_true.similarity(z_q, z_ref, group_id=2)

    simmat_truth = (
        psiz.utils.pairwise_matrix(truth_sim_func0, proxy_true.z[0]),
        psiz.utils.pairwise_matrix(truth_sim_func1, proxy_true.z[0]),
        psiz.utils.pairwise_matrix(truth_sim_func2, proxy_true.z[0])
    )

    # Partition observations into 80% train, 10% validation and 10% test set.
    obs_train, obs_val, obs_test = psiz.utils.standard_split(obs)

    compile_kwargs = {
        'loss': tf.keras.losses.CategoricalCrossentropy(),
        'optimizer': tf.keras.optimizers.Adam(lr=.001),
        'weighted_metrics': [
            tf.keras.metrics.CategoricalCrossentropy(name='cce')
        ]
    }

    # Infer independent models with increasing amounts of data.
    if n_frame == 1:
        n_obs = np.array([obs_train.n_trial], dtype=int)
    else:
        n_obs = np.round(
            np.linspace(15, obs_train.n_trial, n_frame)
        ).astype(np.int64)
    r2 = np.empty([n_frame, n_group, n_group]) * np.nan
    train_loss = np.empty((n_frame)) * np.nan
    val_loss = np.empty((n_frame)) * np.nan
    test_loss = np.empty((n_frame)) * np.nan
    for i_frame in range(n_frame):
        include_idx = np.arange(0, n_obs[i_frame])
        obs_round_train = obs_train.subset(include_idx)
        print(
            '\n  Frame {0} ({1} obs)'.format(i_frame, obs_round_train.n_trial)
        )

        # Define model.
        model = build_model(n_stimuli, n_dim, n_group, obs_round_train.n_trial)
        proxy_inferred = psiz.models.Proxy(model=model)

        # Define callbacks.
        fp_board_frame = fp_board / Path('frame_{0}'.format(i_frame))
        cb_board = psiz.keras.callbacks.TensorBoardRe(
            log_dir=fp_board_frame, histogram_freq=0,
            write_graph=False, write_images=False, update_freq='epoch',
            profile_batch=0, embeddings_freq=0, embeddings_metadata=None
        )
        cb_early = psiz.keras.callbacks.EarlyStoppingRe(
            'loss', patience=100, mode='min', restore_best_weights=False,
            verbose=1
        )
        callbacks = [cb_board, cb_early]

        # Infer model.
        restart_record = proxy_inferred.fit(
            obs_round_train, validation_data=obs_val, epochs=epochs,
            batch_size=batch_size, callbacks=callbacks, n_restart=n_restart,
            monitor='val_loss', verbose=1, compile_kwargs=compile_kwargs
        )

        train_loss[i_frame] = restart_record.record['loss'][0]
        val_loss[i_frame] = restart_record.record['val_loss'][0]

        tf.keras.backend.clear_session()
        proxy_inferred.model.n_sample = 100
        proxy_inferred.compile(**compile_kwargs)
        test_metrics = proxy_inferred.evaluate(
            obs_test, verbose=0, return_dict=True
        )
        test_loss[i_frame] = test_metrics['loss']

        z = proxy_inferred.model.stimuli.embeddings.mode().numpy()
        if proxy_inferred.model.stimuli.mask_zero:
            z = z[:, 1:, :]

        # Compare the inferred model with ground truth by comparing the
        # similarity matrices implied by each model.
        def infer_sim_func(z_q, z_ref):
            return proxy_inferred.similarity(z_q, z_ref)

        simmat_infer = (
            psiz.utils.pairwise_matrix(infer_sim_func, z[0]),
            psiz.utils.pairwise_matrix(infer_sim_func, z[1]),
            psiz.utils.pairwise_matrix(infer_sim_func, z[2])
        )
        for i_truth in range(n_group):
            for j_infer in range(n_group):
                r2[i_frame, i_truth, j_infer] = psiz.utils.matrix_comparison(
                    simmat_truth[i_truth], simmat_infer[j_infer],
                    score='r2'
                )

        # Display comparison results. A good inferred model will have a high
        # R^2 value on the diagonal elements (max is 1) and relatively low R^2
        # values on the off-diagonal elements.
        print('\n    Model Comparison (R^2)')
        print('    ================================')
        print('      True  |        Inferred')
        print('            | Novice  Interm  Expert')
        print('    --------+-----------------------')
        print('     Novice | {0: >6.2f}  {1: >6.2f}  {2: >6.2f}'.format(
            r2[i_frame, 0, 0], r2[i_frame, 0, 1], r2[i_frame, 0, 2]))
        print('     Interm | {0: >6.2f}  {1: >6.2f}  {2: >6.2f}'.format(
            r2[i_frame, 1, 0], r2[i_frame, 1, 1], r2[i_frame, 1, 2]))
        print('     Expert | {0: >6.2f}  {1: >6.2f}  {2: >6.2f}'.format(
            r2[i_frame, 2, 0], r2[i_frame, 2, 1], r2[i_frame, 2, 2]))
        print('\n')

        # Create and save visual frame.
        fig0 = plt.figure(figsize=(12, 5), dpi=200)
        plot_frame(
            fig0, n_obs, train_loss, val_loss, test_loss, r2, proxy_true,
            proxy_inferred, i_frame
        )
        fname = fp_example / Path('frame_{0}.tiff'.format(i_frame))
        plt.savefig(
            os.fspath(fname), format='tiff', bbox_inches="tight", dpi=300
        )

    # Create animation.
    if n_frame > 1:
        frames = []
        for i_frame in range(n_frame):
            fname = fp_example / Path('frame_{0}.tiff'.format(i_frame))
            frames.append(imageio.imread(fname))
        imageio.mimwrite(fp_example / Path('evolution.gif'), frames, fps=1)


def ground_truth(n_stimuli, n_group):
    """Return a ground truth embedding."""
    n_dim = 4
    embedding = tf.keras.layers.Embedding(
        n_stimuli + 1, n_dim, mask_zero=True,
        embeddings_initializer=tf.keras.initializers.RandomNormal(
            stddev=.17, seed=58
        )
    )
    stimuli = psiz.keras.layers.Stimuli(embedding=embedding)
    kernel = psiz.keras.layers.AttentionKernel(
        group_level=1,
        distance=psiz.keras.layers.WeightedMinkowski(
            rho_initializer=tf.keras.initializers.Constant(2.),
            trainable=False,
        ),
        attention=psiz.keras.layers.GroupAttention(
            n_dim=n_dim, n_group=n_group
        ),
        similarity=psiz.keras.layers.ExponentialSimilarity(
            tau_initializer=tf.keras.initializers.Constant(1.),
            gamma_initializer=tf.keras.initializers.Constant(0.),
            trainable=False,
        )
    )
    kernel.attention.embeddings.assign(
        np.array((
            (1.8, 1.8, .2, .2),
            (1., 1., 1., 1.),
            (.2, .2, 1.8, 1.8)
        ))
    )
    model = psiz.models.Rank(stimuli=stimuli, kernel=kernel)
    return model


def build_model(n_stimuli, n_dim, n_group, n_obs_train):
    """Build model.

    Arguments:
        n_stimuli: Integer indicating the number of stimuli in the
            embedding.
        n_dim: Integer indicating the dimensionality of the embedding.
        n_group: Integer indicating the number of groups.
        n_obs_train: Integer indicating the number of training
            observations. Used to determine KL weight for variational
            inference.

    Returns:
        model: A TensorFlow Keras model.

    """
    kl_weight = 1. / n_obs_train
    prior_scale = .2

    n_source_embeddings = n_group * (n_stimuli + 1)
    embedding_posterior = psiz.keras.layers.EmbeddingNormalDiag(
        n_source_embeddings, n_dim, mask_zero=True,
        scale_initializer=tf.keras.initializers.Constant(
            tfp.math.softplus_inverse(prior_scale).numpy()
        )
    )
    embedding_prior = psiz.keras.layers.EmbeddingShared(
        n_source_embeddings, n_dim, mask_zero=True,
        embedding=psiz.keras.layers.EmbeddingNormalDiag(
            1, 1,
            loc_initializer=tf.keras.initializers.Constant(0.),
            scale_initializer=tf.keras.initializers.Constant(
                tfp.math.softplus_inverse(prior_scale).numpy()
            ),
            loc_trainable=False
        )
    )
    embedding_variational = psiz.keras.layers.EmbeddingVariational(
        posterior=embedding_posterior, prior=embedding_prior,
        kl_weight=kl_weight, kl_n_sample=30
    )
    stimuli = psiz.keras.layers.Stimuli(
        embedding=embedding_variational, group_level=1, n_group=n_group
    )

    kernel = psiz.keras.layers.Kernel(
        distance=psiz.keras.layers.WeightedMinkowski(
            rho_initializer=tf.keras.initializers.Constant(2.),
            trainable=False,
        ),
        similarity=psiz.keras.layers.ExponentialSimilarity(
            beta_initializer=tf.keras.initializers.Constant(10.),
            tau_initializer=tf.keras.initializers.Constant(1.),
            gamma_initializer=tf.keras.initializers.Constant(0.),
            trainable=False
        )
    )
    model = psiz.models.Rank(
        stimuli=stimuli, kernel=kernel, n_sample=1
    )
    return model


def plot_frame(
        fig0, n_obs, train_loss, val_loss, test_loss, r2, proxy_true,
        proxy_inferred, i_frame):
    """Plot posteriors."""
    # Settings.
    group_labels = ['Novice', 'Intermediate', 'Expert']

    n_group = proxy_inferred.model.stimuli.n_group
    n_dim = proxy_inferred.model.n_dim

    gs = fig0.add_gridspec(n_group + 1, 4)

    f0_ax0 = fig0.add_subplot(gs[0, 0:2])
    plot_loss(f0_ax0, n_obs, train_loss, val_loss, test_loss)

    f0_ax1 = fig0.add_subplot(gs[0, 2])
    plot_convergence(fig0, f0_ax1, n_obs, r2[i_frame])

    gs.tight_layout(fig0)


def plot_logitnormal(ax, dist, name=None, c=None):
    """Plot univariate distribution.

    Arguments:
        ax:
        dist:
        name:

    """
    if name is None:
        name = 'x'

    x = np.linspace(.001, .999, 1000)
    y = dist.prob(x)

    # Determine mode from samples.
    idx = np.argmax(y)
    x_mode = x[idx]

    ax.plot(x, y, c=c)
    ax.text(x_mode, .75 * np.max(y), '{0:.2f}'.format(x_mode))
    ax.set_xlabel(r'${0}$'.format(name))
    ax.set_ylabel(r'$p({0})$'.format(name))
    ax.set_xlim([0, 1])
    ax.set_xticks([0, 1])
    ax.set_xticklabels([0, 1])


def plot_loss(ax, n_obs, train_loss, val_loss, test_loss):
    """Plot loss."""
    # Settings
    ms = 2

    ax.plot(n_obs, train_loss, 'bo-', ms=ms, label='Train Loss')
    ax.plot(n_obs, val_loss, 'go-', ms=ms, label='Val. Loss')
    ax.plot(n_obs, test_loss, 'ro-', ms=ms, label='Test Loss')
    ax.set_title('Optimization Objective')

    ax.set_xlabel('Trials')
    limits = [0, np.max(n_obs) + 10]
    ax.set_xlim(limits)
    ticks = [np.min(n_obs), np.max(n_obs)]
    ax.set_xticks(ticks)

    ax.set_ylabel('Loss')
    ax.legend()


def plot_convergence(fig, ax, n_obs, r2):
    """Plot convergence."""
    # Settings.
    cmap = matplotlib.cm.get_cmap('Greys')
    labels = ['Nov', 'Int', 'Exp']

    im = ax.imshow(r2, cmap=cmap, vmin=0., vmax=1.)
    fig.colorbar(im, ax=ax)
    ax.set_xticks([0, 1, 2])
    ax.set_xticklabels(labels)
    ax.set_yticks([0, 1, 2])
    ax.set_yticklabels(labels)
    ax.set_ylabel('True')
    ax.set_xlabel('Inferred')
    ax.set_title(r'$R^2$ Convergence')


if __name__ == "__main__":
    main()
[ "tensorflow.keras.initializers.Constant", "tensorflow.keras.losses.CategoricalCrossentropy", "numpy.linspace", "numpy.min", "numpy.arange", "tensorflow.keras.initializers.RandomNormal", "matplotlib.pyplot.rc", "numpy.max", "tensorflow.keras.metrics.CategoricalCrossentropy", "numpy.argmax", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.backend.clear_session", "matplotlib.cm.get_cmap", "numpy.array", "numpy.empty", "matplotlib.pyplot.figure" ]
examples/rank/vi_3ge.py
[(93, 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'small_size'}), True, 'import matplotlib.pyplot as plt\n'), (94, 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'medium_size'}), True, 'import matplotlib.pyplot as plt\n'), (95, 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'small_size'}), True, 'import matplotlib.pyplot as plt\n'), (96, 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'small_size'}), True, 'import matplotlib.pyplot as plt\n'), (97, 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'small_size'}), True, 'import matplotlib.pyplot as plt\n'), (98, 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': 'small_size'}), True, 'import matplotlib.pyplot as plt\n'), (99, 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': 'large_size'}), True, 'import matplotlib.pyplot as plt\n'), (102, 'psiz.models.Proxy', 'psiz.models.Proxy', ([], {'model': 'model_true'}), False, 'import psiz\n'), (105, 'psiz.generators.RandomRank', 'psiz.generators.RandomRank', (['n_stimuli'], {'n_reference': '(8)', 'n_select': '(2)'}), False, 'import psiz\n'), (111, 'psiz.agents.RankAgent', 'psiz.agents.RankAgent', (['proxy_true.model'], {'group_id': '(0)'}), False, 'import psiz\n'), (112, 'psiz.agents.RankAgent', 'psiz.agents.RankAgent', (['proxy_true.model'], {'group_id': '(1)'}), False, 'import psiz\n'), (113, 'psiz.agents.RankAgent', 'psiz.agents.RankAgent', (['proxy_true.model'], {'group_id': '(2)'}), False, 'import psiz\n'), (119, 'psiz.trials.stack', 'psiz.trials.stack', (['(obs_novice, obs_interm, obs_expert)'], {}), False, 'import psiz\n'), (138, 'psiz.utils.standard_split', 'psiz.utils.standard_split', (['obs'], {}), False, 'import psiz\n'), (267, 'psiz.keras.layers.Stimuli', 'psiz.keras.layers.Stimuli', ([], {'embedding': 'embedding'}), False, 'import psiz\n'), (290, 'psiz.models.Rank', 'psiz.models.Rank', ([], {'stimuli': 'stimuli', 'kernel': 'kernel'}), False, 'import psiz\n'), (331, 'psiz.keras.layers.EmbeddingVariational', 'psiz.keras.layers.EmbeddingVariational', ([], {'posterior': 'embedding_posterior', 'prior': 'embedding_prior', 'kl_weight': 'kl_weight', 'kl_n_sample': '(30)'}), False, 'import psiz\n'), (335, 'psiz.keras.layers.Stimuli', 'psiz.keras.layers.Stimuli', ([], {'embedding': 'embedding_variational', 'group_level': '(1)', 'n_group': 'n_group'}), False, 'import psiz\n'), (351, 'psiz.models.Rank', 'psiz.models.Rank', ([], {'stimuli': 'stimuli', 'kernel': 'kernel', 'n_sample': '(1)'}), False, 'import psiz\n'), (390, 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.999)', '(1000)'], {}), True, 'import numpy as np\n'), (394, 'numpy.argmax', 'np.argmax', (['y'], {}), True, 'import numpy as np\n'), (429, 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""Greys"""'], {}), False, 'import matplotlib\n'), (72, 'pathlib.Path.home', 'Path.home', ([], {}), False, 'from pathlib import Path\n'), (72, 'pathlib.Path', 'Path', (['"""psiz_examples"""', '"""rank"""', '"""vi_3ge"""'], {}), False, 'from pathlib import Path\n'), (73, 'pathlib.Path', 'Path', (['"""logs"""', '"""fit"""'], {}), False, 'from pathlib import Path\n'), (87, 'shutil.rmtree', 'shutil.rmtree', (['fp_board'], {}), False, 'import shutil\n'), (132, 'psiz.utils.pairwise_matrix', 'psiz.utils.pairwise_matrix', (['truth_sim_func0', 'proxy_true.z[0]'], {}), False, 'import psiz\n'), (133, 'psiz.utils.pairwise_matrix', 'psiz.utils.pairwise_matrix', (['truth_sim_func1', 'proxy_true.z[0]'], {}), False, 'import psiz\n'), (134, 
'psiz.utils.pairwise_matrix', 'psiz.utils.pairwise_matrix', (['truth_sim_func2', 'proxy_true.z[0]'], {}), False, 'import psiz\n'), (141, 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': '(0.001)'}), True, 'import tensorflow as tf\n'), (150, 'numpy.array', 'np.array', (['[obs_train.n_trial]'], {'dtype': 'int'}), True, 'import numpy as np\n'), (155, 'numpy.empty', 'np.empty', (['[n_frame, n_group, n_group]'], {}), True, 'import numpy as np\n'), (156, 'numpy.empty', 'np.empty', (['n_frame'], {}), True, 'import numpy as np\n'), (157, 'numpy.empty', 'np.empty', (['n_frame'], {}), True, 'import numpy as np\n'), (158, 'numpy.empty', 'np.empty', (['n_frame'], {}), True, 'import numpy as np\n'), (160, 'numpy.arange', 'np.arange', (['(0)', 'n_obs[i_frame]'], {}), True, 'import numpy as np\n'), (168, 'psiz.models.Proxy', 'psiz.models.Proxy', ([], {'model': 'model'}), False, 'import psiz\n'), (172, 'psiz.keras.callbacks.TensorBoardRe', 'psiz.keras.callbacks.TensorBoardRe', ([], {'log_dir': 'fp_board_frame', 'histogram_freq': '(0)', 'write_graph': '(False)', 'write_images': '(False)', 'update_freq': '"""epoch"""', 'profile_batch': '(0)', 'embeddings_freq': '(0)', 'embeddings_metadata': 'None'}), False, 'import psiz\n'), (177, 'psiz.keras.callbacks.EarlyStoppingRe', 'psiz.keras.callbacks.EarlyStoppingRe', (['"""loss"""'], {'patience': '(100)', 'mode': '"""min"""', 'restore_best_weights': '(False)', 'verbose': '(1)'}), False, 'import psiz\n'), (193, 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), True, 'import tensorflow as tf\n'), (239, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)', 'dpi': '(200)'}), True, 'import matplotlib.pyplot as plt\n'), (284, 'numpy.array', 'np.array', (['((1.8, 1.8, 0.2, 0.2), (1.0, 1.0, 1.0, 1.0), (0.2, 0.2, 1.8, 1.8))'], {}), True, 'import numpy as np\n'), (419, 'numpy.min', 'np.min', (['n_obs'], {}), True, 'import numpy as np\n'), (419, 'numpy.max', 'np.max', (['n_obs'], {}), True, 'import numpy as np\n'), (144, 'tensorflow.keras.metrics.CategoricalCrossentropy', 'tf.keras.metrics.CategoricalCrossentropy', ([], {'name': '"""cce"""'}), True, 'import tensorflow as tf\n'), (211, 'psiz.utils.pairwise_matrix', 'psiz.utils.pairwise_matrix', (['infer_sim_func', 'z[0]'], {}), False, 'import psiz\n'), (212, 'psiz.utils.pairwise_matrix', 'psiz.utils.pairwise_matrix', (['infer_sim_func', 'z[1]'], {}), False, 'import psiz\n'), (213, 'psiz.utils.pairwise_matrix', 'psiz.utils.pairwise_matrix', (['infer_sim_func', 'z[2]'], {}), False, 'import psiz\n'), (246, 'os.fspath', 'os.fspath', (['fname'], {}), False, 'import os\n'), (263, 'tensorflow.keras.initializers.RandomNormal', 'tf.keras.initializers.RandomNormal', ([], {'stddev': '(0.17)', 'seed': '(58)'}), True, 'import tensorflow as tf\n'), (274, 'psiz.keras.layers.GroupAttention', 'psiz.keras.layers.GroupAttention', ([], {'n_dim': 'n_dim', 'n_group': 'n_group'}), False, 'import psiz\n'), (398, 'numpy.max', 'np.max', (['y'], {}), True, 'import numpy as np\n'), (417, 'numpy.max', 'np.max', (['n_obs'], {}), True, 'import numpy as np\n'), (217, 'psiz.utils.matrix_comparison', 'psiz.utils.matrix_comparison', (['simmat_truth[i_truth]', 'simmat_infer[j_infer]'], {'score': '"""r2"""'}), False, 'import psiz\n'), (254, 'imageio.imread', 'imageio.imread', (['fname'], {}), False, 'import imageio\n'), (255, 'pathlib.Path', 
'Path', (['"""evolution.gif"""'], {}), False, 'from pathlib import Path\n'), (153, 'numpy.linspace', 'np.linspace', (['(15)', 'obs_train.n_trial', 'n_frame'], {}), True, 'import numpy as np\n'), (271, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(2.0)'], {}), True, 'import tensorflow as tf\n'), (278, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (279, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (324, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (341, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(2.0)'], {}), True, 'import tensorflow as tf\n'), (345, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(10.0)'], {}), True, 'import tensorflow as tf\n'), (346, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (347, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (317, 'tensorflow_probability.math.softplus_inverse', 'tfp.math.softplus_inverse', (['prior_scale'], {}), True, 'import tensorflow_probability as tfp\n'), (326, 'tensorflow_probability.math.softplus_inverse', 'tfp.math.softplus_inverse', (['prior_scale'], {}), True, 'import tensorflow_probability as tfp\n')]
avdosev/building_predictor
99ec9b82d1a9421723f958d38cf7f97c8204fe04
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)

import numpy as np
import os
from osgeo import gdal

os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
from tensorflow import keras
import tensorflow as tf
import math

import model as m
import config
from common import train_pipe, test_pipe, find_info


def horizontal_flip(image, rate=0.5):
    if np.random.rand() < rate:
        image = image[:, :, ::-1, :]
    return image


def vertical_flip(image, rate=0.5):
    if np.random.rand() < rate:
        image = image[:, ::-1, :, :]
    return image


def augment(image):
    image = horizontal_flip(image)
    image = vertical_flip(image)
    return image


class Maps(keras.utils.Sequence):
    def __init__(self, batch_size):
        self.batch_size = batch_size
        # collect all the paths to the satellite images
        city_paths = [os.path.join(root, file)
                      for root, _, files in os.walk('data/train') if len(files) > 0
                      for file in files][6:7]
        # load everything into memory
        y = []
        x = []
        x2 = []
        print('start preparing')
        for city_path in city_paths:
            print(f'preparing "{city_path}"')
            df = gdal.Open(city_path)
            data = df.GetRasterBand(1).ReadAsArray()
            for i in range(0, data.shape[0] - config.map_size, 5):
                for j in range(0, data.shape[1] - config.map_size, 3):
                    y_i = i + config.map_size // 2
                    x_i = j + config.map_size // 2
                    val = data[y_i, x_i]
                    # need skip
                    if val == 0 or (val == 2 and i % 2 == 1):
                        continue
                    x.append(np.expand_dims(data[i:i + config.map_size, j:j + config.map_size], axis=2))
                    x2.append(find_info(y_i, x_i, data))
                    y.append(val)
        print('start train pipe')
        y = np.array(y)
        y = test_pipe(y)
        x = np.array(x)
        x = train_pipe(x)
        print('input shape:', x.shape)
        print('output shape:', y.shape)
        print('preparation ready')
        self.y = y
        self.x = x
        self.x2 = x2

    def __len__(self):
        return math.ceil(len(self.x) / self.batch_size)

    def __getitem__(self, idx):
        batch_x = np.array(self.x[idx * self.batch_size:(idx + 1) * self.batch_size])
        batch_x2 = np.array(self.x2[idx * self.batch_size:(idx + 1) * self.batch_size])
        batch_y = np.array(self.y[idx * self.batch_size:(idx + 1) * self.batch_size])
        return [batch_x, batch_x2], batch_y


def main():
    name = 'first'
    model_path = f'models/model_{name}_latest.hdf5'
    model = m.get_model(4, conv_size=config.map_size)
    # if os.path.exists(model_path):
    #     model.load_weights(model_path)

    model.summary()
    optimizer = keras.optimizers.Adam(lr=0.001)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy', 'mae', tf.keras.metrics.FalseNegatives(), tf.keras.metrics.Recall()])

    train_dataset = Maps(config.batch_size)

    model.fit(
        train_dataset,
        epochs=50,
        initial_epoch=0,
        callbacks=[
            # keras.callbacks.EarlyStopping(monitor="loss", min_delta=0, patience=4, verbose=0, mode="min"),
            keras.callbacks.ModelCheckpoint(
                filepath=f'models/model_best_{name}.hdf5',
                save_weights_only=True,
                monitor='accuracy',
                mode='max',
                save_best_only=True
            ),
            keras.callbacks.ModelCheckpoint(
                filepath=f'models/model_min_{name}.hdf5',
                save_weights_only=True,
                monitor='false_negatives',
                mode='min',
                save_best_only=True
            ),
            keras.callbacks.ModelCheckpoint(
                filepath=f'models/model_min_mae_{name}.hdf5',
                save_weights_only=True,
                monitor='mae',
                mode='min',
                save_best_only=True
            ),
            # keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)
        ]
    )
    model.save(model_path)


if __name__ == '__main__':
    main()
[ "tensorflow.keras.callbacks.ModelCheckpoint", "numpy.expand_dims", "tensorflow.keras.optimizers.Adam", "numpy.random.rand", "tensorflow.keras.metrics.Recall", "numpy.array", "tensorflow.keras.metrics.FalseNegatives" ]
train.py
[(2, 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), False, 'import warnings\n'), (94, 'model.get_model', 'm.get_model', (['(4)'], {'conv_size': 'config.map_size'}), True, 'import model as m\n'), (101, 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.001)'}), False, 'from tensorflow import keras\n'), (18, 'numpy.random.rand', 'np.random.rand', ([], {}), True, 'import numpy as np\n'), (24, 'numpy.random.rand', 'np.random.rand', ([], {}), True, 'import numpy as np\n'), (63, 'numpy.array', 'np.array', (['y'], {}), True, 'import numpy as np\n'), (64, 'common.test_pipe', 'test_pipe', (['y'], {}), False, 'from common import train_pipe, test_pipe, find_info\n'), (66, 'numpy.array', 'np.array', (['x'], {}), True, 'import numpy as np\n'), (67, 'common.train_pipe', 'train_pipe', (['x'], {}), False, 'from common import train_pipe, test_pipe, find_info\n'), (80, 'numpy.array', 'np.array', (['self.x[idx * self.batch_size:(idx + 1) * self.batch_size]'], {}), True, 'import numpy as np\n'), (82, 'numpy.array', 'np.array', (['self.x2[idx * self.batch_size:(idx + 1) * self.batch_size]'], {}), True, 'import numpy as np\n'), (85, 'numpy.array', 'np.array', (['self.y[idx * self.batch_size:(idx + 1) * self.batch_size]'], {}), True, 'import numpy as np\n'), (47, 'osgeo.gdal.Open', 'gdal.Open', (['city_path'], {}), False, 'from osgeo import gdal\n'), (39, 'os.path.join', 'os.path.join', (['root', 'file'], {}), False, 'import os\n'), (106, 'tensorflow.keras.metrics.FalseNegatives', 'tf.keras.metrics.FalseNegatives', ([], {}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.keras.metrics.Recall', 'tf.keras.metrics.Recall', ([], {}), True, 'import tensorflow as tf\n'), (115, 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': 'f"""models/model_best_{name}.hdf5"""', 'save_weights_only': '(True)', 'monitor': '"""accuracy"""', 'mode': '"""max"""', 'save_best_only': '(True)'}), False, 'from tensorflow import keras\n'), (122, 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': 'f"""models/model_min_{name}.hdf5"""', 'save_weights_only': '(True)', 'monitor': '"""false_negatives"""', 'mode': '"""min"""', 'save_best_only': '(True)'}), False, 'from tensorflow import keras\n'), (129, 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': 'f"""models/model_min_mae_{name}.hdf5"""', 'save_weights_only': '(True)', 'monitor': '"""mae"""', 'mode': '"""min"""', 'save_best_only': '(True)'}), False, 'from tensorflow import keras\n'), (39, 'os.walk', 'os.walk', (['"""data/train"""'], {}), False, 'import os\n'), (59, 'numpy.expand_dims', 'np.expand_dims', (['data[i:i + config.map_size, j:j + config.map_size]'], {'axis': '(2)'}), True, 'import numpy as np\n'), (60, 'common.find_info', 'find_info', (['y_i', 'x_i', 'data'], {}), False, 'from common import train_pipe, test_pipe, find_info\n')]
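The flip helpers above slice a 4-D batch laid out as (batch, height, width, channels), so `horizontal_flip` reverses only the width axis. A tiny NumPy check (shapes chosen arbitrarily):

import numpy as np

batch = np.arange(2 * 2 * 3 * 1).reshape(2, 2, 3, 1)
flipped = batch[:, :, ::-1, :]  # the same slice horizontal_flip applies
print(batch[0, 0, :, 0], '->', flipped[0, 0, :, 0])  # [0 1 2] -> [2 1 0]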
jarokaz/ucaip-labs
8db85d65a22ad3ffac8a25efea975207f6276049
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A DNN keras classification model.""" import os import logging import tensorflow as tf import tensorflow_transform as tft from tensorflow import keras from src.common import features from src.model_training import defaults def create_model_inputs(): inputs = {} for feature_name in features.FEATURE_NAMES: name = features.transformed_name(feature_name) if feature_name in features.NUMERICAL_FEATURE_NAMES: inputs[name] = keras.layers.Input(name=name, shape=[], dtype=tf.float32) elif feature_name in features.categorical_feature_names(): inputs[name] = keras.layers.Input(name=name, shape=[], dtype=tf.int64) else: pass return inputs def create_binary_classifier(tft_output, hyperparams): input_layers = create_model_inputs() layers = [] for key in input_layers: feature_name = features.original_name(key) if feature_name in features.EMBEDDING_CATEGORICAL_FEATURES: vocab_size = tft_output.vocabulary_size_by_name(feature_name) embedding_size = features.EMBEDDING_CATEGORICAL_FEATURES[feature_name] embedding_output = keras.layers.Embedding( input_dim=vocab_size + 1, output_dim=embedding_size, name=f"{key}_embedding", )(input_layers[key]) layers.append(embedding_output) elif feature_name in features.ONEHOT_CATEGORICAL_FEATURE_NAMES: vocab_size = tft_output.vocabulary_size_by_name(feature_name) onehot_layer = keras.layers.experimental.preprocessing.CategoryEncoding( max_tokens=vocab_size, output_mode="binary", name=f"{key}_onehot", )(input_layers[key]) layers.append(onehot_layer) elif feature_name in features.NUMERICAL_FEATURE_NAMES: numeric_layer = tf.expand_dims(input_layers[key], -1) layers.append(numeric_layer) else: pass joined = keras.layers.Concatenate(name="combines_inputs")(layers) feedforward_output = keras.Sequential( [ keras.layers.Dense(units, activation="relu") for units in hyperparams["hidden_units"] ], name="feedforward_network", )(joined) logits = keras.layers.Dense(units=1, name="logits")(feedforward_output) model = keras.Model(inputs=input_layers, outputs=[logits]) return model
[ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.layers.Embedding", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.experimental.preprocessing.CategoryEncoding", "tensorflow.expand_dims", "tensorflow.keras.Model", "tensorflow.keras.layers.Input" ]
src/model_training/model.py
[(78, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'input_layers', 'outputs': '[logits]'}), False, 'from tensorflow import keras\n'), (29, 'src.common.features.transformed_name', 'features.transformed_name', (['feature_name'], {}), False, 'from src.common import features\n'), (44, 'src.common.features.original_name', 'features.original_name', (['key'], {}), False, 'from src.common import features\n'), (68, 'tensorflow.keras.layers.Concatenate', 'keras.layers.Concatenate', ([], {'name': '"""combines_inputs"""'}), False, 'from tensorflow import keras\n'), (76, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(1)', 'name': '"""logits"""'}), False, 'from tensorflow import keras\n'), (31, 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'name': 'name', 'shape': '[]', 'dtype': 'tf.float32'}), False, 'from tensorflow import keras\n'), (32, 'src.common.features.categorical_feature_names', 'features.categorical_feature_names', ([], {}), False, 'from src.common import features\n'), (33, 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'name': 'name', 'shape': '[]', 'dtype': 'tf.int64'}), False, 'from tensorflow import keras\n'), (48, 'tensorflow.keras.layers.Embedding', 'keras.layers.Embedding', ([], {'input_dim': '(vocab_size + 1)', 'output_dim': 'embedding_size', 'name': 'f"""{key}_embedding"""'}), False, 'from tensorflow import keras\n'), (71, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['units'], {'activation': '"""relu"""'}), False, 'from tensorflow import keras\n'), (56, 'tensorflow.keras.layers.experimental.preprocessing.CategoryEncoding', 'keras.layers.experimental.preprocessing.CategoryEncoding', ([], {'max_tokens': 'vocab_size', 'output_mode': '"""binary"""', 'name': 'f"""{key}_onehot"""'}), False, 'from tensorflow import keras\n'), (63, 'tensorflow.expand_dims', 'tf.expand_dims', (['input_layers[key]', '(-1)'], {}), True, 'import tensorflow as tf\n')]
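A hedged usage sketch for the `create_binary_classifier` factory above. The transform-artifacts directory and the hyperparameter values are assumptions; `TFTransformOutput` must point at the output of a real tf.Transform run for the vocabulary lookups inside the factory to succeed.

import tensorflow_transform as tft
from tensorflow import keras

tft_output = tft.TFTransformOutput("path/to/transform_output")  # assumed artifacts dir
hyperparams = {"hidden_units": [64, 32]}  # assumed values for one trial
model = create_binary_classifier(tft_output, hyperparams)
model.compile(
    optimizer=keras.optimizers.Adam(),
    # The head is named "logits" and has no activation, so compute the loss from logits.
    loss=keras.losses.BinaryCrossentropy(from_logits=True),
)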
herman-nside/spektral
58bb524ec783f187145c3afe53db491dbc1f0ba0
import numpy as np
import tensorflow as tf
from tensorflow.keras import Input, Model

from spektral import layers
from spektral.layers.ops import sp_matrix_to_sp_tensor

tf.keras.backend.set_floatx("float64")
SINGLE, BATCH, MIXED = 1, 2, 3  # Single, batch, mixed
LAYER_K_, MODES_K_, KWARGS_K_ = "layer", "modes", "kwargs"
batch_size = 32
N = 11
F = 7
S = 3
A = np.ones((N, N))
X = np.random.normal(size=(N, F))
E = np.random.normal(size=(N, N, S))
E_single = np.random.normal(size=(N * N, S))

"""
Each entry in TESTS represents a test to be run for a particular Layer.
Each config dictionary has the form:
{
    LAYER_K_: LayerClass,
    MODES_K_: [...],
    KWARGS_K_: {...},
},

LAYER_K_ is the class of the layer to be tested.

MODES_K_ is a list containing the data modes supported by the layer, and
should be at least one of: SINGLE, MIXED, BATCH.

KWARGS_K_ is a dictionary containing:
    - all keywords to be passed to the layer (including mandatory ones);
    - an optional entry 'edges': True if the layer supports edge attributes;
    - an optional entry 'sparse_support': [...], indicating whether the layer
    supports sparse or dense inputs as a bool (e.g., 'sparse_support':
    [False, True] will test the layer on both dense and sparse adjacency
    matrix; 'sparse_support': [True] will only test for sparse). By default,
    each layer is tested only on dense inputs. Batch mode only tests for
    dense inputs.

The testing loop will create a simple 1-layer model and run it in single,
mixed, and batch mode according to what is specified in MODES_K_ in the
testing config.

The loop will check:
    - that the model does not crash;
    - that the output shape is pre-computed correctly;
    - that the real output shape is correct;
    - that the get_config() method works correctly (i.e., it is possible to
    re-instantiate a layer using LayerClass(**layer_instance.get_config())).
""" TESTS = [ { LAYER_K_: layers.GCNConv, MODES_K_: [SINGLE, BATCH, MIXED], KWARGS_K_: { "channels": 8, "activation": "relu", "sparse_support": [False, True], }, }, { LAYER_K_: layers.ChebConv, MODES_K_: [SINGLE, BATCH, MIXED], KWARGS_K_: { "K": 3, "channels": 8, "activation": "relu", "sparse_support": [False, True], }, }, { LAYER_K_: layers.GraphSageConv, MODES_K_: [SINGLE, MIXED], KWARGS_K_: {"channels": 8, "activation": "relu", "sparse_support": [True]}, }, { LAYER_K_: layers.ECCConv, MODES_K_: [SINGLE, BATCH, MIXED], KWARGS_K_: { "kernel_network": [8], "channels": 8, "activation": "relu", "edges": True, "sparse_support": [False, True], }, }, { LAYER_K_: layers.GATConv, MODES_K_: [SINGLE, BATCH, MIXED], KWARGS_K_: { "channels": 8, "attn_heads": 2, "concat_heads": False, "activation": "relu", "sparse_support": [False, True], }, }, { LAYER_K_: layers.GCSConv, MODES_K_: [SINGLE, BATCH, MIXED], KWARGS_K_: { "channels": 8, "activation": "relu", "sparse_support": [False, True], }, }, { LAYER_K_: layers.ARMAConv, MODES_K_: [SINGLE, BATCH, MIXED], KWARGS_K_: { "channels": 8, "activation": "relu", "order": 2, "iterations": 2, "share_weights": True, "sparse_support": [False, True], }, }, { LAYER_K_: layers.APPNPConv, MODES_K_: [SINGLE, BATCH, MIXED], KWARGS_K_: { "channels": 8, "activation": "relu", "mlp_hidden": [16], "sparse_support": [False, True], }, }, { LAYER_K_: layers.GINConv, MODES_K_: [SINGLE, MIXED], KWARGS_K_: { "channels": 8, "activation": "relu", "mlp_hidden": [16], "sparse_support": [True], }, }, { LAYER_K_: layers.DiffusionConv, MODES_K_: [SINGLE, BATCH, MIXED], KWARGS_K_: { "channels": 8, "activation": "tanh", "num_diffusion_steps": 5, "sparse_support": [False], }, }, { LAYER_K_: layers.GatedGraphConv, MODES_K_: [SINGLE, MIXED], KWARGS_K_: {"channels": 10, "n_layers": 3, "sparse_support": [True]}, }, { LAYER_K_: layers.AGNNConv, MODES_K_: [SINGLE, MIXED], KWARGS_K_: {"channels": F, "trainable": True, "sparse_support": [True]}, }, { LAYER_K_: layers.TAGConv, MODES_K_: [SINGLE, MIXED], KWARGS_K_: {"channels": F, "K": 3, "sparse_support": [True]}, }, { LAYER_K_: layers.CrystalConv, MODES_K_: [SINGLE, MIXED], KWARGS_K_: {"channels": F, "edges": True, "sparse_support": [True]}, }, { LAYER_K_: layers.EdgeConv, MODES_K_: [SINGLE, MIXED], KWARGS_K_: { "channels": 8, "activation": "relu", "mlp_hidden": [16], "sparse_support": [True], }, }, { LAYER_K_: layers.GeneralConv, MODES_K_: [SINGLE, MIXED], KWARGS_K_: {"channels": 256, "sparse_support": [True]}, }, { LAYER_K_: layers.MessagePassing, MODES_K_: [SINGLE, MIXED], KWARGS_K_: {"channels": F, "sparse_support": [True]}, }, ] def _test_single_mode(layer, **kwargs): sparse = kwargs.pop("sparse", False) A_in = Input(shape=(None,), sparse=sparse) X_in = Input(shape=(F,)) inputs = [X_in, A_in] if sparse: input_data = [X, sp_matrix_to_sp_tensor(A)] else: input_data = [X, A] if kwargs.pop("edges", None): E_in = Input(shape=(S,)) inputs.append(E_in) input_data.append(E_single) layer_instance = layer(**kwargs) output = layer_instance(inputs) model = Model(inputs, output) output = model(input_data) assert output.shape == (N, kwargs["channels"]) def _test_batch_mode(layer, **kwargs): A_batch = np.stack([A] * batch_size) X_batch = np.stack([X] * batch_size) A_in = Input(shape=(N, N)) X_in = Input(shape=(N, F)) inputs = [X_in, A_in] input_data = [X_batch, A_batch] if kwargs.pop("edges", None): E_batch = np.stack([E] * batch_size) E_in = Input(shape=(N, N, S)) inputs.append(E_in) input_data.append(E_batch) layer_instance = layer(**kwargs) output = 
layer_instance(inputs) model = Model(inputs, output) output = model(input_data) assert output.shape == (batch_size, N, kwargs["channels"]) def _test_mixed_mode(layer, **kwargs): sparse = kwargs.pop("sparse", False) X_batch = np.stack([X] * batch_size) A_in = Input(shape=(N,), sparse=sparse) X_in = Input(shape=(N, F)) inputs = [X_in, A_in] if sparse: input_data = [X_batch, sp_matrix_to_sp_tensor(A)] else: input_data = [X_batch, A] if kwargs.pop("edges", None): E_in = Input( shape=( N * N, S, ) ) inputs.append(E_in) E_batch = np.stack([E_single] * batch_size) input_data.append(E_batch) layer_instance = layer(**kwargs) output = layer_instance(inputs) model = Model(inputs, output) output = model(input_data) assert output.shape == (batch_size, N, kwargs["channels"]) def _test_get_config(layer, **kwargs): if kwargs.get("edges"): kwargs.pop("edges") layer_instance = layer(**kwargs) config = layer_instance.get_config() layer_instance_new = layer(**config) config_new = layer_instance_new.get_config() config.pop("name") config_new.pop("name") # Remove 'name' if we have advanced activations (needed for GeneralConv) if "activation" in config and "class_name" in config["activation"]: config["activation"]["config"].pop("name") config_new["activation"]["config"].pop("name") assert config_new == config def test_layers(): for test in TESTS: sparse = test[KWARGS_K_]["sparse_support"] for mode in test[MODES_K_]: if mode == SINGLE: for s in sparse: _test_single_mode(test[LAYER_K_], sparse=s, **test[KWARGS_K_]) elif mode == BATCH: _test_batch_mode(test[LAYER_K_], **test[KWARGS_K_]) elif mode == MIXED: for s in sparse: _test_mixed_mode(test[LAYER_K_], sparse=s, **test[KWARGS_K_]) _test_get_config(test[LAYER_K_], **test[KWARGS_K_])
[ "tensorflow.keras.Input", "tensorflow.keras.backend.set_floatx", "tensorflow.keras.Model", "numpy.ones", "numpy.stack", "numpy.random.normal" ]
tests/test_layers/test_convolutional.py
[(8, 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['"""float64"""'], {}), True, 'import tensorflow as tf\n'), (15, 'numpy.ones', 'np.ones', (['(N, N)'], {}), True, 'import numpy as np\n'), (16, 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, F)'}), True, 'import numpy as np\n'), (17, 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, N, S)'}), True, 'import numpy as np\n'), (18, 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N * N, S)'}), True, 'import numpy as np\n'), (197, 'tensorflow.keras.Input', 'Input', ([], {'shape': '(None,)', 'sparse': 'sparse'}), False, 'from tensorflow.keras import Input, Model\n'), (198, 'tensorflow.keras.Input', 'Input', ([], {'shape': '(F,)'}), False, 'from tensorflow.keras import Input, Model\n'), (212, 'tensorflow.keras.Model', 'Model', (['inputs', 'output'], {}), False, 'from tensorflow.keras import Input, Model\n'), (220, 'numpy.stack', 'np.stack', (['([A] * batch_size)'], {}), True, 'import numpy as np\n'), (221, 'numpy.stack', 'np.stack', (['([X] * batch_size)'], {}), True, 'import numpy as np\n'), (223, 'tensorflow.keras.Input', 'Input', ([], {'shape': '(N, N)'}), False, 'from tensorflow.keras import Input, Model\n'), (224, 'tensorflow.keras.Input', 'Input', ([], {'shape': '(N, F)'}), False, 'from tensorflow.keras import Input, Model\n'), (236, 'tensorflow.keras.Model', 'Model', (['inputs', 'output'], {}), False, 'from tensorflow.keras import Input, Model\n'), (245, 'numpy.stack', 'np.stack', (['([X] * batch_size)'], {}), True, 'import numpy as np\n'), (246, 'tensorflow.keras.Input', 'Input', ([], {'shape': '(N,)', 'sparse': 'sparse'}), False, 'from tensorflow.keras import Input, Model\n'), (247, 'tensorflow.keras.Input', 'Input', ([], {'shape': '(N, F)'}), False, 'from tensorflow.keras import Input, Model\n'), (267, 'tensorflow.keras.Model', 'Model', (['inputs', 'output'], {}), False, 'from tensorflow.keras import Input, Model\n'), (206, 'tensorflow.keras.Input', 'Input', ([], {'shape': '(S,)'}), False, 'from tensorflow.keras import Input, Model\n'), (229, 'numpy.stack', 'np.stack', (['([E] * batch_size)'], {}), True, 'import numpy as np\n'), (230, 'tensorflow.keras.Input', 'Input', ([], {'shape': '(N, N, S)'}), False, 'from tensorflow.keras import Input, Model\n'), (255, 'tensorflow.keras.Input', 'Input', ([], {'shape': '(N * N, S)'}), False, 'from tensorflow.keras import Input, Model\n'), (262, 'numpy.stack', 'np.stack', (['([E_single] * batch_size)'], {}), True, 'import numpy as np\n'), (201, 'spektral.layers.ops.sp_matrix_to_sp_tensor', 'sp_matrix_to_sp_tensor', (['A'], {}), False, 'from spektral.layers.ops import sp_matrix_to_sp_tensor\n'), (250, 'spektral.layers.ops.sp_matrix_to_sp_tensor', 'sp_matrix_to_sp_tensor', (['A'], {}), False, 'from spektral.layers.ops import sp_matrix_to_sp_tensor\n')]
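For orientation, this is roughly the model a SINGLE-mode iteration builds for the first TESTS entry (GCNConv with 8 channels, sparse adjacency); it reuses the module-level constants and helpers defined above.

from spektral.layers import GCNConv

x_in = Input(shape=(F,))
a_in = Input(shape=(None,), sparse=True)
out = GCNConv(8, activation="relu")([x_in, a_in])
model = Model([x_in, a_in], out)
pred = model([X, sp_matrix_to_sp_tensor(A)])
assert pred.shape == (N, 8)  # one output row per node, `channels` columns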
Acemyzoe/mnist-TensorRT
df455542d1f889af755e08412b7fd81343cff2ff
#!/usr/bin/python
# -*- coding:utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import time
import numpy

def mnist_model():
    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train, x_test = x_train / 255.0, x_test / 255.0

    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Flatten(input_shape=(28, 28)))
    model.add(tf.keras.layers.Dense(512, activation='relu'))
    model.add(tf.keras.layers.Dropout(0.2))
    model.add(tf.keras.layers.Dense(10, activation='softmax'))
    model.summary()

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=64, epochs=10)
    score = model.evaluate(x_test, y_test, verbose=2)
    print('loss:', score[0])
    print('accuracy:', score[1])
    # model.save('tf_model', save_format='tf')
    model.save('tf_model.h5')

def trt(trt_opt):
    converter = tf.experimental.tensorrt.Converter(input_saved_model_dir='tf_model')
    converter.convert()  # performs the conversion, but no optimization yet; optimization happens at inference time
    if trt_opt:
        mnist = tf.keras.datasets.mnist
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_test = x_test.astype('float32')
        x_test = x_test / 255.0
        def input_fn():
            yield (x_test[:1])
        converter.build(input_fn)
        # save after optimizing
        converter.save('trt_model_opt')
    else:
        converter.save('trt_model')

def opt(model_path):
    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_test = x_test.astype('float32')
    x_test /= 255
    model_loaded = tf.saved_model.load(model_path)  # load the model
    graph_func = model_loaded.signatures['serving_default']  # get the inference function
    t = time.time()
    # output = graph_func(tf.constant(x_test))
    output = model_loaded(x_test)
    print(output[0], '\n', time.time() - t)

if __name__ == '__main__':
    print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
    mnist_model()
    #trt(True)
    #opt("tf_model")
    #opt("trt_model")
    #opt("trt_model_opt")
[ "tensorflow.saved_model.load", "tensorflow.experimental.tensorrt.Converter", "tensorflow.keras.layers.Dense", "tensorflow.config.experimental.list_physical_devices", "tensorflow.keras.layers.Dropout", "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.Flatten" ]
mnist-tensorRT.py
[(15, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.experimental.tensorrt.Converter', 'tf.experimental.tensorrt.Converter', ([], {'input_saved_model_dir': '"""tf_model"""'}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.saved_model.load', 'tf.saved_model.load', (['model_path'], {}), True, 'import tensorflow as tf\n'), (55, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (16, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'input_shape': '(28, 28)'}), True, 'import tensorflow as tf\n'), (17, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(512)'], {'activation': '"""relu"""'}), True, 'import tensorflow as tf\n'), (18, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (19, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': '"""softmax"""'}), True, 'import tensorflow as tf\n'), (58, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (61, 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), True, 'import tensorflow as tf\n')]
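A hedged sketch of driving the converted SavedModel through its serving signature, mirroring the `opt()` helper above; the signature key 'serving_default' matches the code, while the dummy input shape assumes the MNIST model defined in `mnist_model()`.

import numpy as np
import tensorflow as tf

loaded = tf.saved_model.load('trt_model_opt')
infer = loaded.signatures['serving_default']  # concrete inference function
batch = tf.constant(np.zeros((1, 28, 28), dtype=np.float32))  # dummy MNIST-shaped batch
outputs = infer(batch)  # dict mapping output names to tensors
print(list(outputs.keys()))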
kayzhu/keras-tuner
32240940cd5814a905aadf8e646497649cbbb046
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers

try:
    from tensorflow.keras.layers.experimental import (  # isort:skip
        preprocessing,
    )  # pytype: disable=import-error
except ImportError:
    preprocessing = None

from keras_tuner.engine import hypermodel

# dict of functions that create layers for transforms.
# Each function takes a factor (0 to 1) for the strength
# of the transform.
if preprocessing is not None:
    TRANSFORMS = {
        "translate_x": lambda x: preprocessing.RandomTranslation(x, 0),
        "translate_y": lambda y: preprocessing.RandomTranslation(0, y),
        "rotate": preprocessing.RandomRotation,
        "contrast": preprocessing.RandomContrast,
    }


class HyperImageAugment(hypermodel.HyperModel):
    """An image augmentation hypermodel.

    The `HyperImageAugment` class searches for the best combination of image
    augmentation operations in Keras preprocessing layers. The input shape of
    the model should be (height, width, channels). The output of the model is
    of the same shape as the input.

    Args:
        input_shape: Optional shape tuple, e.g. `(256, 256, 3)`.
        input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        rotate: A number between [0, 1], a list of two numbers between [0, 1]
            or None. Configures the search space of the factor of random
            rotation transform in the augmentation. A factor is chosen for
            each trial. It sets maximum of clockwise and counterclockwise
            rotation in terms of fraction of pi, among all samples in the
            trial. Default is 0.5. When `rotate` is a single number, the
            search range is [0, `rotate`]. The transform is off when set to
            None.
        translate_x: A number between [0, 1], a list of two numbers between
            [0, 1] or None. Configures the search space of the factor of
            random horizontal translation transform in the augmentation. A
            factor is chosen for each trial. It sets maximum of horizontal
            translation in terms of ratio over the width among all samples in
            the trial. Default is 0.4. When `translate_x` is a single number,
            the search range is [0, `translate_x`]. The transform is off when
            set to None.
        translate_y: A number between [0, 1], a list of two numbers between
            [0, 1] or None. Configures the search space of the factor of
            random vertical translation transform in the augmentation. A
            factor is chosen for each trial. It sets maximum of vertical
            translation in terms of ratio over the height among all samples
            in the trial. Default is 0.4. When `translate_y` is a single
            number, the search range is [0, `translate_y`]. The transform is
            off when set to None.
        contrast: A number between [0, 1], a list of two numbers between
            [0, 1] or None. Configures the search space of the factor of
            random contrast transform in the augmentation. A factor is chosen
            for each trial. It sets maximum ratio of contrast change among
            all samples in the trial.
            Default is 0.3. When `contrast` is a single number, the search
            range is [0, `contrast`]. The transform is off when set to None.
        augment_layers: None, int or list of two ints, controlling the number
            of augment transforms applied. Default is 3. When
            `augment_layers` is 0, all transforms are applied sequentially.
            When `augment_layers` is nonzero, or a list of two ints, a simple
            version of RandAugment(https://arxiv.org/abs/1909.13719) is used.
            A search space for 'augment_layers' is created to search
            [0, `augment_layers`], or between the two ints if
            `augment_layers` is a list. For each trial, the hyperparameter
            'augment_layers' determines how many layers of augment transforms
            are applied, each randomly picked from all available transform
            types with equal probability on each sample.
        **kwargs: Additional keyword arguments that apply to all hypermodels.
            See `keras_tuner.HyperModel`.

    Example:

    ```python
    hm_aug = HyperImageAugment(input_shape=(32, 32, 3),
                               augment_layers=0,
                               rotate=[0.2, 0.3],
                               translate_x=0.1,
                               translate_y=None,
                               contrast=None)
    ```

    Then the hypermodel `hm_aug` will search 'factor_rotate' between
    [0.2, 0.3] and 'factor_translate_x' between [0, 0.1]. These two augments
    are applied on all samples with factor picked per each trial.

    ```python
    hm_aug = HyperImageAugment(input_shape=(32, 32, 3),
                               translate_x=0.5,
                               translate_y=[0.2, 0.4],
                               contrast=None)
    ```

    Then the hypermodel `hm_aug` will search 'factor_rotate' between
    [0, 0.5], 'factor_translate_x' between [0, 0.5], 'factor_translate_y'
    between [0.2, 0.4]. It will use RandAugment, searching 'augment_layers'
    between [0, 3]. Each layer on each sample will be chosen from rotate,
    translate_x and translate_y.
    """

    def __init__(
        self,
        input_shape=None,
        input_tensor=None,
        rotate=0.5,
        translate_x=0.4,
        translate_y=0.4,
        contrast=0.3,
        augment_layers=3,
        **kwargs,
    ):
        if preprocessing is None:
            raise ImportError(
                "HyperImageAugment requires tensorflow>=2.3.0, "
                f"but the current version is {tf.__version__}."
            )

        if input_shape is None and input_tensor is None:
            raise ValueError(
                "You must specify either `input_shape` or `input_tensor`."
            )

        self.transforms = []
        self._register_transform("rotate", rotate)
        self._register_transform("translate_x", translate_x)
        self._register_transform("translate_y", translate_y)
        self._register_transform("contrast", contrast)

        self.input_shape = input_shape
        self.input_tensor = input_tensor

        if augment_layers:
            self.model_name = "image_rand_augment"
            try:
                augment_layers_min = augment_layers[0]
                augment_layers_max = augment_layers[1]
            except TypeError:
                augment_layers_min = 0
                augment_layers_max = augment_layers
            if not (
                isinstance(augment_layers_min, int)
                and isinstance(augment_layers_max, int)
            ):
                raise ValueError(
                    "Keyword argument `augment_layers` must be int, "
                    "but received {}.".format(augment_layers)
                )
            self.augment_layers_min = augment_layers_min
            self.augment_layers_max = augment_layers_max
        else:
            # Separately tune and apply all augment transforms if
            # `augment_layers` is set to 0.
self.model_name = "image_augment" super(HyperImageAugment, self).__init__(**kwargs) def build(self, hp): if self.input_tensor is not None: inputs = keras.utils.get_source_inputs(self.input_tensor) x = self.input_tensor else: inputs = layers.Input(shape=self.input_shape) x = inputs if self.model_name == "image_rand_augment": x = self._build_randaug_layers(x, hp) else: x = self._build_fixedaug_layers(x, hp) model = keras.Model(inputs, x, name=self.model_name) return model def _build_randaug_layers(self, inputs, hp): augment_layers = hp.Int( "augment_layers", self.augment_layers_min, self.augment_layers_max, default=self.augment_layers_min, ) x = inputs for _ in range(augment_layers): # selection tensor determines operation for each sample. batch_size = tf.shape(x)[0] selection = tf.random.uniform( [batch_size, 1, 1, 1], maxval=len(self.transforms), dtype="int32" ) for i, (transform, (f_min, f_max)) in enumerate(self.transforms): # Factor for each transform is determined per each trial. factor = hp.Float(f"factor_{transform}", f_min, f_max, default=f_min) if factor == 0: continue transform_layer = TRANSFORMS[transform](factor) x_trans = transform_layer(x) # For each sample, apply the transform if and only if # selection matches the transform index `i` x = tf.where(tf.equal(i, selection), x_trans, x) return x def _build_fixedaug_layers(self, inputs, hp): x = inputs for transform, (factor_min, factor_max) in self.transforms: transform_factor = hp.Float( f"factor_{transform}", factor_min, factor_max, step=0.05, default=factor_min, ) if transform_factor == 0: continue transform_layer = TRANSFORMS[transform](transform_factor) x = transform_layer(x) return x def _register_transform(self, transform_name, transform_params): """Register a transform and format parameters for tuning the transform. Args: transform_name: A string, the name of the transform. trnasform_params: A number between [0, 1], a list of two numbers between [0, 1] or None. If set to a single number x, the corresponding transform factor will be between [0, x]. If set to a list of 2 numbers [x, y], the factor will be between [x, y]. If set to None, the transform will be excluded. """ if not transform_params: return try: transform_factor_min = transform_params[0] transform_factor_max = transform_params[1] if len(transform_params) > 2: raise ValueError( "Length of keyword argument {} must not exceed 2.".format( transform_name ) ) except TypeError: transform_factor_min = 0 transform_factor_max = transform_params if not ( isinstance(transform_factor_max, (int, float)) and isinstance(transform_factor_min, (int, float)) ): raise ValueError( "Keyword argument {} must be int or float, " "but received {}. ".format(transform_name, transform_params) ) self.transforms.append( (transform_name, (transform_factor_min, transform_factor_max)) )
[ "tensorflow.keras.utils.get_source_inputs", "tensorflow.shape", "tensorflow.keras.layers.experimental.preprocessing.RandomTranslation", "tensorflow.equal", "tensorflow.keras.Model", "tensorflow.keras.layers.Input" ]
keras_tuner/applications/augment.py
[(200, 'tensorflow.keras.Model', 'keras.Model', (['inputs', 'x'], {'name': 'self.model_name'}), True, 'import tensorflow.keras as keras\n'), (38, 'tensorflow.keras.layers.experimental.preprocessing.RandomTranslation', 'preprocessing.RandomTranslation', (['x', '(0)'], {}), False, 'from tensorflow.keras.layers.experimental import preprocessing\n'), (39, 'tensorflow.keras.layers.experimental.preprocessing.RandomTranslation', 'preprocessing.RandomTranslation', (['(0)', 'y'], {}), False, 'from tensorflow.keras.layers.experimental import preprocessing\n'), (189, 'tensorflow.keras.utils.get_source_inputs', 'keras.utils.get_source_inputs', (['self.input_tensor'], {}), True, 'import tensorflow.keras as keras\n'), (192, 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'self.input_shape'}), False, 'from tensorflow.keras import layers\n'), (213, 'tensorflow.shape', 'tf.shape', (['x'], {}), True, 'import tensorflow as tf\n'), (228, 'tensorflow.equal', 'tf.equal', (['i', 'selection'], {}), True, 'import tensorflow as tf\n')]
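A hedged sketch of exercising the hypermodel outside a tuner; `keras_tuner.HyperParameters` is the standard container, and the input shape is an assumption. Note that a fresh `HyperParameters` resolves every `hp.Int`/`hp.Float` to its default, which here is the minimum of each range.

import keras_tuner
import numpy as np

hp = keras_tuner.HyperParameters()
aug = HyperImageAugment(input_shape=(32, 32, 3), augment_layers=2)
aug_model = aug.build(hp)  # one concrete augmentation pipeline for this trial

images = np.random.rand(4, 32, 32, 3).astype("float32")
augmented = aug_model(images, training=True)  # preprocessing layers only transform in training mode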
openefsa/asreview
aec14fcad0532a3989befe577ceb369a9dbba243
# Copyright 2019 The ASReview Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging try: import tensorflow as tf except ImportError: raise ImportError("Install tensorflow package (`pip install tensorflow`)" " to use 'lstm-pool' model.") try: tf.logging.set_verbosity(tf.logging.ERROR) except AttributeError: logging.getLogger("tensorflow").setLevel(logging.ERROR) from tensorflow.keras.constraints import MaxNorm from tensorflow.keras.layers import Dense from tensorflow.keras.layers import Embedding from tensorflow.keras.layers import Flatten from tensorflow.keras.layers import LSTM from tensorflow.keras.layers import MaxPooling1D from tensorflow.keras.models import Sequential from tensorflow.keras.wrappers.scikit_learn import KerasClassifier from asreview.models.lstm_base import _get_optimizer from asreview.models.base import BaseTrainModel from asreview.utils import _set_class_weight class LSTMPoolModel(BaseTrainModel): """LSTM pool class. LSTM model consisting of an embedding layer, one LSTM layer, and one max pooling layer. """ name = "lstm-pool" def __init__(self, embedding_matrix=None, backwards=True, dropout=0.4, optimizer="rmsprop", lstm_out_width=20, lstm_pool_size=128, learn_rate=1.0, verbose=0, batch_size=32, epochs=35, shuffle=False, class_weight=30.0): """Initialize the LSTM pool model. Arguments --------- embedding_matrix: np.array Embedding matrix to use with LSTM model. backwards: bool Whether to have a forward or backward LSTM. dropout: float Value in [0, 1.0) that gives the dropout and recurrent dropout rate for the LSTM model. optimizer: str Optimizer to use. lstm_out_width: int Output width of the LSTM. lstm_pool_size: int Size of the pool, must be a divisor of max_sequence_length. learn_rate: float Learn rate multiplier of default learning rate. verbose: int Verbosity. batch_size: int Size of the batch size for the LSTM model. epochs: int Number of epochs to train the LSTM model. shuffle: bool Whether to shuffle the data before starting to train. class_weight: float Class weight for the included papers. 
""" super(LSTMPoolModel, self).__init__() self.embedding_matrix = embedding_matrix self.backwards = backwards self.dropout = dropout self.optimizer = optimizer self.lstm_out_width = lstm_out_width self.learn_rate = learn_rate self.verbose = verbose self.batch_size = batch_size self.epochs = epochs self.shuffle = shuffle self.class_weight = _set_class_weight(class_weight) self.lstm_pool_size = lstm_pool_size self._model = None self.sequence_length = None def fit(self, X, y): sequence_length = X.shape[1] if self._model is None or sequence_length != self.sequence_length: self.sequence_length = sequence_length keras_model = _create_lstm_pool_model( embedding_matrix=self.embedding_matrix, backwards=self.backwards, dropout=self.dropout, optimizer=self.optimizer, max_sequence_length=sequence_length, lstm_out_width=self.lstm_out_width, learn_rate=self.learn_rate, verbose=self.verbose) self._model = KerasClassifier(keras_model, verbose=self.verbose) self._model.fit(X, y, batch_size=self.batch_size, epochs=self.epochs, shuffle=self.shuffle, class_weight=self.class_weight, verbose=self.verbose) def full_hyper_space(self): from hyperopt import hp hyper_choices = {} hyper_space = { "mdl_dropout": hp.uniform("mdl_dropout", 0, 0.9), "mdl_lstm_out_width": hp.quniform("mdl_lstm_out_width", 1, 50, 1), "mdl_dense_width": hp.quniform("mdl_dense_width", 1, 200, 1), "mdl_learn_rate_mult": hp.lognormal("mdl_learn_rate_mult", 0, 1) } return hyper_space, hyper_choices @property def default_param(self): defaults = super(LSTMPoolModel, self).default_param defaults.pop("embedding_matrix") return defaults def _create_lstm_pool_model(embedding_matrix, backwards=True, dropout=0.4, optimizer='rmsprop', max_sequence_length=1000, lstm_out_width=20, lstm_pool_size=100, learn_rate=1.0, verbose=1): """Return callable lstm model. Returns ------- callable: A function that return the Keras Sklearn model when called. """ # The Sklearn API requires a callable as result. # https://keras.io/scikit-learn-api/ def model_wrapper(): model = Sequential() # add first embedding layer with pretrained wikipedia weights model.add( Embedding( embedding_matrix.shape[0], embedding_matrix.shape[1], weights=[embedding_matrix], input_length=max_sequence_length, trainable=False ) ) # add LSTM layer model.add( LSTM( lstm_out_width, input_shape=(max_sequence_length, ), go_backwards=backwards, dropout=dropout, recurrent_dropout=dropout, return_sequences=True, kernel_constraint=MaxNorm(), ) ) model.add( MaxPooling1D( pool_size=lstm_pool_size, ) ) model.add( Flatten() ) # Add output layer model.add( Dense( 1, activation='sigmoid' ) ) optimizer_fn = _get_optimizer(optimizer, learn_rate) # Compile model model.compile( loss='binary_crossentropy', optimizer=optimizer_fn, metrics=['acc']) if verbose >= 1: model.summary() return model return model_wrapper
[ "tensorflow.keras.constraints.MaxNorm", "tensorflow.keras.layers.Embedding", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.MaxPooling1D", "tensorflow.keras.wrappers.scikit_learn.KerasClassifier", "tensorflow.logging.set_verbosity", "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.Flatten" ]
asreview/models/lstm_pool.py
[(22, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), True, 'import tensorflow as tf\n'), (93, 'asreview.utils._set_class_weight', '_set_class_weight', (['class_weight'], {}), False, 'from asreview.utils import _set_class_weight\n'), (153, 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras.models import Sequential\n'), (196, 'asreview.models.lstm_base._get_optimizer', '_get_optimizer', (['optimizer', 'learn_rate'], {}), False, 'from asreview.models.lstm_base import _get_optimizer\n'), (109, 'tensorflow.keras.wrappers.scikit_learn.KerasClassifier', 'KerasClassifier', (['keras_model'], {'verbose': 'self.verbose'}), False, 'from tensorflow.keras.wrappers.scikit_learn import KerasClassifier\n'), (119, 'hyperopt.hp.uniform', 'hp.uniform', (['"""mdl_dropout"""', '(0)', '(0.9)'], {}), False, 'from hyperopt import hp\n'), (120, 'hyperopt.hp.quniform', 'hp.quniform', (['"""mdl_lstm_out_width"""', '(1)', '(50)', '(1)'], {}), False, 'from hyperopt import hp\n'), (121, 'hyperopt.hp.quniform', 'hp.quniform', (['"""mdl_dense_width"""', '(1)', '(200)', '(1)'], {}), False, 'from hyperopt import hp\n'), (122, 'hyperopt.hp.lognormal', 'hp.lognormal', (['"""mdl_learn_rate_mult"""', '(0)', '(1)'], {}), False, 'from hyperopt import hp\n'), (157, 'tensorflow.keras.layers.Embedding', 'Embedding', (['embedding_matrix.shape[0]', 'embedding_matrix.shape[1]'], {'weights': '[embedding_matrix]', 'input_length': 'max_sequence_length', 'trainable': '(False)'}), False, 'from tensorflow.keras.layers import Embedding\n'), (180, 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': 'lstm_pool_size'}), False, 'from tensorflow.keras.layers import MaxPooling1D\n'), (185, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Flatten\n'), (190, 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), False, 'from tensorflow.keras.layers import Dense\n'), (24, 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), False, 'import logging\n'), (175, 'tensorflow.keras.constraints.MaxNorm', 'MaxNorm', ([], {}), False, 'from tensorflow.keras.constraints import MaxNorm\n')]
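A hedged sketch of the factory pattern above: `_create_lstm_pool_model` returns a zero-argument callable, which is exactly what the scikit-learn wrapper expects. The embedding matrix here is a random stand-in and its vocabulary/dimension sizes are assumptions; note that `lstm_pool_size` must divide `max_sequence_length`.

import numpy as np

embedding = np.random.normal(size=(5000, 40)).astype(np.float32)  # assumed vocab x dim
build_fn = _create_lstm_pool_model(
    embedding, max_sequence_length=1000, lstm_pool_size=100, verbose=0)
clf = KerasClassifier(build_fn, verbose=0)  # build_fn() is invoked lazily at fit time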
showmonki/learn_notes
8a416e0294170e242c40d16370e8f42ec9ae8582
from tensorflow import keras
from tensorflow.keras import backend as K


class t_model():
    def __init__(self, num_class, input_shape):
        self.input_shape = input_shape  # Xception needs inputs of at least 71x71
        self.num_cls = num_class
        # self.base_model = self.load_model()
        self.base_model1 = self.load_model1()

    def load_model(self):
        inputs = keras.Input(shape=self.input_shape, name='model_origin_input')
        K.set_learning_phase(0)
        base_model = keras.applications.Xception(weights='imagenet', include_top=False, input_tensor=inputs)
        base_model.trainable = False
        K.set_learning_phase(1)
        gmp = keras.layers.GlobalMaxPool2D(name='gmp')(base_model.output)
        # bn = keras.layers.BatchNormalization()(gmp)
        top_dropout_rate = 0.2
        # rld = keras.layers.Dense(16, activation='relu')(gmp)
        dp = keras.layers.Dropout(top_dropout_rate, name="top_dropout")(gmp)
        outputs = keras.layers.Dense(self.num_cls, activation='softmax')(dp)
        model = keras.Model(inputs, outputs, name='new_model')
        model.compile(optimizer=keras.optimizers.Adam(),
                      loss='categorical_crossentropy',  # keras.losses.BinaryCrossentropy(from_logits=True),
                      metrics=['accuracy'])
        return model

    def load_model1(self):
        base_model = keras.applications.Xception(weights='imagenet', include_top=False, input_shape=self.input_shape)
        base_model.trainable = False
        x = base_model.output
        x = keras.layers.GlobalMaxPool2D(name='gmp')(x)
        # x = keras.layers.Dense(30, activation='relu')(x)
        outputs = keras.layers.Dense(self.num_cls, activation='softmax')(x)
        model = keras.Model(inputs=base_model.inputs, outputs=outputs, name='top_model')
        model.compile(optimizer=keras.optimizers.Adam(),
                      loss='categorical_crossentropy',  # keras.losses.BinaryCrossentropy(from_logits=True),
                      metrics=['accuracy'])
        return model


class t2():
    def __init__(self, num_classes, img_shape):
        self.input_shape = img_shape
        self.num_classes = num_classes
        self.base_model = self.load_model()

    def load_model(self):
        pretrain_model = keras.applications.InceptionResNetV2(include_top=False, input_shape=self.input_shape, weights='imagenet')
        pretrain_model.trainable = False
        x = pretrain_model.output
        x = keras.layers.GlobalMaxPool2D(name='gmp')(x)
        x = keras.layers.Dense(100, activation='softmax')(x)
        outputs = keras.layers.Dense(self.num_classes, activation='softmax')(x)
        model = keras.Model(inputs=pretrain_model.input, outputs=outputs, name='transfer_model')
        model.compile(optimizer=keras.optimizers.Adam(),
                      loss='categorical_crossentropy',  # keras.losses.BinaryCrossentropy(from_logits=True),
                      metrics=['accuracy'])
        return model
[ "tensorflow.keras.layers.GlobalMaxPool2D", "tensorflow.keras.Input", "tensorflow.keras.layers.Dense", "tensorflow.keras.Model", "tensorflow.keras.applications.Xception", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.Dropout", "tensorflow.keras.backend.set_learning_phase", "tensorflow.keras.applications.InceptionResNetV2" ]
CV/classification/COFFFEE_GROUP/Code/cnn_model.py
[(13, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': 'self.input_shape', 'name': '"""model_origin_input"""'}), False, 'from tensorflow import keras\n'), (14, 'tensorflow.keras.backend.set_learning_phase', 'K.set_learning_phase', (['(0)'], {}), True, 'from tensorflow.keras import backend as K\n'), (15, 'tensorflow.keras.applications.Xception', 'keras.applications.Xception', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_tensor': 'inputs'}), False, 'from tensorflow import keras\n'), (17, 'tensorflow.keras.backend.set_learning_phase', 'K.set_learning_phase', (['(1)'], {}), True, 'from tensorflow.keras import backend as K\n'), (24, 'tensorflow.keras.Model', 'keras.Model', (['inputs', 'outputs'], {'name': '"""new_model"""'}), False, 'from tensorflow import keras\n'), (33, 'tensorflow.keras.applications.Xception', 'keras.applications.Xception', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': 'self.input_shape'}), False, 'from tensorflow import keras\n'), (39, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'base_model.inputs', 'outputs': 'outputs', 'name': '"""top_model"""'}), False, 'from tensorflow import keras\n'), (56, 'tensorflow.keras.applications.InceptionResNetV2', 'keras.applications.InceptionResNetV2', ([], {'include_top': '(False)', 'input_shape': 'self.input_shape', 'weights': '"""imagenet"""'}), False, 'from tensorflow import keras\n'), (62, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'pretrain_model.input', 'outputs': 'outputs', 'name': '"""transfer_model"""'}), False, 'from tensorflow import keras\n'), (18, 'tensorflow.keras.layers.GlobalMaxPool2D', 'keras.layers.GlobalMaxPool2D', ([], {'name': '"""gmp"""'}), False, 'from tensorflow import keras\n'), (22, 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['top_dropout_rate'], {'name': '"""top_dropout"""'}), False, 'from tensorflow import keras\n'), (23, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['self.num_cls'], {'activation': '"""softmax"""'}), False, 'from tensorflow import keras\n'), (36, 'tensorflow.keras.layers.GlobalMaxPool2D', 'keras.layers.GlobalMaxPool2D', ([], {'name': '"""gmp"""'}), False, 'from tensorflow import keras\n'), (38, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['self.num_cls'], {'activation': '"""softmax"""'}), False, 'from tensorflow import keras\n'), (59, 'tensorflow.keras.layers.GlobalMaxPool2D', 'keras.layers.GlobalMaxPool2D', ([], {'name': '"""gmp"""'}), False, 'from tensorflow import keras\n'), (60, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(100)'], {'activation': '"""softmax"""'}), False, 'from tensorflow import keras\n'), (61, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['self.num_classes'], {'activation': '"""softmax"""'}), False, 'from tensorflow import keras\n'), (26, 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), False, 'from tensorflow import keras\n'), (41, 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), False, 'from tensorflow import keras\n'), (64, 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), False, 'from tensorflow import keras\n')]
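A hedged usage sketch for the wrappers above; the class count and input size are assumptions (per the inline comment, Xception needs inputs of at least 71x71, and the ImageNet weights are downloaded on first use).

wrapper = t_model(num_class=5, input_shape=(128, 128, 3))
model = wrapper.base_model1  # frozen Xception backbone + softmax head
model.summary()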
TTKunt/keras-attention-mechanism
0309dbf79da32c0d8d90925660fc4cc7fe53dc8a
import numpy import numpy as np from tensorflow.keras import Input from tensorflow.keras import Model from tensorflow.keras.callbacks import Callback from tensorflow.keras.datasets import imdb from tensorflow.keras.layers import Dense from tensorflow.keras.layers import Dropout from tensorflow.keras.layers import Embedding from tensorflow.keras.layers import LSTM from tensorflow.keras.preprocessing import sequence from attention import attention_3d_block def train_and_evaluate_model_on_imdb(add_attention=True): numpy.random.seed(7) # load the dataset but only keep the top n words, zero the rest top_words = 5000 (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words) # truncate and pad input sequences max_review_length = 500 X_train = sequence.pad_sequences(X_train, maxlen=max_review_length) X_test = sequence.pad_sequences(X_test, maxlen=max_review_length) # create the model embedding_vector_length = 32 i = Input(shape=(max_review_length,)) x = Embedding(top_words, embedding_vector_length, input_length=max_review_length)(i) x = Dropout(0.5)(x) if add_attention: x = LSTM(100, return_sequences=True)(x) x = attention_3d_block(x) else: x = LSTM(100, return_sequences=False)(x) x = Dense(350, activation='relu')(x) # same number of parameters so fair comparison. x = Dropout(0.5)(x) x = Dense(1, activation='sigmoid')(x) model = Model(inputs=[i], outputs=[x]) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) print(model.summary()) class RecordBestTestAccuracy(Callback): def __init__(self): super().__init__() self.val_accuracies = [] self.val_losses = [] def on_epoch_end(self, epoch, logs=None): self.val_accuracies.append(logs['val_accuracy']) self.val_losses.append(logs['val_loss']) rbta = RecordBestTestAccuracy() model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=64, callbacks=[rbta]) print(f"Max Test Accuracy: {100 * np.max(rbta.val_accuracies):.2f} %") print(f"Mean Test Accuracy: {100 * np.mean(rbta.val_accuracies):.2f} %") def main(): # 10 epochs. # Max Test Accuracy: 88.02 % # Mean Test Accuracy: 87.26 % train_and_evaluate_model_on_imdb(add_attention=False) # 10 epochs. # Max Test Accuracy: 88.74 % # Mean Test Accuracy: 88.00 % train_and_evaluate_model_on_imdb(add_attention=True) if __name__ == '__main__': main()
[ "tensorflow.keras.Input", "numpy.random.seed", "tensorflow.keras.layers.Embedding", "tensorflow.keras.layers.Dense", "tensorflow.keras.datasets.imdb.load_data", "tensorflow.keras.Model", "numpy.max", "tensorflow.keras.layers.LSTM", "numpy.mean", "tensorflow.keras.layers.Dropout", "tensorflow.keras.preprocessing.sequence.pad_sequences" ]
examples/imdb.py
[(17, 'numpy.random.seed', 'numpy.random.seed', (['(7)'], {}), False, 'import numpy\n'), (20, 'tensorflow.keras.datasets.imdb.load_data', 'imdb.load_data', ([], {'num_words': 'top_words'}), False, 'from tensorflow.keras.datasets import imdb\n'), (23, 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['X_train'], {'maxlen': 'max_review_length'}), False, 'from tensorflow.keras.preprocessing import sequence\n'), (24, 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['X_test'], {'maxlen': 'max_review_length'}), False, 'from tensorflow.keras.preprocessing import sequence\n'), (27, 'tensorflow.keras.Input', 'Input', ([], {'shape': '(max_review_length,)'}), False, 'from tensorflow.keras import Input\n'), (39, 'tensorflow.keras.Model', 'Model', ([], {'inputs': '[i]', 'outputs': '[x]'}), False, 'from tensorflow.keras import Model\n'), (28, 'tensorflow.keras.layers.Embedding', 'Embedding', (['top_words', 'embedding_vector_length'], {'input_length': 'max_review_length'}), False, 'from tensorflow.keras.layers import Embedding\n'), (29, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), False, 'from tensorflow.keras.layers import Dropout\n'), (32, 'attention.attention_3d_block', 'attention_3d_block', (['x'], {}), False, 'from attention import attention_3d_block\n'), (36, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), False, 'from tensorflow.keras.layers import Dropout\n'), (37, 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), False, 'from tensorflow.keras.layers import Dense\n'), (31, 'tensorflow.keras.layers.LSTM', 'LSTM', (['(100)'], {'return_sequences': '(True)'}), False, 'from tensorflow.keras.layers import LSTM\n'), (34, 'tensorflow.keras.layers.LSTM', 'LSTM', (['(100)'], {'return_sequences': '(False)'}), False, 'from tensorflow.keras.layers import LSTM\n'), (35, 'tensorflow.keras.layers.Dense', 'Dense', (['(350)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Dense\n'), (57, 'numpy.max', 'np.max', (['rbta.val_accuracies'], {}), True, 'import numpy as np\n'), (58, 'numpy.mean', 'np.mean', (['rbta.val_accuracies'], {}), True, 'import numpy as np\n')]
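One caveat on reproducibility: the script seeds only NumPy, while Keras weight initialization and dropout draw from TensorFlow's RNG, so runs are not exactly repeatable. A hedged sketch of fuller seeding with the TF 2.x API:

import random

import numpy as np
import tensorflow as tf


def seed_everything(seed=7):
    random.seed(seed)        # Python's own RNG
    np.random.seed(seed)     # NumPy (already done in the script)
    tf.random.set_seed(seed)  # TensorFlow ops, initializers, dropout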
mo-igor/FIR_CNN-LSTM
e88d7bdfb5a22ad2ce0bb16da8431aaf05de3c4a
from tools import dataset from tools.dataset import Dataset from tools import prepare from tools import augmentation as augment import os import argparse import pandas as pd from glob import glob import collections import re import random SEED = None # set to None to use the current system time random.seed(a=SEED) import numpy as np import matplotlib.pyplot as plt from sklearn.metrics import confusion_matrix, accuracy_score import tensorflow from tensorflow import keras from tensorflow.keras.models import Model from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout,\ Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D,\ BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape,\ GRU, average, Lambda, Average, Maximum, Concatenate from tools.flow import farneback, farneback_mag from tensorflow.keras.backend import clear_session from tensorflow.keras import optimizers from tensorflow.keras.callbacks import EarlyStopping, TerminateOnNaN, ModelCheckpoint, ReduceLROnPlateau LABELS_REGEX_7 = dataset.LABELS_REGEX #7 labels LABELS_REGEX_5 = dataset.PAPER_LABELS_REGEX #5 labels KERAS_EPSILON = tensorflow.keras.backend.epsilon() keras.backend.set_image_data_format('channels_last') def spatial_stream(): spatial_input = Input(shape=(None, 16, 16, 1), name='spatial_input') spatial_conv1 = TimeDistributed(Conv2D(16, (3, 3), padding='same', activation='relu', name='spatial_conv1'), name='spatial_timedistributed1')(spatial_input) spatial_bn_layer = TimeDistributed(BatchNormalization(name='spatial_bn_layer'), name='spatial_timedistributed2')(spatial_conv1) spatial_maxpool1 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='spatial_maxpool1'), name='spatial_timedistributed3')(spatial_bn_layer) spatial_conv2 = TimeDistributed(Conv2D(32, (3, 3), padding='same', activation='relu', name='spatial_conv2'), name='spatial_timedistributed4')(spatial_maxpool1) spatial_maxpool2 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='spatial_maxpool2'), name='spatial_timedistributed5')(spatial_conv2) spatial_conv3 = TimeDistributed(Conv2D(64, (3, 3), padding='same', activation='relu', name='spatial_conv3'), name='spatial_timedistributed6')(spatial_maxpool2) spatial_maxpool3 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='spatial_maxpool3'), name='spatial_timedistributed7')(spatial_conv3) spatial_conv4 = TimeDistributed(Conv2D(128, (3, 3), padding='same', activation='relu', name='spatial_conv4'), name='spatial_timedistributed8')(spatial_maxpool3) spatial_maxpool4 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='spatial_maxpool4'), name='spatial_timedistributed9')(spatial_conv4) spatial_flattened = TimeDistributed(Flatten(name='spatial_flattened'), name='spatial_timedistributed10')(spatial_maxpool4) spatial_dense1 = TimeDistributed(Dense(512, name='spatial_dense1'), name='spatial_timedistributed11')(spatial_flattened) spatial_dense2 = TimeDistributed(Dense(256, name='spatial_dense2'), name='spatial_timedistributed12')(spatial_dense1) spatial_GRU = GRU(100, return_sequences=True, name='spatial_GRU')(spatial_dense2) spatial_GRU2 = GRU(100, return_sequences=False, name='spatial_GRU2')(spatial_GRU) #handle numerical instability spatial_output = Lambda(lambda x: tensorflow.keras.backend.clip(x, KERAS_EPSILON, 1-KERAS_EPSILON))(spatial_GRU2) return spatial_input, spatial_output def temporal_stream(): temporal_input = Input(shape=(None, 16, 16, 2), name='temporal_input') temporal_conv1 = 
TimeDistributed(Conv2D(16, (3, 3), padding='same', activation='relu', name='temporal_conv1'), name='temporal_timedistributed1')(temporal_input)
    temporal_bn_layer = TimeDistributed(BatchNormalization(name='temporal_bn_layer'), name='temporal_timedistributed2')(temporal_conv1)
    temporal_maxpool1 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='temporal_maxpool1'), name='temporal_timedistributed3')(temporal_bn_layer)
    temporal_conv2 = TimeDistributed(Conv2D(32, (3, 3), padding='same', activation='relu', name='temporal_conv2'), name='temporal_timedistributed4')(temporal_maxpool1)
    temporal_maxpool2 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='temporal_maxpool2'), name='temporal_timedistributed5')(temporal_conv2)
    temporal_conv3 = TimeDistributed(Conv2D(64, (3, 3), padding='same', activation='relu', name='temporal_conv3'), name='temporal_timedistributed6')(temporal_maxpool2)
    temporal_maxpool3 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='temporal_maxpool3'), name='temporal_timedistributed7')(temporal_conv3)
    temporal_conv4 = TimeDistributed(Conv2D(128, (3, 3), padding='same', activation='relu', name='temporal_conv4'), name='temporal_timedistributed8')(temporal_maxpool3)
    temporal_maxpool4 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='temporal_maxpool4'), name='temporal_timedistributed9')(temporal_conv4)
    temporal_flattened = TimeDistributed(Flatten(name='temporal_flattened'), name='temporal_timedistributed10')(temporal_maxpool4)
    temporal_dense1 = TimeDistributed(Dense(512, name='temporal_dense1'), name='temporal_timedistributed11')(temporal_flattened)
    temporal_dense2 = TimeDistributed(Dense(256, name='temporal_dense2'), name='temporal_timedistributed12')(temporal_dense1)
    temporal_GRU = GRU(100, return_sequences=True, name='temporal_GRU')(temporal_dense2)
    temporal_GRU2 = GRU(100, return_sequences=False, name='temporal_GRU2')(temporal_GRU)
    # handle numerical instability
    temporal_output = Lambda(lambda x: tensorflow.keras.backend.clip(x, KERAS_EPSILON, 1 - KERAS_EPSILON))(temporal_GRU2)
    return temporal_input, temporal_output


def stream2model(stream_input, stream_output):
    classification_output = Dense(CLASSES_N, activation="softmax", name="single_stream_classification")(stream_output)
    model = Model(stream_input, classification_output)
    return model


def merge_streams(spatial_input, spatial_output, temporal_input, temporal_output):
    concat = Concatenate(name='merged_concat')([spatial_output, temporal_output])
    output = Dense(CLASSES_N, activation="softmax", name='merged_output')(concat)
    model = Model([spatial_input, temporal_input], output)
    return model


def compile_model(model, model_dir, optimizer="adam", prefix=""):
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    prepare.ensure_dir_exists(model_dir)
    keras.utils.plot_model(model, os.path.join(model_dir, prefix + 'model.png'))
    return model


def plot_history(history, model_dir, prefix="", suffix=""):
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.savefig(os.path.join(model_dir, prefix + "model_accuracy" + suffix + ".png"))
    plt.close()

    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.savefig(os.path.join(model_dir, prefix + "model_loss" + suffix + ".png"))
    return


def to_categorical(y):
    return tensorflow.keras.utils.to_categorical(y, CLASSES_N)


class DataGenerator(keras.utils.Sequence):
    '''
    FIR data batch generator for Keras

    Parameters
    ----------
    data : list
        list of [fn, y] where fn is file location and y is a label

    Returns
    ----------
    [[temperature, flow], y] : list
        temperature : numpy array
        flow : numpy array
        y : numpy array (one-hot encoded)
    '''

    def __init__(self, data, batch_size, shuffle: bool = True, augmentation: bool = False):
        self.data = data
        if (batch_size == -1):
            self.batch_size = len(data)
        else:
            self.batch_size = batch_size
        self.shuffle = shuffle
        if self.shuffle:
            random.shuffle(self.data)
        self.augmentation = augmentation

    def __len__(self):
        return int(np.floor(len(self.data) / self.batch_size))

    def on_epoch_end(self):
        if self.shuffle:
            random.shuffle(self.data)

    def __getitem__(self, index):
        indices = list(range(index * self.batch_size, (index + 1) * self.batch_size))
        return self.__load_data(indices)

    def __load_data(self, indices):
        samples = []
        temperature_length_max = 0
        flow_length_max = 0
        for idx in indices:
            if self.augmentation:
                k_rot = np.random.randint(0, 4)
                k_flip = np.random.randint(0, 3)
            [temperature_fn, flow_fn], y = self.data[idx]
            temperature = np.load(temperature_fn).astype(np.float32)
            if self.augmentation:
                temperature = augment.random_rotation(temperature, case=k_rot)
                temperature = augment.random_flip(temperature, case=k_flip)
            temperature = temperature[..., np.newaxis]
            flow = farneback(np.squeeze((255 * temperature).astype(np.uint8)))
            if temperature.shape[0] > temperature_length_max:
                temperature_length_max = temperature.shape[0]
            if flow.shape[0] > flow_length_max:
                flow_length_max = flow.shape[0]
            samples.append([[temperature, flow], y])
        # zero-pad
        TEMPERATURE, FLOW = [], []
        Y = []
        for sample in samples:
            [temperature, flow], y = sample
            temperature = self.__pad_to_length(temperature, temperature_length_max)
            flow = self.__pad_to_length(flow, flow_length_max)
            TEMPERATURE.append(temperature)
            FLOW.append(flow)
            Y.append(y)
        TEMPERATURE, FLOW, Y = np.array(TEMPERATURE), np.array(FLOW), np.array(Y)
        return ([TEMPERATURE, FLOW], Y)

    def __pad_to_length(self, sequence, length):
        if sequence.shape[0] == length:
            return sequence
        trailing = np.zeros([length - sequence.shape[0], *sequence.shape[1:]], sequence.dtype)
        return np.vstack([trailing, sequence])


class TemperatureGenerator(keras.utils.Sequence):

    def __init__(self, data, batch_size, shuffle: bool = True, augmentation: bool = False):
        self.data = data
        if (batch_size == -1):
            self.batch_size = len(data)
        else:
            self.batch_size = batch_size
        self.shuffle = shuffle
        if self.shuffle:
            random.shuffle(self.data)
        self.augmentation = augmentation

    def __len__(self):
        return int(np.floor(len(self.data) / self.batch_size))

    def on_epoch_end(self):
        if self.shuffle:
            random.shuffle(self.data)

    def __getitem__(self, index):
        indices = list(range(index * self.batch_size, (index + 1) * self.batch_size))
        return self.__load_data(indices)

    def __load_data(self, indices):
        samples = []
        temperature_length_max = 0
        for idx in indices:
            if self.augmentation:
                k_rot = np.random.randint(0, 4)
                k_flip = np.random.randint(0, 3)
            [temperature_fn, _], y = self.data[idx]
            temperature = np.load(temperature_fn).astype(np.float32)
            if self.augmentation:
                temperature = augment.random_rotation(temperature, case=k_rot)
                temperature = augment.random_flip(temperature, case=k_flip)
            temperature = temperature[..., np.newaxis]
            if temperature.shape[0] > temperature_length_max:
                temperature_length_max = temperature.shape[0]
            samples.append([temperature, y])
        # zero-pad
        TEMPERATURE = []
        Y = []
        for sample in samples:
            temperature, y = sample
            temperature = self.__pad_to_length(temperature, temperature_length_max)
            TEMPERATURE.append(temperature)
            Y.append(y)
        TEMPERATURE, Y = np.array(TEMPERATURE), np.array(Y)
        return (TEMPERATURE, Y)

    def __pad_to_length(self, sequence, length):
        if sequence.shape[0] == length:
            return sequence
        trailing = np.zeros([length - sequence.shape[0], *sequence.shape[1:]], sequence.dtype)
        return np.vstack([trailing, sequence])


class FlowGenerator(keras.utils.Sequence):

    def __init__(self, data, batch_size, shuffle: bool = True, augmentation: bool = False):
        self.data = data
        if (batch_size == -1):
            self.batch_size = len(data)
        else:
            self.batch_size = batch_size
        self.shuffle = shuffle
        if self.shuffle:
            random.shuffle(self.data)
        self.augmentation = augmentation

    def __len__(self):
        return int(np.floor(len(self.data) / self.batch_size))

    def on_epoch_end(self):
        if self.shuffle:
            random.shuffle(self.data)

    def __getitem__(self, index):
        indices = list(range(index * self.batch_size, (index + 1) * self.batch_size))
        return self.__load_data(indices)

    def __load_data(self, indices):
        samples = []
        flow_length_max = 0
        for idx in indices:
            if self.augmentation:
                k_rot = np.random.randint(0, 4)
                k_flip = np.random.randint(0, 3)
            [temperature_fn, flow_fn], y = self.data[idx]
            temperature = np.load(temperature_fn).astype(np.float32)
            if self.augmentation:
                temperature = augment.random_rotation(temperature, case=k_rot)
                temperature = augment.random_flip(temperature, case=k_flip)
            temperature = temperature[..., np.newaxis]
            flow = farneback(np.squeeze((255 * temperature).astype(np.uint8)))
            if flow.shape[0] > flow_length_max:
                flow_length_max = flow.shape[0]
            samples.append([flow, y])
        # zero-pad
        FLOW = []
        Y = []
        for sample in samples:
            flow, y = sample
            flow = self.__pad_to_length(flow, flow_length_max)
            FLOW.append(flow)
            Y.append(y)
        FLOW, Y = np.array(FLOW), np.array(Y)
        return (FLOW, Y)

    def __pad_to_length(self, sequence, length):
        if sequence.shape[0] == length:
            return sequence
        trailing = np.zeros([length - sequence.shape[0], *sequence.shape[1:]], sequence.dtype)
        return np.vstack([trailing, sequence])


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_dir',
                        type=str,
                        default=os.path.join("..", "dataset"),
                        help='Path to folder containing the FIR dataset.')
    parser.add_argument('--model_dir',
                        type=str,
                        default="/" + os.path.join("tmps", "model"),
                        help='Where to save the trained model.')
    parser.add_argument('--subdir',
                        type=str,
                        default="weigths",
                        help='Custom naming for subdirectory to save the model in.')
    parser.add_argument('--temperature_dir',
                        type=str,
                        default="/" + os.path.join("tmps", "cache", "temperature"),
                        help='Where to save the cached sequences (temperature).')
    parser.add_argument('--flow_dir',
                        type=str,
                        default="/" + os.path.join("tmps", "cache", "optical_flow"),
                        help='Where to save the cached sequences (optical flow).')
    parser.add_argument('--classes',
                        type=int,
                        default=5,
                        help='How many classes? 5 if --classes=5, 7 otherwise.')
    parser.add_argument('--epochs',
                        type=int,
                        default=50,
                        help='How many epochs to run before ending.')
    parser.add_argument('--learning_rate',
                        type=float,
                        default=1e-1,
                        help='How large a learning rate to use when training.')
    parser.add_argument('--validation_size',
                        type=float,
                        default=0.1,
                        help='Between 0.0 and 1.0, the proportion of the dataset to include in the validation split.')
    parser.add_argument('--training_batch_size',
                        type=int,
                        default=128,
                        help='How many images to train on at a time.')
    parser.add_argument('--validation_batch_size',
                        type=int,
                        default=-1,
                        help='How many images to validate on at a time. -1 for batch_size = samples_n (more stable results).')
    parser.add_argument('--testing_batch_size',
                        type=int,
                        default=-1,
                        help='How many images to test on at a time. -1 for batch_size = samples_n (more stable results).')
    parser.add_argument("--download",
                        action="store_true",
                        help='Download the dataset.')
    parser.add_argument("--prepare",
                        action="store_true",
                        help='Prepare the dataset.')
    parser.add_argument("--actor",
                        type=str,
                        default=None,
                        help='Choose testing actor, pattern: "human{}" [0-9]. Otherwise full cross validation is performed.')
    parser.add_argument("--pretrain",
                        action="store_true",
                        help='Pretrain by training streams separately.')
    parser.add_argument("--temporal_only",
                        action="store_true",
                        help='Train temporal only.')
    parser.add_argument("--spatial_only",
                        action="store_true",
                        help='Train spatial only.')
    FLAGS, unparsed = parser.parse_known_args()

    model_path = os.path.join(FLAGS.model_dir, FLAGS.subdir)

    if (FLAGS.classes == 5):
        LABELS_REGEX = LABELS_REGEX_5
    else:
        LABELS_REGEX = LABELS_REGEX_7
    CLASSES_N = len(LABELS_REGEX)

    if FLAGS.download:
        dataset.download("..")

    if FLAGS.temporal_only and FLAGS.spatial_only:
        raise ValueError

    data_normalized = Dataset(FLAGS.dataset_dir, minmax_normalized=True)
    if FLAGS.prepare:
        prepare.sequences_by_actor(data_normalized, FLAGS.temperature_dir)
        prepare.optical_flow(data_normalized, FLAGS.flow_dir)

    temperature_files = glob(os.path.join(FLAGS.temperature_dir, "**", "*.npy"))
    flow_files = glob(os.path.join(FLAGS.flow_dir, "**", "*.npy"))

    def files_same(a, b):
        return collections.Counter([os.path.split(item)[1] for item in a]) == \
            collections.Counter([os.path.split(item)[1] for item in b])

    if not files_same(temperature_files, flow_files):
        raise ValueError("The number and naming of the samples in temporal and spatial "
                         "streams should be the same.")

    if (FLAGS.validation_size > 1) or (FLAGS.validation_size < 0):
        raise ValueError("Validation size should be between 0.0 and 1.0")

    # relative_path, y = data_fn_y[i]
    data_fn_y = []
    for path in temperature_files:
        sample_actor, sample_basename = path.split(os.path.sep)[-2:]
        relative_path = os.path.join(sample_actor, sample_basename)
        y = None
        for pattern in LABELS_REGEX:
            if re.search(pattern + "_", sample_basename):
                y = LABELS_REGEX[pattern]
        data_fn_y.append([relative_path, y])

    cnfs_mtx_dict = dict()

    # LOOCV
    for actor in dataset.ACTORS:
        if FLAGS.actor:
            if actor != FLAGS.actor:
                print("Skip")
                continue
        testing_actor = actor
        training_actors = list(dataset.ACTORS)
        training_actors.remove(testing_actor)

        model_fn_json = os.path.join(model_path, "model.json")
        model_fn_hdf5 = os.path.join(model_path, "model_{}.hdf5".format(actor))
        spatial_model_fn_json = os.path.join(model_path, "spatial_model.json")
        spatial_model_fn_hdf5 = os.path.join(model_path, "spatial_model_{}.hdf5".format(actor))
        temporal_model_fn_json = os.path.join(model_path, "temporal_model.json")
        temporal_model_fn_hdf5 = os.path.join(model_path, "temporal_model_{}.hdf5".format(actor))

        train_val_fns_y = []
        testing_fns_y = []
        for sample in data_fn_y:
            fn, y = sample
            sample_actor, sample_basename = fn.split(os.path.sep)
            if sample_actor == testing_actor:
                testing_fns_y.append([fn, y])
            else:
                train_val_fns_y.append([fn, y])

        # balanced split
        validation_fns_y, training_fns_y = [], []
        train_val_fns_y_classes = []
        for key in LABELS_REGEX.keys():
            tmp_class = []
            random.shuffle(train_val_fns_y)
            for sample in train_val_fns_y:
                fn, y = sample
                if (y == LABELS_REGEX[key]):
                    tmp_class.append(sample)
            print("{} samples in class {}".format(len(tmp_class), LABELS_REGEX[key]))
            split = int(len(tmp_class) * FLAGS.validation_size)
            validation_fns_y.extend(tmp_class[:split])
            training_fns_y.extend(tmp_class[split:])

        # add back the prefix
        # [temperature_fn, flow_fn], y = *_data
        def add_prefixes(list_fns_y, temperature_prefix, flow_prefix):
            list_data = []
            for sample in list_fns_y:
                fn, y = sample
                list_data.append([[os.path.join(temperature_prefix, fn),
                                   os.path.join(flow_prefix, fn)],
                                  to_categorical(y)])
            return list_data

        # [temperature_fn, flow_fn], y = *_data
        testing_data = add_prefixes(testing_fns_y, FLAGS.temperature_dir, FLAGS.flow_dir)
        training_data = add_prefixes(training_fns_y, FLAGS.temperature_dir, FLAGS.flow_dir)
        validation_data = add_prefixes(validation_fns_y, FLAGS.temperature_dir, FLAGS.flow_dir)

        training_batches = DataGenerator(training_data, FLAGS.training_batch_size, shuffle=True, augmentation=True)
        validation_batches = DataGenerator(validation_data, FLAGS.validation_batch_size, shuffle=True)
        testing_batches = DataGenerator(testing_data, FLAGS.testing_batch_size, shuffle=False)

        print("[INFO] \n")
        print("Actor: {}".format(actor))
        print("Training: {} samples -> {} batches".format(len(training_data), len(training_batches)))
        print("Validation: {} samples -> {} batches".format(len(validation_data), len(validation_batches)))
        print("Testing: {} samples -> {} batches".format(len(testing_data), len(testing_batches)))

        def train_model(model, epochs, training_batches, validation_batches,
                        callbacks, model_fn_json, prefix="", suffix=""):
            json_string = model.to_json()
            with open(model_fn_json, 'w') as json_file:
                json_file.write(json_string)
            model.summary()
            history = model.fit_generator(training_batches,
                                          epochs=epochs,
                                          validation_data=validation_batches,
                                          callbacks=callbacks)
            plot_history(history, model_path, prefix, suffix)
            return history

        def load_checkpoint(model_fn_json, model_fn_hdf5):
            # load json and create model
            with open(model_fn_json, 'r') as json_file:
                loaded_model_json = json_file.read()
            loaded_model = tensorflow.keras.models.model_from_json(loaded_model_json)
            # load weights into new model
            loaded_model.load_weights(model_fn_hdf5)
            print("[INFO] Loaded model from disk ({}, {})".format(model_fn_json, model_fn_hdf5))
            return loaded_model

        if (FLAGS.pretrain or FLAGS.spatial_only):
            # SPATIAL
            optimizer = optimizers.SGD(lr=FLAGS.learning_rate, clipnorm=0.5, momentum=0.5, nesterov=True)  # best
            early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)
            terminateNaN = TerminateOnNaN()  # that shouldn't happen
            saveBest = ModelCheckpoint(spatial_model_fn_hdf5, save_best_only=True)
            callbacks = [early_stopping, terminateNaN, saveBest]
            spatial_training_batches = TemperatureGenerator(training_data, FLAGS.training_batch_size, shuffle=True, augmentation=True)
            spatial_validation_batches = TemperatureGenerator(validation_data, FLAGS.validation_batch_size, shuffle=True)
            # the spatial stream consumes temperature sequences only
            spatial_testing_batches = TemperatureGenerator(testing_data, FLAGS.testing_batch_size, shuffle=False)
            spatial_model = compile_model(stream2model(*spatial_stream()), model_path, optimizer, prefix="spatial_")
            spatial_history = train_model(spatial_model, FLAGS.epochs,
                                          spatial_training_batches, spatial_validation_batches,
                                          callbacks, spatial_model_fn_json,
                                          prefix="spatial_", suffix=actor)
            if FLAGS.spatial_only:
                loaded_model = spatial_model = compile_model(stream2model(*spatial_stream()), model_path, optimizer, prefix="spatial_")
                loaded_model.load_weights(spatial_model_fn_hdf5, by_name=True)
                predictions = loaded_model.predict_generator(spatial_testing_batches)
                y_pred = np.argmax(predictions, axis=-1)
                y_test = np.argmax(spatial_testing_batches[0][1], axis=-1)
                cnfs_mtx = confusion_matrix(y_test, y_pred)
                print(accuracy_score(y_test, y_pred))
                C = cnfs_mtx / cnfs_mtx.astype(float).sum(axis=1, keepdims=True)
                cnfs_mtx_dict[actor] = cnfs_mtx
                continue
            clear_session()

        if (FLAGS.pretrain or FLAGS.temporal_only):
            # TEMPORAL
            optimizer = optimizers.SGD(lr=FLAGS.learning_rate, clipnorm=0.5)  # best
            early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)
            terminateNaN = TerminateOnNaN()  # that shouldn't happen
            saveBest = ModelCheckpoint(temporal_model_fn_hdf5, save_best_only=True)
            callbacks = [early_stopping, terminateNaN, saveBest]
            temporal_training_batches = FlowGenerator(training_data, FLAGS.training_batch_size, shuffle=True, augmentation=True)
            temporal_validation_batches = FlowGenerator(validation_data, FLAGS.validation_batch_size, shuffle=True)
            temporal_testing_batches = FlowGenerator(testing_data, FLAGS.testing_batch_size, shuffle=False)
            temporal_model = compile_model(stream2model(*temporal_stream()), model_path, optimizer, prefix="temporal_")
            temporal_history = train_model(temporal_model, FLAGS.epochs,
                                           temporal_training_batches, temporal_validation_batches,
                                           callbacks, temporal_model_fn_json,
                                           prefix="temporal_", suffix=actor)
            if FLAGS.temporal_only:
                loaded_model = temporal_model = compile_model(stream2model(*temporal_stream()), model_path, optimizer, prefix="temporal_")
                loaded_model.load_weights(temporal_model_fn_hdf5, by_name=True)
                predictions = loaded_model.predict_generator(temporal_testing_batches)
                y_pred = np.argmax(predictions, axis=-1)
                y_test = np.argmax(temporal_testing_batches[0][1], axis=-1)
                cnfs_mtx = confusion_matrix(y_test, y_pred)
                print(accuracy_score(y_test, y_pred))
                C = cnfs_mtx / cnfs_mtx.astype(float).sum(axis=1, keepdims=True)
                cnfs_mtx_dict[actor] = cnfs_mtx
                continue
            clear_session()

        # COMBINED
        optimizer = optimizers.SGD(lr=FLAGS.learning_rate, clipnorm=0.5)  # best
        early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)
        terminateNaN = TerminateOnNaN()  # that shouldn't happen
        saveBest = ModelCheckpoint(model_fn_hdf5, save_best_only=True)
        callbacks = [early_stopping, terminateNaN, saveBest]
        model = compile_model(merge_streams(*spatial_stream(), *temporal_stream()), model_path, optimizer)
        if FLAGS.pretrain:
            print("[INFO] Loading in pretrained streams weights...")
            model.load_weights(spatial_model_fn_hdf5, by_name=True)
            model.load_weights(temporal_model_fn_hdf5, by_name=True)
        history = train_model(model, FLAGS.epochs, training_batches, validation_batches,
                              callbacks, model_fn_json, suffix=actor)
        loaded_model = load_checkpoint(model_fn_json, model_fn_hdf5)
        predictions = loaded_model.predict_generator(testing_batches)
        y_pred = np.argmax(predictions, axis=-1)
        y_test = np.argmax(testing_batches[0][1], axis=-1)
        cnfs_mtx = confusion_matrix(y_test, y_pred)
        print(accuracy_score(y_test, y_pred))
        C = cnfs_mtx / cnfs_mtx.astype(float).sum(axis=1, keepdims=True)
        cnfs_mtx_dict[actor] = cnfs_mtx
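        # each LOOCV fold stores its confusion matrix under the held-out
        # actor's key; the matrices are summed after the loop into the
        # cross-validation metrics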
print("[INFO] Model successfully trained, tested on {} ".format(actor)) cross_validation_cnfs_mtx = sum(cnfs_mtx_dict[item] for item in cnfs_mtx_dict) cross_validation_accuracy = cross_validation_cnfs_mtx.diagonal().sum()/cross_validation_cnfs_mtx.sum() metrics = dict() metrics["confusion_matrix"] = cross_validation_cnfs_mtx metrics["accuracy"] = cross_validation_accuracy np.save(os.path.join(model_path, "metrics_dict.npy"), metrics) metrics = np.load(os.path.join(model_path, "metrics_dict.npy"), allow_pickle=True)[()] print(metrics["confusion_matrix"]) print(metrics["accuracy"]) with open(os.path.join(model_path,'cnfs_mtx.txt'),'wb') as f: for line in np.matrix(cross_validation_cnfs_mtx): np.savetxt(f, line, fmt='%.4f') with open(os.path.join(model_path,'accuracy.txt'),'wb') as f: for line in np.matrix(cross_validation_accuracy): np.savetxt(f, line, fmt='%.4f')
[ "numpy.matrix", "matplotlib.pyplot.legend", "sklearn.metrics.accuracy_score", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.plot", "tensorflow.keras.backend.clear_session", "tensorflow.keras.optimizers.SGD", "tensorflow.keras.layers.Concatenate", "numpy.random.randint", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.GRU", "numpy.argmax", "matplotlib.pyplot.close", "tensorflow.keras.backend.set_image_data_format", "numpy.load", "tensorflow.keras.callbacks.EarlyStopping", "numpy.zeros", "tensorflow.keras.layers.Flatten", "tensorflow.keras.callbacks.ModelCheckpoint", "matplotlib.pyplot.title", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.keras.callbacks.TerminateOnNaN", "numpy.savetxt", "numpy.array", "matplotlib.pyplot.ylabel", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.models.model_from_json", "matplotlib.pyplot.xlabel", "tensorflow.keras.backend.epsilon", "tensorflow.keras.backend.clip", "tensorflow.keras.utils.to_categorical", "numpy.vstack", "tensorflow.keras.layers.Input" ]
main.py
[(15, 'random.seed', 'random.seed', ([], {'a': 'SEED'}), False, 'import random\n'), (38, 'tensorflow.keras.backend.epsilon', 'tensorflow.keras.backend.epsilon', ([], {}), False, 'import tensorflow\n'), (40, 'tensorflow.keras.backend.set_image_data_format', 'keras.backend.set_image_data_format', (['"""channels_last"""'], {}), False, 'from tensorflow import keras\n'), (43, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, 16, 16, 1)', 'name': '"""spatial_input"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (63, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, 16, 16, 2)', 'name': '"""temporal_input"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (84, 'tensorflow.keras.models.Model', 'Model', (['stream_input', 'classification_output'], {}), False, 'from tensorflow.keras.models import Model\n'), (90, 'tensorflow.keras.models.Model', 'Model', (['[spatial_input, temporal_input]', 'output'], {}), False, 'from tensorflow.keras.models import Model\n'), (95, 'tools.prepare.ensure_dir_exists', 'prepare.ensure_dir_exists', (['model_dir'], {}), False, 'from tools import prepare\n'), (100, 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['acc']"], {}), True, 'import matplotlib.pyplot as plt\n'), (101, 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_acc']"], {}), True, 'import matplotlib.pyplot as plt\n'), (102, 'matplotlib.pyplot.title', 'plt.title', (['"""model accuracy"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (103, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (104, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (105, 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'validation']"], {'loc': '"""upper left"""'}), True, 'import matplotlib.pyplot as plt\n'), (107, 'matplotlib.pyplot.close', 'plt.close', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (109, 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), True, 'import matplotlib.pyplot as plt\n'), (110, 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), True, 'import matplotlib.pyplot as plt\n'), (111, 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (112, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (113, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (114, 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'validation']"], {'loc': '"""upper left"""'}), True, 'import matplotlib.pyplot as plt\n'), (119, 'tensorflow.keras.utils.to_categorical', 'tensorflow.keras.utils.to_categorical', (['y', 'CLASSES_N'], {}), False, 'import tensorflow\n'), (324, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), False, 'import argparse\n'), (398, 'os.path.join', 'os.path.join', (['FLAGS.model_dir', 'FLAGS.subdir'], {}), False, 'import os\n'), (411, 
'tools.dataset.Dataset', 'Dataset', (['FLAGS.dataset_dir'], {'minmax_normalized': '(True)'}), False, 'from tools.dataset import Dataset\n'), (56, 'tensorflow.keras.layers.GRU', 'GRU', (['(100)'], {'return_sequences': '(True)', 'name': '"""spatial_GRU"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (57, 'tensorflow.keras.layers.GRU', 'GRU', (['(100)'], {'return_sequences': '(False)', 'name': '"""spatial_GRU2"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (76, 'tensorflow.keras.layers.GRU', 'GRU', (['(100)'], {'return_sequences': '(True)', 'name': '"""temporal_GRU"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (77, 'tensorflow.keras.layers.GRU', 'GRU', (['(100)'], {'return_sequences': '(False)', 'name': '"""temporal_GRU2"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (83, 'tensorflow.keras.layers.Dense', 'Dense', (['CLASSES_N'], {'activation': '"""softmax"""', 'name': '"""single_stream_classification"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (88, 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'name': '"""merged_concat"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (89, 'tensorflow.keras.layers.Dense', 'Dense', (['CLASSES_N'], {'activation': '"""softmax"""', 'name': '"""merged_output"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (96, 'os.path.join', 'os.path.join', (['model_dir', "(prefix + 'model.png')"], {}), False, 'import os\n'), (106, 'os.path.join', 'os.path.join', (['model_dir', "(prefix + 'model_accuracy' + suffix + '.png')"], {}), False, 'import os\n'), (115, 'os.path.join', 'os.path.join', (['model_dir', "(prefix + 'model_loss' + suffix + '.png')"], {}), False, 'import os\n'), (200, 'numpy.zeros', 'np.zeros', (['[length - sequence.shape[0], *sequence.shape[1:]]', 'sequence.dtype'], {}), True, 'import numpy as np\n'), (202, 'numpy.vstack', 'np.vstack', (['[trailing, sequence]'], {}), True, 'import numpy as np\n'), (259, 'numpy.zeros', 'np.zeros', 
(['[length - sequence.shape[0], *sequence.shape[1:]]', 'sequence.dtype'], {}), True, 'import numpy as np\n'), (261, 'numpy.vstack', 'np.vstack', (['[trailing, sequence]'], {}), True, 'import numpy as np\n'), (318, 'numpy.zeros', 'np.zeros', (['[length - sequence.shape[0], *sequence.shape[1:]]', 'sequence.dtype'], {}), True, 'import numpy as np\n'), (320, 'numpy.vstack', 'np.vstack', (['[trailing, sequence]'], {}), True, 'import numpy as np\n'), (406, 'tools.dataset.download', 'dataset.download', (['""".."""'], {}), False, 'from tools import dataset\n'), (414, 'tools.prepare.sequences_by_actor', 'prepare.sequences_by_actor', (['data_normalized', 'FLAGS.temperature_dir'], {}), False, 'from tools import prepare\n'), (415, 'tools.prepare.optical_flow', 'prepare.optical_flow', (['data_normalized', 'FLAGS.flow_dir'], {}), False, 'from tools import prepare\n'), (417, 'os.path.join', 'os.path.join', (['FLAGS.temperature_dir', '"""**"""', '"""*.npy"""'], {}), False, 'import os\n'), (419, 'os.path.join', 'os.path.join', (['FLAGS.flow_dir', '"""**"""', '"""*.npy"""'], {}), False, 'import os\n'), (438, 'os.path.join', 'os.path.join', (['sample_actor', 'sample_basename'], {}), False, 'import os\n'), (457, 'os.path.join', 'os.path.join', (['model_path', '"""model.json"""'], {}), False, 'import os\n'), (459, 'os.path.join', 'os.path.join', (['model_path', '"""spatial_model.json"""'], {}), False, 'import os\n'), (461, 'os.path.join', 'os.path.join', (['model_path', '"""temporal_model.json"""'], {}), False, 'import os\n'), (613, 'tensorflow.keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': 'FLAGS.learning_rate', 'clipnorm': '(0.5)'}), False, 'from tensorflow.keras import optimizers\n'), (614, 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(20)', 'verbose': '(1)'}), False, 'from tensorflow.keras.callbacks import EarlyStopping, TerminateOnNaN, ModelCheckpoint, ReduceLROnPlateau\n'), (615, 'tensorflow.keras.callbacks.TerminateOnNaN', 'TerminateOnNaN', ([], {}), False, 'from tensorflow.keras.callbacks import EarlyStopping, TerminateOnNaN, ModelCheckpoint, ReduceLROnPlateau\n'), (616, 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['model_fn_hdf5'], {'save_best_only': '(True)'}), False, 'from tensorflow.keras.callbacks import EarlyStopping, TerminateOnNaN, ModelCheckpoint, ReduceLROnPlateau\n'), (628, 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(-1)'}), True, 'import numpy as np\n'), (629, 'numpy.argmax', 'np.argmax', (['testing_batches[0][1]'], {'axis': '(-1)'}), True, 'import numpy as np\n'), (630, 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), (643, 'os.path.join', 'os.path.join', (['model_path', '"""metrics_dict.npy"""'], {}), False, 'import os\n'), (649, 'numpy.matrix', 'np.matrix', (['cross_validation_cnfs_mtx'], {}), True, 'import numpy as np\n'), (653, 'numpy.matrix', 'np.matrix', (['cross_validation_accuracy'], {}), True, 'import numpy as np\n'), (44, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""spatial_conv1"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (45, 
'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""spatial_bn_layer"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (46, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'name': '"""spatial_maxpool1"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (47, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""spatial_conv2"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (48, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'name': '"""spatial_maxpool2"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (49, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""spatial_conv3"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (50, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'name': '"""spatial_maxpool3"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (51, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""spatial_conv4"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (52, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'name': '"""spatial_maxpool4"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (53, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {'name': '"""spatial_flattened"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, 
Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (54, 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {'name': '"""spatial_dense1"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (55, 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'name': '"""spatial_dense2"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (64, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""temporal_conv1"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (65, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""temporal_bn_layer"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (66, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'name': '"""temporal_maxpool1"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (67, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""temporal_conv2"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (68, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'name': '"""temporal_maxpool2"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (69, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""temporal_conv3"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (70, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], 
{'pool_size': '(2, 2)', 'strides': '(2, 2)', 'name': '"""temporal_maxpool3"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (71, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""temporal_conv4"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (72, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'name': '"""temporal_maxpool4"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (73, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {'name': '"""temporal_flattened"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (74, 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {'name': '"""temporal_dense1"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (75, 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'name': '"""temporal_dense2"""'}), False, 'from tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D, BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape, GRU, average, Lambda, Average, Maximum, Concatenate\n'), (147, 'random.shuffle', 'random.shuffle', (['self.data'], {}), False, 'import random\n'), (155, 'random.shuffle', 'random.shuffle', (['self.data'], {}), False, 'import random\n'), (193, 'numpy.array', 'np.array', (['TEMPERATURE'], {}), True, 'import numpy as np\n'), (193, 'numpy.array', 'np.array', (['FLOW'], {}), True, 'import numpy as np\n'), (193, 'numpy.array', 'np.array', (['Y'], {}), True, 'import numpy as np\n'), (213, 'random.shuffle', 'random.shuffle', (['self.data'], {}), False, 'import random\n'), (221, 'random.shuffle', 'random.shuffle', (['self.data'], {}), False, 'import random\n'), (253, 'numpy.array', 'np.array', (['TEMPERATURE'], {}), True, 'import numpy as np\n'), (253, 'numpy.array', 'np.array', (['Y'], {}), True, 'import numpy as np\n'), (272, 'random.shuffle', 'random.shuffle', (['self.data'], {}), False, 'import random\n'), (280, 'random.shuffle', 'random.shuffle', (['self.data'], {}), False, 'import random\n'), (312, 'numpy.array', 'np.array', (['FLOW'], {}), True, 'import numpy as np\n'), (312, 'numpy.array', 'np.array', (['Y'], {}), True, 'import numpy as np\n'), (327, 'os.path.join', 'os.path.join', (['""".."""', '"""dataset"""'], {}), False, 'import os\n'), (441, 're.search', 
're.search', (["(pattern + '_')", 'sample_basename'], {}), False, 'import re\n'), (479, 'random.shuffle', 'random.shuffle', (['train_val_fns_y'], {}), False, 'import random\n'), (544, 'tensorflow.keras.models.model_from_json', 'tensorflow.keras.models.model_from_json', (['loaded_model_json'], {}), False, 'import tensorflow\n'), (552, 'tensorflow.keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': 'FLAGS.learning_rate', 'clipnorm': '(0.5)', 'momentum': '(0.5)', 'nesterov': '(True)'}), False, 'from tensorflow.keras import optimizers\n'), (553, 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(20)', 'verbose': '(1)'}), False, 'from tensorflow.keras.callbacks import EarlyStopping, TerminateOnNaN, ModelCheckpoint, ReduceLROnPlateau\n'), (554, 'tensorflow.keras.callbacks.TerminateOnNaN', 'TerminateOnNaN', ([], {}), False, 'from tensorflow.keras.callbacks import EarlyStopping, TerminateOnNaN, ModelCheckpoint, ReduceLROnPlateau\n'), (555, 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['spatial_model_fn_hdf5'], {'save_best_only': '(True)'}), False, 'from tensorflow.keras.callbacks import EarlyStopping, TerminateOnNaN, ModelCheckpoint, ReduceLROnPlateau\n'), (579, 'tensorflow.keras.backend.clear_session', 'clear_session', ([], {}), False, 'from tensorflow.keras.backend import clear_session\n'), (583, 'tensorflow.keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': 'FLAGS.learning_rate', 'clipnorm': '(0.5)'}), False, 'from tensorflow.keras import optimizers\n'), (584, 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(20)', 'verbose': '(1)'}), False, 'from tensorflow.keras.callbacks import EarlyStopping, TerminateOnNaN, ModelCheckpoint, ReduceLROnPlateau\n'), (585, 'tensorflow.keras.callbacks.TerminateOnNaN', 'TerminateOnNaN', ([], {}), False, 'from tensorflow.keras.callbacks import EarlyStopping, TerminateOnNaN, ModelCheckpoint, ReduceLROnPlateau\n'), (586, 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['temporal_model_fn_hdf5'], {'save_best_only': '(True)'}), False, 'from tensorflow.keras.callbacks import EarlyStopping, TerminateOnNaN, ModelCheckpoint, ReduceLROnPlateau\n'), (610, 'tensorflow.keras.backend.clear_session', 'clear_session', ([], {}), False, 'from tensorflow.keras.backend import clear_session\n'), (631, 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), (644, 'os.path.join', 'os.path.join', (['model_path', '"""metrics_dict.npy"""'], {}), False, 'import os\n'), (648, 'os.path.join', 'os.path.join', (['model_path', '"""cnfs_mtx.txt"""'], {}), False, 'import os\n'), (650, 'numpy.savetxt', 'np.savetxt', (['f', 'line'], {'fmt': '"""%.4f"""'}), True, 'import numpy as np\n'), (652, 'os.path.join', 'os.path.join', (['model_path', '"""accuracy.txt"""'], {}), False, 'import os\n'), (654, 'numpy.savetxt', 'np.savetxt', (['f', 'line'], {'fmt': '"""%.4f"""'}), True, 'import numpy as np\n'), (59, 'tensorflow.keras.backend.clip', 'tensorflow.keras.backend.clip', (['x', 'KERAS_EPSILON', '(1 - KERAS_EPSILON)'], {}), False, 'import tensorflow\n'), (79, 'tensorflow.keras.backend.clip', 'tensorflow.keras.backend.clip', (['x', 'KERAS_EPSILON', '(1 - KERAS_EPSILON)'], {}), False, 'import tensorflow\n'), (168, 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {}), True, 'import numpy as np\n'), (169, 'numpy.random.randint', 
'np.random.randint', (['(0)', '(3)'], {}), True, 'import numpy as np\n'), (173, 'tools.augmentation.random_rotation', 'augment.random_rotation', (['temperature'], {'case': 'k_rot'}), True, 'from tools import augmentation as augment\n'), (174, 'tools.augmentation.random_flip', 'augment.random_flip', (['temperature'], {'case': 'k_flip'}), True, 'from tools import augmentation as augment\n'), (233, 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {}), True, 'import numpy as np\n'), (234, 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), True, 'import numpy as np\n'), (238, 'tools.augmentation.random_rotation', 'augment.random_rotation', (['temperature'], {'case': 'k_rot'}), True, 'from tools import augmentation as augment\n'), (239, 'tools.augmentation.random_flip', 'augment.random_flip', (['temperature'], {'case': 'k_flip'}), True, 'from tools import augmentation as augment\n'), (292, 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {}), True, 'import numpy as np\n'), (293, 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), True, 'import numpy as np\n'), (297, 'tools.augmentation.random_rotation', 'augment.random_rotation', (['temperature'], {'case': 'k_rot'}), True, 'from tools import augmentation as augment\n'), (298, 'tools.augmentation.random_flip', 'augment.random_flip', (['temperature'], {'case': 'k_flip'}), True, 'from tools import augmentation as augment\n'), (331, 'os.path.join', 'os.path.join', (['"""tmps"""', '"""model"""'], {}), False, 'import os\n'), (340, 'os.path.join', 'os.path.join', (['"""tmps"""', '"""cache"""', '"""temperature"""'], {}), False, 'import os\n'), (345, 'os.path.join', 'os.path.join', (['"""tmps"""', '"""cache"""', '"""optical_flow"""'], {}), False, 'import os\n'), (572, 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(-1)'}), True, 'import numpy as np\n'), (573, 'numpy.argmax', 'np.argmax', (['spatial_testing_batches[0][1]'], {'axis': '(-1)'}), True, 'import numpy as np\n'), (574, 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), (603, 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(-1)'}), True, 'import numpy as np\n'), (604, 'numpy.argmax', 'np.argmax', (['temporal_testing_batches[0][1]'], {'axis': '(-1)'}), True, 'import numpy as np\n'), (605, 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), (171, 'numpy.load', 'np.load', (['temperature_fn'], {}), True, 'import numpy as np\n'), (236, 'numpy.load', 'np.load', (['temperature_fn'], {}), True, 'import numpy as np\n'), (295, 'numpy.load', 'np.load', (['temperature_fn'], {}), True, 'import numpy as np\n'), (575, 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), (606, 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), (422, 'os.path.split', 'os.path.split', (['item'], {}), False, 'import os\n'), (424, 'os.path.split', 'os.path.split', (['item'], {}), False, 'import os\n'), (496, 'os.path.join', 'os.path.join', (['temperature_prefix', 'fn'], {}), False, 'import os\n'), (497, 'os.path.join', 'os.path.join', (['flow_prefix', 'fn'], {}), False, 'import os\n')]
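The api_extract entries above appear to follow the schema (line_number, qualified_api, call_name, (args, kwargs), aliased_import_flag, import_statement); that reading is inferred from the data itself, not from a published spec. Under that assumption, a minimal sketch of tallying which top-level libraries a record uses:

from collections import Counter

def top_level_libraries(api_extract):
    # first dotted component of the qualified API name,
    # e.g. 'tensorflow.keras.layers.Dense' -> 'tensorflow'
    return Counter(entry[1].split(".")[0] for entry in api_extract)

records = [
    (44, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {}), False, 'from tensorflow.keras.layers import Conv2D\n'),
    (168, 'numpy.random.randint', 'np.random.randint', ([], {}), True, 'import numpy as np\n'),
]
print(top_level_libraries(records))   # Counter({'tensorflow': 1, 'numpy': 1})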
antoinedemathelin/adapt
cae888b1a0ae2d82772ae8575457f5ad7799a8b7
""" Test functions for utils module. """ import numpy as np import pytest import tensorflow as tf import tensorflow.keras.backend as K from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import AdaBoostRegressor from sklearn.multioutput import MultiOutputRegressor from sklearn.compose import TransformedTargetRegressor from sklearn.base import BaseEstimator, RegressorMixin, ClassifierMixin from sklearn.tree._tree import Tree from tensorflow.keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor from tensorflow.keras import Model, Sequential from tensorflow.keras.layers import Input, Dense, Flatten, Reshape from tensorflow.python.keras.engine.input_layer import InputLayer from adapt.utils import * def is_equal_estimator(v1, v2): assert type(v2) == type(v1) if isinstance(v1, np.ndarray): assert np.array_equal(v1, v2) elif isinstance(v1, (BaseEstimator, KerasClassifier, KerasRegressor)): assert is_equal_estimator(v1.__dict__, v2.__dict__) elif isinstance(v1, Model): assert is_equal_estimator(v1.get_config(), v2.get_config()) elif isinstance(v1, dict): diff_keys = ((set(v1.keys())-set(v2.keys())) | (set(v2.keys())-set(v1.keys()))) for k in diff_keys: assert "input_shape" in k for k1_i, v1_i in v1.items(): # Avoid exception due to new input layer name if k1_i != "name" and not "input_shape" in str(k1_i): v2_i = v2[k1_i] assert is_equal_estimator(v1_i, v2_i) elif isinstance(v1, (list, tuple)): assert len(v1) == len(v2) for v1_i, v2_i in zip(v1, v2): assert is_equal_estimator(v1_i, v2_i) elif isinstance(v1, Tree): pass # TODO create a function to check if two tree are equal else: if not "input" in str(v1): assert v1 == v2 return True class CustomEstimator(BaseEstimator): def __init__(self): pass def fit(self, X, y): pass class DummyModel(Model): def __init__(self): pass class CantBeDeepCopied(BaseEstimator): def __init__(self): pass def __deepcopy__(self): raise ValueError("Can not be deep copied!") def _get_model_Model(compiled=True, custom_loss=False): inputs = Input((10,)) output = Dense(1)(inputs) model = Model(inputs, output) if custom_loss: loss = K.mean(output) model.add_loss(loss) if compiled: model.compile(loss="mse", optimizer="adam") return model def _get_model_Sequential(input_shape=None, compiled=True): model = Sequential() if input_shape is not None: model.add(Dense(1, input_shape=input_shape)) else: model.add(Dense(1)) if compiled: model.compile(loss="mse", optimizer="adam") return model arrays_nd = [np.ones((10, 1)), np.zeros((10, 10)), np.zeros((10, 5, 1)), np.full((10, 20), -5.5), np.ones((1, 1)), np.random.randn(1, 5, 5, 1)] @pytest.mark.parametrize("z", arrays_nd) def test_check_arrays_nd(z): Xs, ys, Xt, yt = check_arrays(z, z, z, z) assert np.array_equal(Xs, z) assert np.array_equal(ys, z) assert np.array_equal(Xt, z) assert np.array_equal(yt, z) def test_check_arrays_diff_input(): Xs, ys, Xt, yt = arrays_nd[:4] assert np.array_equal(Xs, arrays_nd[0]) assert np.array_equal(ys, arrays_nd[1]) assert np.array_equal(Xt, arrays_nd[2]) assert np.array_equal(yt, arrays_nd[3]) arrays_1d = [np.ones((10,)), np.ones((1,))] arrays_2d = [np.ones((10, 1)), np.ones((1, 1))] @pytest.mark.parametrize("z, zz", zip(arrays_1d, arrays_2d)) def test_check_arrays_1d(z, zz): Xs, ys, Xt, yt = check_arrays(z, z, z, z) assert np.array_equal(Xs, zz) assert np.array_equal(ys, zz) assert np.array_equal(Xt, zz) 
    assert np.array_equal(yt, zz)


def test_check_arrays_no_yt():
    z = arrays_nd[0]
    Xs, ys, Xt, yt = check_arrays(z, z, z)
    assert yt is None
    assert np.array_equal(Xs, z)
    assert np.array_equal(ys, z)
    assert np.array_equal(Xt, z)


def test_check_arrays_length_error():
    z = arrays_nd[0]
    with pytest.raises(ValueError) as excinfo:
        Xs, ys, Xt, yt = check_arrays(z, z[:5], z)
    assert "Length of Xs and ys mismatch: 10 != 5" in str(excinfo.value)
    with pytest.raises(ValueError) as excinfo:
        Xs, ys, Xt, yt = check_arrays(z, z, z, z[:5])
    assert "Length of Xt and yt mismatch: 10 != 5" in str(excinfo.value)


def test_check_arrays_no_array():
    z = np.array([1, 2, 3])
    with pytest.raises(TypeError) as excinfo:
        Xs, ys, Xt, yt = check_arrays("123", z, z)


@pytest.mark.parametrize("X", arrays_nd)
def test_check_one_array_nd(X):
    Xt = check_one_array(X)
    assert np.array_equal(Xt, X)


@pytest.mark.parametrize("X, Xtt", zip(arrays_1d, arrays_2d))
def test_check_one_array_1d(X, Xtt):
    Xt = check_one_array(X)
    assert np.array_equal(Xt, Xtt)


networks = [
    _get_model_Model(compiled=True, custom_loss=False),
    _get_model_Sequential(compiled=True, input_shape=(10,)),
    _get_model_Sequential(compiled=True, input_shape=None),
    _get_model_Model(compiled=False, custom_loss=False),
    _get_model_Model(compiled=False, custom_loss=True),
    _get_model_Sequential(compiled=False, input_shape=(10,)),
    _get_model_Sequential(compiled=False, input_shape=None)
]


@pytest.mark.parametrize("net", networks)
def test_check_network_network(net):
    new_net = check_network(net, compile_=False)
    assert is_equal_estimator(new_net, net)
    if net.built:
        for i in range(len(net.get_weights())):
            assert np.array_equal(net.get_weights()[i], new_net.get_weights()[i])
    net.predict(np.ones((10, 10)))
    new_net = check_network(net, compile_=False)
    assert is_equal_estimator(new_net, net)
    for i in range(len(net.get_weights())):
        assert np.array_equal(net.get_weights()[i], new_net.get_weights()[i])


@pytest.mark.parametrize("net", networks)
def test_check_network_copy(net):
    new_net = check_network(net, copy=True, compile_=False)
    assert hex(id(new_net)) != hex(id(net))
    new_net = check_network(net, copy=False, compile_=False)
    assert hex(id(new_net)) == hex(id(net))


no_networks = ["lala", Ridge(), 123, np.ones((10, 10))]


@pytest.mark.parametrize("no_net", no_networks)
def test_check_network_no_model(no_net):
    with pytest.raises(ValueError) as excinfo:
        new_net = check_network(no_net)
    assert ("Expected `network` argument "
            "to be a `Model` instance,"
            " got: %s" % str(no_net) in str(excinfo.value))
    with pytest.raises(ValueError) as excinfo:
        new_net = check_network(no_net, display_name="tireli")
    assert ("Expected `tireli` argument "
            "to be a `Model` instance,"
            " got: %s" % str(no_net) in str(excinfo.value))


def test_check_network_force_copy():
    model = DummyModel()
    with pytest.raises(ValueError) as excinfo:
        new_net = check_network(model, copy=True, force_copy=True)
    assert ("`network` argument can't be duplicated. "
            "Recorded exception: " in str(excinfo.value))
    with pytest.raises(ValueError) as excinfo:
        new_net = check_network(model, copy=True, force_copy=True,
                                display_name="tireli")
    assert ("`tireli` argument can't be duplicated. "
            "Recorded exception: " in str(excinfo.value))
    with pytest.warns(UserWarning) as record:
        new_net = check_network(model, copy=True, force_copy=False,
                                compile_=False)
    assert ("`network` argument can't be duplicated. "
            "Recorded exception: " in str(record[0].message))
    with pytest.warns(UserWarning) as record:
        new_net = check_network(model, copy=True, force_copy=False,
                                compile_=False, display_name="tireli")
    assert ("`tireli` argument can't be duplicated. "
            "Recorded exception: " in str(record[0].message))
    new_net = check_network(model, copy=False, force_copy=True)


def test_check_network_compile():
    net = _get_model_Sequential(compiled=False)
    with pytest.raises(ValueError) as excinfo:
        new_net = check_network(net, copy=True, compile_=True)
    assert ("The given `network` argument is not compiled yet. "
            "Please use `model.compile(optimizer, loss)`." in str(excinfo.value))
    with pytest.raises(ValueError) as excinfo:
        new_net = check_network(net, copy=True, compile_=True,
                                display_name="tireli")
    assert ("The given `tireli` argument is not compiled yet. "
            "Please use `model.compile(optimizer, loss)`." in str(excinfo.value))


def test_check_network_high_dataset():
    Xs, ys, Xt, yt = make_regression_da(100000, 1001)
    net = _get_model_Sequential(compiled=True)
    new_net = check_network(net, copy=True, compile_=True)
    new_net.predict(Xs)


estimators = [
    Ridge(),
    Ridge(alpha=10, fit_intercept=False, tol=0.1),
    DecisionTreeClassifier(max_depth=10),
    AdaBoostRegressor(Ridge(alpha=0.01)),
    TransformedTargetRegressor(regressor=Ridge(alpha=25), transformer=StandardScaler()),
    MultiOutputRegressor(Ridge(alpha=0.3)),
    make_pipeline(StandardScaler(), Ridge(alpha=0.2)),
    KerasClassifier(_get_model_Sequential, input_shape=(1,)),
    CustomEstimator()
]


@pytest.mark.parametrize("est", estimators)
def test_check_estimator_estimators(est):
    new_est = check_estimator(est, copy=True, force_copy=True)
    assert is_equal_estimator(est, new_est)
    if isinstance(est, MultiOutputRegressor):
        est.fit(np.linspace(0, 1, 10).reshape(-1, 1),
                np.stack([np.linspace(0, 1, 10) < 0.5] * 2, -1).astype(float))
    else:
        est.fit(np.linspace(0, 1, 10).reshape(-1, 1),
                (np.linspace(0, 1, 10) < 0.5).astype(float))
    if isinstance(est, KerasClassifier):
        new_est = check_estimator(est, copy=False)
    else:
        new_est = check_estimator(est, copy=True, force_copy=True)
    assert is_equal_estimator(est, new_est)


@pytest.mark.parametrize("est", networks[:3])
def test_check_estimator_networks(est):
    new_est = check_estimator(est)
    assert is_equal_estimator(est, new_est)


no_estimators = ["lala", 123, np.ones((10, 10))]


@pytest.mark.parametrize("no_est", no_estimators)
def test_check_estimator_no_estimators(no_est):
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(no_est)
    assert ("`estimator` argument is neither a sklearn `BaseEstimator` "
            "instance nor a tensorflow Model instance. "
            "Given argument, %s" % str(no_est) in str(excinfo.value))
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(no_est, display_name="tireli")
    assert ("`tireli` argument is neither a sklearn `BaseEstimator` "
            "instance nor a tensorflow Model instance. "
            "Given argument, %s" % str(no_est) in str(excinfo.value))


@pytest.mark.parametrize("est", estimators)
def test_check_estimator_copy(est):
    new_est = check_estimator(est, copy=True)
    assert hex(id(new_est)) != hex(id(est))
    new_est = check_estimator(est, copy=False)
    assert hex(id(new_est)) == hex(id(est))


def test_check_estimator_force_copy():
    est = CantBeDeepCopied()
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(est, copy=True, force_copy=True)
    assert ("`estimator` argument can't be duplicated. "
            "Recorded exception: " in str(excinfo.value))
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(est, copy=True, force_copy=True,
                                  display_name="tireli")
    assert ("`tireli` argument can't be duplicated. "
            "Recorded exception: " in str(excinfo.value))
    with pytest.warns(UserWarning) as record:
        new_est = check_estimator(est, copy=True, force_copy=False)
    assert ("`estimator` argument can't be duplicated. "
            "Recorded exception: " in str(record[0].message))
    with pytest.warns(UserWarning) as record:
        new_est = check_estimator(est, copy=True, force_copy=False,
                                  display_name="tireli")
    assert ("`tireli` argument can't be duplicated. "
            "Recorded exception: " in str(record[0].message))
    new_est = check_estimator(est, copy=False, force_copy=True)


def test_check_estimator_task():
    new_est = check_estimator()
    assert isinstance(new_est, LinearRegression)
    new_est = check_estimator(task="class")
    assert isinstance(new_est, LogisticRegression)
    new_est = check_estimator(DecisionTreeClassifier(), task="class")
    assert isinstance(new_est, DecisionTreeClassifier)
    new_est = check_estimator(Ridge(), task="reg")
    assert isinstance(new_est, Ridge)
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(DecisionTreeClassifier(), task="reg")
    assert ("`estimator` argument is a sklearn `ClassifierMixin` instance "
            "whereas the considered object handles only regression task. "
            "Please provide a sklearn `RegressionMixin` instance or a "
            "tensorflow Model instance." in str(excinfo.value))
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(DecisionTreeClassifier(), task="reg",
                                  display_name="tireli")
    assert ("`tireli` argument is a sklearn"
            " `ClassifierMixin` instance " in str(excinfo.value))
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(Ridge(), task="class")
    assert ("`estimator` argument is a sklearn `RegressionMixin` instance "
            "whereas the considered object handles only classification task. "
            "Please provide a sklearn `ClassifierMixin` instance or a "
            "tensorflow Model instance." in str(excinfo.value))
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(Ridge(), task="class",
                                  display_name="tireli")
    assert ("`tireli` argument is a sklearn"
            " `RegressionMixin` instance " in str(excinfo.value))


def test_get_default_encoder():
    model = get_default_encoder()
    assert isinstance(model.layers[0], Flatten)
    assert isinstance(model.layers[1], Dense)
    assert model.layers[1].get_config()["units"] == 10
    assert model.layers[1].get_config()["activation"] == "relu"


def test_get_default_task():
    model = get_default_task()
    assert isinstance(model.layers[0], Flatten)
    assert isinstance(model.layers[1], Dense)
    assert isinstance(model.layers[2], Dense)
    assert isinstance(model.layers[3], Dense)
    assert model.layers[1].get_config()["units"] == 10
    assert model.layers[1].get_config()["activation"] == "relu"
    assert model.layers[2].get_config()["units"] == 10
    assert model.layers[2].get_config()["activation"] == "relu"
    assert model.layers[3].get_config()["units"] == 1
    assert model.layers[3].get_config()["activation"] == "linear"


def test_get_default_discriminator():
    model = get_default_discriminator()
    assert isinstance(model.layers[0], Flatten)
    assert isinstance(model.layers[1], Dense)
    assert isinstance(model.layers[2], Dense)
    assert isinstance(model.layers[3], Dense)
    assert model.layers[1].get_config()["units"] == 10
    assert model.layers[1].get_config()["activation"] == "relu"
    assert model.layers[2].get_config()["units"] == 10
    assert model.layers[2].get_config()["activation"] == "relu"
    assert model.layers[3].get_config()["units"] == 1
    assert model.layers[3].get_config()["activation"] == "sigmoid"


scales = [-1, 0, 1., 0.1]


@pytest.mark.parametrize("lambda_", scales)
def test_gradienthandler(lambda_):
    grad_handler = GradientHandler(lambda_)
    inputs = K.variable([1, 2, 3])
    assert np.all(grad_handler(inputs) == inputs)
    with tf.GradientTape() as tape:
        gradient = tape.gradient(grad_handler(inputs), inputs)
    assert np.all(gradient == lambda_ * np.ones(3))
    config = grad_handler.get_config()
    assert config['lambda_init'] == lambda_


def test_make_classification_da():
    Xs, ys, Xt, yt = make_classification_da()
    assert Xs.shape == (100, 2)
    assert len(ys) == 100
    assert Xt.shape == (100, 2)
    assert len(yt) == 100
    Xs, ys, Xt, yt = make_classification_da(1000, 10)
    assert Xs.shape == (1000, 10)
    assert len(ys) == 1000
    assert Xt.shape == (1000, 10)
    assert len(yt) == 1000


def test_make_regression_da():
    Xs, ys, Xt, yt = make_regression_da()
    assert Xs.shape == (100, 1)
    assert len(ys) == 100
    assert Xt.shape == (100, 1)
    assert len(yt) == 100
    Xs, ys, Xt, yt = make_regression_da(1000, 10)
    assert Xs.shape == (1000, 10)
    assert len(ys) == 1000
    assert Xt.shape == (1000, 10)
    assert len(yt) == 1000
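The GradientHandler exercised by test_gradienthandler above acts as an identity in the forward pass while scaling gradients by lambda_ in the backward pass (see the `gradient == lambda_ * np.ones(3)` assertion). A minimal sketch of the same mechanism built on tf.custom_gradient, for illustration only; make_grad_scaler is a hypothetical helper and not adapt's actual implementation:

import numpy as np
import tensorflow as tf

def make_grad_scaler(lambda_=1.0):
    # identity on the forward pass; multiplies incoming gradients by
    # `lambda_` on the backward pass (lambda_ = -1 gives gradient reversal)
    @tf.custom_gradient
    def grad_scaler(x):
        def grad(dy):
            return lambda_ * dy
        return tf.identity(x), grad
    return grad_scaler

x = tf.Variable([1., 2., 3.])
with tf.GradientTape() as tape:
    y = make_grad_scaler(-1.)(x)
gradient = tape.gradient(y, x)
assert np.all(gradient.numpy() == -np.ones(3))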
[ "tensorflow.keras.backend.variable", "numpy.array_equal", "numpy.linspace", "tensorflow.keras.layers.Dense", "tensorflow.keras.Sequential", "tensorflow.keras.Model", "numpy.ones", "numpy.full", "tensorflow.keras.wrappers.scikit_learn.KerasClassifier", "sklearn.linear_model.Ridge", "sklearn.tree.DecisionTreeClassifier", "numpy.random.randn", "tensorflow.GradientTape", "tensorflow.keras.backend.mean", "sklearn.preprocessing.StandardScaler", "numpy.array", "numpy.zeros", "tensorflow.keras.layers.Input" ]
tests/test_utils.py
[(110, 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""z"""', 'arrays_nd'], {}), False, 'import pytest\n'),
 (164, 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""X"""', 'arrays_nd'], {}), False, 'import pytest\n'),
 (187, 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""net"""', 'networks'], {}), False, 'import pytest\n'),
 (203, 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""net"""', 'networks'], {}), False, 'import pytest\n'),
 (213, 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""no_net"""', 'no_networks'], {}), False, 'import pytest\n'),
 (288, 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""est"""', 'estimators'], {}), False, 'import pytest\n'),
 (305, 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""est"""', 'networks[:3]'], {}), False, 'import pytest\n'),
 (313, 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""no_est"""', 'no_estimators'], {}), False, 'import pytest\n'),
 (327, 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""est"""', 'estimators'], {}), False, 'import pytest\n'),
 (435, 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lambda_"""', 'scales'], {}), False, 'import pytest\n'),
 (84, 'tensorflow.keras.layers.Input', 'Input', (['(10,)'], {}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Reshape\n'),
 (86, 'tensorflow.keras.Model', 'Model', (['inputs', 'output'], {}), False, 'from tensorflow.keras import Model, Sequential\n'),
 (96, 'tensorflow.keras.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras import Model, Sequential\n'),
 (106, 'numpy.ones', 'np.ones', (['(10, 1)'], {}), True, 'import numpy as np\n'),
 (106, 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), True, 'import numpy as np\n'),
 (107, 'numpy.zeros', 'np.zeros', (['(10, 5, 1)'], {}), True, 'import numpy as np\n'),
 (107, 'numpy.full', 'np.full', (['(10, 20)', '(-5.5)'], {}), True, 'import numpy as np\n'),
 (108, 'numpy.ones', 'np.ones', (['(1, 1)'], {}), True, 'import numpy as np\n'),
 (108, 'numpy.random.randn', 'np.random.randn', (['(1)', '(5)', '(5)', '(1)'], {}), True, 'import numpy as np\n'),
 (113, 'numpy.array_equal', 'np.array_equal', (['Xs', 'z'], {}), True, 'import numpy as np\n'),
 (114, 'numpy.array_equal', 'np.array_equal', (['ys', 'z'], {}), True, 'import numpy as np\n'),
 (115, 'numpy.array_equal', 'np.array_equal', (['Xt', 'z'], {}), True, 'import numpy as np\n'),
 (116, 'numpy.array_equal', 'np.array_equal', (['yt', 'z'], {}), True, 'import numpy as np\n'),
 (121, 'numpy.array_equal', 'np.array_equal', (['Xs', 'arrays_nd[0]'], {}), True, 'import numpy as np\n'),
 (122, 'numpy.array_equal', 'np.array_equal', (['ys', 'arrays_nd[1]'], {}), True, 'import numpy as np\n'),
 (123, 'numpy.array_equal', 'np.array_equal', (['Xt', 'arrays_nd[2]'], {}), True, 'import numpy as np\n'),
 (124, 'numpy.array_equal', 'np.array_equal', (['yt', 'arrays_nd[3]'], {}), True, 'import numpy as np\n'),
 (127, 'numpy.ones', 'np.ones', (['(10,)'], {}), True, 'import numpy as np\n'),
 (127, 'numpy.ones', 'np.ones', (['(1,)'], {}), True, 'import numpy as np\n'),
 (128, 'numpy.ones', 'np.ones', (['(10, 1)'], {}), True, 'import numpy as np\n'),
 (128, 'numpy.ones', 'np.ones', (['(1, 1)'], {}), True, 'import numpy as np\n'),
 (133, 'numpy.array_equal', 'np.array_equal', (['Xs', 'zz'], {}), True, 'import numpy as np\n'),
 (134, 'numpy.array_equal', 'np.array_equal', (['ys', 'zz'], {}), True, 'import numpy as np\n'),
 (135, 'numpy.array_equal', 'np.array_equal', (['Xt', 'zz'], {}), True, 'import numpy as np\n'),
 (136, 'numpy.array_equal', 'np.array_equal', (['yt', 'zz'], {}), True, 'import numpy as np\n'),
 (143, 'numpy.array_equal', 'np.array_equal', (['Xs', 'z'], {}), True, 'import numpy as np\n'),
 (144, 'numpy.array_equal', 'np.array_equal', (['ys', 'z'], {}), True, 'import numpy as np\n'),
 (145, 'numpy.array_equal', 'np.array_equal', (['Xt', 'z'], {}), True, 'import numpy as np\n'),
 (159, 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), True, 'import numpy as np\n'),
 (167, 'numpy.array_equal', 'np.array_equal', (['Xt', 'X'], {}), True, 'import numpy as np\n'),
 (173, 'numpy.array_equal', 'np.array_equal', (['Xt', 'Xtt'], {}), True, 'import numpy as np\n'),
 (211, 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge\n'),
 (211, 'numpy.ones', 'np.ones', (['(10, 10)'], {}), True, 'import numpy as np\n'),
 (277, 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge\n'),
 (278, 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': '(10)', 'fit_intercept': '(False)', 'tol': '(0.1)'}), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge\n'),
 (279, 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': '(10)'}), False, 'from sklearn.tree import DecisionTreeClassifier\n'),
 (284, 'tensorflow.keras.wrappers.scikit_learn.KerasClassifier', 'KerasClassifier', (['_get_model_Sequential'], {'input_shape': '(1,)'}), False, 'from tensorflow.keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor\n'),
 (311, 'numpy.ones', 'np.ones', (['(10, 10)'], {}), True, 'import numpy as np\n'),
 (438, 'tensorflow.keras.backend.variable', 'K.variable', (['[1, 2, 3]'], {}), True, 'import tensorflow.keras.backend as K\n'),
 (30, 'numpy.array_equal', 'np.array_equal', (['v1', 'v2'], {}), True, 'import numpy as np\n'),
 (85, 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Reshape\n'),
 (88, 'tensorflow.keras.backend.mean', 'K.mean', (['output'], {}), True, 'import tensorflow.keras.backend as K\n'),
 (150, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (153, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (160, 'pytest.raises', 'pytest.raises', (['TypeError'], {}), False, 'import pytest\n'),
 (195, 'numpy.ones', 'np.ones', (['(10, 10)'], {}), True, 'import numpy as np\n'),
 (215, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (220, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (229, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (233, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (239, 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), False, 'import pytest\n'),
 (244, 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), False, 'import pytest\n'),
 (256, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (261, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (280, 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': '(0.01)'}), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge\n'),
 (282, 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': '(0.3)'}), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge\n'),
 (283, 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), False, 'from sklearn.preprocessing import StandardScaler\n'),
 (283, 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': '(0.2)'}), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge\n'),
 (315, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (320, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (337, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (341, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (347, 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), False, 'import pytest\n'),
 (351, 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), False, 'import pytest\n'),
 (365, 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), False, 'from sklearn.tree import DecisionTreeClassifier\n'),
 (368, 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge\n'),
 (372, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (378, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (384, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (390, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'),
 (440, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'),
 (98, 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'input_shape': 'input_shape'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Reshape\n'),
 (100, 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Reshape\n'),
 (281, 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': '(25)'}), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge\n'),
 (281, 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), False, 'from sklearn.preprocessing import StandardScaler\n'),
 (373, 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), False, 'from sklearn.tree import DecisionTreeClassifier\n'),
 (379, 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), False, 'from sklearn.tree import DecisionTreeClassifier\n'),
 (385, 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge\n'),
 (391, 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge\n'),
 (443, 'numpy.ones', 'np.ones', (['(3)'], {}), True, 'import numpy as np\n'),
 (293, 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), True, 'import numpy as np\n'),
 (296, 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), True, 'import numpy as np\n'),
 (297, 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), True, 'import numpy as np\n'),
 (294, 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), True, 'import numpy as np\n')]
tanzhenyu/keras-cv
b7208ee25735c492ccc171874e34076111dcf637
import os
import tensorflow as tf
from kerascv.data.voc_segmentation import voc_segmentation_dataset_from_directory


def eval_fcn(weights_path):
    batch_size = 20
    if os.path.exists(weights_path):
        eval_voc_ds_2012 = voc_segmentation_dataset_from_directory(
            split="val", batch_size=batch_size)
        strategy = tf.distribute.MirroredStrategy()
        with strategy.scope():
            iou_metric = tf.keras.metrics.MeanIoU(num_classes=21)
            model = tf.keras.models.load_model(weights_path, compile=False)
            y_pred = model.outputs[0]
            y_pred = tf.math.argmax(y_pred, axis=-1)
            inputs = model.inputs
            eval_model = tf.keras.Model(inputs, y_pred)
            eval_model.compile(metrics=[iou_metric])
            print('-------------------Start Evaluating {}-------------------'.format(weights_path))
            eval_model.evaluate(eval_voc_ds_2012)


if __name__ == "__main__":
    eval_fcn(os.path.join(os.getcwd(), 'fcn_32.hdf5'))
    eval_fcn(os.path.join(os.getcwd(), 'fcn_16.hdf5'))
    eval_fcn(os.path.join(os.getcwd(), 'fcn_8.hdf5'))
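The file above illustrates a common segmentation-evaluation pattern: `tf.keras.metrics.MeanIoU` expects integer class predictions, so an argmax head is appended to the trained model before compiling. A minimal reusable sketch of that pattern, where the checkpoint path and class count are placeholders:

import tensorflow as tf

def make_eval_model(weights_path, num_classes=21):
    # Load the trained segmentation model without restoring its compile state.
    model = tf.keras.models.load_model(weights_path, compile=False)
    # Collapse per-class logits to class indices so MeanIoU receives integer labels.
    y_pred = tf.math.argmax(model.outputs[0], axis=-1)
    eval_model = tf.keras.Model(model.inputs, y_pred)
    eval_model.compile(metrics=[tf.keras.metrics.MeanIoU(num_classes=num_classes)])
    return eval_model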
[ "tensorflow.math.argmax", "tensorflow.keras.models.load_model", "tensorflow.distribute.MirroredStrategy", "tensorflow.keras.Model", "tensorflow.keras.metrics.MeanIoU" ]
kerascv/examples/fcn_480_eval.py
[(8, 'os.path.exists', 'os.path.exists', (['weights_path'], {}), False, 'import os\n'), (9, 'kerascv.data.voc_segmentation.voc_segmentation_dataset_from_directory', 'voc_segmentation_dataset_from_directory', ([], {'split': '"""val"""', 'batch_size': 'batch_size'}), False, 'from kerascv.data.voc_segmentation import voc_segmentation_dataset_from_directory\n'), (11, 'tensorflow.distribute.MirroredStrategy', 'tf.distribute.MirroredStrategy', ([], {}), True, 'import tensorflow as tf\n'), (13, 'tensorflow.keras.metrics.MeanIoU', 'tf.keras.metrics.MeanIoU', ([], {'num_classes': '(21)'}), True, 'import tensorflow as tf\n'), (14, 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['weights_path'], {'compile': '(False)'}), True, 'import tensorflow as tf\n'), (16, 'tensorflow.math.argmax', 'tf.math.argmax', (['y_pred'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (18, 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'y_pred'], {}), True, 'import tensorflow as tf\n'), (25, 'os.getcwd', 'os.getcwd', ([], {}), False, 'import os\n'), (26, 'os.getcwd', 'os.getcwd', ([], {}), False, 'import os\n'), (27, 'os.getcwd', 'os.getcwd', ([], {}), False, 'import os\n')]
PML-UCF/pinn
dcedf25f7154dccd9872df735a19c1f9bcfca50c
# ______          _           _     _ _ _     _   _
# | ___ \        | |         | |   (_) (_)   | | (_)
# | |_/ / __ ___ | |__   __ _| |__  _| |_ ___| |_ _  ___
# |  __/ '__/ _ \| '_ \ / _` | '_ \| | | / __| __| |/ __|
# | |  | | | (_) | |_) | (_| | |_) | | | \__ \ |_| | (__
# \_|  |_|  \___/|_.__/ \__,_|_.__/|_|_|_|___/\__|_|\___|
#  ___  ___          _                 _
# |  \/  |         | |               (_)
# | .  . | ___  ___| |__   __ _ _ __  _  ___ ___
# | |\/| |/ _ \/ __| '_ \ / _` | '_ \| |/ __/ __|
# | |  | |  __/ (__| | | | (_| | | | | | (__\__ \
# \_|  |_/\___|\___|_| |_|\__,_|_| |_|_|\___|___/
#  _           _                     _
# | |         | |                   | |
# | |     __ _| |__   ___  _ __ __ _| |_ ___  _ __ _   _
# | |    / _` | '_ \ / _ \| '__/ _` | __/ _ \| '__| | | |
# | |___| (_| | |_) | (_) | | | (_| | || (_) | |  | |_| |
# \_____/\__,_|_.__/ \___/|_|  \__,_|\__\___/|_|   \__, |
#                                                   __/ |
#                                                  |___/
#
# MIT License
#
# Copyright (c) 2019 Probabilistic Mechanics Laboratory
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================

import numpy as np
import tensorflow as tf

from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Concatenate

from pinn.layers import inputsSelection, CumulativeDamageCell
from pinn.layers import StressIntensityRange, WalkerModel


# Model
def create_model(F, alpha, gamma, C0, m, d0RNN, batch_input_shape,
                 input_array, selectdK, selectprop, myDtype,
                 return_sequences=False, unroll=False):

    batch_adjusted_shape = (batch_input_shape[2]+1,)  #Adding state
    placeHolder = Input(shape=(batch_input_shape[2]+1,))  #Adding state

    filterdkLayer = inputsSelection(batch_adjusted_shape, selectdK)(placeHolder)

    filterdaLayer = inputsSelection(batch_adjusted_shape, selectprop)(placeHolder)

    dk_input_shape = filterdkLayer.get_shape()

    dkLayer = StressIntensityRange(input_shape = dk_input_shape, dtype = myDtype)
    dkLayer.build(input_shape = dk_input_shape)
    dkLayer.set_weights([np.asarray([F], dtype = dkLayer.dtype)])
    dkLayer.trainable = False
    dkLayer = dkLayer(filterdkLayer)

    wmInput = Concatenate(axis = -1)([dkLayer, filterdaLayer])
    wm_input_shape = wmInput.get_shape()

    wmLayer = WalkerModel(input_shape = wm_input_shape, dtype = myDtype)
    wmLayer.build(input_shape = wm_input_shape)
    wmLayer.set_weights([np.asarray([alpha, gamma, C0, m], dtype = wmLayer.dtype)])
    wmLayer.trainable = False
    wmLayer = wmLayer(wmInput)

    functionalModel = Model(inputs=[placeHolder], outputs=[wmLayer])
    "-------------------------------------------------------------------------"
    CDMCellHybrid = CumulativeDamageCell(model = functionalModel,
                                         batch_input_shape = batch_input_shape,
                                         dtype = myDtype,
                                         initial_damage = d0RNN)

    CDMRNNhybrid = tf.keras.layers.RNN(cell = CDMCellHybrid,
                                       return_sequences = return_sequences,
                                       return_state = False,
                                       batch_input_shape = batch_input_shape,
                                       unroll = unroll)

    model = tf.keras.Sequential()
    model.add(CDMRNNhybrid)
    model.compile(loss='mse', optimizer=tf.keras.optimizers.RMSprop(1e-12),
                  metrics=['mae'])
    return model
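A sketch of how `create_model` might be invoked. All shapes, material constants, and selection masks below are illustrative assumptions, not values taken from the repository:

import numpy as np

# Hypothetical setup: 10 crack-growth histories, 500 load cycles, 2 input features.
batch_input_shape = (10, 500, 2)
d0RNN = np.zeros((10, 1), dtype='float32')  # assumed initial damage (crack length) per history
model = create_model(F=1.0, alpha=0.5, gamma=0.6, C0=1e-10, m=3.0,
                     d0RNN=d0RNN, batch_input_shape=batch_input_shape,
                     input_array=None,   # placeholder; unused inside create_model
                     selectdK=[0],       # assumed mask: column 0 feeds StressIntensityRange
                     selectprop=[1],     # assumed mask: column 1 feeds WalkerModel
                     myDtype='float32')
model.summary()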
[ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.layers.RNN", "tensorflow.keras.models.Model", "numpy.asarray", "tensorflow.keras.optimizers.RMSprop", "tensorflow.keras.Sequential", "tensorflow.keras.layers.Input" ]
samples/cumulative_damage/propagation_walker_model/model.py
[(57, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(batch_input_shape[2] + 1,)'}), False, 'from tensorflow.keras.layers import Input, Concatenate\n'), (65, 'pinn.layers.StressIntensityRange', 'StressIntensityRange', ([], {'input_shape': 'dk_input_shape', 'dtype': 'myDtype'}), False, 'from pinn.layers import StressIntensityRange, WalkerModel\n'), (74, 'pinn.layers.WalkerModel', 'WalkerModel', ([], {'input_shape': 'wm_input_shape', 'dtype': 'myDtype'}), False, 'from pinn.layers import StressIntensityRange, WalkerModel\n'), (80, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[placeHolder]', 'outputs': '[wmLayer]'}), False, 'from tensorflow.keras.models import Model\n'), (82, 'pinn.layers.CumulativeDamageCell', 'CumulativeDamageCell', ([], {'model': 'functionalModel', 'batch_input_shape': 'batch_input_shape', 'dtype': 'myDtype', 'initial_damage': 'd0RNN'}), False, 'from pinn.layers import inputsSelection, CumulativeDamageCell\n'), (87, 'tensorflow.keras.layers.RNN', 'tf.keras.layers.RNN', ([], {'cell': 'CDMCellHybrid', 'return_sequences': 'return_sequences', 'return_state': '(False)', 'batch_input_shape': 'batch_input_shape', 'unroll': 'unroll'}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (59, 'pinn.layers.inputsSelection', 'inputsSelection', (['batch_adjusted_shape', 'selectdK'], {}), False, 'from pinn.layers import inputsSelection, CumulativeDamageCell\n'), (61, 'pinn.layers.inputsSelection', 'inputsSelection', (['batch_adjusted_shape', 'selectprop'], {}), False, 'from pinn.layers import inputsSelection, CumulativeDamageCell\n'), (71, 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), False, 'from tensorflow.keras.layers import Input, Concatenate\n'), (67, 'numpy.asarray', 'np.asarray', (['[F]'], {'dtype': 'dkLayer.dtype'}), True, 'import numpy as np\n'), (76, 'numpy.asarray', 'np.asarray', (['[alpha, gamma, C0, m]'], {'dtype': 'wmLayer.dtype'}), True, 'import numpy as np\n'), (95, 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', (['(1e-12)'], {}), True, 'import tensorflow as tf\n')]
SkyLord2/ResNetByTFKeras
70d00ef1c559d7d5ab895d3dfce6074fd316793b
# -*- coding: utf-8 -*-
# @Time : 2020/10/22 20:14
# @Author : cds
# @Site : https://github.com/SkyLord2?tab=repositories
# @Email: [email protected]
# @File : resnet.py
# @Software: PyCharm

from tensorflow.keras import layers,Model,Sequential

class BasicBlock(layers.Layer):
    expansion=1
    def __init__(self,out_channel,strides=1,downsample=None,**kwargs):
        super(BasicBlock,self).__init__(**kwargs)
        self.conv1 = layers.Conv2D(out_channel,kernel_size=3,strides=strides,padding="SAME",use_bias=False)
        self.bn1 = layers.BatchNormalization(momentum=0.9, epsilon=1e-5)
        self.conv2 = layers.Conv2D(out_channel,kernel_size=3,strides=1,padding="SAME",use_bias=False)
        self.bn2 = layers.BatchNormalization(momentum=0.9,epsilon=1e-5)
        # downsampling function
        self.downsample = downsample
        self.relu = layers.ReLU()
        self.add = layers.Add()
    def call(self,inputs,training=False):
        identify = inputs
        if(self.downsample is not None):
            identify = self.downsample(inputs)
        x = self.conv1(inputs)
        x = self.bn1(x,training=training)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x,training=training)
        x = self.add([identify,x])
        x = self.relu(x)
        return x

class Bottleneck(layers.Layer):
    expansion = 4
    def __init__(self,out_channel,strides=1,downsample=None,**kwargs):
        super(Bottleneck,self).__init__(**kwargs)
        self.conv1 = layers.Conv2D(out_channel,kernel_size=1,use_bias=False,name="conv1")
        self.bn1 = layers.BatchNormalization(momentum=0.9,epsilon=1e-5, name="conv1/BatchNorm")
        self.conv2 = layers.Conv2D(out_channel,kernel_size=3,strides=strides,padding="SAME",use_bias=False,name="conv2")
        self.bn2 = layers.BatchNormalization(momentum=0.9,epsilon=1e-5,name="conv2/BatchNorm")
        self.conv3 = layers.Conv2D(out_channel*self.expansion,kernel_size=1,use_bias=False,name="conv3")
        self.bn3 = layers.BatchNormalization(momentum=0.9,epsilon=1e-5,name="conv3/BatchNorm")
        self.relu = layers.ReLU()
        self.downsample = downsample
        self.add = layers.Add()
    def call(self,inputs,training=False):
        identity = inputs
        if(self.downsample is not None):
            identity = self.downsample(inputs)
        x = self.conv1(inputs)
        x = self.bn1(x,training=training)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x,training=training)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x,training=training)
        x = self.add([identity,x])
        x = self.relu(x)
        return x

def _make_layer(block,in_channel,channel,block_num,name,strides=1):
    downsample = None
    if(strides!=1 or in_channel != channel*block.expansion):
        downsample = Sequential([
            #layers.Conv2D(channel*block.expansion,kernel_size=1,padding="SAME",use_bias=False,name="conv1"),
            layers.Conv2D(channel*block.expansion,kernel_size=1,strides=strides,use_bias=False,name="conv1"),
            layers.BatchNormalization(momentum=0.9,epsilon=1.001e-5,name="BatchNorm")],name="shortcut")
    layer_list = []
    layer_list.append(block(channel,strides,downsample,name="unit_1"))
    for index in range(1,block_num):
        layer_list.append(block(channel,name="unit_"+str(index+1)))
    return Sequential(layer_list,name=name)

def _resnet(block,blocks_num,im_width=224,im_height=224,channel=3,num_classes=1000,include_top=True):
    input_image = layers.Input(shape=(im_height,im_width,channel),dtype="float32")
    x = layers.Conv2D(filters=64,kernel_size=7,strides=2,padding="SAME",use_bias=False,name="conv1")(input_image)
    x = layers.BatchNormalization(momentum=0.9,epsilon=1e-5,name="conv1/BatchNorm")(x)
    x = layers.ReLU()(x)
    x = layers.MaxPool2D(pool_size=3,strides=2,padding="SAME")(x)
    print("-----------------------------block_1-------------------------------------")
    print("\ndata shape:", x.shape)
    x = _make_layer(block, x.shape[-1], 64, blocks_num[0], name="block_1")(x)
    print("-----------------------------block_2-------------------------------------")
    print("\ndata shape:", x.shape)
    x = _make_layer(block, x.shape[-1], 128, blocks_num[1], strides=2, name="block_2")(x)
    print("-----------------------------block_3-------------------------------------")
    print("\ndata shape:", x.shape)
    x = _make_layer(block, x.shape[-1], 256, blocks_num[2], strides=2, name="block_3")(x)
    print("-----------------------------block_4-------------------------------------")
    print("\ndata shape:", x.shape)
    x = _make_layer(block, x.shape[-1], 512, blocks_num[3], strides=2, name="block_4")(x)
    if(include_top):
        x = layers.GlobalAvgPool2D()(x)
        x = layers.Dense(num_classes,name="logits")(x)
        predict = layers.Softmax()(x)
    else:
        predict = x
    model = Model(inputs=input_image,outputs=predict)
    return model

def resnet18(im_width=224,im_height=224,channel=3,num_classes=1000):
    return _resnet(BasicBlock,[2,2,2,2],im_width, im_height,channel,num_classes)
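A quick smoke test of the builder above; the batch is random data, used only to confirm that the network builds and produces the expected output shape:

import numpy as np

model = resnet18(im_width=224, im_height=224, channel=3, num_classes=5)
model.summary()
out = model(np.random.rand(1, 224, 224, 3).astype("float32"), training=False)
print(out.shape)  # expected: (1, 5)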
[ "tensorflow.keras.layers.ReLU", "tensorflow.keras.layers.GlobalAvgPool2D", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.Sequential", "tensorflow.keras.layers.MaxPool2D", "tensorflow.keras.Model", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.Softmax", "tensorflow.keras.layers.Input" ]
resnet.py
[(93, 'tensorflow.keras.Sequential', 'Sequential', (['layer_list'], {'name': 'name'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (96, 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': '(im_height, im_width, channel)', 'dtype': '"""float32"""'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (121, 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'input_image', 'outputs': 'predict'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (16, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['out_channel'], {'kernel_size': '(3)', 'strides': 'strides', 'padding': '"""SAME"""', 'use_bias': '(False)'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (17, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {'momentum': '(0.9)', 'epsilon': '(1e-05)'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (19, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['out_channel'], {'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""SAME"""', 'use_bias': '(False)'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (20, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {'momentum': '(0.9)', 'epsilon': '(1e-05)'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (24, 'tensorflow.keras.layers.ReLU', 'layers.ReLU', ([], {}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (25, 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (46, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['out_channel'], {'kernel_size': '(1)', 'use_bias': '(False)', 'name': '"""conv1"""'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (47, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {'momentum': '(0.9)', 'epsilon': '(1e-05)', 'name': '"""conv1/BatchNorm"""'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (49, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['out_channel'], {'kernel_size': '(3)', 'strides': 'strides', 'padding': '"""SAME"""', 'use_bias': '(False)', 'name': '"""conv2"""'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (50, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {'momentum': '(0.9)', 'epsilon': '(1e-05)', 'name': '"""conv2/BatchNorm"""'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (52, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(out_channel * self.expansion)'], {'kernel_size': '(1)', 'use_bias': '(False)', 'name': '"""conv3"""'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (53, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {'momentum': '(0.9)', 'epsilon': '(1e-05)', 'name': '"""conv3/BatchNorm"""'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (55, 'tensorflow.keras.layers.ReLU', 'layers.ReLU', ([], {}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (57, 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (97, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(7)', 'strides': '(2)', 'padding': '"""SAME"""', 'use_bias': '(False)', 'name': '"""conv1"""'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (98, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {'momentum': '(0.9)', 'epsilon': '(1e-05)', 'name': '"""conv1/BatchNorm"""'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (99, 'tensorflow.keras.layers.ReLU', 'layers.ReLU', ([], {}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (100, 'tensorflow.keras.layers.MaxPool2D', 'layers.MaxPool2D', ([], {'pool_size': '(3)', 'strides': '(2)', 'padding': '"""SAME"""'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (116, 'tensorflow.keras.layers.GlobalAvgPool2D', 'layers.GlobalAvgPool2D', ([], {}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (117, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['num_classes'], {'name': '"""logits"""'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (118, 'tensorflow.keras.layers.Softmax', 'layers.Softmax', ([], {}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (84, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(channel * block.expansion)'], {'kernel_size': '(1)', 'strides': 'strides', 'use_bias': '(False)', 'name': '"""conv1"""'}), False, 'from tensorflow.keras import layers, Model, Sequential\n'),
 (85, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {'momentum': '(0.9)', 'epsilon': '(1.001e-05)', 'name': '"""BatchNorm"""'}), False, 'from tensorflow.keras import layers, Model, Sequential\n')]
nhatminh46vn/transformers
912f6881d2b69f180522172a5283702bd8c41d9c
# coding=utf-8
# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF BART model, ported from the fairseq repo."""

import math
import random
import warnings
from typing import Dict, Optional, Tuple, Union

import numpy as np
import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from ...modeling_tf_outputs import (
    TFBaseModelOutput,
    TFBaseModelOutputWithPast,
    TFSeq2SeqLMOutput,
    TFSeq2SeqModelOutput,
)

# Public API
from ...modeling_tf_utils import (
    DUMMY_INPUTS,
    TFPreTrainedModel,
    TFSharedEmbeddings,
    TFWrappedEmbeddings,
    input_processing,
    keras_serializable,
    shape_list,
)
from ...utils import logging
from .configuration_bart import BartConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "BartConfig"
_TOKENIZER_FOR_DOC = "BartTokenizer"

LARGE_NEGATIVE = -1e8


def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, eos_token_id: int):
    shifted_input_ids = tf.cast(input_ids, tf.int32)
    shifted_input_ids = tf.roll(shifted_input_ids, 1, axis=-1)
    start_tokens = tf.fill((shape_list(shifted_input_ids)[0], 1), eos_token_id)
    shifted_input_ids = tf.concat([start_tokens, shifted_input_ids[:, 1:]], -1)
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = tf.where(
        shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
    )

    # "Verify that `labels` has only positive values and -100"
    assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.cast(0, tf.int32))

    # Make sure the assertion op is called by wrapping the result in an identity no-op
    with tf.control_dependencies([assert_gte0]):
        shifted_input_ids = tf.identity(shifted_input_ids)

    return shifted_input_ids


def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = tf.ones((tgt_len, tgt_len), dtype=tf.float32) * LARGE_NEGATIVE
    mask_cond = tf.range(shape_list(mask)[-1])

    mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
    mask = tf.cast(mask, tf.float32)

    if past_key_values_length > 0:
        mask = tf.concat([tf.zeros((tgt_len, past_key_values_length), dtype=tf.float32), mask], axis=-1)
    return tf.broadcast_to(mask[None, None, :, :], (bsz, 1, tgt_len, tgt_len + past_key_values_length))


def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = shape_list(mask)
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = tf.cast(tf.broadcast_to(mask[:, None, None, :], (bsz, 1, tgt_len, src_len)), tf.float32)

    return (1.0 - expanded_mask) * LARGE_NEGATIVE


class TFBartLearnedPositionalEmbedding(TFSharedEmbeddings):
    """
    This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting
    based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed
    to the forward function.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, offset, **kwargs):
        # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
        # and adjust num_embeddings appropriately. Other models dont have this hack
        self.offset = offset
        assert padding_idx is not None, "padding_idx cannot be None"
        num_embeddings += offset
        super().__init__(num_embeddings, embedding_dim, **kwargs)

    def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):
        """Input is expected to be of size [bsz x seqlen]."""
        bsz, seq_len = input_shape[:2]

        positions = tf.range(
            past_key_values_length, seq_len + past_key_values_length, delta=1, dtype=tf.int32, name="range"
        )
        return super().call(positions + self.offset)  # super object is not callable for some reason


class TFBartSinusoidalPositionalEmbedding(tf.keras.layers.Embedding):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, **kwargs):

        if embedding_dim % 2 != 0:
            raise NotImplementedError(f"odd embedding_dim {embedding_dim} not supported")

        super().__init__(
            num_positions,
            embedding_dim,
            **kwargs,
        )

    def build(self, input_shape: tf.TensorShape):
        """
        Build shared token embedding layer Shared weights logic adapted from
        https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
        """
        super().build(input_shape)  # Instantiates self.weight so it can be loaded
        weight: np.ndarray = self._init_weight(self.input_dim, self.output_dim)
        self.set_weights([weight])  # overwrite self.weight to correct value

    @staticmethod
    def _init_weight(n_pos: int, dim: int):
        """
        Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
        the 2nd half of the vector. [dim // 2:]
        """
        position_enc = np.array(
            [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
        )
        # index 0 is all zero
        position_enc[:, 0 : dim // 2] = np.sin(position_enc[:, 0::2])
        position_enc[:, dim // 2 :] = np.cos(position_enc[:, 1::2])
        # convert to tensor
        table = tf.convert_to_tensor(position_enc, dtype=tf.float32)
        tf.stop_gradient(table)
        return table

    def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):
        """Input is expected to be of size [bsz x seqlen]."""
        bsz, seq_len = input_shape[:2]

        positions = tf.range(
            past_key_values_length, seq_len + past_key_values_length, delta=1, dtype=tf.int32, name="range"
        )
        return super().call(positions)


class TFBartAttention(tf.keras.layers.Layer):
    """Multi-headed attention from "Attention Is All You Need"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim

        self.num_heads = num_heads
        self.dropout = tf.keras.layers.Dropout(dropout)
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5
        self.is_decoder = is_decoder

        self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
        self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
        self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
        self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")

    def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
        return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))

    def call(
        self,
        hidden_states: tf.Tensor,
        key_value_states: Optional[tf.Tensor] = None,
        past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,
        attention_mask: Optional[tf.Tensor] = None,
        training=False,
    ) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        bsz, tgt_len, embed_dim = shape_list(hidden_states)

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = tf.concat([past_key_value[0], key_states], axis=2)
            value_states = tf.concat([past_key_value[1], value_states], axis=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
        key_states = tf.reshape(key_states, proj_shape)
        value_states = tf.reshape(value_states, proj_shape)

        src_len = shape_list(key_states)[1]
        attn_weights = tf.matmul(query_states, key_states, transpose_b=True)

        tf.debugging.assert_equal(
            shape_list(attn_weights),
            [bsz * self.num_heads, tgt_len, src_len],
            message=f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}",
        )

        if attention_mask is not None:
            tf.debugging.assert_equal(
                shape_list(attention_mask),
                [bsz, 1, tgt_len, src_len],
                message=f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}",
            )

            attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
            attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))

        attn_weights = tf.nn.softmax(attn_weights, axis=-1)

        attn_probs = self.dropout(attn_weights, training=training)

        attn_output = tf.matmul(attn_probs, value_states)

        tf.debugging.assert_equal(
            shape_list(attn_output),
            [bsz * self.num_heads, tgt_len, self.head_dim],
            message=f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}",
        )

        attn_output = tf.transpose(
            tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
        )
        attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))

        attn_output = self.out_proj(attn_output)
        attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))

        return attn_output, attn_weights, past_key_value


class TFBartEncoderLayer(tf.keras.layers.Layer):
    def __init__(self, config: BartConfig, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = config.d_model
        self.self_attn = TFBartAttention(
            self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
        )
        self.normalize_before = config.normalize_before
        self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
        self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
        self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
        self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")

    def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, training=False):
        """
        Args:
            hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
            attention_mask (:obj:`tf.Tensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
        """
        residual = hidden_states
        if self.normalize_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, self_attn_weights, _ = self.self_attn(
            hidden_states=hidden_states, attention_mask=attention_mask
        )

        tf.debugging.assert_equal(
            shape_list(hidden_states),
            shape_list(residual),
            message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
        )

        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states
        if not self.normalize_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)

        residual = hidden_states
        if self.normalize_before:
            hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = self.activation_dropout(hidden_states, training=training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states
        if not self.normalize_before:
            hidden_states = self.final_layer_norm(hidden_states)

        return hidden_states, self_attn_weights


class TFBartDecoderLayer(tf.keras.layers.Layer):
    def __init__(self, config: BartConfig, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = config.d_model
        self.self_attn = TFBartAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            name="self_attn",
            is_decoder=True,
        )
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
        self.normalize_before = config.normalize_before

        self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
        self.encoder_attn = TFBartAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            name="encoder_attn",
            is_decoder=True,
        )
        self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
        self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
        self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
        self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")

    def call(
        self,
        hidden_states,
        attention_mask: Optional[tf.Tensor] = None,
        encoder_hidden_states: Optional[tf.Tensor] = None,
        encoder_attention_mask: Optional[tf.Tensor] = None,
        past_key_value: Optional[Tuple[tf.Tensor]] = None,
        training=False,
    ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
        """
        Args:
            hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
            attention_mask (:obj:`tf.Tensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (:obj:`tf.Tensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_attention_mask (:obj:`tf.Tensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            past_key_value (:obj:`Tuple(tf.Tensor)`): cached past key and value projection states
        """
        residual = hidden_states
        if self.normalize_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
        )
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states
        if not self.normalize_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)

        # Cross-Attention Block
        cross_attn_present_key_value = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            if self.normalize_before:
                hidden_states = self.encoder_attn_layer_norm(hidden_states)

            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, _, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                past_key_value=cross_attn_past_key_value,
            )
            hidden_states = self.dropout(hidden_states, training=training)
            hidden_states = residual + hidden_states
            if not self.normalize_before:
                hidden_states = self.encoder_attn_layer_norm(hidden_states)

            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value

        # Fully Connected
        residual = hidden_states
        if self.normalize_before:
            hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = self.activation_dropout(hidden_states, training=training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states
        if not self.normalize_before:
            hidden_states = self.final_layer_norm(hidden_states)

        return (
            hidden_states,
            self_attn_weights,
            present_key_value,
        )


class TFBartPretrainedModel(TFPreTrainedModel):
    config_class = BartConfig
    base_model_prefix = "model"

    @property
    def dummy_inputs(self):
        pad_token = 1
        input_ids = tf.cast(tf.constant(DUMMY_INPUTS), tf.int32)
        decoder_input_ids = tf.cast(tf.constant(DUMMY_INPUTS), tf.int32)
        dummy_inputs = {
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": tf.math.not_equal(input_ids, pad_token),
            "input_ids": input_ids,
        }
        return dummy_inputs


class TFPretrainedBartModel(TFBartPretrainedModel):
    def __init_subclass__(self):
        warnings.warn(
            "The class `TFPretrainedBartModel` has been deprecated, please use `TFBartPretrainedModel` instead.",
            FutureWarning,
        )


BART_START_DOCSTRING = r"""
    This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the
    generic methods the library implements for all its model (such as downloading or saving, resizing the input
    embeddings, pruning heads etc.)

    This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
    it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage
    and behavior.

    .. note::

        TF 2.0 models accepts two formats as inputs:

        - having all inputs as keyword arguments (like PyTorch models), or
        - having all inputs as a list, tuple or dict in the first positional arguments.

        This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all
        the tensors in the first argument of the model call function: :obj:`model(inputs)`.

        If you choose this second option, there are three possibilities you can use to gather all the input Tensors
        in the first positional argument :

        - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(input_ids)`
        - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
          :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
        - a dictionary with one or several input Tensors associated to the input names given in the docstring:
          :obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Args:
        config (:class:`~transformers.BartConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the :meth:`~transformers.TFPreTrainedModel.from_pretrained` method to load the
            model weights.
"""

BART_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (:obj:`tf.Tensor` of shape :obj:`({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using :class:`~transformers.BertTokenizer`. See
            :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
            details.

            `What are input IDs? <../glossary.html#input-ids>`__
        attention_mask (:obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
            Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            `What are attention masks? <../glossary.html#attention-mask>`__
        decoder_input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
            Provide for translation and summarization training. By default, the model will create this tensor by
            shifting the input_ids right, following the paper.
        decoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`):
            will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.
        encoder_outputs (:obj:`tf.FloatTensor`, `optional`):
            hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
            of shape :obj:`(batch_size, sequence_length, hidden_size)` is a sequence of
        past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers`)
            contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
            decoding. If :obj:`past_key_values` are used, the user can optionally input only the last
            :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape
            :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
            sequence_length)`.
        use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`). Set to :obj:`False` during training, :obj:`True` during generation
        output_attentions (:obj:`bool`, `optional`):
            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
            returned tensors for more detail.
        output_hidden_states (:obj:`bool`, `optional`):
            Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
            for more detail.
        return_dict (:obj:`bool`, `optional`):
            Whether or not to return a :class:`~transformers.file_utils.TFModelOutput` instead of a plain tuple.
        training (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""


@keras_serializable
class TFBartEncoder(tf.keras.layers.Layer):
    config_class = BartConfig
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    :class:`TFBartEncoderLayer`.

    Args:
        config: BartConfig
    """

    def __init__(self, config: BartConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.layerdrop = config.encoder_layerdrop
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings

        self.embed_tokens = embed_tokens
        if config.static_position_embeddings:
            self.embed_positions = TFBartSinusoidalPositionalEmbedding(
                config.max_position_embeddings,
                config.d_model,
                name="embed_positions",
            )
        else:
            self.embed_positions = TFBartLearnedPositionalEmbedding(
                config.max_position_embeddings,
                config.d_model,
                self.padding_idx,
                config.extra_pos_embeddings,
                name="embed_positions",
            )
        self.layers = [TFBartEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
        self.layernorm_embedding = (
            tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
            if config.normalize_embedding
            else tf.keras.layers.Layer()
        )
        self.layer_norm = (
            tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
            if config.add_final_layer_norm
            else None
        )

    def call(
        self,
        input_ids=None,
        inputs_embeds=None,
        attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs,
    ):
        """
        Args:
            input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using :class:`~transformers.BartTokenizer`. See
                :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
                for details.

                `What are input IDs? <../glossary.html#input-ids>`__
            attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
                Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                `What are attention masks? <../glossary.html#attention-mask>`__
            inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
                Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
                representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
                into associated vectors than the model's internal embedding lookup matrix.
            output_attentions (:obj:`bool`, `optional`):
                Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
                returned tensors for more detail.
            output_hidden_states (:obj:`bool`, `optional`):
                Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned
                tensors for more detail.
            return_dict (:obj:`bool`, `optional`):
                Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
        """
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
            kwargs_call=kwargs,
        )

        if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif inputs["input_ids"] is not None:
            input_shape = shape_list(inputs["input_ids"])
        elif inputs["inputs_embeds"] is not None:
            input_shape = shape_list(inputs["inputs_embeds"])[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs["inputs_embeds"] is None:
            inputs["inputs_embeds"] = self.embed_tokens(inputs["input_ids"])
        else:
            inputs["inputs_embeds"] = inputs["inputs_embeds"]

        inputs["inputs_embeds"] = inputs["inputs_embeds"] * self.embed_scale

        embed_pos = self.embed_positions(input_shape)
        hidden_states = inputs["inputs_embeds"] + embed_pos
        hidden_states = self.layernorm_embedding(hidden_states)
        hidden_states = self.dropout(hidden_states, training=inputs["training"])

        # check attention mask and invert
        if inputs["attention_mask"] is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            inputs["attention_mask"] = _expand_mask(inputs["attention_mask"])

        encoder_states = () if inputs["output_hidden_states"] else None
        all_attentions = () if inputs["output_attentions"] else None

        # encoder layers
        for encoder_layer in self.layers:

            if inputs["output_hidden_states"]:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if inputs["training"] and (dropout_probability < self.layerdrop):  # skip the layer
                continue

            hidden_states, attn = encoder_layer(hidden_states, inputs["attention_mask"])

            if inputs["output_attentions"]:
                all_attentions += (attn,)
        if self.layer_norm:
            hidden_states = self.layer_norm(hidden_states)
        if inputs["output_hidden_states"]:
            encoder_states = encoder_states + (hidden_states,)

        if not inputs["return_dict"]:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return TFBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


@keras_serializable
class TFBartDecoder(tf.keras.layers.Layer):
    config_class = BartConfig
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`TFBartDecoderLayer`

    Args:
        config: BartConfig
        embed_tokens: output embedding
    """

    def __init__(self, config: BartConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.padding_idx = config.pad_token_id
        self.embed_tokens = embed_tokens
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        self.layerdrop = config.decoder_layerdrop
        if config.static_position_embeddings:
            self.embed_positions = TFBartSinusoidalPositionalEmbedding(
                config.max_position_embeddings,
                config.d_model,
                name="embed_positions",
            )
        else:
            self.embed_positions = TFBartLearnedPositionalEmbedding(
                config.max_position_embeddings,
                config.d_model,
                self.padding_idx,
                config.extra_pos_embeddings,
                name="embed_positions",
            )
        self.layers = [TFBartDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
        self.layernorm_embedding = (
            tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
            if config.normalize_embedding
            else tf.keras.layers.Layer()
        )
        self.layer_norm = (
            tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
            if config.add_final_layer_norm
            else None
        )

        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.do_blenderbot_90_layernorm = config.do_blenderbot_90_layernorm

    def call(
        self,
        input_ids=None,
        inputs_embeds=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs,
    ):
        r"""
        Args:
            input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using :class:`~transformers.BartTokenizer`. See
                :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
                for details.

                `What are input IDs? <../glossary.html#input-ids>`__
            attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
                Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                `What are attention masks? <../glossary.html#attention-mask>`__
            encoder_hidden_states (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in ``[0, 1]``:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                `What are attention masks? <../glossary.html#attention-mask>`__
            past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2
                tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
                Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
                decoding. If :obj:`past_key_values` are used, the user can optionally input only the last
                :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
                shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size,
                sequence_length)`.
            inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
                Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
                representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
                into associated vectors than the model's internal embedding lookup matrix.
            output_attentions (:obj:`bool`, `optional`):
                Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
                returned tensors for more detail.
            output_hidden_states (:obj:`bool`, `optional`):
                Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned
                tensors for more detail.
            return_dict (:obj:`bool`, `optional`):
                Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
        """
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
            kwargs_call=kwargs,
        )

        if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif inputs["input_ids"] is not None:
            input_shape = shape_list(inputs["input_ids"])
        elif inputs["inputs_embeds"] is not None:
            input_shape = shape_list(inputs["inputs_embeds"])[:-1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        past_key_values_length = (
            inputs["past_key_values"][0][0].shape[2] if inputs["past_key_values"] is not None else 0
        )

        # embed positions
        positions = self.embed_positions(input_shape, past_key_values_length)

        if inputs["inputs_embeds"] is None:
            inputs["inputs_embeds"] = self.embed_tokens(inputs["input_ids"])

        hidden_states = inputs["inputs_embeds"] * self.embed_scale

        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
        else:
            combined_attention_mask = _expand_mask(
                tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
            )

        if inputs["attention_mask"] is None and inputs["input_ids"] is not None and input_shape[-1] > 1:
            inputs["attention_mask"] = tf.cast(
                tf.math.not_equal(inputs["input_ids"], self.config.pad_token_id), inputs["input_ids"].dtype
            )
            inputs["attention_mask"] = tf.concat(
                [
                    tf.ones((input_shape[0], past_key_values_length), dtype=inputs["attention_mask"].dtype),
                    inputs["attention_mask"],
                ],
                axis=-1,
            )
        else:
            inputs["attention_mask"] = tf.ones(
                (input_shape[0], input_shape[1] + past_key_values_length), dtype=tf.int32
            )

        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = combined_attention_mask + _expand_mask(
            inputs["attention_mask"], tgt_len=input_shape[-1]
        )

        if inputs["encoder_hidden_states"] is not None and inputs["encoder_attention_mask"] is not None:
            # [bsz, seq_len] -> [bsz, 1,
tgt_seq_len, src_seq_len] inputs["encoder_attention_mask"] = _expand_mask(inputs["encoder_attention_mask"], tgt_len=input_shape[-1]) if self.do_blenderbot_90_layernorm: hidden_states = self.layernorm_embedding(hidden_states) + positions else: hidden_states = self.layernorm_embedding(hidden_states + positions) hidden_states = self.dropout(hidden_states, training=inputs["training"]) # decoder layers all_hidden_states = () all_self_attns = () present_key_values = () for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if inputs["output_hidden_states"]: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if inputs["training"] and (dropout_probability < self.layerdrop): continue past_key_value = inputs["past_key_values"][idx] if inputs["past_key_values"] is not None else None hidden_states, layer_self_attn, present_key_value = decoder_layer( hidden_states, attention_mask=combined_attention_mask, encoder_hidden_states=inputs["encoder_hidden_states"], encoder_attention_mask=inputs["encoder_attention_mask"], past_key_value=past_key_value, ) if inputs["use_cache"]: present_key_values += (present_key_value,) if inputs["output_attentions"]: all_self_attns += (layer_self_attn,) if self.layer_norm is not None: # same as if config.add_final_layer_norm hidden_states = self.layer_norm(hidden_states) # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim) if inputs["output_hidden_states"]: all_hidden_states += (hidden_states,) else: all_hidden_states = None all_self_attns = list(all_self_attns) if inputs["output_attentions"] else None present_key_values = (inputs["encoder_hidden_states"], present_key_values) if inputs["use_cache"] else None if not inputs["return_dict"]: return hidden_states, present_key_values, all_hidden_states, all_self_attns else: return TFBaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) @add_start_docstrings( "The bare BART Model outputting raw hidden-states without any specific head on top.", BART_START_DOCSTRING, ) @keras_serializable class TFBartModel(TFBartPretrainedModel): base_model_prefix = "model" def __init__(self, config: BartConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.shared = TFSharedEmbeddings(config.vocab_size, config.d_model, config.pad_token_id, name="model.shared") with tf.compat.v1.variable_scope("model.shared") as shared_abs_scope_name: pass # Wraps layer to avoid problems with weight restoring and ensuring we're in the correct TF scope. 
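        # The empty `variable_scope("model.shared")` block above is a trick to
        # capture an absolute scope object at construction time; TFWrappedEmbeddings
        # re-enters that scope whenever the embeddings are called, so the shared
        # weights keep the fully-qualified name "model.shared" no matter whether
        # the encoder or the decoder triggers their creation.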
embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name) embed_tokens.vocab_size = self.shared.vocab_size embed_tokens.hidden_size = self.shared.hidden_size self.encoder = TFBartEncoder(config, embed_tokens, name="encoder") self.decoder = TFBartDecoder(config, embed_tokens, name="decoder") def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="facebook/bart-large", output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs ): inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, kwargs_call=kwargs, ) if inputs["decoder_input_ids"] is None and inputs["decoder_inputs_embeds"] is None: inputs["use_cache"] = False inputs["output_hidden_states"] = ( inputs["output_hidden_states"] if inputs["output_hidden_states"] is not None else self.config.output_hidden_states ) if inputs["decoder_input_ids"] is None and inputs["input_ids"] is not None: inputs["decoder_input_ids"] = shift_tokens_right( inputs["input_ids"], self.config.pad_token_id, self.config.eos_token_id ) if inputs["encoder_outputs"] is None: inputs["encoder_outputs"] = self.encoder( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], inputs_embeds=inputs["inputs_embeds"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True elif inputs["return_dict"] and not isinstance(inputs["encoder_outputs"], TFBaseModelOutput): inputs["encoder_outputs"] = TFBaseModelOutput( last_hidden_state=inputs["encoder_outputs"][0], hidden_states=inputs["encoder_outputs"][1] if len(inputs["encoder_outputs"]) > 1 else None, attentions=inputs["encoder_outputs"][2] if len(inputs["encoder_outputs"]) > 2 else None, ) # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False elif not inputs["return_dict"] and not isinstance(inputs["encoder_outputs"], tuple): inputs["encoder_outputs"] = inputs["encoder_outputs"].to_tuple() decoder_outputs = self.decoder( inputs["decoder_input_ids"], attention_mask=inputs["decoder_attention_mask"], encoder_hidden_states=inputs["encoder_outputs"][0], encoder_attention_mask=inputs["attention_mask"], past_key_values=inputs["past_key_values"], inputs_embeds=inputs["decoder_inputs_embeds"], use_cache=inputs["use_cache"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) if not 
inputs["return_dict"]: return decoder_outputs + inputs["encoder_outputs"] return TFSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, encoder_last_hidden_state=inputs["encoder_outputs"].last_hidden_state, encoder_hidden_states=inputs["encoder_outputs"].hidden_states, encoder_attentions=inputs["encoder_outputs"].attentions, ) def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value def get_output_embeddings(self): return self.shared @add_start_docstrings( "The BART Model with a language modeling head. Can be used for summarization.", BART_START_DOCSTRING, ) class TFBartForConditionalGeneration(TFBartPretrainedModel): _keys_to_ignore_on_load_unexpected = [ r"model.encoder.embed_tokens.weight", r"model.decoder.embed_tokens.weight", ] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFBartModel(config, name="model") self.use_cache = config.use_cache # final_bias_logits is registered as a buffer in pytorch, so not trainable for the the sake of consistency. self.final_logits_bias = self.add_weight( name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False ) def get_decoder(self): return self.model.decoder def resize_token_embeddings(self, new_num_tokens): super().resize_token_embeddings(new_num_tokens=new_num_tokens) # BART is a special case where the bias has two dimensions # and not named just `bias` if new_num_tokens is not None: num_tokens_to_copy = min(self.final_logits_bias.shape[0], new_num_tokens) init_bias = tf.zeros((new_num_tokens,)) init_bias[:num_tokens_to_copy] = self.final_logits_bias.value()[:num_tokens_to_copy] self.final_logits_bias = self.add_weight( shape=(1, new_num_tokens), initializer="zeros", trainable=False, name="final_logits_bias", ) self.final_logits_bias.assign(init_bias) @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs: Optional[TFBaseModelOutput] = None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs, ): """ Returns: Examples:: # Mask filling only works for bart-large from transformers import BartTokenizer, TFBartForConditionalGeneration import tensorflow as tf mname = 'facebook/bart-large' tokenizer = BartTokenizer.from_pretrained(mname) TXT = "My friends are <mask> but they eat too many carbs." 
model = TFBartForConditionalGeneration.from_pretrained(mname) batch = tokenizer([TXT], return_tensors='tf') logits = model(inputs=batch.input_ids).logits probs = tf.nn.softmax(logits[0]) # probs[5] is associated with the mask token """ inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs, ) if inputs["labels"] is not None: inputs["use_cache"] = False if inputs["decoder_input_ids"] is None: inputs["decoder_input_ids"] = shift_tokens_right( inputs["labels"], self.config.pad_token_id, self.config.eos_token_id ) outputs = self.model( inputs["input_ids"], attention_mask=inputs["attention_mask"], decoder_input_ids=inputs["decoder_input_ids"], encoder_outputs=inputs["encoder_outputs"], decoder_attention_mask=inputs["decoder_attention_mask"], past_key_values=inputs["past_key_values"], inputs_embeds=inputs["inputs_embeds"], decoder_inputs_embeds=inputs["decoder_inputs_embeds"], use_cache=inputs["use_cache"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) lm_logits = self.model.shared(outputs[0], mode="linear") lm_logits = lm_logits + self.final_logits_bias masked_lm_loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], lm_logits) if not inputs["return_dict"]: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return TFSeq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, # index 1 of d outputs decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs encoder_last_hidden_state=outputs.last_hidden_state, # index 0 of encoder outputs encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out encoder_attentions=outputs.encoder_attentions, # 2 of e out ) def prepare_inputs_for_generation(self, decoder_input_ids, past, attention_mask, use_cache, **kwargs) -> Dict: assert past is not None and len(past) in {1, 2}, f"past has to be an iterable of length 1,2 got {past}" if len(past) == 1: assert isinstance(past[0], tf.Tensor), f"`past[0]` has to be of type `tf.Tensor`, but is {type(past[0])}" encoder_outputs = TFBaseModelOutput(last_hidden_state=past[0]) past_key_values = None else: assert ( len(past) == 2 ), "`past` has to be of length 2 with the encoder_outputs at the first position and past_key_values at the second position." encoder_outputs, past_key_values = past if isinstance(encoder_outputs, tuple): assert isinstance( encoder_outputs[0], tf.Tensor ), f"`encoder_outputs[0]` has to be of type `tf.Tensor`, but is {type(encoder_outputs[0])}" encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs[0]) elif isinstance(encoder_outputs, tf.Tensor): encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs) assert ( past_key_values ), f"decoder cached states must be truthy. 
got {past_key_values} from the 2nd element of past" decoder_input_ids = decoder_input_ids[:, -1:] assert isinstance( encoder_outputs, TFBaseModelOutput ), f"encoder_outputs should be a TFBaseModelOutput, Instead got {type(encoder_outputs)}." return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } @staticmethod def _reorder_cache(past, beam_idx): if len(past) == 1: return past past_key_values = past[1] reordered_past = () for layer_past_key_values in past_key_values: reordered_past += ( tuple(tf.gather(layer_past_key_value, beam_idx) for layer_past_key_value in layer_past_key_values), ) return (past[0], reordered_past) def adjust_logits_during_generation(self, logits, cur_len, max_length): if cur_len == 1 and self.config.force_bos_token_to_be_generated: vocab_range = tf.constant(range(self.config.vocab_size)) return tf.where(vocab_range != self.config.bos_token_id, LARGE_NEGATIVE, logits) elif cur_len == max_length - 1: vocab_range = tf.constant(range(self.config.vocab_size)) return tf.where(vocab_range != self.config.eos_token_id, LARGE_NEGATIVE, logits) else: return logits def get_output_embeddings(self): return self.model.shared def get_encoder(self): return self.model.encoder def compute_loss(self, labels, logits): """CrossEntropyLoss that ignores pad tokens""" loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE, ) melted_labels = tf.reshape(labels, (-1,)) active_loss = tf.not_equal(melted_labels, self.config.pad_token_id) reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(melted_labels, active_loss) return loss_fn(labels, reduced_logits)
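# --- Illustrative sketch, not part of the original module. ---
# compute_loss above flattens labels/logits and masks out pad positions before
# applying SparseCategoricalCrossentropy. The standalone helper below mirrors
# that logic on toy shapes so it can be sanity-checked in isolation; the helper
# name, toy vocab size, and tensor values are assumptions made purely for
# illustration.
def _example_masked_sparse_ce(labels, logits, pad_token_id=1):
    # pad_token_id=1 matches BART's default padding token.
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
    flat_labels = tf.reshape(labels, (-1,))           # (batch * seq_len,)
    active = tf.not_equal(flat_labels, pad_token_id)  # True for real tokens only
    flat_logits = tf.reshape(logits, (-1, shape_list(logits)[-1]))
    return loss_fn(tf.boolean_mask(flat_labels, active),
                   tf.boolean_mask(flat_logits, active))


if __name__ == "__main__":
    toy_labels = tf.constant([[5, 7, 1], [2, 1, 1]])  # three pad, three real tokens
    toy_logits = tf.random.uniform((2, 3, 50265))     # 50265 = assumed vocab size
    print(_example_masked_sparse_ce(toy_labels, toy_logits))  # 3 per-token losses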
[ "tensorflow.convert_to_tensor", "tensorflow.keras.layers.LayerNormalization", "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.zeros", "tensorflow.cast", "tensorflow.where", "tensorflow.math.not_equal", "tensorflow.boolean_mask", "numpy.sin", "tensorflow.stop_gradient", "tensorflow.gather", "tensorflow.compat.v1.variable_scope", "tensorflow.matmul", "numpy.power", "tensorflow.roll", "tensorflow.keras.layers.Dense", "tensorflow.identity", "tensorflow.not_equal", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.range", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.broadcast_to", "tensorflow.reshape", "tensorflow.ones", "numpy.cos", "tensorflow.keras.layers.Layer", "tensorflow.keras.layers.Dropout" ]
src/transformers/models/bart/modeling_tf_bart.py
[(62, 'tensorflow.cast', 'tf.cast', (['input_ids', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.roll', 'tf.roll', (['shifted_input_ids', '(1)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (65, 'tensorflow.concat', 'tf.concat', (['[start_tokens, shifted_input_ids[:, 1:]]', '(-1)'], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.cast', 'tf.cast', (['mask', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (94, 'tensorflow.broadcast_to', 'tf.broadcast_to', (['mask[(None), (None), :, :]', '(bsz, 1, tgt_len, tgt_len + past_key_values_length)'], {}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.cast', 'tf.cast', (['(0)', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (75, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[assert_gte0]'], {}), True, 'import tensorflow as tf\n'), (76, 'tensorflow.identity', 'tf.identity', (['shifted_input_ids'], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.ones', 'tf.ones', (['(tgt_len, tgt_len)'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.broadcast_to', 'tf.broadcast_to', (['mask[:, (None), (None), :]', '(bsz, 1, tgt_len, src_len)'], {}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.range', 'tf.range', (['past_key_values_length', '(seq_len + past_key_values_length)'], {'delta': '(1)', 'dtype': 'tf.int32', 'name': '"""range"""'}), True, 'import tensorflow as tf\n'), (166, 'numpy.sin', 'np.sin', (['position_enc[:, 0::2]'], {}), True, 'import numpy as np\n'), (167, 'numpy.cos', 'np.cos', (['position_enc[:, 1::2]'], {}), True, 'import numpy as np\n'), (169, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['position_enc'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['table'], {}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.range', 'tf.range', (['past_key_values_length', '(seq_len + past_key_values_length)'], {'delta': '(1)', 'dtype': 'tf.int32', 'name': '"""range"""'}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout'], {}), True, 'import tensorflow as tf\n'), (205, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['embed_dim'], {'use_bias': 'bias', 'name': '"""k_proj"""'}), True, 'import tensorflow as tf\n'), (206, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['embed_dim'], {'use_bias': 'bias', 'name': '"""q_proj"""'}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['embed_dim'], {'use_bias': 'bias', 'name': '"""v_proj"""'}), True, 'import tensorflow as tf\n'), (208, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['embed_dim'], {'use_bias': 'bias', 'name': '"""out_proj"""'}), True, 'import tensorflow as tf\n'), (262, 'tensorflow.reshape', 'tf.reshape', (['key_states', 'proj_shape'], {}), True, 'import tensorflow as tf\n'), (263, 'tensorflow.reshape', 'tf.reshape', (['value_states', 'proj_shape'], {}), True, 'import tensorflow as tf\n'), (266, 'tensorflow.matmul', 'tf.matmul', (['query_states', 'key_states'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (283, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['attn_weights'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (287, 'tensorflow.matmul', 'tf.matmul', (['attn_probs', 'value_states'], {}), True, 'import tensorflow as tf\n'), (298, 'tensorflow.reshape', 'tf.reshape', (['attn_output', '(bsz, tgt_len, 
embed_dim)'], {}), True, 'import tensorflow as tf\n'), (301, 'tensorflow.reshape', 'tf.reshape', (['attn_weights', '(bsz, self.num_heads, tgt_len, src_len)'], {}), True, 'import tensorflow as tf\n'), (314, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-05)', 'name': '"""self_attn_layer_norm"""'}), True, 'import tensorflow as tf\n'), (315, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.dropout'], {}), True, 'import tensorflow as tf\n'), (317, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.activation_dropout'], {}), True, 'import tensorflow as tf\n'), (318, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['config.encoder_ffn_dim'], {'name': '"""fc1"""'}), True, 'import tensorflow as tf\n'), (319, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self.embed_dim'], {'name': '"""fc2"""'}), True, 'import tensorflow as tf\n'), (320, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-05)', 'name': '"""final_layer_norm"""'}), True, 'import tensorflow as tf\n'), (370, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.dropout'], {}), True, 'import tensorflow as tf\n'), (372, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.activation_dropout'], {}), True, 'import tensorflow as tf\n'), (375, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-05)', 'name': '"""self_attn_layer_norm"""'}), True, 'import tensorflow as tf\n'), (383, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-05)', 'name': '"""encoder_attn_layer_norm"""'}), True, 'import tensorflow as tf\n'), (384, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['config.decoder_ffn_dim'], {'name': '"""fc1"""'}), True, 'import tensorflow as tf\n'), (385, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self.embed_dim'], {'name': '"""fc2"""'}), True, 'import tensorflow as tf\n'), (386, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-05)', 'name': '"""final_layer_norm"""'}), True, 'import tensorflow as tf\n'), (487, 'warnings.warn', 'warnings.warn', (['"""The class `TFPretrainedBartModel` has been deprecated, please use `TFBartPretrainedModel` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (589, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.dropout'], {}), True, 'import tensorflow as tf\n'), (776, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.dropout'], {}), True, 'import tensorflow as tf\n'), (1318, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (1322, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (1323, 'tensorflow.not_equal', 'tf.not_equal', (['melted_labels', 'self.config.pad_token_id'], {}), True, 'import tensorflow as tf\n'), (1325, 'tensorflow.boolean_mask', 'tf.boolean_mask', (['melted_labels', 'active_loss'], {}), True, 'import tensorflow as tf\n'), (211, 'tensorflow.reshape', 'tf.reshape', (['tensor', '(bsz, seq_len, self.num_heads, self.head_dim)'], {}), True, 'import tensorflow as tf\n'), (281, 'tensorflow.reshape', 'tf.reshape', 
(['attn_weights', '(bsz * self.num_heads, tgt_len, src_len)'], {}), True, 'import tensorflow as tf\n'), (296, 'tensorflow.reshape', 'tf.reshape', (['attn_output', '(bsz, self.num_heads, tgt_len, self.head_dim)'], {}), True, 'import tensorflow as tf\n'), (475, 'tensorflow.constant', 'tf.constant', (['DUMMY_INPUTS'], {}), True, 'import tensorflow as tf\n'), (476, 'tensorflow.constant', 'tf.constant', (['DUMMY_INPUTS'], {}), True, 'import tensorflow as tf\n'), (479, 'tensorflow.math.not_equal', 'tf.math.not_equal', (['input_ids', 'pad_token'], {}), True, 'import tensorflow as tf\n'), (591, 'math.sqrt', 'math.sqrt', (['config.d_model'], {}), False, 'import math\n'), (612, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-05)', 'name': '"""layernorm_embedding"""'}), True, 'import tensorflow as tf\n'), (614, 'tensorflow.keras.layers.Layer', 'tf.keras.layers.Layer', ([], {}), True, 'import tensorflow as tf\n'), (617, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-05)', 'name': '"""layer_norm"""'}), True, 'import tensorflow as tf\n'), (712, 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), False, 'import random\n'), (748, 'math.sqrt', 'math.sqrt', (['config.d_model'], {}), False, 'import math\n'), (766, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-05)', 'name': '"""layernorm_embedding"""'}), True, 'import tensorflow as tf\n'), (768, 'tensorflow.keras.layers.Layer', 'tf.keras.layers.Layer', ([], {}), True, 'import tensorflow as tf\n'), (771, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-05)', 'name': '"""layer_norm"""'}), True, 'import tensorflow as tf\n'), (902, 'tensorflow.ones', 'tf.ones', (['(input_shape[0], input_shape[1] + past_key_values_length)'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (929, 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), False, 'import random\n'), (986, 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""model.shared"""'], {}), True, 'import tensorflow as tf\n'), (1144, 'tensorflow.zeros', 'tf.zeros', (['(new_num_tokens,)'], {}), True, 'import tensorflow as tf\n'), (1303, 'tensorflow.where', 'tf.where', (['(vocab_range != self.config.bos_token_id)', 'LARGE_NEGATIVE', 'logits'], {}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.zeros', 'tf.zeros', (['(tgt_len, past_key_values_length)'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (280, 'tensorflow.reshape', 'tf.reshape', (['attn_weights', '(bsz, self.num_heads, tgt_len, src_len)'], {}), True, 'import tensorflow as tf\n'), (887, 'tensorflow.ones', 'tf.ones', (['(input_shape[0], input_shape[1] + past_key_values_length)'], {}), True, 'import tensorflow as tf\n'), (892, 'tensorflow.math.not_equal', 'tf.math.not_equal', (["inputs['input_ids']", 'self.config.pad_token_id'], {}), True, 'import tensorflow as tf\n'), (1306, 'tensorflow.where', 'tf.where', (['(vocab_range != self.config.eos_token_id)', 'LARGE_NEGATIVE', 'logits'], {}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.concat', 'tf.concat', (['[past_key_value[0], key_states]'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.concat', 'tf.concat', (['[past_key_value[1], value_states]'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (896, 'tensorflow.ones', 'tf.ones', (['(input_shape[0], past_key_values_length)'], 
{'dtype': "inputs['attention_mask'].dtype"}), True, 'import tensorflow as tf\n'), (163, 'numpy.power', 'np.power', (['(10000)', '(2 * (j // 2) / dim)'], {}), True, 'import numpy as np\n'), (1296, 'tensorflow.gather', 'tf.gather', (['layer_past_key_value', 'beam_idx'], {}), True, 'import tensorflow as tf\n')]
gluru/tensorflow
0290bfd96901018d6fd0a520e77aafb44b19a1ac
# Lint as: python2, python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for lite.py functionality related to TensorFlow 2.0.""" import ctypes import functools import itertools import os import sys from absl.testing import parameterized import numpy as np from six.moves import range from six.moves import zip import tensorflow as tf # Force loaded shared object symbols to be globally visible. This is needed so # that the interpreter_wrapper, in one .so file, can see the test_registerer, # in a different .so file. Note that this may already be set by default. # pylint: disable=g-import-not-at-top if hasattr(sys, 'setdlopenflags') and hasattr(sys, 'getdlopenflags'): sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL) from tensorflow.lite.python import conversion_metadata_schema_py_generated as metadata_fb from tensorflow.lite.python import convert from tensorflow.lite.python import lite from tensorflow.lite.python import lite_v2_test_util from tensorflow.lite.python import schema_py_generated as schema_fb from tensorflow.lite.python import test_util as tflite_test_util from tensorflow.lite.python import util from tensorflow.lite.python.convert import mlir_quantize from tensorflow.lite.python.interpreter import Interpreter from tensorflow.lite.python.interpreter import InterpreterWithCustomOps from tensorflow.lite.python.interpreter import OpResolverType from tensorflow.lite.python.testdata import _pywrap_test_registerer as test_registerer from tensorflow.lite.python.testdata import double_op from tensorflow.lite.python.util import get_conversion_metadata from tensorflow.lite.toco import types_pb2 as _types_pb2 from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.framework import versions from tensorflow.python.lib.io import file_io from tensorflow.python.ops import map_ops from tensorflow.python.ops import rnn from tensorflow.python.platform import resource_loader from tensorflow.python.platform import test from tensorflow.python.saved_model import save_options from tensorflow.python.saved_model import saved_model from tensorflow.python.saved_model.loader_impl import parse_saved_model from tensorflow.python.saved_model.save import save from tensorflow.python.training.tracking import tracking # Only run jax related tests when we can import jax. 
DISABLE_JAX_TEST = False try: import jax from jax import numpy as jnp except ImportError: DISABLE_JAX_TEST = True # pylint: enable=g-import-not-at-top class FromConcreteFunctionTest(lite_v2_test_util.ModelTest): @test_util.run_v2_only def testTypeInvalid(self): root = self._getSimpleVariableModel() with self.assertRaises(ValueError) as error: _ = lite.TFLiteConverterV2.from_concrete_functions([root.f], root) self.assertIn('call get_concrete_function', str(error.exception)) @test_util.run_v2_only def testFloat(self): root = self._getSimpleVariableModel() input_data = tf.constant(1., shape=[1]) concrete_func = root.f.get_concrete_function(input_data) # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], root) tflite_model = converter.convert() # Check output value from converted model. expected_value = root.f(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value.numpy(), actual_value) @parameterized.named_parameters(('_INT8InputOutput', dtypes.int8), ('_UINT8InputOutput', dtypes.uint8), ('_INT16InputOutput', dtypes.int16)) @test_util.run_v2_only def testInvalidFloat(self, inference_input_output_type): root = self._getSimpleVariableModel() input_data = tf.constant(1., shape=[1]) concrete_func = root.f.get_concrete_function(input_data) # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], root) with self.assertRaises(ValueError) as error: converter.inference_input_type = inference_input_output_type converter.inference_output_type = inference_input_output_type converter.convert() self.assertEqual( 'The inference_input_type and inference_output_type ' 'must be tf.float32.', str(error.exception)) @test_util.run_v2_only def testScalarInput(self): root = self._getSimpleVariableModel() input_data = tf.constant(1., shape=[]) concrete_func = root.f.get_concrete_function(input_data) # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], root) tflite_model = converter.convert() # Check values from converted model. expected_value = root.f(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value.numpy(), actual_value) @test_util.run_v2_only def testModelWithoutInputs(self): def _get_random_number_gen(): root = tracking.AutoTrackable() @tf.function(input_signature=[]) def func(): return tf.random.uniform(shape=[1], dtype=tf.float32) root.f = func to_save = root.f.get_concrete_function() return (root, to_save) # Model with no input root, concrete_func = _get_random_number_gen() # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], root) tflite_model = converter.convert() self.assertIsNotNone(tflite_model) @test_util.run_v2_only def testMultiFunctionModel(self): """Convert a single model in a multi-functional model.""" root = self._getMultiFunctionModel() input_data = tf.constant(1., shape=[1]) concrete_func = root.add.get_concrete_function(input_data) # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], root) tflite_model = converter.convert() # Check values from converted model. 
expected_value = root.add(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value.numpy(), actual_value) @test_util.run_v2_only def testConvertMultipleFunctions(self): """Convert multiple functions in a multi-functional model.""" root = self._getMultiFunctionModel() input_data = tf.constant(1., shape=[1]) add_func = root.add.get_concrete_function(input_data) sub_func = root.sub.get_concrete_function(input_data) # Try converting multiple functions. converter = lite.TFLiteConverterV2.from_concrete_functions( [add_func, sub_func], root) tflite_model = converter.convert() # Check signatures are valid from converted model. interpreter = Interpreter(model_content=tflite_model) signature_defs = interpreter.get_signature_list() # Verify the SignatureDef structure returned is as expected. self.assertEqual(len(signature_defs), 2) self.assertEqual(list(signature_defs.keys()), ['add', 'sub']) self.assertEqual(len(signature_defs.values()), 2) self.assertEqual(list(signature_defs['add'].keys()), ['inputs', 'outputs']) self.assertCountEqual(signature_defs['add']['inputs'], ['x']) self.assertEqual(list(signature_defs['add']['outputs']), ['output_0']) self.assertEqual(list(signature_defs['sub'].keys()), ['inputs', 'outputs']) self.assertCountEqual(signature_defs['sub']['inputs'], ['x']) self.assertEqual(list(signature_defs['sub']['outputs']), ['output_0']) # Verify the Signature runner executions. add_signature_runner = interpreter.get_signature_runner('add') add_output = add_signature_runner(x=input_data) self.assertEqual(add_output['output_0'], 3) input_details = add_signature_runner.get_input_details() self.assertEqual(1, len(input_details)) self.assertEqual('add_x:0', input_details['x']['name']) self.assertEqual(np.float32, input_details['x']['dtype']) self.assertTrue(([1] == input_details['x']['shape']).all()) self.assertEqual((0.0, 0), input_details['x']['quantization']) sub_signature_runner = interpreter.get_signature_runner('sub') sub_output = sub_signature_runner(x=input_data) self.assertEqual(sub_output['output_0'], -2) output_details = sub_signature_runner.get_output_details() self.assertEqual(1, len(output_details)) self.assertEqual('StatefulPartitionedCall:0', output_details['output_0']['name']) self.assertEqual(np.float32, output_details['output_0']['dtype']) self.assertTrue(([1] == output_details['output_0']['shape']).all()) self.assertEqual((0.0, 0), output_details['output_0']['quantization']) # Check the conversion metadata. 
metadata = get_conversion_metadata(tflite_model) self.assertIsNotNone(metadata) self.assertEqual(metadata.environment.apiVersion, 2) self.assertEqual(metadata.environment.modelType, metadata_fb.ModelType.TF_CONCRETE_FUNCTIONS) self.assertAllEqual([], metadata.options.modelOptimizationModes) def _getIntegerQuantizeModel(self, num_filters=16): np.random.seed(0) root = tracking.AutoTrackable() @tf.function( input_signature=[tf.TensorSpec(shape=[1, 5, 5, 3], dtype=tf.float32)]) def func(inp): conv = tf.nn.conv2d( inp, tf.ones([3, 3, 3, num_filters]), strides=[1, 1, 1, 1], padding='SAME') output = tf.nn.relu(conv, name='output') return output def calibration_gen(): for _ in range(5): yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)] root.f = func to_save = root.f.get_concrete_function() return (root, to_save, calibration_gen) @parameterized.named_parameters( ('EnableMlirQuantizer', True), # enable mlir quantizer ('DisableMlirQuantizer', False)) # disable mlir quantizer def testPostTrainingCalibrateAndQuantize(self, mlir_quantizer): root, func, calibration_gen = self._getIntegerQuantizeModel() # Convert float model. float_converter = lite.TFLiteConverterV2.from_concrete_functions([func], root) float_tflite_model = float_converter.convert() self.assertIsNotNone(float_tflite_model) # Convert quantized model. quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func], root) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_converter.representative_dataset = calibration_gen quantized_converter.experimental_new_quantizer = mlir_quantizer quantized_tflite_model = quantized_converter.convert() self.assertIsNotNone(quantized_tflite_model) # Check the conversion metadata. metadata = get_conversion_metadata(quantized_tflite_model) self.assertIsNotNone(metadata) self.assertEqual( metadata.environment.tensorflowVersion.decode('utf-8'), versions.__version__) self.assertEqual(metadata.environment.apiVersion, 2) self.assertEqual(metadata.environment.modelType, metadata_fb.ModelType.TF_CONCRETE_FUNCTIONS) self.assertEqual(metadata.options.allowCustomOps, False) self.assertEqual(metadata.options.enableSelectTfOps, False) self.assertEqual(metadata.options.forceSelectTfOps, False) self.assertAllEqual([metadata_fb.ModelOptimizationMode.PTQ_FULL_INTEGER], metadata.options.modelOptimizationModes) # The default input and output types should be float. interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertLen(input_details, 1) self.assertEqual(np.float32, input_details[0]['dtype']) output_details = interpreter.get_output_details() self.assertLen(output_details, 1) self.assertEqual(np.float32, output_details[0]['dtype']) # Ensure that the quantized weights tflite model is smaller. self.assertLess(len(quantized_tflite_model), len(float_tflite_model)) @parameterized.named_parameters(('_INT8InputOutput', dtypes.int8), ('_UINT8InputOutput', dtypes.uint8), ('_INT16InputOutput', dtypes.int16)) @test_util.run_v2_only def testInvalidPostTrainingDynamicRangeQuantization( self, inference_input_output_type): root, func, _ = self._getIntegerQuantizeModel() # Convert float model. converter = lite.TFLiteConverterV2.from_concrete_functions([func], root) tflite_model = converter.convert() self.assertTrue(tflite_model) # Convert quantized model. 
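    # Note: with Optimize.DEFAULT but no representative_dataset, the converter
    # falls back to dynamic-range quantization (int8 weights, float activations),
    # and that path only supports float32 model input/output types, hence the
    # ValueError asserted below.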
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func], root) quantized_converter.optimizations = [lite.Optimize.DEFAULT] with self.assertRaises(ValueError) as error: quantized_converter.inference_input_type = inference_input_output_type quantized_converter.inference_output_type = inference_input_output_type quantized_converter.convert() self.assertEqual( 'The inference_input_type and inference_output_type ' 'must be tf.float32.', str(error.exception)) @parameterized.named_parameters( ('EnableMlirQuantizer', True), # enable mlir quantizer ('DisableMlirQuantizer', False)) # disable mlir quantizer def testQuantizationRemovesQDQsForFloatIO(self, mlir_quantizer): func, calibration_gen = self._getSqrtModel() converter = lite.TFLiteConverterV2.from_concrete_functions( [func.get_concrete_function()]) converter.representative_dataset = calibration_gen converter.optimizations = [lite.Optimize.DEFAULT] converter.experimental_new_quantizer = mlir_quantizer quantized_model = converter.convert() # Because assertions on the model later, we opt out applying default TFLite # delegates (i.e. the XNNPACK delegate). interpreter = Interpreter( model_content=quantized_model, experimental_op_resolver_type=OpResolverType .BUILTIN_WITHOUT_DEFAULT_DELEGATES) interpreter.allocate_tensors() # The model should have only one sqrt op. op_details = interpreter._get_ops_details() self.assertLen(op_details, 1) self.assertEqual(op_details[0]['op_name'], 'SQRT') @parameterized.named_parameters( ('_Default', False, False, dtypes.float32), ('_INT8InputOutput', False, False, dtypes.int8), ('_UINT8InputOutput', False, False, dtypes.uint8), ('_INT16Quantize', False, True, dtypes.float32), ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16), ('_IntOnly', True, False, dtypes.float32), ('_IntOnly_INT8InputOutput', True, False, dtypes.int8), ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8), ('_IntOnly_INT16Quantize', True, True, dtypes.float32), ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16)) def testIntegerQuantization(self, is_int_only, is_int16_quantize, inference_input_output_type): root, func, calibration_gen = self._getIntegerQuantizeModel() # Convert float model. converter = lite.TFLiteConverterV2.from_concrete_functions([func], root) tflite_model = converter.convert() self.assertTrue(tflite_model) # Convert quantized model. quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func], root) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_converter.representative_dataset = calibration_gen if is_int_only: if is_int16_quantize: quantized_converter.target_spec.supported_ops = [ lite.OpsSet. EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 ] else: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS_INT8 ] else: if is_int16_quantize: quantized_converter.target_spec.supported_ops = [ lite.OpsSet. EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8, lite.OpsSet.TFLITE_BUILTINS ] quantized_converter.inference_input_type = inference_input_output_type quantized_converter.inference_output_type = inference_input_output_type quantized_tflite_model = quantized_converter.convert() self.assertIsNotNone(quantized_tflite_model) # Check the conversion metadata. 
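    # The converter embeds a record of the applied optimizations in the
    # flatbuffer; get_conversion_metadata() reads it back, reporting the 16x8
    # scheme (int16 activations, int8 weights) as PTQ_INT16 and the plain int8
    # path as PTQ_FULL_INTEGER.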
metadata = get_conversion_metadata(quantized_tflite_model) self.assertIsNotNone(metadata) expected_opt_options = [metadata_fb.ModelOptimizationMode.PTQ_FULL_INTEGER] if is_int16_quantize: expected_opt_options = [metadata_fb.ModelOptimizationMode.PTQ_INT16] self.assertAllEqual(expected_opt_options, metadata.options.modelOptimizationModes) interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertLen(input_details, 1) self.assertEqual(inference_input_output_type.as_numpy_dtype, input_details[0]['dtype']) output_details = interpreter.get_output_details() self.assertLen(output_details, 1) self.assertEqual(inference_input_output_type.as_numpy_dtype, output_details[0]['dtype']) # Ensure that the quantized tflite model is smaller. self.assertLess(len(quantized_tflite_model), len(tflite_model)) @parameterized.named_parameters( ('_INT16Quantize_INT8InputOutput', True, dtypes.int8)) def testInvalidIntegerQuantization(self, is_int16_quantize, inference_input_output_type): root, func, calibration_gen = self._getIntegerQuantizeModel() # Convert quantized model. quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func], root) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_converter.representative_dataset = calibration_gen if is_int16_quantize: quantized_converter.target_spec.supported_ops = [ lite.OpsSet. EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8, lite.OpsSet.TFLITE_BUILTINS ] with self.assertRaises(ValueError) as error: quantized_converter.inference_input_type = dtypes.int8 quantized_converter.inference_output_type = dtypes.int8 quantized_converter.convert() self.assertEqual( 'The inference_input_type and inference_output_type ' "must be in ['tf.float32', 'tf.int16'].", str(error.exception)) def testCalibrateAndQuantizeBuiltinInt16(self): root, func, calibration_gen = self._getIntegerQuantizeModel() # Convert float model. float_converter = lite.TFLiteConverterV2.from_concrete_functions([func], root) float_tflite_model = float_converter.convert() self.assertIsNotNone(float_tflite_model) converter = lite.TFLiteConverterV2.from_concrete_functions([func], root) # TODO(b/156309549): We should add INT16 to the builtin types. converter.optimizations = [lite.Optimize.DEFAULT] converter.target_spec.supported_ops = [lite.OpsSet.TFLITE_BUILTINS_INT8] converter.representative_dataset = calibration_gen converter._experimental_calibrate_only = True calibrated_tflite = converter.convert() quantized_tflite_model = mlir_quantize( calibrated_tflite, inference_type=_types_pb2.QUANTIZED_INT16) self.assertIsNotNone(quantized_tflite_model) # The default input and output types should be float. interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertLen(input_details, 1) self.assertEqual(np.float32, input_details[0]['dtype']) output_details = interpreter.get_output_details() self.assertLen(output_details, 1) self.assertEqual(np.float32, output_details[0]['dtype']) # Ensure that the quantized weights tflite model is smaller. 
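    # Weights are stored as 8-bit integers after quantization, so the flatbuffer
    # should shrink to roughly a quarter of the float32 weight size; the check
    # below only asserts the direction of the change, not an exact ratio.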
self.assertLess(len(quantized_tflite_model), len(float_tflite_model)) @test_util.run_v2_only def testSignatureDefs(self): """Test converting SignatureDef is correct and uses SignatureDef API.""" root = self._getMultiFunctionModel() input_data = tf.constant(1., shape=[1]) add_func = root.add.get_concrete_function(input_data) converter = lite.TFLiteConverterV2([add_func], trackable_obj=root) tflite_model = converter.convert() # Check values from converted model. expected_value = add_func(input_data) interpreter = Interpreter(model_content=tflite_model) signature_defs = interpreter.get_signature_list() results = self._evaluateTFLiteModelUsingSignatureDef( tflite_model, 'serving_default', {'x': input_data}) self.assertLen(list(results.keys()), 1) self.assertStartsWith(list(results.keys())[0], 'output') self.assertAllClose( expected_value.numpy(), results[signature_defs['serving_default']['outputs'][0]]) # Verify the SignatureDef structure returned is as expected. self.assertEqual(len(signature_defs), 1) self.assertEqual(list(signature_defs.keys()), ['serving_default']) self.assertEqual(len(signature_defs.values()), 1) self.assertEqual( list(signature_defs['serving_default'].keys()), ['inputs', 'outputs']) self.assertCountEqual(signature_defs['serving_default']['inputs'], ['x']) self.assertLen(list(signature_defs['serving_default']['outputs']), 1) self.assertStartsWith( list(signature_defs['serving_default']['outputs'])[0], 'output') @test_util.run_v2_only def testNoSignatureDefsWhenTrackingObjIsNone(self): """Test converting SignatureDef is correct and uses SignatureDef API.""" root = self._getSimpleVariableModel() input_data = tf.constant(1., shape=[1]) concrete_func = root.f.get_concrete_function(input_data) converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], None) tflite_model = converter.convert() # Check values from converted model. interpreter = Interpreter(model_content=tflite_model) signature_defs = interpreter.get_signature_list() # Verify that there is no SignatureDef structure found. self.assertEqual(len(signature_defs), 0) @test_util.run_v2_only def testNoSignatureDefsWhenInvalidTrackingObjIsGiven(self): """Test converting SignatureDef is correct and uses SignatureDef API.""" root = self._getSimpleVariableModel() input_data = tf.constant(1., shape=[1]) concrete_func = root.f.get_concrete_function(input_data) converter = lite.TFLiteConverterV2.from_concrete_functions( [concrete_func], trackable_obj=tracking.AutoTrackable()) tflite_model = converter.convert() # Check values from converted model. interpreter = Interpreter(model_content=tflite_model) signature_defs = interpreter.get_signature_list() # Verify that there is no SignatureDef structure found. self.assertEqual(len(signature_defs), 0) @test_util.run_v2_only def testTrackbleObject(self): """Test converting with trackable objects.""" root = self._getMultiFunctionModel() input_data = tf.constant(1., shape=[1]) add_func = root.add.get_concrete_function(input_data) converter = lite.TFLiteConverterV2.from_concrete_functions( [add_func], trackable_obj=root) tflite_model = converter.convert() # Check values from converted model. 
expected_value = add_func(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value.numpy(), actual_value) def _getTrainingTimeQuantizedModel(self): class QLinear(tf.keras.layers.Layer): def __init__(self, units=3, **kwargs): super(QLinear, self).__init__(**kwargs) self.units = units def build(self, input_shape): self.w = self.add_weight( 'weight', shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) self.min_var = self.add_weight( 'min', initializer=tf.keras.initializers.Constant(-6.0), trainable=False) self.max_var = self.add_weight( 'max', initializer=tf.keras.initializers.Constant(6.0), trainable=False) def call(self, inputs): x = tf.quantization.fake_quant_with_min_max_vars( inputs, self.min_var, self.max_var) w_fq = tf.quantization.fake_quant_with_min_max_vars( self.w, self.min_var, self.max_var) x = tf.matmul(x, w_fq) x = tf.quantization.fake_quant_with_min_max_vars( x, self.min_var, self.max_var) return x return tf.keras.Sequential(QLinear(3, input_shape=(2,))) @parameterized.named_parameters( ('_DefaultFLOAT32InputOutput', dtypes.float32), ('_INT8InputOutput', dtypes.int8), ('_UINT8InputOutput', dtypes.uint8)) @test_util.run_v2_only def testTrainingTimeQuantization(self, inference_input_output_type): model = self._getTrainingTimeQuantizedModel() float_converter = lite.TFLiteConverterV2.from_keras_model(model) float_tflite_model = float_converter.convert() self.assertIsNotNone(float_tflite_model) quantized_converter = lite.TFLiteConverterV2.from_keras_model(model) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_converter.inference_input_type = inference_input_output_type quantized_converter.inference_output_type = inference_input_output_type quantized_tflite_model = quantized_converter.convert() self.assertIsNotNone(quantized_tflite_model) # Check the conversion metadata. metadata = get_conversion_metadata(quantized_tflite_model) self.assertIsNotNone(metadata) self.assertAllEqual( [metadata_fb.ModelOptimizationMode.QUANTIZATION_AWARE_TRAINING], metadata.options.modelOptimizationModes) interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertLen(input_details, 1) self.assertEqual(inference_input_output_type.as_numpy_dtype, input_details[0]['dtype']) output_details = interpreter.get_output_details() self.assertLen(output_details, 1) self.assertEqual(inference_input_output_type.as_numpy_dtype, output_details[0]['dtype']) # Ensure that the quantized tflite model is smaller. 
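    # (The fake-quant ops carry explicit (min, max) range variables, so the
    # converter can derive quantization scales without any representative_dataset
    # here; that is also why this flatbuffer comes out smaller than the float
    # model.)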
self.assertLess(len(quantized_tflite_model), len(float_tflite_model)) @test_util.run_v2_only def testNewQuantizer(self): """Test the model quantized by the new converter.""" root, func, calibration_gen = self._getIntegerQuantizeModel() quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func], root) quantized_converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS_INT8 ] quantized_converter.representative_dataset = calibration_gen # default quantizer quantized_converter.experimental_new_quantizer = False old_tflite = quantized_converter.convert() # new quantizer quantized_converter.experimental_new_quantizer = True new_tflite = quantized_converter.convert() for _ in range(5): input_data = tf.constant( np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)) old_value = self._evaluateTFLiteModel(old_tflite, [input_data]) new_value = self._evaluateTFLiteModel(new_tflite, [input_data]) self.assertAllClose(old_value, new_value, atol=1e-01) @test_util.run_v2_only def testEmbeddings(self): """Test model with embeddings.""" input_data = tf.constant( np.array(np.random.random_sample((20)), dtype=np.int32)) class EmbeddingModel(tf.keras.Model): def __init__(self): super(EmbeddingModel, self).__init__() self.shared_weights = self.add_weight( 'weights', shape=(2000, 300), dtype=tf.float32, initializer=tf.random_normal_initializer( mean=0.0, stddev=300**(-0.5))) @tf.function(input_signature=[tf.TensorSpec(shape=(20), dtype=tf.int32)]) def func(self, x): return tf.gather(self.shared_weights, x) # Building the model. root = EmbeddingModel() concrete_func = root.func.get_concrete_function() # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], root) tflite_model = converter.convert() # Check values from converted model. expected_value = root.func(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertAllClose(expected_value.numpy(), actual_value[0], atol=1e-05) @test_util.run_v2_only def testGraphDebugInfo(self): """Test a concrete function has debug info captured.""" root = tracking.AutoTrackable() root.v1 = tf.Variable(3.) root.f = tf.function(lambda x: root.v1 * x) input_data = tf.constant(1., shape=[1]) concrete_func = root.f.get_concrete_function(input_data) # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], root) converter.convert() self._assertValidDebugInfo(converter._debug_info) def _getIntegerQuantizationModelWithFlexOp(self): np.random.seed(0) root = tracking.AutoTrackable() @tf.function(input_signature=[ tf.TensorSpec(shape=[3, 3, 3, 3, 3], dtype=tf.float32) ]) def func(inp): tanh = tf.math.tanh(inp) # Flex delegate will merge the consecutive conv3d and erf ops into one # Delegate node. 
      conv3d = tf.nn.conv3d(
          tanh,
          tf.ones([3, 3, 3, 3, 3]),
          strides=[1, 1, 1, 1, 1],
          padding='SAME')
      erf = tf.math.erf(conv3d)
      output = tf.math.tanh(erf)
      return output

    def calibration_gen():
      for _ in range(5):
        yield [
            np.random.uniform(-1, 1, size=(3, 3, 3, 3, 3)).astype(np.float32)
        ]

    root.f = func
    return (root, root.f.get_concrete_function(), calibration_gen)

  @parameterized.named_parameters(
      ('_Default', False, False, dtypes.float32),
      ('_INT8InputOutput', False, False, dtypes.int8),
      ('_UINT8InputOutput', False, False, dtypes.uint8),
      ('_INT16Quantize', False, True, dtypes.float32),
      ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),
      ('_IntOnly', True, False, dtypes.float32),
      ('_IntOnly_INT8InputOutput', True, False, dtypes.int8),
      ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),
      ('_IntOnly_INT16Quantize', True, True, dtypes.float32),
      ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16))
  @test_util.run_v2_only
  def testIntegerQuantizationWithFlexOp(self, is_int_only, is_int16_quantize,
                                        inference_input_output_type):
    root, func, calibration_gen = self._getIntegerQuantizationModelWithFlexOp()

    quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [func], root)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.representative_dataset = calibration_gen
    if is_int_only:
      if is_int16_quantize:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.
            EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
            lite.OpsSet.SELECT_TF_OPS
        ]
      else:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.SELECT_TF_OPS
        ]
    else:
      if is_int16_quantize:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.
            EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
            lite.OpsSet.TFLITE_BUILTINS, lite.OpsSet.SELECT_TF_OPS
        ]
      else:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.TFLITE_BUILTINS, lite.OpsSet.SELECT_TF_OPS
        ]
    quantized_converter.inference_input_type = inference_input_output_type
    quantized_converter.inference_output_type = inference_input_output_type
    quantized_tflite_model = quantized_converter.convert()
    self.assertIsNotNone(quantized_tflite_model)
    # Check the conversion metadata.
    metadata = get_conversion_metadata(quantized_tflite_model)
    self.assertIsNotNone(metadata)
    self.assertEqual(metadata.options.enableSelectTfOps, True)
    expected_opt_options = [metadata_fb.ModelOptimizationMode.PTQ_FULL_INTEGER]
    if is_int16_quantize:
      expected_opt_options = [metadata_fb.ModelOptimizationMode.PTQ_INT16]
    self.assertAllEqual(expected_opt_options,
                        metadata.options.modelOptimizationModes)

    interpreter = Interpreter(model_content=quantized_tflite_model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 1)
    self.assertEqual(inference_input_output_type.as_numpy_dtype,
                     input_details[0]['dtype'])
    output_details = interpreter.get_output_details()
    self.assertLen(output_details, 1)
    self.assertEqual(inference_input_output_type.as_numpy_dtype,
                     output_details[0]['dtype'])

  def _getIntegerQuantizationModelWithUnsupportedOps(self):
    np.random.seed(0)
    root = tracking.AutoTrackable()

    @tf.function(input_signature=[
        tf.TensorSpec(shape=[3], dtype=tf.float32),
        tf.TensorSpec(shape=[3], dtype=tf.float32)
    ])
    def func(a, b):
      # The ceil kernel supports neither int8 nor int16 types.
      left = tf.math.ceil(a)
      right = tf.nn.tanh(b)
      add = tf.math.add(left, right)
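      # -----------------------------------------------------------------------
      # [Editor's note] Aside (a hypothetical sketch, not part of the test):
      # because ceil has no int8/int16 kernel, full-integer conversion must
      # either keep the op in float (the TFLITE_BUILTINS fallback shown below)
      # or fail when TFLITE_BUILTINS_INT8 alone is requested.
      def _example_quantize_with_float_fallback(func, root, calibration_gen):
        converter = tf.lite.TFLiteConverter.from_concrete_functions(
            [func], root)
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = calibration_gen
        # TFLITE_BUILTINS (without the INT8-only restriction) permits
        # non-quantizable ops such as ceil to fall back to float32.
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
        return converter.convert()
      # -----------------------------------------------------------------------
      # The ceil kernel supports neither int8 nor int16 types.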
output = tf.math.ceil(add) return (output, right) def calibration_gen(): for _ in range(5): yield [ np.random.uniform(-1, 1, size=(3)).astype(np.float32), np.random.uniform(-1, 1, size=(3)).astype(np.float32) ] root.f = func return (root, root.f.get_concrete_function(), calibration_gen) @parameterized.named_parameters( ('_INT8InputOutput', False, False, dtypes.int8), ('_UINT8InputOutput', False, False, dtypes.uint8), ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16), ('_IntOnly_INT8InputOutput', True, False, dtypes.int8), ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8), ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16), ('_IntOnly_INT8InputOutputMlirQuant', True, False, dtypes.int8, True), ('_IntOnly_UINT8InputOutputMlirQuant', True, False, dtypes.uint8, True)) @test_util.run_v2_only def testIntegerQuantizationWithUnsupportedOps(self, is_int_only, is_int16_quantize, inference_input_output_type, enable_mlir_quantizer=False): root, func, calib_gen = self._getIntegerQuantizationModelWithUnsupportedOps( ) quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions( [func], root) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_converter.representative_dataset = calib_gen if is_int_only: if is_int16_quantize: quantized_converter.target_spec.supported_ops = [ lite.OpsSet. EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8, lite.OpsSet.TFLITE_BUILTINS ] else: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.TFLITE_BUILTINS ] else: if is_int16_quantize: quantized_converter.target_spec.supported_ops = [ lite.OpsSet. EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8, lite.OpsSet.TFLITE_BUILTINS ] else: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS ] quantized_converter.inference_input_type = inference_input_output_type quantized_converter.inference_output_type = inference_input_output_type quantized_converter.experimental_new_quantizer = enable_mlir_quantizer quantized_tflite_model = quantized_converter.convert() self.assertIsNotNone(quantized_tflite_model) expected_dtype = inference_input_output_type.as_numpy_dtype # Allow float32 for fallback on non-quantizable op. 
expected_ceil_dtype = ( expected_dtype if enable_mlir_quantizer else dtypes.float32) interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertLen(input_details, 2) self.assertEqual(input_details[0]['dtype'], expected_dtype) self.assertEqual(input_details[1]['dtype'], expected_ceil_dtype) output_details = interpreter.get_output_details() self.assertLen(output_details, 2) self.assertEqual(output_details[0]['dtype'], expected_dtype) self.assertEqual(output_details[1]['dtype'], expected_ceil_dtype) def _getIntegerQuantizationModelWithControlFlow(self): def true_fn(x): return x def false_fn(x): return x @tf.function(input_signature=[ tf.TensorSpec(shape=[1, 2], dtype=tf.float32), tf.TensorSpec(shape=(), dtype=tf.bool) ]) def model(x, b): x = x + x x = tf.cond(b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x)) return x + x def calibration_gen(): for _ in range(5): yield [ np.random.uniform(-1, 1, size=( 1, 2, )).astype(np.float32), tf.constant(True), ] for _ in range(5): yield [ np.random.uniform(-1, 1, size=( 1, 2, )).astype(np.float32), tf.constant(False), ] return (model, model.get_concrete_function(), calibration_gen) @parameterized.named_parameters( ('_INT8InputOutput', False, False, dtypes.int8), ('_UINT8InputOutput', False, False, dtypes.uint8), ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16), ('_IntOnly_INT8InputOutput', True, False, dtypes.int8), ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8), ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16), # TODO(b/198231624): Support control flow ops in MLIR quantizer # ('_IntOnly_INT8InputOutputMlirQuant', True, False, dtypes.int8, True), # ('_IntOnly_UINT8InputOutputMlirQuant', True, False, dtypes.uint8, True), ) @test_util.run_v2_only def testIntegerQuantizationWithControlFlow(self, is_int_only, is_int16_quantize, inference_input_output_type, enable_mlir_quantizer=False): root, func, calib_gen = self._getIntegerQuantizationModelWithControlFlow() quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions( [func], root) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_converter.representative_dataset = calib_gen if is_int_only: if is_int16_quantize: quantized_converter.target_spec.supported_ops = [ lite.OpsSet .EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8, lite.OpsSet.TFLITE_BUILTINS ] else: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.TFLITE_BUILTINS ] else: if is_int16_quantize: quantized_converter.target_spec.supported_ops = [ lite.OpsSet .EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8, lite.OpsSet.TFLITE_BUILTINS ] else: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS ] quantized_converter.inference_input_type = inference_input_output_type quantized_converter.inference_output_type = inference_input_output_type quantized_converter.experimental_new_quantizer = enable_mlir_quantizer quantized_tflite_model = quantized_converter.convert() self.assertIsNotNone(quantized_tflite_model) expected_dtype = inference_input_output_type.as_numpy_dtype interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertLen(input_details, 2) self.assertEqual(input_details[0]['dtype'], expected_dtype) self.assertEqual(input_details[1]['dtype'], dtypes.bool) output_details = 
interpreter.get_output_details()
    self.assertLen(output_details, 1)
    self.assertEqual(output_details[0]['dtype'], expected_dtype)

  @parameterized.named_parameters(
      ('_BlocklistedNoneWithLowering', None, None, True),
      ('_BlocklistedNoneWithoutLowering', None, None, False),
      ('_BlocklistedOpsWithLowering', {'CONV_2D'}, None, True),
      ('_BlocklistedOpsWithoutLowering', {'CONV_2D'}, None, False),
      ('_BlocklistedNodesWithLowering', None, {'PartitionedCall:0'}, True),
      ('_BlocklistedNodesWithoutLowering', None, {'Identity'}, False))
  @test_util.run_v2_only
  def testNewQuantizerBlocklistingArgs(self, denylisted_ops, denylisted_nodes,
                                       lower_to_saved_model):
    """Test the model quantized by the new converter with denylisting options."""
    root, func, calibration_gen = self._getIntegerQuantizeModel()
    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(
        [func], root)
    quantized_converter.target_spec.supported_ops = [
        lite.OpsSet.TFLITE_BUILTINS_INT8
    ]
    quantized_converter.representative_dataset = calibration_gen
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.experimental_new_quantizer = True
    quantized_converter._experimental_calibrate_only = True
    quantized_converter.experimental_lower_to_saved_model = lower_to_saved_model
    calibrated = quantized_converter.convert()
    quantized_tflite_model = mlir_quantize(
        calibrated,
        denylisted_ops=denylisted_ops,
        denylisted_nodes=denylisted_nodes)
    interpreter = Interpreter(model_content=quantized_tflite_model)
    details = interpreter.get_tensor_details()
    num_quantized_tensors = sum([
        1 for detail in details
        if len(detail['quantization_parameters']['scales'])
    ])
    if denylisted_nodes or denylisted_ops:
      self.assertEqual(num_quantized_tensors, 0)
      return
    self.assertEqual(num_quantized_tensors, 4)  # quant, filter, bias, dequant

  @parameterized.named_parameters(
      ('_SingleLayer', False),
      ('_WholeModel', True),
  )
  @test_util.run_v2_only
  def testNewQuantizerNumericVerificationDebugMode(self, whole_model_verify):
    """Test the model quantized by the new converter with numeric verify ops."""
    root, func, calibration_gen = self._getIntegerQuantizeModel()
    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(
        [func], root)
    quantized_converter.target_spec.supported_ops = [
        lite.OpsSet.TFLITE_BUILTINS_INT8
    ]
    quantized_converter.representative_dataset = calibration_gen

    # Create a TFLite model with the new quantizer.
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.experimental_new_quantizer = True
    production_tflite = quantized_converter.convert()
    # Create a TFLite model with the new quantizer and numeric verify ops.
    quantized_converter._experimental_calibrate_only = True
    calibrated = quantized_converter.convert()
    debug_mode_tflite = mlir_quantize(
        calibrated,
        enable_numeric_verify=True,
        enable_whole_model_verify=whole_model_verify)

    # Check that enabling debug mode produces a different flatbuffer.
    self.assertNotEqual(production_tflite, debug_mode_tflite)
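    # -------------------------------------------------------------------------
    # [Editor's note] Aside (a sketch under the same assumptions as this test):
    # the debug-mode flatbuffer is produced by running calibration only and
    # then quantizing with numeric-verify ops enabled, exactly as above:
    #
    #   converter._experimental_calibrate_only = True
    #   calibrated = converter.convert()
    #   debug_model = mlir_quantize(calibrated, enable_numeric_verify=True)
    #
    # Each NumericVerify op compares an int8 activation against its float
    # reference at inference time, which is what the checks below rely on.
    # -------------------------------------------------------------------------
    # Check that the newly added ops are numeric verify ops.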
    input_data = tf.constant(
        np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32))

    def examine_tflite_model(tflite_content, input_data):
      interpreter = Interpreter(
          model_content=tflite_content,
          experimental_op_resolver_type=OpResolverType
          .BUILTIN_WITHOUT_DEFAULT_DELEGATES)
      interpreter.allocate_tensors()
      input_details = interpreter.get_input_details()
      interpreter.set_tensor(input_details[0]['index'], input_data.numpy())
      interpreter.invoke()
      tensor_details = interpreter.get_tensor_details()
      return {
          details['name']: interpreter.get_tensor(details['index'])
          for details in interpreter.get_tensor_details()
      }, tensor_details

    tflite_result, _ = examine_tflite_model(production_tflite, input_data)
    debug_mode_tflite_result, debug_tensor_details = examine_tflite_model(
        debug_mode_tflite, input_data)

    # The MLIR-based quantizer should output a flatbuffer model with
    # `tfl.quantize`.
    num_production_quantize_ops = len([
        None for output_tensor_name in tflite_result
        if 'tfl.quantize' in output_tensor_name
    ])
    self.assertEqual(num_production_quantize_ops, 1)
    # The MLIR-based quantizer should output a flatbuffer model with
    # `tfl.quantize`.
    num_debug_quantize_ops = len([
        None for output_tensor_name in debug_mode_tflite_result
        if 'tfl.quantize' in output_tensor_name
    ])
    # The two counts should be equal.
    self.assertEqual(num_production_quantize_ops, num_debug_quantize_ops)
    # The debug-mode TFLite flatbuffer should contain more than zero
    # NumericVerify ops. Each has a name of the form
    # "NumericVerify/{name}:{id}", where {name} is the tensor name of the
    # original quantized op's activation and {id} is its tensor id.
    num_debug_ops = 0
    for output_tensor_name in debug_mode_tflite_result:
      if 'NumericVerify' in output_tensor_name:
        pos_end_prefix = len('NumericVerify/')
        pos_colon = output_tensor_name.rfind(':')
        self.assertEqual('NumericVerify/', output_tensor_name[:pos_end_prefix])
        tensor_id = int(output_tensor_name[pos_colon + 1:])
        original_tensor_name = output_tensor_name[pos_end_prefix:pos_colon]
        self.assertEqual(original_tensor_name,
                         debug_tensor_details[tensor_id]['name'])
        num_debug_ops += 1
    self.assertEqual(num_debug_ops, 1)
    # The number of debug ops should equal the number of quantized ops.
    self.assertEqual(num_debug_ops, num_debug_quantize_ops)

  @parameterized.named_parameters(
      ('_PerChannelQuant', False, False),
      ('_PerChannelMlirQuant', False, True),
      ('_PerTensorQuant', True, False),
      ('_PerTensorMlirQuant', True, True),
      ('_PerChannelDynamicRange', False, False, False),
      ('_PerTensorDynamicRange', True, False, False))
  @test_util.run_v2_only
  def testDisablePerChannelQuantization(self,
                                        disable_per_channel=False,
                                        enable_mlir_quantizer=False,
                                        representative_dataset=True):
    k_conv_name = 'Conv2D1'
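    # -------------------------------------------------------------------------
    # [Editor's note] Aside (illustrative sketch, not part of the test): with
    # per-channel quantization a Conv2D weight tensor carries one scale per
    # output channel; per-tensor quantization carries exactly one. That is
    # what the assertions at the end of this test count.
    def _example_count_weight_scales(quantized_tflite_model, tensor_name):
      interpreter = tf.lite.Interpreter(model_content=quantized_tflite_model)
      interpreter.allocate_tensors()
      detail = next(d for d in interpreter.get_tensor_details()
                    if d['name'] == tensor_name)
      return len(detail['quantization_parameters']['scales'])
    # -------------------------------------------------------------------------
    # Dynamic range quant requires total num elements of filters > 1024.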
k_num_filters = 38 root, func, calib_gen = self._getIntegerQuantizeModel(k_num_filters) quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions( [func], root) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_converter.representative_dataset = calib_gen quantized_converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS ] quantized_converter.experimental_new_quantizer = enable_mlir_quantizer if disable_per_channel: quantized_converter._experimental_disable_per_channel = ( disable_per_channel) quantized_tflite_model = quantized_converter.convert() self.assertIsNotNone(quantized_tflite_model) interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() detail = next((d for d in interpreter.get_tensor_details() if d['name'] == k_conv_name)) quant_params = detail['quantization_parameters'] expected_num_params = 1 if disable_per_channel else k_num_filters self.assertLen(quant_params['scales'], expected_num_params) self.assertLen(quant_params['zero_points'], expected_num_params) @parameterized.named_parameters(('MlirQuantize', True), ('TocoQuantize', False)) @test_util.run_v2_only def testQuantizeBiasOverflow(self, enable_mlir_quantizer): """Tests if the quantizer handles bias overflow by adjusting scales.""" input_data = np.array([[-1e-3, 1e-3]], dtype=np.float32) def calibration_gen(): yield {'x': input_data} root = self._getMatMulModelWithSmallWeights() input_data = tf.constant([-1e-3, 1e-3], shape=(1, 2)) concrete_func = root.matmul.get_concrete_function(input_data) converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], root) converter.optimizations = [lite.Optimize.DEFAULT] converter.representative_dataset = calibration_gen converter.experimental_new_quantizer = enable_mlir_quantizer quantized_model = converter.convert() interpreter = Interpreter(model_content=quantized_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() interpreter.set_tensor(input_details[0]['index'], input_data) interpreter.invoke() output_details = interpreter.get_output_details() output = interpreter.get_tensor(output_details[0]['index']) # the inputs and weights are far smaller than the biases, so the final # result should be equal to the biases. self.assertAllClose(root.bias, output.flatten()) @test_util.run_v2_only def testOpVersion(self): @tf.function( input_signature=[tf.TensorSpec(shape=[5, 5], dtype=tf.float32)]) def custom_resize(image): # Add "batch" and "channels" dimensions image = image[tf.newaxis, ..., tf.newaxis] # ResizeBilinear version 3. resize1 = tf.compat.v1.image.resize_bilinear( image, [2, 2], half_pixel_centers=True) # ResizeBilinear version 1. resize2 = tf.compat.v1.image.resize_bilinear(image, [2, 2]) return resize1 + resize2 concrete_func = custom_resize.get_concrete_function() converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], custom_resize) tflite_model = converter.convert() model_object = schema_fb.Model.GetRootAsModel(tflite_model, 0) model = schema_fb.ModelT.InitFromObj(model_object) for operator in model.operatorCodes: if operator.builtinCode == schema_fb.BuiltinOperator.RESIZE_BILINEAR: # half_pixel_centers is supported by ResizeBilinear version 3. self.assertEqual(operator.version, 3) break @test_util.run_v2_only def testForceSelectTFOps(self): root = self._getSimpleVariableModel() input_data = tf.constant(1., shape=[1]) concrete_func = root.f.get_concrete_function(input_data) # Convert model. 
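    # -------------------------------------------------------------------------
    # [Editor's note] Aside (a minimal sketch, not part of the test): forcing
    # SELECT_TF_OPS makes the converter emit TensorFlow ops executed through
    # the Flex delegate instead of TFLite builtins, so the runtime must be
    # built with Flex support.
    def _example_force_select_tf_ops(concrete_func, root):
      converter = tf.lite.TFLiteConverter.from_concrete_functions(
          [concrete_func], root)
      converter.target_spec.supported_ops = [tf.lite.OpsSet.SELECT_TF_OPS]
      return converter.convert()
    # -------------------------------------------------------------------------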
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], root) converter.target_spec.supported_ops = [ tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() # Check the conversion metadata. metadata = get_conversion_metadata(tflite_model) self.assertIsNotNone(metadata) self.assertEqual(metadata.options.forceSelectTfOps, True) # Check output value from converted model. expected_value = root.f(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value.numpy(), actual_value) def testExcludeConversionMetadata(self): root = self._getSimpleVariableModel() input_data = tf.constant(1., shape=[1]) concrete_func = root.f.get_concrete_function(input_data) # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], root) converter.exclude_conversion_metadata = True tflite_model = converter.convert() # Check the conversion metadata. metadata = get_conversion_metadata(tflite_model) self.assertIsNone(metadata) def testConversionMetadataForDynamicRange(self): func, _ = self._getSqrtModel() converter = lite.TFLiteConverterV2.from_concrete_functions( [func.get_concrete_function()]) converter.optimizations = [lite.Optimize.DEFAULT] quantized_model = converter.convert() # Check the conversion metadata. metadata = get_conversion_metadata(quantized_model) self.assertIsNotNone(metadata) self.assertAllEqual([metadata_fb.ModelOptimizationMode.PTQ_DYNAMIC_RANGE], metadata.options.modelOptimizationModes) def testConversionMetadataForFloat16(self): root, func, calibration_gen = self._getIntegerQuantizeModel() converter = lite.TFLiteConverterV2.from_concrete_functions([func], root) converter.optimizations = [lite.Optimize.DEFAULT] converter.representative_dataset = calibration_gen converter.target_spec.supported_types = [dtypes.float16] quantized_model = converter.convert() # Check the conversion metadata. 
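    # -------------------------------------------------------------------------
    # [Editor's note] Aside (a sketch, not part of the test): the conversion
    # metadata embedded in the flatbuffer records how the model was produced,
    # e.g. which optimization modes were applied. It can be read back with the
    # same helper this file already uses:
    def _example_read_optimization_modes(tflite_model):
      metadata = get_conversion_metadata(tflite_model)
      if metadata is None:  # e.g. exclude_conversion_metadata was set
        return []
      return list(metadata.options.modelOptimizationModes)
    # -------------------------------------------------------------------------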
metadata = get_conversion_metadata(quantized_model) self.assertIsNotNone(metadata) self.assertAllEqual([metadata_fb.ModelOptimizationMode.PTQ_FLOAT16], metadata.options.modelOptimizationModes) class FromSavedModelTest(lite_v2_test_util.ModelTest): def _createV1SavedModel(self, shape): """Create a simple SavedModel.""" saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel') with tf.Graph().as_default(): with tf.compat.v1.Session() as sess: in_tensor_1 = tf.compat.v1.placeholder( shape=shape, dtype=tf.float32, name='inputB') in_tensor_2 = tf.compat.v1.placeholder( shape=shape, dtype=tf.float32, name='inputA') variable_node = tf.Variable(1.0, name='variable_node') out_tensor = in_tensor_1 + in_tensor_2 * variable_node inputs = {'x': in_tensor_1, 'y': in_tensor_2} outputs = {'z': out_tensor} sess.run(tf.compat.v1.variables_initializer([variable_node])) saved_model.simple_save(sess, saved_model_dir, inputs, outputs) return saved_model_dir def _createV2QATSavedModel(self, shape): """Create a simple QAT SavedModel in TF 2.""" saved_model_dir = os.path.join(self.get_temp_dir(), 'saved_model') input_name = 'input' output_name = 'scores' input_tensor = tf.keras.layers.Input((32, 32, 128), name=input_name) x = tf.quantization.fake_quant_with_min_max_args(input_tensor, -3.0, 3.0) x = tf.keras.layers.Conv2D(1, (3, 3))(x) x = tf.quantization.fake_quant_with_min_max_args(x, -3.0, 3.0) scores = tf.keras.layers.Reshape((-1,), name=output_name)(x) model = tf.keras.Model(input_tensor, scores) model.save(saved_model_dir) return saved_model_dir, input_name, output_name @test_util.run_v2_only def testV1SimpleModel(self): """Test a SavedModel.""" with tf.Graph().as_default(): saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3]) # Convert model and ensure model is not None. 
    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 2)
    self.assertStartsWith(input_details[0]['name'], 'inputA')
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertStartsWith(input_details[1]['name'], 'inputB')
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertAllEqual([1, 16, 16, 3], input_details[1]['shape'])
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertLen(output_details, 1)
    self.assertStartsWith(output_details[0]['name'], 'add')
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape'])
    self.assertEqual((0., 0.), output_details[0]['quantization'])

  @parameterized.named_parameters(
      ('Default', False),
      ('UnfoldLargeConstant', True),
  )
  @test_util.run_v2_only
  def testUnfoldLargeConstant(self, unfold_large_constant):
    """Test unfolding a large splat constant in a TF Lite model."""
    saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
    with tf.Graph().as_default():
      with tf.compat.v1.Session() as sess:
        in_tensor = tf.compat.v1.placeholder(
            shape=[1000, 1000], dtype=tf.float32, name='input')
        constant = tf.constant(value=1, dtype=tf.float32, shape=[1000, 1000])
        out_tensor = in_tensor + constant
        inputs = {'x': in_tensor}
        outputs = {'y': out_tensor}
        saved_model.simple_save(sess, saved_model_dir, inputs, outputs)

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    converter._experimental_unfold_large_splat_constant = unfold_large_constant
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    model = util._convert_model_from_bytearray_to_object(tflite_model)
    if unfold_large_constant:
      self.assertEqual(model.operatorCodes[0].builtinCode,
                       schema_fb.BuiltinOperator.FILL)
      self.assertEqual(model.operatorCodes[1].builtinCode,
                       schema_fb.BuiltinOperator.ADD)
    else:
      self.assertEqual(model.operatorCodes[0].builtinCode,
                       schema_fb.BuiltinOperator.ADD)
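    # -------------------------------------------------------------------------
    # [Editor's note] Aside (illustrative sketch, not part of the test): the
    # structural check above works on the deserialized flatbuffer. The same
    # pattern can list every builtin operator in a converted model:
    def _example_list_builtin_ops(tflite_model):
      model = util._convert_model_from_bytearray_to_object(tflite_model)
      return [op_code.builtinCode for op_code in model.operatorCodes]
    # -------------------------------------------------------------------------

    # Check values from converted model.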
interpreter = Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertLen(input_details, 1) self.assertEqual('input:0', input_details[0]['name']) self.assertEqual(np.float32, input_details[0]['dtype']) self.assertAllEqual([1000, 1000], input_details[0]['shape']) self.assertEqual((0., 0.), input_details[0]['quantization']) output_details = interpreter.get_output_details() self.assertEqual('add:0', output_details[0]['name']) self.assertEqual(np.float32, output_details[0]['dtype']) self.assertAllEqual([1000, 1000], output_details[0]['shape']) self.assertEqual((0., 0.), output_details[0]['quantization']) interpreter.set_tensor(input_details[0]['index'], np.ones(shape=[1000, 1000], dtype=np.float32)) interpreter.invoke() self.assertAllEqual( np.full(shape=[1000, 1000], fill_value=2.0, dtype=np.float32), interpreter.get_tensor(output_details[0]['index'])) @test_util.run_v2_only def testTF1HubFormattedModel(self): """Test a TF1 hub formatted model.""" saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3]) # TF1 hub model is based on V1 saved model and they omit the saved model # schema version setting. saved_model_proto = parse_saved_model(saved_model_dir) saved_model_proto.saved_model_schema_version = 0 saved_model_pb_file_path = os.path.join(saved_model_dir, 'saved_model.pb') with file_io.FileIO(saved_model_pb_file_path, 'wb') as writer: writer.write(saved_model_proto.SerializeToString()) # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir) tflite_model = converter.convert() self.assertTrue(tflite_model) def _createV1ModelWithHashTableInitializer(self): # Create a v1 saved model with hash table initializers. tf.compat.v1.disable_eager_execution() saved_model_dir = os.path.join(self.get_temp_dir(), 'savedmodel_with_hashtable') table_initializer = tf.lookup.KeyValueTensorInitializer( keys=['a', 'b', 'c', 'd'], values=[1, 2, 3, 4], key_dtype=tf.string, value_dtype=tf.int64) table = tf.lookup.StaticHashTable( table_initializer, default_value=tf.constant(-1, dtype=tf.int64)) x = tf.compat.v1.placeholder(tf.string, shape=(), name='input') y = table.lookup(x) tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x) tensor_info_y = tf.compat.v1.saved_model.utils.build_tensor_info(y) signature_def_map, init_op, assets_collection = { 'serving_default': (tf.compat.v1.saved_model.signature_def_utils.build_signature_def( inputs={'x': tensor_info_x}, outputs={'y': tensor_info_y}, method_name='some_function')) }, tf.compat.v1.tables_initializer(), None sess = tf.compat.v1.Session() sess.run(tf.compat.v1.initializers.global_variables()) builder = tf.compat.v1.saved_model.builder.SavedModelBuilder( saved_model_dir) builder.add_meta_graph_and_variables( sess, [tf.compat.v1.saved_model.tag_constants.SERVING], signature_def_map, main_op=init_op, assets_collection=assets_collection, strip_default_attrs=True) builder.save() # Restore TF v2 behavior. tf.compat.v1.reset_default_graph() tf.compat.v1.enable_eager_execution() return saved_model_dir @test_util.run_v2_only def testModelWithHashTableInitializer(self): """Test a model with saved_model's session initializer for hash tables.""" saved_model_dir = self._createV1ModelWithHashTableInitializer() # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir) tflite_model = converter.convert() # Check values from converted model. 
interpreter = Interpreter(model_content=tflite_model) input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() input_data = np.array(['a', 'b', 'c', 'z'], dtype=np.string_) interpreter.resize_tensor_input( input_details[0]['index'], [4], strict=False) interpreter.allocate_tensors() interpreter.set_tensor(input_details[0]['index'], input_data) # Invoke multiple times to ensure the initializer graph runs only once. interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual([1, 2, 3, -1], list(actual_value)) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual([1, 2, 3, -1], list(actual_value)) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual([1, 2, 3, -1], list(actual_value)) def _createV1ModelWithMutableHashTable(self): # Create a v1 saved model with mutable hash table. tf.compat.v1.disable_eager_execution() saved_model_dir = os.path.join(self.get_temp_dir(), 'savedmodel_with_mutable_hashtable') table = tf.raw_ops.MutableHashTableV2( key_dtype=tf.string, value_dtype=tf.int64) x = tf.compat.v1.placeholder(tf.string, shape=(), name='input') keys = tf.constant(['a', 'b'], tf.string) values = tf.constant([1, 5], tf.int64) default_value = tf.constant(-1, tf.int64) insert_call = tf.raw_ops.LookupTableInsertV2( table_handle=table, keys=keys, values=values) with tf.control_dependencies([insert_call]): y = tf.raw_ops.LookupTableFindV2( table_handle=table, keys=x, default_value=default_value) tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x) tensor_info_y = tf.compat.v1.saved_model.utils.build_tensor_info(y) signature_def_map, init_op, assets_collection = { 'serving_default': (tf.compat.v1.saved_model.signature_def_utils.build_signature_def( inputs={'x': tensor_info_x}, outputs={'y': tensor_info_y}, method_name='some_function')) }, tf.compat.v1.tables_initializer(), None sess = tf.compat.v1.Session() builder = tf.compat.v1.saved_model.builder.SavedModelBuilder( saved_model_dir) builder.add_meta_graph_and_variables( sess, [tf.compat.v1.saved_model.tag_constants.SERVING], signature_def_map, main_op=init_op, assets_collection=assets_collection, strip_default_attrs=True) builder.save() # Restore TF v2 behavior. tf.compat.v1.reset_default_graph() tf.compat.v1.enable_eager_execution() return saved_model_dir @test_util.run_v2_only def testModelWithMutableHashTable(self): """Test a model with saved_model's session initializer for hash tables.""" saved_model_dir = self._createV1ModelWithMutableHashTable() # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() # Check values from converted model. 
interpreter = Interpreter(model_content=tflite_model) input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() input_data = np.array(['a', 'b', 'c'], dtype=np.string_) interpreter.resize_tensor_input( input_details[0]['index'], [3], strict=False) interpreter.allocate_tensors() interpreter.set_tensor(input_details[0]['index'], input_data) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual([1, 5, -1], list(actual_value)) @test_util.run_v2_only def testConstModel(self): """Test a basic model with functions to make sure functions are inlined.""" input_data = tf.constant(1., shape=[1]) root = tracking.AutoTrackable() root.f = tf.function(lambda x: 2. * x) to_save = root.f.get_concrete_function(input_data) save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(root, save_dir, to_save) # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_saved_model(save_dir) tflite_model = converter.convert() # Check values from converted model. expected_value = root.f(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value.numpy(), actual_value) @test_util.run_v2_only def testVariableModel(self): """Test a basic model with Variables with saving/loading the SavedModel.""" root = self._getSimpleVariableModel() input_data = tf.constant(1., shape=[1]) to_save = root.f.get_concrete_function(input_data) save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(root, save_dir, to_save) # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_saved_model(save_dir) tflite_model = converter.convert() # Check the conversion metadata. metadata = get_conversion_metadata(tflite_model) self.assertIsNotNone(metadata) self.assertEqual(metadata.environment.modelType, metadata_fb.ModelType.TF_SAVED_MODEL) # Check values from converted model. expected_value = root.f(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value.numpy(), actual_value) @parameterized.named_parameters(('EnableResourceVariables', True), ('DisableResourceVariables', False)) @test_util.run_v2_only def testNativeVariablesModel(self, enable_resource_variables): """Test a basic model with Variables with saving/loading the SavedModel.""" root = self._getSimpleModelWithVariables() input_data = tf.constant(1., shape=[1, 10]) to_save = root.assign_add.get_concrete_function(input_data) save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(root, save_dir, to_save) # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_saved_model(save_dir) converter.experimental_enable_resource_variables = enable_resource_variables if not enable_resource_variables: with self.assertRaises(convert.ConverterError) as error: tflite_model = converter.convert() self.assertIn( 'Variable constant folding is failed. Please consider using enabling ' '`experimental_enable_resource_variables` flag in the TFLite ' 'converter object.', str(error.exception)) return # Enable resource variables. tflite_model = converter.convert() # Check values from converted model. 
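    # -------------------------------------------------------------------------
    # [Editor's note] Aside (a sketch, not part of the test): stateful models
    # whose variables cannot be constant-folded convert only when resource
    # variable support is enabled on the converter, as done above:
    #
    #   converter = tf.lite.TFLiteConverter.from_saved_model(save_dir)
    #   converter.experimental_enable_resource_variables = True
    #   tflite_model = converter.convert()
    # -------------------------------------------------------------------------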
    expected_value = root.assign_add(input_data)
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
    for tf_result, tflite_result in zip(expected_value, actual_value[0]):
      self.assertAllClose(tf_result, tflite_result, atol=1e-05)

  @test_util.run_v2_only
  def testSignatures(self):
    """Test values for `signature_keys` argument."""
    root = self._getSimpleVariableModel()
    input_data = tf.constant(1., shape=[1])
    to_save = root.f.get_concrete_function(input_data)
    save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
    save(root, save_dir, to_save)

    # Convert model with invalid `signature_keys`.
    with self.assertRaises(ValueError) as error:
      _ = lite.TFLiteConverterV2.from_saved_model(
          save_dir, signature_keys=['INVALID'])
    self.assertIn("Invalid signature key 'INVALID'", str(error.exception))

    # Convert model with empty `signature_keys`.
    converter = lite.TFLiteConverterV2.from_saved_model(
        save_dir, signature_keys=[])
    tflite_model = converter.convert()

    # Check values from converted model.
    expected_value = root.f(input_data)
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
    self.assertEqual(expected_value.numpy(), actual_value)

  @test_util.run_v2_only
  def testSignatureDefsWithFullIntegerQuantization(self):
    # SETUP
    # 1. Define input shapes
    tf_input_shape = (32, 32, 128)
    tflite_input_shape = (1,) + tf_input_shape
    # 2. Define model
    tf_saved_model_dir, input_name, output_name = (
        self._createV2QATSavedModel(tf_input_shape))

    # MODEL 1: TFLite (float) model
    # 1. Create TFLite model
    converter = tf.lite.TFLiteConverter.from_saved_model(tf_saved_model_dir)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    tflite_model = converter.convert()
    # 2. Initialize the Interpreter
    interpreter = Interpreter(model_content=tflite_model)
    input_details = interpreter.get_input_details()[0]
    output_details = interpreter.get_output_details()[0]
    interpreter.resize_tensor_input(input_details['index'], tflite_input_shape)
    interpreter.allocate_tensors()
    signature_list = interpreter._get_full_signature_list()['serving_default']
    # 3. (Skip) Verify that signature def input/output tensors are in the model.
    # 4. Evaluate the model
    input_data = np.random.random(tflite_input_shape).astype(np.float32)
    result = self._evaluateTFLiteModelUsingSignatureDef(
        tflite_model, 'serving_default', {input_name: input_data})[output_name]

    # MODEL 2: TFLite (full integer quantized) model
    # 1. Create TFLite model
    converter = tf.lite.TFLiteConverter.from_saved_model(tf_saved_model_dir)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.inference_input_type = tf.int8
    converter.inference_output_type = tf.int8
    tflite_model_quant = converter.convert()
    # 2. Initialize the Interpreter
    interpreter = Interpreter(model_content=tflite_model_quant)
    input_details = interpreter.get_input_details()[0]
    output_details = interpreter.get_output_details()[0]
    interpreter.resize_tensor_input(input_details['index'], tflite_input_shape)
    interpreter.allocate_tensors()
    # 3. Verify that signature def input/output tensors are in the model.
    all_indices = {item['index'] for item in interpreter.get_tensor_details()}
    signature_list = interpreter._get_full_signature_list()['serving_default']
    input_tensor_indices = set(signature_list['inputs'].values())
    assert input_tensor_indices.issubset(all_indices)
    output_tensor_indices = set(signature_list['outputs'].values())
    assert output_tensor_indices.issubset(all_indices)
    # 4. Evaluate the model
    input_data = np.random.random(tflite_input_shape)
    input_scale, input_zero_point = input_details['quantization']
    if (input_scale, input_zero_point) != (0.0, 0):
      input_data = input_data / input_scale + input_zero_point
    input_data = input_data.astype(input_details['dtype'])
    result_quant = self._evaluateTFLiteModelUsingSignatureDef(
        tflite_model_quant, 'serving_default',
        {input_name: input_data})[output_name]
    output_scale, output_zero_point = output_details['quantization']
    if (output_scale, output_zero_point) != (0.0, 0):
      result_quant = result_quant.astype(np.float32)
      result_quant = (result_quant - output_zero_point) * output_scale

    # COMPARE: Validate that results from both models are approximately equal.
    root_mean_squared = np.sqrt(np.mean((result - result_quant)**2))
    assert root_mean_squared < 1.0

  @test_util.run_v2_only
  def testSignatureDefs(self):
    """Test that SignatureDef conversion is correct and uses the SignatureDef API."""
    root = self._getMultiFunctionModel()
    input_data_0 = tf.constant(1., shape=[1])
    input_data_1 = tf.constant(3., shape=[1])
    mul_add_func = root.mul_add.get_concrete_function(input_data_1,
                                                      input_data_0)
    save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
    save(root, save_dir, {'mul_add': mul_add_func})

    converter = lite.TFLiteConverterV2.from_saved_model(
        save_dir, signature_keys=['mul_add'])
    tflite_model = converter.convert()

    # Check values from converted model.
    expected_value = root.mul_add(input_data_1, input_data_0)
    interpreter = Interpreter(model_content=tflite_model)
    signature_defs = interpreter.get_signature_list()
    results = self._evaluateTFLiteModelUsingSignatureDef(
        tflite_model, 'mul_add', {
            'y': input_data_0,
            'x': input_data_1
        })
    self.assertEqual(list(results.keys()), ['output_0'])
    self.assertEqual(expected_value.numpy(), results['output_0'])

    # Verify the SignatureDef structure returned is as expected.
    self.assertEqual(len(signature_defs), 1)
    self.assertEqual(list(signature_defs.keys()), ['mul_add'])
    self.assertEqual(len(signature_defs.values()), 1)
    self.assertEqual(
        list(signature_defs['mul_add'].keys()), ['inputs', 'outputs'])
    self.assertCountEqual(signature_defs['mul_add']['inputs'], ['x', 'y'])
    self.assertEqual(list(signature_defs['mul_add']['outputs']), ['output_0'])

  @test_util.run_v2_only
  def testSignatureDefsWithDefaultValue(self):
    """Test that SignatureDef conversion is correct and uses the SignatureDef API.

    This test uses None as signature_key to test default behavior.
    """
    root = self._getMultiFunctionModel()
    input_data_0 = tf.constant(1., shape=[1])
    input_data_1 = tf.constant(3., shape=[1])
    mul_add_func = root.mul_add.get_concrete_function(input_data_1,
                                                      input_data_0)
    save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
    save(root, save_dir, {'mul_add': mul_add_func})

    converter = lite.TFLiteConverterV2.from_saved_model(
        save_dir, signature_keys=['mul_add'])
    tflite_model = converter.convert()

    # Check values from converted model.
    expected_value = root.mul_add(input_data_1, input_data_0)
    interpreter = Interpreter(model_content=tflite_model)
    signature_defs = interpreter.get_signature_list()
    results = self._evaluateTFLiteModelUsingSignatureDef(
        tflite_model, None, {
            'y': input_data_0,
            'x': input_data_1
        })
    self.assertEqual(list(results.keys()), ['output_0'])
    self.assertEqual(expected_value.numpy(), results['output_0'])

    # Verify the SignatureDef structure returned is as expected.
self.assertEqual(len(signature_defs), 1) self.assertEqual(list(signature_defs.keys()), ['mul_add']) self.assertEqual(len(signature_defs.values()), 1) self.assertEqual( list(signature_defs['mul_add'].keys()), ['inputs', 'outputs']) self.assertCountEqual(signature_defs['mul_add']['inputs'], ['x', 'y']) self.assertEqual(list(signature_defs['mul_add']['outputs']), ['output_0']) @test_util.run_v2_only def testSignatureDefsQuantizedModel(self): """Test converting SignatureDef on quantized model.""" root = self._getMultiFunctionModel() input_data_0 = tf.constant(1., shape=[1]) input_data_1 = tf.constant(3., shape=[1]) mul_add_func = root.mul_add.get_concrete_function(input_data_1, input_data_0) save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(root, save_dir, {'mul_add': mul_add_func}) converter = lite.TFLiteConverterV2.from_saved_model( save_dir, signature_keys=['mul_add']) def representative_dataset_gen(): for _ in range(2): yield { 'x': np.random.uniform(low=0, high=1, size=(1, 1)).astype(np.float32), 'y': np.random.uniform(low=0, high=1, size=(1, 1)).astype(np.float32) } converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.representative_dataset = representative_dataset_gen converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] tflite_model = converter.convert() # Check signatures are valid from converted model. interpreter = Interpreter(model_content=tflite_model) signature_defs = interpreter.get_signature_list() # Verify the SignatureDef structure returned is as expected. self.assertEqual(len(signature_defs), 1) self.assertEqual(list(signature_defs.keys()), ['mul_add']) self.assertEqual(len(signature_defs.values()), 1) self.assertEqual( list(signature_defs['mul_add'].keys()), ['inputs', 'outputs']) self.assertCountEqual(signature_defs['mul_add']['inputs'], ['x', 'y']) self.assertEqual(list(signature_defs['mul_add']['outputs']), ['output_0']) @test_util.run_v2_only def testMultipleFunctionModel(self): """Convert multiple functions in a multi-functional model.""" root = self._getMultiFunctionModel() input_data = tf.constant(1., shape=[1]) add_func = root.add.get_concrete_function(input_data) sub_func = root.sub.get_concrete_function(input_data) save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(root, save_dir, {'add': add_func, 'sub': sub_func}) # Try converting multiple functions. converter = lite.TFLiteConverterV2.from_saved_model(save_dir) tflite_model = converter.convert() self.assertIsNotNone(tflite_model) interpreter = tf.lite.Interpreter(model_content=tflite_model) signature_defs = interpreter.get_signature_list() # Verify the SignatureDef structure returned is as expected. self.assertEqual(len(signature_defs), 2) self.assertEqual(list(signature_defs.keys()), ['add', 'sub']) self.assertEqual(len(signature_defs.values()), 2) self.assertEqual(list(signature_defs['add'].keys()), ['inputs', 'outputs']) self.assertCountEqual(signature_defs['add']['inputs'], ['x']) self.assertEqual(list(signature_defs['add']['outputs']), ['output_0']) self.assertEqual(list(signature_defs['sub'].keys()), ['inputs', 'outputs']) self.assertCountEqual(signature_defs['sub']['inputs'], ['x']) self.assertEqual(list(signature_defs['sub']['outputs']), ['output_0']) # Verify the Signature runner executions. 
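    # -------------------------------------------------------------------------
    # [Editor's note] Aside (a sketch, not part of the test): each signature
    # in a multi-signature model gets its own runner; inputs are passed by
    # SignatureDef input name and outputs come back as a dict keyed by output
    # name, e.g.:
    #
    #   runner = interpreter.get_signature_runner('add')
    #   out = runner(x=tf.constant(1., shape=[1]))['output_0']
    # -------------------------------------------------------------------------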
add_signature_runner = interpreter.get_signature_runner('add') add_output = add_signature_runner(x=input_data) self.assertEqual(add_output['output_0'], 3) sub_signature_runner = interpreter.get_signature_runner('sub') sub_output = sub_signature_runner(x=input_data) self.assertEqual(sub_output['output_0'], -2) @parameterized.named_parameters( ('_Default', False, False, dtypes.float32, False), ('_DefaultMlirQuant', False, False, dtypes.float32, True), ('_INT8InputOutput', False, False, dtypes.int8), ('_UINT8InputOutput', False, False, dtypes.uint8), ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16), ('_IntOnly_INT8InputOutput', True, False, dtypes.int8), ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8), ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16), ('_IntOnly_INT8InputOutputMlirQuant', True, False, dtypes.int8, True), ('_IntOnly_UINT8InputOutputMlirQuant', True, False, dtypes.uint8, True)) @test_util.run_v2_only def testMultipleFunctionQuantizedModel(self, is_int_only, is_int16_quantize, inference_input_output_type, enable_mlir_quantizer=False): """Convert multiple functions in a multi-functional model.""" root = self._getMultiFunctionModel() input_data = tf.constant(1., shape=[1]) add_func = root.add.get_concrete_function(input_data) sub_func = root.sub.get_concrete_function(input_data) save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(root, save_dir, {'add': add_func, 'sub': sub_func}) # Try converting multiple functions. converter = lite.TFLiteConverterV2.from_saved_model(save_dir) def representative_dataset_gen(): for _ in range(2): yield ('add', { 'x': np.random.uniform(low=0, high=1, size=(1,)).astype(np.float32), }) for _ in range(2): yield ('sub', { 'x': np.random.uniform(low=0, high=1, size=(1,)).astype(np.float32), }) converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.representative_dataset = representative_dataset_gen if is_int_only: if is_int16_quantize: converter.target_spec.supported_ops = [ lite.OpsSet .EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 ] else: converter.target_spec.supported_ops = [lite.OpsSet.TFLITE_BUILTINS_INT8] else: if is_int16_quantize: converter.target_spec.supported_ops = [ lite.OpsSet .EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 ] else: converter.target_spec.supported_ops = [lite.OpsSet.TFLITE_BUILTINS] converter.inference_input_type = inference_input_output_type converter.inference_output_type = inference_input_output_type converter.experimental_new_quantizer = enable_mlir_quantizer tflite_model = converter.convert() self.assertIsNotNone(tflite_model) interpreter = tf.lite.Interpreter(model_content=tflite_model) signature_defs = interpreter.get_signature_list() # Verify the SignatureDef structure returned is as expected. self.assertEqual(len(signature_defs), 2) self.assertEqual(list(signature_defs.keys()), ['add', 'sub']) self.assertEqual(len(signature_defs.values()), 2) self.assertEqual(list(signature_defs['add'].keys()), ['inputs', 'outputs']) self.assertCountEqual(signature_defs['add']['inputs'], ['x']) self.assertEqual(list(signature_defs['add']['outputs']), ['output_0']) self.assertEqual(list(signature_defs['sub'].keys()), ['inputs', 'outputs']) self.assertCountEqual(signature_defs['sub']['inputs'], ['x']) self.assertEqual(list(signature_defs['sub']['outputs']), ['output_0']) # Verify the Signature runner executions. 
input_data = tf.constant( np.random.uniform(-1, 1, size=(1,)).astype( inference_input_output_type.as_numpy_dtype)) add_signature_runner = interpreter.get_signature_runner('add') add_output = add_signature_runner(x=input_data) self.assertIsNotNone(add_output['output_0']) input_details = add_signature_runner.get_input_details() self.assertLen(input_details, 1) self.assertStartsWith(input_details['x']['name'], 'add_x:0') self.assertEqual(inference_input_output_type.as_numpy_dtype, input_details['x']['dtype']) self.assertTrue(([1] == input_details['x']['shape']).all()) if inference_input_output_type == dtypes.float32: self.assertEqual((0.0, 0), input_details['x']['quantization']) sub_signature_runner = interpreter.get_signature_runner('sub') sub_output = sub_signature_runner(x=input_data) self.assertIsNotNone(sub_output['output_0']) output_details = sub_signature_runner.get_output_details() self.assertLen(output_details, 1) self.assertStartsWith(output_details['output_0']['name'], 'StatefulPartitionedCall:0') self.assertEqual(inference_input_output_type.as_numpy_dtype, output_details['output_0']['dtype']) self.assertTrue(([1] == output_details['output_0']['shape']).all()) if inference_input_output_type == dtypes.float32: self.assertEqual((0.0, 0), output_details['output_0']['quantization']) @test_util.run_v2_only def testMultipleFunctionModelWithSharedWeight(self): """Convert multiple functions with the shared weight.""" root = self._getMultiFunctionModelWithSharedWeight() input_data = tf.constant(1., shape=[1]) add_func = root.add.get_concrete_function(input_data) sub_func = root.sub.get_concrete_function(input_data) mul_func = root.mul.get_concrete_function(input_data) save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(root, save_dir, {'add': add_func, 'sub': sub_func, 'mul': mul_func}) # Try converting multiple functions. converter = lite.TFLiteConverterV2.from_saved_model(save_dir) tflite_model = converter.convert() self.assertIsNotNone(tflite_model) # Make sure that the weight tensors are shared. self.assertLess(len(tflite_model), 1100000) # TODO(b/184696047): Write down the test codes for multiple signature # runners once the Python API is ready to use. interpreter = tf.lite.Interpreter(model_content=tflite_model) signature_defs = interpreter.get_signature_list() self.assertLen(signature_defs, 3) add_signature_runner = interpreter.get_signature_runner('add') sub_signature_runner = interpreter.get_signature_runner('sub') mul_signature_runner = interpreter.get_signature_runner('mul') self.assertIsNotNone(add_signature_runner) self.assertIsNotNone(sub_signature_runner) self.assertIsNotNone(mul_signature_runner) @test_util.run_v2_only def testNoConcreteFunctionModel(self): root = self._getMultiFunctionModel() save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(root, save_dir) with self.assertRaises(ValueError) as error: _ = lite.TFLiteConverterV2.from_saved_model(save_dir) self.assertIn('Only support at least one signature key.', str(error.exception)) @test_util.run_v2_only def testKerasSequentialModel(self): """Test a simple sequential tf.Keras model.""" input_data = tf.constant(1., shape=[1, 1]) x = np.array([[1.], [2.]]) y = np.array([[2.], [4.]]) model = tf.keras.models.Sequential([ tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(1), ]) model.compile(optimizer='sgd', loss='mean_squared_error') model.fit(x, y, epochs=1) save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(model, save_dir) # Convert model and ensure model is not None. 
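    # -------------------------------------------------------------------------
    # [Editor's note] Aside (a minimal sketch, not part of the test): a Keras
    # model can also be converted directly, without an intermediate
    # SavedModel export:
    def _example_convert_keras_directly(model):
      converter = tf.lite.TFLiteConverter.from_keras_model(model)
      return converter.convert()
    # This test intentionally exercises the SavedModel path instead.
    # -------------------------------------------------------------------------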
converter = lite.TFLiteConverterV2.from_saved_model(save_dir) tflite_model = converter.convert() # Check values from converted model. expected_value = model.predict(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value, actual_value) @test_util.run_v2_only def testGraphDebugInfo(self): """Test a SavedModel has debug info captured.""" input_data = tf.constant(1., shape=[1]) root = tracking.AutoTrackable() root.f = tf.function(lambda x: 2. * x) to_save = root.f.get_concrete_function(input_data) options = save_options.SaveOptions(save_debug_info=True) save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(root, save_dir, to_save, options) # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_saved_model(save_dir) converter.convert() self._assertValidDebugInfo(converter._debug_info) @test_util.run_v2_only def testNonStatefulConvLSTM2D(self): """Test saved model with non stateful ConvLSTM2D keras layer.""" # Create keras model model = tf.keras.Sequential([ tf.keras.layers.ConvLSTM2D( 32, (3, 3), padding='same', return_sequences=True, stateful=False, batch_input_shape=(1, 1, 10, 10, 1)) ]) model.compile() # Export the keras model to saved model. saved_model_dir = os.path.join(self.get_temp_dir(), 'conv_lstm_2d') model.save(saved_model_dir, save_format='tf', include_optimizer=False) converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() self.assertTrue(tflite_model) @test_util.run_v2_only def testKerasConvLSTM2DWithMoreThanOneDilationRate(self): input_tensor = tf.keras.layers.Input( batch_size=8, shape=[9, 10, 11, 12], name='input_tensor', dtype=tf.float32) output = tf.keras.layers.ConvLSTM2D( filters=3, kernel_size=3, strides=1, padding='VALID', dilation_rate=2, use_bias=False, bias_initializer='ones', data_format='channels_last')( input_tensor) model = tf.keras.Model(inputs=[input_tensor], outputs=output) model.compile( optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) # Export the keras model to saved model. saved_model_dir = os.path.join(self.get_temp_dir(), 'conv_lstm_2d_with_dilation_rate') model.save(saved_model_dir, save_format='tf', include_optimizer=False) converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() self.assertTrue(tflite_model) def _createUnknownInputShapeModel(self): """Create a simple SavedModel with unknown input.""" saved_model_dir = os.path.join(self.get_temp_dir(), 'unknown_input_shape') with tf.Graph().as_default(): with tf.compat.v1.Session() as sess: unknown_shape = tf.TensorShape(None) in_tensor = tf.compat.v1.placeholder( shape=unknown_shape, dtype=tf.float32, name='input') out_tensor = in_tensor + in_tensor inputs = {'input': in_tensor} outputs = {'output': out_tensor} saved_model.simple_save(sess, saved_model_dir, inputs, outputs) return saved_model_dir @test_util.run_v2_only def testUnknownInputShapeModel(self): """Test a SavedModel with an unknown input shape.""" saved_model_dir = self._createUnknownInputShapeModel() converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir) tflite_model = converter.convert() self.assertTrue(tflite_model) # Check values from converted model. 
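    # -------------------------------------------------------------------------
    # [Editor's note] Aside (illustrative sketch, not part of the test): a
    # model converted with an unknown input shape must have its input resized
    # before tensor allocation:
    def _example_run_with_resized_input(tflite_model, input_data):
      interpreter = tf.lite.Interpreter(model_content=tflite_model)
      input_details = interpreter.get_input_details()
      interpreter.resize_tensor_input(
          input_details[0]['index'], input_data.shape, strict=False)
      interpreter.allocate_tensors()
      interpreter.set_tensor(input_details[0]['index'], input_data)
      interpreter.invoke()
      output_details = interpreter.get_output_details()
      return interpreter.get_tensor(output_details[0]['index'])
    # -------------------------------------------------------------------------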
interpreter = Interpreter(model_content=tflite_model) input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() input_data = np.array([1., 2., 3.], dtype=np.float32) interpreter.resize_tensor_input( input_details[0]['index'], [3], strict=False) interpreter.allocate_tensors() interpreter.set_tensor(input_details[0]['index'], input_data) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual([2., 4., 6.], list(actual_value)) @parameterized.named_parameters( ('_PerChannelQuant', False, False), ('_PerChannelMlirQuant', False, True), ('_PerTensorQuant', True, False), ('_PerTensorMlirQuant', True, True), ('_PerChannelDynamicRange', False, False, True), ('_PerTensorDynamicRange', True, False, True)) @test_util.run_v2_only def testDisablePerChannelQuantization(self, disable_per_channel=False, enable_mlir_quantizer=False, representative_dataset=True): # Dynamic range quant requires total num elements of filters > 1024. k_num_filters = 38 model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(k_num_filters, (3, 3), activation='relu') ]) model.build(input_shape=(1, 5, 5, 3)) saved_model_dir = os.path.join(self.get_temp_dir(), 'conv_saved_model') save(model, saved_model_dir) k_conv_name = 'sequential/conv2d/Conv2D1' quantized_converter = tf.lite.TFLiteConverter.from_saved_model( saved_model_dir) quantized_converter.optimizations = [lite.Optimize.DEFAULT] if representative_dataset: def calib_gen(): for _ in range(5): yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)] quantized_converter.representative_dataset = calib_gen quantized_converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS ] quantized_converter.experimental_new_quantizer = enable_mlir_quantizer if disable_per_channel: quantized_converter._experimental_disable_per_channel = ( disable_per_channel) quantized_tflite_model = quantized_converter.convert() self.assertIsNotNone(quantized_tflite_model) interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() detail = next((d for d in interpreter.get_tensor_details() if d['name'] == k_conv_name)) quant_params = detail['quantization_parameters'] expected_num_params = k_num_filters if disable_per_channel: expected_num_params = 1 self.assertLen(quant_params['scales'], expected_num_params) self.assertLen(quant_params['zero_points'], expected_num_params) @parameterized.named_parameters( ('_INT8Quant_INT32Bias', False, False, dtypes.int32, True), ('_INT16Quant_INT64Bias', True, False, dtypes.int64, True), ('_INT8Quant_INT32Bias_Set', False, True, dtypes.int32, True), ('_INT8Quant_INT64Bias_Set', False, True, dtypes.int64, False), ('_INT16Quant_INT32Bias_Set', True, True, dtypes.int32, True), ('_INT16Quant_INT64Bias_Set', True, True, dtypes.int64, True), ('_INT16Quant_FLOAT32Bias_Set', True, True, dtypes.float32, False), ) @test_util.run_v2_only def testBiasQuantization(self, is_int16_quantize, explicitly_set_bias, bias_type, is_valid_bias_type): model = tf.keras.models.Sequential([ tf.keras.layers.Dense( 1024, input_shape=[1024], activation=None, bias_initializer='ones') ]) saved_model_dir = os.path.join(self.get_temp_dir(), 'dense_saved_model') save(model, saved_model_dir) k_dense_bias_name = 'dense/bias' quantized_converter = tf.lite.TFLiteConverter.from_saved_model( saved_model_dir) quantized_converter.optimizations = [lite.Optimize.DEFAULT] if explicitly_set_bias: quantized_converter._experimental_full_integer_quantization_bias_type = 
bias_type if is_int16_quantize: quantized_converter.target_spec.supported_ops = [ lite.OpsSet .EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 ] else: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS_INT8 ] def calibration_gen(): for _ in range(5): yield [np.random.randn(1, 1024).astype(np.float32)] quantized_converter.representative_dataset = calibration_gen if not is_valid_bias_type: with self.assertRaisesRegex(ValueError, 'Expected bias type to be'): quantized_converter.convert() return quantized_tflite_model = quantized_converter.convert() self.assertIsNotNone(quantized_tflite_model) interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() dense_bias = next((d for d in interpreter.get_tensor_details() if d['name'] == k_dense_bias_name)) self.assertEqual(bias_type, dense_bias['dtype']) @parameterized.named_parameters( ('_Int8PerChannelMlirDynamicRangeQuant', True, False, False), ('_Int8PerChannelTocoDynamicRangeQuant', False, False, False), ('_Int8PerTensorMlirDynamicRangeQuant', True, True, False), ('_Int8PerTensorTocoDynamicRangeQuant', False, True, False), ('_Float16DynamicRangeQuant', True, False, True)) @test_util.run_v2_only def testMlirDynamicRangeQuantization(self, enable_new_dynamic_range_quantizer, disable_per_channel, test_float16): num_filters = 1024 conv_name = 'sequential/conv2d/Conv2D1' model = tf.keras.models.Sequential( [tf.keras.layers.Conv2D(num_filters, (3, 3), activation='relu')]) model.build(input_shape=(1, 32, 32, 3)) saved_model_dir = self.create_tempdir() save(model, saved_model_dir.full_path) converter = tf.lite.TFLiteConverter.from_saved_model( saved_model_dir.full_path) converter.optimizations = [lite.Optimize.DEFAULT] converter._experimental_new_dynamic_range_quantizer = ( enable_new_dynamic_range_quantizer) converter._experimental_disable_per_channel = disable_per_channel if test_float16: converter.target_spec.supported_types = [tf.float16] quantized_tflite_model = converter.convert() self.assertIsNotNone(quantized_tflite_model) interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() quantized_weight = next( d for d in interpreter.get_tensor_details() if d['name'] == conv_name) quant_params = quantized_weight['quantization_parameters'] if test_float16: expected_num_params = 0 else: expected_num_params = 1 if disable_per_channel else num_filters self.assertLen(quant_params['scales'], expected_num_params) self.assertLen(quant_params['zero_points'], expected_num_params) input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() self.assertEqual(np.float32, input_details[0]['dtype']) self.assertEqual(np.float32, output_details[0]['dtype']) if test_float16: self.assertEqual(np.float16, quantized_weight['dtype']) else: self.assertEqual(np.int8, quantized_weight['dtype']) class FromKerasModelTest(lite_v2_test_util.ModelTest): @test_util.run_v2_only def testSequentialModel(self): """Test a simple sequential tf.Keras model.""" input_data = tf.constant(1., shape=[1, 1]) # Create a simple Keras model. x = np.array([[1.], [2.]]) y = np.array([[2.], [4.]]) model = tf.keras.models.Sequential([ tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(units=1, input_shape=[1]) ]) model.compile(optimizer='sgd', loss='mean_squared_error') model.fit(x, y, epochs=1) # Convert model and ensure model is not None. 
converter = lite.TFLiteConverterV2.from_keras_model(model) tflite_model = converter.convert() # Check the conversion metadata. metadata = get_conversion_metadata(tflite_model) self.assertIsNotNone(metadata) self.assertEqual(metadata.environment.modelType, metadata_fb.ModelType.KERAS_MODEL) # Check values from converted model. expected_value = model.predict(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value, actual_value) @test_util.run_v2_only def testSequentialMultiInputOutputModel(self): """Test a tf.Keras model with multiple inputs and outputs.""" left_input_data = tf.constant(1., shape=[1, 3]) right_input_data = tf.constant(1., shape=[1, 3]) # Create a simple Keras model. input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 3)) output_c_np = np.random.random((10, 3)) output_d_np = np.random.random((10, 2)) input_a = tf.keras.layers.Input(shape=(3,), name='input_a') input_b = tf.keras.layers.Input(shape=(3,), name='input_b') dense = tf.keras.layers.Dense(8, name='dense_1') interm_a = dense(input_a) interm_b = dense(input_b) merged = tf.keras.layers.concatenate([interm_a, interm_b], name='merge') output_c = tf.keras.layers.Dense( 3, activation='softmax', name='dense_2')( merged) output_d = tf.keras.layers.Dense( 2, activation='softmax', name='dense_3')( merged) model = tf.keras.models.Model( inputs=[input_a, input_b], outputs=[output_c, output_d]) model.compile(optimizer='sgd', loss='mean_squared_error') model.fit([input_a_np, input_b_np], [output_c_np, output_d_np], epochs=1) # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_keras_model(model) tflite_model = converter.convert() # Check values from converted model. input_data = [left_input_data, right_input_data] expected_value = model.predict(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, input_data) for tf_result, tflite_result in zip(expected_value, actual_value): self.assertAllClose(tf_result, tflite_result, atol=1e-05) @test_util.run_v2_only def testGraphDebugInfo(self): """Test a tf.Keras model has debug info captured.""" # Create a simple Keras model. x = [-1, 0, 1, 2, 3, 4] y = [-3, -1, 1, 3, 5, 7] model = tf.keras.models.Sequential( [tf.keras.layers.Dense(units=1, input_shape=[1])]) model.compile(optimizer='sgd', loss='mean_squared_error') model.fit(x, y, epochs=1) converter = lite.TFLiteConverterV2.from_keras_model(model) converter.convert() self._assertValidDebugInfo(converter._debug_info) @test_util.run_v2_only def testKerasFallbackPath(self): """Test keras model which failed when exporting to the saved model.""" input_data = tf.constant( np.array(np.random.random_sample((20)), dtype=np.float32)) class Model(tf.keras.Model): def __init__(self): super(Model, self).__init__() # A None name will cause a failure in exporting to a saved model. self.shared_weights = self.add_weight( name=None, shape=(20, 1), dtype=tf.float32, initializer=tf.random_normal_initializer( mean=0.0, stddev=300**(-0.5))) def call(self, x): return tf.add(self.shared_weights, x) # Building the model. model = Model() model.compile(optimizer='sgd', loss='mean_squared_error') model.fit(input_data, input_data, epochs=1) # Convert model. 
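    # Exporting this model to a SavedModel fails because of the None weight
    # name above, so from_keras_model should take the converter's fallback
    # path instead.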
converter = lite.TFLiteConverterV2.from_keras_model(model) tflite_model = converter.convert() self.assertTrue(tflite_model) @test_util.run_v2_only def testSignatureDefs(self): """Test converting SignatureDef is correct and uses SignatureDef API.""" keras_model = tf.keras.Sequential([ tf.keras.layers.Conv2D( 32, kernel_size=3, padding='same', activation='relu', input_shape=(32, 32, 3), name='tensor'), tf.keras.layers.Dense(10, name='output_tensor') ]) converter = lite.TFLiteConverterV2.from_keras_model(keras_model) tflite_model = converter.convert() # Check values from converted model. input_data = tf.constant( np.random.uniform(-1, 1, size=(1, 32, 32, 3)).astype(np.float32)) expected_value = keras_model(input_data) interpreter = Interpreter(model_content=tflite_model) signature_defs = interpreter.get_signature_list() results = self._evaluateTFLiteModelUsingSignatureDef( tflite_model, 'serving_default', {'tensor_input': input_data}) self.assertEqual(list(results.keys()), ['output_tensor']) self.assertAllClose(expected_value.numpy(), results['output_tensor']) # Verify the SignatureDef structure returned is as expected. self.assertEqual(len(signature_defs), 1) self.assertEqual(list(signature_defs.keys()), ['serving_default']) self.assertEqual(len(signature_defs.values()), 1) self.assertEqual( list(signature_defs['serving_default'].keys()), ['inputs', 'outputs']) self.assertCountEqual(signature_defs['serving_default']['inputs'], ['tensor_input']) self.assertEqual( list(signature_defs['serving_default']['outputs']), ['output_tensor']) @parameterized.named_parameters( ('_PerChannelMlirDynamicRangeQuant', True, False, False), ('_PerChannelTocoDynamicRangeQuant', False, False, False), ('_PerTensorMlirDynamicRangeQuant', True, True, False), ('_PerTensorTocoDynamicRangeQuant', False, True, False), ('_Float16DynamicRangeQuant', True, False, True)) @test_util.run_v2_only def testMlirDynamicRangeQuantization(self, enable_new_dynamic_range_quantizer, disable_per_channel, test_float16): num_filters = 1024 conv_name = 'sequential/conv2d/Conv2D1' model = tf.keras.models.Sequential( [tf.keras.Input(shape=(32, 32, 3)), tf.keras.layers.Conv2D(num_filters, (3, 3), activation='relu')]) model.build() converter = lite.TFLiteConverterV2.from_keras_model(model) converter.optimizations = [lite.Optimize.DEFAULT] converter._experimental_new_dynamic_range_quantizer = ( enable_new_dynamic_range_quantizer) converter._experimental_disable_per_channel = disable_per_channel if test_float16: converter.target_spec.supported_types = [tf.float16] quantized_tflite_model = converter.convert() self.assertIsNotNone(quantized_tflite_model) interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() quantized_weight = next( d for d in interpreter.get_tensor_details() if d['name'] == conv_name) quant_params = quantized_weight['quantization_parameters'] if test_float16: expected_num_params = 0 else: expected_num_params = 1 if disable_per_channel else num_filters self.assertLen(quant_params['scales'], expected_num_params) self.assertLen(quant_params['zero_points'], expected_num_params) input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() self.assertEqual(np.float32, input_details[0]['dtype']) self.assertEqual(np.float32, output_details[0]['dtype']) if test_float16: self.assertEqual(np.float16, quantized_weight['dtype']) else: self.assertEqual(np.int8, quantized_weight['dtype']) @parameterized.named_parameters([ ('{}BitWeightOnly={}LowBit={}'.format(num_bits, 
weight_only, low_bit), num_bits, weight_only, low_bit) for num_bits, weight_only, low_bit in itertools.product((2, 4, 6), (True, False), (True, False))]) @test_util.run_v2_only def testQATLowBitKerasModel(self, num_bits, weight_only, low_bit): bit_max = (1 << (num_bits - 1)) - 1 bit_min = -bit_max tf_input_shape = (5, 5, 3) tflite_input_shape = (1,) + tf_input_shape model, input_name, output_name = (self._createV2QATLowBitKerasModel( tf_input_shape, weight_only, num_bits, bit_min, bit_max)) input_data = np.linspace( 0, 6, np.prod(tflite_input_shape)).reshape(tflite_input_shape) tf_result = model(input_data) converter = tf.lite.TFLiteConverter.from_keras_model(model) converter.optimizations = [tf.lite.Optimize.DEFAULT] if low_bit: converter._experimental_low_bit_qat = True tflite_model = converter.convert() result = self._evaluateTFLiteModelUsingSignatureDef( tflite_model, 'serving_default', {input_name: input_data.astype(np.float32)})[output_name] self.assertAllClose( [np.linalg.norm(result - tf_result.numpy().astype(np.float32))], [0.0]) interpreter = tf.lite.Interpreter(model_content=tflite_model) interpreter.allocate_tensors() num_8bit_activations = 0 num_8bit_weights = 0 kernel_name = ('model/conv_wrapper/Conv2D;model/conv_wrapper/' 'FakeQuantWithMinMaxVarsPerChannel') for detail in interpreter.get_tensor_details(): if (detail['dtype'] == np.int8 and detail['name'] and detail['name'] == kernel_name): num_8bit_weights += 1 weights = interpreter.get_tensor(detail['index']) if low_bit: self.assertFalse((bit_min > weights).any() or (weights > bit_max).any()) else: self.assertTrue((bit_min > weights).any() or (weights > bit_max).any()) self.assertIn('scales', detail['quantization_parameters']) if low_bit and detail['quantization_parameters']['scales']: self.assertAllClose( detail['quantization_parameters']['scales'], [1.0]) elif detail['dtype'] == np.int8 and detail['name']: self.assertFalse(weight_only) self.assertIn('scales', detail['quantization_parameters']) if detail['quantization_parameters']['scales']: self.assertAllClose( detail['quantization_parameters']['scales'], [6/255]) num_8bit_activations += 1 self.assertEqual(num_8bit_weights, 0 if weight_only and not low_bit else 1) # 3 activations with full integer: conv_input, conv_output, reshape_output self.assertEqual(num_8bit_activations, 0 if weight_only else 3) class FromJaxModelTest(lite_v2_test_util.ModelTest): @test_util.run_v2_only def testInvalidInputsModel(self): if DISABLE_JAX_TEST: return def simple_model(input1, input2): return jnp.sin(input1) + jnp.cos(input2) input_tensor = jnp.zeros([10, 10]) # Invalid case: not specify serving_func converter = lite.TFLiteConverterV2.experimental_from_jax( None, [{ 'input1': input_tensor }]) with self.assertRaisesRegex(ValueError, 'No serving func is specified.'): converter.convert() # Invalid case: not specify input converter = lite.TFLiteConverterV2.experimental_from_jax([simple_model], None) with self.assertRaisesRegex(ValueError, 'Input tensors are not specified.'): converter.convert() converter = lite.TFLiteConverterV2.experimental_from_jax([simple_model], []) with self.assertRaisesRegex(ValueError, 'Input tensors are not specified.'): converter.convert() # Invalid case: not wrap input_tensor in a list. converter = lite.TFLiteConverterV2.experimental_from_jax([simple_model], input_tensor) with self.assertRaisesRegex( ValueError, 'The truth value of an array with more than one element is ambiguous.'): converter.convert() # Invalid case: only partial inputs are provided. 
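    # Only 'input1' is mapped for the two-argument function, so lowering the
    # Jax function to HLO fails below.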
converter = lite.TFLiteConverterV2.experimental_from_jax( [simple_model], [[('input1', input_tensor)]]) with self.assertRaisesRegex( ValueError, 'Failed to convert the given Jax function to hlo.'): converter.convert() # Invalid case: serving functions length does not match input mapping. converter = lite.TFLiteConverterV2.experimental_from_jax( [simple_model, simple_model], [[ ('input1', input_tensor), ('input2', input_tensor), ]]) with self.assertRaisesRegex( ValueError, 'Input tensor mapping len 1 does not match serving func len 2.'): converter.convert() # Invalid case: multiple serving function is provided. converter = lite.TFLiteConverterV2.experimental_from_jax( [simple_model, simple_model], [[ ('input1', input_tensor), ('input2', input_tensor), ], [ ('input1', input_tensor), ('input2', input_tensor), ]]) with self.assertRaisesRegex( ValueError, 'Currently only support single serving function.'): converter.convert() @test_util.run_v2_only def testSingleInputModel(self): if DISABLE_JAX_TEST: return def single_input(input_tensor): return jnp.sin(input_tensor) # Convert model. input_tensor = jnp.zeros([10, 10]) converter = lite.TFLiteConverterV2.experimental_from_jax( [single_input], [[('input_tensor', input_tensor)]]) tflite_model = converter.convert() # Check the conversion metadata. metadata = get_conversion_metadata(tflite_model) self.assertIsNotNone(metadata) self.assertEqual(metadata.environment.modelType, metadata_fb.ModelType.JAX) # Check values from converted_model input_data = np.random.random_sample((10, 10)) tf_input_data = tf.constant(input_data, dtype=np.float32) actual_value = self._evaluateTFLiteModel(tflite_model, [tf_input_data])[0] expected_value = single_input(input_data) self.assertAllClose(expected_value, actual_value, atol=1e-05) @test_util.run_v2_only def testMultipleInputsModel(self): if DISABLE_JAX_TEST: return def multiple_inputs(input1, input2): return input1 + input2 # Convert model. input1 = jnp.zeros([10, 10]) input2 = jnp.zeros([10, 1]) converter = lite.TFLiteConverterV2.experimental_from_jax( [multiple_inputs], [[('input1', input1), ('input2', input2)]]) tflite_model = converter.convert() # Check values from converted_model input1_data = np.random.random_sample((10, 10)) tf_input1_data = tf.constant(input1_data, dtype=np.float32) input2_data = np.random.random_sample((10, 1)) tf_input2_data = tf.constant(input2_data, dtype=np.float32) actual_value = self._evaluateTFLiteModel( tflite_model, [tf_input1_data, tf_input2_data])[0] expected_value = multiple_inputs(input1_data, input2_data) self.assertAllClose(expected_value, actual_value, atol=1e-05) @test_util.run_v2_only def testInputSignaturesModel(self): if DISABLE_JAX_TEST: return def multiple_inputs(input1, input2): return input1 + input2 # Convert model. 
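    # Inputs are supplied as an ordered list of (name, tensor) pairs for the
    # single serving function.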
input1 = jnp.zeros([10, 10]) input2 = jnp.zeros([10, 1]) converter = lite.TFLiteConverterV2.experimental_from_jax( [multiple_inputs], [[('input1', input1), ('input2', input2)]]) tflite_model = converter.convert() # Check values from converted_model input1_data = np.random.random_sample((10, 10)) tf_input1_data = tf.constant(input1_data, dtype=np.float32) input2_data = np.random.random_sample((10, 1)) tf_input2_data = tf.constant(input2_data, dtype=np.float32) actual_value = self._evaluateTFLiteModel( tflite_model, [tf_input1_data, tf_input2_data])[0] expected_value = multiple_inputs(input1_data, input2_data) self.assertAllClose(expected_value, actual_value, atol=1e-05) @test_util.run_v2_only def testModelWithParams(self): if DISABLE_JAX_TEST: return def model(inputs, weights): return jnp.matmul(weights, inputs) weights = np.random.random_sample((10, 10)) serving_func = functools.partial(model, weights=weights) # Convert model input_tensor = jnp.zeros([10, 10]) converter = lite.TFLiteConverterV2.experimental_from_jax( [serving_func], [[('inputs', input_tensor)]]) tflite_model = converter.convert() # Check values from converted_model input_data = np.random.random_sample((10, 10)) tf_input_data = tf.constant(input_data, dtype=np.float32) actual_value = self._evaluateTFLiteModel(tflite_model, [tf_input_data])[0] expected_value = serving_func(input_data) self.assertAllClose(expected_value, actual_value, atol=1e-05) @test_util.run_v2_only def testWhileLoop(self): if DISABLE_JAX_TEST: return def condition(x): return jnp.sum(x, keepdims=False) < 100 def body(x): return jnp.add(x, 2.0) def model(x): result = jax.lax.while_loop(condition, body, x) return result[0] # Convert model. input_tensor = jnp.zeros([3, 3]) converter = lite.TFLiteConverterV2.experimental_from_jax( [model], [[('x', input_tensor)]]) tflite_model = converter.convert() # Check values from converted_model input_data = np.random.random_sample((3, 3)) tf_input_data = tf.constant(input_data, dtype=np.float32) actual_value = self._evaluateTFLiteModel(tflite_model, [tf_input_data])[0] expected_value = model(input_data) self.assertAllClose(expected_value, actual_value, atol=1e-05) class ControlFlowTest(lite_v2_test_util.ModelTest): @test_util.run_v2_only def testCond(self): input_data = { 'x': tf.constant([1., 2.], shape=[1, 2]), 'b': tf.constant(True) } weights = tf.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float32) def true_fn(x): return tf.matmul(x, weights) def false_fn(x): return tf.add(x, weights) @tf.function(input_signature=[ tf.TensorSpec(shape=[1, 2], dtype=tf.float32), tf.TensorSpec(shape=(), dtype=tf.bool) ]) def model(x, b): return tf.cond( b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x)) concrete_func = model.get_concrete_function() # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], model) tflite_model = converter.convert() # Check values from converted model. expected_value = concrete_func(**input_data) actual_value = self._evaluateTFLiteModel( tflite_model, [input_data['x'], input_data['b']])[0] self.assertAllClose(expected_value, actual_value) @test_util.run_v2_only def testConverterErrorOnControlFlowV1Ops(self): filename = resource_loader.get_path_to_datafile( 'testdata/control_flow_v1_saved_model') converter = lite.TFLiteConverterV2.from_saved_model(filename) with self.assertRaises(convert.ConverterError) as error: converter.convert() self.assertIn( 'Failed to functionalize Control Flow V1 ops. Consider using Control ' 'Flow V2 ops instead. 
See https://www.tensorflow.org/api_docs/python/' 'tf/compat/v1/enable_control_flow_v2.', str(error.exception)) @test_util.run_v2_only def testStaticRnn(self): input_data = tf.constant( np.array(np.random.random_sample((3, 10)), dtype=np.float32)) cell = tf.keras.layers.LSTMCell(10) @tf.function( input_signature=[tf.TensorSpec(shape=[3, 10], dtype=tf.float32)]) def model(x): seq = tf.split(x, 3, 0) return rnn.static_rnn(cell, seq, dtype=tf.float32, sequence_length=[1]) concrete_func = model.get_concrete_function() # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], model) tflite_model = converter.convert() # Check values from converted model. expected_value = concrete_func(input_data)[0] actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) for expected, actual in zip(expected_value, actual_value): self.assertAllClose(expected, actual) @test_util.run_v2_only def testWhileLoop(self): input_data = tf.constant([1., 2., 3., 4.], shape=[2, 2]) weights = tf.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float32) def condition(x): return tf.reduce_sum(x) < 100 def body(x): return tf.add(x, weights) @tf.function( input_signature=[tf.TensorSpec(shape=[2, 2], dtype=tf.float32)]) def model(x): return tf.while_loop(condition, body, [x]) concrete_func = model.get_concrete_function() # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], model) tflite_model = converter.convert() # Check values from converted model. expected_value = concrete_func(input_data)[0] actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0] self.assertAllClose(expected_value, actual_value) @test_util.run_v2_only def testDynamicRnn(self): input_data = tf.constant( np.array(np.random.random_sample((3, 10, 10)), dtype=np.float32)) cell = tf.keras.layers.LSTMCell(10) @tf.function( input_signature=[tf.TensorSpec(shape=[3, 10, 10], dtype=tf.float32)]) def model(x): rnn_layer = tf.keras.layers.RNN([cell], return_sequences=True) return rnn_layer(x) concrete_func = model.get_concrete_function() # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], model) tflite_model = converter.convert() # Check values from converted model. expected_value = concrete_func(input_data) lite_outputs = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertLen(lite_outputs, 1) actual_value = lite_outputs[0] for expected, actual in zip(expected_value, actual_value): self.assertAllClose(expected, actual) @parameterized.named_parameters( ('LSTMBatchSizeOne', tf.keras.layers.LSTM, True), ('LSTM', tf.keras.layers.LSTM, False), ('SimpleRNNBatchSizeOne', tf.keras.layers.SimpleRNN, True), ('SimpleRNN', tf.keras.layers.SimpleRNN, False), ('GRUBatchSizeOne', tf.keras.layers.GRU, True), ('GRU', tf.keras.layers.GRU, False)) @test_util.run_v2_only def testKerasRNN(self, rnn_layer, default_to_single_batch): input_data = tf.constant( np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32)) rnn_obj = rnn_layer(units=10, input_shape=(10, 10)) model = tf.keras.models.Sequential([ tf.keras.layers.Input(shape=(10, 10), name='input'), rnn_obj, ]) # Convert model. 
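    # Unless TensorList ops are lowered assuming batch size one, they stay as
    # TF ops and the converter needs Select TF ops enabled.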
converter = lite.TFLiteConverterV2.from_keras_model(model) converter._experimental_default_to_single_batch_in_tensor_list_ops = default_to_single_batch if not default_to_single_batch: converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0] # Check values from converted model. expected_value = model.predict(input_data) self.assertAllClose(expected_value, actual_value, atol=1e-05) @parameterized.named_parameters(('LSTM', tf.keras.layers.LSTM), ('SimpleRNN', tf.keras.layers.SimpleRNN), ('GRU', tf.keras.layers.GRU)) @test_util.run_v2_only def testKerasRNNMultiBatches(self, rnn_layer): input_data = tf.constant( np.array(np.random.random_sample((4, 10, 10)), dtype=np.float32)) # Specify a fixed batch size(4) for the test model. x = tf.keras.layers.Input(batch_shape=(4, 10, 10)) y = rnn_layer(units=10, input_shape=(10, 10))(x) model = tf.keras.Model(inputs=[x], outputs=[y]) # Convert model. converter = lite.TFLiteConverterV2.from_keras_model(model) tflite_model = converter.convert() actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0] # Check values from converted model. expected_value = model.predict(input_data) self.assertAllClose(expected_value, actual_value, atol=1e-05) @parameterized.named_parameters(('ForceToUseBatchSizeOne', True), ('DontForceToUseBatchSizeOne', False)) @test_util.run_v2_only def testKerasBidirectionalRNNReturnSequence(self, default_to_single_batch): input_data = tf.constant( np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32)) model = tf.keras.models.Sequential() model.add(tf.keras.layers.Input(shape=(10, 10), name='input')) model.add( tf.keras.layers.Bidirectional( tf.keras.layers.LSTM(units=10, return_sequences=True), input_shape=(10, 10))) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(5)) model.add(tf.keras.layers.Activation('softmax')) # Convert model. converter = lite.TFLiteConverterV2.from_keras_model(model) converter._experimental_default_to_single_batch_in_tensor_list_ops = default_to_single_batch if not default_to_single_batch: converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0] # Check values from converted model. expected_value = model.predict(input_data) self.assertAllClose(expected_value, actual_value, atol=1e-05) @parameterized.named_parameters(('ForceToUseBatchSizeOne', True), ('DontForceToUseBatchSizeOne', False)) @test_util.run_v2_only def testKerasBidirectionalRNN(self, default_to_single_batch): input_data = tf.constant( np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32)) model = tf.keras.models.Sequential() model.add(tf.keras.layers.Input(shape=(10, 10), name='input')) model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=10))) model.add(tf.keras.layers.Dense(5)) model.add(tf.keras.layers.Activation('softmax')) # Convert model. converter = lite.TFLiteConverterV2.from_keras_model(model) converter._experimental_default_to_single_batch_in_tensor_list_ops = default_to_single_batch if not default_to_single_batch: converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0] # Check values from converted model. 
    expected_value = model.predict(input_data)
    self.assertAllClose(expected_value, actual_value, atol=1e-05)


class GrapplerTest(lite_v2_test_util.ModelTest):

  @test_util.run_v2_only
  def testConstantFolding(self):
    # Constant folding handles the tf.broadcast_to operation which was not
    # supported by TFLite at the time this test was added.
    input_data = tf.constant([1., 2., 3., 4., 5., 6., 7., 8., 9.],
                             shape=[3, 3])

    @tf.function
    def func(x):
      y_const = tf.constant([1., 2., 3.])
      y_broadcast = tf.broadcast_to(y_const, [3, 3])
      return tf.matmul(x, y_broadcast)

    root = tracking.AutoTrackable()
    root.f = func
    concrete_func = root.f.get_concrete_function(input_data)

    # Convert model.
    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
                                                               root)
    tflite_model = converter.convert()

    # Check values from converted model.
    expected_value = root.f(input_data)
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
    self.assertAllClose(expected_value, actual_value)

    # Enable hybrid quantization; the result should be unchanged.
    converter.optimizations = [lite.Optimize.DEFAULT]
    tflite_model = converter.convert()
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
    self.assertAllClose(expected_value, actual_value)


class UnknownShapes(lite_v2_test_util.ModelTest):

  @test_util.run_v2_only
  def testMatMul(self):
    input_data = tf.constant(
        np.array(np.random.random_sample((10, 4)), dtype=np.float32))

    @tf.function(
        input_signature=[tf.TensorSpec(shape=[None, 4], dtype=tf.float32)])
    def model(in_tensor):
      shape = tf.shape(in_tensor)
      fill = tf.transpose(tf.fill(shape, 1.))
      return tf.matmul(fill, in_tensor)

    concrete_func = model.get_concrete_function()

    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
                                                               model)
    tflite_model = converter.convert()

    # Check values from converted model.
    expected_value = concrete_func(input_data)
    actual_value = self._evaluateTFLiteModel(
        tflite_model, [input_data], input_shapes=[([-1, 4], [10, 4])])[0]
    self.assertAllClose(expected_value, actual_value, atol=1e-06)

  def _getIntegerQuantizeModelWithUnknownShapes(self):
    np.random.seed(0)

    @tf.function(
        input_signature=[tf.TensorSpec(shape=[None, 33], dtype=tf.float32)])
    def model(input_tensor):
      """Define a model with tf.MatMul and unknown shapes."""
      # We need the tensor to have more than 1024 elements for quantize_weights
      # to kick in. Thus, the [33, 33] shape.
      const_tensor = tf.constant(
          np.random.uniform(low=-10., high=10., size=[33, 33]),
          shape=[33, 33],
          dtype=tf.float32,
          name='inputB')

      shape = tf.shape(input_tensor)
      fill = tf.transpose(tf.fill(shape, 1.))
      mult = tf.matmul(fill, input_tensor)
      return tf.matmul(mult, const_tensor)

    root = tracking.AutoTrackable()
    root.f = model
    concrete_func = root.f.get_concrete_function()

    def calibration_gen():
      for batch in range(5, 20, 5):
        for _ in range(5):
          yield [np.random.uniform(-1, 1, size=(batch, 33)).astype(np.float32)]

    return root, concrete_func, calibration_gen

  @test_util.run_v2_only
  def testMatMulQuantize(self):
    root, concrete_func, _ = self._getIntegerQuantizeModelWithUnknownShapes()
    float_converter = lite.TFLiteConverterV2.from_concrete_functions(
        [concrete_func], root)
    float_tflite_model = float_converter.convert()

    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(
        [concrete_func], root)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_tflite_model = quantized_converter.convert()

    # The default input and output types should be float.
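    # No representative dataset is set, so only the weights are quantized
    # (dynamic range) and the model interface stays float32.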
quantized_interpreter = Interpreter(model_content=quantized_tflite_model) quantized_interpreter.allocate_tensors() input_details = quantized_interpreter.get_input_details() self.assertLen(input_details, 1) self.assertEqual(np.float32, input_details[0]['dtype']) self.assertAllEqual([-1, 33], input_details[0]['shape_signature']) # Ensure that the quantized weights tflite model is smaller. self.assertLess(len(quantized_tflite_model), len(float_tflite_model)) @test_util.run_v2_only def testMatMulCalibrateAndQuantize(self): root, concrete_func, calibration_gen = ( self._getIntegerQuantizeModelWithUnknownShapes()) float_converter = lite.TFLiteConverterV2.from_concrete_functions( [concrete_func], root) float_tflite_model = float_converter.convert() quantized_converter = lite.TFLiteConverterV2.from_concrete_functions( [concrete_func], root) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_converter.representative_dataset = calibration_gen quantized_tflite_model = quantized_converter.convert() # The default input and output types should be float. quantized_interpreter = Interpreter(model_content=quantized_tflite_model) quantized_interpreter.allocate_tensors() input_details = quantized_interpreter.get_input_details() self.assertLen(input_details, 1) self.assertEqual(np.float32, input_details[0]['dtype']) self.assertAllEqual([-1, 33], input_details[0]['shape_signature']) # Ensure that the quantized weights tflite model is smaller. self.assertLess(len(quantized_tflite_model), len(float_tflite_model)) def testBatchMatMul(self): input_data_1 = tf.constant( np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32)) input_data_2 = tf.constant( np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32)) @tf.function(input_signature=[ tf.TensorSpec(shape=[None, 256, 256], dtype=tf.float32), tf.TensorSpec(shape=[None, 256, 256], dtype=tf.float32) ]) def model(in_tensor_1, in_tensor_2): return tf.matmul(in_tensor_1, in_tensor_2) concrete_func = model.get_concrete_function() converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], model) tflite_model = converter.convert() # Check values from converted model. expected_value = concrete_func(input_data_1, input_data_2) actual_value = self._evaluateTFLiteModel( tflite_model, [input_data_1, input_data_2], input_shapes=[([-1, 256, 256], [1, 256, 256])])[0] self.assertAllClose(expected_value, actual_value, atol=4) def testSizeInvalid(self): @tf.function(input_signature=[ tf.TensorSpec(shape=[1, None, 16, 3], dtype=tf.float32) ]) def model(in_tensor): return in_tensor + in_tensor concrete_func = model.get_concrete_function() # Test invalid shape. None after 1st dimension. Run with TOCO in order to # invoke shape checking code. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], model) converter.experimental_new_converter = False with self.assertRaises(ValueError) as error: converter.convert() self.assertEqual( 'None is only supported in the 1st dimension. 
Tensor ' '\'in_tensor\' has invalid shape \'[1, None, 16, 3]\'.', str(error.exception)) class ResourceAndVariantTypes(lite_v2_test_util.ModelTest): @test_util.run_v2_only def testVariants(self): @tf.function(input_signature=[tf.TensorSpec(shape=[1], dtype=tf.float32)]) def model(v): m = map_ops.empty_tensor_map() k = tf.constant(1.0) p = tf.add(k, v) with ops.control_dependencies([m]): m2 = map_ops.tensor_map_insert(m, p, v) with ops.control_dependencies([m2]): return map_ops.tensor_map_size(m2) concrete_func = model.get_concrete_function() converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], model) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() self.assertIsNotNone(tflite_model) # Check values from converted model. interpreter = Interpreter(model_content=tflite_model) input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() interpreter.allocate_tensors() input_data = np.array([1.0], dtype=np.float32) interpreter.set_tensor(input_details[0]['index'], input_data) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(1, actual_value) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(1, actual_value) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(1, actual_value) @test_util.run_v2_only def testVariantsWithCond(self): def create_v1_saved_model(): saved_model_dir = os.path.join(self.get_temp_dir(), 'variants_with_cond') with tf.Graph().as_default(): with tf.compat.v1.Session() as sess: m = map_ops.empty_tensor_map() def body(i, m): m = map_ops.tensor_map_insert(m, i, i) return i + 1, m in_tensor = tf.compat.v1.placeholder( shape=[1], dtype=tf.int32, name='input') _, result_m = tf.cond(in_tensor < 10, lambda: body(in_tensor, m), lambda: body(in_tensor + 1, m)) out_tensor = in_tensor + map_ops.tensor_map_size(result_m) inputs = {'x': in_tensor} outputs = {'z': out_tensor} saved_model.simple_save(sess, saved_model_dir, inputs, outputs) return saved_model_dir saved_model_dir = create_v1_saved_model() converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() self.assertIsNotNone(tflite_model) # Check values from converted model. 
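    # Either cond branch inserts exactly one map entry, so the output is
    # input + tensor_map_size = 0 + 1 = 1.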
interpreter = Interpreter(model_content=tflite_model) input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() interpreter.allocate_tensors() input_data = np.array([0], dtype=np.int32) interpreter.set_tensor(input_details[0]['index'], input_data) interpreter.invoke() expected_value = np.array([1], dtype=np.int32) actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(expected_value, actual_value) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(expected_value, actual_value) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(expected_value, actual_value) @test_util.run_v2_only def testVariantsWithWhile(self): def create_v1_saved_model(): saved_model_dir = os.path.join(self.get_temp_dir(), 'variants_with_while') with tf.Graph().as_default(): with tf.compat.v1.Session() as sess: m = map_ops.empty_tensor_map() def cond(i, m): del m return i < 10 def body(i, m): m = map_ops.tensor_map_insert(m, i, i) return i + 1, m _, result_m = tf.while_loop(cond, body, [0, m]) in_tensor = tf.compat.v1.placeholder( shape=[1], dtype=tf.int32, name='input') out_tensor = in_tensor + map_ops.tensor_map_size(result_m) inputs = {'x': in_tensor} outputs = {'z': out_tensor} saved_model.simple_save(sess, saved_model_dir, inputs, outputs) return saved_model_dir saved_model_dir = create_v1_saved_model() converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() self.assertIsNotNone(tflite_model) # Check values from converted model. interpreter = Interpreter(model_content=tflite_model) input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() interpreter.allocate_tensors() input_data = np.array([0], dtype=np.int32) interpreter.set_tensor(input_details[0]['index'], input_data) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(10, actual_value) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(10, actual_value) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(10, actual_value) @test_util.run_v2_only def testResources(self): def create_v1_saved_model(): saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_resources') with tf.Graph().as_default(): with tf.compat.v1.Session() as sess: in_tensor = tf.compat.v1.placeholder( shape=[1], dtype=tf.float32, name='input') stack = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32) w = tf.raw_ops.StackPushV2(handle=stack, elem=in_tensor) with ops.control_dependencies([w]): a = in_tensor + in_tensor with ops.control_dependencies([a]): out_tensor = a + tf.raw_ops.StackPopV2( handle=stack, elem_type=tf.float32) inputs = {'x': in_tensor} outputs = {'z': out_tensor} saved_model.simple_save(sess, saved_model_dir, inputs, outputs) return saved_model_dir saved_model_dir = create_v1_saved_model() converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() self.assertIsNotNone(tflite_model) # Check values from converted model. 
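    # For x = 1.0: push 1.0 onto the stack, compute a = x + x = 2.0, then
    # output = a + popped value = 3.0.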
interpreter = Interpreter(model_content=tflite_model) input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() interpreter.allocate_tensors() input_data = np.array([1.0], dtype=np.float32) interpreter.set_tensor(input_details[0]['index'], input_data) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(3.0, actual_value) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(3.0, actual_value) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(3.0, actual_value) @test_util.run_v2_only def testResourcesWithCond(self): def create_v1_saved_model(): saved_model_dir = os.path.join(self.get_temp_dir(), 'resources_with_cond') with tf.Graph().as_default(): with tf.compat.v1.Session() as sess: in_tensor = tf.compat.v1.placeholder( shape=[1], dtype=tf.float32, name='input') def body(i, arr): n = tf.raw_ops.StackPushV2( handle=arr, elem=tf.cast(i, dtype=tf.float32)) return n, arr arr = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32) n, result_arr = tf.cond(in_tensor < 10, lambda: body(0, arr), lambda: body(1, arr)) with ops.control_dependencies([result_arr, n]): out_tensor = tf.raw_ops.StackPopV2( handle=result_arr, elem_type=tf.float32) inputs = {'x': in_tensor} outputs = {'a': out_tensor} saved_model.simple_save(sess, saved_model_dir, inputs, outputs) return saved_model_dir saved_model_dir = create_v1_saved_model() converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() self.assertIsNotNone(tflite_model) # Check values from converted model. interpreter = Interpreter(model_content=tflite_model) input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() interpreter.allocate_tensors() input_data = np.array([1.0], dtype=np.float32) interpreter.set_tensor(input_details[0]['index'], input_data) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(0.0, actual_value) @test_util.run_v2_only def testResourcesWithWhile(self): def create_v1_saved_model(): saved_model_dir = os.path.join(self.get_temp_dir(), 'resources_with_while') with tf.Graph().as_default(): with tf.compat.v1.Session() as sess: in_tensor = tf.compat.v1.placeholder( shape=[1], dtype=tf.float32, name='input') def cond(i, arr, m): del arr del m return i < 10 def body(i, arr, m): del m n = tf.raw_ops.StackPushV2( handle=arr, elem=tf.cast(i, dtype=tf.float32)) return i + 1, arr, n arr = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32) _, result_arr, n = tf.while_loop(cond, body, [0, arr, 0.0]) with ops.control_dependencies([result_arr, n]): out_tensor = tf.raw_ops.StackPopV2( handle=result_arr, elem_type=tf.float32) inputs = {'x': in_tensor} outputs = {'a': out_tensor} saved_model.simple_save(sess, saved_model_dir, inputs, outputs) return saved_model_dir saved_model_dir = create_v1_saved_model() converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() self.assertIsNotNone(tflite_model) # Check values from converted model. 
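    # The while loop pushes 0.0 through 9.0 onto the stack, so the final
    # StackPopV2 returns the last pushed value, 9.0.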
interpreter = Interpreter(model_content=tflite_model) input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() interpreter.allocate_tensors() input_data = np.array([1.0], dtype=np.float32) interpreter.set_tensor(input_details[0]['index'], input_data) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(9.0, actual_value) @parameterized.named_parameters(('EnableLoweringTensorListOps', True), ('DisableLoweringTensorListOps', False)) @test_util.run_v2_only def testTensorListWithStaticSize(self, lower_tensor_list_ops): def create_v1_saved_model(): saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_mutable_variable') with tf.Graph().as_default(): with tf.compat.v1.Session() as sess: in_tensor = tf.compat.v1.placeholder( shape=[1], dtype=tf.float32, name='input') ta = tf.TensorArray( tf.float32, size=3, dynamic_size=False, clear_after_read=False) ta = ta.write(0, 10.0) ta = ta.write(1, 20.0) ta = ta.write(2, 30.0) out_tensor = ta.read(0) + ta.read(2) inputs = {'x': in_tensor} outputs = {'z': out_tensor} saved_model.simple_save(sess, saved_model_dir, inputs, outputs) return saved_model_dir saved_model_dir = create_v1_saved_model() converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir) if not lower_tensor_list_ops: converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] converter._experimental_lower_tensor_list_ops = lower_tensor_list_ops tflite_model = converter.convert() self.assertIsNotNone(tflite_model) # Check values from converted model. interpreter = Interpreter(model_content=tflite_model) input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() interpreter.allocate_tensors() input_data = np.array([1.0], dtype=np.float32) interpreter.set_tensor(input_details[0]['index'], input_data) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(40.0, actual_value) @parameterized.named_parameters(('EnableLoweringTensorListOps', True), ('DisableLoweringTensorListOps', False)) @test_util.run_v2_only def testTensorListWithDynamicSize(self, lower_tensor_list_ops): def create_v1_saved_model(): saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_mutable_variable') with tf.Graph().as_default(): with tf.compat.v1.Session() as sess: in_tensor = tf.compat.v1.placeholder( shape=[1], dtype=tf.float32, name='input') ta = tf.TensorArray( tf.float32, size=0, dynamic_size=True, clear_after_read=False) ta = ta.write(0, 10.0) ta = ta.write(1, 20.0) ta = ta.write(2, 30.0) out_tensor = ta.read(0) + ta.read(2) inputs = {'x': in_tensor} outputs = {'z': out_tensor} saved_model.simple_save(sess, saved_model_dir, inputs, outputs) return saved_model_dir saved_model_dir = create_v1_saved_model() converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir) if lower_tensor_list_ops: with self.assertRaises(convert.ConverterError) as error: converter.convert() self.assertIn( 'Lowering tensor list ops is failed. Please consider using Select ' 'TF ops and disabling `_experimental_lower_tensor_list_ops` flag in ' 'the TFLite converter object.', str(error.exception)) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() self.assertIsNotNone(tflite_model) # Check values from converted model. 
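    # The output is ta.read(0) + ta.read(2) = 10.0 + 30.0 = 40.0, matching the
    # static-size case above.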
interpreter = Interpreter(model_content=tflite_model) input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() interpreter.allocate_tensors() input_data = np.array([1.0], dtype=np.float32) interpreter.set_tensor(input_details[0]['index'], input_data) interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(40.0, actual_value) class CalibrateAndQuantizeWithCustomOpTest(lite_v2_test_util.ModelTest): def _createGraphWithCustomOp(self): # Create a graph that has one double op. np.random.seed(0) saved_model_dir = os.path.join(self.get_temp_dir(), 'double_model') with ops.Graph().as_default(): with tf.compat.v1.Session() as sess: in_tensor = tf.compat.v1.placeholder( shape=[1, 4], dtype=dtypes.float32, name='input') out_tensor = double_op.double(in_tensor) inputs = {'x': in_tensor} outputs = {'z': out_tensor} saved_model.simple_save(sess, saved_model_dir, inputs, outputs) def calibration_gen(): for _ in range(100): yield [np.random.uniform(-1, 1, size=(1, 4)).astype(np.float32)] return (saved_model_dir, calibration_gen) def testCustomOpRegistererByName(self): """Test a calibration with custom op registered by name.""" saved_model_dir, calibration_gen = self._createGraphWithCustomOp() converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir) converter.optimizations = [lite.Optimize.DEFAULT] converter.representative_dataset = calibration_gen converter.allow_custom_ops = True converter.target_spec._experimental_custom_op_registerers = [ 'TF_TestRegisterer' ] tflite_model = converter.convert() self.assertTrue(tflite_model) self.assertGreater(test_registerer.get_num_test_registerer_calls(), 0) self.assertIn('Double', tflite_test_util.get_ops_list(tflite_model)) # Check the conversion metadata. metadata = get_conversion_metadata(tflite_model) self.assertIsNotNone(metadata) self.assertEqual(metadata.options.allowCustomOps, True) # Check the model works with custom ops. interpreter = InterpreterWithCustomOps( model_content=tflite_model, custom_op_registerers=['TF_TestRegisterer']) interpreter.allocate_tensors() input_details = interpreter.get_input_details() test_input = np.array([[0.0, 0.1, 0.2, 0.3]], dtype=np.float32) interpreter.set_tensor(input_details[0]['index'], test_input) interpreter.invoke() output_details = interpreter.get_output_details() expected_output = np.array([[0.0, 0.2, 0.4, 0.6]], dtype=np.float32) output_data = interpreter.get_tensor(output_details[0]['index']) self.assertArrayNear(expected_output[0], output_data[0], err=1e-2) def testCustomOpRegistererByFunc(self): """Test a calibration with custom op registered by function.""" saved_model_dir, calibration_gen = self._createGraphWithCustomOp() converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir) converter.optimizations = [lite.Optimize.DEFAULT] converter.representative_dataset = calibration_gen converter.allow_custom_ops = True converter.target_spec._experimental_custom_op_registerers = [ test_registerer.TF_TestRegisterer ] tflite_model = converter.convert() self.assertTrue(tflite_model) self.assertGreater(test_registerer.get_num_test_registerer_calls(), 0) self.assertIn('Double', tflite_test_util.get_ops_list(tflite_model)) # Check the model works with custom ops. 
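    # The custom Double op doubles every element:
    # [0.0, 0.1, 0.2, 0.3] -> [0.0, 0.2, 0.4, 0.6].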
    interpreter = InterpreterWithCustomOps(
        model_content=tflite_model,
        custom_op_registerers=[test_registerer.TF_TestRegisterer])
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    test_input = np.array([[0.0, 0.1, 0.2, 0.3]], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], test_input)
    interpreter.invoke()

    output_details = interpreter.get_output_details()
    expected_output = np.array([[0.0, 0.2, 0.4, 0.6]], dtype=np.float32)
    output_data = interpreter.get_tensor(output_details[0]['index'])
    self.assertArrayNear(expected_output[0], output_data[0], err=1e-2)

  def testCustomOpRegistererFailure(self):
    """Test a calibration with a wrong custom op registerer."""
    saved_model_dir, calibration_gen = self._createGraphWithCustomOp()

    bogus_name = 'CompletelyBogusRegistererName'

    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    converter.optimizations = [lite.Optimize.DEFAULT]
    converter.representative_dataset = calibration_gen
    converter.allow_custom_ops = True
    converter.target_spec._experimental_custom_op_registerers = [bogus_name]

    with self.assertRaisesRegex(
        ValueError, 'Looking up symbol \'' + bogus_name + '\' failed'):
      converter.convert()


class IntermediatesTest(lite_v2_test_util.ModelTest):

  def _run(self, experimental_preserve_all_tensors):

    @tf.function
    def f(x):
      y = tf.add(x, x, name='y')
      z = tf.add(y, y, name='z')
      w = tf.add(z, z, name='w')
      return w

    # NOTE: the input is exactly representable as a float, as are the
    # intermediates of f, so direct comparison is ok below.
    input_data = np.array(2.0, np.float32)
    concrete_func = f.get_concrete_function(input_data)
    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
                                                               f)
    tflite_model = converter.convert()
    interpreter = Interpreter(
        model_content=tflite_model,
        experimental_preserve_all_tensors=experimental_preserve_all_tensors)
    interpreter.allocate_tensors()
    interpreter.set_tensor(interpreter.get_input_details()[0]['index'],
                           input_data)
    interpreter.invoke()
    out = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])
    tensors = {}
    for t in interpreter.get_tensor_details():
      # With the TensorFlow Lite default delegate applied to the model graph,
      # accessing the original tensors of a delegated op can raise a ValueError
      # (i.e. 'Tensor data is null. Run allocate_tensors() first') because the
      # tensor memory isn't allocated at all.
      val = None
      try:
        val = interpreter.get_tensor(t['index'])
      except ValueError:
        pass
      tensors.update({t['name']: val})
    return (tensors, out)

  def testPreserve(self):
    tensors, result = self._run(experimental_preserve_all_tensors=True)
    # All intermediates and the result should hold their correct values.
    self.assertAllClose(tensors['x'], 2.0)
    self.assertAllClose(tensors['y'], 4.0)
    self.assertAllClose(tensors['z'], 8.0)
    self.assertAllClose(result, 16.0)

  def testNoPreserve(self):
    tensors, result = self._run(experimental_preserve_all_tensors=False)
    # Without preservation at least one intermediate will be stale, but the
    # result should be correct and the input should stay valid for repeated
    # invocation.
self.assertAllClose(tensors['x'], 2.0) self.assertTrue(tensors['y'] != 4.0 or tensors['z'] != 8.0) self.assertAllClose(result, 16.0) class DatasetOpsTest(lite_v2_test_util.ModelTest): @test_util.run_v2_only def testReduceDataset(self): @tf.function def model(): dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4]) output = dataset.reduce(np.int32(0), lambda x, y: x + y) return output concrete_func = model.get_concrete_function() converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], model) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() self.assertIsNotNone(tflite_model) # Check values from converted model. interpreter = Interpreter(model_content=tflite_model) output_details = interpreter.get_output_details() interpreter.allocate_tensors() interpreter.invoke() actual_value = interpreter.get_tensor(output_details[0]['index']) self.assertEqual(10, actual_value) class SparsityTest(lite_v2_test_util.ModelTest): def _getSparsificableModel(self, matrix_b_values): np.random.seed(0) root = tracking.AutoTrackable() @tf.function( input_signature=[tf.TensorSpec(shape=[16, 4], dtype=tf.float32)]) def func(inp): matrix_b = tf.constant(matrix_b_values, dtype=tf.float32) matrix_b = tf.reshape(matrix_b, [4, 8]) matmul = tf.matmul(inp, matrix_b, transpose_a=False, transpose_b=False) output = tf.nn.relu(matmul, name='output') return output root.f = func to_save = root.f.get_concrete_function() return (root, to_save) def testRandomSparsity(self): matrix_b_values = [ 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ] root, func = self._getSparsificableModel(matrix_b_values) float_converter = lite.TFLiteConverterV2.from_concrete_functions([func], root) float_converter.optimizations = [lite.Optimize.EXPERIMENTAL_SPARSITY] float_tflite_model = float_converter.convert() self.assertIsNotNone(float_tflite_model) # Check the conversion metadata. metadata = get_conversion_metadata(float_tflite_model) self.assertIsNotNone(metadata) self.assertAllEqual([metadata_fb.ModelOptimizationMode.RANDOM_SPARSITY], metadata.options.modelOptimizationModes) def testBlockSparsity(self): matrix_b_values = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0 ] root, func = self._getSparsificableModel(matrix_b_values) float_converter = lite.TFLiteConverterV2.from_concrete_functions([func], root) float_converter.optimizations = [lite.Optimize.EXPERIMENTAL_SPARSITY] float_tflite_model = float_converter.convert() self.assertIsNotNone(float_tflite_model) # Check the conversion metadata. metadata = get_conversion_metadata(float_tflite_model) self.assertIsNotNone(metadata) self.assertAllEqual([metadata_fb.ModelOptimizationMode.BLOCK_SPARSITY], metadata.options.modelOptimizationModes) if __name__ == '__main__': test.main()
[ "tensorflow.lite.python.schema_py_generated.ModelT.InitFromObj", "tensorflow.control_dependencies", "tensorflow.reduce_sum", "tensorflow.raw_ops.LookupTableFindV2", "tensorflow.python.saved_model.saved_model.simple_save", "tensorflow.compat.v1.saved_model.signature_def_utils.build_signature_def", "numpy.full", "tensorflow.python.saved_model.loader_impl.parse_saved_model", "tensorflow.quantization.fake_quant_with_min_max_args", "tensorflow.keras.layers.LSTMCell", "tensorflow.random_normal_initializer", "tensorflow.keras.layers.Flatten", "tensorflow.math.erf", "tensorflow.matmul", "tensorflow.TensorShape", "tensorflow.python.saved_model.save.save", "tensorflow.keras.Model", "tensorflow.function", "tensorflow.raw_ops.StackPopV2", "tensorflow.split", "numpy.array", "tensorflow.python.ops.map_ops.tensor_map_insert", "tensorflow.raw_ops.StackV2", "tensorflow.ones", "tensorflow.keras.layers.Dropout", "numpy.random.uniform", "tensorflow.lite.python.interpreter.InterpreterWithCustomOps", "tensorflow.math.add", "tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model", "tensorflow.lite.python.interpreter.Interpreter", "numpy.random.random_sample", "tensorflow.lite.python.lite.TFLiteConverterV2", "tensorflow.python.ops.map_ops.empty_tensor_map", "tensorflow.Graph", "tensorflow.lite.TFLiteConverter.from_keras_model", "tensorflow.lite.python.util.get_conversion_metadata", "tensorflow.lite.TFLiteConverter.from_concrete_functions", "tensorflow.gather", "tensorflow.python.platform.test.main", "tensorflow.lite.TFLiteConverter.from_saved_model", "tensorflow.TensorArray", "tensorflow.keras.layers.Dense", "tensorflow.python.training.tracking.tracking.AutoTrackable", "tensorflow.random.uniform", "tensorflow.nn.tanh", "tensorflow.lookup.KeyValueTensorInitializer", "tensorflow.compat.v1.disable_eager_execution", "tensorflow.broadcast_to", "tensorflow.compat.v1.variables_initializer", "numpy.ones", "tensorflow.keras.layers.LSTM", "tensorflow.lite.python.testdata.double_op.double", "tensorflow.keras.layers.Input", "tensorflow.cast", "tensorflow.compat.v1.enable_eager_execution", "tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model", "numpy.mean", "tensorflow.keras.layers.ConvLSTM2D", "tensorflow.compat.v1.tables_initializer", "tensorflow.math.tanh", "tensorflow.while_loop", "tensorflow.Variable", "tensorflow.quantization.fake_quant_with_min_max_vars", "tensorflow.raw_ops.LookupTableInsertV2", "tensorflow.lite.python.test_util.get_ops_list", "tensorflow.keras.models.Sequential", "tensorflow.python.platform.resource_loader.get_path_to_datafile", "tensorflow.keras.models.Model", "tensorflow.shape", "tensorflow.python.ops.map_ops.tensor_map_size", "tensorflow.compat.v1.initializers.global_variables", "tensorflow.keras.initializers.Constant", "tensorflow.raw_ops.MutableHashTableV2", "tensorflow.constant", "tensorflow.keras.layers.RNN", "tensorflow.lite.python.schema_py_generated.Model.GetRootAsModel", "tensorflow.reshape", "tensorflow.python.framework.ops.Graph", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.placeholder", "tensorflow.raw_ops.StackPushV2", "tensorflow.compat.v1.reset_default_graph", "tensorflow.lite.python.testdata._pywrap_test_registerer.get_num_test_registerer_calls", "numpy.random.randn", "tensorflow.lite.python.lite.TFLiteConverterV2.experimental_from_jax", "tensorflow.python.saved_model.save_options.SaveOptions", "tensorflow.keras.Input", "tensorflow.lite.Interpreter", "tensorflow.keras.layers.Conv2D", "tensorflow.compat.v1.saved_model.builder.SavedModelBuilder", 
"tensorflow.math.ceil", "tensorflow.add", "tensorflow.compat.v1.saved_model.utils.build_tensor_info", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.fill", "tensorflow.python.ops.rnn.static_rnn", "tensorflow.lite.python.util._convert_model_from_bytearray_to_object", "tensorflow.compat.v1.image.resize_bilinear", "tensorflow.keras.layers.Reshape", "tensorflow.nn.relu", "numpy.random.random", "tensorflow.keras.layers.Activation", "numpy.random.seed", "tensorflow.python.lib.io.file_io.FileIO", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.lite.python.convert.mlir_quantize", "numpy.int32", "tensorflow.keras.layers.concatenate", "tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions", "numpy.prod", "tensorflow.TensorSpec" ]
tensorflow/lite/python/lite_v2_test.py
[(102, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_INT8InputOutput', dtypes.int8)", "('_UINT8InputOutput', dtypes.uint8)", "('_INT16InputOutput', dtypes.int16)"], {}), False, 'from absl.testing import parameterized\n'), (258, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('EnableMlirQuantizer', True)", "('DisableMlirQuantizer', False)"], {}), False, 'from absl.testing import parameterized\n'), (306, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_INT8InputOutput', dtypes.int8)", "('_UINT8InputOutput', dtypes.uint8)", "('_INT16InputOutput', dtypes.int16)"], {}), False, 'from absl.testing import parameterized\n'), (331, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('EnableMlirQuantizer', True)", "('DisableMlirQuantizer', False)"], {}), False, 'from absl.testing import parameterized\n'), (355, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_Default', False, False, dtypes.float32)", "('_INT8InputOutput', False, False, dtypes.int8)", "('_UINT8InputOutput', False, False, dtypes.uint8)", "('_INT16Quantize', False, True, dtypes.float32)", "('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16)", "('_IntOnly', True, False, dtypes.float32)", "('_IntOnly_INT8InputOutput', True, False, dtypes.int8)", "('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8)", "('_IntOnly_INT16Quantize', True, True, dtypes.float32)", "('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16)"], {}), False, 'from absl.testing import parameterized\n'), (424, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_INT16Quantize_INT8InputOutput', True, dtypes.int8)"], {}), False, 'from absl.testing import parameterized\n'), (604, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_DefaultFLOAT32InputOutput', dtypes.float32)", "('_INT8InputOutput', dtypes.int8)", "('_UINT8InputOutput', dtypes.uint8)"], {}), False, 'from absl.testing import parameterized\n'), (749, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_Default', False, False, dtypes.float32)", "('_INT8InputOutput', False, False, dtypes.int8)", "('_UINT8InputOutput', False, False, dtypes.uint8)", "('_INT16Quantize', False, True, dtypes.float32)", "('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16)", "('_IntOnly', True, False, dtypes.float32)", "('_IntOnly_INT8InputOutput', True, False, dtypes.int8)", "('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8)", "('_IntOnly_INT16Quantize', True, True, dtypes.float32)", "('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16)"], {}), False, 'from absl.testing import parameterized\n'), (846, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_INT8InputOutput', False, False, dtypes.int8)", "('_UINT8InputOutput', False, False, dtypes.uint8)", "('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16)", "('_IntOnly_INT8InputOutput', True, False, dtypes.int8)", "('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8)", "('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16)", "('_IntOnly_INT8InputOutputMlirQuant', True, False, dtypes.int8, True)", "('_IntOnly_UINT8InputOutputMlirQuant', True, False, dtypes.uint8, True)"], {}), False, 'from absl.testing import parameterized\n'), (949, 
'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_INT8InputOutput', False, False, dtypes.int8)", "('_UINT8InputOutput', False, False, dtypes.uint8)", "('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16)", "('_IntOnly_INT8InputOutput', True, False, dtypes.int8)", "('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8)", "('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16)"], {}), False, 'from absl.testing import parameterized\n'), (1014, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_BlocklistedNoneWithLowering', None, None, True)", "('_BlocklistedNoneWithoutLowering', None, None, False)", "('_BlocklistedOpsWithLowering', {'CONV_2D'}, None, True)", "('_BlocklistedOpsWithoutLowering', {'CONV_2D'}, None, False)", "('_BlocklistedNodesWithLowering', None, {'PartitionedCall:0'}, True)", "('_BlocklistedNodesWithoutLowering', None, {'Identity'}, False)"], {}), False, 'from absl.testing import parameterized\n'), (1051, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_SingleLayer', False)", "('_WholeModel', True)"], {}), False, 'from absl.testing import parameterized\n'), (1137, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_PerChannelQuant', False, False)", "('_PerChannelMlirQuant', False, True)", "('_PerTensorQuant', True, False)", "('_PerTensorMlirQuant', True, True)", "('_PerChannelDynamicRange', False, False, False)", "('_PerTensorDynamicRange', True, False, False)"], {}), False, 'from absl.testing import parameterized\n'), (1175, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('MlirQuantize', True)", "('TocoQuantize', False)"], {}), False, 'from absl.testing import parameterized\n'), (1366, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('Default', False)", "('UnfoldLargeConstant', True)"], {}), False, 'from absl.testing import parameterized\n'), (1641, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('EnableResourceVariables', True)", "('DisableResourceVariables', False)"], {}), False, 'from absl.testing import parameterized\n'), (1928, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_Default', False, False, dtypes.float32, False)", "('_DefaultMlirQuant', False, False, dtypes.float32, True)", "('_INT8InputOutput', False, False, dtypes.int8)", "('_UINT8InputOutput', False, False, dtypes.uint8)", "('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16)", "('_IntOnly_INT8InputOutput', True, False, dtypes.int8)", "('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8)", "('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16)", "('_IntOnly_INT8InputOutputMlirQuant', True, False, dtypes.int8, True)", "('_IntOnly_UINT8InputOutputMlirQuant', True, False, dtypes.uint8, True)"], {}), False, 'from absl.testing import parameterized\n'), (2221, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_PerChannelQuant', False, False)", "('_PerChannelMlirQuant', False, True)", "('_PerTensorQuant', True, False)", "('_PerTensorMlirQuant', True, True)", "('_PerChannelDynamicRange', False, False, True)", "('_PerTensorDynamicRange', True, False, True)"], {}), False, 'from absl.testing import parameterized\n'), (2271, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_INT8Quant_INT32Bias', False, 
False, dtypes.int32, True)", "('_INT16Quant_INT64Bias', True, False, dtypes.int64, True)", "('_INT8Quant_INT32Bias_Set', False, True, dtypes.int32, True)", "('_INT8Quant_INT64Bias_Set', False, True, dtypes.int64, False)", "('_INT16Quant_INT32Bias_Set', True, True, dtypes.int32, True)", "('_INT16Quant_INT64Bias_Set', True, True, dtypes.int64, True)", "('_INT16Quant_FLOAT32Bias_Set', True, True, dtypes.float32, False)"], {}), False, 'from absl.testing import parameterized\n'), (2327, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_Int8PerChannelMlirDynamicRangeQuant', True, False, False)", "('_Int8PerChannelTocoDynamicRangeQuant', False, False, False)", "('_Int8PerTensorMlirDynamicRangeQuant', True, True, False)", "('_Int8PerTensorTocoDynamicRangeQuant', False, True, False)", "('_Float16DynamicRangeQuant', True, False, True)"], {}), False, 'from absl.testing import parameterized\n'), (2537, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_PerChannelMlirDynamicRangeQuant', True, False, False)", "('_PerChannelTocoDynamicRangeQuant', False, False, False)", "('_PerTensorMlirDynamicRangeQuant', True, True, False)", "('_PerTensorTocoDynamicRangeQuant', False, True, False)", "('_Float16DynamicRangeQuant', True, False, True)"], {}), False, 'from absl.testing import parameterized\n'), (2976, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('LSTMBatchSizeOne', tf.keras.layers.LSTM, True)", "('LSTM', tf.keras.layers.LSTM, False)", "('SimpleRNNBatchSizeOne', tf.keras.layers.SimpleRNN, True)", "('SimpleRNN', tf.keras.layers.SimpleRNN, False)", "('GRUBatchSizeOne', tf.keras.layers.GRU, True)", "('GRU', tf.keras.layers.GRU, False)"], {}), False, 'from absl.testing import parameterized\n'), (3007, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('LSTM', tf.keras.layers.LSTM)", "('SimpleRNN', tf.keras.layers.SimpleRNN)", "('GRU', tf.keras.layers.GRU)"], {}), False, 'from absl.testing import parameterized\n'), (3028, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('ForceToUseBatchSizeOne', True)", "('DontForceToUseBatchSizeOne', False)"], {}), False, 'from absl.testing import parameterized\n'), (3058, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('ForceToUseBatchSizeOne', True)", "('DontForceToUseBatchSizeOne', False)"], {}), False, 'from absl.testing import parameterized\n'), (3597, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('EnableLoweringTensorListOps', True)", "('DisableLoweringTensorListOps', False)"], {}), False, 'from absl.testing import parameterized\n'), (3648, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('EnableLoweringTensorListOps', True)", "('DisableLoweringTensorListOps', False)"], {}), False, 'from absl.testing import parameterized\n'), (3954, 'tensorflow.python.platform.test.main', 'test.main', ([], {}), False, 'from tensorflow.python.platform import test\n'), (89, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (108, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), 
(112, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (125, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[]'}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (156, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (165, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (169, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (182, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (187, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[add_func, sub_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (192, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (229, 'tensorflow.lite.python.util.get_conversion_metadata', 'get_conversion_metadata', (['tflite_model'], {}), False, 'from tensorflow.lite.python.util import get_conversion_metadata\n'), (237, 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), True, 'import numpy as np\n'), (239, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (265, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (271, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (279, 'tensorflow.lite.python.util.get_conversion_metadata', 'get_conversion_metadata', (['quantized_tflite_model'], {}), False, 'from tensorflow.lite.python.util import get_conversion_metadata\n'), (294, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (315, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (320, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (345, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_model', 'experimental_op_resolver_type': 'OpResolverType.BUILTIN_WITHOUT_DEFAULT_DELEGATES'}), False, 'from tensorflow.lite.python.interpreter import 
Interpreter\n'), (371, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (376, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (402, 'tensorflow.lite.python.util.get_conversion_metadata', 'get_conversion_metadata', (['quantized_tflite_model'], {}), False, 'from tensorflow.lite.python.util import get_conversion_metadata\n'), (410, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (431, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (453, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (458, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (465, 'tensorflow.lite.python.convert.mlir_quantize', 'mlir_quantize', (['calibrated_tflite'], {'inference_type': '_types_pb2.QUANTIZED_INT16'}), False, 'from tensorflow.lite.python.convert import mlir_quantize\n'), (471, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (487, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (490, 'tensorflow.lite.python.lite.TFLiteConverterV2', 'lite.TFLiteConverterV2', (['[add_func]'], {'trackable_obj': 'root'}), False, 'from tensorflow.lite.python import lite\n'), (495, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (520, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (523, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'None'], {}), False, 'from tensorflow.lite.python import lite\n'), (528, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (537, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (545, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (554, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (557, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[add_func]'], {'trackable_obj': 'root'}), False, 'from tensorflow.lite.python import lite\n'), (611, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 
'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (615, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (622, 'tensorflow.lite.python.util.get_conversion_metadata', 'get_conversion_metadata', (['quantized_tflite_model'], {}), False, 'from tensorflow.lite.python.util import get_conversion_metadata\n'), (628, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (647, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (662, 'six.moves.range', 'range', (['(5)'], {}), False, 'from six.moves import range\n'), (695, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (707, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (708, 'tensorflow.Variable', 'tf.Variable', (['(3.0)'], {}), True, 'import tensorflow as tf\n'), (709, 'tensorflow.function', 'tf.function', (['(lambda x: root.v1 * x)'], {}), True, 'import tensorflow as tf\n'), (710, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (714, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (720, 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), True, 'import numpy as np\n'), (722, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (765, 'tensorflow.lite.TFLiteConverter.from_concrete_functions', 'tf.lite.TFLiteConverter.from_concrete_functions', (['[func]', 'root'], {}), True, 'import tensorflow as tf\n'), (798, 'tensorflow.lite.python.util.get_conversion_metadata', 'get_conversion_metadata', (['quantized_tflite_model'], {}), False, 'from tensorflow.lite.python.util import get_conversion_metadata\n'), (807, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (819, 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), True, 'import numpy as np\n'), (821, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (864, 'tensorflow.lite.TFLiteConverter.from_concrete_functions', 'tf.lite.TFLiteConverter.from_concrete_functions', (['[func]', 'root'], {}), True, 'import tensorflow as tf\n'), (902, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (968, 'tensorflow.lite.TFLiteConverter.from_concrete_functions', 'tf.lite.TFLiteConverter.from_concrete_functions', (['[func]', 'root'], {}), True, 'import 
tensorflow as tf\n'), (1004, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1026, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (1037, 'tensorflow.lite.python.convert.mlir_quantize', 'mlir_quantize', (['calibrated'], {'denylisted_ops': 'denylisted_ops', 'denylisted_nodes': 'denylisted_nodes'}), False, 'from tensorflow.lite.python.convert import mlir_quantize\n'), (1041, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1060, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (1074, 'tensorflow.lite.python.convert.mlir_quantize', 'mlir_quantize', (['calibrated'], {'enable_numeric_verify': '(True)', 'enable_whole_model_verify': 'whole_model_verify'}), False, 'from tensorflow.lite.python.convert import mlir_quantize\n'), (1152, 'tensorflow.lite.TFLiteConverter.from_concrete_functions', 'tf.lite.TFLiteConverter.from_concrete_functions', (['[func]', 'root'], {}), True, 'import tensorflow as tf\n'), (1166, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1180, 'numpy.array', 'np.array', (['[[-0.001, 0.001]]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (1186, 'tensorflow.constant', 'tf.constant', (['[-0.001, 0.001]'], {'shape': '(1, 2)'}), True, 'import tensorflow as tf\n'), (1188, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (1195, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1221, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'custom_resize'], {}), False, 'from tensorflow.lite.python import lite\n'), (1224, 'tensorflow.lite.python.schema_py_generated.Model.GetRootAsModel', 'schema_fb.Model.GetRootAsModel', (['tflite_model', '(0)'], {}), True, 'from tensorflow.lite.python import schema_py_generated as schema_fb\n'), (1225, 'tensorflow.lite.python.schema_py_generated.ModelT.InitFromObj', 'schema_fb.ModelT.InitFromObj', (['model_object'], {}), True, 'from tensorflow.lite.python import schema_py_generated as schema_fb\n'), (1236, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (1240, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (1247, 'tensorflow.lite.python.util.get_conversion_metadata', 'get_conversion_metadata', (['tflite_model'], {}), False, 'from tensorflow.lite.python.util import get_conversion_metadata\n'), (1258, 'tensorflow.constant', 'tf.constant', 
(['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (1262, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (1267, 'tensorflow.lite.python.util.get_conversion_metadata', 'get_conversion_metadata', (['tflite_model'], {}), False, 'from tensorflow.lite.python.util import get_conversion_metadata\n'), (1277, 'tensorflow.lite.python.util.get_conversion_metadata', 'get_conversion_metadata', (['quantized_model'], {}), False, 'from tensorflow.lite.python.util import get_conversion_metadata\n'), (1284, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (1290, 'tensorflow.lite.python.util.get_conversion_metadata', 'get_conversion_metadata', (['quantized_model'], {}), False, 'from tensorflow.lite.python.util import get_conversion_metadata\n'), (1321, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['(32, 32, 128)'], {'name': 'input_name'}), True, 'import tensorflow as tf\n'), (1322, 'tensorflow.quantization.fake_quant_with_min_max_args', 'tf.quantization.fake_quant_with_min_max_args', (['input_tensor', '(-3.0)', '(3.0)'], {}), True, 'import tensorflow as tf\n'), (1324, 'tensorflow.quantization.fake_quant_with_min_max_args', 'tf.quantization.fake_quant_with_min_max_args', (['x', '(-3.0)', '(3.0)'], {}), True, 'import tensorflow as tf\n'), (1326, 'tensorflow.keras.Model', 'tf.keras.Model', (['input_tensor', 'scores'], {}), True, 'import tensorflow as tf\n'), (1385, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (1390, 'tensorflow.lite.python.util._convert_model_from_bytearray_to_object', 'util._convert_model_from_bytearray_to_object', (['tflite_model'], {}), False, 'from tensorflow.lite.python import util\n'), (1401, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1431, 'tensorflow.python.saved_model.loader_impl.parse_saved_model', 'parse_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.python.saved_model.loader_impl import parse_saved_model\n'), (1434, 'os.path.join', 'os.path.join', (['saved_model_dir', '"""saved_model.pb"""'], {}), False, 'import os\n'), (1439, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (1445, 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), True, 'import tensorflow as tf\n'), (1449, 'tensorflow.lookup.KeyValueTensorInitializer', 'tf.lookup.KeyValueTensorInitializer', ([], {'keys': "['a', 'b', 'c', 'd']", 'values': '[1, 2, 3, 4]', 'key_dtype': 'tf.string', 'value_dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (1457, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.string'], {'shape': '()', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (1460, 'tensorflow.compat.v1.saved_model.utils.build_tensor_info', 'tf.compat.v1.saved_model.utils.build_tensor_info', (['x'], {}), True, 'import tensorflow as tf\n'), (1461, 
'tensorflow.compat.v1.saved_model.utils.build_tensor_info', 'tf.compat.v1.saved_model.utils.build_tensor_info', (['y'], {}), True, 'import tensorflow as tf\n'), (1471, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'), (1474, 'tensorflow.compat.v1.saved_model.builder.SavedModelBuilder', 'tf.compat.v1.saved_model.builder.SavedModelBuilder', (['saved_model_dir'], {}), True, 'import tensorflow as tf\n'), (1485, 'tensorflow.compat.v1.reset_default_graph', 'tf.compat.v1.reset_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (1486, 'tensorflow.compat.v1.enable_eager_execution', 'tf.compat.v1.enable_eager_execution', ([], {}), True, 'import tensorflow as tf\n'), (1495, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (1499, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1503, 'numpy.array', 'np.array', (["['a', 'b', 'c', 'z']"], {'dtype': 'np.string_'}), True, 'import numpy as np\n'), (1525, 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), True, 'import tensorflow as tf\n'), (1529, 'tensorflow.raw_ops.MutableHashTableV2', 'tf.raw_ops.MutableHashTableV2', ([], {'key_dtype': 'tf.string', 'value_dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (1531, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.string'], {'shape': '()', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (1532, 'tensorflow.constant', 'tf.constant', (["['a', 'b']", 'tf.string'], {}), True, 'import tensorflow as tf\n'), (1533, 'tensorflow.constant', 'tf.constant', (['[1, 5]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (1534, 'tensorflow.constant', 'tf.constant', (['(-1)', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (1535, 'tensorflow.raw_ops.LookupTableInsertV2', 'tf.raw_ops.LookupTableInsertV2', ([], {'table_handle': 'table', 'keys': 'keys', 'values': 'values'}), True, 'import tensorflow as tf\n'), (1541, 'tensorflow.compat.v1.saved_model.utils.build_tensor_info', 'tf.compat.v1.saved_model.utils.build_tensor_info', (['x'], {}), True, 'import tensorflow as tf\n'), (1542, 'tensorflow.compat.v1.saved_model.utils.build_tensor_info', 'tf.compat.v1.saved_model.utils.build_tensor_info', (['y'], {}), True, 'import tensorflow as tf\n'), (1552, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'), (1554, 'tensorflow.compat.v1.saved_model.builder.SavedModelBuilder', 'tf.compat.v1.saved_model.builder.SavedModelBuilder', (['saved_model_dir'], {}), True, 'import tensorflow as tf\n'), (1565, 'tensorflow.compat.v1.reset_default_graph', 'tf.compat.v1.reset_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (1566, 'tensorflow.compat.v1.enable_eager_execution', 'tf.compat.v1.enable_eager_execution', ([], {}), True, 'import tensorflow as tf\n'), (1575, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (1582, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1586, 'numpy.array', 'np.array', 
(["['a', 'b', 'c']"], {'dtype': 'np.string_'}), True, 'import numpy as np\n'), (1600, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (1601, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (1602, 'tensorflow.function', 'tf.function', (['(lambda x: 2.0 * x)'], {}), True, 'import tensorflow as tf\n'), (1606, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', 'to_save'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (1609, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (1621, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (1625, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', 'to_save'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (1628, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (1631, 'tensorflow.lite.python.util.get_conversion_metadata', 'get_conversion_metadata', (['tflite_model'], {}), False, 'from tensorflow.lite.python.util import get_conversion_metadata\n'), (1647, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1, 10]'}), True, 'import tensorflow as tf\n'), (1651, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', 'to_save'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (1654, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (1673, 'six.moves.zip', 'zip', (['expected_value', 'actual_value[0]'], {}), False, 'from six.moves import zip\n'), (1680, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (1684, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', 'to_save'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (1693, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {'signature_keys': '[]'}), False, 'from tensorflow.lite.python import lite\n'), (1714, 'tensorflow.lite.TFLiteConverter.from_saved_model', 'tf.lite.TFLiteConverter.from_saved_model', (['tf_saved_model_dir'], {}), True, 'import tensorflow as tf\n'), (1718, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1732, 'tensorflow.lite.TFLiteConverter.from_saved_model', 'tf.lite.TFLiteConverter.from_saved_model', (['tf_saved_model_dir'], {}), True, 'import tensorflow as tf\n'), (1738, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model_quant'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1752, 'numpy.random.random', 'np.random.random', (['tflite_input_shape'], {}), True, 'import numpy as np\n'), (1773, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (1774, 'tensorflow.constant', 'tf.constant', (['(3.0)'], {'shape': 
'[1]'}), True, 'import tensorflow as tf\n'), (1779, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', "{'mul_add': mul_add_func}"], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (1781, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {'signature_keys': "['mul_add']"}), False, 'from tensorflow.lite.python import lite\n'), (1787, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1813, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (1814, 'tensorflow.constant', 'tf.constant', (['(3.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (1819, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', "{'mul_add': mul_add_func}"], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (1821, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {'signature_keys': "['mul_add']"}), False, 'from tensorflow.lite.python import lite\n'), (1827, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1850, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (1851, 'tensorflow.constant', 'tf.constant', (['(3.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (1856, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', "{'mul_add': mul_add_func}"], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (1858, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {'signature_keys': "['mul_add']"}), False, 'from tensorflow.lite.python import lite\n'), (1877, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1893, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (1898, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', "{'add': add_func, 'sub': sub_func}"], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (1901, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (1905, 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_content': 'tflite_model'}), True, 'import tensorflow as tf\n'), (1947, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (1952, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', "{'add': add_func, 'sub': sub_func}"], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (1955, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (1991, 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_content': 'tflite_model'}), True, 'import tensorflow as tf\n'), (2038, 'tensorflow.constant', 'tf.constant', 
(['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (2044, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', "{'add': add_func, 'sub': sub_func, 'mul': mul_func}"], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (2047, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (2056, 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_content': 'tflite_model'}), True, 'import tensorflow as tf\n'), (2071, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (2081, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1, 1]'}), True, 'import tensorflow as tf\n'), (2083, 'numpy.array', 'np.array', (['[[1.0], [2.0]]'], {}), True, 'import numpy as np\n'), (2084, 'numpy.array', 'np.array', (['[[2.0], [4.0]]'], {}), True, 'import numpy as np\n'), (2094, 'tensorflow.python.saved_model.save.save', 'save', (['model', 'save_dir'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (2097, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (2108, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (2109, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (2110, 'tensorflow.function', 'tf.function', (['(lambda x: 2.0 * x)'], {}), True, 'import tensorflow as tf\n'), (2112, 'tensorflow.python.saved_model.save_options.SaveOptions', 'save_options.SaveOptions', ([], {'save_debug_info': '(True)'}), False, 'from tensorflow.python.saved_model import save_options\n'), (2114, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', 'to_save', 'options'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (2117, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (2139, 'tensorflow.lite.TFLiteConverter.from_saved_model', 'tf.lite.TFLiteConverter.from_saved_model', (['saved_model_dir'], {}), True, 'import tensorflow as tf\n'), (2148, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'batch_size': '(8)', 'shape': '[9, 10, 11, 12]', 'name': '"""input_tensor"""', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (2165, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[input_tensor]', 'outputs': 'output'}), True, 'import tensorflow as tf\n'), (2176, 'tensorflow.lite.TFLiteConverter.from_saved_model', 'tf.lite.TFLiteConverter.from_saved_model', (['saved_model_dir'], {}), True, 'import tensorflow as tf\n'), (2202, 'tensorflow.lite.TFLiteConverter.from_saved_model', 'tf.lite.TFLiteConverter.from_saved_model', (['saved_model_dir'], {}), True, 'import tensorflow as tf\n'), (2207, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (2211, 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (2240, 'tensorflow.python.saved_model.save.save', 
'save', (['model', 'saved_model_dir'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (2242, 'tensorflow.lite.TFLiteConverter.from_saved_model', 'tf.lite.TFLiteConverter.from_saved_model', (['saved_model_dir'], {}), True, 'import tensorflow as tf\n'), (2260, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (2288, 'tensorflow.python.saved_model.save.save', 'save', (['model', 'saved_model_dir'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (2290, 'tensorflow.lite.TFLiteConverter.from_saved_model', 'tf.lite.TFLiteConverter.from_saved_model', (['saved_model_dir'], {}), True, 'import tensorflow as tf\n'), (2321, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (2342, 'tensorflow.python.saved_model.save.save', 'save', (['model', 'saved_model_dir.full_path'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (2344, 'tensorflow.lite.TFLiteConverter.from_saved_model', 'tf.lite.TFLiteConverter.from_saved_model', (['saved_model_dir.full_path'], {}), True, 'import tensorflow as tf\n'), (2355, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (2383, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1, 1]'}), True, 'import tensorflow as tf\n'), (2386, 'numpy.array', 'np.array', (['[[1.0], [2.0]]'], {}), True, 'import numpy as np\n'), (2387, 'numpy.array', 'np.array', (['[[2.0], [4.0]]'], {}), True, 'import numpy as np\n'), (2397, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (2400, 'tensorflow.lite.python.util.get_conversion_metadata', 'get_conversion_metadata', (['tflite_model'], {}), False, 'from tensorflow.lite.python.util import get_conversion_metadata\n'), (2413, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1, 3]'}), True, 'import tensorflow as tf\n'), (2414, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1, 3]'}), True, 'import tensorflow as tf\n'), (2417, 'numpy.random.random', 'np.random.random', (['(10, 3)'], {}), True, 'import numpy as np\n'), (2418, 'numpy.random.random', 'np.random.random', (['(10, 3)'], {}), True, 'import numpy as np\n'), (2419, 'numpy.random.random', 'np.random.random', (['(10, 3)'], {}), True, 'import numpy as np\n'), (2420, 'numpy.random.random', 'np.random.random', (['(10, 2)'], {}), True, 'import numpy as np\n'), (2422, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(3,)', 'name': '"""input_a"""'}), True, 'import tensorflow as tf\n'), (2423, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(3,)', 'name': '"""input_b"""'}), True, 'import tensorflow as tf\n'), (2425, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(8)'], {'name': '"""dense_1"""'}), True, 'import tensorflow as tf\n'), (2428, 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[interm_a, interm_b]'], {'name': '"""merge"""'}), True, 'import tensorflow as tf\n'), (2437, 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': '[input_a, input_b]', 'outputs': 
'[output_c, output_d]'}), True, 'import tensorflow as tf\n'), (2443, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (2450, 'six.moves.zip', 'zip', (['expected_value', 'actual_value'], {}), False, 'from six.moves import zip\n'), (2463, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (2494, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (2512, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['keras_model'], {}), False, 'from tensorflow.lite.python import lite\n'), (2519, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (2554, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (2564, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (2602, 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['model'], {}), True, 'import tensorflow as tf\n'), (2612, 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_content': 'tflite_model'}), True, 'import tensorflow as tf\n'), (2657, 'jax.numpy.zeros', 'jnp.zeros', (['[10, 10]'], {}), True, 'from jax import numpy as jnp\n'), (2659, 'tensorflow.lite.python.lite.TFLiteConverterV2.experimental_from_jax', 'lite.TFLiteConverterV2.experimental_from_jax', (['None', "[{'input1': input_tensor}]"], {}), False, 'from tensorflow.lite.python import lite\n'), (2667, 'tensorflow.lite.python.lite.TFLiteConverterV2.experimental_from_jax', 'lite.TFLiteConverterV2.experimental_from_jax', (['[simple_model]', 'None'], {}), False, 'from tensorflow.lite.python import lite\n'), (2672, 'tensorflow.lite.python.lite.TFLiteConverterV2.experimental_from_jax', 'lite.TFLiteConverterV2.experimental_from_jax', (['[simple_model]', '[]'], {}), False, 'from tensorflow.lite.python import lite\n'), (2677, 'tensorflow.lite.python.lite.TFLiteConverterV2.experimental_from_jax', 'lite.TFLiteConverterV2.experimental_from_jax', (['[simple_model]', 'input_tensor'], {}), False, 'from tensorflow.lite.python import lite\n'), (2685, 'tensorflow.lite.python.lite.TFLiteConverterV2.experimental_from_jax', 'lite.TFLiteConverterV2.experimental_from_jax', (['[simple_model]', "[[('input1', input_tensor)]]"], {}), False, 'from tensorflow.lite.python import lite\n'), (2692, 'tensorflow.lite.python.lite.TFLiteConverterV2.experimental_from_jax', 'lite.TFLiteConverterV2.experimental_from_jax', (['[simple_model, simple_model]', "[[('input1', input_tensor), ('input2', input_tensor)]]"], {}), False, 'from tensorflow.lite.python import lite\n'), (2703, 'tensorflow.lite.python.lite.TFLiteConverterV2.experimental_from_jax', 'lite.TFLiteConverterV2.experimental_from_jax', (['[simple_model, simple_model]', "[[('input1', input_tensor), ('input2', input_tensor)], [('input1',\n input_tensor), ('input2', input_tensor)]]"], {}), False, 
'from tensorflow.lite.python import lite\n'), (2724, 'jax.numpy.zeros', 'jnp.zeros', (['[10, 10]'], {}), True, 'from jax import numpy as jnp\n'), (2725, 'tensorflow.lite.python.lite.TFLiteConverterV2.experimental_from_jax', 'lite.TFLiteConverterV2.experimental_from_jax', (['[single_input]', "[[('input_tensor', input_tensor)]]"], {}), False, 'from tensorflow.lite.python import lite\n'), (2729, 'tensorflow.lite.python.util.get_conversion_metadata', 'get_conversion_metadata', (['tflite_model'], {}), False, 'from tensorflow.lite.python.util import get_conversion_metadata\n'), (2734, 'numpy.random.random_sample', 'np.random.random_sample', (['(10, 10)'], {}), True, 'import numpy as np\n'), (2735, 'tensorflow.constant', 'tf.constant', (['input_data'], {'dtype': 'np.float32'}), True, 'import tensorflow as tf\n'), (2749, 'jax.numpy.zeros', 'jnp.zeros', (['[10, 10]'], {}), True, 'from jax import numpy as jnp\n'), (2750, 'jax.numpy.zeros', 'jnp.zeros', (['[10, 1]'], {}), True, 'from jax import numpy as jnp\n'), (2751, 'tensorflow.lite.python.lite.TFLiteConverterV2.experimental_from_jax', 'lite.TFLiteConverterV2.experimental_from_jax', (['[multiple_inputs]', "[[('input1', input1), ('input2', input2)]]"], {}), False, 'from tensorflow.lite.python import lite\n'), (2756, 'numpy.random.random_sample', 'np.random.random_sample', (['(10, 10)'], {}), True, 'import numpy as np\n'), (2757, 'tensorflow.constant', 'tf.constant', (['input1_data'], {'dtype': 'np.float32'}), True, 'import tensorflow as tf\n'), (2758, 'numpy.random.random_sample', 'np.random.random_sample', (['(10, 1)'], {}), True, 'import numpy as np\n'), (2759, 'tensorflow.constant', 'tf.constant', (['input2_data'], {'dtype': 'np.float32'}), True, 'import tensorflow as tf\n'), (2774, 'jax.numpy.zeros', 'jnp.zeros', (['[10, 10]'], {}), True, 'from jax import numpy as jnp\n'), (2775, 'jax.numpy.zeros', 'jnp.zeros', (['[10, 1]'], {}), True, 'from jax import numpy as jnp\n'), (2776, 'tensorflow.lite.python.lite.TFLiteConverterV2.experimental_from_jax', 'lite.TFLiteConverterV2.experimental_from_jax', (['[multiple_inputs]', "[[('input1', input1), ('input2', input2)]]"], {}), False, 'from tensorflow.lite.python import lite\n'), (2781, 'numpy.random.random_sample', 'np.random.random_sample', (['(10, 10)'], {}), True, 'import numpy as np\n'), (2782, 'tensorflow.constant', 'tf.constant', (['input1_data'], {'dtype': 'np.float32'}), True, 'import tensorflow as tf\n'), (2783, 'numpy.random.random_sample', 'np.random.random_sample', (['(10, 1)'], {}), True, 'import numpy as np\n'), (2784, 'tensorflow.constant', 'tf.constant', (['input2_data'], {'dtype': 'np.float32'}), True, 'import tensorflow as tf\n'), (2798, 'numpy.random.random_sample', 'np.random.random_sample', (['(10, 10)'], {}), True, 'import numpy as np\n'), (2799, 'functools.partial', 'functools.partial', (['model'], {'weights': 'weights'}), False, 'import functools\n'), (2802, 'jax.numpy.zeros', 'jnp.zeros', (['[10, 10]'], {}), True, 'from jax import numpy as jnp\n'), (2803, 'tensorflow.lite.python.lite.TFLiteConverterV2.experimental_from_jax', 'lite.TFLiteConverterV2.experimental_from_jax', (['[serving_func]', "[[('inputs', input_tensor)]]"], {}), False, 'from tensorflow.lite.python import lite\n'), (2808, 'numpy.random.random_sample', 'np.random.random_sample', (['(10, 10)'], {}), True, 'import numpy as np\n'), (2809, 'tensorflow.constant', 'tf.constant', (['input_data'], {'dtype': 'np.float32'}), True, 'import tensorflow as tf\n'), (2830, 'jax.numpy.zeros', 'jnp.zeros', (['[3, 3]'], {}), True, 
'from jax import numpy as jnp\n'), (2831, 'tensorflow.lite.python.lite.TFLiteConverterV2.experimental_from_jax', 'lite.TFLiteConverterV2.experimental_from_jax', (['[model]', "[[('x', input_tensor)]]"], {}), False, 'from tensorflow.lite.python import lite\n'), (2836, 'numpy.random.random_sample', 'np.random.random_sample', (['(3, 3)'], {}), True, 'import numpy as np\n'), (2837, 'tensorflow.constant', 'tf.constant', (['input_data'], {'dtype': 'np.float32'}), True, 'import tensorflow as tf\n'), (2852, 'tensorflow.Variable', 'tf.Variable', (['[[0.1, 0.2], [0.3, 0.4]]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (2871, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'model'], {}), False, 'from tensorflow.lite.python import lite\n'), (2883, 'tensorflow.python.platform.resource_loader.get_path_to_datafile', 'resource_loader.get_path_to_datafile', (['"""testdata/control_flow_v1_saved_model"""'], {}), False, 'from tensorflow.python.platform import resource_loader\n'), (2885, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['filename'], {}), False, 'from tensorflow.lite.python import lite\n'), (2898, 'tensorflow.keras.layers.LSTMCell', 'tf.keras.layers.LSTMCell', (['(10)'], {}), True, 'import tensorflow as tf\n'), (2909, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'model'], {}), False, 'from tensorflow.lite.python import lite\n'), (2916, 'six.moves.zip', 'zip', (['expected_value', 'actual_value'], {}), False, 'from six.moves import zip\n'), (2921, 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0, 3.0, 4.0]'], {'shape': '[2, 2]'}), True, 'import tensorflow as tf\n'), (2923, 'tensorflow.Variable', 'tf.Variable', (['[[0.1, 0.2], [0.3, 0.4]]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (2939, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'model'], {}), False, 'from tensorflow.lite.python import lite\n'), (2953, 'tensorflow.keras.layers.LSTMCell', 'tf.keras.layers.LSTMCell', (['(10)'], {}), True, 'import tensorflow as tf\n'), (2964, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'model'], {}), False, 'from tensorflow.lite.python import lite\n'), (2973, 'six.moves.zip', 'zip', (['expected_value', 'actual_value'], {}), False, 'from six.moves import zip\n'), (2994, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (3015, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'batch_shape': '(4, 10, 10)'}), True, 'import tensorflow as tf\n'), (3017, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[x]', 'outputs': '[y]'}), True, 'import tensorflow as tf\n'), (3020, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (3034, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (3045, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', 
(['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (3064, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (3071, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (3091, 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]'], {'shape': '[3, 3]'}), True, 'import tensorflow as tf\n'), (3099, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (3104, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (3136, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'model'], {}), False, 'from tensorflow.lite.python import lite\n'), (3147, 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), True, 'import numpy as np\n'), (3166, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (3180, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (3184, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (3190, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (3204, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (3208, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (3215, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (3240, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'model'], {}), False, 'from tensorflow.lite.python import lite\n'), (3263, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'model'], {}), False, 'from tensorflow.lite.python import lite\n'), (3291, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'model'], {}), False, 'from tensorflow.lite.python import lite\n'), (3300, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (3306, 'numpy.array', 'np.array', 
(['[1.0]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (3347, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (3355, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (3361, 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.int32'}), True, 'import numpy as np\n'), (3365, 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int32'}), True, 'import numpy as np\n'), (3406, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (3414, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (3420, 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.int32'}), True, 'import numpy as np\n'), (3460, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (3468, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (3474, 'numpy.array', 'np.array', (['[1.0]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (3519, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (3527, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (3533, 'numpy.array', 'np.array', (['[1.0]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (3576, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (3584, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (3590, 'numpy.array', 'np.array', (['[1.0]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (3625, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (3635, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (3641, 'numpy.array', 'np.array', (['[1.0]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (3676, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (3692, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (3698, 'numpy.array', 'np.array', (['[1.0]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (3710, 
'numpy.random.seed', 'np.random.seed', (['(0)'], {}), True, 'import numpy as np\n'), (3732, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (3744, 'tensorflow.lite.python.util.get_conversion_metadata', 'get_conversion_metadata', (['tflite_model'], {}), False, 'from tensorflow.lite.python.util import get_conversion_metadata\n'), (3749, 'tensorflow.lite.python.interpreter.InterpreterWithCustomOps', 'InterpreterWithCustomOps', ([], {'model_content': 'tflite_model', 'custom_op_registerers': "['TF_TestRegisterer']"}), False, 'from tensorflow.lite.python.interpreter import InterpreterWithCustomOps\n'), (3753, 'numpy.array', 'np.array', (['[[0.0, 0.1, 0.2, 0.3]]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (3758, 'numpy.array', 'np.array', (['[[0.0, 0.2, 0.4, 0.6]]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (3766, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (3779, 'tensorflow.lite.python.interpreter.InterpreterWithCustomOps', 'InterpreterWithCustomOps', ([], {'model_content': 'tflite_model', 'custom_op_registerers': '[test_registerer.TF_TestRegisterer]'}), False, 'from tensorflow.lite.python.interpreter import InterpreterWithCustomOps\n'), (3784, 'numpy.array', 'np.array', (['[[0.0, 0.1, 0.2, 0.3]]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (3789, 'numpy.array', 'np.array', (['[[0.0, 0.2, 0.4, 0.6]]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (3799, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (3824, 'numpy.array', 'np.array', (['(2.0)', 'np.float32'], {}), True, 'import numpy as np\n'), (3826, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'f'], {}), False, 'from tensorflow.lite.python import lite\n'), (3829, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model', 'experimental_preserve_all_tensors': 'experimental_preserve_all_tensors'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (3880, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]', 'model'], {}), False, 'from tensorflow.lite.python import lite\n'), (3889, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (3902, 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), True, 'import numpy as np\n'), (3903, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (3924, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (3930, 'tensorflow.lite.python.util.get_conversion_metadata', 'get_conversion_metadata', (['float_tflite_model'], {}), False, 'from tensorflow.lite.python.util import get_conversion_metadata\n'), 
(3941, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (3947, 'tensorflow.lite.python.util.get_conversion_metadata', 'get_conversion_metadata', (['float_tflite_model'], {}), False, 'from tensorflow.lite.python.util import get_conversion_metadata\n'), (35, 'sys.getdlopenflags', 'sys.getdlopenflags', ([], {}), False, 'import sys\n'), (83, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[root.f]', 'root'], {}), False, 'from tensorflow.lite.python import lite\n'), (142, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (144, 'tensorflow.function', 'tf.function', ([], {'input_signature': '[]'}), True, 'import tensorflow as tf\n'), (247, 'tensorflow.nn.relu', 'tf.nn.relu', (['conv'], {'name': '"""output"""'}), True, 'import tensorflow as tf\n'), (251, 'six.moves.range', 'range', (['(5)'], {}), False, 'from six.moves import range\n'), (728, 'tensorflow.math.tanh', 'tf.math.tanh', (['inp'], {}), True, 'import tensorflow as tf\n'), (736, 'tensorflow.math.erf', 'tf.math.erf', (['conv3d'], {}), True, 'import tensorflow as tf\n'), (737, 'tensorflow.math.tanh', 'tf.math.tanh', (['erf'], {}), True, 'import tensorflow as tf\n'), (741, 'six.moves.range', 'range', (['(5)'], {}), False, 'from six.moves import range\n'), (829, 'tensorflow.math.ceil', 'tf.math.ceil', (['a'], {}), True, 'import tensorflow as tf\n'), (830, 'tensorflow.nn.tanh', 'tf.nn.tanh', (['b'], {}), True, 'import tensorflow as tf\n'), (831, 'tensorflow.math.add', 'tf.math.add', (['left', 'right'], {}), True, 'import tensorflow as tf\n'), (833, 'tensorflow.math.ceil', 'tf.math.ceil', (['add'], {}), True, 'import tensorflow as tf\n'), (837, 'six.moves.range', 'range', (['(5)'], {}), False, 'from six.moves import range\n'), (930, 'six.moves.range', 'range', (['(5)'], {}), False, 'from six.moves import range\n'), (938, 'six.moves.range', 'range', (['(5)'], {}), False, 'from six.moves import range\n'), (1087, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_content', 'experimental_op_resolver_type': 'OpResolverType.BUILTIN_WITHOUT_DEFAULT_DELEGATES'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1214, 'tensorflow.compat.v1.image.resize_bilinear', 'tf.compat.v1.image.resize_bilinear', (['image', '[2, 2]'], {'half_pixel_centers': '(True)'}), True, 'import tensorflow as tf\n'), (1217, 'tensorflow.compat.v1.image.resize_bilinear', 'tf.compat.v1.image.resize_bilinear', (['image', '[2, 2]'], {}), True, 'import tensorflow as tf\n'), (1323, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(1)', '(3, 3)'], {}), True, 'import tensorflow as tf\n'), (1325, 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', (['(-1,)'], {'name': 'output_name'}), True, 'import tensorflow as tf\n'), (1337, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (1341, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1418, 'numpy.ones', 'np.ones', ([], {'shape': '[1000, 1000]', 
'dtype': 'np.float32'}), True, 'import numpy as np\n'), (1421, 'numpy.full', 'np.full', ([], {'shape': '[1000, 1000]', 'fill_value': '(2.0)', 'dtype': 'np.float32'}), True, 'import numpy as np\n'), (1435, 'tensorflow.python.lib.io.file_io.FileIO', 'file_io.FileIO', (['saved_model_pb_file_path', '"""wb"""'], {}), False, 'from tensorflow.python.lib.io import file_io\n'), (1469, 'tensorflow.compat.v1.tables_initializer', 'tf.compat.v1.tables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (1472, 'tensorflow.compat.v1.initializers.global_variables', 'tf.compat.v1.initializers.global_variables', ([], {}), True, 'import tensorflow as tf\n'), (1537, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[insert_call]'], {}), True, 'import tensorflow as tf\n'), (1538, 'tensorflow.raw_ops.LookupTableFindV2', 'tf.raw_ops.LookupTableFindV2', ([], {'table_handle': 'table', 'keys': 'x', 'default_value': 'default_value'}), True, 'import tensorflow as tf\n'), (1550, 'tensorflow.compat.v1.tables_initializer', 'tf.compat.v1.tables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (1688, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {'signature_keys': "['INVALID']"}), False, 'from tensorflow.lite.python import lite\n'), (1766, 'numpy.mean', 'np.mean', (['((result - result_quant) ** 2)'], {}), True, 'import numpy as np\n'), (1862, 'six.moves.range', 'range', (['(2)'], {}), False, 'from six.moves import range\n'), (1958, 'six.moves.range', 'range', (['(2)'], {}), False, 'from six.moves import range\n'), (1962, 'six.moves.range', 'range', (['(2)'], {}), False, 'from six.moves import range\n'), (2074, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (2154, 'tensorflow.keras.layers.ConvLSTM2D', 'tf.keras.layers.ConvLSTM2D', ([], {'filters': '(3)', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""VALID"""', 'dilation_rate': '(2)', 'use_bias': '(False)', 'bias_initializer': '"""ones"""', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (2308, 'six.moves.range', 'range', (['(5)'], {}), False, 'from six.moves import range\n'), (2430, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(3)'], {'activation': '"""softmax"""', 'name': '"""dense_2"""'}), True, 'import tensorflow as tf\n'), (2433, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'activation': '"""softmax"""', 'name': '"""dense_3"""'}), True, 'import tensorflow as tf\n'), (2721, 'jax.numpy.sin', 'jnp.sin', (['input_tensor'], {}), True, 'from jax import numpy as jnp\n'), (2796, 'jax.numpy.matmul', 'jnp.matmul', (['weights', 'inputs'], {}), True, 'from jax import numpy as jnp\n'), (2823, 'jax.numpy.add', 'jnp.add', (['x', '(2.0)'], {}), True, 'from jax import numpy as jnp\n'), (2826, 'jax.lax.while_loop', 'jax.lax.while_loop', (['condition', 'body', 'x'], {}), False, 'import jax\n'), (2848, 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0]'], {'shape': '[1, 2]'}), True, 'import tensorflow as tf\n'), (2849, 'tensorflow.constant', 'tf.constant', (['(True)'], {}), True, 'import tensorflow as tf\n'), (2855, 'tensorflow.matmul', 'tf.matmul', (['x', 'weights'], {}), True, 'import tensorflow as tf\n'), (2858, 'tensorflow.add', 'tf.add', (['x', 'weights'], {}), True, 'import tensorflow as tf\n'), (2903, 'tensorflow.split', 'tf.split', (['x', '(3)', '(0)'], {}), 
True, 'import tensorflow as tf\n'), (2904, 'tensorflow.python.ops.rnn.static_rnn', 'rnn.static_rnn', (['cell', 'seq'], {'dtype': 'tf.float32', 'sequence_length': '[1]'}), False, 'from tensorflow.python.ops import rnn\n'), (2929, 'tensorflow.add', 'tf.add', (['x', 'weights'], {}), True, 'import tensorflow as tf\n'), (2934, 'tensorflow.while_loop', 'tf.while_loop', (['condition', 'body', '[x]'], {}), True, 'import tensorflow as tf\n'), (2958, 'tensorflow.keras.layers.RNN', 'tf.keras.layers.RNN', (['[cell]'], {'return_sequences': '(True)'}), True, 'import tensorflow as tf\n'), (3035, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(10, 10)', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (3040, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), True, 'import tensorflow as tf\n'), (3041, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(5)'], {}), True, 'import tensorflow as tf\n'), (3042, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""softmax"""'], {}), True, 'import tensorflow as tf\n'), (3065, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(10, 10)', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (3067, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(5)'], {}), True, 'import tensorflow as tf\n'), (3068, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""softmax"""'], {}), True, 'import tensorflow as tf\n'), (3095, 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0, 3.0]'], {}), True, 'import tensorflow as tf\n'), (3096, 'tensorflow.broadcast_to', 'tf.broadcast_to', (['y_const', '[3, 3]'], {}), True, 'import tensorflow as tf\n'), (3097, 'tensorflow.matmul', 'tf.matmul', (['x', 'y_broadcast'], {}), True, 'import tensorflow as tf\n'), (3130, 'tensorflow.shape', 'tf.shape', (['in_tensor'], {}), True, 'import tensorflow as tf\n'), (3132, 'tensorflow.matmul', 'tf.matmul', (['fill', 'in_tensor'], {}), True, 'import tensorflow as tf\n'), (3161, 'tensorflow.shape', 'tf.shape', (['input_tensor'], {}), True, 'import tensorflow as tf\n'), (3163, 'tensorflow.matmul', 'tf.matmul', (['fill', 'input_tensor'], {}), True, 'import tensorflow as tf\n'), (3164, 'tensorflow.matmul', 'tf.matmul', (['mult', 'const_tensor'], {}), True, 'import tensorflow as tf\n'), (3171, 'six.moves.range', 'range', (['(5)', '(20)', '(5)'], {}), False, 'from six.moves import range\n'), (3236, 'tensorflow.matmul', 'tf.matmul', (['in_tensor_1', 'in_tensor_2'], {}), True, 'import tensorflow as tf\n'), (3281, 'tensorflow.python.ops.map_ops.empty_tensor_map', 'map_ops.empty_tensor_map', ([], {}), False, 'from tensorflow.python.ops import map_ops\n'), (3282, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (3283, 'tensorflow.add', 'tf.add', (['k', 'v'], {}), True, 'import tensorflow as tf\n'), (3723, 'six.moves.range', 'range', (['(100)'], {}), False, 'from six.moves import range\n'), (3741, 'tensorflow.lite.python.testdata._pywrap_test_registerer.get_num_test_registerer_calls', 'test_registerer.get_num_test_registerer_calls', ([], {}), True, 'from tensorflow.lite.python.testdata import _pywrap_test_registerer as test_registerer\n'), (3742, 'tensorflow.lite.python.test_util.get_ops_list', 'tflite_test_util.get_ops_list', (['tflite_model'], {}), True, 'from tensorflow.lite.python import test_util as tflite_test_util\n'), (3775, 'tensorflow.lite.python.testdata._pywrap_test_registerer.get_num_test_registerer_calls', 
'test_registerer.get_num_test_registerer_calls', ([], {}), True, 'from tensorflow.lite.python.testdata import _pywrap_test_registerer as test_registerer\n'), (3776, 'tensorflow.lite.python.test_util.get_ops_list', 'tflite_test_util.get_ops_list', (['tflite_model'], {}), True, 'from tensorflow.lite.python import test_util as tflite_test_util\n'), (3816, 'tensorflow.add', 'tf.add', (['x', 'x'], {'name': '"""y"""'}), True, 'import tensorflow as tf\n'), (3817, 'tensorflow.add', 'tf.add', (['y', 'y'], {'name': '"""z"""'}), True, 'import tensorflow as tf\n'), (3818, 'tensorflow.add', 'tf.add', (['z', 'z'], {'name': '"""w"""'}), True, 'import tensorflow as tf\n'), (3875, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['[1, 2, 3, 4]'], {}), True, 'import tensorflow as tf\n'), (3908, 'tensorflow.constant', 'tf.constant', (['matrix_b_values'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (3909, 'tensorflow.reshape', 'tf.reshape', (['matrix_b', '[4, 8]'], {}), True, 'import tensorflow as tf\n'), (3910, 'tensorflow.matmul', 'tf.matmul', (['inp', 'matrix_b'], {'transpose_a': '(False)', 'transpose_b': '(False)'}), True, 'import tensorflow as tf\n'), (3911, 'tensorflow.nn.relu', 'tf.nn.relu', (['matmul'], {'name': '"""output"""'}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '[1]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (246, 'tensorflow.ones', 'tf.ones', (['[3, 3, 3, num_filters]'], {}), True, 'import tensorflow as tf\n'), (541, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (590, 'tensorflow.quantization.fake_quant_with_min_max_vars', 'tf.quantization.fake_quant_with_min_max_vars', (['inputs', 'self.min_var', 'self.max_var'], {}), True, 'import tensorflow as tf\n'), (593, 'tensorflow.quantization.fake_quant_with_min_max_vars', 'tf.quantization.fake_quant_with_min_max_vars', (['self.w', 'self.min_var', 'self.max_var'], {}), True, 'import tensorflow as tf\n'), (595, 'tensorflow.matmul', 'tf.matmul', (['x', 'w_fq'], {}), True, 'import tensorflow as tf\n'), (597, 'tensorflow.quantization.fake_quant_with_min_max_vars', 'tf.quantization.fake_quant_with_min_max_vars', (['x', 'self.min_var', 'self.max_var'], {}), True, 'import tensorflow as tf\n'), (673, 'numpy.random.random_sample', 'np.random.random_sample', (['(20)'], {}), True, 'import numpy as np\n'), (688, 'tensorflow.gather', 'tf.gather', (['self.shared_weights', 'x'], {}), True, 'import tensorflow as tf\n'), (733, 'tensorflow.ones', 'tf.ones', (['[3, 3, 3, 3, 3]'], {}), True, 'import tensorflow as tf\n'), (1302, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'), (1303, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': 'shape', 'dtype': 'tf.float32', 'name': '"""inputB"""'}), True, 'import tensorflow as tf\n'), (1305, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': 'shape', 'dtype': 'tf.float32', 'name': '"""inputA"""'}), True, 'import tensorflow as tf\n'), (1307, 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {'name': '"""variable_node"""'}), True, 'import tensorflow as tf\n'), (1312, 'tensorflow.python.saved_model.saved_model.simple_save', 'saved_model.simple_save', (['sess', 'saved_model_dir', 'inputs', 'outputs'], {}), False, 'from tensorflow.python.saved_model import 
saved_model\n'), (1375, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'), (1376, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': '[1000, 1000]', 'dtype': 'tf.float32', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (1378, 'tensorflow.constant', 'tf.constant', ([], {'value': '(1)', 'dtype': 'tf.float32', 'shape': '[1000, 1000]'}), True, 'import tensorflow as tf\n'), (1382, 'tensorflow.python.saved_model.saved_model.simple_save', 'saved_model.simple_save', (['sess', 'saved_model_dir', 'inputs', 'outputs'], {}), False, 'from tensorflow.python.saved_model import saved_model\n'), (1455, 'tensorflow.constant', 'tf.constant', (['(-1)'], {'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (1465, 'tensorflow.compat.v1.saved_model.signature_def_utils.build_signature_def', 'tf.compat.v1.saved_model.signature_def_utils.build_signature_def', ([], {'inputs': "{'x': tensor_info_x}", 'outputs': "{'y': tensor_info_y}", 'method_name': '"""some_function"""'}), True, 'import tensorflow as tf\n'), (1546, 'tensorflow.compat.v1.saved_model.signature_def_utils.build_signature_def', 'tf.compat.v1.saved_model.signature_def_utils.build_signature_def', ([], {'inputs': "{'x': tensor_info_x}", 'outputs': "{'y': tensor_info_y}", 'method_name': '"""some_function"""'}), True, 'import tensorflow as tf\n'), (1726, 'numpy.random.random', 'np.random.random', (['tflite_input_shape'], {}), True, 'import numpy as np\n'), (2087, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (2088, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), True, 'import tensorflow as tf\n'), (2126, 'tensorflow.keras.layers.ConvLSTM2D', 'tf.keras.layers.ConvLSTM2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'return_sequences': '(True)', 'stateful': '(False)', 'batch_input_shape': '(1, 1, 10, 10, 1)'}), True, 'import tensorflow as tf\n'), (2187, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'), (2188, 'tensorflow.TensorShape', 'tf.TensorShape', (['None'], {}), True, 'import tensorflow as tf\n'), (2189, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': 'unknown_shape', 'dtype': 'tf.float32', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (2194, 'tensorflow.python.saved_model.saved_model.simple_save', 'saved_model.simple_save', (['sess', 'saved_model_dir', 'inputs', 'outputs'], {}), False, 'from tensorflow.python.saved_model import saved_model\n'), (2236, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['k_num_filters', '(3, 3)'], {'activation': '"""relu"""'}), True, 'import tensorflow as tf\n'), (2247, 'six.moves.range', 'range', (['(5)'], {}), False, 'from six.moves import range\n'), (2284, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1024)'], {'input_shape': '[1024]', 'activation': 'None', 'bias_initializer': '"""ones"""'}), True, 'import tensorflow as tf\n'), (2339, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['num_filters', '(3, 3)'], {'activation': '"""relu"""'}), True, 'import tensorflow as tf\n'), (2390, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (2391, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)', 'input_shape': '[1]'}), True, 'import tensorflow as tf\n'), (2460, 'tensorflow.keras.layers.Dense', 
'tf.keras.layers.Dense', ([], {'units': '(1)', 'input_shape': '[1]'}), True, 'import tensorflow as tf\n'), (2471, 'numpy.random.random_sample', 'np.random.random_sample', (['(20)'], {}), True, 'import numpy as np\n'), (2486, 'tensorflow.add', 'tf.add', (['self.shared_weights', 'x'], {}), True, 'import tensorflow as tf\n'), (2502, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)'], {'kernel_size': '(3)', 'padding': '"""same"""', 'activation': '"""relu"""', 'input_shape': '(32, 32, 3)', 'name': '"""tensor"""'}), True, 'import tensorflow as tf\n'), (2509, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'name': '"""output_tensor"""'}), True, 'import tensorflow as tf\n'), (2550, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(32, 32, 3)'}), True, 'import tensorflow as tf\n'), (2551, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['num_filters', '(3, 3)'], {'activation': '"""relu"""'}), True, 'import tensorflow as tf\n'), (2589, 'itertools.product', 'itertools.product', (['(2, 4, 6)', '(True, False)', '(True, False)'], {}), False, 'import itertools\n'), (2655, 'jax.numpy.sin', 'jnp.sin', (['input1'], {}), True, 'from jax import numpy as jnp\n'), (2655, 'jax.numpy.cos', 'jnp.cos', (['input2'], {}), True, 'from jax import numpy as jnp\n'), (2820, 'jax.numpy.sum', 'jnp.sum', (['x'], {'keepdims': '(False)'}), True, 'from jax import numpy as jnp\n'), (2896, 'numpy.random.random_sample', 'np.random.random_sample', (['(3, 10)'], {}), True, 'import numpy as np\n'), (2926, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x'], {}), True, 'import tensorflow as tf\n'), (2951, 'numpy.random.random_sample', 'np.random.random_sample', (['(3, 10, 10)'], {}), True, 'import numpy as np\n'), (2986, 'numpy.random.random_sample', 'np.random.random_sample', (['(1, 10, 10)'], {}), True, 'import numpy as np\n'), (2989, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(10, 10)', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (3013, 'numpy.random.random_sample', 'np.random.random_sample', (['(4, 10, 10)'], {}), True, 'import numpy as np\n'), (3033, 'numpy.random.random_sample', 'np.random.random_sample', (['(1, 10, 10)'], {}), True, 'import numpy as np\n'), (3038, 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', ([], {'units': '(10)', 'return_sequences': '(True)'}), True, 'import tensorflow as tf\n'), (3063, 'numpy.random.random_sample', 'np.random.random_sample', (['(1, 10, 10)'], {}), True, 'import numpy as np\n'), (3066, 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', ([], {'units': '(10)'}), True, 'import tensorflow as tf\n'), (3125, 'numpy.random.random_sample', 'np.random.random_sample', (['(10, 4)'], {}), True, 'import numpy as np\n'), (3131, 'tensorflow.fill', 'tf.fill', (['shape', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (3156, 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-10.0)', 'high': '(10.0)', 'size': '[33, 33]'}), True, 'import numpy as np\n'), (3162, 'tensorflow.fill', 'tf.fill', (['shape', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (3172, 'six.moves.range', 'range', (['(5)'], {}), False, 'from six.moves import range\n'), (3227, 'numpy.random.random_sample', 'np.random.random_sample', (['(1, 256, 256)'], {}), True, 'import numpy as np\n'), (3229, 'numpy.random.random_sample', 'np.random.random_sample', (['(1, 256, 256)'], {}), True, 'import numpy as np\n'), (3284, 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['[m]'], 
{}), False, 'from tensorflow.python.framework import ops\n'), (3285, 'tensorflow.python.ops.map_ops.tensor_map_insert', 'map_ops.tensor_map_insert', (['m', 'p', 'v'], {}), False, 'from tensorflow.python.ops import map_ops\n'), (3714, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'), (3715, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': '[1, 4]', 'dtype': 'dtypes.float32', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (3717, 'tensorflow.lite.python.testdata.double_op.double', 'double_op.double', (['in_tensor'], {}), False, 'from tensorflow.lite.python.testdata import double_op\n'), (3720, 'tensorflow.python.saved_model.saved_model.simple_save', 'saved_model.simple_save', (['sess', 'saved_model_dir', 'inputs', 'outputs'], {}), False, 'from tensorflow.python.saved_model import saved_model\n'), (3876, 'numpy.int32', 'np.int32', (['(0)'], {}), True, 'import numpy as np\n'), (242, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[1, 5, 5, 3]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (725, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[3, 3, 3, 3, 3]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (824, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[3]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (825, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[3]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (921, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[1, 2]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (922, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '()', 'dtype': 'tf.bool'}), True, 'import tensorflow as tf\n'), (1084, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(1, 5, 5, 3)'}), True, 'import numpy as np\n'), (1209, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[5, 5]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (1301, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (1311, 'tensorflow.compat.v1.variables_initializer', 'tf.compat.v1.variables_initializer', (['[variable_node]'], {}), True, 'import tensorflow as tf\n'), (1333, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (1374, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (2007, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(1,)'}), True, 'import numpy as np\n'), (2186, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (2517, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(1, 32, 32, 3)'}), True, 'import numpy as np\n'), (2599, 'numpy.prod', 'np.prod', (['tflite_input_shape'], {}), True, 'import numpy as np\n'), (2861, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[1, 2]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (2862, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '()', 'dtype': 'tf.bool'}), True, 'import tensorflow as tf\n'), (2901, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[3, 10]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (2932, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[2, 2]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (2956, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[3, 10, 10]', 'dtype': 'tf.float32'}), True, 'import tensorflow as 
tf\n'), (3128, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, 4]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (3150, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, 33]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (3232, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, 256, 256]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (3233, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, 256, 256]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (3254, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[1, None, 16, 3]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (3286, 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['[m2]'], {}), False, 'from tensorflow.python.framework import ops\n'), (3287, 'tensorflow.python.ops.map_ops.tensor_map_size', 'map_ops.tensor_map_size', (['m2'], {}), False, 'from tensorflow.python.ops import map_ops\n'), (3279, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[1]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (3327, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'), (3328, 'tensorflow.python.ops.map_ops.empty_tensor_map', 'map_ops.empty_tensor_map', ([], {}), False, 'from tensorflow.python.ops import map_ops\n'), (3334, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': '[1]', 'dtype': 'tf.int32', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (3342, 'tensorflow.python.saved_model.saved_model.simple_save', 'saved_model.simple_save', (['sess', 'saved_model_dir', 'inputs', 'outputs'], {}), False, 'from tensorflow.python.saved_model import saved_model\n'), (3383, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'), (3384, 'tensorflow.python.ops.map_ops.empty_tensor_map', 'map_ops.empty_tensor_map', ([], {}), False, 'from tensorflow.python.ops import map_ops\n'), (3394, 'tensorflow.while_loop', 'tf.while_loop', (['cond', 'body', '[0, m]'], {}), True, 'import tensorflow as tf\n'), (3395, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': '[1]', 'dtype': 'tf.int32', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (3401, 'tensorflow.python.saved_model.saved_model.simple_save', 'saved_model.simple_save', (['sess', 'saved_model_dir', 'inputs', 'outputs'], {}), False, 'from tensorflow.python.saved_model import saved_model\n'), (3441, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'), (3442, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': '[1]', 'dtype': 'tf.float32', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (3445, 'tensorflow.raw_ops.StackV2', 'tf.raw_ops.StackV2', ([], {'max_size': '(10)', 'elem_type': 'tf.float32'}), True, 'import tensorflow as tf\n'), (3446, 'tensorflow.raw_ops.StackPushV2', 'tf.raw_ops.StackPushV2', ([], {'handle': 'stack', 'elem': 'in_tensor'}), True, 'import tensorflow as tf\n'), (3455, 'tensorflow.python.saved_model.saved_model.simple_save', 'saved_model.simple_save', (['sess', 'saved_model_dir', 'inputs', 'outputs'], {}), False, 'from tensorflow.python.saved_model import saved_model\n'), (3495, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'), (3496, 'tensorflow.compat.v1.placeholder', 
'tf.compat.v1.placeholder', ([], {'shape': '[1]', 'dtype': 'tf.float32', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (3504, 'tensorflow.raw_ops.StackV2', 'tf.raw_ops.StackV2', ([], {'max_size': '(10)', 'elem_type': 'tf.float32'}), True, 'import tensorflow as tf\n'), (3514, 'tensorflow.python.saved_model.saved_model.simple_save', 'saved_model.simple_save', (['sess', 'saved_model_dir', 'inputs', 'outputs'], {}), False, 'from tensorflow.python.saved_model import saved_model\n'), (3547, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'), (3548, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': '[1]', 'dtype': 'tf.float32', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (3562, 'tensorflow.raw_ops.StackV2', 'tf.raw_ops.StackV2', ([], {'max_size': '(10)', 'elem_type': 'tf.float32'}), True, 'import tensorflow as tf\n'), (3563, 'tensorflow.while_loop', 'tf.while_loop', (['cond', 'body', '[0, arr, 0.0]'], {}), True, 'import tensorflow as tf\n'), (3571, 'tensorflow.python.saved_model.saved_model.simple_save', 'saved_model.simple_save', (['sess', 'saved_model_dir', 'inputs', 'outputs'], {}), False, 'from tensorflow.python.saved_model import saved_model\n'), (3606, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'), (3607, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': '[1]', 'dtype': 'tf.float32', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (3610, 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32'], {'size': '(3)', 'dynamic_size': '(False)', 'clear_after_read': '(False)'}), True, 'import tensorflow as tf\n'), (3620, 'tensorflow.python.saved_model.saved_model.simple_save', 'saved_model.simple_save', (['sess', 'saved_model_dir', 'inputs', 'outputs'], {}), False, 'from tensorflow.python.saved_model import saved_model\n'), (3657, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'), (3658, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': '[1]', 'dtype': 'tf.float32', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (3661, 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32'], {'size': '(0)', 'dynamic_size': '(True)', 'clear_after_read': '(False)'}), True, 'import tensorflow as tf\n'), (3671, 'tensorflow.python.saved_model.saved_model.simple_save', 'saved_model.simple_save', (['sess', 'saved_model_dir', 'inputs', 'outputs'], {}), False, 'from tensorflow.python.saved_model import saved_model\n'), (3713, 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (3906, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[16, 4]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (582, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(-6.0)'], {}), True, 'import tensorflow as tf\n'), (586, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(6.0)'], {}), True, 'import tensorflow as tf\n'), (664, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(1, 5, 5, 3)'}), True, 'import numpy as np\n'), (683, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(300 ** -0.5)'}), True, 'import tensorflow as tf\n'), (686, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(20)', 'dtype': 'tf.int32'}), 
True, 'import tensorflow as tf\n'), (936, 'tensorflow.constant', 'tf.constant', (['(True)'], {}), True, 'import tensorflow as tf\n'), (944, 'tensorflow.constant', 'tf.constant', (['(False)'], {}), True, 'import tensorflow as tf\n'), (2482, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(300 ** -0.5)'}), True, 'import tensorflow as tf\n'), (3326, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (3331, 'tensorflow.python.ops.map_ops.tensor_map_insert', 'map_ops.tensor_map_insert', (['m', 'i', 'i'], {}), False, 'from tensorflow.python.ops import map_ops\n'), (3338, 'tensorflow.python.ops.map_ops.tensor_map_size', 'map_ops.tensor_map_size', (['result_m'], {}), False, 'from tensorflow.python.ops import map_ops\n'), (3382, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (3391, 'tensorflow.python.ops.map_ops.tensor_map_insert', 'map_ops.tensor_map_insert', (['m', 'i', 'i'], {}), False, 'from tensorflow.python.ops import map_ops\n'), (3397, 'tensorflow.python.ops.map_ops.tensor_map_size', 'map_ops.tensor_map_size', (['result_m'], {}), False, 'from tensorflow.python.ops import map_ops\n'), (3440, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (3447, 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['[w]'], {}), False, 'from tensorflow.python.framework import ops\n'), (3494, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (3508, 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['[result_arr, n]'], {}), False, 'from tensorflow.python.framework import ops\n'), (3509, 'tensorflow.raw_ops.StackPopV2', 'tf.raw_ops.StackPopV2', ([], {'handle': 'result_arr', 'elem_type': 'tf.float32'}), True, 'import tensorflow as tf\n'), (3546, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (3565, 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['[result_arr, n]'], {}), False, 'from tensorflow.python.framework import ops\n'), (3566, 'tensorflow.raw_ops.StackPopV2', 'tf.raw_ops.StackPopV2', ([], {'handle': 'result_arr', 'elem_type': 'tf.float32'}), True, 'import tensorflow as tf\n'), (3605, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (3656, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (3449, 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['[a]'], {}), False, 'from tensorflow.python.framework import ops\n'), (252, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(1, 5, 5, 3)'}), True, 'import numpy as np\n'), (743, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(3, 3, 3, 3, 3)'}), True, 'import numpy as np\n'), (839, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(3)'}), True, 'import numpy as np\n'), (840, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(3)'}), True, 'import numpy as np\n'), (932, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(1, 2)'}), True, 'import numpy as np\n'), (940, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(1, 2)'}), True, 'import numpy as np\n'), (1865, 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(1)', 'size': '(1, 1)'}), True, 'import numpy as np\n'), (1868, 'numpy.random.uniform', 
'np.random.uniform', ([], {'low': '(0)', 'high': '(1)', 'size': '(1, 1)'}), True, 'import numpy as np\n'), (2309, 'numpy.random.randn', 'np.random.randn', (['(1)', '(1024)'], {}), True, 'import numpy as np\n'), (3450, 'tensorflow.raw_ops.StackPopV2', 'tf.raw_ops.StackPopV2', ([], {'handle': 'stack', 'elem_type': 'tf.float32'}), True, 'import tensorflow as tf\n'), (3501, 'tensorflow.cast', 'tf.cast', (['i'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (3559, 'tensorflow.cast', 'tf.cast', (['i'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (3724, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(1, 4)'}), True, 'import numpy as np\n'), (1960, 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(1)', 'size': '(1,)'}), True, 'import numpy as np\n'), (1964, 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(1)', 'size': '(1,)'}), True, 'import numpy as np\n'), (2248, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(1, 5, 5, 3)'}), True, 'import numpy as np\n'), (3173, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(batch, 33)'}), True, 'import numpy as np\n')]
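The api_extract rows above repeatedly catalogue the TFLiteConverterV2 entry points (from_keras_model, from_saved_model, from_concrete_functions, experimental_from_jax) together with the Interpreter used to execute the converted flatbuffer. A minimal sketch of that convert-then-run pattern, assuming a TF 2.x build where the public tf.lite aliases wrap TFLiteConverterV2; the toy model and input below are illustrative, not taken from the dataset:

import numpy as np
import tensorflow as tf

# Any small Keras model will do; the converter only needs a traceable model.
model = tf.keras.models.Sequential(
    [tf.keras.layers.Dense(units=1, input_shape=[1])])

# from_keras_model(...).convert() returns the TFLite flatbuffer as bytes.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()

# Execute the flatbuffer with the Python Interpreter.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
interpreter.set_tensor(input_details[0]["index"],
                       np.array([[1.0]], dtype=np.float32))
interpreter.invoke()
result = interpreter.get_tensor(output_details[0]["index"])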
NivekNey/tensorflow
3e21fe5faedab3a8258d344c8ad1cec2612a8aa8
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for convert_to_constants.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python import keras from tensorflow.python.client import session from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import convert_to_constants from tensorflow.python.framework import importer from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.saved_model.load import load from tensorflow.python.saved_model.save import save from tensorflow.python.training.tracking import tracking class VariablesToConstantsTest(test.TestCase): def _hasStatefulPartitionedCallOp(self, graph_def): """Determines if a StatefulPartitionedCall op exists in the graph.""" for node in graph_def.node: if node.op == "StatefulPartitionedCall": return True return False def _getNumVariables(self, graph_def): """Returns the number of ReadVariableOp in the graph.""" return sum(node.op == "ReadVariableOp" for node in graph_def.node) def _getTensors(self, sess, tensor_list): """Returns a list of Tensor objects from the Session.""" return [ sess.graph.get_tensor_by_name(tensor.name) for tensor in tensor_list ] def _evaluateGraphDef(self, graph_def, func, input_data): """Evaluates the GraphDef using Sessions.""" with ops.Graph().as_default() as graph: importer.import_graph_def(graph_def, name="") func.add_to_graph(graph) sess = session.Session(graph=graph) input_tensors = self._getTensors(sess, func.inputs) output_tensors = self._getTensors(sess, func.outputs) return sess.run( output_tensors, feed_dict=dict(zip(input_tensors, input_data))) @test_util.run_v2_only def testConstSavedModel(self): """Test a basic model with functions to make sure functions are inlined.""" input_data = constant_op.constant(1., shape=[1]) root = tracking.AutoTrackable() root.f = def_function.function(lambda x: 2. * x) to_save = root.f.get_concrete_function(input_data) save_dir = os.path.join(self.get_temp_dir(), "saved_model") save(root, save_dir, to_save) saved_model = load(save_dir) concrete_func = saved_model.signatures["serving_default"] variable_graph_def = concrete_func.graph.as_graph_def() self.assertEqual(0, self._getNumVariables(variable_graph_def)) self.assertTrue(variable_graph_def.library.function) constant_graph_def = convert_to_constants.convert_variables_to_constants_v2( concrete_func) self.assertEqual(0, self._getNumVariables(constant_graph_def)) self.assertFalse(constant_graph_def.library.function) # Check value. 
expected_value = root.f(input_data) actual_value = self._evaluateGraphDef(constant_graph_def, concrete_func, [input_data.numpy()]) self.assertEqual(expected_value.numpy(), actual_value) @test_util.run_v2_only def testVariableModel(self): """Test a basic model with Variables.""" input_data = constant_op.constant(1., shape=[1]) root = tracking.AutoTrackable() root.v1 = variables.Variable(3.) root.v2 = variables.Variable(2.) root.f = def_function.function(lambda x: root.v1 * root.v2 * x) concrete_func = root.f.get_concrete_function(input_data) variable_graph_def = concrete_func.graph.as_graph_def() self.assertEqual(2, self._getNumVariables(variable_graph_def)) constant_graph_def = convert_to_constants.convert_variables_to_constants_v2( concrete_func) self.assertEqual(0, self._getNumVariables(constant_graph_def)) self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def)) # Check value. expected_value = root.f(input_data) actual_value = self._evaluateGraphDef(constant_graph_def, concrete_func, [input_data.numpy()]) self.assertEqual(expected_value.numpy(), actual_value) @test_util.run_v2_only def testVariableSavedModel(self): """Test a basic model with Variables with saving/loading the SavedModel.""" input_data = constant_op.constant(1., shape=[1]) root = tracking.AutoTrackable() root.v1 = variables.Variable(3.) root.v2 = variables.Variable(2.) root.f = def_function.function(lambda x: root.v1 * root.v2 * x) to_save = root.f.get_concrete_function(input_data) save_dir = os.path.join(self.get_temp_dir(), "saved_model") save(root, save_dir, to_save) saved_model = load(save_dir) concrete_func = saved_model.signatures["serving_default"] variable_graph_def = concrete_func.graph.as_graph_def() self.assertTrue(self._hasStatefulPartitionedCallOp(variable_graph_def)) constant_graph_def = convert_to_constants.convert_variables_to_constants_v2( concrete_func) self.assertEqual(0, self._getNumVariables(constant_graph_def)) self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def)) # Check value. expected_value = root.f(input_data) actual_value = self._evaluateGraphDef(constant_graph_def, concrete_func, [input_data.numpy()]) self.assertEqual(expected_value.numpy(), actual_value) @test_util.run_v2_only def testMultiFunctionModel(self): """Test a basic model with Variables.""" class BasicModel(tracking.AutoTrackable): def __init__(self): self.y = None self.z = None @def_function.function def add(self, x): if self.y is None: self.y = variables.Variable(2.) return x + self.y @def_function.function def sub(self, x): if self.z is None: self.z = variables.Variable(3.) return x - self.z input_data = constant_op.constant(1., shape=[1]) root = BasicModel() concrete_func = root.add.get_concrete_function(input_data) variable_graph_def = concrete_func.graph.as_graph_def() self.assertEqual(1, self._getNumVariables(variable_graph_def)) constant_graph_def = convert_to_constants.convert_variables_to_constants_v2( concrete_func) self.assertEqual(0, self._getNumVariables(constant_graph_def)) self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def)) # Check value. expected_value = root.add(input_data) actual_value = self._evaluateGraphDef(constant_graph_def, concrete_func, [input_data.numpy()]) self.assertEqual(expected_value.numpy(), actual_value) @test_util.run_v2_only def testKerasModel(self): input_data = constant_op.constant(1., shape=[1, 1]) # Create a simple Keras model. 
x = [-1, 0, 1, 2, 3, 4] y = [-3, -1, 1, 3, 5, 7] model = keras.models.Sequential( [keras.layers.Dense(units=1, input_shape=[1])]) model.compile(optimizer="sgd", loss="mean_squared_error") model.fit(x, y, epochs=1) # Get the concrete function from the Keras model. @def_function.function def to_save(x): return model(x) concrete_func = to_save.get_concrete_function(input_data) variable_graph_def = concrete_func.graph.as_graph_def() self.assertEqual(2, self._getNumVariables(variable_graph_def)) constant_graph_def = convert_to_constants.convert_variables_to_constants_v2( concrete_func) self.assertEqual(0, self._getNumVariables(constant_graph_def)) self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def)) # Check value. expected_value = to_save(input_data) actual_value = self._evaluateGraphDef(constant_graph_def, concrete_func, [input_data.numpy()]) self.assertEqual(expected_value.numpy(), actual_value) if __name__ == "__main__": test.main()
[ "tensorflow.python.saved_model.load.load", "tensorflow.python.framework.importer.import_graph_def", "tensorflow.python.saved_model.save.save", "tensorflow.python.keras.layers.Dense", "tensorflow.python.eager.def_function.function", "tensorflow.python.training.tracking.tracking.AutoTrackable", "tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.variables.Variable", "tensorflow.python.client.session.Session", "tensorflow.python.platform.test.main", "tensorflow.python.framework.constant_op.constant" ]
tensorflow/python/framework/convert_to_constants_test.py
[(226, 'tensorflow.python.platform.test.main', 'test.main', ([], {}), False, 'from tensorflow.python.platform import test\n'), (72, 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'shape': '[1]'}), False, 'from tensorflow.python.framework import constant_op\n'), (73, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (74, 'tensorflow.python.eager.def_function.function', 'def_function.function', (['(lambda x: 2.0 * x)'], {}), False, 'from tensorflow.python.eager import def_function\n'), (78, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', 'to_save'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (79, 'tensorflow.python.saved_model.load.load', 'load', (['save_dir'], {}), False, 'from tensorflow.python.saved_model.load import load\n'), (86, 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_to_constants.convert_variables_to_constants_v2', (['concrete_func'], {}), False, 'from tensorflow.python.framework import convert_to_constants\n'), (100, 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'shape': '[1]'}), False, 'from tensorflow.python.framework import constant_op\n'), (101, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (102, 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(3.0)'], {}), False, 'from tensorflow.python.ops import variables\n'), (103, 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(2.0)'], {}), False, 'from tensorflow.python.ops import variables\n'), (104, 'tensorflow.python.eager.def_function.function', 'def_function.function', (['(lambda x: root.v1 * root.v2 * x)'], {}), False, 'from tensorflow.python.eager import def_function\n'), (110, 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_to_constants.convert_variables_to_constants_v2', (['concrete_func'], {}), False, 'from tensorflow.python.framework import convert_to_constants\n'), (124, 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'shape': '[1]'}), False, 'from tensorflow.python.framework import constant_op\n'), (125, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (126, 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(3.0)'], {}), False, 'from tensorflow.python.ops import variables\n'), (127, 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(2.0)'], {}), False, 'from tensorflow.python.ops import variables\n'), (128, 'tensorflow.python.eager.def_function.function', 'def_function.function', (['(lambda x: root.v1 * root.v2 * x)'], {}), False, 'from tensorflow.python.eager import def_function\n'), (132, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', 'to_save'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (133, 'tensorflow.python.saved_model.load.load', 'load', (['save_dir'], {}), False, 'from tensorflow.python.saved_model.load import load\n'), (139, 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 
'convert_to_constants.convert_variables_to_constants_v2', (['concrete_func'], {}), False, 'from tensorflow.python.framework import convert_to_constants\n'), (172, 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'shape': '[1]'}), False, 'from tensorflow.python.framework import constant_op\n'), (179, 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_to_constants.convert_variables_to_constants_v2', (['concrete_func'], {}), False, 'from tensorflow.python.framework import convert_to_constants\n'), (192, 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'shape': '[1, 1]'}), False, 'from tensorflow.python.framework import constant_op\n'), (213, 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_to_constants.convert_variables_to_constants_v2', (['concrete_func'], {}), False, 'from tensorflow.python.framework import convert_to_constants\n'), (60, 'tensorflow.python.framework.importer.import_graph_def', 'importer.import_graph_def', (['graph_def'], {'name': '""""""'}), False, 'from tensorflow.python.framework import importer\n'), (62, 'tensorflow.python.client.session.Session', 'session.Session', ([], {'graph': 'graph'}), False, 'from tensorflow.python.client import session\n'), (199, 'tensorflow.python.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(1)', 'input_shape': '[1]'}), False, 'from tensorflow.python import keras\n'), (59, 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (163, 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(2.0)'], {}), False, 'from tensorflow.python.ops import variables\n'), (169, 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(3.0)'], {}), False, 'from tensorflow.python.ops import variables\n')]
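The record above freezes ConcreteFunctions built from saved models, raw Variables, tf.functions, and a Keras model. As a minimal sketch of the pattern every one of those tests repeats, here is the same freeze written against the public tf.* API, except for convert_variables_to_constants_v2 itself, which only lives under tensorflow.python.framework; the toy function, shape, and values are placeholders, not part of the record.

import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

v = tf.Variable(3.)

@tf.function
def f(x):
    return v * x

concrete_func = f.get_concrete_function(tf.TensorSpec([1], tf.float32))
frozen_func = convert_variables_to_constants_v2(concrete_func)

# After freezing, every ReadVariableOp is folded into a Const node,
# which is what _getNumVariables() in the test above counts.
graph_def = frozen_func.graph.as_graph_def()
assert sum(node.op == 'ReadVariableOp' for node in graph_def.node) == 0

print(frozen_func(tf.constant([2.], dtype=tf.float32)))  # 3. * 2. -> [6.]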
vanshhhhh/federated
20fdca66d01051c55413868310d60c068c84b35d
# Copyright 2019, The TensorFlow Federated Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import numpy as np import tensorflow as tf import tensorflow_federated as tff def construct_example_training_comp(): """Constructs a `tff.templates.IterativeProcess` via the FL API.""" np.random.seed(0) input_spec = collections.OrderedDict( x=tf.TensorSpec(shape=[None, 2], dtype=tf.float32), y=tf.TensorSpec(shape=[None, 1], dtype=tf.int32)) def model_fn(): """Constructs keras model.""" keras_model = tf.keras.models.Sequential([ tf.keras.layers.Dense( 1, activation=tf.nn.softmax, kernel_initializer='zeros', input_shape=(2,)) ]) return tff.learning.from_keras_model( keras_model, input_spec=input_spec, loss=tf.keras.losses.SparseCategoricalCrossentropy(), metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]) return tff.learning.build_federated_averaging_process( model_fn, client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.01)) class MapReduceFormTest(tff.test.TestCase): def test_map_reduce_form_with_learning_structure_contains_only_one_broadcast_and_one_aggregate( self): ip = construct_example_training_comp() cf = tff.backends.mapreduce.get_map_reduce_form_for_iterative_process(ip) # This type spec test actually carries the meaning that TFF's vanilla path # to canonical form will broadcast and aggregate exactly one copy of the # parameters. So the type test below in fact functions as a regression test # for the TFF compiler pipeline. # pyformat: disable self.assertEqual( '(<\n' ' <\n' ' x=float32[?,2],\n' ' y=int32[?,1]\n' ' >*,\n' ' <\n' ' <\n' ' trainable=<\n' ' float32[2,1],\n' ' float32[1]\n' ' >,\n' ' non_trainable=<>\n' ' >\n' ' >\n' '> -> <\n' ' <\n' ' <\n' ' float32[2,1],\n' ' float32[1]\n' ' >,\n' ' float32,\n' ' <\n' ' sparse_categorical_accuracy=<\n' ' float32,\n' ' float32\n' ' >,\n' ' loss=<\n' ' float32,\n' ' float32\n' ' >\n' ' >,\n' ' <\n' ' num_examples=int64\n' ' >\n' ' >,\n' ' <>,\n' ' <>,\n' ' <>\n' '>)', cf.work.type_signature.formatted_representation()) # pyformat: enable def test_map_reduce_form_with_learning_structure_does_not_change_execution_of_iterative_process( self): if tf.config.list_logical_devices('GPU'): self.skipTest( 'b/137602785: bring GPU test back after the fix for `wrap_function`') ip_1 = construct_example_training_comp() # We disable Grappler to prevent a single TF function from being pulled into # the eager TF runtime with multiple definitions. grappler_config = tf.compat.v1.ConfigProto() grappler_config.graph_options.rewrite_options.disable_meta_optimizer = True cf = tff.backends.mapreduce.get_map_reduce_form_for_iterative_process( ip_1, grappler_config=grappler_config) ip_2 = tff.backends.mapreduce.get_iterative_process_for_map_reduce_form(cf) ip_1.initialize.type_signature.check_equivalent_to( ip_2.initialize.type_signature) # The next functions type_signatures may not be equal, since we may have # appended an empty tuple as client side-channel outputs if none existed. 
ip_1.next.type_signature.parameter.check_equivalent_to( ip_2.next.type_signature.parameter) ip_1.next.type_signature.result.check_equivalent_to( ip_2.next.type_signature.result) sample_batch = collections.OrderedDict( x=np.array([[1., 1.]], dtype=np.float32), y=np.array([[0]], dtype=np.int32), ) client_data = [sample_batch] state_1 = ip_1.initialize() server_state_1, server_output_1 = ip_1.next(state_1, [client_data]) server_state_1 = tff.structure.from_container( server_state_1, recursive=True) server_output_1 = tff.structure.from_container( server_output_1, recursive=True) server_state_1_arrays = tff.structure.flatten(server_state_1) server_output_1_arrays = tff.structure.flatten(server_output_1) state_2 = ip_2.initialize() server_state_2, server_output_2 = ip_2.next(state_2, [client_data]) server_state_2_arrays = tff.structure.flatten(server_state_2) server_output_2_arrays = tff.structure.flatten(server_output_2) self.assertEmpty(server_state_1.model_broadcast_state) # Note that we cannot simply use assertEqual because the values may differ # due to floating point issues. self.assertTrue( tff.structure.is_same_structure(server_state_1, server_state_2)) self.assertTrue( tff.structure.is_same_structure(server_output_1, server_output_2)) self.assertAllClose(server_state_1_arrays, server_state_2_arrays) self.assertAllClose(server_output_1_arrays[:2], server_output_2_arrays[:2]) if __name__ == '__main__': tff.backends.test.set_test_execution_context() tff.test.main()
[ "tensorflow.compat.v1.ConfigProto", "numpy.random.seed", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.layers.Dense", "tensorflow.config.list_logical_devices", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "numpy.array", "tensorflow.TensorSpec", "tensorflow.keras.optimizers.SGD" ]
tensorflow_federated/python/tests/map_reduce_form_test.py
[(24, 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), True, 'import numpy as np\n'), (160, 'tensorflow_federated.backends.test.set_test_execution_context', 'tff.backends.test.set_test_execution_context', ([], {}), True, 'import tensorflow_federated as tff\n'), (161, 'tensorflow_federated.test.main', 'tff.test.main', ([], {}), True, 'import tensorflow_federated as tff\n'), (57, 'tensorflow_federated.backends.mapreduce.get_map_reduce_form_for_iterative_process', 'tff.backends.mapreduce.get_map_reduce_form_for_iterative_process', (['ip'], {}), True, 'import tensorflow_federated as tff\n'), (109, 'tensorflow.config.list_logical_devices', 'tf.config.list_logical_devices', (['"""GPU"""'], {}), True, 'import tensorflow as tf\n'), (115, 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), True, 'import tensorflow as tf\n'), (117, 'tensorflow_federated.backends.mapreduce.get_map_reduce_form_for_iterative_process', 'tff.backends.mapreduce.get_map_reduce_form_for_iterative_process', (['ip_1'], {'grappler_config': 'grappler_config'}), True, 'import tensorflow_federated as tff\n'), (119, 'tensorflow_federated.backends.mapreduce.get_iterative_process_for_map_reduce_form', 'tff.backends.mapreduce.get_iterative_process_for_map_reduce_form', (['cf'], {}), True, 'import tensorflow_federated as tff\n'), (137, 'tensorflow_federated.structure.from_container', 'tff.structure.from_container', (['server_state_1'], {'recursive': '(True)'}), True, 'import tensorflow_federated as tff\n'), (139, 'tensorflow_federated.structure.from_container', 'tff.structure.from_container', (['server_output_1'], {'recursive': '(True)'}), True, 'import tensorflow_federated as tff\n'), (141, 'tensorflow_federated.structure.flatten', 'tff.structure.flatten', (['server_state_1'], {}), True, 'import tensorflow_federated as tff\n'), (142, 'tensorflow_federated.structure.flatten', 'tff.structure.flatten', (['server_output_1'], {}), True, 'import tensorflow_federated as tff\n'), (145, 'tensorflow_federated.structure.flatten', 'tff.structure.flatten', (['server_state_2'], {}), True, 'import tensorflow_federated as tff\n'), (146, 'tensorflow_federated.structure.flatten', 'tff.structure.flatten', (['server_output_2'], {}), True, 'import tensorflow_federated as tff\n'), (27, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, 2]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, 1]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (152, 'tensorflow_federated.structure.is_same_structure', 'tff.structure.is_same_structure', (['server_state_1', 'server_state_2'], {}), True, 'import tensorflow_federated as tff\n'), (154, 'tensorflow_federated.structure.is_same_structure', 'tff.structure.is_same_structure', (['server_output_1', 'server_output_2'], {}), True, 'import tensorflow_federated as tff\n'), (33, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': 'tf.nn.softmax', 'kernel_initializer': '"""zeros"""', 'input_shape': '(2,)'}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(0.01)'}), True, 'import tensorflow as tf\n'), (131, 'numpy.array', 'np.array', (['[[1.0, 1.0]]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (132, 'numpy.array', 
'np.array', (['[[0]]'], {'dtype': 'np.int32'}), True, 'import numpy as np\n'), (44, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {}), True, 'import tensorflow as tf\n')]
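The test above round-trips a federated averaging process through MapReduce form and checks that one training round produces the same numbers. A minimal sketch of driving such an iterative process directly, reusing construct_example_training_comp() from the record and the same batch layout; the round count is arbitrary, and the default local TFF execution context is assumed.

import collections
import numpy as np

iterative_process = construct_example_training_comp()
state = iterative_process.initialize()

batch = collections.OrderedDict(
    x=np.array([[1., 1.]], dtype=np.float32),
    y=np.array([[0]], dtype=np.int32))
federated_data = [[batch]]  # one client holding a single batch

for round_num in range(2):
    # next() returns the updated server state plus the round's metrics
    state, metrics = iterative_process.next(state, federated_data)
    print(f'round {round_num}: {metrics}')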
wiki-yu/fastapi-algorithm-library
8f745e9fe4d1d063dc8505d4c7f467e95209a385
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Export a YOLOv5 PyTorch model to TorchScript, ONNX, CoreML, TensorFlow (saved_model, pb, TFLite, TF.js,) formats
TensorFlow exports authored by https://github.com/zldrobit

Usage:
    $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml saved_model pb tflite tfjs

Inference:
    $ python path/to/detect.py --weights yolov5s.pt
                                         yolov5s.onnx (must export with --dynamic)
                                         yolov5s_saved_model
                                         yolov5s.pb
                                         yolov5s.tflite

TensorFlow.js:
    $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
    $ npm install
    $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model
    $ npm start
"""

import argparse
import os
import subprocess
import sys
import time
from pathlib import Path

import torch
import torch.nn as nn
from torch.utils.mobile_optimizer import optimize_for_mobile

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from app.libs.detection_tracking.yolov5.models.common import Conv
from app.libs.detection_tracking.yolov5.models.experimental import attempt_load
from app.libs.detection_tracking.yolov5.models.yolo import Detect
from app.libs.detection_tracking.yolov5.utils.activations import SiLU
from app.libs.detection_tracking.yolov5.utils.datasets import LoadImages
from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, \
    check_requirements, file_size, print_args, set_logging, url2file
from app.libs.detection_tracking.yolov5.utils.torch_utils import select_device


def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
    # YOLOv5 TorchScript model export
    try:
        print(f'\n{prefix} starting export with torch {torch.__version__}...')
        f = file.with_suffix('.torchscript.pt')

        ts = torch.jit.trace(model, im, strict=False)
        (optimize_for_mobile(ts) if optimize else ts).save(f)

        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    except Exception as e:
        print(f'{prefix} export failure: {e}')


def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')):
    # YOLOv5 ONNX export
    try:
        check_requirements(('onnx',))
        import onnx

        print(f'\n{prefix} starting export with onnx {onnx.__version__}...')
        f = file.with_suffix('.onnx')

        torch.onnx.export(model, im, f, verbose=False, opset_version=opset,
                          training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
                          do_constant_folding=not train,
                          input_names=['images'],
                          output_names=['output'],
                          dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'},  # shape(1,3,640,640)
                                        'output': {0: 'batch', 1: 'anchors'}  # shape(1,25200,85)
                                        } if dynamic else None)

        # Checks
        model_onnx = onnx.load(f)  # load onnx model
        onnx.checker.check_model(model_onnx)  # check onnx model
        # print(onnx.helper.printable_graph(model_onnx.graph))  # print

        # Simplify
        if simplify:
            try:
                check_requirements(('onnx-simplifier',))
                import onnxsim

                print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
                model_onnx, check = onnxsim.simplify(
                    model_onnx,
                    dynamic_input_shape=dynamic,
                    input_shapes={'images': list(im.shape)} if dynamic else None)
                assert check, 'assert check failed'
                onnx.save(model_onnx, f)
            except Exception as e:
                print(f'{prefix} simplifier failure: {e}')
        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
        print(f"{prefix} run --dynamic ONNX model inference with: 'python detect.py --weights {f}'")
    except Exception as e:
        print(f'{prefix} export failure: {e}')


def export_coreml(model, im, file, prefix=colorstr('CoreML:')):
    # YOLOv5 CoreML export
    ct_model = None
    try:
        check_requirements(('coremltools',))
        import coremltools as ct

        print(f'\n{prefix} starting export with coremltools {ct.__version__}...')
        f = file.with_suffix('.mlmodel')

        model.train()  # CoreML exports should be placed in model.train() mode
        ts = torch.jit.trace(model, im, strict=False)  # TorchScript model
        ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255.0, bias=[0, 0, 0])])
        ct_model.save(f)

        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    except Exception as e:
        print(f'\n{prefix} export failure: {e}')

    return ct_model


def export_saved_model(model, im, file, dynamic,
                       tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45,
                       conf_thres=0.25, prefix=colorstr('TensorFlow saved_model:')):
    # YOLOv5 TensorFlow saved_model export
    keras_model = None
    try:
        import tensorflow as tf
        from tensorflow import keras
        from app.libs.detection_tracking.yolov5.models.tf import TFModel, TFDetect

        print(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
        f = str(file).replace('.pt', '_saved_model')
        batch_size, ch, *imgsz = list(im.shape)  # BCHW

        tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
        im = tf.zeros((batch_size, *imgsz, 3))  # BHWC order for TensorFlow
        y = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
        inputs = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
        outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
        keras_model = keras.Model(inputs=inputs, outputs=outputs)
        keras_model.trainable = False
        keras_model.summary()
        keras_model.save(f, save_format='tf')

        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    except Exception as e:
        print(f'\n{prefix} export failure: {e}')

    return keras_model


def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')):
    # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
    try:
        import tensorflow as tf
        from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

        print(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
        f = file.with_suffix('.pb')

        m = tf.function(lambda x: keras_model(x))  # full model
        m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
        frozen_func = convert_variables_to_constants_v2(m)
        frozen_func.graph.as_graph_def()
        tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)

        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    except Exception as e:
        print(f'\n{prefix} export failure: {e}')


def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('TensorFlow Lite:')):
    # YOLOv5 TensorFlow Lite export
    try:
        import tensorflow as tf
        from models.tf import representative_dataset_gen

        print(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
        batch_size, ch, *imgsz = list(im.shape)  # BCHW
        f = str(file).replace('.pt', '-fp16.tflite')

        converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
        converter.target_spec.supported_types = [tf.float16]
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        if int8:
            dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False)  # representative data
            converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib)
            converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
            converter.target_spec.supported_types = []
            converter.inference_input_type = tf.uint8  # or tf.int8
            converter.inference_output_type = tf.uint8  # or tf.int8
            converter.experimental_new_quantizer = False
            f = str(file).replace('.pt', '-int8.tflite')

        tflite_model = converter.convert()
        open(f, "wb").write(tflite_model)

        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    except Exception as e:
        print(f'\n{prefix} export failure: {e}')


def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')):
    # YOLOv5 TensorFlow.js export
    try:
        check_requirements(('tensorflowjs',))
        import re
        import tensorflowjs as tfjs

        print(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
        f = str(file).replace('.pt', '_web_model')  # js dir
        f_pb = file.with_suffix('.pb')  # *.pb path
        f_json = f + '/model.json'  # *.json path

        cmd = f"tensorflowjs_converter --input_format=tf_frozen_model " \
              f"--output_node_names='Identity,Identity_1,Identity_2,Identity_3' {f_pb} {f}"
        subprocess.run(cmd, shell=True)

        json = open(f_json).read()
        with open(f_json, 'w') as j:  # sort JSON Identity_* in ascending order
            subst = re.sub(
                r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
                r'"Identity.?.?": {"name": "Identity.?.?"}, '
                r'"Identity.?.?": {"name": "Identity.?.?"}, '
                r'"Identity.?.?": {"name": "Identity.?.?"}}}',
                r'{"outputs": {"Identity": {"name": "Identity"}, '
                r'"Identity_1": {"name": "Identity_1"}, '
                r'"Identity_2": {"name": "Identity_2"}, '
                r'"Identity_3": {"name": "Identity_3"}}}',
                json)
            j.write(subst)

        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    except Exception as e:
        print(f'\n{prefix} export failure: {e}')


@torch.no_grad()
def run(data=ROOT / 'data/coco128.yaml',  # 'dataset.yaml path'
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=(640, 640),  # image (height, width)
        batch_size=1,  # batch size
        device='cpu',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        include=('torchscript', 'onnx', 'coreml'),  # include formats
        half=False,  # FP16 half-precision export
        inplace=False,  # set YOLOv5 Detect() inplace=True
        train=False,  # model.train() mode
        optimize=False,  # TorchScript: optimize for mobile
        int8=False,  # CoreML/TF INT8 quantization
        dynamic=False,  # ONNX/TF: dynamic axes
        simplify=False,  # ONNX: simplify model
        opset=12,  # ONNX: opset version
        topk_per_class=100,  # TF.js NMS: topk per class to keep
        topk_all=100,  # TF.js NMS: topk for all classes to keep
        iou_thres=0.45,  # TF.js NMS: IoU threshold
        conf_thres=0.25  # TF.js NMS: confidence threshold
        ):
    t = time.time()
    include = [x.lower() for x in include]
    tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'tfjs'))  # TensorFlow exports
    imgsz *= 2 if len(imgsz) == 1 else 1  # expand
    file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights)

    # Load PyTorch model
    device = select_device(device)
    assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. use --device 0'
    model = attempt_load(weights, map_location=device, inplace=True, fuse=True)  # load FP32 model
    nc, names = model.nc, model.names  # number of classes, class names

    # Input
    gs = int(max(model.stride))  # grid size (max stride)
    imgsz = [check_img_size(x, gs) for x in imgsz]  # verify img_size are gs-multiples
    im = torch.zeros(batch_size, 3, *imgsz).to(device)  # image size(1,3,320,192) BCHW iDetection

    # Update model
    if half:
        im, model = im.half(), model.half()  # to FP16
    model.train() if train else model.eval()  # training mode = no Detect() layer grid construction
    for k, m in model.named_modules():
        if isinstance(m, Conv):  # assign export-friendly activations
            if isinstance(m.act, nn.SiLU):
                m.act = SiLU()
        elif isinstance(m, Detect):
            m.inplace = inplace
            m.onnx_dynamic = dynamic
            # m.forward = m.forward_export  # assign forward (optional)

    for _ in range(2):
        y = model(im)  # dry runs
    print(f"\n{colorstr('PyTorch:')} starting from {file} ({file_size(file):.1f} MB)")

    # Exports
    if 'torchscript' in include:
        export_torchscript(model, im, file, optimize)
    if 'onnx' in include:
        export_onnx(model, im, file, opset, train, dynamic, simplify)
    if 'coreml' in include:
        export_coreml(model, im, file)

    # TensorFlow Exports
    if any(tf_exports):
        pb, tflite, tfjs = tf_exports[1:]
        assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.'
        model = export_saved_model(model, im, file, dynamic, tf_nms=tfjs, agnostic_nms=tfjs,
                                   topk_per_class=topk_per_class, topk_all=topk_all, conf_thres=conf_thres,
                                   iou_thres=iou_thres)  # keras model
        if pb or tfjs:  # pb prerequisite to tfjs
            export_pb(model, im, file)
        if tflite:
            export_tflite(model, im, file, int8=int8, data=data, ncalib=100)
        if tfjs:
            export_tfjs(model, im, file)

    # Finish
    print(f'\nExport complete ({time.time() - t:.2f}s)'
          f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
          f'\nVisualize with https://netron.app')


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--half', action='store_true', help='FP16 half-precision export')
    parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True')
    parser.add_argument('--train', action='store_true', help='model.train() mode')
    parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
    parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization')
    parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes')
    parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
    parser.add_argument('--opset', type=int, default=13, help='ONNX: opset version')
    parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep')
    parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold')
    parser.add_argument('--include', nargs='+',
                        default=['torchscript', 'onnx'],
                        help='available formats are (torchscript, onnx, coreml, saved_model, pb, tflite, tfjs)')
    opt = parser.parse_args()
    print_args(FILE.stem, opt)
    return opt


def main(opt):
    set_logging()
    run(**vars(opt))


if __name__ == "__main__":
    opt = parse_opt()
    main(opt)
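export_pb() above wraps the Keras model in a tf.function, freezes it with convert_variables_to_constants_v2, and serializes the frozen graph, the same internal API the first record in this section tests. A minimal sketch of that pattern in isolation; the toy Dense model and the 'frozen.pb' filename are placeholders for the YOLOv5 specifics.

import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

keras_model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])

full_model = tf.function(lambda x: keras_model(x))  # wrap the full model
concrete_func = full_model.get_concrete_function(
    tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
frozen_func = convert_variables_to_constants_v2(concrete_func)

# Serialize the variable-free GraphDef, as export_pb() does for YOLOv5
tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir='.',
                  name='frozen.pb', as_text=False)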
[ "torch.onnx.export", "torch.jit.trace", "tensorflow.keras.Input", "tensorflow.lite.TFLiteConverter.from_keras_model", "tensorflow.zeros", "torch.zeros", "torch.utils.mobile_optimizer.optimize_for_mobile", "tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2", "tensorflow.keras.Model", "torch.no_grad", "tensorflow.TensorSpec" ]
app/libs/detection_tracking/yolov5/export.py
[(248, 'torch.no_grad', 'torch.no_grad', ([], {}), False, 'import torch\n'), (50, 'app.libs.detection_tracking.yolov5.utils.general.colorstr', 'colorstr', (['"""TorchScript:"""'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (64, 'app.libs.detection_tracking.yolov5.utils.general.colorstr', 'colorstr', (['"""ONNX:"""'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (108, 'app.libs.detection_tracking.yolov5.utils.general.colorstr', 'colorstr', (['"""CoreML:"""'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (132, 'app.libs.detection_tracking.yolov5.utils.general.colorstr', 'colorstr', (['"""TensorFlow saved_model:"""'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (161, 'app.libs.detection_tracking.yolov5.utils.general.colorstr', 'colorstr', (['"""TensorFlow GraphDef:"""'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (181, 'app.libs.detection_tracking.yolov5.utils.general.colorstr', 'colorstr', (['"""TensorFlow Lite:"""'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (213, 'app.libs.detection_tracking.yolov5.utils.general.colorstr', 'colorstr', (['"""TensorFlow.js:"""'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (268, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (275, 'app.libs.detection_tracking.yolov5.utils.torch_utils.select_device', 'select_device', (['device'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.torch_utils import select_device\n'), (277, 'app.libs.detection_tracking.yolov5.models.experimental.attempt_load', 'attempt_load', (['weights'], {'map_location': 'device', 'inplace': '(True)', 'fuse': '(True)'}), False, 'from app.libs.detection_tracking.yolov5.models.experimental import attempt_load\n'), (331, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), False, 'import argparse\n'), (353, 'app.libs.detection_tracking.yolov5.utils.general.print_args', 'print_args', (['FILE.stem', 'opt'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (358, 'app.libs.detection_tracking.yolov5.utils.general.set_logging', 'set_logging', ([], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (34, 'pathlib.Path', 'Path', (['__file__'], {}), False, 'from pathlib import Path\n'), (38, 'pathlib.Path.cwd', 'Path.cwd', ([], {}), False, 'from pathlib import Path\n'), (56, 'torch.jit.trace', 'torch.jit.trace', (['model', 'im'], 
{'strict': '(False)'}), False, 'import torch\n'), (67, 'app.libs.detection_tracking.yolov5.utils.general.check_requirements', 'check_requirements', (["('onnx',)"], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (73, 'torch.onnx.export', 'torch.onnx.export', (['model', 'im', 'f'], {'verbose': '(False)', 'opset_version': 'opset', 'training': '(torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL)', 'do_constant_folding': '(not train)', 'input_names': "['images']", 'output_names': "['output']", 'dynamic_axes': "({'images': {(0): 'batch', (2): 'height', (3): 'width'}, 'output': {(0):\n 'batch', (1): 'anchors'}} if dynamic else None)"}), False, 'import torch\n'), (83, 'onnx.load', 'onnx.load', (['f'], {}), False, 'import onnx\n'), (84, 'onnx.checker.check_model', 'onnx.checker.check_model', (['model_onnx'], {}), False, 'import onnx\n'), (112, 'app.libs.detection_tracking.yolov5.utils.general.check_requirements', 'check_requirements', (["('coremltools',)"], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (119, 'torch.jit.trace', 'torch.jit.trace', (['model', 'im'], {'strict': '(False)'}), False, 'import torch\n'), (144, 'app.libs.detection_tracking.yolov5.models.tf.TFModel', 'TFModel', ([], {'cfg': 'model.yaml', 'model': 'model', 'nc': 'model.nc', 'imgsz': 'imgsz'}), False, 'from app.libs.detection_tracking.yolov5.models.tf import TFModel, TFDetect\n'), (145, 'tensorflow.zeros', 'tf.zeros', (['(batch_size, *imgsz, 3)'], {}), True, 'import tensorflow as tf\n'), (147, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(*imgsz, 3)', 'batch_size': '(None if dynamic else batch_size)'}), False, 'from tensorflow import keras\n'), (149, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), False, 'from tensorflow import keras\n'), (172, 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_variables_to_constants_v2', (['m'], {}), False, 'from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2\n'), (191, 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['keras_model'], {}), True, 'import tensorflow as tf\n'), (216, 'app.libs.detection_tracking.yolov5.utils.general.check_requirements', 'check_requirements', (["('tensorflowjs',)"], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (227, 'subprocess.run', 'subprocess.run', (['cmd'], {'shell': '(True)'}), False, 'import subprocess\n'), (282, 'app.libs.detection_tracking.yolov5.utils.general.check_img_size', 'check_img_size', (['x', 'gs'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (171, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['keras_model.inputs[0].shape', 'keras_model.inputs[0].dtype'], {}), True, 'import tensorflow as tf\n'), (231, 're.sub', 're.sub', (['"""{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, "Identity.?.?": {"name": "Identity.?.?"}, "Identity.?.?": {"name": "Identity.?.?"}, "Identity.?.?": {"name": 
"Identity.?.?"}}}"""', '"""{"outputs": {"Identity": {"name": "Identity"}, "Identity_1": {"name": "Identity_1"}, "Identity_2": {"name": "Identity_2"}, "Identity_3": {"name": "Identity_3"}}}"""', 'json'], {}), False, 'import re\n'), (272, 'app.libs.detection_tracking.yolov5.utils.general.url2file', 'url2file', (['weights'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (283, 'torch.zeros', 'torch.zeros', (['batch_size', '(3)', '*imgsz'], {}), False, 'import torch\n'), (90, 'app.libs.detection_tracking.yolov5.utils.general.check_requirements', 'check_requirements', (["('onnx-simplifier',)"], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (99, 'onnx.save', 'onnx.save', (['model_onnx', 'f'], {}), False, 'import onnx\n'), (197, 'models.tf.representative_dataset_gen', 'representative_dataset_gen', (['dataset', 'ncalib'], {}), False, 'from models.tf import representative_dataset_gen\n'), (292, 'app.libs.detection_tracking.yolov5.utils.activations.SiLU', 'SiLU', ([], {}), False, 'from app.libs.detection_tracking.yolov5.utils.activations import SiLU\n'), (300, 'app.libs.detection_tracking.yolov5.utils.general.colorstr', 'colorstr', (['"""PyTorch:"""'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (300, 'app.libs.detection_tracking.yolov5.utils.general.file_size', 'file_size', (['file'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (57, 'torch.utils.mobile_optimizer.optimize_for_mobile', 'optimize_for_mobile', (['ts'], {}), False, 'from torch.utils.mobile_optimizer import optimize_for_mobile\n'), (59, 'app.libs.detection_tracking.yolov5.utils.general.file_size', 'file_size', (['f'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (102, 'app.libs.detection_tracking.yolov5.utils.general.file_size', 'file_size', (['f'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (120, 'coremltools.ImageType', 'ct.ImageType', (['"""image"""'], {'shape': 'im.shape', 'scale': '(1 / 255.0)', 'bias': '[0, 0, 0]'}), True, 'import coremltools as ct\n'), (123, 'app.libs.detection_tracking.yolov5.utils.general.file_size', 'file_size', (['f'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (154, 'app.libs.detection_tracking.yolov5.utils.general.file_size', 'file_size', (['f'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (176, 'app.libs.detection_tracking.yolov5.utils.general.file_size', 'file_size', (['f'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, 
check_requirements, file_size, print_args, set_logging, url2file\n'), (196, 'app.libs.detection_tracking.yolov5.utils.general.check_dataset', 'check_dataset', (['data'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (207, 'app.libs.detection_tracking.yolov5.utils.general.file_size', 'file_size', (['f'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (243, 'app.libs.detection_tracking.yolov5.utils.general.file_size', 'file_size', (['f'], {}), False, 'from app.libs.detection_tracking.yolov5.utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, set_logging, url2file\n'), (325, 'time.time', 'time.time', ([], {}), False, 'import time\n')]
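Much of the extraction above traces back to the torch.onnx.export call that export_onnx() assembles. A minimal sketch of the same call stripped of the YOLOv5 wrapper; the toy convolutional model, the 'toy.onnx' filename, and the opset are placeholders.

import onnx
import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU()).eval()
im = torch.zeros(1, 3, 64, 64)  # dummy BCHW input

torch.onnx.export(model, im, 'toy.onnx',
                  opset_version=12,
                  input_names=['images'],
                  output_names=['output'],
                  dynamic_axes={'images': {0: 'batch'}, 'output': {0: 'batch'}})

# Same structural sanity check export_onnx() runs after writing the file
onnx.checker.check_model(onnx.load('toy.onnx'))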
levinxo/tensorflow
897aa385703eb45afb6006ca701c6d51c2f8d835
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name """DenseNet models for Keras. Reference: - [Densely Connected Convolutional Networks]( https://arxiv.org/abs/1608.06993) (CVPR 2017) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras import backend from tensorflow.python.keras.applications import imagenet_utils from tensorflow.python.keras.engine import training from tensorflow.python.keras.layers import VersionAwareLayers from tensorflow.python.keras.utils import data_utils from tensorflow.python.keras.utils import layer_utils from tensorflow.python.lib.io import file_io from tensorflow.python.util.tf_export import keras_export BASE_WEIGTHS_PATH = ('https://storage.googleapis.com/tensorflow/' 'keras-applications/densenet/') DENSENET121_WEIGHT_PATH = ( BASE_WEIGTHS_PATH + 'densenet121_weights_tf_dim_ordering_tf_kernels.h5') DENSENET121_WEIGHT_PATH_NO_TOP = ( BASE_WEIGTHS_PATH + 'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5') DENSENET169_WEIGHT_PATH = ( BASE_WEIGTHS_PATH + 'densenet169_weights_tf_dim_ordering_tf_kernels.h5') DENSENET169_WEIGHT_PATH_NO_TOP = ( BASE_WEIGTHS_PATH + 'densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5') DENSENET201_WEIGHT_PATH = ( BASE_WEIGTHS_PATH + 'densenet201_weights_tf_dim_ordering_tf_kernels.h5') DENSENET201_WEIGHT_PATH_NO_TOP = ( BASE_WEIGTHS_PATH + 'densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5') layers = VersionAwareLayers() def dense_block(x, blocks, name): """A dense block. Arguments: x: input tensor. blocks: integer, the number of building blocks. name: string, block label. Returns: Output tensor for the block. """ for i in range(blocks): x = conv_block(x, 32, name=name + '_block' + str(i + 1)) return x def transition_block(x, reduction, name): """A transition block. Arguments: x: input tensor. reduction: float, compression rate at transition layers. name: string, block label. Returns: output tensor for the block. """ bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1 x = layers.BatchNormalization( axis=bn_axis, epsilon=1.001e-5, name=name + '_bn')( x) x = layers.Activation('relu', name=name + '_relu')(x) x = layers.Conv2D( int(backend.int_shape(x)[bn_axis] * reduction), 1, use_bias=False, name=name + '_conv')( x) x = layers.AveragePooling2D(2, strides=2, name=name + '_pool')(x) return x def conv_block(x, growth_rate, name): """A building block for a dense block. Arguments: x: input tensor. growth_rate: float, growth rate at dense layers. name: string, block label. Returns: Output tensor for the block. 
""" bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1 x1 = layers.BatchNormalization( axis=bn_axis, epsilon=1.001e-5, name=name + '_0_bn')( x) x1 = layers.Activation('relu', name=name + '_0_relu')(x1) x1 = layers.Conv2D( 4 * growth_rate, 1, use_bias=False, name=name + '_1_conv')( x1) x1 = layers.BatchNormalization( axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')( x1) x1 = layers.Activation('relu', name=name + '_1_relu')(x1) x1 = layers.Conv2D( growth_rate, 3, padding='same', use_bias=False, name=name + '_2_conv')( x1) x = layers.Concatenate(axis=bn_axis, name=name + '_concat')([x, x1]) return x def DenseNet( blocks, include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax'): """Instantiates the DenseNet architecture. Reference: - [Densely Connected Convolutional Networks]( https://arxiv.org/abs/1608.06993) (CVPR 2017) Optionally loads weights pre-trained on ImageNet. Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. Caution: Be sure to properly pre-process your inputs to the application. Please see `applications.densenet.preprocess_input` for an example. Arguments: blocks: numbers of building blocks for the four dense layers. include_top: whether to include the fully-connected layer at the top of the network. weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. input_shape: optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(224, 224, 3)` (with `'channels_last'` data format) or `(3, 224, 224)` (with `'channels_first'` data format). It should have exactly 3 inputs channels, and width and height should be no smaller than 32. E.g. `(200, 200, 3)` would be one valid value. pooling: optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional block. - `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. classifier_activation: A `str` or callable. The activation function to use on the "top" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the "top" layer. Returns: A `keras.Model` instance. Raises: ValueError: in case of invalid argument for `weights`, or invalid input shape. ValueError: if `classifier_activation` is not `softmax` or `None` when using a pretrained top layer. 
""" if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)): raise ValueError('The `weights` argument should be either ' '`None` (random initialization), `imagenet` ' '(pre-training on ImageNet), ' 'or the path to the weights file to be loaded.') if weights == 'imagenet' and include_top and classes != 1000: raise ValueError('If using `weights` as `"imagenet"` with `include_top`' ' as true, `classes` should be 1000') # Determine proper input shape input_shape = imagenet_utils.obtain_input_shape( input_shape, default_size=224, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) if input_tensor is None: img_input = layers.Input(shape=input_shape) else: if not backend.is_keras_tensor(input_tensor): img_input = layers.Input(tensor=input_tensor, shape=input_shape) else: img_input = input_tensor bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1 x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input) x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x) x = layers.BatchNormalization( axis=bn_axis, epsilon=1.001e-5, name='conv1/bn')( x) x = layers.Activation('relu', name='conv1/relu')(x) x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x) x = layers.MaxPooling2D(3, strides=2, name='pool1')(x) x = dense_block(x, blocks[0], name='conv2') x = transition_block(x, 0.5, name='pool2') x = dense_block(x, blocks[1], name='conv3') x = transition_block(x, 0.5, name='pool3') x = dense_block(x, blocks[2], name='conv4') x = transition_block(x, 0.5, name='pool4') x = dense_block(x, blocks[3], name='conv5') x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x) x = layers.Activation('relu', name='relu')(x) if include_top: x = layers.GlobalAveragePooling2D(name='avg_pool')(x) imagenet_utils.validate_activation(classifier_activation, weights) x = layers.Dense(classes, activation=classifier_activation, name='predictions')(x) else: if pooling == 'avg': x = layers.GlobalAveragePooling2D(name='avg_pool')(x) elif pooling == 'max': x = layers.GlobalMaxPooling2D(name='max_pool')(x) # Ensure that the model takes into account # any potential predecessors of `input_tensor`. if input_tensor is not None: inputs = layer_utils.get_source_inputs(input_tensor) else: inputs = img_input # Create model. if blocks == [6, 12, 24, 16]: model = training.Model(inputs, x, name='densenet121') elif blocks == [6, 12, 32, 32]: model = training.Model(inputs, x, name='densenet169') elif blocks == [6, 12, 48, 32]: model = training.Model(inputs, x, name='densenet201') else: model = training.Model(inputs, x, name='densenet') # Load weights. 
if weights == 'imagenet': if include_top: if blocks == [6, 12, 24, 16]: weights_path = data_utils.get_file( 'densenet121_weights_tf_dim_ordering_tf_kernels.h5', DENSENET121_WEIGHT_PATH, cache_subdir='models', file_hash='9d60b8095a5708f2dcce2bca79d332c7') elif blocks == [6, 12, 32, 32]: weights_path = data_utils.get_file( 'densenet169_weights_tf_dim_ordering_tf_kernels.h5', DENSENET169_WEIGHT_PATH, cache_subdir='models', file_hash='d699b8f76981ab1b30698df4c175e90b') elif blocks == [6, 12, 48, 32]: weights_path = data_utils.get_file( 'densenet201_weights_tf_dim_ordering_tf_kernels.h5', DENSENET201_WEIGHT_PATH, cache_subdir='models', file_hash='1ceb130c1ea1b78c3bf6114dbdfd8807') else: if blocks == [6, 12, 24, 16]: weights_path = data_utils.get_file( 'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5', DENSENET121_WEIGHT_PATH_NO_TOP, cache_subdir='models', file_hash='30ee3e1110167f948a6b9946edeeb738') elif blocks == [6, 12, 32, 32]: weights_path = data_utils.get_file( 'densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5', DENSENET169_WEIGHT_PATH_NO_TOP, cache_subdir='models', file_hash='b8c4d4c20dd625c148057b9ff1c1176b') elif blocks == [6, 12, 48, 32]: weights_path = data_utils.get_file( 'densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5', DENSENET201_WEIGHT_PATH_NO_TOP, cache_subdir='models', file_hash='c13680b51ded0fb44dff2d8f86ac8bb1') model.load_weights(weights_path) elif weights is not None: model.load_weights(weights) return model @keras_export('keras.applications.densenet.DenseNet121', 'keras.applications.DenseNet121') def DenseNet121(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000): """Instantiates the Densenet121 architecture.""" return DenseNet([6, 12, 24, 16], include_top, weights, input_tensor, input_shape, pooling, classes) @keras_export('keras.applications.densenet.DenseNet169', 'keras.applications.DenseNet169') def DenseNet169(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000): """Instantiates the Densenet169 architecture.""" return DenseNet([6, 12, 32, 32], include_top, weights, input_tensor, input_shape, pooling, classes) @keras_export('keras.applications.densenet.DenseNet201', 'keras.applications.DenseNet201') def DenseNet201(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000): """Instantiates the Densenet201 architecture.""" return DenseNet([6, 12, 48, 32], include_top, weights, input_tensor, input_shape, pooling, classes) @keras_export('keras.applications.densenet.preprocess_input') def preprocess_input(x, data_format=None): return imagenet_utils.preprocess_input( x, data_format=data_format, mode='torch') @keras_export('keras.applications.densenet.decode_predictions') def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format( mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TORCH, error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC) decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ DOC = """ Reference: - [Densely Connected Convolutional Networks]( https://arxiv.org/abs/1608.06993) (CVPR 2017) Optionally loads weights pre-trained on ImageNet. Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. Caution: Be sure to properly pre-process your inputs to the application. 
Please see `applications.densenet.preprocess_input` for an example. Arguments: include_top: whether to include the fully-connected layer at the top of the network. weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. input_shape: optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(224, 224, 3)` (with `'channels_last'` data format) or `(3, 224, 224)` (with `'channels_first'` data format). It should have exactly 3 inputs channels, and width and height should be no smaller than 32. E.g. `(200, 200, 3)` would be one valid value. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional block. - `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. Returns: A Keras model instance. """ setattr(DenseNet121, '__doc__', DenseNet121.__doc__ + DOC) setattr(DenseNet169, '__doc__', DenseNet169.__doc__ + DOC) setattr(DenseNet201, '__doc__', DenseNet201.__doc__ + DOC)
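densenet.py above defines the three builders plus the preprocess_input/decode_predictions helpers. A minimal sketch of end-to-end inference through the public keras.applications mirror of this module; the random array stands in for a real 224x224 image batch.

import numpy as np
from tensorflow.keras.applications.densenet import (
    DenseNet121, decode_predictions, preprocess_input)

model = DenseNet121(weights='imagenet')  # fetches the *.h5 weights listed above
x = preprocess_input(np.random.rand(1, 224, 224, 3) * 255.)  # 'torch'-mode scaling
preds = model.predict(x)
print(decode_predictions(preds, top=5)[0])  # top-5 (class_id, name, score) tuples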
[ "tensorflow.python.keras.applications.imagenet_utils.decode_predictions", "tensorflow.python.keras.backend.image_data_format", "tensorflow.python.keras.utils.data_utils.get_file", "tensorflow.python.keras.layers.VersionAwareLayers", "tensorflow.python.util.tf_export.keras_export", "tensorflow.python.keras.utils.layer_utils.get_source_inputs", "tensorflow.python.keras.backend.is_keras_tensor", "tensorflow.python.lib.io.file_io.file_exists_v2", "tensorflow.python.keras.backend.int_shape", "tensorflow.python.keras.applications.imagenet_utils.PREPROCESS_INPUT_DOC.format", "tensorflow.python.keras.applications.imagenet_utils.preprocess_input", "tensorflow.python.keras.applications.imagenet_utils.validate_activation", "tensorflow.python.keras.engine.training.Model" ]
tensorflow/python/keras/applications/densenet.py
[(54, 'tensorflow.python.keras.layers.VersionAwareLayers', 'VersionAwareLayers', ([], {}), False, 'from tensorflow.python.keras.layers import VersionAwareLayers\n'), (320, 'tensorflow.python.util.tf_export.keras_export', 'keras_export', (['"""keras.applications.densenet.DenseNet121"""', '"""keras.applications.DenseNet121"""'], {}), False, 'from tensorflow.python.util.tf_export import keras_export\n'), (333, 'tensorflow.python.util.tf_export.keras_export', 'keras_export', (['"""keras.applications.densenet.DenseNet169"""', '"""keras.applications.DenseNet169"""'], {}), False, 'from tensorflow.python.util.tf_export import keras_export\n'), (346, 'tensorflow.python.util.tf_export.keras_export', 'keras_export', (['"""keras.applications.densenet.DenseNet201"""', '"""keras.applications.DenseNet201"""'], {}), False, 'from tensorflow.python.util.tf_export import keras_export\n'), (359, 'tensorflow.python.util.tf_export.keras_export', 'keras_export', (['"""keras.applications.densenet.preprocess_input"""'], {}), False, 'from tensorflow.python.util.tf_export import keras_export\n'), (365, 'tensorflow.python.util.tf_export.keras_export', 'keras_export', (['"""keras.applications.densenet.decode_predictions"""'], {}), False, 'from tensorflow.python.util.tf_export import keras_export\n'), (370, 'tensorflow.python.keras.applications.imagenet_utils.PREPROCESS_INPUT_DOC.format', 'imagenet_utils.PREPROCESS_INPUT_DOC.format', ([], {'mode': '""""""', 'ret': 'imagenet_utils.PREPROCESS_INPUT_RET_DOC_TORCH', 'error': 'imagenet_utils.PREPROCESS_INPUT_ERROR_DOC'}), False, 'from tensorflow.python.keras.applications import imagenet_utils\n'), (361, 'tensorflow.python.keras.applications.imagenet_utils.preprocess_input', 'imagenet_utils.preprocess_input', (['x'], {'data_format': 'data_format', 'mode': '"""torch"""'}), False, 'from tensorflow.python.keras.applications import imagenet_utils\n'), (367, 'tensorflow.python.keras.applications.imagenet_utils.decode_predictions', 'imagenet_utils.decode_predictions', (['preds'], {'top': 'top'}), False, 'from tensorflow.python.keras.applications import imagenet_utils\n'), (247, 'tensorflow.python.keras.applications.imagenet_utils.validate_activation', 'imagenet_utils.validate_activation', (['classifier_activation', 'weights'], {}), False, 'from tensorflow.python.keras.applications import imagenet_utils\n'), (259, 'tensorflow.python.keras.utils.layer_utils.get_source_inputs', 'layer_utils.get_source_inputs', (['input_tensor'], {}), False, 'from tensorflow.python.keras.utils import layer_utils\n'), (265, 'tensorflow.python.keras.engine.training.Model', 'training.Model', (['inputs', 'x'], {'name': '"""densenet121"""'}), False, 'from tensorflow.python.keras.engine import training\n'), (84, 'tensorflow.python.keras.backend.image_data_format', 'backend.image_data_format', ([], {}), False, 'from tensorflow.python.keras import backend\n'), (110, 'tensorflow.python.keras.backend.image_data_format', 'backend.image_data_format', ([], {}), False, 'from tensorflow.python.keras import backend\n'), (195, 'tensorflow.python.lib.io.file_io.file_exists_v2', 'file_io.file_exists_v2', (['weights'], {}), False, 'from tensorflow.python.lib.io import file_io\n'), (210, 'tensorflow.python.keras.backend.image_data_format', 'backend.image_data_format', ([], {}), False, 'from tensorflow.python.keras import backend\n'), (217, 'tensorflow.python.keras.backend.is_keras_tensor', 'backend.is_keras_tensor', (['input_tensor'], {}), False, 'from tensorflow.python.keras import backend\n'), (222, 
'tensorflow.python.keras.backend.image_data_format', 'backend.image_data_format', ([], {}), False, 'from tensorflow.python.keras import backend\n'), (267, 'tensorflow.python.keras.engine.training.Model', 'training.Model', (['inputs', 'x'], {'name': '"""densenet169"""'}), False, 'from tensorflow.python.keras.engine import training\n'), (269, 'tensorflow.python.keras.engine.training.Model', 'training.Model', (['inputs', 'x'], {'name': '"""densenet201"""'}), False, 'from tensorflow.python.keras.engine import training\n'), (271, 'tensorflow.python.keras.engine.training.Model', 'training.Model', (['inputs', 'x'], {'name': '"""densenet"""'}), False, 'from tensorflow.python.keras.engine import training\n'), (277, 'tensorflow.python.keras.utils.data_utils.get_file', 'data_utils.get_file', (['"""densenet121_weights_tf_dim_ordering_tf_kernels.h5"""', 'DENSENET121_WEIGHT_PATH'], {'cache_subdir': '"""models"""', 'file_hash': '"""9d60b8095a5708f2dcce2bca79d332c7"""'}), False, 'from tensorflow.python.keras.utils import data_utils\n'), (296, 'tensorflow.python.keras.utils.data_utils.get_file', 'data_utils.get_file', (['"""densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5"""', 'DENSENET121_WEIGHT_PATH_NO_TOP'], {'cache_subdir': '"""models"""', 'file_hash': '"""30ee3e1110167f948a6b9946edeeb738"""'}), False, 'from tensorflow.python.keras.utils import data_utils\n'), (283, 'tensorflow.python.keras.utils.data_utils.get_file', 'data_utils.get_file', (['"""densenet169_weights_tf_dim_ordering_tf_kernels.h5"""', 'DENSENET169_WEIGHT_PATH'], {'cache_subdir': '"""models"""', 'file_hash': '"""d699b8f76981ab1b30698df4c175e90b"""'}), False, 'from tensorflow.python.keras.utils import data_utils\n'), (302, 'tensorflow.python.keras.utils.data_utils.get_file', 'data_utils.get_file', (['"""densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5"""', 'DENSENET169_WEIGHT_PATH_NO_TOP'], {'cache_subdir': '"""models"""', 'file_hash': '"""b8c4d4c20dd625c148057b9ff1c1176b"""'}), False, 'from tensorflow.python.keras.utils import data_utils\n'), (90, 'tensorflow.python.keras.backend.int_shape', 'backend.int_shape', (['x'], {}), False, 'from tensorflow.python.keras import backend\n'), (289, 'tensorflow.python.keras.utils.data_utils.get_file', 'data_utils.get_file', (['"""densenet201_weights_tf_dim_ordering_tf_kernels.h5"""', 'DENSENET201_WEIGHT_PATH'], {'cache_subdir': '"""models"""', 'file_hash': '"""1ceb130c1ea1b78c3bf6114dbdfd8807"""'}), False, 'from tensorflow.python.keras.utils import data_utils\n'), (308, 'tensorflow.python.keras.utils.data_utils.get_file', 'data_utils.get_file', (['"""densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5"""', 'DENSENET201_WEIGHT_PATH_NO_TOP'], {'cache_subdir': '"""models"""', 'file_hash': '"""c13680b51ded0fb44dff2d8f86ac8bb1"""'}), False, 'from tensorflow.python.keras.utils import data_utils\n')]
jianming93/incremental_learner
dd477bb65c2d1f56365b487cebc6357a70e8f460
import pickle
import timeit
from collections import OrderedDict
import os
import sys
sys.path.append(os.path.dirname(__file__))

import numpy as np
from tqdm import tqdm
from sklearn.svm import OneClassSVM
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input as vgg16_preprocess_input
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input as resnet50_preprocess_input
from tensorflow.keras.applications import MobileNet
from tensorflow.keras.applications.mobilenet import preprocess_input as mobilenet_preprocess_input
from tensorflow.keras.layers import GlobalAvgPool2D
from tensorflow.keras import Model, Sequential
from tensorflow.keras.preprocessing import image

from utils import normalize
from utils import sorted_neighbors_of_i
# from utils import evaluate
from utils import fit_to_list

ACCEPTED_PREPROCESSORS = ("vgg16", "resnet50", "mobilenet")
PREPROCESSORS_PREPROCESS_FUNCTIONS = {'vgg16': vgg16_preprocess_input,
                                      'resnet50': resnet50_preprocess_input,
                                      'mobilenet': mobilenet_preprocess_input}

# assumes data has been pre-normalized
class ShellModel:
    """Creates a shell for one class mean.
    """
    def __init__(self):
        self.shell_id = None
        self.raw_features = None
        self.shell_mean = None
        self.num_instances = None
        self.noise_mean = None
        self.noise_std = None
        self.created_at = None
        self.updated_at = None

    def fit(self, global_mean):
        """Generate the shell parameters based on the global mean
           the shell family currently sees
        """
        self.__generate_one_class_mean(global_mean)

    def __generate_one_class_mean(self, global_mean):
        """Generate the one class mean which is the 'center' of the shell
           along with its 'diameter'
        """
        normalized_features, _ = normalize(self.raw_features, global_mean)
        normalized_mean = np.mean(normalized_features, axis=0, keepdims=True)
        # normalized_mean = np.mean(self.raw_features, axis=0, keepdims=True)
        # noise = self.raw_features - normalized_mean
        noise = normalized_features - normalized_mean
        noise = np.linalg.norm(noise, axis=1)
        self.shell_mean = normalized_mean
        self.num_instances = normalized_features.shape[0]
        self.noise_mean = np.median(noise)
        self.noise_std = np.median(np.absolute(noise - np.mean(noise)))

    def score(self, feat, global_mean, with_norm=True):
        """Perform a distance score based on how far a feature is from the shell
        """
        # smaller scores are better, multiply by -1 to reverse that
        score = self.__generate_one_class_mean_score(feat, global_mean, with_norm=with_norm)
        return -score

    def __generate_one_class_mean_score(self, feat, global_mean, with_norm=True):
        """Perform a distance score based on how far a feature is from the shell.
        """
        if with_norm:
            feat_, _ = normalize(feat, global_mean)
        else:
            feat_ = feat.copy()
        feat_ = feat_ - self.shell_mean
        feat_ = np.linalg.norm(feat_, axis=1)
        shell_score = (feat_ - self.noise_mean) / self.noise_std
        return shell_score

    def update(self, feat, global_mean):
        """Perform an update to the shell parameters.
           To be used with a single data point of features to update the model.
        """
        self.raw_features = np.concatenate([self.raw_features, feat])
        normalized_features, _ = normalize(self.raw_features, global_mean)
        self.shell_mean = np.mean(normalized_features, axis=0, keepdims=True)
        self.num_instances = normalized_features.shape[0]
        noise = normalized_features - self.shell_mean
        noise = np.linalg.norm(noise, axis=1)
        self.noise_mean = np.median(noise)
        self.noise_std = np.median(np.absolute(noise - np.mean(noise)))


class ShellFamily():
    def __init__(self):
        self.classifiers = OrderedDict()
        self.feature_extractor_model = None
        self.preprocessor = None
        self.global_mean = None
        self.instances = 0
        self.mapping = []
        self.created_at = None
        self.updated_at = None

    def create_preprocessor(self, feature_extractor_model):
        if feature_extractor_model in ACCEPTED_PREPROCESSORS:
            model = Sequential()
            if feature_extractor_model == 'vgg16':  # was 'vgg', which never matched ACCEPTED_PREPROCESSORS
                vgg = VGG16(weights='imagenet', include_top=False)
                model.add(vgg)
                # pool to a flat feature vector, consistent with the other backbones
                model.add(GlobalAvgPool2D())
            elif feature_extractor_model == 'resnet50':
                resnet = ResNet50(weights='imagenet', include_top=False)
                model.add(resnet)
                model.add(GlobalAvgPool2D())
            elif feature_extractor_model == 'mobilenet':
                mobilenet = MobileNet(weights='imagenet', include_top=False)
                model.add(mobilenet)
                model.add(GlobalAvgPool2D())
            self.preprocessor = model
            self.feature_extractor_model = feature_extractor_model
            self.preprocessor_preprocess_function = PREPROCESSORS_PREPROCESS_FUNCTIONS[self.feature_extractor_model]
        else:
            raise ValueError("Preprocessor model not found! Please enter the following models: {}".format(ACCEPTED_PREPROCESSORS))

    def load(self, shell_file):
        with open(shell_file, "rb") as saved_data:
            shell_family_configuration = pickle.load(saved_data)
        for class_name in shell_family_configuration['classifiers']:
            self.classifiers[class_name] = shell_family_configuration['classifiers'][class_name]
        self.feature_extractor_model = shell_family_configuration['feature_extractor_model']
        self.mapping = shell_family_configuration['mapping']
        self.global_mean = shell_family_configuration['global_mean']
        self.instances = shell_family_configuration['instances']
        self.shell_file = shell_file
        self.create_preprocessor(self.feature_extractor_model)

    def fit(self, data_generator, raw_mapping):
        """To be used when creating an entire new family of shells
        """
        # Generate empty shells if needed
        for class_index in range(len(raw_mapping)):
            if raw_mapping[class_index] not in self.classifiers:
                self.classifiers[raw_mapping[class_index]] = ShellModel()
                self.mapping.append(raw_mapping[class_index])
        # Extract features and prepare for shell creation
        for data in data_generator:
            images = data[0]
            filepaths = data[1]
            classes = data[2]
            unique_classes = np.unique(classes)
            for class_index in unique_classes:
                # Generate class features
                indexes = np.where(classes == class_index)
                target_images = images[indexes]
                class_features = self.preprocessor.predict(target_images)
                # Update shell family params (running mean over all seen features)
                if self.global_mean is None:
                    self.global_mean = np.mean(class_features, axis=0, keepdims=True)
                else:
                    self.global_mean = np.mean(
                        np.concatenate(
                            [
                                np.repeat(
                                    self.global_mean,
                                    self.instances,
                                    axis=0
                                ),
                                class_features
                            ]
                        ),
                        axis=0,
                        keepdims=True
                    )
                self.instances += class_features.shape[0]
                class_name = raw_mapping[class_index]
                # Append raw features to classifiers
                if self.classifiers[class_name].raw_features is None:
                    self.classifiers[class_name].raw_features = class_features
                else:
                    self.classifiers[class_name].raw_features = \
                        np.concatenate([self.classifiers[class_name].raw_features,
                                        class_features])
        # Create shells from features
        self.update_shells(self.global_mean)
        # self.save(output_datafile)
        # self.shell_file = output_datafile

    def update_shells(self, global_mean):
        for shell_name in self.classifiers:
            self.classifiers[shell_name].fit(global_mean)

    def score(self, feat, threshold, with_update=True, return_full_results=True):
        results = OrderedDict()
        best_class_name = None
        best_class_index = None
        best_result = -9999999
        for class_name, shell in self.classifiers.items():
            results[class_name] = shell.score(feat, self.global_mean)
            if results[class_name] > best_result:
                best_class_name = class_name
                best_result = results[class_name]
                best_class_index = self.mapping.index(class_name)
        if with_update:
            self.global_mean = (self.global_mean * self.instances + feat) / (self.instances + 1)
            self.instances += 1
            self.classifiers[best_class_name].update(feat, self.global_mean)
        if return_full_results:
            return best_class_index, best_class_name, best_result, results
        else:
            return best_class_index, best_class_name, best_result

    # def scoreV2(self, feat, threshold, with_norm=True, with_update=True, add_new_class=True):
    #     results = OrderedDict()
    #     best_class_name = None
    #     best_class_index = None
    #     best_result = -9999999
    #     for class_name, shell in self.classifiers.items():
    #         results[class_name] = shell.score(feat, self.global_mean)
    #         if results[class_name] > best_result:
    #             best_class_name = class_name
    #             best_result = results[class_name]
    #             best_class_index = self.mapping.index(class_name)
    #     if best_result >= threshold:
    #         if with_update:
    #             self.global_mean = (self.global_mean * self.instances + feat) / (self.instances + 1)
    #             self.instances += 1
    #             self.classifiers[best_class_name].update(feat, self.global_mean)
    #     else:
    #         if add_new_class:
    #             self.create_new_class(feat)
    #     return best_class_index, best_class_name, best_result

    def save(self, output_filename):
        save_data = {'classifiers': self.classifiers,
                     'feature_extractor_model': self.feature_extractor_model,
                     'mapping': self.mapping,
                     'global_mean': self.global_mean,
                     'instances': self.instances}
        with open(output_filename, "wb") as data_file:
            pickle.dump(save_data, data_file)

    # def create_new_class(self, feat, new_class_name):
    #     """To be used when a family of shell is already present
    #     """
    #     shell = ShellModel()
    #     shell.fit(feat)
    #     self.mapping.append(new_class_name)
    #     self.classifiers[new_class_name] = shell
    #     with open(self.mapping_file, "w") as data_file:
    #         for class_name in self.mapping:
    #             data_file.write("%s\n" % class_name)
    #     with open(self.shell_file, "wb") as data_file:
    #         pickle.dump(self.classifiers, data_file)

    def delete_class(self, class_to_delete):
        """To be used when a shell needs to be deleted
        """
        all_features_total_value = self.global_mean * self.instances
        class_to_delete_raw_features_sum = np.sum(self.classifiers[class_to_delete].raw_features, axis=0)
        class_to_delete_raw_features_sum = np.expand_dims(class_to_delete_raw_features_sum, 0)
        self.global_mean = (all_features_total_value - class_to_delete_raw_features_sum) / (self.instances - self.classifiers[class_to_delete].num_instances)
        self.instances -= self.classifiers[class_to_delete].num_instances
        del self.classifiers[class_to_delete]
        # Re-update all shell configurations
        self.update_shells(self.global_mean)
        # Save new configuration
        self.save(self.shell_file)


def normIt(data, m=None):
    nData = data.copy()
    # nData = data/np.linalg.norm(data, axis=1, keepdims=True)
    if m is None:
        m = np.mean(nData, axis=0, keepdims=True)
    nData = nData - m
    nData = nData / np.linalg.norm(nData, axis=1, keepdims=True)
    return nData, m

# def ocMean(feat):
#     m_ = np.mean(feat, axis=0, keepdims=True)
#     d = feat - m_
#     d = np.linalg.norm(d, axis=1)
#     model = {'clusMean': m_,
#              'numInstance': feat.shape[0],
#              'noiseMean': np.median(d),
#              'noiseStd': np.median(np.absolute(d-np.mean(d))),
#              'mean_norm': 0}
#     return model

# def ocMeanScore(feat, model, withNorm=True):
#     if withNorm:
#         feat_, _ = normalize(feat, model['mean_norm'])
#     else:
#         feat_ = feat.copy()
#     feat_ = feat_ - model['clusMean']
#     feat_ = np.linalg.norm(feat_, axis=1)
#     ss = (feat_ - model['noiseMean'])/model['noiseStd']
#     return ss

# NOTE: evalOneClassMean depends on the legacy OneClassMean class and the
# `evaluate` util, both of which are commented out in this module/imports,
# so it raises NameError unless they are restored; kept here for reference.
def evalOneClassMean(testFeat, testGt, trainFeat, trainGt, verbose=True, withNorm=True):
    if type(trainFeat) is list:
        featList = trainFeat.copy()
        numClass = len(featList)
    else:
        featList = []
        numClass = np.max(trainGt)+1
        for i in range(numClass):
            featList.append(trainFeat[trainGt==i])
    trainTime = 0
    testTime = 0
    scores = np.zeros([testFeat.shape[0], numClass])
    for i in range(numClass):
        sOCM = OneClassMean()
        # training
        start = timeit.default_timer()
        sOCM.fit(featList[i])
        stop = timeit.default_timer()
        trainTime = trainTime + stop - start
        # testing
        start = timeit.default_timer()
        scores[:,i] = sOCM.score(testFeat, withNorm=withNorm)
        stop = timeit.default_timer()
        testTime = testTime + stop - start
    trainTime = trainTime/numClass
    testTime = testTime/numClass
    if verbose:
        print('Train Time: ', trainTime)
        print('Test Time: ', testTime)
    labelEst = np.argmax(scores, axis=1)
    meanEST, mapEST, rocEST = evaluate(labelEst, scores, testGt, verbose)
    return meanEST, mapEST, rocEST

# class StackedOneClassMean(OneClassMean):
#     """Create stacked shell of one class mean.
#     """
#     def __init__(self):
#         self.classifers = []

#     def fit(self, feat, target, multiMeans):
#         self.classifers = self.__generate_stacked_one_class_mean(feat, target, multiMeans)

#     def __generate_stacked_one_class_mean(self, feat, target, m_all):
#         _, neighs = sorted_neighbors_of_i(m_all, target)
#         classifers = []
#         current_shell = []
#         for i in neighs:
#             current_shell.append(i)
#             if len(current_shell) > 1:
#                 m1 = np.mean(m_all[current_shell,:], axis=0, keepdims=True)
#                 tf = feat-m1
#                 tf = tf/np.linalg.norm(tf, axis=1, keepdims=True)
#                 model = super(StackedOneClassMean, self).__generate_one_class_mean(tf)
#                 model['mean_norm'] = m1
#                 classifers.append(model)
#         tf = feat/np.linalg.norm(feat, axis=1, keepdims=True)
#         model = super(StackedOneClassMean, self).__generate_one_class_mean(tf)
#         model['mean_norm'] = np.zeros([1, feat.shape[1]])
#         classifers.append(model)
#         return classifers

#     def score(self, testFeat, with_norm=True):
#         scores = self.__generate_stacked_one_class_mean_score(testFeat, with_norm)
#         labels = np.argmin(scores, axis=1)
#         return labels, -scores

#     def __generate_stacked_one_class_mean_score(self, feat, with_norm=True):
#         score = np.zeros([feat.shape[0], len(self.classifers)])
#         for i in range(len(self.classifers)):
#             score[:,i] = super(StackedOneClassMean, self).__generate_one_class_mean_score(feat, self.classifers[i])
#         return score

# def stackedMean(train_feat, target, m_all):
#     _, neighs = sorted_neighbors_of_i(m_all, target)
#     classifers = []
#     current_shell = []
#     for i in neighs:
#         current_shell.append(i)
#         if len(current_shell) > 1:
#             m1 = np.mean(m_all[current_shell,:], axis=0, keepdims=True)
#             tf = train_feat-m1
#             tf = tf/np.linalg.norm(tf, axis=1, keepdims=True)
#             model = ocMean(tf)
#             model['mean_norm'] = m1
#             classifers.append(model)
#     tf = train_feat/np.linalg.norm(train_feat, axis=1, keepdims=True)
#     model = ocMean(tf)
#     model['mean_norm'] = np.zeros([1,train_feat.shape[1]])
#     classifers.append(model)
#     return classifers

# def stackedMeanScore(classifers, test_feat):
#     score = np.zeros([test_feat.shape[0], len(classifers)])
#     for i in range(len(classifers)):
#         score[:,i] = ocMeanScore(test_feat, classifers[i])
#     return score

# def evalStackedOneClassMean(testFeat, testGt, trainFeat, trainGt, verbose=True):
#     sOCM = StackedOneClassMean()
#     sOCM.train(trainFeat, trainGt)
#     labelEst, scores = sOCM.score(testFeat)
#     meanEST, mapEST, rocEST = evaluate(labelEst, scores, testGt, verbose)
#     return meanEST, mapEST, rocEST

# class StackedMultiClassMean(StackedOneClassMean):
#     """Create multi class stacked shell class mean.
#     """
#     def __init__(self):
#         self.classifers = []

#     def fit(self, feat, gt=-1):
#         if type(feat) is list:
#             featList = feat.copy()
#             numClass = len(featList)
#         else:
#             featList = fit_to_list(feat, gt)
#             numClass = len(featList)
#         allMeans = np.zeros([numClass, featList[0].shape[1]])
#         for i in range(numClass):
#             allMeans[i,:] = np.mean(feat[i], axis=0)
#         self.classifers = self.__generate_stacked_multi_class_mean(featList, allMeans)

#     def __generate_stacked_multi_class_mean(self, featList, allMeans):
#         numClass = len(allMeans)
#         allClassifiers = []
#         for i in range(numClass):
#             target = i
#             classifers = super(StackedMultiClassMean, self).__generate_stacked_one_class_mean(featList[target], target, allMeans)
#             allClassifiers.append(classifers)
#         return allClassifiers

#     # def trainSingleClass(self, feat, target, multiMeans):
#     #     classifier = stackedMean(feat, target, multiMeans)
#     #     return classifier

#     def score(self, testFeat):
#         scores = self.__generate_stacked_multi_class_mean_score(testFeat, self.classifers)
#         labels = np.argmin(scores, axis=1)
#         return labels, -scores

#     def __generate_stacked_multi_class_mean_score(self, testFeat, allClassifiers):
#         numClass = len(allClassifiers)
#         scores = np.zeros([testFeat.shape[0], numClass])
#         for i in range(numClass):
#             stacked_one_class_shell = super(StackedMultiClassMean, self).__generate_stacked_one_class_mean_score(allClassifiers[i], testFeat)
#             stacked_one_class_shell = np.mean(stacked_one_class_shell, axis=1)
#             scores[:,i] = stacked_one_class_shell
#         return scores

# def multiStackedOneClassMean(trainFeat, allMeans):
#     numClass = len(allMeans)
#     allClassifiers = []
#     for i in range(numClass):
#         target = i
#         classifers = stackedMean(trainFeat[target], target, allMeans)
#         allClassifiers.append(classifers)
#     return allClassifiers

# def scoreMultiStackedOneClassMean(testFeat, allClassifiers):
#     numClass = len(allClassifiers)
#     scores = np.zeros([testFeat.shape[0], numClass])
#     for i in range(numClass):
#         s = stackedMeanScore(allClassifiers[i], testFeat)
#         s = np.mean(s, axis=1)
#         scores[:,i] = s
#     return scores
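A small, hedged sketch of exercising ShellModel on synthetic features (illustrative names only; it assumes `utils.normalize` is importable, since `fit` and `score` call it internally):

# Illustrative smoke test for ShellModel; `feats` stands in for
# backbone features extracted by ShellFamily's preprocessor.
import numpy as np

rng = np.random.default_rng(0)
feats = rng.normal(size=(100, 8)).astype('float32')
global_mean = np.mean(feats, axis=0, keepdims=True)

shell = ShellModel()
shell.raw_features = feats
shell.fit(global_mean)                       # builds the shell center and noise stats
print(shell.score(feats[:1], global_mean))   # larger (less negative) = closer to the shell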
[ "tensorflow.keras.applications.MobileNet", "tensorflow.keras.applications.resnet50.ResNet50", "numpy.expand_dims", "tensorflow.keras.layers.GlobalAvgPool2D", "numpy.unique", "numpy.median", "tensorflow.keras.applications.vgg16.VGG16", "tensorflow.keras.Sequential", "numpy.linalg.norm", "numpy.concatenate", "numpy.max", "numpy.argmax", "numpy.mean", "numpy.where", "numpy.repeat", "numpy.zeros", "numpy.sum" ]
src/shell_v2.py
[(6, 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), False, 'import os\n'), (324, 'numpy.zeros', 'np.zeros', (['[testFeat.shape[0], numClass]'], {}), True, 'import numpy as np\n'), (342, 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(1)'}), True, 'import numpy as np\n'), (52, 'utils.normalize', 'normalize', (['self.raw_features', 'global_mean'], {}), False, 'from utils import normalize\n'), (53, 'numpy.mean', 'np.mean', (['normalized_features'], {'axis': '(0)', 'keepdims': '(True)'}), True, 'import numpy as np\n'), (57, 'numpy.linalg.norm', 'np.linalg.norm', (['noise'], {'axis': '(1)'}), True, 'import numpy as np\n'), (60, 'numpy.median', 'np.median', (['noise'], {}), True, 'import numpy as np\n'), (78, 'numpy.linalg.norm', 'np.linalg.norm', (['feat_'], {'axis': '(1)'}), True, 'import numpy as np\n'), (86, 'numpy.concatenate', 'np.concatenate', (['[self.raw_features, feat]'], {}), True, 'import numpy as np\n'), (88, 'utils.normalize', 'normalize', (['self.raw_features', 'global_mean'], {}), False, 'from utils import normalize\n'), (89, 'numpy.mean', 'np.mean', (['normalized_features'], {'axis': '(0)', 'keepdims': '(True)'}), True, 'import numpy as np\n'), (94, 'numpy.linalg.norm', 'np.linalg.norm', (['noise'], {'axis': '(1)'}), True, 'import numpy as np\n'), (95, 'numpy.median', 'np.median', (['noise'], {}), True, 'import numpy as np\n'), (101, 'collections.OrderedDict', 'OrderedDict', ([], {}), False, 'from collections import OrderedDict\n'), (201, 'collections.OrderedDict', 'OrderedDict', ([], {}), False, 'from collections import OrderedDict\n'), (268, 'numpy.sum', 'np.sum', (['self.classifiers[class_to_delete].raw_features'], {'axis': '(0)'}), True, 'import numpy as np\n'), (269, 'numpy.expand_dims', 'np.expand_dims', (['class_to_delete_raw_features_sum', '(0)'], {}), True, 'import numpy as np\n'), (284, 'numpy.mean', 'np.mean', (['nData'], {'axis': '(0)', 'keepdims': '(True)'}), True, 'import numpy as np\n'), (286, 'numpy.linalg.norm', 'np.linalg.norm', (['nData'], {'axis': '(1)', 'keepdims': '(True)'}), True, 'import numpy as np\n'), (328, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (330, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (333, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (335, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (74, 'utils.normalize', 'normalize', (['feat', 'global_mean'], {}), False, 'from utils import normalize\n'), (112, 'tensorflow.keras.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras import Model, Sequential\n'), (132, 'pickle.load', 'pickle.load', (['saved_data'], {}), False, 'import pickle\n'), (156, 'numpy.unique', 'np.unique', (['classes'], {}), True, 'import numpy as np\n'), (248, 'pickle.dump', 'pickle.dump', (['save_data', 'data_file'], {}), False, 'import pickle\n'), (319, 'numpy.max', 'np.max', (['trainGt'], {}), True, 'import numpy as np\n'), (114, 'tensorflow.keras.applications.vgg16.VGG16', 'VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), False, 'from tensorflow.keras.applications.vgg16 import VGG16\n'), (159, 'numpy.where', 'np.where', (['(classes == class_index)'], {}), True, 'import numpy as np\n'), (61, 'numpy.mean', 'np.mean', (['noise'], {}), True, 'import numpy as np\n'), (96, 'numpy.mean', 'np.mean', (['noise'], {}), True, 'import numpy as np\n'), (117, 'tensorflow.keras.applications.resnet50.ResNet50', 'ResNet50', ([], 
{'weights': '"""imagenet"""', 'include_top': '(False)'}), False, 'from tensorflow.keras.applications.resnet50 import ResNet50\n'), (164, 'numpy.mean', 'np.mean', (['class_features'], {'axis': '(0)', 'keepdims': '(True)'}), True, 'import numpy as np\n'), (189, 'numpy.concatenate', 'np.concatenate', (['[self.classifiers[class_name].raw_features, class_features]'], {}), True, 'import numpy as np\n'), (119, 'tensorflow.keras.layers.GlobalAvgPool2D', 'GlobalAvgPool2D', ([], {}), False, 'from tensorflow.keras.layers import GlobalAvgPool2D\n'), (121, 'tensorflow.keras.applications.MobileNet', 'MobileNet', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), False, 'from tensorflow.keras.applications import MobileNet\n'), (123, 'tensorflow.keras.layers.GlobalAvgPool2D', 'GlobalAvgPool2D', ([], {}), False, 'from tensorflow.keras.layers import GlobalAvgPool2D\n'), (171, 'numpy.repeat', 'np.repeat', (['self.global_mean', 'self.instances'], {'axis': '(0)'}), True, 'import numpy as np\n')]
xebastien/NumeraiX
4ee1ce63cf2f6212e6b2297d3574bc6d70006f53
###########################################################################
#         Neural network training for numer.ai                            #
###########################################################################
# %% import package
import tensorflow as tf                        # call gpu tf if available (CUDA required)
from tensorflow.keras import layers, models    # keras tf v2
import numpy as np
import matplotlib
import pandas as pd
import time
#import tensorflow_probability as tfp
# from keras.callbacks import EarlyStopping    # need to insert callbacks
import gc                                      # garbage collector / needed for my laptop
#import lightgbm as lgb
import matplotlib.pyplot as plt
#from scipy.stats import spearmanr

# will need to use dask for cluster
# import dask.dataframe as dd                  # work on external clusters
# from dask.array import from_array
# look for S3 bucket below for loading in cloud
# public S3
# %% Create instance of NumerAPI and open downloaded file
if 0:
    import numerapi                            # numerai api
    napi = numerapi.NumerAPI(verbosity="info")
    # download current dataset
    napi.download_current_dataset(unzip=True)
    # getting the latest round information
    current_ds = napi.get_current_round()
    # latest_round = os.path.join('numerai_dataset_'+str(current_ds))
    ## already downloaded
    #napi.download_dataset("numerai_training_data_int8.parquet", train_pq_path)
    #napi.download_dataset("numerai_validation_data_int8.parquet", val_pq_path)

# memory - using parquet/int8 data file for now
train_pq_path = "numerai_training_data_int8.parquet"
val_pq_path = "numerai_validation_data_int8.parquet"

# Read parquet files and load them into DataFrames
df_train = pd.read_parquet('Numerai/data/numerai_training_data_int8.parquet')
df_val = pd.read_parquet('Numerai/data/numerai_validation_data_int8.parquet')

# subsampling as eras are overlapping
eras = [i for i in range(1, len(df_train.era.unique())+1, 4)]
df_train = df_train[df_train.era.astype(int).isin(eras)]
gc.collect()
# %% Feature names and eras
features = [c for c in df_train if c.startswith("feature")]
features_erano = features + ["erano"]
targets = [c for c in df_train if c.startswith("target")]  # not used here, time series disabled
# cast era time from string to integer and store in df
df_train["erano"] = df_train.era.astype(int)
df_val["erano"] = df_val.era.astype(int)
print(f"Loaded {len(features)} feature column names")
# %% Create tf tensors
gc.collect()
x_train = df_train.reset_index()[features].to_numpy()
y_train = df_train.reset_index()["target"].to_numpy()
# time series
# x_train_erano = df_train.reset_index()[features_erano].to_numpy()
del df_train; gc.collect()   # low on memory
print("Tensor training data ok - df dropped")

x_test = df_val.reset_index()[features].to_numpy()
y_test = df_val.reset_index()["target"].to_numpy()
del df_val; gc.collect()     # low on memory
print("Tensor validation data ok - df dropped")

# slicing data for batch processing
batch_size = len(x_test) // 100
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)
print("Tensor sliced")
# %% Define model - optimizer - loss function
epochs = 15
# model here
#leaky_relu = LeakyReLU(alpha=0.01)
model = models.Sequential([
    layers.Dense(1000, activation='relu', kernel_initializer='normal', input_shape=[len(features)]),
    layers.Dense(50, activation='elu', kernel_regularizer='l2'),
    layers.Dense(16, activation='relu', kernel_regularizer='l2'),
    layers.Dense(1)   # classify in [0 0.25 0.5 0.75 1]
])
# Adam ok
optimizer = tf.keras.optimizers.Adam()
# define loss objectives
loss_object = tf.keras.losses.MeanSquaredError()
# or custom correlation loss function for regression
# def MaxCorrelation(y_true,y_pred):
#    return -tf.math.abs(tfp.stats.correlation(y_pred,y_true, sample_axis=None, event_axis=None))
#loss_object = MaxCorrelation()

# metrics
train_loss = tf.keras.metrics.Mean()
train_accuracy = tf.keras.metrics.MeanSquaredError()
test_loss = tf.keras.metrics.Mean()
test_accuracy = tf.keras.metrics.MeanSquaredError()

## %% define functions
@tf.function
def train_step(train_ds, labels):
    with tf.GradientTape() as tape:
        predictions = model(train_ds)
        loss = loss_object(labels, predictions)
        #loss = MaxCorrelation(y_true,y_pred)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)   # averaging loss
    train_accuracy(labels, predictions)

def train(X_train, epochs):
    for itrain in range(epochs):
        start = time.time()
        # train by batch
        for train_ds, labels in X_train:
            train_step(train_ds, labels)
        # verbose
        message = 'Epoch {:04d}, loss: {:6.4f}, accuracy: {:7.4f}%, time: {:7.4f}'
        print(message.format(itrain+1,
                             train_loss.result(),
                             train_accuracy.result()*100,
                             time.time()-start))
        train_loss.reset_states()
        train_accuracy.reset_states()

def test(test_ds):
    start = time.time()
    for test_x, test_labels in test_ds:
        predictions = model(test_x)
        t_loss = loss_object(test_labels, predictions)
        test_loss(t_loss)   # averaging
        test_accuracy(test_labels, predictions)
    message = 'Loss: {:6.4f}, accuracy: {:7.4f}%, time: {:7.4f}'
    print(message.format(test_loss.result(),
                         test_accuracy.result()*100,
                         time.time()-start))

# %% Run optimization and prediction on validation
print("Training dataset - Optimization")
train(train_ds, epochs)
print("Validation dataset")
test(test_ds)

y_pred = model(x_test).numpy().reshape((-1,))
y_true = y_test
# %% metrics
# Score based on the rank-correlation (spearman) / eras
# NOTE: expects pandas Series for y_pred and eras so that groupby works
def numerai_score(y_true, y_pred, eras):
    rank_pred = y_pred.groupby(eras).apply(lambda x: x.rank(pct=True, method="first"))
    return np.corrcoef(y_true, rank_pred)[0,1]

# Pearson correlation
def correlation_score(y_true, y_pred):
    return np.corrcoef(y_true, y_pred)[0,1]

# rank correlation, no eras
from scipy.stats import spearmanr
def spearman(y_true, y_pred):
    return spearmanr(y_pred, y_true).correlation

# sum of square mean difference
def ssmd(y_true, y_pred):
    squared_difference = tf.square(y_true - y_pred)
    return tf.reduce_mean(squared_difference, axis=-1)

# remove warnings
tf.autograph.set_verbosity(0)
# %%
###############################################################################
# upload prediction
# import numerapi
# napi = numerapi.NumerAPI("xebastien", "")
# download data
# napi.download_current_dataset(unzip=True)
# upload predictions
# napi.upload_predictions("predictions.csv", model_id="model_id")
# %%
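A quick, hedged sanity check of the metric helpers above on toy arrays (values are illustrative; `numerai_score` itself expects pandas Series so that `groupby(eras)` works, which is why it is not exercised here):

# Toy check of the pure-array metrics; numerai_score needs pandas inputs.
y_a = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
y_b = np.array([0.1, 0.2, 0.6, 0.7, 0.9])
print(correlation_score(y_a, y_b))   # Pearson correlation
print(spearman(y_a, y_b))            # rank correlation
print(ssmd(y_a, y_b).numpy())        # mean squared difference (returns a tf tensor)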
[ "tensorflow.reduce_mean", "tensorflow.keras.losses.MeanSquaredError", "tensorflow.keras.metrics.MeanSquaredError", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.keras.layers.Dense", "tensorflow.autograph.set_verbosity", "pandas.read_parquet", "tensorflow.keras.optimizers.Adam", "tensorflow.square", "numpy.corrcoef", "scipy.stats.spearmanr", "tensorflow.keras.metrics.Mean", "tensorflow.GradientTape" ]
NNmodel_tfv2.py
[(50, 'pandas.read_parquet', 'pd.read_parquet', (['"""Numerai/data/numerai_training_data_int8.parquet"""'], {}), True, 'import pandas as pd\n'), (51, 'pandas.read_parquet', 'pd.read_parquet', (['"""Numerai/data/numerai_validation_data_int8.parquet"""'], {}), True, 'import pandas as pd\n'), (56, 'gc.collect', 'gc.collect', ([], {}), False, 'import gc\n'), (71, 'gc.collect', 'gc.collect', ([], {}), False, 'import gc\n'), (78, 'gc.collect', 'gc.collect', ([], {}), False, 'import gc\n'), (83, 'gc.collect', 'gc.collect', ([], {}), False, 'import gc\n'), (102, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), True, 'import tensorflow as tf\n'), (112, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.keras.metrics.MeanSquaredError', 'tf.keras.metrics.MeanSquaredError', ([], {}), True, 'import tensorflow as tf\n'), (114, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), True, 'import tensorflow as tf\n'), (115, 'tensorflow.keras.metrics.MeanSquaredError', 'tf.keras.metrics.MeanSquaredError', ([], {}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.autograph.set_verbosity', 'tf.autograph.set_verbosity', (['(0)'], {}), True, 'import tensorflow as tf\n'), (32, 'numerapi.NumerAPI', 'numerapi.NumerAPI', ([], {'verbosity': '"""info"""'}), False, 'import numerapi\n'), (146, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (188, 'tensorflow.square', 'tf.square', (['(y_true - y_pred)'], {}), True, 'import tensorflow as tf\n'), (189, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['squared_difference'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_train, y_train)'], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_test, y_test)'], {}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(50)'], {'activation': '"""elu"""', 'kernel_regularizer': '"""l2"""'}), False, 'from tensorflow.keras import layers, models\n'), (98, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(16)'], {'activation': '"""relu"""', 'kernel_regularizer': '"""l2"""'}), False, 'from tensorflow.keras import layers, models\n'), (99, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), False, 'from tensorflow.keras import layers, models\n'), (121, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (132, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (175, 'numpy.corrcoef', 'np.corrcoef', (['y_true', 'rank_pred'], {}), True, 'import numpy as np\n'), (179, 'numpy.corrcoef', 'np.corrcoef', (['y_true', 'y_pred'], {}), True, 'import numpy as np\n'), (184, 'scipy.stats.spearmanr', 'spearmanr', (['y_pred', 'y_true'], {}), False, 'from scipy.stats import spearmanr\n'), (155, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (141, 'time.time', 'time.time', ([], {}), False, 'import time\n')]
LadaOndris/IBT
5f8f9207f8cce3124cf92c60874691ebb8d0fe42
import tensorflow as tf

import src.estimation.configuration as configs
import src.utils.logs as logs_utils
from src.datasets.bighand.dataset import BighandDataset
from src.datasets.msra.dataset import MSRADataset
from src.estimation.architecture.jgrp2o import JGR_J2O
from src.estimation.architecture.losses import CoordinateLoss, OffsetLoss
from src.estimation.configuration import Config
from src.estimation.preprocessing import DatasetPreprocessor
from src.utils.camera import Camera
from src.utils.paths import BIGHAND_DATASET_DIR, MSRAHANDGESTURE_DATASET_DIR


def get_train_and_test_generator(dataset_name: str, network, train_config: configs.Config,
                                 test_config: configs.Config):
    # returns (dataset, train generator, test generator); the previous
    # two-tuple annotation did not match the three returned values
    cam = Camera(dataset_name)

    def get_preprocessor(dataset, config):
        return DatasetPreprocessor(iter(dataset), network.input_size, network.out_size,
                                   camera=cam, config=config)

    if dataset_name == 'bighand':
        ds = BighandDataset(BIGHAND_DATASET_DIR, test_subject="Subject_8",
                            batch_size=train_config.batch_size, shuffle=True)
    elif dataset_name == 'msra':
        ds = MSRADataset(MSRAHANDGESTURE_DATASET_DIR, batch_size=train_config.batch_size, shuffle=True)
    else:
        raise ValueError(F"Invalid dataset: {dataset_name}")

    train_ds_gen = get_preprocessor(ds.train_dataset, train_config)
    test_ds_gen = get_preprocessor(ds.test_dataset, test_config)
    return ds, train_ds_gen, test_ds_gen


def train(dataset_name: str, weights_path: str, config: Config, model_features=128):
    network = JGR_J2O(n_features=model_features)
    model = network.graph()
    print(model.summary(line_length=120))
    if weights_path is not None:
        model.load_weights(weights_path)

    # the same config is used for both the train and test pipelines
    # (the original call passed only three arguments, which would raise a TypeError)
    dataset, train_ds_gen, test_ds_gen = get_train_and_test_generator(dataset_name, network, config, config)
    monitor_loss = 'val_loss'
    if dataset.num_test_batches == 0:
        test_ds_gen = None
        monitor_loss = 'loss'

    log_dir = logs_utils.make_log_dir()
    checkpoint_path = logs_utils.compose_ckpt_path(log_dir)
    callbacks = [
        tf.keras.callbacks.TensorBoard(log_dir=log_dir, update_freq='epoch'),
        tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, monitor=monitor_loss, save_weights_only=True),
        tf.keras.callbacks.EarlyStopping(monitor=monitor_loss, patience=10, restore_best_weights=True),
        tf.keras.callbacks.TerminateOnNaN()
    ]
    steps_per_epoch = dataset.num_train_batches
    if dataset_name == 'bighand':
        steps_per_epoch = 1024

    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=config.learning_rate,
        decay_steps=steps_per_epoch,
        decay_rate=config.learning_decay_rate,
        staircase=True)
    adam = tf.keras.optimizers.Adam(learning_rate=lr_schedule, beta_1=0.98)
    model.compile(optimizer=adam, loss=[CoordinateLoss(), OffsetLoss()])

    if dataset_name == "bighand":
        model.fit(train_ds_gen, epochs=1000, verbose=0, callbacks=callbacks, steps_per_epoch=steps_per_epoch,
                  validation_data=test_ds_gen, validation_steps=dataset.num_test_batches)
    else:
        model.fit(train_ds_gen, epochs=70, verbose=0, callbacks=callbacks, steps_per_epoch=steps_per_epoch,
                  validation_data=test_ds_gen, validation_steps=dataset.num_test_batches)

    # probably won't come to this, but just to be sure.
    # (the best checkpoint is being saved after each epoch)
    model_filepath = logs_utils.compose_model_path(prefix=F"jgrp2o_{dataset_name}_")
    model.save_weights(model_filepath)
    # checkpoints are located in the log_dir
    # the saved model is located in the model_filepath
    return log_dir, str(model_filepath)
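A hedged sketch of invoking the trainer; the `Config` fields set here (batch_size, learning_rate, learning_decay_rate) are inferred from how `train()` reads them above, and the no-argument constructor is an assumption:

# Hypothetical entry point -- Config's constructor and defaults are assumptions.
if __name__ == '__main__':
    cfg = Config()
    cfg.batch_size = 32              # read by the dataset builders above
    cfg.learning_rate = 1e-3         # feeds the ExponentialDecay schedule
    cfg.learning_decay_rate = 0.96
    log_dir, model_path = train('msra', weights_path=None, config=cfg)
    print(log_dir, model_path)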
[ "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.optimizers.schedules.ExponentialDecay", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.callbacks.TerminateOnNaN", "tensorflow.keras.callbacks.TensorBoard", "tensorflow.keras.callbacks.EarlyStopping" ]
src/estimation/train.py
[(17, 'src.utils.camera.Camera', 'Camera', (['dataset_name'], {}), False, 'from src.utils.camera import Camera\n'), (37, 'src.estimation.architecture.jgrp2o.JGR_J2O', 'JGR_J2O', ([], {'n_features': 'model_features'}), False, 'from src.estimation.architecture.jgrp2o import JGR_J2O\n'), (49, 'src.utils.logs.make_log_dir', 'logs_utils.make_log_dir', ([], {}), True, 'import src.utils.logs as logs_utils\n'), (50, 'src.utils.logs.compose_ckpt_path', 'logs_utils.compose_ckpt_path', (['log_dir'], {}), True, 'import src.utils.logs as logs_utils\n'), (62, 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', ([], {'initial_learning_rate': 'config.learning_rate', 'decay_steps': 'steps_per_epoch', 'decay_rate': 'config.learning_decay_rate', 'staircase': '(True)'}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'lr_schedule', 'beta_1': '(0.98)'}), True, 'import tensorflow as tf\n'), (79, 'src.utils.logs.compose_model_path', 'logs_utils.compose_model_path', ([], {'prefix': 'f"""jgrp2o_{dataset_name}_"""'}), True, 'import src.utils.logs as logs_utils\n'), (24, 'src.datasets.bighand.dataset.BighandDataset', 'BighandDataset', (['BIGHAND_DATASET_DIR'], {'test_subject': '"""Subject_8"""', 'batch_size': 'train_config.batch_size', 'shuffle': '(True)'}), False, 'from src.datasets.bighand.dataset import BighandDataset\n'), (52, 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'log_dir', 'update_freq': '"""epoch"""'}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': 'checkpoint_path', 'monitor': 'monitor_loss', 'save_weights_only': '(True)'}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': 'monitor_loss', 'patience': '(10)', 'restore_best_weights': '(True)'}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.keras.callbacks.TerminateOnNaN', 'tf.keras.callbacks.TerminateOnNaN', ([], {}), True, 'import tensorflow as tf\n'), (27, 'src.datasets.msra.dataset.MSRADataset', 'MSRADataset', (['MSRAHANDGESTURE_DATASET_DIR'], {'batch_size': 'train_config.batch_size', 'shuffle': '(True)'}), False, 'from src.datasets.msra.dataset import MSRADataset\n'), (68, 'src.estimation.architecture.losses.CoordinateLoss', 'CoordinateLoss', ([], {}), False, 'from src.estimation.architecture.losses import CoordinateLoss, OffsetLoss\n'), (68, 'src.estimation.architecture.losses.OffsetLoss', 'OffsetLoss', ([], {}), False, 'from src.estimation.architecture.losses import CoordinateLoss, OffsetLoss\n')]
mm493821882/Mask_recognition-with-GUI
42c020f11d764e561800069f1d32b58e973dc29e
import os import time from functools import partial import numpy as np import tensorflow as tf import tensorflow.keras.backend as K from tensorflow.keras.callbacks import (EarlyStopping, ReduceLROnPlateau, TensorBoard) from tensorflow.keras.layers import Input, Lambda from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Adam from tqdm import tqdm from nets.loss import yolo_loss from nets.yolo4_tiny import yolo_body from utils.utils import (ModelCheckpoint, WarmUpCosineDecayScheduler, get_random_data, get_random_data_with_Mosaic, rand) from tensorflow.compat.v1 import ConfigProto from tensorflow.compat.v1 import InteractiveSession config = ConfigProto() config.gpu_options.allow_growth = True session = InteractiveSession(config=config) #---------------------------------------------------# # 获得类和先验框 #---------------------------------------------------# def get_classes(classes_path): '''loads the classes''' with open(classes_path) as f: class_names = f.readlines() class_names = [c.strip() for c in class_names] return class_names def get_anchors(anchors_path): '''loads the anchors from a file''' with open(anchors_path) as f: anchors = f.readline() anchors = [float(x) for x in anchors.split(',')] return np.array(anchors).reshape(-1, 2) #---------------------------------------------------# # 训练数据生成器 #---------------------------------------------------# def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes, mosaic=False, random=True): n = len(annotation_lines) i = 0 flag = True while True: image_data = [] box_data = [] for b in range(batch_size): if i==0: np.random.shuffle(annotation_lines) if mosaic: if flag and (i+4) < n: image, box = get_random_data_with_Mosaic(annotation_lines[i:i+4], input_shape) i = (i+4) % n else: image, box = get_random_data(annotation_lines[i], input_shape, random=random) i = (i+1) % n flag = bool(1-flag) else: image, box = get_random_data(annotation_lines[i], input_shape, random=random) i = (i+1) % n image_data.append(image) box_data.append(box) image_data = np.array(image_data) box_data = np.array(box_data) y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes) yield image_data, y_true[0], y_true[1] #---------------------------------------------------# # 读入xml文件,并输出y_true #---------------------------------------------------# def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes): assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes' # 一共有两个特征层数 num_layers = len(anchors)//3 #-----------------------------------------------------------# # 13x13的特征层对应的anchor是[81,82], [135,169], [344,319] # 26x26的特征层对应的anchor是[23,27], [37,58], [81,82] #-----------------------------------------------------------# anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] #-----------------------------------------------------------# # 获得框的坐标和图片的大小 #-----------------------------------------------------------# true_boxes = np.array(true_boxes, dtype='float32') input_shape = np.array(input_shape, dtype='int32') #-----------------------------------------------------------# # 通过计算获得真实框的中心和宽高 # 中心点(m,n,2) 宽高(m,n,2) #-----------------------------------------------------------# boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2 boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2] #-----------------------------------------------------------# # 将真实框归一化到小数形式 #-----------------------------------------------------------# true_boxes[..., 0:2] 
= boxes_xy/input_shape[::-1] true_boxes[..., 2:4] = boxes_wh/input_shape[::-1] # m为图片数量,grid_shapes为网格的shape m = true_boxes.shape[0] grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)] #-----------------------------------------------------------# # y_true的格式为(m,13,13,3,85)(m,26,26,3,85) #-----------------------------------------------------------# y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes), dtype='float32') for l in range(num_layers)] #-----------------------------------------------------------# # [6,2] -> [1,6,2] #-----------------------------------------------------------# anchors = np.expand_dims(anchors, 0) anchor_maxes = anchors / 2. anchor_mins = -anchor_maxes #-----------------------------------------------------------# # 长宽要大于0才有效 #-----------------------------------------------------------# valid_mask = boxes_wh[..., 0]>0 for b in range(m): # 对每一张图进行处理 wh = boxes_wh[b, valid_mask[b]] if len(wh)==0: continue #-----------------------------------------------------------# # [n,2] -> [n,1,2] #-----------------------------------------------------------# wh = np.expand_dims(wh, -2) box_maxes = wh / 2. box_mins = -box_maxes #-----------------------------------------------------------# # 计算所有真实框和先验框的交并比 # intersect_area [n,6] # box_area [n,1] # anchor_area [1,6] # iou [n,6] #-----------------------------------------------------------# intersect_mins = np.maximum(box_mins, anchor_mins) intersect_maxes = np.minimum(box_maxes, anchor_maxes) intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.) intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1] box_area = wh[..., 0] * wh[..., 1] anchor_area = anchors[..., 0] * anchors[..., 1] iou = intersect_area / (box_area + anchor_area - intersect_area) #-----------------------------------------------------------# # 维度是[n,] 感谢 消尽不死鸟 的提醒 #-----------------------------------------------------------# best_anchor = np.argmax(iou, axis=-1) for t, n in enumerate(best_anchor): #-----------------------------------------------------------# # 找到每个真实框所属的特征层 #-----------------------------------------------------------# for l in range(num_layers): if n in anchor_mask[l]: #-----------------------------------------------------------# # floor用于向下取整,找到真实框所属的特征层对应的x、y轴坐标 #-----------------------------------------------------------# i = np.floor(true_boxes[b,t,0] * grid_shapes[l][1]).astype('int32') j = np.floor(true_boxes[b,t,1] * grid_shapes[l][0]).astype('int32') #-----------------------------------------------------------# # k指的的当前这个特征点的第k个先验框 #-----------------------------------------------------------# k = anchor_mask[l].index(n) #-----------------------------------------------------------# # c指的是当前这个真实框的种类 #-----------------------------------------------------------# c = true_boxes[b, t, 4].astype('int32') #-----------------------------------------------------------# # y_true的shape为(m,13,13,3,85)(m,26,26,3,85)(m,52,52,3,85) # 最后的85可以拆分成4+1+80,4代表的是框的中心与宽高、 # 1代表的是置信度、80代表的是种类 #-----------------------------------------------------------# y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4] y_true[l][b, j, i, k, 4] = 1 y_true[l][b, j, i, k, 5+c] = 1 return y_true # 防止bug def get_train_step_fn(): @tf.function def train_step(imgs, yolo_loss, targets, net, optimizer, regularization, normalize): with tf.GradientTape() as tape: # 计算loss P5_output, P4_output = net(imgs, training=True) args = [P5_output, P4_output] + targets loss_value = 
yolo_loss(args,anchors,num_classes,label_smoothing=label_smoothing,normalize=normalize) if regularization: # 加入正则化损失 loss_value = tf.reduce_sum(net.losses) + loss_value grads = tape.gradient(loss_value, net.trainable_variables) optimizer.apply_gradients(zip(grads, net.trainable_variables)) return loss_value return train_step def fit_one_epoch(net, yolo_loss, optimizer, epoch, epoch_size, epoch_size_val, gen, genval, Epoch, anchors, num_classes, label_smoothing, regularization=False, train_step=None): loss = 0 val_loss = 0 with tqdm(total=epoch_size,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar: for iteration, batch in enumerate(gen): if iteration>=epoch_size: break images, target0, target1 = batch[0], batch[1], batch[2] targets = [target0, target1] targets = [tf.convert_to_tensor(target) for target in targets] loss_value = train_step(images, yolo_loss, targets, net, optimizer, regularization, normalize=normalize) loss = loss + loss_value pbar.set_postfix(**{'total_loss': float(loss) / (iteration + 1), 'lr' : optimizer._decayed_lr(tf.float32).numpy()}) pbar.update(1) print('Start Validation') with tqdm(total=epoch_size_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar: for iteration, batch in enumerate(genval): if iteration>=epoch_size_val: break # 计算验证集loss images, target0, target1 = batch[0], batch[1], batch[2] targets = [target0, target1] targets = [tf.convert_to_tensor(target) for target in targets] P5_output, P4_output = net(images) args = [P5_output, P4_output] + targets loss_value = yolo_loss(args,anchors,num_classes,label_smoothing=label_smoothing,normalize=normalize) if regularization: # 加入正则化损失 loss_value = tf.reduce_sum(net.losses) + loss_value # 更新验证集loss val_loss = val_loss + loss_value pbar.set_postfix(**{'total_loss': float(val_loss)/ (iteration + 1)}) pbar.update(1) print('Finish Validation') print('Epoch:'+ str(epoch+1) + '/' + str(Epoch)) print('Total Loss: %.4f || Val Loss: %.4f ' % (loss/(epoch_size+1),val_loss/(epoch_size_val+1))) net.save_weights('logs/Epoch%d-Total_Loss%.4f-Val_Loss%.4f.h5'%((epoch+1),loss/(epoch_size+1),val_loss/(epoch_size_val+1))) gpus = tf.config.experimental.list_physical_devices(device_type='GPU') for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) #----------------------------------------------------# # 检测精度mAP和pr曲线计算参考视频 # https://www.bilibili.com/video/BV1zE411u7Vw #----------------------------------------------------# if __name__ == "__main__": #----------------------------------------------------# # 获得图片路径和标签 #----------------------------------------------------# annotation_path = '2007_train.txt' #------------------------------------------------------# # 训练后的模型保存的位置,保存在logs文件夹里面 #------------------------------------------------------# log_dir = 'logs/' #----------------------------------------------------# # classes和anchor的路径,非常重要 # 训练前一定要修改classes_path,使其对应自己的数据集 #----------------------------------------------------# classes_path = 'model_data/voc_classes.txt' anchors_path = 'model_data/yolo_anchors.txt' #------------------------------------------------------# # 权值文件请看README,百度网盘下载 # 训练自己的数据集时提示维度不匹配正常 # 预测的东西都不一样了自然维度不匹配 #------------------------------------------------------# weights_path = 'model_data/yolov4_tiny_weights_coco.h5' #------------------------------------------------------# # 训练用图片大小 # 一般在416x416和608x608选择 #------------------------------------------------------# input_shape = (416,416) #------------------------------------------------------# # 
是否对损失进行归一化,用于改变loss的大小 # 用于决定计算最终loss是除上batch_size还是除上正样本数量 #------------------------------------------------------# normalize = False #----------------------------------------------------# # 获取classes和anchor #----------------------------------------------------# class_names = get_classes(classes_path) anchors = get_anchors(anchors_path) #------------------------------------------------------# # 一共有多少类和多少先验框 #------------------------------------------------------# num_classes = len(class_names) num_anchors = len(anchors) #------------------------------------------------------# # Yolov4的tricks应用 # mosaic 马赛克数据增强 True or False # 实际测试时mosaic数据增强并不稳定,所以默认为False # Cosine_scheduler 余弦退火学习率 True or False # label_smoothing 标签平滑 0.01以下一般 如0.01、0.005 #------------------------------------------------------# mosaic = False Cosine_scheduler = False label_smoothing = 0 regularization = True #-------------------------------# # Dataloder的使用 #-------------------------------# Use_Data_Loader = True #------------------------------------------------------# # 创建yolo模型 #------------------------------------------------------# image_input = Input(shape=(None, None, 3)) h, w = input_shape print('Create YOLOv4 model with {} anchors and {} classes.'.format(num_anchors, num_classes)) model_body = yolo_body(image_input, num_anchors//2, num_classes) #-------------------------------------------# # 权值文件的下载请看README #-------------------------------------------# print('Load weights {}.'.format(weights_path)) model_body.load_weights(weights_path, by_name=True, skip_mismatch=True) #----------------------------------------------------------------------# # 验证集的划分在train.py代码里面进行 # 2007_test.txt和2007_val.txt里面没有内容是正常的。训练不会使用到。 # 当前划分方式下,验证集和训练集的比例为1:9 #----------------------------------------------------------------------# val_split = 0.1 with open(annotation_path) as f: lines = f.readlines() np.random.seed(10101) np.random.shuffle(lines) np.random.seed(None) num_val = int(len(lines)*val_split) num_train = len(lines) - num_val freeze_layers = 60 for i in range(freeze_layers): model_body.layers[i].trainable = False print('Freeze the first {} layers of total {} layers.'.format(freeze_layers, len(model_body.layers))) #------------------------------------------------------# # 主干特征提取网络特征通用,冻结训练可以加快训练速度 # 也可以在训练初期防止权值被破坏。 # Init_Epoch为起始世代 # Freeze_Epoch为冻结训练的世代 # Epoch总训练世代 # 提示OOM或者显存不足请调小Batch_size #------------------------------------------------------# if True: Init_epoch = 0 Freeze_epoch = 5 batch_size = 8 learning_rate_base = 1e-3 if Use_Data_Loader: gen = partial(data_generator, annotation_lines = lines[:num_train], batch_size = batch_size, input_shape = input_shape, anchors = anchors, num_classes = num_classes, mosaic=mosaic) gen = tf.data.Dataset.from_generator(gen, (tf.float32, tf.float32, tf.float32)) gen_val = partial(data_generator, annotation_lines = lines[num_train:], batch_size = batch_size, input_shape = input_shape, anchors = anchors, num_classes = num_classes, mosaic=False) gen_val = tf.data.Dataset.from_generator(gen_val, (tf.float32, tf.float32, tf.float32)) gen = gen.shuffle(buffer_size=batch_size).prefetch(buffer_size=batch_size) gen_val = gen_val.shuffle(buffer_size=batch_size).prefetch(buffer_size=batch_size) else: gen = data_generator(lines[:num_train], batch_size, input_shape, anchors, num_classes, mosaic=mosaic) gen_val = data_generator(lines[num_train:], batch_size, input_shape, anchors, num_classes, mosaic=False) epoch_size = num_train//batch_size epoch_size_val = num_val//batch_size if Cosine_scheduler: 
            lr_schedule = tf.keras.experimental.CosineDecayRestarts(
                initial_learning_rate = learning_rate_base,
                first_decay_steps = 5*epoch_size,
                t_mul = 1.0,
                alpha = 1e-2
            )
        else:
            lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
                initial_learning_rate=learning_rate_base,
                decay_steps=epoch_size,
                decay_rate=0.92,
                staircase=True
            )

        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)

        for epoch in range(Init_epoch, Freeze_epoch):
            fit_one_epoch(model_body, yolo_loss, optimizer, epoch, epoch_size, epoch_size_val, gen, gen_val,
                          Freeze_epoch, anchors, num_classes, label_smoothing, regularization, get_train_step_fn())

    for i in range(freeze_layers):
        model_body.layers[i].trainable = True

    # Train after unfreezing the backbone
    if True:
        Freeze_epoch = 5
        Epoch = 10
        batch_size = 8
        learning_rate_base = 1e-4

        if Use_Data_Loader:
            gen = partial(data_generator, annotation_lines = lines[:num_train],
                          batch_size = batch_size, input_shape = input_shape,
                          anchors = anchors, num_classes = num_classes, mosaic=mosaic)
            gen = tf.data.Dataset.from_generator(gen, (tf.float32, tf.float32, tf.float32))

            gen_val = partial(data_generator, annotation_lines = lines[num_train:],
                              batch_size = batch_size, input_shape = input_shape,
                              anchors = anchors, num_classes = num_classes, mosaic=False)
            gen_val = tf.data.Dataset.from_generator(gen_val, (tf.float32, tf.float32, tf.float32))

            gen = gen.shuffle(buffer_size=batch_size).prefetch(buffer_size=batch_size)
            gen_val = gen_val.shuffle(buffer_size=batch_size).prefetch(buffer_size=batch_size)
        else:
            gen = data_generator(lines[:num_train], batch_size, input_shape, anchors, num_classes, mosaic=mosaic)
            gen_val = data_generator(lines[num_train:], batch_size, input_shape, anchors, num_classes, mosaic=False)

        epoch_size = num_train//batch_size
        epoch_size_val = num_val//batch_size

        if Cosine_scheduler:
            lr_schedule = tf.keras.experimental.CosineDecayRestarts(
                initial_learning_rate = learning_rate_base,
                first_decay_steps = 5*epoch_size,
                t_mul = 1.0,
                alpha = 1e-2
            )
        else:
            lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
                initial_learning_rate=learning_rate_base,
                decay_steps = epoch_size,
                decay_rate=0.92,
                staircase=True
            )

        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)

        for epoch in range(Freeze_epoch, Epoch):
            fit_one_epoch(model_body, yolo_loss, optimizer, epoch, epoch_size, epoch_size_val, gen, gen_val,
                          Epoch, anchors, num_classes, label_smoothing, regularization, get_train_step_fn())
[ "tensorflow.convert_to_tensor", "numpy.expand_dims", "numpy.minimum", "tensorflow.keras.experimental.CosineDecayRestarts", "tensorflow.reduce_sum", "tensorflow.compat.v1.InteractiveSession", "tensorflow.data.Dataset.from_generator", "tensorflow.config.experimental.set_memory_growth", "numpy.argmax", "tensorflow.config.experimental.list_physical_devices", "numpy.floor", "numpy.array", "tensorflow.GradientTape", "tensorflow.compat.v1.ConfigProto", "numpy.maximum", "numpy.random.seed", "tensorflow.keras.optimizers.schedules.ExponentialDecay", "numpy.random.shuffle", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.Input" ]
train_eager.py
[(24, 'tensorflow.compat.v1.ConfigProto', 'ConfigProto', ([], {}), False, 'from tensorflow.compat.v1 import ConfigProto\n'), (26, 'tensorflow.compat.v1.InteractiveSession', 'InteractiveSession', ([], {'config': 'config'}), False, 'from tensorflow.compat.v1 import InteractiveSession\n'), (251, 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', ([], {'device_type': '"""GPU"""'}), True, 'import tensorflow as tf\n'), (92, 'numpy.array', 'np.array', (['true_boxes'], {'dtype': '"""float32"""'}), True, 'import numpy as np\n'), (93, 'numpy.array', 'np.array', (['input_shape'], {'dtype': '"""int32"""'}), True, 'import numpy as np\n'), (118, 'numpy.expand_dims', 'np.expand_dims', (['anchors', '(0)'], {}), True, 'import numpy as np\n'), (253, 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), True, 'import tensorflow as tf\n'), (321, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, None, 3)'}), False, 'from tensorflow.keras.layers import Input, Lambda\n'), (324, 'nets.yolo4_tiny.yolo_body', 'yolo_body', (['image_input', '(num_anchors // 2)', 'num_classes'], {}), False, 'from nets.yolo4_tiny import yolo_body\n'), (340, 'numpy.random.seed', 'np.random.seed', (['(10101)'], {}), True, 'import numpy as np\n'), (341, 'numpy.random.shuffle', 'np.random.shuffle', (['lines'], {}), True, 'import numpy as np\n'), (342, 'numpy.random.seed', 'np.random.seed', (['None'], {}), True, 'import numpy as np\n'), (71, 'numpy.array', 'np.array', (['image_data'], {}), True, 'import numpy as np\n'), (72, 'numpy.array', 'np.array', (['box_data'], {}), True, 'import numpy as np\n'), (134, 'numpy.expand_dims', 'np.expand_dims', (['wh', '(-2)'], {}), True, 'import numpy as np\n'), (145, 'numpy.maximum', 'np.maximum', (['box_mins', 'anchor_mins'], {}), True, 'import numpy as np\n'), (146, 'numpy.minimum', 'np.minimum', (['box_maxes', 'anchor_maxes'], {}), True, 'import numpy as np\n'), (147, 'numpy.maximum', 'np.maximum', (['(intersect_maxes - intersect_mins)', '(0.0)'], {}), True, 'import numpy as np\n'), (157, 'numpy.argmax', 'np.argmax', (['iou'], {'axis': '(-1)'}), True, 'import numpy as np\n'), (210, 'tqdm.tqdm', 'tqdm', ([], {'total': 'epoch_size', 'desc': 'f"""Epoch {epoch + 1}/{Epoch}"""', 'postfix': 'dict', 'mininterval': '(0.3)'}), False, 'from tqdm import tqdm\n'), (225, 'tqdm.tqdm', 'tqdm', ([], {'total': 'epoch_size_val', 'desc': 'f"""Epoch {epoch + 1}/{Epoch}"""', 'postfix': 'dict', 'mininterval': '(0.3)'}), False, 'from tqdm import tqdm\n'), (398, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'lr_schedule'}), True, 'import tensorflow as tf\n'), (444, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'lr_schedule'}), True, 'import tensorflow as tf\n'), (43, 'numpy.array', 'np.array', (['anchors'], {}), True, 'import numpy as np\n'), (193, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (197, 'nets.loss.yolo_loss', 'yolo_loss', (['args', 'anchors', 'num_classes'], {'label_smoothing': 'label_smoothing', 'normalize': 'normalize'}), False, 'from nets.loss import yolo_loss\n'), (236, 'nets.loss.yolo_loss', 'yolo_loss', (['args', 'anchors', 'num_classes'], {'label_smoothing': 'label_smoothing', 'normalize': 'normalize'}), False, 'from nets.loss import yolo_loss\n'), (365, 'functools.partial', 'partial', (['data_generator'], {'annotation_lines': 'lines[:num_train]', 
'batch_size': 'batch_size', 'input_shape': 'input_shape', 'anchors': 'anchors', 'num_classes': 'num_classes', 'mosaic': 'mosaic'}), False, 'from functools import partial\n'), (367, 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['gen', '(tf.float32, tf.float32, tf.float32)'], {}), True, 'import tensorflow as tf\n'), (369, 'functools.partial', 'partial', (['data_generator'], {'annotation_lines': 'lines[num_train:]', 'batch_size': 'batch_size', 'input_shape': 'input_shape', 'anchors': 'anchors', 'num_classes': 'num_classes', 'mosaic': '(False)'}), False, 'from functools import partial\n'), (371, 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['gen_val', '(tf.float32, tf.float32, tf.float32)'], {}), True, 'import tensorflow as tf\n'), (384, 'tensorflow.keras.experimental.CosineDecayRestarts', 'tf.keras.experimental.CosineDecayRestarts', ([], {'initial_learning_rate': 'learning_rate_base', 'first_decay_steps': '(5 * epoch_size)', 't_mul': '(1.0)', 'alpha': '(0.01)'}), True, 'import tensorflow as tf\n'), (391, 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', ([], {'initial_learning_rate': 'learning_rate_base', 'decay_steps': 'epoch_size', 'decay_rate': '(0.92)', 'staircase': '(True)'}), True, 'import tensorflow as tf\n'), (413, 'functools.partial', 'partial', (['data_generator'], {'annotation_lines': 'lines[:num_train]', 'batch_size': 'batch_size', 'input_shape': 'input_shape', 'anchors': 'anchors', 'num_classes': 'num_classes', 'mosaic': 'mosaic'}), False, 'from functools import partial\n'), (415, 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['gen', '(tf.float32, tf.float32, tf.float32)'], {}), True, 'import tensorflow as tf\n'), (417, 'functools.partial', 'partial', (['data_generator'], {'annotation_lines': 'lines[num_train:]', 'batch_size': 'batch_size', 'input_shape': 'input_shape', 'anchors': 'anchors', 'num_classes': 'num_classes', 'mosaic': '(False)'}), False, 'from functools import partial\n'), (419, 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['gen_val', '(tf.float32, tf.float32, tf.float32)'], {}), True, 'import tensorflow as tf\n'), (430, 'tensorflow.keras.experimental.CosineDecayRestarts', 'tf.keras.experimental.CosineDecayRestarts', ([], {'initial_learning_rate': 'learning_rate_base', 'first_decay_steps': '(5 * epoch_size)', 't_mul': '(1.0)', 'alpha': '(0.01)'}), True, 'import tensorflow as tf\n'), (437, 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', ([], {'initial_learning_rate': 'learning_rate_base', 'decay_steps': 'epoch_size', 'decay_rate': '(0.92)', 'staircase': '(True)'}), True, 'import tensorflow as tf\n'), (57, 'numpy.random.shuffle', 'np.random.shuffle', (['annotation_lines'], {}), True, 'import numpy as np\n'), (67, 'utils.utils.get_random_data', 'get_random_data', (['annotation_lines[i]', 'input_shape'], {'random': 'random'}), False, 'from utils.utils import ModelCheckpoint, WarmUpCosineDecayScheduler, get_random_data, get_random_data_with_Mosaic, rand\n'), (216, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['target'], {}), True, 'import tensorflow as tf\n'), (232, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['target'], {}), True, 'import tensorflow as tf\n'), (60, 'utils.utils.get_random_data_with_Mosaic', 'get_random_data_with_Mosaic', (['annotation_lines[i:i + 4]', 'input_shape'], {}), False, 'from 
utils.utils import ModelCheckpoint, WarmUpCosineDecayScheduler, get_random_data, get_random_data_with_Mosaic, rand\n'), (63, 'utils.utils.get_random_data', 'get_random_data', (['annotation_lines[i]', 'input_shape'], {'random': 'random'}), False, 'from utils.utils import ModelCheckpoint, WarmUpCosineDecayScheduler, get_random_data, get_random_data_with_Mosaic, rand\n'), (200, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['net.losses'], {}), True, 'import tensorflow as tf\n'), (239, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['net.losses'], {}), True, 'import tensorflow as tf\n'), (168, 'numpy.floor', 'np.floor', (['(true_boxes[b, t, 0] * grid_shapes[l][1])'], {}), True, 'import numpy as np\n'), (169, 'numpy.floor', 'np.floor', (['(true_boxes[b, t, 1] * grid_shapes[l][0])'], {}), True, 'import numpy as np\n')]
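The record above walks through a two-stage transfer-learning schedule: train with the backbone frozen at a higher learning rate, then unfreeze and continue at a learning rate one order of magnitude lower. Below is a minimal, self-contained sketch of that pattern on a toy Keras model; the model, data, layer count and hyperparameters are illustrative stand-ins rather than the YOLOv4 ones, though the ExponentialDecay schedule mirrors the one in train_eager.py.

import numpy as np
import tensorflow as tf

# Toy stand-in model: any tf.keras.Model with a "backbone" prefix works the same way.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(16,)),  # "backbone"
    tf.keras.layers.Dense(32, activation='relu'),                     # "backbone"
    tf.keras.layers.Dense(1, activation='sigmoid'),                   # "head"
])

# Random placeholder data, not a real dataset.
x = np.random.rand(64, 16).astype('float32')
y = np.random.randint(0, 2, size=(64, 1)).astype('float32')

freeze_layers = 2
steps_per_epoch = 8

def compile_with_decay(m, base_lr):
    # Same shape of schedule as train_eager.py: decay once per epoch.
    schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=base_lr, decay_steps=steps_per_epoch,
        decay_rate=0.92, staircase=True)
    m.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=schedule),
              loss='binary_crossentropy')

# Stage 1: freeze the backbone, train the head at the higher learning rate.
for layer in model.layers[:freeze_layers]:
    layer.trainable = False
compile_with_decay(model, 1e-3)
model.fit(x, y, batch_size=8, epochs=2, verbose=0)

# Stage 2: unfreeze everything and continue at a 10x lower learning rate.
for layer in model.layers[:freeze_layers]:
    layer.trainable = True
compile_with_decay(model, 1e-4)
model.fit(x, y, batch_size=8, epochs=2, verbose=0)

Recompiling after flipping trainable is deliberate: Keras snapshots the trainable state at compile time, so toggling the flag alone does not take effect.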
shan18/Image-Captioning
3bc75bcc32317d5b2a3967f95cf2e28d6f3554b2
""" This file downloads the InceptionV3 model pre-trained on the imagenet dataset. It then removes the last layer from the model and returns the modified model. """ from tensorflow.keras.models import Model from tensorflow.keras.applications import InceptionV3 def load_inception_v3(): model = InceptionV3(include_top=True, weights='imagenet') conv_model = Model(model.input, model.layers[-2].output) return conv_model
[ "tensorflow.keras.applications.InceptionV3", "tensorflow.keras.models.Model" ]
models/inception_v3.py
[(13, 'tensorflow.keras.applications.InceptionV3', 'InceptionV3', ([], {'include_top': '(True)', 'weights': '"""imagenet"""'}), False, 'from tensorflow.keras.applications import InceptionV3\n'), (14, 'tensorflow.keras.models.Model', 'Model', (['model.input', 'model.layers[-2].output'], {}), False, 'from tensorflow.keras.models import Model\n')]
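A short usage sketch for the truncated model above: with include_top=True, the layer before the final softmax is InceptionV3's global-average-pool layer, so the returned model emits 2048-dimensional image features. The import of load_inception_v3 assumes the repo root is on sys.path, and the random array is a stand-in for a real photo.

import numpy as np
from tensorflow.keras.applications.inception_v3 import preprocess_input
from models.inception_v3 import load_inception_v3  # path matches this record's file_path

# InceptionV3 with include_top=True expects 299x299 RGB inputs; dropping the
# final Dense layer leaves the 2048-d pooled feature vector as the output.
conv_model = load_inception_v3()  # downloads ImageNet weights on first call

img = np.random.rand(1, 299, 299, 3).astype('float32')   # stand-in image in [0, 1]
features = conv_model.predict(preprocess_input(img * 255.0))  # preprocess expects [0, 255]
print(features.shape)  # (1, 2048)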
BioSystemsUM/DeepMol
62904fac46f62ec6231543891efbe52ac7ea1cf1
from unittest import TestCase from deepchem.models import MultitaskClassifier from rdkit.Chem.rdMolAlign import AlignMol from rdkit.Chem.rdmolfiles import MolFromSmiles from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import roc_auc_score, classification_report, precision_score, accuracy_score, confusion_matrix from sklearn.svm import SVC from tensorflow.python.keras import Sequential from tensorflow.python.keras.layers import Dense, Dropout, GaussianNoise, Reshape, Conv1D, Flatten from tensorflow.keras.optimizers import Adadelta, Adam, RMSprop from compoundFeaturization.mixedDescriptors import MixedFeaturizer from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, \ PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, \ SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, \ ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file from compoundFeaturization.rdkitFingerprints import MorganFingerprint from loaders.Loaders import SDFLoader, CSVLoader from metrics.Metrics import Metric from models.DeepChemModels import DeepChemModel from models.kerasModels import KerasModel from models.sklearnModels import SklearnModel from splitters.splitters import SingletaskStratifiedSplitter import pandas as pd class Test3DGeneration(TestCase): def test_align(self): generator = ThreeDimensionalMoleculeGenerator() mol = MolFromSmiles("Cc1cc2-c3c(O)cc(cc3OC3(Oc4cc(O)ccc4-c(c1O)c23)c1ccc(O)cc1O)-c1cc2cccc(O)c2o1") generator.generate_conformers(mol) class TestSdfImporter(TestCase): def test_sdf_importer(self): loader = SDFLoader("../data/dataset_sweet_3D_to_test.sdf", "_SourceID", labels_fields=["_SWEET"]) dataset = loader.create_dataset() assert len(loader.mols_handler) == 100 assert len(dataset.y) == 100 assert len(dataset.X) == 100 def test_2_sdf_importer(self): loader = SDFLoader("../data/A2780.sdf", "ChEMBL_ID", labels_fields=["pIC50"]) dataset = loader.create_dataset() assert len(dataset.X) == 2255 class TestRdkit3DDescriptors(TestCase): def setUp(self) -> None: self.test_dataset_sdf = "../data/dataset_sweet_3D_to_test.sdf" self.test_dataset_to_fail = "../data/preprocessed_dataset_wfoodb.csv" def test_autocorr3D(self): loader = SDFLoader(self.test_dataset_sdf, "_SourceID", labels_fields=["_SWEET"]) dataset = loader.create_dataset() dataset = AutoCorr3D().featurize(dataset) assert len(dataset.X) == 100 assert isinstance(dataset.X[0][0], float) def test_autocorr3D_to_fail(self): loader = CSVLoader(self.test_dataset_to_fail, mols_field='Smiles', labels_fields='Class', id_field='ID') dataset = loader.create_dataset() with self.assertRaises(SystemExit) as cm: AutoCorr3D().featurize(dataset) self.assertEqual(cm.exception.code, 1) def test_RDF(self): loader = SDFLoader(self.test_dataset_sdf, "_SourceID", labels_fields=["_SWEET"]) dataset = loader.create_dataset() dataset = RadialDistributionFunction().featurize(dataset) assert len(dataset.X) == 100 assert isinstance(dataset.X[0][0], float) assert len(dataset.X[0]) == 210 def test_RDF_to_fail(self): loader = CSVLoader(self.test_dataset_to_fail, mols_field='Smiles', labels_fields='Class', id_field='ID') dataset = loader.create_dataset() with self.assertRaises(SystemExit) as cm: RadialDistributionFunction().featurize(dataset) self.assertEqual(cm.exception.code, 1) def test_PBF(self): loader = SDFLoader(self.test_dataset_sdf, "_SourceID", labels_fields=["_SWEET"]) dataset = loader.create_dataset() dataset = 
PlaneOfBestFit().featurize(dataset) assert len(dataset.X) == 100 assert isinstance(dataset.X[0][0], float) assert len(dataset.X[0]) == 1 def test_PDF_to_fail(self): loader = CSVLoader(self.test_dataset_to_fail, mols_field='Smiles', labels_fields='Class', id_field='ID') dataset = loader.create_dataset() with self.assertRaises(SystemExit) as cm: PlaneOfBestFit().featurize(dataset) self.assertEqual(cm.exception.code, 1) def test_MORSE(self): loader = SDFLoader(self.test_dataset_sdf, "_SourceID", labels_fields=["_SWEET"]) dataset = loader.create_dataset() dataset = MORSE().featurize(dataset) assert len(dataset.X) == 100 assert isinstance(dataset.X[0][0], float) assert len(dataset.X[0]) == 224 def test_MORSE_to_fail(self): loader = CSVLoader(self.test_dataset_to_fail, mols_field='Smiles', labels_fields='Class', id_field='ID') dataset = loader.create_dataset() with self.assertRaises(SystemExit) as cm: MORSE().featurize(dataset) self.assertEqual(cm.exception.code, 1) def test_WHIM(self): loader = SDFLoader(self.test_dataset_sdf, "_SourceID", labels_fields=["_SWEET"]) dataset = loader.create_dataset() dataset = WHIM().featurize(dataset) assert len(dataset.X) == 100 assert isinstance(dataset.X[0][0], float) assert len(dataset.X[0]) == 114 def test_WHIM_to_fail(self): loader = CSVLoader(self.test_dataset_to_fail, mols_field='Smiles', labels_fields='Class', id_field='ID') dataset = loader.create_dataset() with self.assertRaises(SystemExit) as cm: WHIM().featurize(dataset) self.assertEqual(cm.exception.code, 1) def test_radius_of_gyration(self): loader = SDFLoader(self.test_dataset_sdf, "_SourceID", labels_fields=["_SWEET"]) dataset = loader.create_dataset() dataset = RadiusOfGyration().featurize(dataset) assert len(dataset.X) == 100 assert isinstance(dataset.X[0][0], float) assert len(dataset.X[0]) == 1 def test_radius_of_gyration_to_fail(self): loader = CSVLoader(self.test_dataset_to_fail, mols_field='Smiles', labels_fields='Class', id_field='ID') dataset = loader.create_dataset() with self.assertRaises(SystemExit) as cm: RadiusOfGyration().featurize(dataset) self.assertEqual(cm.exception.code, 1) def test_isf(self): loader = SDFLoader(self.test_dataset_sdf, "_SourceID", labels_fields=["_SWEET"]) dataset = loader.create_dataset() dataset = InertialShapeFactor().featurize(dataset) assert len(dataset.X) == 100 assert isinstance(dataset.X[0][0], float) assert len(dataset.X[0]) == 1 def test_isf_to_fail(self): loader = CSVLoader(self.test_dataset_to_fail, mols_field='Smiles', labels_fields='Class', id_field='ID') dataset = loader.create_dataset() with self.assertRaises(SystemExit) as cm: InertialShapeFactor().featurize(dataset) self.assertEqual(cm.exception.code, 1) def test_Eccentricity(self): loader = SDFLoader(self.test_dataset_sdf, "_SourceID", labels_fields=["_SWEET"]) dataset = loader.create_dataset() dataset = Eccentricity().featurize(dataset) assert len(dataset.X) == 100 assert isinstance(dataset.X[0][0], float) assert len(dataset.X[0]) == 1 def test_eccentricity_to_fail(self): loader = CSVLoader(self.test_dataset_to_fail, mols_field='Smiles', labels_fields='Class', id_field='ID') dataset = loader.create_dataset() with self.assertRaises(SystemExit) as cm: Eccentricity().featurize(dataset) self.assertEqual(cm.exception.code, 1) def test_Asphericity(self): loader = SDFLoader(self.test_dataset_sdf, "_SourceID", labels_fields=["_SWEET"]) dataset = loader.create_dataset() dataset = Asphericity().featurize(dataset) assert len(dataset.X) == 100 assert isinstance(dataset.X[0][0], float) assert 
len(dataset.X[0]) == 1 def test_asphericity_to_fail(self): loader = CSVLoader(self.test_dataset_to_fail, mols_field='Smiles', labels_fields='Class', id_field='ID') dataset = loader.create_dataset() with self.assertRaises(SystemExit) as cm: Asphericity().featurize(dataset) self.assertEqual(cm.exception.code, 1) def test_SpherocityIndex(self): loader = SDFLoader(self.test_dataset_sdf, "_SourceID", labels_fields=["_SWEET"]) dataset = loader.create_dataset() dataset = SpherocityIndex().featurize(dataset) assert len(dataset.X) == 100 assert isinstance(dataset.X[0][0], float) assert len(dataset.X[0]) == 1 def test_SpherocityIndex_to_fail(self): loader = CSVLoader(self.test_dataset_to_fail, mols_field='Smiles', labels_fields='Class', id_field='ID') dataset = loader.create_dataset() with self.assertRaises(SystemExit) as cm: SpherocityIndex().featurize(dataset) self.assertEqual(cm.exception.code, 1) def test_PMI(self): loader = SDFLoader(self.test_dataset_sdf, "_SourceID", labels_fields=["_SWEET"]) dataset = loader.create_dataset() dataset = PrincipalMomentsOfInertia().featurize(dataset) assert len(dataset.X) == 100 assert isinstance(dataset.X[0][0], float) assert len(dataset.X[0]) == 3 def test_PMI_to_fail(self): loader = CSVLoader(self.test_dataset_to_fail, mols_field='Smiles', labels_fields='Class', id_field='ID') dataset = loader.create_dataset() with self.assertRaises(SystemExit) as cm: PrincipalMomentsOfInertia().featurize(dataset) self.assertEqual(cm.exception.code, 1) def test_NormalizedPrincipalMomentsRatios(self): loader = SDFLoader(self.test_dataset_sdf, "_SourceID", labels_fields=["_SWEET"]) dataset = loader.create_dataset() dataset = NormalizedPrincipalMomentsRatios().featurize(dataset) assert len(dataset.X) == 100 assert isinstance(dataset.X[0][0], float) assert len(dataset.X[0]) == 2 def test_NormalizedPrincipalMomentsRatios_to_fail(self): loader = CSVLoader(self.test_dataset_to_fail, mols_field='Smiles', labels_fields='Class', id_field='ID') dataset = loader.create_dataset() with self.assertRaises(SystemExit) as cm: NormalizedPrincipalMomentsRatios().featurize(dataset) self.assertEqual(cm.exception.code, 1) def test_all_rdkit_descriptors(self): loader = SDFLoader(self.test_dataset_sdf, "_SourceID", labels_fields=["_SWEET"]) dataset = loader.create_dataset() dataset = All3DDescriptors().featurize(dataset) assert len(dataset.X) == 100 assert len(dataset.X[0]) == 639 def test_all_rdkit_descriptors_to_fail(self): loader = CSVLoader(self.test_dataset_to_fail, mols_field='Smiles', labels_fields='Class', id_field='ID') dataset = loader.create_dataset() with self.assertRaises(SystemExit) as cm: All3DDescriptors().featurize(dataset) self.assertEqual(cm.exception.code, 1) class TestMixedDescriptors(TestCase): def setUp(self) -> None: self.test_dataset_sdf = "../data/dataset_sweet_3D_to_test.sdf" self.test_dataset_to_fail = "../preprocessed_dataset_wfoodb.csv" def test_mixed_descriptors_fingerprints_rdkit(self): loader = SDFLoader("../data/dataset_sweet_3D_to_test.sdf", "_SourceID", labels_fields=["_SWEET"]) dataset = loader.create_dataset() descriptors = [All3DDescriptors(), MorganFingerprint()] dataset = MixedFeaturizer(featurizers=descriptors).featurize(dataset) assert len(dataset.X) == 100 assert len(dataset.X[0]) == 2687 class TestModels3DDescriptors(TestCase): def setUp(self) -> None: loader = SDFLoader("../data/dataset_sweet_3d_balanced.sdf", "_SourceID", labels_fields=["_SWEET"]) self.dataset = loader.create_dataset() self.dataset = All3DDescriptors().featurize(self.dataset, 
scale=True) splitter = SingletaskStratifiedSplitter() self.train_dataset, self.valid_dataset, self.test_dataset = splitter.train_valid_test_split( dataset=self.dataset, frac_train=0.6, frac_valid=0.2, frac_test=0.2) def test_svm_3d_descriptors(self): svm = SVC() model = SklearnModel(model=svm) res = model.cross_validate(self.dataset, Metric(roc_auc_score), folds=3) model = res[0] model.fit(self.train_dataset) metrics = [Metric(roc_auc_score), Metric(precision_score), Metric(accuracy_score), Metric(confusion_matrix), Metric(classification_report)] print("#############################") # evaluate the model print('Training Dataset: ') train_score = model.evaluate(self.train_dataset, metrics) self.assertAlmostEqual(train_score["accuracy_score"], 0.86, delta=0.05) print("#############################") print('Validation Dataset: ') valid_score = model.evaluate(self.valid_dataset, metrics) self.assertAlmostEqual(valid_score["accuracy_score"], 0.80, delta=0.1) print("#############################") print('Test Dataset: ') test_score = model.evaluate(self.test_dataset, metrics) self.assertAlmostEqual(test_score["accuracy_score"], 0.80, delta=0.1) print("#############################") def test_rf_3d_descriptors(self): rf = RandomForestClassifier() model = SklearnModel(model=rf) res = model.cross_validate(self.dataset, Metric(roc_auc_score), folds=3) model = res[0] model.fit(self.train_dataset) metrics = [Metric(roc_auc_score), Metric(precision_score), Metric(accuracy_score), Metric(confusion_matrix), Metric(classification_report)] print("#############################") # evaluate the model print('Training Dataset: ') train_score = model.evaluate(self.train_dataset, metrics) self.assertAlmostEqual(train_score["accuracy_score"], 0.99, delta=0.02) print("#############################") print('Validation Dataset: ') valid_score = model.evaluate(self.valid_dataset, metrics) self.assertAlmostEqual(valid_score["accuracy_score"], 0.80, delta=0.1) print("#############################") print('Test Dataset: ') test_score = model.evaluate(self.test_dataset, metrics) self.assertAlmostEqual(test_score["accuracy_score"], 0.80, delta=0.1) print("#############################") def test_dnn_3d_descriptors(self): input_dim = self.train_dataset.X.shape[1] def create_model(optimizer='adam', dropout=0.5, input_dim=input_dim): # create model model = Sequential() model.add(Dense(12, input_dim=input_dim, activation='relu')) model.add(Dropout(dropout)) model.add(Dense(8, activation='relu')) model.add(Dense(1, activation='sigmoid')) # Compile model model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) return model model = KerasModel(create_model, epochs=50, verbose=1, optimizer='adam') model.fit(self.train_dataset) metrics = [Metric(roc_auc_score), Metric(precision_score), Metric(accuracy_score), Metric(confusion_matrix), Metric(classification_report)] print("#############################") # evaluate the model print('Training Dataset: ') train_score = model.evaluate(self.train_dataset, metrics) self.assertAlmostEqual(train_score["accuracy_score"], 0.87, delta=0.02) print("#############################") print('Validation Dataset: ') valid_score = model.evaluate(self.valid_dataset, metrics) self.assertAlmostEqual(valid_score["accuracy_score"], 0.80, delta=0.1) print("#############################") print('Test Dataset: ') test_score = model.evaluate(self.test_dataset, metrics) self.assertAlmostEqual(test_score["accuracy_score"], 0.80, delta=0.1) print("#############################") def 
test_cnn_3d_descriptors(self): input_dim = self.train_dataset.X.shape[1] def make_cnn_model(input_dim=input_dim, g_noise=0.05, DENSE=128, DROPOUT=0.5, C1_K=8, C1_S=32, C2_K=16, C2_S=32, activation='relu', loss='binary_crossentropy', optimizer='adadelta', learning_rate=0.01, metrics='accuracy'): model = Sequential() # Adding a bit of GaussianNoise also works as regularization model.add(GaussianNoise(g_noise, input_shape=(input_dim,))) # First two is number of filter + kernel size model.add(Reshape((input_dim, 1))) model.add(Conv1D(C1_K, C1_S, activation=activation, padding="same")) model.add(Conv1D(C2_K, C2_S, padding="same", activation=activation)) model.add(Flatten()) model.add(Dropout(DROPOUT)) model.add(Dense(DENSE, activation=activation)) model.add(Dense(1, activation='sigmoid')) if optimizer == 'adadelta': opt = Adadelta(lr=learning_rate) elif optimizer == 'adam': opt = Adam(lr=learning_rate) elif optimizer == 'rsmprop': opt = RMSprop(lr=learning_rate) else: opt = optimizer model.compile(loss=loss, optimizer=opt, metrics=metrics, ) return model model = KerasModel(make_cnn_model, epochs=10, verbose=1, optimizer="adam") model.fit(self.train_dataset) metrics = [Metric(roc_auc_score), Metric(precision_score), Metric(accuracy_score), Metric(confusion_matrix), Metric(classification_report)] train_score = model.evaluate(self.train_dataset, metrics) print('training set score:', train_score) self.assertAlmostEqual(train_score["accuracy_score"], 0.81, delta=0.3) validation_score = model.evaluate(self.valid_dataset, metrics) print('validation set score:', validation_score) self.assertAlmostEqual(validation_score["accuracy_score"], 0.81, delta=0.3) test_score = model.evaluate(self.test_dataset, metrics) print('test set score:', model.evaluate(self.test_dataset, metrics)) self.assertAlmostEqual(test_score["accuracy_score"], 0.81, delta=0.3) def test_multitaskclass_3d_descriptors(self): multitask = MultitaskClassifier(n_tasks=1, n_features=self.train_dataset.X.shape[1], layer_sizes=[1000]) model_multi = DeepChemModel(multitask) # Model training model_multi.fit(self.train_dataset) # Evaluation metrics = [Metric(roc_auc_score), Metric(precision_score), Metric(accuracy_score)] print('Training Dataset: ') train_score = model_multi.evaluate(self.train_dataset, metrics) self.assertAlmostEqual(train_score["accuracy_score"], 0.80, delta=0.1) print('Valid Dataset: ') valid_score = model_multi.evaluate(self.valid_dataset, metrics) self.assertAlmostEqual(valid_score["accuracy_score"], 0.80, delta=0.1) print('Test Dataset: ') test_score = model_multi.evaluate(self.test_dataset, metrics) self.assertAlmostEqual(test_score["accuracy_score"], 0.80, delta=0.1) class Test3DGenerator(TestCase): def setUp(self) -> None: self.generator = ThreeDimensionalMoleculeGenerator(n_conformations=20) self.test_dataset_to_convert = "../data/test_to_convert_to_sdf.csv" loader = CSVLoader(self.test_dataset_to_convert, mols_field='Smiles', labels_fields='Class', id_field='ID') self.test_dataset_to_convert_object = loader.create_dataset() def test_generate_20_conformers(self): mol = MolFromSmiles("CC(CC(C)(O)C=C)=CC=CC") self.assertEquals(mol.GetConformers(), ()) mol = self.generator.generate_conformers(mol, 1) self.assertEquals(len(mol.GetConformers()), 20) def test_optimize_geometry(self): new_generator = ThreeDimensionalMoleculeGenerator(n_conformations=10) mol_raw = MolFromSmiles("CC(CC(C)(O)C=C)=CC=C") mol_raw2 = MolFromSmiles("CC(CC(C)(O)C=C)=CC=C") self.assertEquals(mol_raw.GetConformers(), ()) new_mol = 
new_generator.generate_conformers(mol_raw, 1) conformer_1_before = new_mol.GetConformer(1) self.assertEquals(len(new_mol.GetConformers()), 10) new_mol = new_generator.optimize_molecular_geometry(new_mol, "MMFF94") conformer_1_after = new_mol.GetConformer(1) mol_raw.AddConformer(conformer_1_before) mol_raw2.AddConformer(conformer_1_after) rmsd = AlignMol(mol_raw, mol_raw2) self.assertNotEqual(rmsd, 0) def test_export_to_sdf(self): generate_conformers_to_sdf_file(self.test_dataset_to_convert_object, "../data/test.sdf", timeout_per_molecule=40) loader = SDFLoader("../data/test.sdf", "_ID", "_Class") dataset = loader.create_dataset() All3DDescriptors().featurize(dataset) features_number = dataset.len_X()[1] self.assertEqual(features_number, 639)
[ "tensorflow.python.keras.layers.Flatten", "sklearn.ensemble.RandomForestClassifier", "tensorflow.python.keras.layers.Dense", "tensorflow.keras.optimizers.RMSprop", "tensorflow.keras.optimizers.Adadelta", "tensorflow.keras.optimizers.Adam", "tensorflow.python.keras.Sequential", "tensorflow.python.keras.layers.Dropout", "sklearn.svm.SVC", "tensorflow.python.keras.layers.Conv1D", "tensorflow.python.keras.layers.Reshape", "tensorflow.python.keras.layers.GaussianNoise" ]
src/tests/test_3d_descriptors.py
[(32, 'compoundFeaturization.rdkit3DDescriptors.ThreeDimensionalMoleculeGenerator', 'ThreeDimensionalMoleculeGenerator', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (34, 'rdkit.Chem.rdmolfiles.MolFromSmiles', 'MolFromSmiles', (['"""Cc1cc2-c3c(O)cc(cc3OC3(Oc4cc(O)ccc4-c(c1O)c23)c1ccc(O)cc1O)-c1cc2cccc(O)c2o1"""'], {}), False, 'from rdkit.Chem.rdmolfiles import MolFromSmiles\n'), (42, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['"""../data/dataset_sweet_3D_to_test.sdf"""', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (50, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['"""../data/A2780.sdf"""', '"""ChEMBL_ID"""'], {'labels_fields': "['pIC50']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (63, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['self.test_dataset_sdf', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (72, 'loaders.Loaders.CSVLoader', 'CSVLoader', (['self.test_dataset_to_fail'], {'mols_field': '"""Smiles"""', 'labels_fields': '"""Class"""', 'id_field': '"""ID"""'}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (85, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['self.test_dataset_sdf', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (95, 'loaders.Loaders.CSVLoader', 'CSVLoader', (['self.test_dataset_to_fail'], {'mols_field': '"""Smiles"""', 'labels_fields': '"""Class"""', 'id_field': '"""ID"""'}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (108, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['self.test_dataset_sdf', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (118, 'loaders.Loaders.CSVLoader', 'CSVLoader', (['self.test_dataset_to_fail'], {'mols_field': '"""Smiles"""', 'labels_fields': '"""Class"""', 'id_field': '"""ID"""'}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (131, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['self.test_dataset_sdf', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (141, 'loaders.Loaders.CSVLoader', 'CSVLoader', (['self.test_dataset_to_fail'], {'mols_field': '"""Smiles"""', 'labels_fields': '"""Class"""', 'id_field': '"""ID"""'}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (154, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['self.test_dataset_sdf', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (164, 'loaders.Loaders.CSVLoader', 'CSVLoader', (['self.test_dataset_to_fail'], {'mols_field': '"""Smiles"""', 'labels_fields': '"""Class"""', 'id_field': '"""ID"""'}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (177, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['self.test_dataset_sdf', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (187, 'loaders.Loaders.CSVLoader', 'CSVLoader', (['self.test_dataset_to_fail'], {'mols_field': '"""Smiles"""', 'labels_fields': '"""Class"""', 
'id_field': '"""ID"""'}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (200, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['self.test_dataset_sdf', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (210, 'loaders.Loaders.CSVLoader', 'CSVLoader', (['self.test_dataset_to_fail'], {'mols_field': '"""Smiles"""', 'labels_fields': '"""Class"""', 'id_field': '"""ID"""'}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (223, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['self.test_dataset_sdf', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (233, 'loaders.Loaders.CSVLoader', 'CSVLoader', (['self.test_dataset_to_fail'], {'mols_field': '"""Smiles"""', 'labels_fields': '"""Class"""', 'id_field': '"""ID"""'}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (246, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['self.test_dataset_sdf', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (256, 'loaders.Loaders.CSVLoader', 'CSVLoader', (['self.test_dataset_to_fail'], {'mols_field': '"""Smiles"""', 'labels_fields': '"""Class"""', 'id_field': '"""ID"""'}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (269, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['self.test_dataset_sdf', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (279, 'loaders.Loaders.CSVLoader', 'CSVLoader', (['self.test_dataset_to_fail'], {'mols_field': '"""Smiles"""', 'labels_fields': '"""Class"""', 'id_field': '"""ID"""'}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (292, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['self.test_dataset_sdf', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (302, 'loaders.Loaders.CSVLoader', 'CSVLoader', (['self.test_dataset_to_fail'], {'mols_field': '"""Smiles"""', 'labels_fields': '"""Class"""', 'id_field': '"""ID"""'}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (315, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['self.test_dataset_sdf', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (325, 'loaders.Loaders.CSVLoader', 'CSVLoader', (['self.test_dataset_to_fail'], {'mols_field': '"""Smiles"""', 'labels_fields': '"""Class"""', 'id_field': '"""ID"""'}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (338, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['self.test_dataset_sdf', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (347, 'loaders.Loaders.CSVLoader', 'CSVLoader', (['self.test_dataset_to_fail'], {'mols_field': '"""Smiles"""', 'labels_fields': '"""Class"""', 'id_field': '"""ID"""'}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (367, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['"""../data/dataset_sweet_3D_to_test.sdf"""', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (381, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['"""../data/dataset_sweet_3d_balanced.sdf"""', '"""_SourceID"""'], {'labels_fields': "['_SWEET']"}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (386, 'splitters.splitters.SingletaskStratifiedSplitter', 
'SingletaskStratifiedSplitter', ([], {}), False, 'from splitters.splitters import SingletaskStratifiedSplitter\n'), (394, 'sklearn.svm.SVC', 'SVC', ([], {}), False, 'from sklearn.svm import SVC\n'), (395, 'models.sklearnModels.SklearnModel', 'SklearnModel', ([], {'model': 'svm'}), False, 'from models.sklearnModels import SklearnModel\n'), (422, 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), False, 'from sklearn.ensemble import RandomForestClassifier\n'), (423, 'models.sklearnModels.SklearnModel', 'SklearnModel', ([], {'model': 'rf'}), False, 'from models.sklearnModels import SklearnModel\n'), (464, 'models.kerasModels.KerasModel', 'KerasModel', (['create_model'], {'epochs': '(50)', 'verbose': '(1)', 'optimizer': '"""adam"""'}), False, 'from models.kerasModels import KerasModel\n'), (529, 'models.kerasModels.KerasModel', 'KerasModel', (['make_cnn_model'], {'epochs': '(10)', 'verbose': '(1)', 'optimizer': '"""adam"""'}), False, 'from models.kerasModels import KerasModel\n'), (553, 'deepchem.models.MultitaskClassifier', 'MultitaskClassifier', ([], {'n_tasks': '(1)', 'n_features': 'self.train_dataset.X.shape[1]', 'layer_sizes': '[1000]'}), False, 'from deepchem.models import MultitaskClassifier\n'), (554, 'models.DeepChemModels.DeepChemModel', 'DeepChemModel', (['multitask'], {}), False, 'from models.DeepChemModels import DeepChemModel\n'), (573, 'compoundFeaturization.rdkit3DDescriptors.ThreeDimensionalMoleculeGenerator', 'ThreeDimensionalMoleculeGenerator', ([], {'n_conformations': '(20)'}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (575, 'loaders.Loaders.CSVLoader', 'CSVLoader', (['self.test_dataset_to_convert'], {'mols_field': '"""Smiles"""', 'labels_fields': '"""Class"""', 'id_field': '"""ID"""'}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (583, 'rdkit.Chem.rdmolfiles.MolFromSmiles', 'MolFromSmiles', (['"""CC(CC(C)(O)C=C)=CC=CC"""'], {}), False, 'from rdkit.Chem.rdmolfiles import MolFromSmiles\n'), (592, 'compoundFeaturization.rdkit3DDescriptors.ThreeDimensionalMoleculeGenerator', 'ThreeDimensionalMoleculeGenerator', ([], {'n_conformations': '(10)'}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (594, 'rdkit.Chem.rdmolfiles.MolFromSmiles', 'MolFromSmiles', (['"""CC(CC(C)(O)C=C)=CC=C"""'], {}), False, 'from rdkit.Chem.rdmolfiles import MolFromSmiles\n'), (595, 'rdkit.Chem.rdmolfiles.MolFromSmiles', 'MolFromSmiles', (['"""CC(CC(C)(O)C=C)=CC=C"""'], {}), False, 'from rdkit.Chem.rdmolfiles import MolFromSmiles\n'), (612, 'rdkit.Chem.rdMolAlign.AlignMol', 'AlignMol', (['mol_raw', 'mol_raw2'], {}), False, 'from rdkit.Chem.rdMolAlign import AlignMol\n'), (617, 'compoundFeaturization.rdkit3DDescriptors.generate_conformers_to_sdf_file', 'generate_conformers_to_sdf_file', (['self.test_dataset_to_convert_object', '"""../data/test.sdf"""'], {'timeout_per_molecule': '(40)'}), False, 'from 
compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (620, 'loaders.Loaders.SDFLoader', 'SDFLoader', (['"""../data/test.sdf"""', '"""_ID"""', '"""_Class"""'], {}), False, 'from loaders.Loaders import SDFLoader, CSVLoader\n'), (370, 'compoundFeaturization.rdkit3DDescriptors.All3DDescriptors', 'All3DDescriptors', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (370, 'compoundFeaturization.rdkitFingerprints.MorganFingerprint', 'MorganFingerprint', ([], {}), False, 'from compoundFeaturization.rdkitFingerprints import MorganFingerprint\n'), (397, 'metrics.Metrics.Metric', 'Metric', (['roc_auc_score'], {}), False, 'from metrics.Metrics import Metric\n'), (402, 'metrics.Metrics.Metric', 'Metric', (['roc_auc_score'], {}), False, 'from metrics.Metrics import Metric\n'), (402, 'metrics.Metrics.Metric', 'Metric', (['precision_score'], {}), False, 'from metrics.Metrics import Metric\n'), (403, 'metrics.Metrics.Metric', 'Metric', (['accuracy_score'], {}), False, 'from metrics.Metrics import Metric\n'), (403, 'metrics.Metrics.Metric', 'Metric', (['confusion_matrix'], {}), False, 'from metrics.Metrics import Metric\n'), (404, 'metrics.Metrics.Metric', 'Metric', (['classification_report'], {}), False, 'from metrics.Metrics import Metric\n'), (425, 'metrics.Metrics.Metric', 'Metric', (['roc_auc_score'], {}), False, 'from metrics.Metrics import Metric\n'), (430, 'metrics.Metrics.Metric', 'Metric', (['roc_auc_score'], {}), False, 'from metrics.Metrics import Metric\n'), (430, 'metrics.Metrics.Metric', 'Metric', (['precision_score'], {}), False, 'from metrics.Metrics import Metric\n'), (431, 'metrics.Metrics.Metric', 'Metric', (['accuracy_score'], {}), False, 'from metrics.Metrics import Metric\n'), (431, 'metrics.Metrics.Metric', 'Metric', (['confusion_matrix'], {}), False, 'from metrics.Metrics import Metric\n'), (432, 'metrics.Metrics.Metric', 'Metric', (['classification_report'], {}), False, 'from metrics.Metrics import Metric\n'), (455, 'tensorflow.python.keras.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.python.keras import Sequential\n'), (468, 'metrics.Metrics.Metric', 'Metric', (['roc_auc_score'], {}), False, 'from metrics.Metrics import Metric\n'), (468, 'metrics.Metrics.Metric', 'Metric', (['precision_score'], {}), False, 'from metrics.Metrics import Metric\n'), (469, 'metrics.Metrics.Metric', 'Metric', (['accuracy_score'], {}), False, 'from metrics.Metrics import Metric\n'), (469, 'metrics.Metrics.Metric', 'Metric', (['confusion_matrix'], {}), False, 'from metrics.Metrics import Metric\n'), (470, 'metrics.Metrics.Metric', 'Metric', (['classification_report'], {}), False, 'from metrics.Metrics import Metric\n'), (505, 'tensorflow.python.keras.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.python.keras import Sequential\n'), (533, 'metrics.Metrics.Metric', 'Metric', (['roc_auc_score'], {}), False, 'from metrics.Metrics import Metric\n'), (534, 
'metrics.Metrics.Metric', 'Metric', (['precision_score'], {}), False, 'from metrics.Metrics import Metric\n'), (535, 'metrics.Metrics.Metric', 'Metric', (['accuracy_score'], {}), False, 'from metrics.Metrics import Metric\n'), (536, 'metrics.Metrics.Metric', 'Metric', (['confusion_matrix'], {}), False, 'from metrics.Metrics import Metric\n'), (537, 'metrics.Metrics.Metric', 'Metric', (['classification_report'], {}), False, 'from metrics.Metrics import Metric\n'), (558, 'metrics.Metrics.Metric', 'Metric', (['roc_auc_score'], {}), False, 'from metrics.Metrics import Metric\n'), (558, 'metrics.Metrics.Metric', 'Metric', (['precision_score'], {}), False, 'from metrics.Metrics import Metric\n'), (558, 'metrics.Metrics.Metric', 'Metric', (['accuracy_score'], {}), False, 'from metrics.Metrics import Metric\n'), (66, 'compoundFeaturization.rdkit3DDescriptors.AutoCorr3D', 'AutoCorr3D', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (88, 'compoundFeaturization.rdkit3DDescriptors.RadialDistributionFunction', 'RadialDistributionFunction', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (111, 'compoundFeaturization.rdkit3DDescriptors.PlaneOfBestFit', 'PlaneOfBestFit', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (134, 'compoundFeaturization.rdkit3DDescriptors.MORSE', 'MORSE', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (157, 'compoundFeaturization.rdkit3DDescriptors.WHIM', 'WHIM', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (180, 'compoundFeaturization.rdkit3DDescriptors.RadiusOfGyration', 'RadiusOfGyration', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (203, 
'compoundFeaturization.rdkit3DDescriptors.InertialShapeFactor', 'InertialShapeFactor', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (226, 'compoundFeaturization.rdkit3DDescriptors.Eccentricity', 'Eccentricity', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (249, 'compoundFeaturization.rdkit3DDescriptors.Asphericity', 'Asphericity', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (272, 'compoundFeaturization.rdkit3DDescriptors.SpherocityIndex', 'SpherocityIndex', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (295, 'compoundFeaturization.rdkit3DDescriptors.PrincipalMomentsOfInertia', 'PrincipalMomentsOfInertia', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (318, 'compoundFeaturization.rdkit3DDescriptors.NormalizedPrincipalMomentsRatios', 'NormalizedPrincipalMomentsRatios', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (341, 'compoundFeaturization.rdkit3DDescriptors.All3DDescriptors', 'All3DDescriptors', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (372, 'compoundFeaturization.mixedDescriptors.MixedFeaturizer', 'MixedFeaturizer', ([], {'featurizers': 'descriptors'}), False, 'from compoundFeaturization.mixedDescriptors import MixedFeaturizer\n'), (384, 'compoundFeaturization.rdkit3DDescriptors.All3DDescriptors', 'All3DDescriptors', ([], {}), 
False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (456, 'tensorflow.python.keras.layers.Dense', 'Dense', (['(12)'], {'input_dim': 'input_dim', 'activation': '"""relu"""'}), False, 'from tensorflow.python.keras.layers import Dense, Dropout, GaussianNoise, Reshape, Conv1D, Flatten\n'), (457, 'tensorflow.python.keras.layers.Dropout', 'Dropout', (['dropout'], {}), False, 'from tensorflow.python.keras.layers import Dense, Dropout, GaussianNoise, Reshape, Conv1D, Flatten\n'), (458, 'tensorflow.python.keras.layers.Dense', 'Dense', (['(8)'], {'activation': '"""relu"""'}), False, 'from tensorflow.python.keras.layers import Dense, Dropout, GaussianNoise, Reshape, Conv1D, Flatten\n'), (459, 'tensorflow.python.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), False, 'from tensorflow.python.keras.layers import Dense, Dropout, GaussianNoise, Reshape, Conv1D, Flatten\n'), (507, 'tensorflow.python.keras.layers.GaussianNoise', 'GaussianNoise', (['g_noise'], {'input_shape': '(input_dim,)'}), False, 'from tensorflow.python.keras.layers import Dense, Dropout, GaussianNoise, Reshape, Conv1D, Flatten\n'), (509, 'tensorflow.python.keras.layers.Reshape', 'Reshape', (['(input_dim, 1)'], {}), False, 'from tensorflow.python.keras.layers import Dense, Dropout, GaussianNoise, Reshape, Conv1D, Flatten\n'), (510, 'tensorflow.python.keras.layers.Conv1D', 'Conv1D', (['C1_K', 'C1_S'], {'activation': 'activation', 'padding': '"""same"""'}), False, 'from tensorflow.python.keras.layers import Dense, Dropout, GaussianNoise, Reshape, Conv1D, Flatten\n'), (511, 'tensorflow.python.keras.layers.Conv1D', 'Conv1D', (['C2_K', 'C2_S'], {'padding': '"""same"""', 'activation': 'activation'}), False, 'from tensorflow.python.keras.layers import Dense, Dropout, GaussianNoise, Reshape, Conv1D, Flatten\n'), (512, 'tensorflow.python.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.python.keras.layers import Dense, Dropout, GaussianNoise, Reshape, Conv1D, Flatten\n'), (513, 'tensorflow.python.keras.layers.Dropout', 'Dropout', (['DROPOUT'], {}), False, 'from tensorflow.python.keras.layers import Dense, Dropout, GaussianNoise, Reshape, Conv1D, Flatten\n'), (514, 'tensorflow.python.keras.layers.Dense', 'Dense', (['DENSE'], {'activation': 'activation'}), False, 'from tensorflow.python.keras.layers import Dense, Dropout, GaussianNoise, Reshape, Conv1D, Flatten\n'), (515, 'tensorflow.python.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), False, 'from tensorflow.python.keras.layers import Dense, Dropout, GaussianNoise, Reshape, Conv1D, Flatten\n'), (517, 'tensorflow.keras.optimizers.Adadelta', 'Adadelta', ([], {'lr': 'learning_rate'}), False, 'from tensorflow.keras.optimizers import Adadelta, Adam, RMSprop\n'), (623, 'compoundFeaturization.rdkit3DDescriptors.All3DDescriptors', 'All3DDescriptors', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (80, 
'compoundFeaturization.rdkit3DDescriptors.AutoCorr3D', 'AutoCorr3D', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (103, 'compoundFeaturization.rdkit3DDescriptors.RadialDistributionFunction', 'RadialDistributionFunction', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (126, 'compoundFeaturization.rdkit3DDescriptors.PlaneOfBestFit', 'PlaneOfBestFit', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (149, 'compoundFeaturization.rdkit3DDescriptors.MORSE', 'MORSE', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (172, 'compoundFeaturization.rdkit3DDescriptors.WHIM', 'WHIM', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (195, 'compoundFeaturization.rdkit3DDescriptors.RadiusOfGyration', 'RadiusOfGyration', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (218, 'compoundFeaturization.rdkit3DDescriptors.InertialShapeFactor', 'InertialShapeFactor', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (241, 'compoundFeaturization.rdkit3DDescriptors.Eccentricity', 'Eccentricity', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, 
NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (264, 'compoundFeaturization.rdkit3DDescriptors.Asphericity', 'Asphericity', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (287, 'compoundFeaturization.rdkit3DDescriptors.SpherocityIndex', 'SpherocityIndex', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (310, 'compoundFeaturization.rdkit3DDescriptors.PrincipalMomentsOfInertia', 'PrincipalMomentsOfInertia', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (333, 'compoundFeaturization.rdkit3DDescriptors.NormalizedPrincipalMomentsRatios', 'NormalizedPrincipalMomentsRatios', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (355, 'compoundFeaturization.rdkit3DDescriptors.All3DDescriptors', 'All3DDescriptors', ([], {}), False, 'from compoundFeaturization.rdkit3DDescriptors import AutoCorr3D, All3DDescriptors, RadialDistributionFunction, PlaneOfBestFit, MORSE, WHIM, RadiusOfGyration, InertialShapeFactor, Eccentricity, Asphericity, SpherocityIndex, PrincipalMomentsOfInertia, NormalizedPrincipalMomentsRatios, ThreeDimensionalMoleculeGenerator, generate_conformers_to_sdf_file\n'), (519, 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'learning_rate'}), False, 'from tensorflow.keras.optimizers import Adadelta, Adam, RMSprop\n'), (521, 'tensorflow.keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': 'learning_rate'}), False, 'from tensorflow.keras.optimizers import Adadelta, Adam, RMSprop\n')]
ianseddon/GamestonkTerminal
89e57d8233f2085d8057d1edb48b2ca9ed54d94c
import argparse
from helper_funcs import *
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from TimeSeriesCrossValidation import splitTrain
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Activation, RepeatVector, TimeDistributed

import config_neural_network_models as cfg_nn_models
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)

# ----------------------------------------------------------------------------------------------------
def build_neural_network_model(Recurrent_Neural_Network, n_inputs, n_days):
    model = Sequential()

    for idx_layer, d_layer in enumerate(Recurrent_Neural_Network):
        # str(*d_layer) unpacks the single key of the layer dict, i.e. the layer type.
        # Compare with == here: `is` tests object identity and only matches string
        # literals by accident of interning.
        # Recurrent Neural Network
        if str(*d_layer) == 'SimpleRNN':
            # Is this the input layer? If so, define input_shape
            if idx_layer == 0:
                model.add(SimpleRNN(**d_layer['SimpleRNN'], input_shape=(n_inputs, 1)))
            # Is this the last output layer? If so, set units to prediction days
            elif idx_layer == (len(Recurrent_Neural_Network)-1):
                model.add(SimpleRNN(**d_layer['SimpleRNN'], units=n_days))
            else:
                model.add(SimpleRNN(**d_layer['SimpleRNN']))

        # Long-Short Term-Memory
        elif str(*d_layer) == 'LSTM':
            # Is this the input layer? If so, define input_shape
            if idx_layer == 0:
                model.add(LSTM(**d_layer['LSTM'], input_shape=(n_inputs, 1)))
            # Is this the last output layer? If so, set units to prediction days
            elif idx_layer == (len(Recurrent_Neural_Network)-1):
                model.add(LSTM(**d_layer['LSTM'], units=n_days))
            else:
                model.add(LSTM(**d_layer['LSTM']))

        # Dense (Simple Neuron)
        elif str(*d_layer) == 'Dense':
            # Is this the input layer? If so, define input_dim
            if idx_layer == 0:
                model.add(Dense(**d_layer['Dense'], input_dim=n_inputs))
            # Is this the last output layer? If so, set units to prediction days
            elif idx_layer == (len(Recurrent_Neural_Network)-1):
                model.add(Dense(**d_layer['Dense'], units=n_days))
            else:
                model.add(Dense(**d_layer['Dense']))

        # Dropout (Regularization)
        elif str(*d_layer) == 'Dropout':
            model.add(Dropout(**d_layer['Dropout']))

        else:
            print(f"Incorrect neuron type: {str(*d_layer)}")

    return model


# -------------------------------------------------- MLP --------------------------------------------------
def mlp(l_args, s_ticker, s_interval, df_stock):
    parser = argparse.ArgumentParser(prog='mlp', description="""Multilayer Perceptron. 
""") parser.add_argument('-d', "--days", action="store", dest="n_days", type=check_positive, default=5, help='prediction days.') parser.add_argument('-i', "--input", action="store", dest="n_inputs", type=check_positive, default=40, help='number of days to use for prediction.') parser.add_argument('-e', "--epochs", action="store", dest="n_epochs", type=check_positive, default=200, help='number of training epochs.') parser.add_argument('-j', "--jumps", action="store", dest="n_jumps", type=check_positive, default=1, help='number of jumps in training data.') parser.add_argument('-p', "--pp", action="store", dest="s_preprocessing", default='normalization', choices=['normalization', 'standardization', 'none'], help='pre-processing data.') parser.add_argument('-o', "--optimizer", action="store", dest="s_optimizer", default='adam', choices=['adam', 'adagrad', 'adadelta', 'adamax', 'ftrl', 'nadam', 'optimizer', 'rmsprop', 'sgd'], help='optimization technique.') parser.add_argument('-l', "--loss", action="store", dest="s_loss", default='mae', choices=['mae', 'mape', 'mse', 'msle'], help='loss function.') try: (ns_parser, l_unknown_args) = parser.parse_known_args(l_args) if l_unknown_args: print(f"The following args couldn't be interpreted: {l_unknown_args}\n") return # Pre-process data if ns_parser.s_preprocessing == 'standardization': scaler = StandardScaler() stock_train_data = scaler.fit_transform(np.array(df_stock['5. adjusted close'].values.reshape(-1, 1))) elif ns_parser.s_preprocessing == 'normalization': scaler = MinMaxScaler() stock_train_data = scaler.fit_transform(np.array(df_stock['5. adjusted close'].values.reshape(-1, 1))) else: # No pre-processing stock_train_data = np.array(df_stock['5. adjusted close'].values.reshape(-1, 1)) # Split training data for the neural network stock_x, stock_y = splitTrain.split_train(stock_train_data, ns_parser.n_inputs, ns_parser.n_days, numJumps=ns_parser.n_jumps) stock_x = np.array(stock_x) stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1])) stock_y = np.array(stock_y) stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1])) # Build Neural Network model model = build_neural_network_model(cfg_nn_models.MultiLayer_Perceptron, ns_parser.n_inputs, ns_parser.n_days) model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss) # Train our model model.fit(stock_x, stock_y, epochs=ns_parser.n_epochs, verbose=1); print("") print(model.summary()) print("") # Prediction yhat = model.predict(stock_train_data[-ns_parser.n_inputs:].reshape(1, ns_parser.n_inputs), verbose=0) # Re-scale the data back if (ns_parser.s_preprocessing == 'standardization') or (ns_parser.s_preprocessing == 'normalization'): y_pred_test_t = scaler.inverse_transform(yhat.tolist()) else: y_pred_test_t = yhat l_pred_days = get_next_stock_market_days(last_stock_day=df_stock['5. adjusted close'].index[-1], n_next_days=ns_parser.n_days) df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name='Price') # Plotting plt.plot(df_stock.index, df_stock['5. adjusted close'], lw=3) plt.title(f"MLP on {s_ticker} - {ns_parser.n_days} days prediction") plt.xlim(df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1]) plt.xlabel('Time') plt.ylabel('Share Price ($)') plt.grid(b=True, which='major', color='#666666', linestyle='-') plt.minorticks_on() plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2) plt.plot([df_stock.index[-1], df_pred.index[0]], [df_stock['5. 
adjusted close'].values[-1], df_pred.values[0]], lw=1, c='tab:green', linestyle='--') plt.plot(df_pred.index, df_pred, lw=2, c='tab:green') plt.axvspan(df_stock.index[-1], df_pred.index[-1], facecolor='tab:orange', alpha=0.2) xmin, xmax, ymin, ymax = plt.axis() plt.vlines(df_stock.index[-1], ymin, ymax, colors='k', linewidth=3, linestyle='--', color='k') plt.show() # Print prediction data print("Predicted share price:") df_pred = df_pred.apply(lambda x: f"{x:.2f} $") print(df_pred.to_string()) print("") except: print("") # -------------------------------------------------- RNN -------------------------------------------------- def rnn(l_args, s_ticker, s_interval, df_stock): parser = argparse.ArgumentParser(prog='rnn', description="""Recurrent Neural Network. """) parser.add_argument('-d', "--days", action="store", dest="n_days", type=check_positive, default=5, help='prediction days.') parser.add_argument('-i', "--input", action="store", dest="n_inputs", type=check_positive, default=40, help='number of days to use for prediction.') parser.add_argument('-e', "--epochs", action="store", dest="n_epochs", type=check_positive, default=200, help='number of training epochs.') parser.add_argument('-j', "--jumps", action="store", dest="n_jumps", type=check_positive, default=1, help='number of jumps in training data.') parser.add_argument('-p', "--pp", action="store", dest="s_preprocessing", default='normalization', choices=['normalization', 'standardization', 'none'], help='pre-processing data.') parser.add_argument('-o', "--optimizer", action="store", dest="s_optimizer", default='adam', help='optimizer technique', choices=['adam', 'adagrad', 'adadelta', 'adamax', 'ftrl', 'nadam', 'optimizer', 'rmsprop', 'sgd']) parser.add_argument('-l', "--loss", action="store", dest="s_loss", default='mae', choices=['mae', 'mape', 'mse', 'msle'], help='loss function.') try: (ns_parser, l_unknown_args) = parser.parse_known_args(l_args) if l_unknown_args: print(f"The following args couldn't be interpreted: {l_unknown_args}\n") return # Pre-process data if ns_parser.s_preprocessing == 'standardization': scaler = StandardScaler() stock_train_data = scaler.fit_transform(np.array(df_stock['5. adjusted close'].values.reshape(-1, 1))) elif ns_parser.s_preprocessing == 'normalization': scaler = MinMaxScaler() stock_train_data = scaler.fit_transform(np.array(df_stock['5. adjusted close'].values.reshape(-1, 1))) else: # No pre-processing stock_train_data = np.array(df_stock['5. 
adjusted close'].values.reshape(-1, 1)) # Split training data for the neural network stock_x, stock_y = splitTrain.split_train(stock_train_data, ns_parser.n_inputs, ns_parser.n_days, numJumps=ns_parser.n_jumps) stock_x = np.array(stock_x) stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1], 1)) stock_y = np.array(stock_y) stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1], 1)) # Build Neural Network model model = build_neural_network_model(cfg_nn_models.Recurrent_Neural_Network, ns_parser.n_inputs, ns_parser.n_days) model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss) # Train our model model.fit(stock_x, stock_y, epochs=ns_parser.n_epochs, verbose=1); print("") print(model.summary()) print("") # Prediction yhat = model.predict(stock_train_data[-ns_parser.n_inputs:].reshape(1, ns_parser.n_inputs, 1), verbose=0) # Re-scale the data back if (ns_parser.s_preprocessing == 'standardization') or (ns_parser.s_preprocessing == 'normalization'): y_pred_test_t = scaler.inverse_transform(yhat.tolist()) else: y_pred_test_t = yhat l_pred_days = get_next_stock_market_days(last_stock_day=df_stock['5. adjusted close'].index[-1], n_next_days=ns_parser.n_days) df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name='Price') # Plotting plt.plot(df_stock.index, df_stock['5. adjusted close'], lw=3) plt.title(f"RNN on {s_ticker} - {ns_parser.n_days} days prediction") plt.xlim(df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1]) plt.xlabel('Time') plt.ylabel('Share Price ($)') plt.grid(b=True, which='major', color='#666666', linestyle='-') plt.minorticks_on() plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2) plt.plot([df_stock.index[-1], df_pred.index[0]], [df_stock['5. adjusted close'].values[-1], df_pred.values[0]], lw=1, c='tab:green', linestyle='--') plt.plot(df_pred.index, df_pred, lw=2, c='tab:green') plt.axvspan(df_stock.index[-1], df_pred.index[-1], facecolor='tab:orange', alpha=0.2) xmin, xmax, ymin, ymax = plt.axis() plt.vlines(df_stock.index[-1], ymin, ymax, colors='k', linewidth=3, linestyle='--', color='k') plt.show() # Print prediction data print("Predicted share price:") df_pred = df_pred.apply(lambda x: f"{x:.2f} $") print(df_pred.to_string()) print("") except: print("") # -------------------------------------------------- LSTM -------------------------------------------------- def lstm(l_args, s_ticker, s_interval, df_stock): parser = argparse.ArgumentParser(prog='lstm', description="""Long-Short Term Memory. 
""") parser.add_argument('-d', "--days", action="store", dest="n_days", type=check_positive, default=5, help='prediction days') parser.add_argument('-i', "--input", action="store", dest="n_inputs", type=check_positive, default=40, help='number of days to use for prediction.') parser.add_argument('-e', "--epochs", action="store", dest="n_epochs", type=check_positive, default=200, help='number of training epochs.') parser.add_argument('-j', "--jumps", action="store", dest="n_jumps", type=check_positive, default=1, help='number of jumps in training data.') parser.add_argument('-p', "--pp", action="store", dest="s_preprocessing", default='normalization', choices=['normalization', 'standardization', 'none'], help='pre-processing data.') parser.add_argument('-o', "--optimizer", action="store", dest="s_optimizer", default='adam', help='optimization technique.', choices=['adam', 'adagrad', 'adadelta', 'adamax', 'ftrl', 'nadam', 'optimizer', 'rmsprop', 'sgd']) parser.add_argument('-l', "--loss", action="store", dest="s_loss", default='mae', choices=['mae', 'mape', 'mse', 'msle'], help='loss function.') try: (ns_parser, l_unknown_args) = parser.parse_known_args(l_args) if l_unknown_args: print(f"The following args couldn't be interpreted: {l_unknown_args}\n") return # Pre-process data if ns_parser.s_preprocessing == 'standardization': scaler = StandardScaler() stock_train_data = scaler.fit_transform(np.array(df_stock['5. adjusted close'].values.reshape(-1, 1))) elif ns_parser.s_preprocessing == 'normalization': scaler = MinMaxScaler() stock_train_data = scaler.fit_transform(np.array(df_stock['5. adjusted close'].values.reshape(-1, 1))) else: # No pre-processing stock_train_data = np.array(df_stock['5. adjusted close'].values.reshape(-1, 1)) # Split training data for the neural network stock_x, stock_y = splitTrain.split_train(stock_train_data, ns_parser.n_inputs, ns_parser.n_days, numJumps=ns_parser.n_jumps) stock_x = np.array(stock_x) stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1], 1)) stock_y = np.array(stock_y) stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1], 1)) # Build Neural Network model model = build_neural_network_model(cfg_nn_models.Long_Short_Term_Memory, ns_parser.n_inputs, ns_parser.n_days) model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss) # Train our model model.fit(stock_x, stock_y, epochs=ns_parser.n_epochs, verbose=1); print("") print(model.summary()) print("") # Prediction yhat = model.predict(stock_train_data[-ns_parser.n_inputs:].reshape(1, ns_parser.n_inputs, 1), verbose=0) # Re-scale the data back if (ns_parser.s_preprocessing == 'standardization') or (ns_parser.s_preprocessing == 'normalization'): y_pred_test_t = scaler.inverse_transform(yhat.tolist()) else: y_pred_test_t = yhat l_pred_days = get_next_stock_market_days(last_stock_day=df_stock['5. adjusted close'].index[-1], n_next_days=ns_parser.n_days) df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name='Price') # Plotting plt.plot(df_stock.index, df_stock['5. adjusted close'], lw=3) plt.title(f"LSTM on {s_ticker} - {ns_parser.n_days} days prediction") plt.xlim(df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1]) plt.xlabel('Time') plt.ylabel('Share Price ($)') plt.grid(b=True, which='major', color='#666666', linestyle='-') plt.minorticks_on() plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2) plt.plot([df_stock.index[-1], df_pred.index[0]], [df_stock['5. 
adjusted close'].values[-1], df_pred.values[0]], lw=1, c='tab:green', linestyle='--') plt.plot(df_pred.index, df_pred, lw=2, c='tab:green') plt.axvspan(df_stock.index[-1], df_pred.index[-1], facecolor='tab:orange', alpha=0.2) xmin, xmax, ymin, ymax = plt.axis() plt.vlines(df_stock.index[-1], ymin, ymax, colors='k', linewidth=3, linestyle='--', color='k') plt.show() # Print prediction data print("Predicted share price:") df_pred = df_pred.apply(lambda x: f"{x:.2f} $") print(df_pred.to_string()) print("") except: print("")
[ "matplotlib.pyplot.minorticks_on", "matplotlib.pyplot.plot", "sklearn.preprocessing.MinMaxScaler", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.SimpleRNN", "numpy.reshape", "pandas.plotting.register_matplotlib_converters", "matplotlib.pyplot.axis", "matplotlib.pyplot.vlines", "tensorflow.keras.models.Sequential", "matplotlib.pyplot.title", "tensorflow.keras.layers.Dense", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.axvspan", "matplotlib.pyplot.ylabel", "tensorflow.keras.layers.LSTM", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "sklearn.preprocessing.StandardScaler" ]
prediction_techniques/neural_networks.py
[(7, 'pandas.plotting.register_matplotlib_converters', 'register_matplotlib_converters', ([], {}), False, 'from pandas.plotting import register_matplotlib_converters\n'), (21, 'warnings.simplefilter', 'simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), False, 'from warnings import simplefilter\n'), (26, 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras.models import Sequential\n'), (74, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""mlp"""', 'description': '"""Multilayer Perceptron. """'}), False, 'import argparse\n'), (167, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""rnn"""', 'description': '"""Recurrent Neural Network. """'}), False, 'import argparse\n'), (260, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""lstm"""', 'description': '"""Long-Short Term Memory. """'}), False, 'import argparse\n'), (110, 'TimeSeriesCrossValidation.splitTrain.split_train', 'splitTrain.split_train', (['stock_train_data', 'ns_parser.n_inputs', 'ns_parser.n_days'], {'numJumps': 'ns_parser.n_jumps'}), False, 'from TimeSeriesCrossValidation import splitTrain\n'), (111, 'numpy.array', 'np.array', (['stock_x'], {}), True, 'import numpy as np\n'), (112, 'numpy.reshape', 'np.reshape', (['stock_x', '(stock_x.shape[0], stock_x.shape[1])'], {}), True, 'import numpy as np\n'), (113, 'numpy.array', 'np.array', (['stock_y'], {}), True, 'import numpy as np\n'), (114, 'numpy.reshape', 'np.reshape', (['stock_y', '(stock_y.shape[0], stock_y.shape[1])'], {}), True, 'import numpy as np\n'), (140, 'matplotlib.pyplot.plot', 'plt.plot', (['df_stock.index', "df_stock['5. adjusted close']"], {'lw': '(3)'}), True, 'import matplotlib.pyplot as plt\n'), (141, 'matplotlib.pyplot.title', 'plt.title', (['f"""MLP on {s_ticker} - {ns_parser.n_days} days prediction"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (143, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (144, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Share Price ($)"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (145, 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""major"""', 'color': '"""#666666"""', 'linestyle': '"""-"""'}), True, 'import matplotlib.pyplot as plt\n'), (146, 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (147, 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""minor"""', 'color': '"""#999999"""', 'linestyle': '"""-"""', 'alpha': '(0.2)'}), True, 'import matplotlib.pyplot as plt\n'), (148, 'matplotlib.pyplot.plot', 'plt.plot', (['[df_stock.index[-1], df_pred.index[0]]', "[df_stock['5. 
adjusted close'].values[-1], df_pred.values[0]]"], {'lw': '(1)', 'c': '"""tab:green"""', 'linestyle': '"""--"""'}), True, 'import matplotlib.pyplot as plt\n'), (149, 'matplotlib.pyplot.plot', 'plt.plot', (['df_pred.index', 'df_pred'], {'lw': '(2)', 'c': '"""tab:green"""'}), True, 'import matplotlib.pyplot as plt\n'), (150, 'matplotlib.pyplot.axvspan', 'plt.axvspan', (['df_stock.index[-1]', 'df_pred.index[-1]'], {'facecolor': '"""tab:orange"""', 'alpha': '(0.2)'}), True, 'import matplotlib.pyplot as plt\n'), (151, 'matplotlib.pyplot.axis', 'plt.axis', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (152, 'matplotlib.pyplot.vlines', 'plt.vlines', (['df_stock.index[-1]', 'ymin', 'ymax'], {'colors': '"""k"""', 'linewidth': '(3)', 'linestyle': '"""--"""', 'color': '"""k"""'}), True, 'import matplotlib.pyplot as plt\n'), (153, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (203, 'TimeSeriesCrossValidation.splitTrain.split_train', 'splitTrain.split_train', (['stock_train_data', 'ns_parser.n_inputs', 'ns_parser.n_days'], {'numJumps': 'ns_parser.n_jumps'}), False, 'from TimeSeriesCrossValidation import splitTrain\n'), (204, 'numpy.array', 'np.array', (['stock_x'], {}), True, 'import numpy as np\n'), (205, 'numpy.reshape', 'np.reshape', (['stock_x', '(stock_x.shape[0], stock_x.shape[1], 1)'], {}), True, 'import numpy as np\n'), (206, 'numpy.array', 'np.array', (['stock_y'], {}), True, 'import numpy as np\n'), (207, 'numpy.reshape', 'np.reshape', (['stock_y', '(stock_y.shape[0], stock_y.shape[1], 1)'], {}), True, 'import numpy as np\n'), (233, 'matplotlib.pyplot.plot', 'plt.plot', (['df_stock.index', "df_stock['5. adjusted close']"], {'lw': '(3)'}), True, 'import matplotlib.pyplot as plt\n'), (234, 'matplotlib.pyplot.title', 'plt.title', (['f"""RNN on {s_ticker} - {ns_parser.n_days} days prediction"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (236, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (237, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Share Price ($)"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (238, 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""major"""', 'color': '"""#666666"""', 'linestyle': '"""-"""'}), True, 'import matplotlib.pyplot as plt\n'), (239, 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (240, 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""minor"""', 'color': '"""#999999"""', 'linestyle': '"""-"""', 'alpha': '(0.2)'}), True, 'import matplotlib.pyplot as plt\n'), (241, 'matplotlib.pyplot.plot', 'plt.plot', (['[df_stock.index[-1], df_pred.index[0]]', "[df_stock['5. 
adjusted close'].values[-1], df_pred.values[0]]"], {'lw': '(1)', 'c': '"""tab:green"""', 'linestyle': '"""--"""'}), True, 'import matplotlib.pyplot as plt\n'), (242, 'matplotlib.pyplot.plot', 'plt.plot', (['df_pred.index', 'df_pred'], {'lw': '(2)', 'c': '"""tab:green"""'}), True, 'import matplotlib.pyplot as plt\n'), (243, 'matplotlib.pyplot.axvspan', 'plt.axvspan', (['df_stock.index[-1]', 'df_pred.index[-1]'], {'facecolor': '"""tab:orange"""', 'alpha': '(0.2)'}), True, 'import matplotlib.pyplot as plt\n'), (244, 'matplotlib.pyplot.axis', 'plt.axis', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (245, 'matplotlib.pyplot.vlines', 'plt.vlines', (['df_stock.index[-1]', 'ymin', 'ymax'], {'colors': '"""k"""', 'linewidth': '(3)', 'linestyle': '"""--"""', 'color': '"""k"""'}), True, 'import matplotlib.pyplot as plt\n'), (246, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (296, 'TimeSeriesCrossValidation.splitTrain.split_train', 'splitTrain.split_train', (['stock_train_data', 'ns_parser.n_inputs', 'ns_parser.n_days'], {'numJumps': 'ns_parser.n_jumps'}), False, 'from TimeSeriesCrossValidation import splitTrain\n'), (297, 'numpy.array', 'np.array', (['stock_x'], {}), True, 'import numpy as np\n'), (298, 'numpy.reshape', 'np.reshape', (['stock_x', '(stock_x.shape[0], stock_x.shape[1], 1)'], {}), True, 'import numpy as np\n'), (299, 'numpy.array', 'np.array', (['stock_y'], {}), True, 'import numpy as np\n'), (300, 'numpy.reshape', 'np.reshape', (['stock_y', '(stock_y.shape[0], stock_y.shape[1], 1)'], {}), True, 'import numpy as np\n'), (326, 'matplotlib.pyplot.plot', 'plt.plot', (['df_stock.index', "df_stock['5. adjusted close']"], {'lw': '(3)'}), True, 'import matplotlib.pyplot as plt\n'), (327, 'matplotlib.pyplot.title', 'plt.title', (['f"""LSTM on {s_ticker} - {ns_parser.n_days} days prediction"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (329, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (330, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Share Price ($)"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (331, 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""major"""', 'color': '"""#666666"""', 'linestyle': '"""-"""'}), True, 'import matplotlib.pyplot as plt\n'), (332, 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (333, 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""minor"""', 'color': '"""#999999"""', 'linestyle': '"""-"""', 'alpha': '(0.2)'}), True, 'import matplotlib.pyplot as plt\n'), (334, 'matplotlib.pyplot.plot', 'plt.plot', (['[df_stock.index[-1], df_pred.index[0]]', "[df_stock['5. 
adjusted close'].values[-1], df_pred.values[0]]"], {'lw': '(1)', 'c': '"""tab:green"""', 'linestyle': '"""--"""'}), True, 'import matplotlib.pyplot as plt\n'), (335, 'matplotlib.pyplot.plot', 'plt.plot', (['df_pred.index', 'df_pred'], {'lw': '(2)', 'c': '"""tab:green"""'}), True, 'import matplotlib.pyplot as plt\n'), (336, 'matplotlib.pyplot.axvspan', 'plt.axvspan', (['df_stock.index[-1]', 'df_pred.index[-1]'], {'facecolor': '"""tab:orange"""', 'alpha': '(0.2)'}), True, 'import matplotlib.pyplot as plt\n'), (337, 'matplotlib.pyplot.axis', 'plt.axis', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (338, 'matplotlib.pyplot.vlines', 'plt.vlines', (['df_stock.index[-1]', 'ymin', 'ymax'], {'colors': '"""k"""', 'linewidth': '(3)', 'linestyle': '"""--"""', 'color': '"""k"""'}), True, 'import matplotlib.pyplot as plt\n'), (339, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (101, 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), (194, 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), (287, 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), (104, 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), (197, 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), (290, 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), (33, 'tensorflow.keras.layers.SimpleRNN', 'SimpleRNN', ([], {'input_shape': '(n_inputs, 1)'}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Activation, RepeatVector, TimeDistributed\n'), (36, 'tensorflow.keras.layers.SimpleRNN', 'SimpleRNN', ([], {'units': 'n_days'}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Activation, RepeatVector, TimeDistributed\n'), (38, 'tensorflow.keras.layers.SimpleRNN', 'SimpleRNN', ([], {}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Activation, RepeatVector, TimeDistributed\n'), (44, 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'input_shape': '(n_inputs, 1)'}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Activation, RepeatVector, TimeDistributed\n'), (47, 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'units': 'n_days'}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Activation, RepeatVector, TimeDistributed\n'), (49, 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Activation, RepeatVector, TimeDistributed\n'), (55, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'input_dim': 'n_inputs'}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Activation, RepeatVector, TimeDistributed\n'), (64, 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Activation, RepeatVector, TimeDistributed\n'), (58, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'n_days'}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, 
Dropout, Activation, RepeatVector, TimeDistributed\n'), (60, 'tensorflow.keras.layers.Dense', 'Dense', ([], {}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Activation, RepeatVector, TimeDistributed\n')]
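The layer lists consumed by build_neural_network_model in the row above are lists of single-key dicts: the key names a Keras layer type and the value holds that layer's keyword arguments, with input_shape/input_dim and the output units filled in automatically. A minimal sketch of that convention follows; the layer settings are invented for illustration (the real lists live in config_neural_network_models):

# Hypothetical config in the style of config_neural_network_models.
MultiLayer_Perceptron = [
    {'Dense': {'units': 50, 'activation': 'relu'}},  # idx 0 -> also gets input_dim=n_inputs
    {'Dropout': {'rate': 0.2}},
    {'Dense': {'activation': 'linear'}},             # last idx -> also gets units=n_days
]

# The last Dense dict must omit 'units', since the builder injects units=n_days.
model = build_neural_network_model(MultiLayer_Perceptron, n_inputs=40, n_days=5)
model.compile(optimizer='adam', loss='mae')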
Van-an/tensorflow
322463c34a2fff12c8a8fd47b0ae99d7e1de1734
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorFlow Lite Python Interface: Sanity check.""" import numpy as np from tensorflow.lite.python import convert from tensorflow.lite.python import op_hint from tensorflow.lite.python.interpreter import Interpreter from tensorflow.python.client import session from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.framework.graph_util_impl import _bfs_for_reachable_nodes from tensorflow.python.framework.graph_util_impl import _extract_graph_summary from tensorflow.python.framework.graph_util_impl import _node_name from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class ConvertTest(test_util.TensorFlowTestCase): def testBasic(self): with ops.Graph().as_default(): in_tensor = array_ops.placeholder( shape=[1, 16, 16, 3], dtype=dtypes.float32) out_tensor = in_tensor + in_tensor sess = session.Session() # Try running on valid graph tflite_model = convert.toco_convert(sess.graph_def, [in_tensor], [out_tensor]) self.assertTrue(tflite_model) def testQuantization(self): with ops.Graph().as_default(): in_tensor = array_ops.placeholder( shape=[1, 16, 16, 3], dtype=dtypes.float32) out_tensor = array_ops.fake_quant_with_min_max_args( in_tensor + in_tensor, min=0., max=1.) sess = session.Session() tflite_model = convert.toco_convert( sess.graph_def, [in_tensor], [out_tensor], inference_type=dtypes.uint8, quantized_input_stats=[(0., 1.)]) self.assertTrue(tflite_model) def testGraphDefBasic(self): with ops.Graph().as_default(): in_tensor = array_ops.placeholder( shape=[1, 16, 16, 3], dtype=dtypes.float32, name="input") _ = in_tensor + in_tensor sess = session.Session() tflite_model = convert.toco_convert_graph_def( sess.graph_def, [("input", [1, 16, 16, 3])], ["add"], enable_mlir_converter=False, control_output_arrays=None, inference_type=dtypes.float32) self.assertTrue(tflite_model) # Check values from converted model. 
interpreter = Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertEqual(1, len(input_details)) self.assertEqual("input", input_details[0]["name"]) self.assertEqual(np.float32, input_details[0]["dtype"]) self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all()) self.assertEqual((0., 0.), input_details[0]["quantization"]) output_details = interpreter.get_output_details() self.assertEqual(1, len(output_details)) self.assertEqual("add", output_details[0]["name"]) self.assertEqual(np.float32, output_details[0]["dtype"]) self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all()) self.assertEqual((0., 0.), output_details[0]["quantization"]) def testGraphDefQuantization(self): with ops.Graph().as_default(): in_tensor_1 = array_ops.placeholder( shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputA") in_tensor_2 = array_ops.placeholder( shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputB") _ = array_ops.fake_quant_with_min_max_args( in_tensor_1 + in_tensor_2, min=0., max=1., name="output") sess = session.Session() input_arrays_map = [("inputA", [1, 16, 16, 3]), ("inputB", [1, 16, 16, 3])] output_arrays = ["output"] tflite_model = convert.toco_convert_graph_def( sess.graph_def, input_arrays_map, output_arrays, enable_mlir_converter=False, control_output_arrays=None, inference_type=dtypes.uint8, quantized_input_stats=[(0., 1.), (0., 1.)]) self.assertTrue(tflite_model) # Check values from converted model. interpreter = Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertEqual(2, len(input_details)) self.assertEqual("inputA", input_details[0]["name"]) self.assertEqual(np.uint8, input_details[0]["dtype"]) self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all()) self.assertEqual((1., 0.), input_details[0]["quantization"]) # scale, zero_point self.assertEqual("inputB", input_details[1]["name"]) self.assertEqual(np.uint8, input_details[1]["dtype"]) self.assertTrue(([1, 16, 16, 3] == input_details[1]["shape"]).all()) self.assertEqual((1., 0.), input_details[1]["quantization"]) # scale, zero_point output_details = interpreter.get_output_details() self.assertEqual(1, len(output_details)) self.assertEqual("output", output_details[0]["name"]) self.assertEqual(np.uint8, output_details[0]["dtype"]) self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all()) self.assertGreater(output_details[0]["quantization"][0], 0) # scale def testGraphDefQuantizationInvalid(self): with ops.Graph().as_default(): in_tensor_1 = array_ops.placeholder( shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputA") in_tensor_2 = array_ops.placeholder( shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputB") _ = array_ops.fake_quant_with_min_max_args( in_tensor_1 + in_tensor_2, min=0., max=1., name="output") sess = session.Session() input_arrays_map = [("inputA", [1, 16, 16, 3]), ("inputB", [1, 16, 16, 3])] output_arrays = ["output"] with self.assertRaises(ValueError) as error: convert.toco_convert_graph_def( sess.graph_def, input_arrays_map, output_arrays, enable_mlir_converter=False, control_output_arrays=None, inference_type=dtypes.uint8) self.assertEqual( "The `quantized_input_stats` flag must be defined when either " "`inference_type` flag or `inference_input_type` flag is set to " "tf.int8 or tf.uint8.", str(error.exception)) class ConvertTestOpHint(test_util.TensorFlowTestCase): """Test the hint to stub functionality.""" def 
_getGraphOpTypes(self, graphdef, output_nodes): """Returns used op types in `graphdef` reachable from `output_nodes`. This is used to check that after the stub transformation the expected nodes are there. NOTE: this is not a exact test that the graph is the correct output, but it balances compact expressibility of test with sanity checking. Args: graphdef: TensorFlow proto graphdef. output_nodes: A list of output node names that we need to reach. Returns: A set of node types reachable from `output_nodes`. """ name_to_input_name, name_to_node, _ = ( _extract_graph_summary(graphdef)) # Find all nodes that are needed by the outputs used_node_names = _bfs_for_reachable_nodes(output_nodes, name_to_input_name) return set([name_to_node[node_name].op for node_name in used_node_names]) def _countIdentities(self, nodes): """Count the number of "Identity" op types in the list of proto nodes. Args: nodes: NodeDefs of the graph. Returns: The number of nodes with op type "Identity" found. """ return len([x for x in nodes if x.op == "Identity"]) def testSwishLiteHint(self): """Makes a custom op swish and makes sure it gets converted as a unit.""" with ops.Graph().as_default(): image = array_ops.constant([1., 2., 3., 4.]) swish_scale = array_ops.constant(1.0) def _swish(input_tensor, scale): custom = op_hint.OpHint("cool_activation") input_tensor, scale = custom.add_inputs(input_tensor, scale) output = math_ops.sigmoid(input_tensor) * input_tensor * scale output, = custom.add_outputs(output) return output output = array_ops.identity( _swish(image, swish_scale), name="ModelOutput") with self.cached_session() as sess: # check if identities have been put into the graph (2 input, 1 output, # and 1 final output). self.assertEqual(self._countIdentities(sess.graph_def.node), 4) stubbed_graphdef = op_hint.convert_op_hints_to_stubs( graph_def=sess.graph_def) self.assertEqual( self._getGraphOpTypes( stubbed_graphdef, output_nodes=[op_hint._tensor_name_base(output.name)]), set(["cool_activation", "Const", "Identity"])) def testScaleAndBiasAndIdentity(self): """This tests a scaled add which has 3 inputs and 2 outputs.""" with ops.Graph().as_default(): a = array_ops.constant(1.) 
x = array_ops.constant([2., 3.]) b = array_ops.constant([4., 5.]) def _scaled_and_bias_and_identity(a, x, b): custom = op_hint.OpHint("scale_and_bias_and_identity") a, x, b = custom.add_inputs(a, x, b) return custom.add_outputs(a * x + b, x) output = array_ops.identity( _scaled_and_bias_and_identity(a, x, b), name="ModelOutput") with self.cached_session() as sess: # make sure one identity for each input (3) and output (2) => 3 + 2 = 5 # +1 for the final output self.assertEqual(self._countIdentities(sess.graph_def.node), 6) stubbed_graphdef = op_hint.convert_op_hints_to_stubs( graph_def=sess.graph_def) self.assertEqual( self._getGraphOpTypes( stubbed_graphdef, output_nodes=[op_hint._tensor_name_base(output.name)]), set(["scale_and_bias_and_identity", "Const", "Identity", "Pack"])) def testTwoFunctions(self): """Tests if two functions are converted correctly.""" with ops.Graph().as_default(): a = array_ops.constant([1.]) b = array_ops.constant([1.]) def _double_values(x): custom = op_hint.OpHint("add_test") x, = custom.add_inputs(x) output = math_ops.multiply(x, x) output, = custom.add_outputs(output) return output output = array_ops.identity( math_ops.add(_double_values(a), _double_values(b)), name="ModelOutput") with self.cached_session() as sess: # make sure one identity for each input (2) and output (2) => 2 + 2 # +1 for the final output self.assertEqual(self._countIdentities(sess.graph_def.node), 5) stubbed_graphdef = op_hint.convert_op_hints_to_stubs( graph_def=sess.graph_def) self.assertEqual( self._getGraphOpTypes( stubbed_graphdef, output_nodes=[op_hint._tensor_name_base(output.name)]), set(["add_test", "Const", "Identity", "AddV2"])) def _get_input_index(self, x): return x.op.node_def.attr[op_hint.OpHint.FUNCTION_INPUT_INDEX_ATTR].i def _get_output_index(self, x): return x.op.node_def.attr[op_hint.OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i def _get_sort_index(self, x): return x.op.node_def.attr[op_hint.OpHint.FUNCTION_SORT_INDEX_ATTR].i def testTags(self): """Test if multiple args with the same tag are grouped.""" with ops.Graph().as_default(): a = array_ops.constant([1.]) b = array_ops.constant([2.]) c = array_ops.constant([3.]) d = array_ops.constant([4.]) custom = op_hint.OpHint("test_tag") a = custom.add_input( a, tag="mytag", aggregate=op_hint.OpHint.AGGREGATE_STACK) b, = custom.add_inputs(b) c = custom.add_input( c, tag="mytag", aggregate=op_hint.OpHint.AGGREGATE_STACK) d = custom.add_input( d, tag="mytag2", aggregate=op_hint.OpHint.AGGREGATE_STACK) res = math_ops.add(math_ops.mul(a, b), math_ops.mul(c, b)) custom.add_outputs([res]) with self.cached_session(): self.assertEqual(self._get_input_index(a), 0) self.assertEqual(self._get_sort_index(a), 0) self.assertEqual(self._get_input_index(b), 1) self.assertEqual(self._get_sort_index(b), 0) self.assertEqual(self._get_input_index(c), 0) self.assertEqual(self._get_sort_index(c), 1) def testOverrideIndex(self): with ops.Graph().as_default(): a = array_ops.constant([1.]) b = array_ops.constant([2.]) c = array_ops.constant([3.]) custom = op_hint.OpHint("test_override") b = custom.add_input(b) # should auto assign 0 a = custom.add_input(a, index_override=1) c = custom.add_input(c) # should auto assign 2 with self.cached_session(): self.assertEqual(self._get_input_index(a), 1) self.assertEqual(self._get_input_index(b), 0) self.assertEqual(self._get_input_index(c), 2) def testAggregate(self): with ops.Graph().as_default(): a = array_ops.constant([3., 4.]) b = array_ops.constant([5., 6.]) hint = op_hint.OpHint("agg") a0, a1 = 
array_ops.unstack(a) b0, b1 = array_ops.unstack(b) a0 = hint.add_input(a0, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK) b0 = hint.add_input(b0, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK) a1 = hint.add_input(a1, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK) b1 = hint.add_input(b1, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK) c0 = math_ops.add(a0, b0, name="addleft") c1 = math_ops.add(a1, b1, name="addright") c0 = hint.add_output( c0, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK) c1 = hint.add_output( c1, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK) curr = array_ops.stack([c0, c1]) output = array_ops.identity(curr, name="FINAL_OUTPUT") with self.cached_session() as sess: stubbed_graphdef = op_hint.convert_op_hints_to_stubs( graph_def=sess.graph_def) self.assertEqual( self._getGraphOpTypes( stubbed_graphdef, output_nodes=[op_hint._tensor_name_base(output.name)]), set(["agg", "Const", "Identity"])) def testFindHintedOutputNodes(self): """Test if all hinted output nodes are correctly found.""" with ops.Graph().as_default(): def _build_ophinted_op(name, input1, input2): custom_op = op_hint.OpHint(name) input1 = custom_op.add_input(input1) input2 = custom_op.add_input(input2) output = math_ops.mul(input1, input2) return custom_op.add_output(output) output_1 = _build_ophinted_op("custom_op_1", array_ops.constant([1.]), array_ops.constant([2.])) output_2 = _build_ophinted_op("custom_op_2", array_ops.constant([3.]), array_ops.constant([4.])) with self.cached_session() as sess: hinted_outputs_nodes = op_hint.find_all_hinted_output_nodes(sess) expected_hinted_output_nodes = [ _node_name(output_1.name), _node_name(output_2.name) ] self.assertEqual( len(hinted_outputs_nodes), len(expected_hinted_output_nodes)) if __name__ == "__main__": test.main()
[ "tensorflow.python.ops.array_ops.constant", "tensorflow.lite.python.op_hint._tensor_name_base", "tensorflow.lite.python.interpreter.Interpreter", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.array_ops.fake_quant_with_min_max_args", "tensorflow.python.framework.graph_util_impl._node_name", "tensorflow.python.ops.array_ops.unstack", "tensorflow.python.ops.math_ops.add", "tensorflow.python.platform.test.main", "tensorflow.python.framework.graph_util_impl._bfs_for_reachable_nodes", "tensorflow.lite.python.convert.toco_convert", "tensorflow.python.framework.graph_util_impl._extract_graph_summary", "tensorflow.lite.python.op_hint.find_all_hinted_output_nodes", "tensorflow.python.ops.math_ops.mul", "tensorflow.python.client.session.Session", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.ops.math_ops.sigmoid", "tensorflow.lite.python.op_hint.convert_op_hints_to_stubs", "tensorflow.python.framework.ops.Graph", "tensorflow.lite.python.op_hint.OpHint", "tensorflow.python.ops.math_ops.multiply", "tensorflow.lite.python.convert.toco_convert_graph_def" ]
tensorflow/lite/python/convert_test.py
[(395, 'tensorflow.python.platform.test.main', 'test.main', ([], {}), False, 'from tensorflow.python.platform import test\n'), (43, 'tensorflow.lite.python.convert.toco_convert', 'convert.toco_convert', (['sess.graph_def', '[in_tensor]', '[out_tensor]'], {}), False, 'from tensorflow.lite.python import convert\n'), (55, 'tensorflow.lite.python.convert.toco_convert', 'convert.toco_convert', (['sess.graph_def', '[in_tensor]', '[out_tensor]'], {'inference_type': 'dtypes.uint8', 'quantized_input_stats': '[(0.0, 1.0)]'}), False, 'from tensorflow.lite.python import convert\n'), (68, 'tensorflow.lite.python.convert.toco_convert_graph_def', 'convert.toco_convert_graph_def', (['sess.graph_def', "[('input', [1, 16, 16, 3])]", "['add']"], {'enable_mlir_converter': '(False)', 'control_output_arrays': 'None', 'inference_type': 'dtypes.float32'}), False, 'from tensorflow.lite.python import convert\n'), (76, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (105, 'tensorflow.lite.python.convert.toco_convert_graph_def', 'convert.toco_convert_graph_def', (['sess.graph_def', 'input_arrays_map', 'output_arrays'], {'enable_mlir_converter': '(False)', 'control_output_arrays': 'None', 'inference_type': 'dtypes.uint8', 'quantized_input_stats': '[(0.0, 1.0), (0.0, 1.0)]'}), False, 'from tensorflow.lite.python import convert\n'), (116, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (186, 'tensorflow.python.framework.graph_util_impl._extract_graph_summary', '_extract_graph_summary', (['graphdef'], {}), False, 'from tensorflow.python.framework.graph_util_impl import _extract_graph_summary\n'), (188, 'tensorflow.python.framework.graph_util_impl._bfs_for_reachable_nodes', '_bfs_for_reachable_nodes', (['output_nodes', 'name_to_input_name'], {}), False, 'from tensorflow.python.framework.graph_util_impl import _bfs_for_reachable_nodes\n'), (37, 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'shape': '[1, 16, 16, 3]', 'dtype': 'dtypes.float32'}), False, 'from tensorflow.python.ops import array_ops\n'), (40, 'tensorflow.python.client.session.Session', 'session.Session', ([], {}), False, 'from tensorflow.python.client import session\n'), (49, 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'shape': '[1, 16, 16, 3]', 'dtype': 'dtypes.float32'}), False, 'from tensorflow.python.ops import array_ops\n'), (51, 'tensorflow.python.ops.array_ops.fake_quant_with_min_max_args', 'array_ops.fake_quant_with_min_max_args', (['(in_tensor + in_tensor)'], {'min': '(0.0)', 'max': '(1.0)'}), False, 'from tensorflow.python.ops import array_ops\n'), (53, 'tensorflow.python.client.session.Session', 'session.Session', ([], {}), False, 'from tensorflow.python.client import session\n'), (63, 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'shape': '[1, 16, 16, 3]', 'dtype': 'dtypes.float32', 'name': '"""input"""'}), False, 'from tensorflow.python.ops import array_ops\n'), (66, 'tensorflow.python.client.session.Session', 'session.Session', ([], {}), False, 'from tensorflow.python.client import session\n'), (95, 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'shape': '[1, 16, 16, 3]', 'dtype': 'dtypes.float32', 'name': '"""inputA"""'}), False, 'from tensorflow.python.ops import 
array_ops\n'), (97, 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'shape': '[1, 16, 16, 3]', 'dtype': 'dtypes.float32', 'name': '"""inputB"""'}), False, 'from tensorflow.python.ops import array_ops\n'), (99, 'tensorflow.python.ops.array_ops.fake_quant_with_min_max_args', 'array_ops.fake_quant_with_min_max_args', (['(in_tensor_1 + in_tensor_2)'], {'min': '(0.0)', 'max': '(1.0)', 'name': '"""output"""'}), False, 'from tensorflow.python.ops import array_ops\n'), (101, 'tensorflow.python.client.session.Session', 'session.Session', ([], {}), False, 'from tensorflow.python.client import session\n'), (142, 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'shape': '[1, 16, 16, 3]', 'dtype': 'dtypes.float32', 'name': '"""inputA"""'}), False, 'from tensorflow.python.ops import array_ops\n'), (144, 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'shape': '[1, 16, 16, 3]', 'dtype': 'dtypes.float32', 'name': '"""inputB"""'}), False, 'from tensorflow.python.ops import array_ops\n'), (146, 'tensorflow.python.ops.array_ops.fake_quant_with_min_max_args', 'array_ops.fake_quant_with_min_max_args', (['(in_tensor_1 + in_tensor_2)'], {'min': '(0.0)', 'max': '(1.0)', 'name': '"""output"""'}), False, 'from tensorflow.python.ops import array_ops\n'), (148, 'tensorflow.python.client.session.Session', 'session.Session', ([], {}), False, 'from tensorflow.python.client import session\n'), (153, 'tensorflow.lite.python.convert.toco_convert_graph_def', 'convert.toco_convert_graph_def', (['sess.graph_def', 'input_arrays_map', 'output_arrays'], {'enable_mlir_converter': '(False)', 'control_output_arrays': 'None', 'inference_type': 'dtypes.uint8'}), False, 'from tensorflow.lite.python import convert\n'), (205, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[1.0, 2.0, 3.0, 4.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (206, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['(1.0)'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (235, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['(1.0)'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (236, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[2.0, 3.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (237, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[4.0, 5.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (264, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[1.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (265, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[1.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (302, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[1.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (303, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[2.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (304, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[3.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (305, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[4.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (306, 'tensorflow.lite.python.op_hint.OpHint', 'op_hint.OpHint', (['"""test_tag"""'], {}), False, 'from tensorflow.lite.python 
import op_hint\n'), (326, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[1.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (327, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[2.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (328, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[3.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (329, 'tensorflow.lite.python.op_hint.OpHint', 'op_hint.OpHint', (['"""test_override"""'], {}), False, 'from tensorflow.lite.python import op_hint\n'), (340, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[3.0, 4.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (341, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[5.0, 6.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (342, 'tensorflow.lite.python.op_hint.OpHint', 'op_hint.OpHint', (['"""agg"""'], {}), False, 'from tensorflow.lite.python import op_hint\n'), (343, 'tensorflow.python.ops.array_ops.unstack', 'array_ops.unstack', (['a'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (344, 'tensorflow.python.ops.array_ops.unstack', 'array_ops.unstack', (['b'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (351, 'tensorflow.python.ops.math_ops.add', 'math_ops.add', (['a0', 'b0'], {'name': '"""addleft"""'}), False, 'from tensorflow.python.ops import math_ops\n'), (352, 'tensorflow.python.ops.math_ops.add', 'math_ops.add', (['a1', 'b1'], {'name': '"""addright"""'}), False, 'from tensorflow.python.ops import math_ops\n'), (358, 'tensorflow.python.ops.array_ops.stack', 'array_ops.stack', (['[c0, c1]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (359, 'tensorflow.python.ops.array_ops.identity', 'array_ops.identity', (['curr'], {'name': '"""FINAL_OUTPUT"""'}), False, 'from tensorflow.python.ops import array_ops\n'), (209, 'tensorflow.lite.python.op_hint.OpHint', 'op_hint.OpHint', (['"""cool_activation"""'], {}), False, 'from tensorflow.lite.python import op_hint\n'), (223, 'tensorflow.lite.python.op_hint.convert_op_hints_to_stubs', 'op_hint.convert_op_hints_to_stubs', ([], {'graph_def': 'sess.graph_def'}), False, 'from tensorflow.lite.python import op_hint\n'), (240, 'tensorflow.lite.python.op_hint.OpHint', 'op_hint.OpHint', (['"""scale_and_bias_and_identity"""'], {}), False, 'from tensorflow.lite.python import op_hint\n'), (252, 'tensorflow.lite.python.op_hint.convert_op_hints_to_stubs', 'op_hint.convert_op_hints_to_stubs', ([], {'graph_def': 'sess.graph_def'}), False, 'from tensorflow.lite.python import op_hint\n'), (268, 'tensorflow.lite.python.op_hint.OpHint', 'op_hint.OpHint', (['"""add_test"""'], {}), False, 'from tensorflow.lite.python import op_hint\n'), (270, 'tensorflow.python.ops.math_ops.multiply', 'math_ops.multiply', (['x', 'x'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (282, 'tensorflow.lite.python.op_hint.convert_op_hints_to_stubs', 'op_hint.convert_op_hints_to_stubs', ([], {'graph_def': 'sess.graph_def'}), False, 'from tensorflow.lite.python import op_hint\n'), (314, 'tensorflow.python.ops.math_ops.mul', 'math_ops.mul', (['a', 'b'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (314, 'tensorflow.python.ops.math_ops.mul', 'math_ops.mul', (['c', 'b'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (361, 'tensorflow.lite.python.op_hint.convert_op_hints_to_stubs', 'op_hint.convert_op_hints_to_stubs', ([], 
{'graph_def': 'sess.graph_def'}), False, 'from tensorflow.lite.python import op_hint\n'), (374, 'tensorflow.lite.python.op_hint.OpHint', 'op_hint.OpHint', (['name'], {}), False, 'from tensorflow.lite.python import op_hint\n'), (377, 'tensorflow.python.ops.math_ops.mul', 'math_ops.mul', (['input1', 'input2'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (380, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[1.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (381, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[2.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (382, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[3.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (383, 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[4.0]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (385, 'tensorflow.lite.python.op_hint.find_all_hinted_output_nodes', 'op_hint.find_all_hinted_output_nodes', (['sess'], {}), False, 'from tensorflow.lite.python import op_hint\n'), (36, 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (48, 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (62, 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (94, 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (141, 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (204, 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (234, 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (263, 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (301, 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (325, 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (339, 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (371, 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (387, 'tensorflow.python.framework.graph_util_impl._node_name', '_node_name', (['output_1.name'], {}), False, 'from tensorflow.python.framework.graph_util_impl import _node_name\n'), (388, 'tensorflow.python.framework.graph_util_impl._node_name', '_node_name', (['output_2.name'], {}), False, 'from tensorflow.python.framework.graph_util_impl import _node_name\n'), (211, 'tensorflow.python.ops.math_ops.sigmoid', 'math_ops.sigmoid', (['input_tensor'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (229, 'tensorflow.lite.python.op_hint._tensor_name_base', 'op_hint._tensor_name_base', (['output.name'], {}), False, 'from tensorflow.lite.python import op_hint\n'), (258, 'tensorflow.lite.python.op_hint._tensor_name_base', 'op_hint._tensor_name_base', (['output.name'], {}), False, 'from tensorflow.lite.python import op_hint\n'), (287, 
'tensorflow.lite.python.op_hint._tensor_name_base', 'op_hint._tensor_name_base', (['output.name'], {}), False, 'from tensorflow.lite.python import op_hint\n'), (366, 'tensorflow.lite.python.op_hint._tensor_name_base', 'op_hint._tensor_name_base', (['output.name'], {}), False, 'from tensorflow.lite.python import op_hint\n')]
jacobdineen/SISR
8c27dccf9ac430b6401ccbf59071219e87b2f68d
import time

import tensorflow as tf

from model import evaluate
from model import srgan

from tensorflow.keras.applications.vgg19 import preprocess_input
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.losses import MeanAbsoluteError
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.metrics import Mean
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers.schedules import PiecewiseConstantDecay


class Trainer:
    def __init__(self,
                 model,
                 loss,
                 learning_rate,
                 checkpoint_dir='./ckpt/edsr'):

        self.now = None
        self.loss = loss
        self.checkpoint = tf.train.Checkpoint(step=tf.Variable(0),
                                              psnr=tf.Variable(-1.0),
                                              optimizer=Adam(learning_rate),
                                              model=model)
        self.checkpoint_manager = tf.train.CheckpointManager(checkpoint=self.checkpoint,
                                                             directory=checkpoint_dir,
                                                             max_to_keep=3)

        self.restore()

    @property
    def model(self):
        return self.checkpoint.model

    def train(self, train_dataset, valid_dataset, steps, evaluate_every=1000, save_best_only=False):
        loss_mean = Mean()

        ckpt_mgr = self.checkpoint_manager
        ckpt = self.checkpoint

        self.now = time.perf_counter()

        for lr, hr in train_dataset.take(steps - ckpt.step.numpy()):
            ckpt.step.assign_add(1)
            step = ckpt.step.numpy()

            loss = self.train_step(lr, hr)
            loss_mean(loss)

            if step % evaluate_every == 0:
                loss_value = loss_mean.result()
                loss_mean.reset_states()

                # Compute PSNR on validation dataset
                psnr_value = self.evaluate(valid_dataset)

                duration = time.perf_counter() - self.now
                print(f'{step}/{steps}: loss = {loss_value.numpy():.3f}, PSNR = {psnr_value.numpy():.3f} ({duration:.2f}s)')

                if save_best_only and psnr_value <= ckpt.psnr:
                    self.now = time.perf_counter()
                    # skip saving checkpoint, no PSNR improvement
                    continue

                ckpt.psnr = psnr_value
                ckpt_mgr.save()

                self.now = time.perf_counter()

    @tf.function
    def train_step(self, lr, hr):
        with tf.GradientTape() as tape:
            lr = tf.cast(lr, tf.float32)
            hr = tf.cast(hr, tf.float32)

            sr = self.checkpoint.model(lr, training=True)
            loss_value = self.loss(hr, sr)

        gradients = tape.gradient(loss_value, self.checkpoint.model.trainable_variables)
        self.checkpoint.optimizer.apply_gradients(zip(gradients, self.checkpoint.model.trainable_variables))

        return loss_value

    def evaluate(self, dataset):
        return evaluate(self.checkpoint.model, dataset)

    def restore(self):
        if self.checkpoint_manager.latest_checkpoint:
            self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint)
            print(f'Model restored from checkpoint at step {self.checkpoint.step.numpy()}.')


class EdsrTrainer(Trainer):
    def __init__(self,
                 model,
                 checkpoint_dir,
                 learning_rate=PiecewiseConstantDecay(boundaries=[200000], values=[1e-4, 5e-5])):
        super().__init__(model, loss=MeanAbsoluteError(), learning_rate=learning_rate, checkpoint_dir=checkpoint_dir)

    def train(self, train_dataset, valid_dataset, steps=300000, evaluate_every=1000, save_best_only=True):
        super().train(train_dataset, valid_dataset, steps, evaluate_every, save_best_only)


class WdsrTrainer(Trainer):
    def __init__(self,
                 model,
                 checkpoint_dir,
                 learning_rate=PiecewiseConstantDecay(boundaries=[200000], values=[1e-3, 5e-4])):
        super().__init__(model, loss=MeanAbsoluteError(), learning_rate=learning_rate, checkpoint_dir=checkpoint_dir)

    def train(self, train_dataset, valid_dataset, steps=300000, evaluate_every=1000, save_best_only=True):
        super().train(train_dataset, valid_dataset, steps, evaluate_every, save_best_only)
[ "tensorflow.train.CheckpointManager", "tensorflow.Variable", "tensorflow.cast", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.losses.MeanAbsoluteError", "tensorflow.keras.optimizers.schedules.PiecewiseConstantDecay", "tensorflow.keras.metrics.Mean", "tensorflow.GradientTape" ]
train.py
[(29, 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', ([], {'checkpoint': 'self.checkpoint', 'directory': 'checkpoint_dir', 'max_to_keep': '(3)'}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.keras.metrics.Mean', 'Mean', ([], {}), False, 'from tensorflow.keras.metrics import Mean\n'), (45, 'time.perf_counter', 'time.perf_counter', ([], {}), False, 'import time\n'), (89, 'model.evaluate', 'evaluate', (['self.checkpoint.model', 'dataset'], {}), False, 'from model import evaluate\n'), (101, 'tensorflow.keras.optimizers.schedules.PiecewiseConstantDecay', 'PiecewiseConstantDecay', ([], {'boundaries': '[200000]', 'values': '[0.0001, 5e-05]'}), False, 'from tensorflow.keras.optimizers.schedules import PiecewiseConstantDecay\n'), (112, 'tensorflow.keras.optimizers.schedules.PiecewiseConstantDecay', 'PiecewiseConstantDecay', ([], {'boundaries': '[200000]', 'values': '[0.001, 0.0005]'}), False, 'from tensorflow.keras.optimizers.schedules import PiecewiseConstantDecay\n'), (76, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.cast', 'tf.cast', (['lr', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (78, 'tensorflow.cast', 'tf.cast', (['hr', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (25, 'tensorflow.Variable', 'tf.Variable', (['(0)'], {}), True, 'import tensorflow as tf\n'), (26, 'tensorflow.Variable', 'tf.Variable', (['(-1.0)'], {}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.keras.optimizers.Adam', 'Adam', (['learning_rate'], {}), False, 'from tensorflow.keras.optimizers import Adam\n'), (72, 'time.perf_counter', 'time.perf_counter', ([], {}), False, 'import time\n'), (102, 'tensorflow.keras.losses.MeanAbsoluteError', 'MeanAbsoluteError', ([], {}), False, 'from tensorflow.keras.losses import MeanAbsoluteError\n'), (113, 'tensorflow.keras.losses.MeanAbsoluteError', 'MeanAbsoluteError', ([], {}), False, 'from tensorflow.keras.losses import MeanAbsoluteError\n'), (61, 'time.perf_counter', 'time.perf_counter', ([], {}), False, 'import time\n'), (65, 'time.perf_counter', 'time.perf_counter', ([], {}), False, 'import time\n')]
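A minimal usage sketch for the Trainer API in train.py above. The `edsr` builder import and the toy dataset below are assumptions standing in for the repository's real model builder and DIV2K data pipeline; they are not part of the file.

import numpy as np
import tensorflow as tf

from model.edsr import edsr  # assumed builder; not shown in train.py above

# toy (low-res, high-res) pairs standing in for a real DIV2K pipeline
lr = (np.random.rand(8, 24, 24, 3) * 255).astype(np.float32)
hr = (np.random.rand(8, 96, 96, 3) * 255).astype(np.float32)
train_ds = tf.data.Dataset.from_tensor_slices((lr, hr)).batch(4).repeat()
valid_ds = tf.data.Dataset.from_tensor_slices((lr, hr)).batch(4)

trainer = EdsrTrainer(model=edsr(scale=4, num_res_blocks=16),  # hypothetical args
                      checkpoint_dir='./ckpt/edsr-16-x4')
trainer.train(train_ds, valid_ds, steps=2000, evaluate_every=1000)

# the checkpoint with the best validation PSNR is restored on construction,
# so the trained weights can be exported directly
trainer.model.save_weights('weights/edsr-16-x4.h5')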
BioMeasure/deepcell-tf
e8912c9e4a7160900e8d9dc2616a03dfa47fd53f
# Copyright 2016-2020 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# [email protected]
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MaskRCNN models adapted from https://github.com/fizyr/keras-maskrcnn"""

from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, Concatenate
from tensorflow.keras.layers import TimeDistributed, Conv2D
from tensorflow.keras.models import Model
from tensorflow.keras.initializers import RandomNormal

from deepcell.layers import Cast, UpsampleLike
from deepcell.layers import Upsample, RoiAlign, ConcatenateBoxes
from deepcell.layers import ClipBoxes, RegressBoxes, FilterDetections
from deepcell.layers import TensorProduct, ImageNormalization2D, Location2D
from deepcell.model_zoo.retinanet import retinanet, __build_anchors
from deepcell.utils.retinanet_anchor_utils import AnchorParameters
from deepcell.utils.backbone_utils import get_backbone


def default_mask_model(num_classes,
                       pyramid_feature_size=256,
                       mask_feature_size=256,
                       roi_size=(14, 14),
                       mask_size=(28, 28),
                       name='mask_submodel',
                       mask_dtype=K.floatx(),
                       retinanet_dtype=K.floatx()):
    """Creates the default mask submodel.

    Args:
        num_classes (int): Number of classes to predict a score
            for at each feature level.
        pyramid_feature_size (int): The number of filters to expect from the
            feature pyramid levels.
        mask_feature_size (int): The number of filters to expect from the masks.
        roi_size (tuple): The number of filters to use in the Roi Layers.
        mask_size (tuple): The size of the masks.
        mask_dtype (str): ``dtype`` to use for mask tensors.
        retinanet_dtype (str): ``dtype`` retinanet models expect.
        name (str): The name of the submodel.

    Returns:
        tensorflow.keras.Model: a Model that predicts classes for each anchor.
    """
    options = {
        'kernel_size': 3,
        'strides': 1,
        'padding': 'same',
        'kernel_initializer': RandomNormal(mean=0.0, stddev=0.01, seed=None),
        'bias_initializer': 'zeros',
        'activation': 'relu',
    }

    inputs = Input(shape=(None, roi_size[0], roi_size[1], pyramid_feature_size))
    outputs = inputs

    # casting to the desired data type, which may be different than
    # the one used for the underlying keras-retinanet model
    if mask_dtype != retinanet_dtype:
        outputs = TimeDistributed(
            Cast(dtype=mask_dtype),
            name='cast_masks')(outputs)

    for i in range(4):
        outputs = TimeDistributed(Conv2D(
            filters=mask_feature_size,
            **options
        ), name='roi_mask_{}'.format(i))(outputs)

    # perform upsampling + conv instead of deconv as in the paper
    # https://distill.pub/2016/deconv-checkerboard/
    outputs = TimeDistributed(
        Upsample(mask_size),
        name='roi_mask_upsample')(outputs)
    outputs = TimeDistributed(Conv2D(
        filters=mask_feature_size,
        **options
    ), name='roi_mask_features')(outputs)

    outputs = TimeDistributed(Conv2D(
        filters=num_classes,
        kernel_size=1,
        activation='sigmoid'
    ), name='roi_mask')(outputs)

    # casting back to the underlying keras-retinanet model data type
    if mask_dtype != retinanet_dtype:
        outputs = TimeDistributed(
            Cast(dtype=retinanet_dtype),
            name='recast_masks')(outputs)

    return Model(inputs=inputs, outputs=outputs, name=name)


def default_roi_submodels(num_classes,
                          roi_size=(14, 14),
                          mask_size=(28, 28),
                          frames_per_batch=1,
                          mask_dtype=K.floatx(),
                          retinanet_dtype=K.floatx()):
    """Create a list of default roi submodels.

    The default submodels contains a single mask model.

    Args:
        num_classes (int): Number of classes to use.
        roi_size (tuple): The number of filters to use in the Roi Layers.
        mask_size (tuple): The size of the masks.
        frames_per_batch (int): Size of z axis in generated batches.
            If equal to 1, assumes 2D data.
        mask_dtype (str): ``dtype`` to use for mask tensors.
        retinanet_dtype (str): ``dtype`` retinanet models expect.

    Returns:
        list: A list of tuple, where the first element is the name of the
        submodel and the second element is the submodel itself.
    """
    if frames_per_batch > 1:
        return [
            ('masks', TimeDistributed(
                default_mask_model(num_classes,
                                   roi_size=roi_size,
                                   mask_size=mask_size,
                                   mask_dtype=mask_dtype,
                                   retinanet_dtype=retinanet_dtype,
                                   name='mask_submodel_single_frame'),
                name='mask_submodel'))
        ]
    return [
        ('masks', default_mask_model(num_classes,
                                     roi_size=roi_size,
                                     mask_size=mask_size,
                                     mask_dtype=mask_dtype,
                                     retinanet_dtype=retinanet_dtype))
    ]


def retinamask(inputs,
               backbone_dict,
               num_classes,
               frames_per_batch=1,
               backbone_levels=['C3', 'C4', 'C5'],
               pyramid_levels=['P3', 'P4', 'P5', 'P6', 'P7'],
               retinanet_model=None,
               anchor_params=None,
               nms=True,
               training=True,
               panoptic=False,
               class_specific_filter=True,
               crop_size=(14, 14),
               mask_size=(28, 28),
               name='retinanet-mask',
               roi_submodels=None,
               max_detections=100,
               score_threshold=0.05,
               nms_threshold=0.5,
               mask_dtype=K.floatx(),
               **kwargs):
    """Construct a masking model by appending layers to compute masks to a
    :mod:`deepcell.model_zoo.retinanet.retinanet` model.

    Args:
        inputs (tensor): List of ``tensorflow.keras.layers.Input``.
            The first input is the image, the second input the blob of masks.
        backbone_dict (dict): A dictionary with the backbone layers.
        num_classes (int): Integer, number of classes to classify.
        frames_per_batch (int): Size of z axis in generated batches.
            If equal to 1, assumes 2D data.
        backbone_levels (list): The backbone levels to be used
            to create the feature pyramid.
        pyramid_levels (list): The pyramid levels to attach regression and
            classification heads to.
        retinanet_model (tensorflow.keras.Model):
            :mod:`deepcell.model_zoo.retinanet.retinanet` model that predicts
            regression and classification values.
        anchor_params (AnchorParameters): Struct containing anchor parameters.
        nms (bool): Whether to use non-maximum suppression
            for the filtering step.
        training (bool): Whether to use the bounding boxes as the detections,
            during training or to use the
            :mod:`deepcell.layers.filter_detections.FilterDetections`
            during inference.
        panoptic (bool): Flag for adding the semantic head for panoptic
            segmentation tasks.
        class_specific_filter (bool): Use class specific filtering.
        crop_size (tuple): 2-length tuple for the x-y size of the crops.
            Used to create default ``roi_submodels``.
        mask_size (tuple): 2-length tuple for the x-y size of the masks.
            Used to create default ``roi_submodels``.
        name (str): Name of the model.
        roi_submodels (list): Submodels for processing ROIs.
        max_detections (int): The maximum number of detections allowed.
        score_threshold (float): Minimum score for the
            :mod:`deepcell.layers.filter_detections.FilterDetections` layer.
        nms_threshold (float): Minimum NMS for the
            :mod:`deepcell.layers.filter_detections.FilterDetections` layer.
        mask_dtype (str): ``dtype`` to use for mask tensors.
        kwargs (dict): Additional kwargs to pass to the
            :mod:`deepcell.model_zoo.retinanet.retinanet` model.

    Returns:
        tensorflow.keras.Model: Model with inputs as input and as output the
        output of each submodel for each pyramid level and the detections.
        The order is as defined in submodels.

        .. code-block:: python

            [
                regression, classification, other[0], ...,
                boxes_masks, boxes, scores, labels, masks, other[0], ...
            ]

    """
    if anchor_params is None:
        anchor_params = AnchorParameters.default

    if roi_submodels is None:
        retinanet_dtype = K.floatx()
        K.set_floatx(mask_dtype)
        roi_submodels = default_roi_submodels(
            num_classes, crop_size, mask_size,
            frames_per_batch, mask_dtype, retinanet_dtype)
        K.set_floatx(retinanet_dtype)

    image = inputs
    if retinanet_model is None:
        retinanet_model = retinanet(
            inputs=image,
            backbone_dict=backbone_dict,
            num_classes=num_classes,
            backbone_levels=backbone_levels,
            pyramid_levels=pyramid_levels,
            panoptic=panoptic,
            num_anchors=anchor_params.num_anchors(),
            frames_per_batch=frames_per_batch,
            **kwargs
        )

    # parse outputs
    regression = retinanet_model.outputs[0]
    classification = retinanet_model.outputs[1]

    semantic_classes = [1 for layer in retinanet_model.layers
                        if layer.name.startswith('semantic')]

    if panoptic:
        # Determine the number of semantic heads
        n_semantic_heads = len(semantic_classes)

        # The panoptic output should not be sent to filter detections
        other = retinanet_model.outputs[2:-n_semantic_heads]
        semantic = retinanet_model.outputs[-n_semantic_heads:]
    else:
        other = retinanet_model.outputs[2:]
        semantic = []

    features = [retinanet_model.get_layer(name).output
                for name in pyramid_levels]

    # build boxes
    anchors = __build_anchors(anchor_params, features,
                              frames_per_batch=frames_per_batch)

    boxes = RegressBoxes(name='boxes')([anchors, regression])
    boxes = ClipBoxes(name='clipped_boxes')([image, boxes])

    # filter detections (apply NMS / score threshold / select top-k)
    if training:
        if frames_per_batch == 1:
            boxes = Input(shape=(None, 4), name='boxes_input')
        else:
            boxes = Input(shape=(None, None, 4), name='boxes_input')
        detections = []
    else:
        detections = FilterDetections(
            nms=nms,
            nms_threshold=nms_threshold,
            score_threshold=score_threshold,
            class_specific_filter=class_specific_filter,
            max_detections=max_detections,
            name='filtered_detections'
        )([boxes, classification] + other)

        # split up in known outputs and "other"
        boxes = detections[0]

    fpn = features[0]
    fpn = UpsampleLike()([fpn, image])
    rois = RoiAlign(crop_size=crop_size)([boxes, fpn])

    # execute maskrcnn submodels
    maskrcnn_outputs = [submodel(rois) for _, submodel in roi_submodels]

    # concatenate boxes for loss computation
    trainable_outputs = [ConcatenateBoxes(name=name)([boxes, output])
                         for (name, _), output in zip(
                             roi_submodels, maskrcnn_outputs)]

    # reconstruct the new output
    outputs = [regression, classification] + other + trainable_outputs + \
        detections + maskrcnn_outputs + list(semantic)

    inputs = [image, boxes] if training else image
    model = Model(inputs=inputs, outputs=outputs, name=name)
    model.backbone_levels = backbone_levels
    model.pyramid_levels = pyramid_levels

    return model


def retinamask_bbox(model,
                    nms=True,
                    panoptic=False,
                    num_semantic_heads=1,
                    class_specific_filter=True,
                    name='retinanet-bbox',
                    anchor_params=None,
                    max_detections=300,
                    frames_per_batch=1,
                    crop_size=(14, 14),
                    **kwargs):
    """Construct a RetinaNet model on top of a backbone and adds convenience
    functions to output boxes directly.

    This model uses the minimum retinanet model and appends a few layers
    to compute boxes within the graph. These layers include applying the
    regression values to the anchors and performing NMS.

    Args:
        model (tensorflow.keras.Model): RetinaNet model to append bbox
            layers to. If ``None``, it will create a ``RetinaNet`` model
            using ``kwargs``.
        nms (bool): Whether to use non-maximum suppression
            for the filtering step.
        panoptic (bool): Flag for adding the semantic head for panoptic
            segmentation tasks.
        num_semantic_heads (int): Total number of semantic heads to build.
        class_specific_filter (bool): Whether to use class specific filtering
            or filter for the best scoring class only.
        anchor_params (AnchorParameters): Struct containing anchor parameters.
        max_detections (int): The maximum number of detections allowed.
        frames_per_batch (int): Size of z axis in generated batches.
            If equal to 1, assumes 2D data.
        crop_size (tuple): 2-length tuple for the x-y size of the crops.
            Used to create default ``roi_submodels``.
        kwargs (dict): Additional kwargs to pass to the
            :mod:`deepcell.model_zoo.retinanet.retinanet` model.

    Returns:
        tensorflow.keras.Model: A Model which takes an image as input and
        outputs the detections on the image.

        The order is defined as follows:

        .. code-block:: python

            [
                boxes, scores, labels, other[0], other[1], ...
            ]

    Raises:
        ValueError: the given model does not have a regression or
            classification submodel.
    """
    # if no anchor parameters are passed, use default values
    if anchor_params is None:
        anchor_params = AnchorParameters.default

    # create RetinaNet model
    names = ('regression', 'classification')
    if not all(output in model.output_names for output in names):
        raise ValueError('Input is not a training model (no `regression` '
                         'and `classification` outputs were found, '
                         'outputs are: {}).'.format(model.output_names))

    # compute the anchors
    features = [model.get_layer(l).output for l in model.pyramid_levels]
    anchors = __build_anchors(anchor_params, features,
                              frames_per_batch=frames_per_batch)

    # we expect anchors, regression and classification values as first output
    regression = model.outputs[0]
    classification = model.outputs[1]

    semantic_classes = [1 for layer in model.layers
                        if layer.name.startswith('semantic')]

    # "other" can be any additional output from custom submodels, by default []
    if panoptic:
        # The last output is the panoptic output, which should not be
        # sent to filter detections
        num_semantic_heads = len(semantic_classes)
        other = model.outputs[2:-num_semantic_heads]
        semantic = model.outputs[-num_semantic_heads:]
    else:
        other = model.outputs[2:]
        semantic = []

    # apply predicted regression to anchors
    boxes = RegressBoxes(name='boxes')([anchors, regression])
    boxes = ClipBoxes(name='clipped_boxes')([model.inputs[0], boxes])

    # filter detections (apply NMS / score threshold / select top-k)
    detections = FilterDetections(
        nms=nms,
        class_specific_filter=class_specific_filter,
        max_detections=max_detections,
        name='filtered_detections'
    )([boxes, classification])

    # apply submodels to detections
    image = model.layers[0].output
    boxes = detections[0]

    fpn = features[0]
    fpn = UpsampleLike()([fpn, image])
    rois = RoiAlign(crop_size=crop_size)([boxes, fpn])

    mask_submodel = model.get_layer('mask_submodel')
    masks = [mask_submodel(rois)]

    # add the semantic head's output if needed
    outputs = detections + list(masks) + list(semantic)

    # construct the model
    new_model = Model(inputs=model.inputs, outputs=outputs, name=name)
    image_input = model.inputs[0]
    shape = (1, 1, 4) if frames_per_batch == 1 else (1, 1, 1, 4)
    temp_boxes = K.zeros(shape, name='temp_boxes')
    new_inputs = [image_input, temp_boxes]
    final_model = new_model(new_inputs)

    return Model(inputs=image_input, outputs=final_model)


def RetinaMask(backbone,
               num_classes,
               input_shape,
               inputs=None,
               backbone_levels=['C3', 'C4', 'C5'],
               pyramid_levels=['P3', 'P4', 'P5', 'P6', 'P7'],
               norm_method='whole_image',
               location=False,
               use_imagenet=False,
               crop_size=(14, 14),
               pooling=None,
               mask_dtype=K.floatx(),
               required_channels=3,
               frames_per_batch=1,
               **kwargs):
    """Constructs a mrcnn model using a backbone from ``keras-applications``.

    Args:
        backbone (str): Name of backbone to use.
        num_classes (int): Number of classes to classify.
        input_shape (tuple): The shape of the input data.
        inputs (tensor): Optional input tensor, overrides ``input_shape``.
        backbone_levels (list): The backbone levels to be used
            to create the feature pyramid.
        pyramid_levels (list): The pyramid levels to attach regression and
            classification heads.
        norm_method (str): Normalization method to use with the
            :mod:`deepcell.layers.normalization.ImageNormalization2D` layer.
        location (bool): Whether to include a
            :mod:`deepcell.layers.location.Location2D` layer.
        use_imagenet (bool): Whether to load imagenet-based pretrained weights.
        crop_size (tuple): 2-length tuple for the x-y size of the crops.
            Used to create default ``roi_submodels``.
        pooling (str): Pooling mode for feature extraction
            when ``include_top`` is ``False``.

            - None means that the output of the model will be
              the 4D tensor output of the last convolutional layer.
            - 'avg' means that global average pooling will be applied to
              the output of the last convolutional layer, and thus the
              output of the model will be a 2D tensor.
            - 'max' means that global max pooling will be applied.

        mask_dtype (str): ``dtype`` to use for mask tensors.
        required_channels (int): The required number of channels of the
            backbone. 3 is the default for all current backbones.
        frames_per_batch (int): Size of z axis in generated batches.
            If equal to 1, assumes 2D data.
        kwargs (dict): Other standard inputs for `~retinanetmask`.

    Returns:
        tensorflow.keras.Model: :mod:`deepcell.model_zoo.retinanet.RetinaNet`
        model with additional mask output.
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    if inputs is None:
        if frames_per_batch > 1:
            if channel_axis == 1:
                input_shape_with_time = tuple(
                    [input_shape[0], frames_per_batch] + list(input_shape)[1:])
            else:
                input_shape_with_time = tuple(
                    [frames_per_batch] + list(input_shape))
            inputs = Input(shape=input_shape_with_time, name='input')
        else:
            inputs = Input(shape=input_shape, name='input')

    if location:
        if frames_per_batch > 1:
            # TODO: TimeDistributed is incompatible with channels_first
            loc = TimeDistributed(Location2D(in_shape=input_shape))(inputs)
        else:
            loc = Location2D(in_shape=input_shape)(inputs)
        concat = Concatenate(axis=channel_axis)([inputs, loc])
    else:
        concat = inputs

    # force the channel size for backbone input to be `required_channels`
    if frames_per_batch > 1:
        norm = TimeDistributed(ImageNormalization2D(norm_method=norm_method))(concat)
        fixed_inputs = TimeDistributed(TensorProduct(required_channels))(norm)
    else:
        norm = ImageNormalization2D(norm_method=norm_method)(concat)
        fixed_inputs = TensorProduct(required_channels)(norm)

    # force the input shape
    axis = 0 if K.image_data_format() == 'channels_first' else -1
    fixed_input_shape = list(input_shape)
    fixed_input_shape[axis] = required_channels
    fixed_input_shape = tuple(fixed_input_shape)

    model_kwargs = {
        'include_top': False,
        'weights': None,
        'input_shape': fixed_input_shape,
        'pooling': pooling
    }

    _, backbone_dict = get_backbone(backbone, fixed_inputs,
                                    use_imagenet=use_imagenet,
                                    frames_per_batch=frames_per_batch,
                                    return_dict=True,
                                    **model_kwargs)

    # create the full model
    return retinamask(
        inputs=inputs,
        num_classes=num_classes,
        backbone_dict=backbone_dict,
        crop_size=crop_size,
        backbone_levels=backbone_levels,
        pyramid_levels=pyramid_levels,
        name='{}_retinanetmask'.format(backbone),
        mask_dtype=mask_dtype,
        frames_per_batch=frames_per_batch,
        **kwargs)
[ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.backend.floatx", "tensorflow.keras.backend.image_data_format", "tensorflow.keras.models.Model", "tensorflow.keras.backend.set_floatx", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.backend.zeros", "tensorflow.keras.initializers.RandomNormal", "tensorflow.keras.layers.Input" ]
deepcell/model_zoo/retinamask.py
[(54, 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), True, 'from tensorflow.keras import backend as K\n'), (55, 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), True, 'from tensorflow.keras import backend as K\n'), (82, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, roi_size[0], roi_size[1], pyramid_feature_size)'}), False, 'from tensorflow.keras.layers import Input, Concatenate\n'), (120, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': 'name'}), False, 'from tensorflow.keras.models import Model\n'), (127, 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), True, 'from tensorflow.keras import backend as K\n'), (128, 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), True, 'from tensorflow.keras import backend as K\n'), (185, 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), True, 'from tensorflow.keras import backend as K\n'), (287, 'deepcell.model_zoo.retinanet.__build_anchors', '__build_anchors', (['anchor_params', 'features'], {'frames_per_batch': 'frames_per_batch'}), False, 'from deepcell.model_zoo.retinanet import retinanet, __build_anchors\n'), (330, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': 'name'}), False, 'from tensorflow.keras.models import Model\n'), (403, 'deepcell.model_zoo.retinanet.__build_anchors', '__build_anchors', (['anchor_params', 'features'], {'frames_per_batch': 'frames_per_batch'}), False, 'from deepcell.model_zoo.retinanet import retinanet, __build_anchors\n'), (450, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'model.inputs', 'outputs': 'outputs', 'name': 'name'}), False, 'from tensorflow.keras.models import Model\n'), (454, 'tensorflow.keras.backend.zeros', 'K.zeros', (['shape'], {'name': '"""temp_boxes"""'}), True, 'from tensorflow.keras import backend as K\n'), (458, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'image_input', 'outputs': 'final_model'}), False, 'from tensorflow.keras.models import Model\n'), (472, 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), True, 'from tensorflow.keras import backend as K\n'), (560, 'deepcell.utils.backbone_utils.get_backbone', 'get_backbone', (['backbone', 'fixed_inputs'], {'use_imagenet': 'use_imagenet', 'frames_per_batch': 'frames_per_batch', 'return_dict': '(True)'}), False, 'from deepcell.utils.backbone_utils import get_backbone\n'), (77, 'tensorflow.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': '(0.0)', 'stddev': '(0.01)', 'seed': 'None'}), False, 'from tensorflow.keras.initializers import RandomNormal\n'), (245, 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), True, 'from tensorflow.keras import backend as K\n'), (246, 'tensorflow.keras.backend.set_floatx', 'K.set_floatx', (['mask_dtype'], {}), True, 'from tensorflow.keras import backend as K\n'), (250, 'tensorflow.keras.backend.set_floatx', 'K.set_floatx', (['retinanet_dtype'], {}), True, 'from tensorflow.keras import backend as K\n'), (289, 'deepcell.layers.RegressBoxes', 'RegressBoxes', ([], {'name': '"""boxes"""'}), False, 'from deepcell.layers import ClipBoxes, RegressBoxes, FilterDetections\n'), (290, 'deepcell.layers.ClipBoxes', 'ClipBoxes', ([], {'name': '"""clipped_boxes"""'}), False, 'from deepcell.layers import ClipBoxes, RegressBoxes, FilterDetections\n'), (314, 'deepcell.layers.UpsampleLike', 'UpsampleLike', ([], {}), False, 'from deepcell.layers import Cast, UpsampleLike\n'), (315, 'deepcell.layers.RoiAlign', 'RoiAlign', ([], 
{'crop_size': 'crop_size'}), False, 'from deepcell.layers import Upsample, RoiAlign, ConcatenateBoxes\n'), (424, 'deepcell.layers.RegressBoxes', 'RegressBoxes', ([], {'name': '"""boxes"""'}), False, 'from deepcell.layers import ClipBoxes, RegressBoxes, FilterDetections\n'), (425, 'deepcell.layers.ClipBoxes', 'ClipBoxes', ([], {'name': '"""clipped_boxes"""'}), False, 'from deepcell.layers import ClipBoxes, RegressBoxes, FilterDetections\n'), (428, 'deepcell.layers.FilterDetections', 'FilterDetections', ([], {'nms': 'nms', 'class_specific_filter': 'class_specific_filter', 'max_detections': 'max_detections', 'name': '"""filtered_detections"""'}), False, 'from deepcell.layers import ClipBoxes, RegressBoxes, FilterDetections\n'), (440, 'deepcell.layers.UpsampleLike', 'UpsampleLike', ([], {}), False, 'from deepcell.layers import Cast, UpsampleLike\n'), (441, 'deepcell.layers.RoiAlign', 'RoiAlign', ([], {'crop_size': 'crop_size'}), False, 'from deepcell.layers import Upsample, RoiAlign, ConcatenateBoxes\n'), (101, 'deepcell.layers.Upsample', 'Upsample', (['mask_size'], {}), False, 'from deepcell.layers import Upsample, RoiAlign, ConcatenateBoxes\n'), (103, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'mask_feature_size'}), False, 'from tensorflow.keras.layers import TimeDistributed, Conv2D\n'), (108, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'num_classes', 'kernel_size': '(1)', 'activation': '"""sigmoid"""'}), False, 'from tensorflow.keras.layers import TimeDistributed, Conv2D\n'), (295, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, 4)', 'name': '"""boxes_input"""'}), False, 'from tensorflow.keras.layers import Input, Concatenate\n'), (297, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, None, 4)', 'name': '"""boxes_input"""'}), False, 'from tensorflow.keras.layers import Input, Concatenate\n'), (301, 'deepcell.layers.FilterDetections', 'FilterDetections', ([], {'nms': 'nms', 'nms_threshold': 'nms_threshold', 'score_threshold': 'score_threshold', 'class_specific_filter': 'class_specific_filter', 'max_detections': 'max_detections', 'name': '"""filtered_detections"""'}), False, 'from deepcell.layers import ClipBoxes, RegressBoxes, FilterDetections\n'), (321, 'deepcell.layers.ConcatenateBoxes', 'ConcatenateBoxes', ([], {'name': 'name'}), False, 'from deepcell.layers import Upsample, RoiAlign, ConcatenateBoxes\n'), (516, 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), True, 'from tensorflow.keras import backend as K\n'), (525, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape_with_time', 'name': '"""input"""'}), False, 'from tensorflow.keras.layers import Input, Concatenate\n'), (527, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape', 'name': '"""input"""'}), False, 'from tensorflow.keras.layers import Input, Concatenate\n'), (535, 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': 'channel_axis'}), False, 'from tensorflow.keras.layers import Input, Concatenate\n'), (544, 'deepcell.layers.ImageNormalization2D', 'ImageNormalization2D', ([], {'norm_method': 'norm_method'}), False, 'from deepcell.layers import TensorProduct, ImageNormalization2D, Location2D\n'), (545, 'deepcell.layers.TensorProduct', 'TensorProduct', (['required_channels'], {}), False, 'from deepcell.layers import TensorProduct, ImageNormalization2D, Location2D\n'), (548, 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), True, 'from tensorflow.keras 
import backend as K\n'), (89, 'deepcell.layers.Cast', 'Cast', ([], {'dtype': 'mask_dtype'}), False, 'from deepcell.layers import Cast, UpsampleLike\n'), (93, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'mask_feature_size'}), False, 'from tensorflow.keras.layers import TimeDistributed, Conv2D\n'), (117, 'deepcell.layers.Cast', 'Cast', ([], {'dtype': 'retinanet_dtype'}), False, 'from deepcell.layers import Cast, UpsampleLike\n'), (534, 'deepcell.layers.Location2D', 'Location2D', ([], {'in_shape': 'input_shape'}), False, 'from deepcell.layers import TensorProduct, ImageNormalization2D, Location2D\n'), (541, 'deepcell.layers.ImageNormalization2D', 'ImageNormalization2D', ([], {'norm_method': 'norm_method'}), False, 'from deepcell.layers import TensorProduct, ImageNormalization2D, Location2D\n'), (542, 'deepcell.layers.TensorProduct', 'TensorProduct', (['required_channels'], {}), False, 'from deepcell.layers import TensorProduct, ImageNormalization2D, Location2D\n'), (532, 'deepcell.layers.Location2D', 'Location2D', ([], {'in_shape': 'input_shape'}), False, 'from deepcell.layers import TensorProduct, ImageNormalization2D, Location2D\n')]
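A short construction sketch for the RetinaMask API above; the backbone name, class count, and input shape are illustrative choices, not values taken from the file.

# build a 2D training model; its inputs are [image, ground-truth boxes]
model = RetinaMask('resnet50',
                   num_classes=1,
                   input_shape=(256, 256, 1),
                   norm_method='whole_image',
                   location=False,
                   crop_size=(14, 14))

# after training, wrap the model so that boxes/scores/labels/masks are
# computed in-graph for inference
prediction_model = retinamask_bbox(model, nms=True, max_detections=300)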
ConorLazarou/autoencoder-gallery
ff2baef0601fc458aa0aa038b2e1d89ba82fdd15
import os

import numpy as np
import tensorflow.keras.backend as K
import tensorflow.keras.layers as layers
import tensorflow.keras.models as models
from tensorflow.keras.optimizers import Adam


def mae(y_true, y_pred):
    return K.mean(K.abs(y_true - y_pred))


def build_conv_ae(dim, channels, latent_dim, learning_rate=1e-3, loss_func=mae):
    if dim < 16:
        raise ValueError("Image dimensions must be at least 16x16.")
    if channels < 1:
        raise ValueError("Channels must be a positive integer.")
    if latent_dim < 1:
        raise ValueError("Latent dimension must be a positive integer.")

    # input layer
    input_layer = layers.Input((dim, dim, channels))
    X = input_layer

    # conv layers
    half_dim = dim
    counter = 0

    # Encoding
    while half_dim >= 8:
        # make layers
        # Conv2D(num_channels, window size, stride)
        X = layers.Conv2D(16 * 2 ** counter, 3, 1, padding='same')(X)
        X = layers.BatchNormalization()(X)
        X = layers.Activation('relu')(X)
        X = layers.Conv2D(16 * 2 ** counter, 3, 1, padding='same')(X)
        X = layers.BatchNormalization()(X)
        X = layers.Activation('relu')(X)
        X = layers.MaxPooling2D(2, 2, padding="same")(X)
        counter += 1
        # cast to int so Dense/Reshape below receive integer sizes
        half_dim = int(np.ceil(half_dim / 2))

    # End of encoding
    X = layers.Flatten()(X)
    latent_space = layers.Dense(latent_dim, activation="tanh")(X)

    # Decoding
    X = layers.Dense(half_dim * half_dim * 16 * 2 ** counter)(latent_space)
    X = layers.Reshape((half_dim, half_dim, 16 * 2 ** counter))(X)

    for i in range(counter):
        X = layers.Conv2DTranspose(16 * 2 ** (counter - i), 4, 2, padding='same')(X)
        X = layers.BatchNormalization()(X)
        X = layers.Activation('relu')(X)
        X = layers.Conv2DTranspose(16 * 2 ** (counter - i), 3, 1, padding='same')(X)
        X = layers.BatchNormalization()(X)
        X = layers.Activation('relu')(X)

    X = layers.Conv2D(channels, 5, 1, padding='same')(X)
    X = layers.Activation('sigmoid')(X)

    # crop layer
    reconstructed_dim = half_dim * 2 ** counter
    left_diff = int((reconstructed_dim - dim) / 2)
    right_diff = (reconstructed_dim - dim) - left_diff
    output_layer = layers.Cropping2D(((left_diff, right_diff),
                                      (left_diff, right_diff)))(X)

    # output layer
    model = models.Model(input_layer, output_layer)
    model.compile(Adam(learning_rate), loss=loss_func)
    return model


def build_vae():
    pass


def build_beta_vae():
    pass


if __name__ == "__main__":
    pass
[ "tensorflow.keras.layers.Activation", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2DTranspose", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.backend.abs", "numpy.ceil", "tensorflow.keras.layers.Cropping2D", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Reshape", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Input" ]
models.py
[(21, 'tensorflow.keras.layers.Input', 'layers.Input', (['(dim, dim, channels)'], {}), True, 'import tensorflow.keras.layers as layers\n'), (66, 'tensorflow.keras.models.Model', 'models.Model', (['input_layer', 'output_layer'], {}), True, 'import tensorflow.keras.models as models\n'), (9, 'tensorflow.keras.backend.abs', 'K.abs', (['(y_true - y_pred)'], {}), True, 'import tensorflow.keras.backend as K\n'), (40, 'numpy.ceil', 'np.ceil', (['(half_dim / 2)'], {}), True, 'import numpy as np\n'), (43, 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), True, 'import tensorflow.keras.layers as layers\n'), (44, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['latent_dim'], {'activation': '"""tanh"""'}), True, 'import tensorflow.keras.layers as layers\n'), (45, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(half_dim * half_dim * 16 * 2 ** counter)'], {}), True, 'import tensorflow.keras.layers as layers\n'), (46, 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(half_dim, half_dim, 16 * 2 ** counter)'], {}), True, 'import tensorflow.keras.layers as layers\n'), (56, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['channels', '(5)', '(1)'], {'padding': '"""same"""'}), True, 'import tensorflow.keras.layers as layers\n'), (57, 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""sigmoid"""'], {}), True, 'import tensorflow.keras.layers as layers\n'), (63, 'tensorflow.keras.layers.Cropping2D', 'layers.Cropping2D', (['((left_diff, right_diff), (left_diff, right_diff))'], {}), True, 'import tensorflow.keras.layers as layers\n'), (67, 'tensorflow.keras.optimizers.Adam', 'Adam', (['learning_rate'], {}), False, 'from tensorflow.keras.optimizers import Adam\n'), (32, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(16 * 2 ** counter)', '(3)', '(1)'], {'padding': '"""same"""'}), True, 'import tensorflow.keras.layers as layers\n'), (33, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), True, 'import tensorflow.keras.layers as layers\n'), (34, 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), True, 'import tensorflow.keras.layers as layers\n'), (35, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(16 * 2 ** counter)', '(3)', '(1)'], {'padding': '"""same"""'}), True, 'import tensorflow.keras.layers as layers\n'), (36, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), True, 'import tensorflow.keras.layers as layers\n'), (37, 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), True, 'import tensorflow.keras.layers as layers\n'), (38, 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', (['(2)', '(2)'], {'padding': '"""same"""'}), True, 'import tensorflow.keras.layers as layers\n'), (49, 'tensorflow.keras.layers.Conv2DTranspose', 'layers.Conv2DTranspose', (['(16 * 2 ** (counter - i))', '(4)', '(2)'], {'padding': '"""same"""'}), True, 'import tensorflow.keras.layers as layers\n'), (50, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), True, 'import tensorflow.keras.layers as layers\n'), (51, 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), True, 'import tensorflow.keras.layers as layers\n'), (52, 'tensorflow.keras.layers.Conv2DTranspose', 'layers.Conv2DTranspose', (['(16 * 2 ** (counter - i))', '(3)', '(1)'], {'padding': '"""same"""'}), True, 'import tensorflow.keras.layers as layers\n'), (53, 'tensorflow.keras.layers.BatchNormalization', 
'layers.BatchNormalization', ([], {}), True, 'import tensorflow.keras.layers as layers\n'), (54, 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), True, 'import tensorflow.keras.layers as layers\n')]
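A brief training sketch for build_conv_ae above, using MNIST as a stand-in dataset; the latent size and epoch count are arbitrary choices.

import numpy as np
import tensorflow as tf

(x_train, _), _ = tf.keras.datasets.mnist.load_data()
x_train = (x_train / 255.0).astype("float32")[..., np.newaxis]  # (N, 28, 28, 1)

ae = build_conv_ae(dim=28, channels=1, latent_dim=16)
ae.fit(x_train, x_train, batch_size=128, epochs=5)  # target is the input itself
reconstructions = ae.predict(x_train[:8])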
CrispenGari/keras-rest
1b5bd57bdc0a6dab48fc8ec9f4b3f98eff64ca8b
# Turning off the warnings
import os, sys, json, time

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

import tensorflow as tf
from tensorflow import keras
import PIL.Image as Image
import numpy as np
import io
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications import imagenet_utils
from tensorflow.keras.preprocessing.image import img_to_array
from flask import Flask, request, jsonify, make_response
from flask_cors import CORS, cross_origin

app = Flask(__name__)
CORS(app)

model = None


# Loading the model
def load_model():
    global model
    model = ResNet50(weights="imagenet")


def prepare_image(image, target):
    # if the image is not RGB then convert it to RGB
    if image.mode != "RGB":
        image = image.convert("RGB")
    # resize the image to the desired shape
    image = image.resize(target)
    image = img_to_array(image)
    image = np.expand_dims(image, axis=0)
    image = imagenet_utils.preprocess_input(image)
    return image


@app.route("/predict", methods=["POST"])
def predict():
    data = {"success": False}
    if request.method == "POST":
        if request.files.get("image"):
            # read the image in PIL format
            image = request.files.get("image").read()
            image = Image.open(io.BytesIO(image))
            # preprocess the image
            image = prepare_image(image, target=(224, 224))
            preds = model.predict(image)
            results = imagenet_utils.decode_predictions(preds)
            data["predictions"] = []

            for (imageID, label, prob) in results[0]:
                r = {"label": label, "probability": float(prob)}
                data["predictions"].append(r)
            data["success"] = True
    return jsonify(data)


@app.route('/', methods=["GET", "POST"])
def hello():
    return "Hello world"


if __name__ == '__main__':
    print("loading the model, please wait....")
    load_model()
    app.run(host="localhost", port=3001)
[ "numpy.expand_dims", "tensorflow.keras.preprocessing.image.img_to_array", "tensorflow.keras.applications.imagenet_utils.decode_predictions", "tensorflow.keras.applications.ResNet50", "tensorflow.keras.applications.imagenet_utils.preprocess_input" ]
00_BASICS/main.py
[(20, 'flask.Flask', 'Flask', (['__name__'], {}), False, 'from flask import Flask, request, jsonify, make_response\n'), (21, 'flask_cors.CORS', 'CORS', (['app'], {}), False, 'from flask_cors import CORS, cross_origin\n'), (28, 'tensorflow.keras.applications.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""'}), False, 'from tensorflow.keras.applications import ResNet50\n'), (36, 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), False, 'from tensorflow.keras.preprocessing.image import img_to_array\n'), (37, 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), True, 'import numpy as np\n'), (38, 'tensorflow.keras.applications.imagenet_utils.preprocess_input', 'imagenet_utils.preprocess_input', (['image'], {}), False, 'from tensorflow.keras.applications import imagenet_utils\n'), (60, 'flask.jsonify', 'jsonify', (['data'], {}), False, 'from flask import Flask, request, jsonify, make_response\n'), (45, 'flask.request.files.get', 'request.files.get', (['"""image"""'], {}), False, 'from flask import Flask, request, jsonify, make_response\n'), (53, 'tensorflow.keras.applications.imagenet_utils.decode_predictions', 'imagenet_utils.decode_predictions', (['preds'], {}), False, 'from tensorflow.keras.applications import imagenet_utils\n'), (48, 'io.BytesIO', 'io.BytesIO', (['image'], {}), False, 'import io\n'), (47, 'flask.request.files.get', 'request.files.get', (['"""image"""'], {}), False, 'from flask import Flask, request, jsonify, make_response\n')]
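A possible client-side call against the /predict endpoint above, using the requests library; the image path is a placeholder.

import requests

with open("dog.jpg", "rb") as f:
    response = requests.post("http://localhost:3001/predict", files={"image": f})

payload = response.json()
if payload["success"]:
    for p in payload["predictions"]:
        print(f"{p['label']}: {p['probability']:.4f}")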
johntiger1/transformers
38fadb6faf30c103531fa5fcd52b3f110e7582d0
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF general model utils.""" import functools import inspect import os import re import warnings from typing import Dict, List, Optional, Union import h5py import numpy as np import tensorflow as tf from tensorflow.python.keras import backend as K from tensorflow.python.keras.saving import hdf5_format from .configuration_utils import PretrainedConfig from .file_utils import ( DUMMY_INPUTS, TF2_WEIGHTS_NAME, WEIGHTS_NAME, ModelOutput, cached_path, hf_bucket_url, is_remote_url, ) from .generation_tf_utils import TFGenerationMixin from .tokenization_utils_base import BatchEncoding from .utils import logging logger = logging.get_logger(__name__) class TFModelUtilsMixin: """ A few utilities for :obj:`tf.keras.Model`, to be used as a mixin. """ def num_parameters(self, only_trainable: bool = False) -> int: """ Get the number of (optionally, trainable) parameters in the model. Args: only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to return only the number of trainable parameters Returns: :obj:`int`: The number of parameters. """ if only_trainable: return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables)) else: return self.count_params() def keras_serializable(cls): """ Decorate a Keras Layer class to support Keras serialization. This is done by: 1. Adding a :obj:`transformers_config` dict to the Keras config dictionary in :obj:`get_config` (called by Keras at serialization time. 2. Wrapping :obj:`__init__` to accept that :obj:`transformers_config` dict (passed by Keras at deserialization time) and convert it to a config object for the actual layer initializer. 3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not need to be supplied in :obj:`custom_objects` in the call to :obj:`tf.keras.models.load_model`. Args: cls (a :obj:`tf.keras.layers.Layers subclass`): Typically a :obj:`TF.MainLayer` class in this project, in general must accept a :obj:`config` argument to its initializer. Returns: The same class object, with modifications for Keras deserialization. 
""" initializer = cls.__init__ config_class = getattr(cls, "config_class", None) if config_class is None: raise AttributeError("Must set `config_class` to use @keras_serializable") @functools.wraps(initializer) def wrapped_init(self, *args, **kwargs): config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None) if isinstance(config, dict): config = config_class.from_dict(config) initializer(self, config, *args, **kwargs) elif isinstance(config, PretrainedConfig): if len(args) > 0: initializer(self, *args, **kwargs) else: initializer(self, config, *args, **kwargs) else: raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)") self._config = config self._kwargs = kwargs cls.__init__ = wrapped_init if not hasattr(cls, "get_config"): raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses") if hasattr(cls.get_config, "_is_default"): def get_config(self): cfg = super(cls, self).get_config() cfg["config"] = self._config.to_dict() cfg.update(self._kwargs) return cfg cls.get_config = get_config cls._keras_serializable = True if hasattr(tf.keras.utils, "register_keras_serializable"): cls = tf.keras.utils.register_keras_serializable()(cls) return cls class TFCausalLanguageModelingLoss: """ Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) # make sure only labels that are not equal to -100 affect the loss active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) class TFQuestionAnsweringLoss: """ Loss function suitable for question answering. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) start_loss = loss_fn(labels["start_position"], logits[0]) end_loss = loss_fn(labels["end_position"], logits[1]) return (start_loss + end_loss) / 2.0 class TFTokenClassificationLoss: """ Loss function suitable for token classification. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) # make sure only labels that are not equal to -100 # are taken into account as loss if tf.math.reduce_any(labels == -1): warnings.warn("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.") active_loss = tf.reshape(labels, (-1,)) != -1 else: active_loss = tf.reshape(labels, (-1,)) != -100 reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) class TFSequenceClassificationLoss: """ Loss function suitable for sequence classification. 
""" def compute_loss(self, labels, logits): if len(shape_list(logits)) == 1 or shape_list(logits)[1] == 1: loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE) else: loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) return loss_fn(labels, logits) class TFMultipleChoiceLoss(TFSequenceClassificationLoss): """Loss function suitable for multiple choice tasks.""" class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss): """ Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ class TFNextSentencePredictionLoss: """ Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) # make sure only labels that are not equal to -100 # are taken into account as loss next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss) next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss) return loss_fn(next_sentence_label, next_sentence_reduced_logits) def booleans_processing(config, **kwargs): """ Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or graph) Args: config (:class:`~transformers.PretrainedConfig`): The config of the running model. **kwargs: The boolean parameters Returns: A dictionary with the proper values for each boolean """ final_booleans = {} if tf.executing_eagerly(): final_booleans["output_attentions"] = ( kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions ) final_booleans["output_hidden_states"] = ( kwargs["output_hidden_states"] if kwargs["output_hidden_states"] is not None else config.output_hidden_states ) final_booleans["return_dict"] = ( kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict ) if "use_cache" in kwargs: final_booleans["use_cache"] = kwargs["use_cache"] if kwargs["use_cache"] is not None else config.use_cache else: if ( kwargs["output_attentions"] is not None or kwargs["output_hidden_states"] is not None or ("use_cache" in kwargs and kwargs["use_cache"] is not None) ): tf.print( "The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model." "They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)." ) final_booleans["output_attentions"] = config.output_attentions final_booleans["output_hidden_states"] = config.output_hidden_states if kwargs["return_dict"] is not None: tf.print("The parameter `return_dict` cannot be set in graph mode and will always be set to `True`.") final_booleans["return_dict"] = True if "use_cache" in kwargs: final_booleans["use_cache"] = config.use_cache return final_booleans def input_processing(func, config, input_ids, **kwargs): """ Process the input of each TensorFlow model including the booleans. 
In case of a list of symbolic inputs, each input has to be named accordingly to the parameters name, i.e. `input_ids = tf.keras.Input(shape=(128,), dtype='int32', name="input_ids")` otherwise the order of the tensors will not be guaranteed during the training. Args: func (:obj:`callable`): The callable function of the TensorFlow model. config (:class:`~transformers.PretrainedConfig`): The config of the running model. **kwargs: The inputs of the model. Returns: Two lists, one for the missing layers, and another one for the unexpected layers. """ signature = dict(inspect.signature(func).parameters) signature.pop("kwargs", None) signature.pop("self", None) parameter_names = list(signature.keys()) output = {} allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray) if "inputs" in kwargs["kwargs_call"]: warnings.warn( "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.", FutureWarning, ) output["input_ids"] = kwargs["kwargs_call"].pop("inputs") if "decoder_cached_states" in kwargs["kwargs_call"]: warnings.warn( "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states") if len(kwargs["kwargs_call"]) > 0: raise ValueError( f"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}." ) kwargs.pop("kwargs_call") for k, v in kwargs.items(): if isinstance(v, allowed_types) or v is None: output[k] = v else: raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.") if isinstance(input_ids, (tuple, list)): for i, input in enumerate(input_ids): # EagerTensors don't allow to use the .name property so we check for a real Tensor if type(input) == tf.Tensor: # Tensor names have always the pattern `name:id` then we check only the # `name` part tensor_name = input.name.split(":")[0] if tensor_name in parameter_names: output[tensor_name] = input else: output[parameter_names[i]] = input elif isinstance(input, allowed_types) or input is None: output[parameter_names[i]] = input else: raise ValueError( f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}." ) elif isinstance(input_ids, (dict, BatchEncoding)): if "inputs" in input_ids: warnings.warn( "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.", FutureWarning, ) output["input_ids"] = input_ids.pop("inputs") if "decoder_cached_states" in input_ids: warnings.warn( "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) output["past_key_values"] = input_ids.pop("decoder_cached_states") for k, v in dict(input_ids).items(): if isinstance(v, allowed_types) or v is None: output[k] = v elif k not in parameter_names and "args" not in parameter_names: logger.warn( f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored." ) continue else: raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.") else: if isinstance(input_ids, tf.Tensor) or input_ids is None: output[parameter_names[0]] = input_ids else: raise ValueError( f"Data of type {type(input_ids)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}." 
) for name in parameter_names: if name not in list(output.keys()) and name != "args": output[name] = kwargs.pop(name, signature[name].default) # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs) # So to respect the proper output we have to add this exception if "args" in output: if output["args"] is not None and type(output["args"]) == tf.Tensor: tensor_name = output["args"].name.split(":")[0] output[tensor_name] = output["args"] else: # `args` in this case is always the first parameter, then `input_ids` output["input_ids"] = output["args"] del output["args"] if "kwargs" in output: del output["kwargs"] boolean_dict = { k: v for k, v in output.items() if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"] } output.update( booleans_processing( config=config, **boolean_dict, ) ) return output def load_tf_weights(model, resolved_archive_file): """ Detect missing and unexpected layers and load the TF weights accordingly to their names and shapes. Args: model (:obj:`tf.keras.models.Model`): The model to load the weights into. resolved_archive_file (:obj:`str`): The location of the H5 file. Returns: Two lists, one for the missing layers, and another one for the unexpected layers. """ missing_layers = [] unexpected_layers = [] # Read the H5 file with h5py.File(resolved_archive_file, "r") as f: # Retrieve the name of each layer from the H5 file saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names")) # Find the missing layers from the high level list of layers missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name) # Find the unexpected layers from the high level list of layers unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers])) saved_weight_names_set = set() symbolic_weights_names = set() weight_value_tuples = [] # Compute missing and unexpected sub layers # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...] 
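        # NOTE (illustrative, added): a saved weight name such as
        # "tf_model/encoder/dense/kernel:0" (hypothetical) is compared below after dropping the
        # leading model-name scope, i.e. as "encoder/dense/kernel:0", so a checkpoint and a model
        # that differ only in their top-level name still match.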
for layer in model.layers: # if layer_name from the H5 file belongs to the layers from the instantiated model if layer.name in saved_h5_model_layers_name: # Get the H5 layer object from its name h5_layer_object = f[layer.name] # Get all the weights as a list from the layer object symbolic_weights = layer.trainable_weights + layer.non_trainable_weights saved_weights = {} # Create a dict from the H5 saved model that looks like {"weight_name": weight_value} # And a set with only the names for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"): # TF names always start with the model name so we ignore it name = "/".join(weight_name.split("/")[1:]) saved_weights[name] = np.asarray(h5_layer_object[weight_name]) # Add the updated name to the final list for computing missing/unexpected values saved_weight_names_set.add(name) # Loop over each weights from the instantiated model and compare with the weights from the H5 file for symbolic_weight in symbolic_weights: # TF names always start with the model name so we ignore it symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:]) # here we check if the current weight is among the weights from the H5 file # If yes, get the weight_value of the corresponding weight from the H5 file # If not, make the value to None saved_weight_value = saved_weights.get(symbolic_weight_name, None) # Add the updated name to the final list for computing missing/unexpected values symbolic_weights_names.add(symbolic_weight_name) # If the current weight is found if saved_weight_value is not None: # Check if the shape of the current weight and the one from the H5 file are different if K.int_shape(symbolic_weight) != saved_weight_value.shape: # If yes we reshape the weight from the H5 file accordingly to the current weight # If the two shapes are not compatible we raise an issue try: array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight)) except AssertionError as e: e.args += (K.int_shape(symbolic_weight), saved_weight_value.shape) raise e else: array = saved_weight_value # We create the tuple that will be loaded and add it to the final list weight_value_tuples.append((symbolic_weight, array)) # Load all the weights K.batch_set_value(weight_value_tuples) # Compute the missing and unexpected layers missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set)) unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names)) return missing_layers, unexpected_layers def init_copy_embeddings(old_embeddings, new_num_tokens): r""" This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be kept or not. 
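
    In the returned mask, ``True`` marks rows copied from the old matrix and ``False`` marks rows that the
    caller replaces with freshly initialized values (see the ``tf.where`` calls in the resizing helpers below).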
    Example:

        - if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4]

            - mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1]
        - if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5]

            - mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4]
    """
    old_num_tokens, old_embedding_dim = shape_list(old_embeddings)
    size_diff = new_num_tokens - old_num_tokens

    # initialize new embeddings
    # Copy token embeddings from the previous ones
    if tf.math.greater(size_diff, 0):
        # If the new size is greater than the old one, we pad the current embeddings with -1 up to the new size,
        # and build a mask identifying the padded positions so that they can be replaced by the values of the
        # newly created embeddings
        current_weights = tf.pad(
            old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1
        )
        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
        mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True)
        mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False)
    else:
        # If the new size is lower than the old one, we keep the current embeddings up to the new size
        current_weights = tf.slice(
            old_embeddings.value(),
            tf.convert_to_tensor([0, 0]),
            tf.convert_to_tensor([new_num_tokens, old_embedding_dim]),
        )
        mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True)

    return mask, current_weights


class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin):
    r"""
    Base class for all TF models.

    :class:`~transformers.TFPreTrainedModel` takes care of storing the configuration of the models and handles methods
    for loading, downloading and saving models as well as a few methods common to all models to:

        * resize the input embeddings,
        * prune heads in the self-attention heads.

    Class attributes (overridden by derived classes):

        - **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
          :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
        - **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated with the base model in
          derived classes of the same architecture adding modules on top of the base model.
    """
    config_class = None
    base_model_prefix = ""
    # a list of re patterns of tensor names to ignore from the model when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_missing = None
    # a list of re patterns of tensor names to ignore from the weights when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_unexpected = None

    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """
        Dummy inputs to build the network.

        Returns:
            :obj:`Dict[str, tf.Tensor]`: The dummy inputs.
        """
        return {
            "input_ids": tf.constant(DUMMY_INPUTS),
        }

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
                "To create a model from a pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                )
            )
        # Save config and origin of the pretrained weights if given in model
        self.config = config
        self.name_or_path = config.name_or_path

    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
                "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
            }
        ]
    )
    def serving(self, inputs):
        """
        Method used for serving the model.

        Args:
            inputs (:obj:`Dict[str, tf.Tensor]`):
                The input of the saved model as a dictionary of tensors.
        """
        output = self.call(inputs)

        return self.serving_output(output)

    def serving_output(self, output):
        """
        Prepare the output of the saved model. Each model must implement this function.

        Args:
            output (:obj:`~transformers.TFBaseModelOutput`):
                The output returned by the model.
        """
        raise NotImplementedError

    def get_input_embeddings(self) -> tf.keras.layers.Layer:
        """
        Returns the model's input embeddings layer.

        Returns:
            :obj:`tf.Variable`: The embeddings layer mapping vocabulary to hidden states.
        """
        main_layer = getattr(self, self.base_model_prefix, self)

        if main_layer is not self:
            return main_layer.get_input_embeddings()
        else:
            raise NotImplementedError

    def set_input_embeddings(self, value):
        """
        Set the model's input embeddings.

        Args:
            value (:obj:`tf.Variable`):
                The new weights mapping hidden states to vocabulary.
        """
        main_layer = getattr(self, self.base_model_prefix, None)

        if main_layer is None:
            raise NotImplementedError("The model does not implement the base_model_prefix attribute.")

        try:
            main_layer.set_input_embeddings(value)
        except AttributeError:
            logger.info("Building the model")
            self(self.dummy_inputs)
            main_layer.set_input_embeddings(value)

    def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]:
        """
        Returns the model's output embeddings.

        Returns:
            :obj:`tf.Variable`: The weights mapping hidden states to vocabulary.
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()

            return lm_head.get_output_embeddings()

        return None  # Overwrite for models with output embeddings

    def set_output_embeddings(self, value):
        """
        Set the model's output embeddings.

        Args:
            value (:obj:`tf.Variable`):
                The new weights mapping hidden states to vocabulary.
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()
            try:
                lm_head.set_output_embeddings(value)
            except AttributeError:
                logger.info("Building the model")
                self(self.dummy_inputs)
                lm_head.set_output_embeddings(value)

    def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]:
        """
        Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the
        embeddings.

        Return:
            :obj:`tf.keras.layers.Layer`: The layer that handles the bias, None if not an LM model.
        """
        warnings.warn(
            "The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning
        )
        return self.get_lm_head()

    def get_prefix_bias_name(self) -> Union[None, str]:
        """
        Get the concatenated prefix name of the bias from the model name to the parent layer.

        Return:
            :obj:`str`: The prefix name of the bias.
        """
        warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
        return None

    def get_bias(self) -> Union[None, Dict[str, tf.Variable]]:
        """
        Dict of bias attached to an LM head. The key represents the name of the bias attribute. For instance
        (illustrative), a masked language model would typically return something like
        ``{"bias": <tf.Variable of shape (vocab_size,)>}``.
Return: :obj:`tf.Variable`: The weights representing the bias, None if not an LM model. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: return lm_head.get_bias() except AttributeError: self(self.dummy_inputs) return lm_head.get_bias() return None def set_bias(self, value): """ Set all the bias in the LM head. Args: value (:obj:`Dict[tf.Variable]`): All the new bias attached to an LM head. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: lm_head.set_bias(value) except AttributeError: self(self.dummy_inputs) lm_head.set_bias(value) def get_lm_head(self) -> tf.keras.layers.Layer: """ The LM Head layer. This method must be overwritten by all the models that have a lm head. Return: :obj:`tf.keras.layers.Layer`: The LM head layer if the model has one, None if not. """ return None def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable: """ Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`. Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method. Arguments: new_num_tokens (:obj:`int`, `optional`): The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens :obj:`tf.Variable` module of the model without doing anything. Return: :obj:`tf.Variable`: Pointer to the input tokens Embeddings Module of the model. """ if new_num_tokens is None or new_num_tokens == self.config.vocab_size: return self._get_word_embedding_weight(self.get_input_embeddings()) model_embeds = self._resize_token_embeddings(new_num_tokens) # Update base model and current model config self.config.vocab_size = new_num_tokens return model_embeds def _get_word_embedding_weight(model, embedding_layer): embeds = getattr(embedding_layer, "weight", None) if embeds is not None: return embeds embeds = getattr(embedding_layer, "decoder", None) if embeds is not None: return embeds # The reason why the attributes don't exist might be # because the model is not built, so retry getting # the argument after building the model model(model.dummy_inputs) embeds = getattr(embedding_layer, "weight", None) if embeds is not None: return embeds embeds = getattr(embedding_layer, "decoder", None) if embeds is not None: return embeds return None def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings()) new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) # if word embeddings are not tied, make sure that lm head bias is resized as well if self.get_bias() is not None: old_lm_head_bias = self.get_bias() new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) self.set_bias(new_lm_head_bias) # if word embeddings are not tied, make sure that lm head decoder is resized as well if self.get_output_embeddings() is not None: old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings()) new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens) self.set_output_embeddings(new_lm_head_decoder) self.set_input_embeddings(new_embeddings) return self.get_input_embeddings() def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens): """ Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end. 
Reducing the size will remove vectors from the end Args: old_lm_head_bias (:obj:`tf.Variable`): Old lm head bias to be resized. new_num_tokens (:obj:`int`, `optional`): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns None Return: :obj:`tf.Variable`: Pointer to the resized bias. """ new_lm_head_bias = {} for attr, weight in old_lm_head_bias.items(): first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight) size_diff = new_num_tokens - old_num_tokens final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens] # initialize new bias if tf.math.greater(size_diff, 0): padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]] current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy] bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True) bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False) else: slice_from = [0] if first_dim is None else [0, 0] current_bias = tf.slice( weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape) ) bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True) new_bias = self.add_weight( shape=final_shape, initializer="zeros", trainable=True, name=weight.name.split(":")[0], ) init_bias = tf.where(bias_mask, current_bias, new_bias.value()) new_bias.assign(init_bias) new_lm_head_bias[attr] = new_bias return new_lm_head_bias def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens): """ Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_lm_head_decoder (:obj:`tf.Variable`): Old lm head decoder to be resized. new_num_tokens (:obj:`int`, `optional`): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns None Return: :obj:`tf.Variable`: Pointer to the resized decoder or None if the output embeddings are differents of the input ones. """ new_lm_head_decoder = old_lm_head_decoder is_input_output_equals = tf.reduce_any( self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder ) if old_lm_head_decoder is not None and not is_input_output_equals: old_embedding_dim = shape_list(old_lm_head_decoder)[1] decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens) new_lm_head_decoder = self.add_weight( shape=(new_num_tokens, old_embedding_dim), initializer="zeros", trainable=True, name=old_lm_head_decoder.name.split(":")[0], ) init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value()) new_lm_head_decoder.assign(init_decoder) return new_lm_head_decoder def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable: """ Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_embeddings (:obj:`tf.Variable`): Old embeddings to be resized. 
new_num_tokens (:obj:`int`, `optional`): New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens :obj:`tf.Variable`` module of the model without doing anything. Return: :obj:`tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if :obj:`new_num_tokens` is :obj:`None` """ old_embedding_dim = shape_list(old_embeddings)[1] init_range = getattr(self.config, "initializer_range", 0.02) embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens) new_embeddings = self.add_weight( name=old_embeddings.name.split(":")[0], shape=[new_num_tokens, old_embedding_dim], initializer=get_initializer(init_range), dtype=tf.float32, ) init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value()) new_embeddings.assign(init_embeddings) return new_embeddings def prune_heads(self, heads_to_prune): """ Prunes heads of the base model. Arguments: heads_to_prune (:obj:`Dict[int, List[int]]`): Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. """ raise NotImplementedError def save_pretrained(self, save_directory, saved_model=False, version=1): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the :func:`~transformers.TFPreTrainedModel.from_pretrained` class method. Arguments: save_directory (:obj:`str`): Directory to which to save. Will be created if it doesn't exist. saved_model (:obj:`bool`, `optional`, defaults to :obj:`False`): If the model has to be saved in saved model format as well or not. version (:obj:`int`, `optional`, defaults to 1): The version of the saved model. A saved model needs to be versioned in order to be properly loaded by TensorFlow Serving as detailed in the official documentation https://www.tensorflow.org/tfx/serving/serving_basic """ if os.path.isfile(save_directory): logger.error("Provided path ({}) should be a directory, not a file".format(save_directory)) return os.makedirs(save_directory, exist_ok=True) if saved_model: saved_model_dir = os.path.join(save_directory, "saved_model", str(version)) self.save(saved_model_dir, include_optimizer=False, signatures=self.serving) logger.info(f"Saved model created in {saved_model_dir}") # Save configuration file self.config.save_pretrained(save_directory) # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME) self.save_weights(output_model_file) logger.info("Model weights saved in {}".format(output_model_file)) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiate a pretrained TF 2.0 model from a pre-trained model configuration. The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those weights are discarded. 
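
        Weights are matched to saved tensors by layer name and shape (see :func:`load_tf_weights` above); the
        warnings described here report any layers that could not be matched.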
        Parameters:
            pretrained_model_name_or_path (:obj:`str`, `optional`):
                Can be either:

                    - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced
                      under a user or organization name, like ``dbmdz/bert-base-german-cased``.
                    - A path to a `directory` containing model weights saved using
                      :func:`~transformers.TFPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
                    - A path or url to a `PyTorch state_dict save file` (e.g, ``./pt_model/pytorch_model.bin``). In
                      this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be
                      provided as ``config`` argument. This loading path is slower than converting the PyTorch model
                      to a TensorFlow model using the provided conversion scripts and loading the TensorFlow model
                      afterwards.
                    - :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
                      arguments ``config`` and ``state_dict``).
            model_args (sequence of positional arguments, `optional`):
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
            config (:obj:`Union[PretrainedConfig, str]`, `optional`):
                Can be either:

                    - an instance of a class derived from :class:`~transformers.PretrainedConfig`,
                    - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.

                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the `model id` string of a pretrained
                      model).
                    - The model was saved using :func:`~transformers.TFPreTrainedModel.save_pretrained` and is
                      reloaded by supplying the save directory.
                    - The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
                      configuration JSON file named `config.json` is found in the directory.
            from_pt (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Load the model weights from a PyTorch state_dict save file (see docstring of
                ``pretrained_model_name_or_path`` argument).
            cache_dir (:obj:`str`, `optional`):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding
                the cached versions if they exist.
            resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
                file exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error
                messages.
            local_files_only (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
            use_auth_token (:obj:`str` or `bool`, `optional`):
                The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
                generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any identifier allowed by git. mirror(:obj:`str`, `optional`, defaults to :obj:`None`): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. kwargs (remaining dictionary of keyword arguments, `optional`): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., :obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. .. note:: Passing :obj:`use_auth_token=True` is required when you want to use a private model. Examples:: >>> from transformers import BertConfig, TFBertModel >>> # Download model and configuration from huggingface.co and cache. >>> model = TFBertModel.from_pretrained('bert-base-uncased') >>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable). >>> model = TFBertModel.from_pretrained('./test/saved_model/') >>> # Update configuration during loading. >>> model = TFBertModel.from_pretrained('bert-base-uncased', output_attentions=True) >>> assert model.config.output_attentions == True >>> # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable). 
>>> config = BertConfig.from_json_file('./pt_model/my_pt_model_config.json') >>> model = TFBertModel.from_pretrained('./pt_model/my_pytorch_model.bin', from_pt=True, config=config) """ config = kwargs.pop("config", None) cache_dir = kwargs.pop("cache_dir", None) from_pt = kwargs.pop("from_pt", False) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) local_files_only = kwargs.pop("local_files_only", False) use_auth_token = kwargs.pop("use_auth_token", None) revision = kwargs.pop("revision", None) mirror = kwargs.pop("mirror", None) # Load config if we don't provide a configuration if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained( config_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, **kwargs, ) else: model_kwargs = kwargs # Load model if pretrained_model_name_or_path is not None: if os.path.isdir(pretrained_model_name_or_path): if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint in priority if from_pt archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): # Load from a TF 2.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) else: raise EnvironmentError( "Error no file named {} found in directory {} or `from_pt` set to False".format( [WEIGHTS_NAME, TF2_WEIGHTS_NAME], pretrained_model_name_or_path ) ) elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path elif os.path.isfile(pretrained_model_name_or_path + ".index"): archive_file = pretrained_model_name_or_path + ".index" else: archive_file = hf_bucket_url( pretrained_model_name_or_path, filename=(WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME), revision=revision, mirror=mirror, ) try: # Load from URL or cache if already cached resolved_archive_file = cached_path( archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, ) except EnvironmentError as err: logger.error(err) msg = ( f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n" f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co./models'\n\n" f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {TF2_WEIGHTS_NAME}, {WEIGHTS_NAME}.\n\n" ) raise EnvironmentError(msg) if resolved_archive_file == archive_file: logger.info("loading weights file {}".format(archive_file)) else: logger.info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file)) else: resolved_archive_file = None config.name_or_path = pretrained_model_name_or_path # Instantiate model. 
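        # NOTE (added comment): instantiation below only creates the Python object; the dummy
        # forward pass that follows builds the variables so the H5 weights can be matched by
        # name and shape.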
model = cls(config, *model_args, **model_kwargs) if from_pt: from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model # Load from a PyTorch checkpoint return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True) model(model.dummy_inputs) # build the network with dummy inputs assert os.path.isfile(resolved_archive_file), "Error retrieving file {}".format(resolved_archive_file) # 'by_name' allow us to do transfer learning by skipping/adding layers # see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357 try: missing_keys, unexpected_keys = load_tf_weights(model, resolved_archive_file) except OSError: raise OSError( "Unable to load weights from h5 file. " "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. " ) model(model.dummy_inputs) # Make sure restore ops are run if cls._keys_to_ignore_on_load_missing is not None: for pat in cls._keys_to_ignore_on_load_missing: missing_keys = [k for k in missing_keys if re.search(pat, k) is None] if cls._keys_to_ignore_on_load_unexpected is not None: for pat in cls._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning( f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when " f"initializing {model.__class__.__name__}: {unexpected_keys}\n" f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task " f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n" f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect " f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." ) else: logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: logger.warning( f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} " f"and are newly initialized: {missing_keys}\n" f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) else: logger.warning( f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n" f"If your task is similar to the task the model of the checkpoint was trained on, " f"you can already use {model.__class__.__name__} for predictions without further training." ) if output_loading_info: loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys} return model, loading_info return model class TFConv1D(tf.keras.layers.Layer): """ 1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2). Basically works like a linear layer but the weights are transposed. Args: nf (:obj:`int`): The number of output features. nx (:obj:`int`): The number of input features. initializer_range (:obj:`float`, `optional`, defaults to 0.02): The standard deviation to use to initialize the weights. kwargs: Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`. 
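
    Example (illustrative sketch, not from the original docstring; sizes are arbitrary)::

        # a GPT-2-style fused projection producing query, key and value in one matmul
        c_attn = TFConv1D(nf=3 * 768, nx=768, name="c_attn")
        # input of shape (batch, seq, 768) -> output of shape (batch, seq, 2304)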
""" def __init__(self, nf, nx, initializer_range=0.02, **kwargs): super().__init__(**kwargs) self.nf = nf self.nx = nx self.initializer_range = initializer_range def build(self, input_shape): self.weight = self.add_weight( "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range) ) self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer()) def call(self, x): bz, sl = shape_list(x)[:2] x = tf.reshape(x, [-1, self.nx]) x = tf.matmul(x, self.weight) + self.bias x = tf.reshape(x, [bz, sl, self.nf]) return x class WordEmbeddings(tf.keras.layers.Layer): def __init__(self, vocab_size: int, hidden_size: int, initializer_range: float, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.initializer_range = initializer_range def build(self, input_shape): self.word_embeddings = self.add_weight( name="weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) super().build(input_shape=input_shape) def get_config(self): config = { "vocab_size": self.vocab_size, "hidden_size": self.hidden_size, "initializer_range": self.initializer_range, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, input_ids): flat_input_ids = tf.reshape(tensor=input_ids, shape=[-1]) embeddings = tf.gather(params=self.word_embeddings, indices=flat_input_ids) embeddings = tf.reshape( tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=input_ids), [self.hidden_size]], axis=0) ) embeddings.set_shape(shape=input_ids.shape.as_list() + [self.hidden_size]) return embeddings class TokenTypeEmbeddings(tf.keras.layers.Layer): def __init__(self, type_vocab_size: int, hidden_size: int, initializer_range: float, **kwargs): super().__init__(**kwargs) self.type_vocab_size = type_vocab_size self.hidden_size = hidden_size self.initializer_range = initializer_range def build(self, input_shape): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.type_vocab_size, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) super().build(input_shape=input_shape) def get_config(self): config = { "type_vocab_size": self.type_vocab_size, "hidden_size": self.hidden_size, "initializer_range": self.initializer_range, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, token_type_ids): flat_token_type_ids = tf.reshape(tensor=token_type_ids, shape=[-1]) one_hot_data = tf.one_hot(indices=flat_token_type_ids, depth=self.type_vocab_size, dtype=self._compute_dtype) embeddings = tf.matmul(a=one_hot_data, b=self.token_type_embeddings) embeddings = tf.reshape( tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=token_type_ids), [self.hidden_size]], axis=0) ) embeddings.set_shape(shape=token_type_ids.shape.as_list() + [self.hidden_size]) return embeddings class PositionEmbeddings(tf.keras.layers.Layer): def __init__(self, max_position_embeddings: int, hidden_size: int, initializer_range: float, **kwargs): super().__init__(**kwargs) self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.initializer_range = initializer_range def build(self, input_shape): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) 
super().build(input_shape) def get_config(self): config = { "max_position_embeddings": self.max_position_embeddings, "hidden_size": self.hidden_size, "initializer_range": self.initializer_range, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, position_ids): input_shape = shape_list(tensor=position_ids) position_embeddings = self.position_embeddings[: input_shape[1], :] return tf.broadcast_to(input=position_embeddings, shape=input_shape) class TFSharedEmbeddings(tf.keras.layers.Layer): r""" Construct shared token embeddings. The weights of the embedding layer is usually shared with the weights of the linear decoder when doing language modeling. Args: vocab_size (:obj:`int`): The size of the vocabulary, e.g., the number of unique tokens. hidden_size (:obj:`int`): The size of the embedding vectors. initializer_range (:obj:`float`, `optional`): The standard deviation to use when initializing the weights. If no value is provided, it will default to :math:`1/\sqrt{hidden\_size}`. kwargs: Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`. """ def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.initializer_range = hidden_size ** -0.5 if initializer_range is None else initializer_range def build(self, input_shape): """ Build shared token embedding layer Shared weights logic adapted from https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ self.weight = self.add_weight( "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range) ) super().build(input_shape) def get_config(self): config = { "vocab_size": self.vocab_size, "hidden_size": self.hidden_size, "initializer_range": self.initializer_range, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor: """ Get token embeddings of inputs or decode final hidden state. Args: inputs (:obj:`tf.Tensor`): In embedding mode, should be an int64 tensor with shape :obj:`[batch_size, length]`. In linear mode, should be a float tensor with shape :obj:`[batch_size, length, hidden_size]`. mode (:obj:`str`, defaults to :obj:`"embedding"`): A valid value is either :obj:`"embedding"` or :obj:`"linear"`, the first one indicates that the layer should be used as an embedding layer, the second one that the layer should be used as a linear decoder. Returns: :obj:`tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape :obj:`[batch_size, length, embedding_size]`. In linear mode, the output is a float32 with shape :obj:`[batch_size, length, vocab_size]`. Raises: ValueError: if :obj:`mode` is not valid. Shared weights logic is adapted from `here <https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24>`__. """ if mode == "embedding": return self._embedding(inputs) elif mode == "linear": return self._linear(inputs) else: raise ValueError("mode {} is not valid.".format(mode)) def _embedding(self, input_ids): """Applies embedding based on inputs tensor.""" return tf.gather(self.weight, input_ids) def _linear(self, inputs): """ Computes logits by running inputs through a linear layer. 
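
        Because the kernel is the shared embedding matrix applied with ``transpose_b=True``, this is the
        weight-tying decode path: no separate output projection matrix is created.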
        Args:
            inputs: A float32 tensor with shape [..., hidden_size]

        Returns:
            float32 tensor with shape [..., vocab_size].
        """
        first_dims = shape_list(inputs)[:-1]
        x = tf.reshape(inputs, [-1, self.hidden_size])
        logits = tf.matmul(x, self.weight, transpose_b=True)

        return tf.reshape(logits, first_dims + [self.vocab_size])


class TFSequenceSummary(tf.keras.layers.Layer):
    """
    Compute a single vector summary of a sequence's hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model. Relevant arguments in the config class of the model are (refer to the
            actual config class of your model for the default values it uses):

            - **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are:

                - :obj:`"last"` -- Take the last token hidden state (like XLNet)
                - :obj:`"first"` -- Take the first token hidden state (like Bert)
                - :obj:`"mean"` -- Take the mean of all tokens hidden states
                - :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
                - :obj:`"attn"` -- Not implemented now, use multi-head attention

            - **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction.
            - **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to
              :obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`).
            - **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the
              output, another string or :obj:`None` will add no activation.
            - **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and
              activation.
            - **summary_last_dropout** (:obj:`float`) -- Optional dropout probability after the projection and
              activation.

        initializer_range (:obj:`float`, defaults to 0.02): The standard deviation to use to initialize the weights.
        kwargs:
            Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
    """

    def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs):
        super().__init__(**kwargs)

        self.summary_type = config.summary_type if hasattr(config, "summary_type") else "last"
        if self.summary_type == "attn":
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. 
https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj if self.has_summary: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = tf.keras.layers.Dense( num_classes, kernel_initializer=get_initializer(initializer_range), name="summary" ) self.has_activation = hasattr(config, "summary_activation") and config.summary_activation == "tanh" if self.has_activation: self.activation = tf.keras.activations.tanh self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0 if self.has_first_dropout: self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout) self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0 if self.has_last_dropout: self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout) def call(self, inputs, cls_index=None, training=False): if not isinstance(inputs, (dict, tuple, list)): hidden_states = inputs elif isinstance(inputs, (tuple, list)): hidden_states = inputs[0] cls_index = inputs[1] if len(inputs) > 1 else None assert len(inputs) <= 2, "Too many inputs." else: hidden_states = inputs.get("hidden_states") cls_index = inputs.get("cls_index", None) if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = tf.reduce_mean(hidden_states, axis=1) elif self.summary_type == "cls_index": hidden_shape = shape_list(hidden_states) # e.g. [batch, num choices, seq length, hidden dims] if cls_index is None: cls_index = tf.fill( hidden_shape[:-2], hidden_shape[-2] - 1 ) # A tensor full of shape [batch] or [batch, num choices] full of sequence length cls_shape = shape_list(cls_index) if len(cls_shape) <= len(hidden_shape) - 2: cls_index = cls_index[..., tf.newaxis] # else: # cls_index = cls_index[..., tf.newaxis] # cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2) output = tf.squeeze( output, axis=len(hidden_shape) - 2 ) # shape of output: (batch, num choices, hidden_size) elif self.summary_type == "attn": raise NotImplementedError if self.has_first_dropout: output = self.first_dropout(output, training=training) if self.has_summary: output = self.summary(output) if self.has_activation: output = self.activation(output) if self.has_last_dropout: output = self.last_dropout(output, training=training) return output def shape_list(tensor: tf.Tensor) -> List[int]: """ Deal with dynamic shape in tensorflow cleanly. Args: tensor (:obj:`tf.Tensor`): The tensor we want the shape of. Returns: :obj:`List[int]`: The shape of the tensor as a list. """ dynamic = tf.shape(tensor) if tensor.shape == tf.TensorShape(None): return dynamic static = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(static)] def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal: """ Creates a :obj:`tf.initializers.TruncatedNormal` with the given range. 
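
    For instance (illustrative), ``get_initializer(0.02)`` matches the fallback used by
    :meth:`TFPreTrainedModel._get_resized_embeddings` when ``config.initializer_range`` is absent.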
    Args:
        initializer_range (:obj:`float`, defaults to 0.02):
            Standard deviation of the truncated normal initializer.

    Returns:
        :obj:`tf.initializers.TruncatedNormal`: The truncated normal initializer.
    """
    return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)


class TFWrappedEmbeddings:
    """
    This class wraps the :class:`TFSharedEmbeddings` layer into a plain Python (non-Keras-layer) class to avoid
    problems with weight restoring. It also makes sure that the layer is called from the correct scope so that the
    correct weights are saved and restored.
    """

    def __init__(self, layer, abs_scope_name=None):
        self._layer = layer
        self._abs_scope_name = abs_scope_name

    def call(self, inputs, mode="embedding"):
        if self._abs_scope_name is None:
            return self._layer.call(inputs, mode)

        # if an abs scope name is given to the embedding variable, call variable from absolute scope
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer.call(inputs, mode)

    def __call__(self, inputs, mode="embedding"):
        if self._abs_scope_name is None:
            return self._layer(inputs, mode)

        # if an abs scope name is given to the embedding variable, call variable from absolute scope
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer(inputs, mode)
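

# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original module). It
# exercises the shared-embedding weight tying defined above with arbitrary
# sizes: "embedding" mode looks up rows of the table, while "linear" mode
# reuses the same matrix, transposed, to produce vocabulary logits.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    shared = TFSharedEmbeddings(vocab_size=100, hidden_size=16, name="shared")
    token_ids = tf.constant([[4, 8, 15]])                 # (batch=1, seq=3), int32
    hidden_states = shared(token_ids, mode="embedding")   # -> (1, 3, 16)
    logits = shared(hidden_states, mode="linear")         # -> (1, 3, 100)
    print(shape_list(hidden_states), shape_list(logits))  # [1, 3, 16] [1, 3, 100]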
[ "tensorflow.convert_to_tensor", "numpy.asarray", "tensorflow.python.keras.backend.int_shape", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.python.keras.saving.hdf5_format.load_attributes_from_hdf5_group", "tensorflow.rank", "tensorflow.math.reduce_any", "tensorflow.gather", "tensorflow.name_scope", "tensorflow.keras.initializers.TruncatedNormal", "tensorflow.compat.v1.variable_scope", "tensorflow.matmul", "tensorflow.TensorShape", "tensorflow.executing_eagerly", "tensorflow.fill", "tensorflow.shape", "tensorflow.keras.losses.MeanSquaredError", "tensorflow.zeros_initializer", "tensorflow.one_hot", "tensorflow.print", "tensorflow.python.keras.backend.batch_set_value", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.broadcast_to", "tensorflow.reshape", "tensorflow.math.greater", "tensorflow.keras.layers.Dropout", "tensorflow.TensorSpec" ]
src/transformers/modeling_tf_utils.py
[(98, 'functools.wraps', 'functools.wraps', (['initializer'], {}), False, 'import functools\n'), (266, 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), True, 'import tensorflow as tf\n'), (527, 'tensorflow.python.keras.backend.batch_set_value', 'K.batch_set_value', (['weight_value_tuples'], {}), True, 'from tensorflow.python.keras import backend as K\n'), (554, 'tensorflow.math.greater', 'tf.math.greater', (['size_diff', '(0)'], {}), True, 'import tensorflow as tf\n'), (1666, 'tensorflow.shape', 'tf.shape', (['tensor'], {}), True, 'import tensorflow as tf\n'), (1686, 'tensorflow.keras.initializers.TruncatedNormal', 'tf.keras.initializers.TruncatedNormal', ([], {'stddev': 'initializer_range'}), True, 'import tensorflow as tf\n'), (147, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (163, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (183, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (188, 'tensorflow.math.reduce_any', 'tf.math.reduce_any', (['(labels == -1)'], {}), True, 'import tensorflow as tf\n'), (238, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (331, 'warnings.warn', 'warnings.warn', (['"""The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (339, 'warnings.warn', 'warnings.warn', (['"""The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (462, 'h5py.File', 'h5py.File', (['resolved_archive_file', '"""r"""'], {}), False, 'import h5py\n'), (732, 'warnings.warn', 'warnings.warn', (['"""The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (744, 'warnings.warn', 'warnings.warn', (['"""The method get_prefix_bias_name is deprecated. 
Please use `get_bias` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (1012, 'os.path.isfile', 'os.path.isfile', (['save_directory'], {}), False, 'import os\n'), (1015, 'os.makedirs', 'os.makedirs', (['save_directory'], {'exist_ok': '(True)'}), False, 'import os\n'), (1026, 'os.path.join', 'os.path.join', (['save_directory', 'TF2_WEIGHTS_NAME'], {}), False, 'import os\n'), (1235, 'os.path.isfile', 'os.path.isfile', (['resolved_archive_file'], {}), False, 'import os\n'), (1321, 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, self.nx]'], {}), True, 'import tensorflow as tf\n'), (1324, 'tensorflow.reshape', 'tf.reshape', (['x', '[bz, sl, self.nf]'], {}), True, 'import tensorflow as tf\n'), (1357, 'tensorflow.reshape', 'tf.reshape', ([], {'tensor': 'input_ids', 'shape': '[-1]'}), True, 'import tensorflow as tf\n'), (1358, 'tensorflow.gather', 'tf.gather', ([], {'params': 'self.word_embeddings', 'indices': 'flat_input_ids'}), True, 'import tensorflow as tf\n'), (1396, 'tensorflow.reshape', 'tf.reshape', ([], {'tensor': 'token_type_ids', 'shape': '[-1]'}), True, 'import tensorflow as tf\n'), (1397, 'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'flat_token_type_ids', 'depth': 'self.type_vocab_size', 'dtype': 'self._compute_dtype'}), True, 'import tensorflow as tf\n'), (1398, 'tensorflow.matmul', 'tf.matmul', ([], {'a': 'one_hot_data', 'b': 'self.token_type_embeddings'}), True, 'import tensorflow as tf\n'), (1439, 'tensorflow.broadcast_to', 'tf.broadcast_to', ([], {'input': 'position_embeddings', 'shape': 'input_shape'}), True, 'import tensorflow as tf\n'), (1521, 'tensorflow.gather', 'tf.gather', (['self.weight', 'input_ids'], {}), True, 'import tensorflow as tf\n'), (1534, 'tensorflow.reshape', 'tf.reshape', (['inputs', '[-1, self.hidden_size]'], {}), True, 'import tensorflow as tf\n'), (1535, 'tensorflow.matmul', 'tf.matmul', (['x', 'self.weight'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (1537, 'tensorflow.reshape', 'tf.reshape', (['logits', '(first_dims + [self.vocab_size])'], {}), True, 'import tensorflow as tf\n'), (1668, 'tensorflow.TensorShape', 'tf.TensorShape', (['None'], {}), True, 'import tensorflow as tf\n'), (132, 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {}), True, 'import tensorflow as tf\n'), (151, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (153, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (189, 'warnings.warn', 'warnings.warn', (['"""Using `-1` to mask the loss for the token is deprecated. 
Please use `-100` instead."""'], {}), False, 'import warnings\n'), (194, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (206, 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (208, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.reshape', 'tf.reshape', (['logits', '(-1, 2)'], {}), True, 'import tensorflow as tf\n'), (245, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (288, 'tensorflow.print', 'tf.print', (['"""The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model.They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained(\'name\', output_attentions=True)`)."""'], {}), True, 'import tensorflow as tf\n'), (297, 'tensorflow.print', 'tf.print', (['"""The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."""'], {}), True, 'import tensorflow as tf\n'), (323, 'inspect.signature', 'inspect.signature', (['func'], {}), False, 'import inspect\n'), (464, 'tensorflow.python.keras.saving.hdf5_format.load_attributes_from_hdf5_group', 'hdf5_format.load_attributes_from_hdf5_group', (['f', '"""layer_names"""'], {}), False, 'from tensorflow.python.keras.saving import hdf5_format\n'), (559, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[0, size_diff], [0, 0]]'], {}), True, 'import tensorflow as tf\n'), (562, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[num_tokens_to_copy, 1]'], {}), True, 'import tensorflow as tf\n'), (563, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[0, size_diff], [0, 0]]'], {}), True, 'import tensorflow as tf\n'), (568, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[0, 0]'], {}), True, 'import tensorflow as tf\n'), (569, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[new_num_tokens, old_embedding_dim]'], {}), True, 'import tensorflow as tf\n'), (571, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[new_num_tokens, 1]'], {}), True, 'import tensorflow as tf\n'), (611, 'tensorflow.constant', 'tf.constant', (['DUMMY_INPUTS'], {}), True, 'import tensorflow as tf\n'), (886, 'tensorflow.math.greater', 'tf.math.greater', (['size_diff', '(0)'], {}), True, 'import tensorflow as tf\n'), (1171, 'os.path.isdir', 'os.path.isdir', (['pretrained_model_name_or_path'], {}), False, 'import os\n'), (1322, 'tensorflow.matmul', 'tf.matmul', (['x', 'self.weight'], {}), True, 'import tensorflow as tf\n'), (1598, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.summary_first_dropout'], {}), True, 'import tensorflow as tf\n'), (1602, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.summary_last_dropout'], {}), True, 'import tensorflow as tf\n'), (1705, 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['self._abs_scope_name'], {'auxiliary_name_scope': '(False)'}), True, 'import tensorflow as tf\n'), (1714, 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['self._abs_scope_name'], {'auxiliary_name_scope': 
'(False)'}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (192, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (378, 'warnings.warn', 'warnings.warn', (['"""The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (386, 'warnings.warn', 'warnings.warn', (['"""The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (488, 'tensorflow.python.keras.saving.hdf5_format.load_attributes_from_hdf5_group', 'hdf5_format.load_attributes_from_hdf5_group', (['h5_layer_object', '"""weight_names"""'], {}), False, 'from tensorflow.python.keras.saving import hdf5_format\n'), (1316, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (1706, 'tensorflow.name_scope', 'tf.name_scope', (['abs_scope_name.original_name_scope'], {}), True, 'import tensorflow as tf\n'), (1715, 'tensorflow.name_scope', 'tf.name_scope', (['abs_scope_name.original_name_scope'], {}), True, 'import tensorflow as tf\n'), (491, 'numpy.asarray', 'np.asarray', (['h5_layer_object[weight_name]'], {}), True, 'import numpy as np\n'), (631, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None)', 'tf.int32'], {'name': '"""input_ids"""'}), True, 'import tensorflow as tf\n'), (632, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None)', 'tf.int32'], {'name': '"""attention_mask"""'}), True, 'import tensorflow as tf\n'), (633, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None)', 'tf.int32'], {'name': '"""token_type_ids"""'}), True, 'import tensorflow as tf\n'), (881, 'tensorflow.rank', 'tf.rank', (['weight'], {}), True, 'import tensorflow as tf\n'), (888, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['padding_shape'], {}), True, 'import tensorflow as tf\n'), (891, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['mask_shape'], {}), True, 'import tensorflow as tf\n'), (892, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['padding_shape'], {}), True, 'import tensorflow as tf\n'), (896, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['slice_from'], {}), True, 'import tensorflow as tf\n'), (896, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['final_shape'], {}), True, 'import tensorflow as tf\n'), (898, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['final_shape'], {}), True, 'import tensorflow as tf\n'), (1174, 'os.path.join', 'os.path.join', (['pretrained_model_name_or_path', 'WEIGHTS_NAME'], {}), False, 'import os\n'), (1184, 'os.path.isfile', 'os.path.isfile', (['pretrained_model_name_or_path'], {}), False, 'import os\n'), (1186, 'os.path.isfile', 'os.path.isfile', (["(pretrained_model_name_or_path + '.index')"], {}), False, 'import os\n'), (1620, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['hidden_states'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (1172, 'os.path.join', 'os.path.join', (['pretrained_model_name_or_path', 'WEIGHTS_NAME'], {}), False, 'import os\n'), (1175, 'os.path.join', 'os.path.join', (['pretrained_model_name_or_path', 'TF2_WEIGHTS_NAME'], {}), False, 'import os\n'), (1177, 'os.path.join', 'os.path.join', (['pretrained_model_name_or_path', 'TF2_WEIGHTS_NAME'], {}), False, 'import os\n'), (512, 
'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['symbolic_weight'], {}), True, 'from tensorflow.python.keras import backend as K\n'), (1250, 're.search', 're.search', (['pat', 'k'], {}), False, 'import re\n'), (1254, 're.search', 're.search', (['pat', 'k'], {}), False, 'import re\n'), (1624, 'tensorflow.fill', 'tf.fill', (['hidden_shape[:-2]', '(hidden_shape[-2] - 1)'], {}), True, 'import tensorflow as tf\n'), (516, 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['symbolic_weight'], {}), True, 'from tensorflow.python.keras import backend as K\n'), (518, 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['symbolic_weight'], {}), True, 'from tensorflow.python.keras import backend as K\n')]
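The api_extract entries above capture two standard TensorFlow embedding-lookup idioms: a gather-based word-embedding lookup (flatten the ids, gather rows, reshape back) and a one-hot-plus-matmul lookup for tiny vocabularies such as token types. Below is a minimal, self-contained sketch of both patterns; the tensor shapes and variable names are illustrative assumptions, not the library's actual classes.

# Sketch of the two lookup patterns recorded above (illustrative shapes/names).
import tensorflow as tf

vocab_size, type_vocab_size, hidden_size = 100, 2, 16
word_embeddings = tf.random.normal([vocab_size, hidden_size])
token_type_embeddings = tf.random.normal([type_vocab_size, hidden_size])

input_ids = tf.constant([[5, 7, 9], [3, 2, 1]])  # (batch, seq)
token_type_ids = tf.zeros_like(input_ids)

# Pattern 1: gather-based lookup -- flatten the ids, gather embedding rows,
# then restore the (batch, seq, hidden) shape.
flat_input_ids = tf.reshape(tensor=input_ids, shape=[-1])
embeds = tf.gather(params=word_embeddings, indices=flat_input_ids)
embeds = tf.reshape(embeds, [input_ids.shape[0], input_ids.shape[1], hidden_size])

# Pattern 2: one-hot + matmul lookup, a common choice for very small
# vocabularies (e.g. two token types), where a dense matmul can be faster
# than a gather on some accelerators.
flat_token_type_ids = tf.reshape(tensor=token_type_ids, shape=[-1])
one_hot_data = tf.one_hot(indices=flat_token_type_ids, depth=type_vocab_size, dtype=embeds.dtype)
type_embeds = tf.matmul(a=one_hot_data, b=token_type_embeddings)
type_embeds = tf.reshape(type_embeds, [input_ids.shape[0], input_ids.shape[1], hidden_size])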
johntiger1/transformers
38fadb6faf30c103531fa5fcd52b3f110e7582d0
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.file_utils import cached_property from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from .test_configuration_common import ConfigTester from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class TFMBartModelTester: config_cls = MBartConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFMBartModel(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = input_ids[:1, :] attention_mask = inputs_dict["attention_mask"][:1, :] self.batch_size = 1 # first forward pass outputs = 
model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() past_key_values = past_key_values[1] def test_compile_tf_model(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy") model_class = self.all_generative_model_classes[0] input_ids = { "decoder_input_ids": tf.keras.Input(batch_shape=(2, 2000), name="decoder_input_ids", dtype="int32"), "input_ids": tf.keras.Input(batch_shape=(2, 2000), name="input_ids", dtype="int32"), } # Prepare our model model = model_class(config) model(self._prepare_for_class(inputs_dict, model_class)) # Model must be called before saving. # Let's load it from the disk to be sure we can use pretrained weights with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname) outputs_dict = model(input_ids) hidden_states = outputs_dict[0] # Add a dense layer on top to test integration with other keras modules outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states) # Compile extended model extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs]) extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric]) def prepare_mbart_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8), ], axis=-1, ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_tf class TFMBartModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else () is_encoder_decoder = True test_pruning = False def setUp(self): self.model_tester = TFMBartModelTester(self) self.config_tester = ConfigTester(self, config_class=MBartConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer) if model_class in self.all_generative_model_classes: x = model.get_output_embeddings() assert isinstance(x, tf.keras.layers.Layer) name = model.get_bias() assert isinstance(name, dict) for k, v in name.items(): assert isinstance(v, tf.Variable) else: x = model.get_output_embeddings() assert x is None name = model.get_bias() assert name is None def test_saved_model_creation(self): # This test is too long (>30sec) and makes fail the CI pass def test_resize_token_embeddings(self): config, 
inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(model, embedding_layer): if hasattr(embedding_layer, "weight"): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model(model.dummy_inputs) if hasattr(embedding_layer, "weight"): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10, None]: # build the embeddings model = model_class(config=config) old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) old_final_logits_bias = model.get_bias() # reshape the embeddings model.resize_token_embeddings(size) new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) new_final_logits_bias = model.get_bias() # check that the resized embeddings size matches the desired size. assert_size = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0], assert_size) # check that weights remain the same after resizing models_equal = True for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0], assert_size) models_equal = True for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_final_logits_bias is not None and new_final_logits_bias is not None: old_final_logits_bias = old_final_logits_bias["final_logits_bias"] new_final_logits_bias = new_final_logits_bias["final_logits_bias"] self.assertEqual(new_final_logits_bias.shape[0], 1) self.assertEqual(new_final_logits_bias.shape[1], assert_size) models_equal = True for old, new in zip(old_final_logits_bias.value(), new_final_logits_bias.value()): for p1, p2 in zip(old, new): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) def _assert_tensors_equal(a, b, atol=1e-12, prefix=""): """If tensors not close, or a and b arent both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if tf.debugging.assert_near(a, b, atol=atol): return True raise except Exception: msg = "{} != {}".format(a, b) if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return tf.constant(tok_lst, dtype=tf.int32) TOLERANCE = 1e-4 @require_sentencepiece @require_tokenizers @require_tf class TFMBartModelIntegrationTest(unittest.TestCase): src_text = [ " UN Chief Says There Is No Military Solution in Syria", ] expected_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", ] model_name = "facebook/mbart-large-en-ro" @cached_property def tokenizer(self): return AutoTokenizer.from_pretrained(self.model_name) @cached_property def model(self): model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name) return model def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs): generated_words = self.translate_src_text(**tokenizer_kwargs) 
self.assertListEqual(self.expected_text, generated_words) def translate_src_text(self, **tokenizer_kwargs): model_inputs = self.tokenizer.prepare_seq2seq_batch( src_texts=self.src_text, **tokenizer_kwargs, return_tensors="tf" ) generated_ids = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2 ) generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True) return generated_words @slow def test_batch_generation_en_ro(self): self._assert_generated_batch_equal_expected()
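A condensed, end-to-end sketch of the generation path that TFMBartModelIntegrationTest above exercises; it reuses only the calls that appear in the test itself, but assumes the facebook/mbart-large-en-ro checkpoint can be downloaded.

# En->Ro translation round trip, mirroring translate_src_text() in the record above.
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

model_name = "facebook/mbart-large-en-ro"
tok = AutoTokenizer.from_pretrained(model_name)
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)

batch = tok.prepare_seq2seq_batch(
    src_texts=[" UN Chief Says There Is No Military Solution in Syria"],
    return_tensors="tf",
)
generated_ids = model.generate(
    batch.input_ids, attention_mask=batch.attention_mask, num_beams=2
)
print(tok.batch_decode(generated_ids, skip_special_tokens=True))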
[ "tensorflow.math.abs", "tensorflow.math.not_equal", "tensorflow.concat", "tensorflow.constant", "tensorflow.keras.Input", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.layers.Dense", "tensorflow.ones", "tensorflow.keras.Model", "tensorflow.keras.optimizers.Adam", "tensorflow.debugging.assert_near", "tensorflow.keras.metrics.SparseCategoricalAccuracy" ]
tests/test_modeling_tf_mbart.py
[(27, 'transformers.is_tf_available', 'is_tf_available', ([], {}), False, 'from transformers import AutoTokenizer, MBartConfig, is_tf_available\n'), (291, 'tensorflow.constant', 'tf.constant', (['tok_lst'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (78, 'tensorflow.concat', 'tf.concat', (['[input_ids, eos_tensor]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (120, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(3e-05)', 'epsilon': '(1e-08)', 'clipnorm': '(1.0)'}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', (['"""accuracy"""'], {}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[input_ids]', 'outputs': '[outputs]'}), True, 'import tensorflow as tf\n'), (171, 'transformers.is_tf_available', 'is_tf_available', ([], {}), False, 'from transformers import AutoTokenizer, MBartConfig, is_tf_available\n'), (172, 'transformers.is_tf_available', 'is_tf_available', ([], {}), False, 'from transformers import AutoTokenizer, MBartConfig, is_tf_available\n'), (280, 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['a', 'b'], {'atol': 'atol'}), True, 'import tensorflow as tf\n'), (311, 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.model_name'], {}), False, 'from transformers import AutoTokenizer, MBartConfig, is_tf_available\n'), (315, 'transformers.TFAutoModelForSeq2SeqLM.from_pretrained', 'TFAutoModelForSeq2SeqLM.from_pretrained', (['self.model_name'], {}), False, 'from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel\n'), (77, 'tensorflow.constant', 'tf.constant', (['([self.eos_token_id] * self.batch_size)'], {}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'batch_shape': '(2, 2000)', 'name': '"""decoder_input_ids"""', 'dtype': '"""int32"""'}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'batch_shape': '(2, 2000)', 'name': '"""input_ids"""', 'dtype': '"""int32"""'}), True, 'import tensorflow as tf\n'), (132, 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), False, 'import tempfile\n'), (138, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'activation': '"""softmax"""', 'name': '"""outputs"""'}), True, 'import tensorflow as tf\n'), (152, 'tensorflow.math.not_equal', 'tf.math.not_equal', (['input_ids', 'config.pad_token_id'], {}), True, 'import tensorflow as tf\n'), (104, 'transformers.TFMBartModel', 'TFMBartModel', ([], {'config': 'config'}), False, 'from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel\n'), (156, 'tensorflow.ones', 'tf.ones', (['decoder_input_ids[:, :1].shape'], {'dtype': 'tf.int8'}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.math.not_equal', 'tf.math.not_equal', (['decoder_input_ids[:, 1:]', 'config.pad_token_id'], {}), True, 'import tensorflow as tf\n'), (248, 'tensorflow.math.abs', 'tf.math.abs', (['(p1 - p2)'], {}), True, 'import tensorflow as tf\n'), (257, 'tensorflow.math.abs', 'tf.math.abs', (['(p1 - p2)'], {}), True, 'import tensorflow as tf\n'), (270, 'tensorflow.math.abs', 'tf.math.abs', 
(['(p1 - p2)'], {}), True, 'import tensorflow as tf\n')]
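For reference, a minimal runnable sketch of the mask-building pattern from prepare_mbart_inputs_dict in the record above: encoder pad positions are zeroed out of the attention mask, while the first decoder position is always kept visible. The toy ids below are illustrative.

# Attention-mask construction, assuming pad_token_id = 1 as in the test config.
import tensorflow as tf

pad_token_id = 1
input_ids = tf.constant([[0, 4, 5, 1, 1]])
decoder_input_ids = tf.constant([[2, 4, 5, 1, 1]])

attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
decoder_attention_mask = tf.concat(
    [
        tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),  # always attend to the start token
        tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], pad_token_id), tf.int8),
    ],
    axis=-1,
)
print(attention_mask.numpy())          # [[1 1 1 0 0]]
print(decoder_attention_mask.numpy())  # [[1 1 1 0 0]]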
Liang813/transformers
08f534d2da47875a4b7eb1c125cfa7f0f3b79642
# coding=utf-8 # Copyright 2020 The Huggingface Inc. team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import is_tf_available from transformers.file_utils import cached_property from transformers.testing_utils import is_pt_tf_cross_test, require_tf, slow from .test_configuration_common import ConfigTester from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor if is_tf_available(): import tensorflow as tf from transformers import BartConfig, TFBartForConditionalGeneration, TFBartModel from transformers.tokenization_bart import BartTokenizer @require_tf class ModelTester: def __init__(self, parent): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_labels = False self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 5 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 20 self.eos_token_ids = [2] self.pad_token_id = 1 self.bos_token_id = 0 # torch.manual_seed(0) def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([2] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) input_ids = tf.clip_by_value(input_ids, 3, self.vocab_size + 1) config = BartConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, ) inputs_dict = prepare_bart_inputs_dict(config, input_ids) return config, inputs_dict def prepare_bart_inputs_dict( config, input_ids, attention_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) return { "input_ids": input_ids, "decoder_input_ids": input_ids, "attention_mask": attention_mask, } @require_tf class TestTFBart(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFBartForConditionalGeneration, TFBartModel) if is_tf_available() else () all_generative_model_classes = (TFBartForConditionalGeneration,) if is_tf_available() else () is_encoder_decoder = True test_pruning = False def setUp(self): self.model_tester = ModelTester(self) self.config_tester = ConfigTester(self, config_class=BartConfig) def test_config(self): self.config_tester.run_common_tests() def test_inputs_embeds(self): # inputs_embeds not supported pass def test_compile_tf_model(self): config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy") model_class = TFBartForConditionalGeneration input_ids = { "decoder_input_ids": tf.keras.Input(batch_shape=(2, 2000), name="decoder_input_ids", dtype="int32"), "input_ids": tf.keras.Input(batch_shape=(2, 2000), name="input_ids", dtype="int32"), } # Prepare our model model = model_class(config) model(self._prepare_for_class(inputs_dict, model_class)) # Model must be called before saving. # Let's load it from the disk to be sure we can use pretrained weights with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname) outputs_dict = model(input_ids) hidden_states = outputs_dict[0] # Add a dense layer on top to test integration with other keras modules outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states) # Compile extended model extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs]) extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric]) def test_saved_model_with_hidden_states_output(self): # Should be uncommented during patrick TF refactor pass def test_saved_model_with_attentions_output(self): # Should be uncommented during patrick TF refactor pass @require_tf class TFBartHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2 input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1) batch_size = input_ids.shape[0] config = BartConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, return_dict=True, decoder_start_token_id=2, ) return config, input_ids, batch_size def test_lm_forward(self): config, input_ids, batch_size = self._get_config_and_data() decoder_lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size) lm_model = TFBartForConditionalGeneration(config) outputs = lm_model(inputs=input_ids, lm_labels=decoder_lm_labels, decoder_input_ids=input_ids, use_cache=False) expected_shape = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs.logits.shape, expected_shape) def test_lm_uneven_forward(self): config = BartConfig( vocab_size=10, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, return_dict=True, ) lm_model = TFBartForConditionalGeneration(config) context = tf.fill((7, 2), 4) summary = tf.fill((7, 7), 6) outputs = lm_model(inputs=context, decoder_input_ids=summary, use_cache=False) expected_shape = (*summary.shape, config.vocab_size) self.assertEqual(outputs.logits.shape, expected_shape) def _assert_tensors_equal(a, b, atol=1e-12, prefix=""): """If tensors not close, or a and b arent both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if tf.debugging.assert_near(a, b, atol=atol): return True raise except Exception: msg = "{} != {}".format(a, b) if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return tf.constant(tok_lst, 
dtype=tf.int32) TOLERANCE = 1e-4 @is_pt_tf_cross_test @slow class TFBartModelIntegrationTest(unittest.TestCase): def test_inference_no_head(self): model = TFBartModel.from_pretrained("facebook/bart-large", from_pt=True) input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) inputs_dict = prepare_bart_inputs_dict(model.config, input_ids) # with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = (1, 11, 1024) self.assertEqual(output.shape, expected_shape) expected_slice = tf.Tensor( [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], ) self.assertTrue(tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def test_cnn_summarization_same_as_fairseq_hard(self): hf = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn", from_pt=True) tok = self.tok FRANCE_ARTICLE = ' Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a phone at the wreckage site. The two publications described the supposed video, but did not post it on their websites. The publications said that they watched the video, which was found by a source close to the investigation. "One can hear cries of \'My God\' in several languages," Paris Match reported. "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt, editor-in-chief of Bild online. An official with France\'s accident investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said, but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working hand-in-hand with investigators. But none of the cell phones found so far have been sent to the institute, Menichini said. Asked whether staff involved in the search could have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered cell phones from the crash site after Bild and Paris Match published their reports. "That is something we did not know before. ... 
Overall we can say many things of the investigation weren\'t revealed by the investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the controls of Germanwings Flight 9525, which he\'s accused of deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa said, included medical documents he submitted in connection with resuming his flight training. The announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz\'s battle with depression, allowed him to continue training and ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was sharing the information and documents -- including training and medical records -- with public prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the past week to recover human remains and plane debris scattered across a steep mountainside. He saw the crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no visible human remains were left at the site but recovery teams would keep searching. French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested. In the meantime, the recovery of the victims\' personal belongings will start Wednesday, Menichini said. Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew on board. Check out the latest from our correspondents . The details about Lubitz\'s correspondence with the flight school during his training were among several developments as investigators continued to delve into what caused the crash and Lubitz\'s possible motive for downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent psychotherapy before he got his pilot\'s license. Kumpa emphasized there\'s no evidence suggesting Lubitz was suicidal or acting aggressively before the crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to lose his pilot\'s license, a European government official briefed on the investigation told CNN on Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being considered. Another source, a law enforcement official briefed on the investigation, also told CNN that authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly because of his medical problems. 
Lubitz\'s girlfriend told investigators he had seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had psychological issues, the European government official said. But no matter what details emerge about his previous mental health struggles, there\'s more to the story, said Brian Russell, a forensic psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact that maybe they weren\'t going to keep doing their job and they\'re upset about that and so they\'re suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to also take that rage and turn it outward on 149 other people who had nothing to do with the person\'s problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight 9525? CNN\'s Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura Smith-Spark wrote from London. CNN\'s Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.' # @noqa EXPECTED_SUMMARY_FRANCE = 'French prosecutor says he\'s not aware of any video footage from on board the plane. German daily Bild and French Paris Match claim to have found a cell phone video of the crash. A French Gendarmerie spokesman calls the reports "completely wrong" and "unwarranted" German airline Lufthansa confirms co-pilot Andreas Lubitz had battled depression.' SHORTER_ARTICLE = ' (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based. The Palestinians signed the ICC\'s founding Rome Statute in January, when they also accepted its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the situation in Palestinian territories, paving the way for possible war crimes investigations against Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and the United States, neither of which is an ICC member, opposed the Palestinians\' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday\'s ceremony, said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the world is also a step closer to ending a long era of impunity and injustice," he said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights Watch welcomed the development. 
"Governments seeking to penalize Palestine for joining the ICC should immediately end their pressure, and countries that support universal acceptance of the court\'s treaty should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the group. "What\'s objectionable is the attempts to undermine international justice, not Palestine\'s decision to join a treaty to which over 100 countries around the world are members." In January, when the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we do not believe that it is eligible to join the ICC," the State Department said in a statement. It urged the warring sides to resolve their differences through direct negotiations. "We will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality." The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry will include alleged war crimes committed since June. The International Criminal Court was set up in 2002 to prosecute genocide, crimes against humanity and war crimes. CNN\'s Vasco Cotovio, Kareem Khadder and Faith Karimi contributed to this report.' EXPECTED_SUMMARY_SHORTER = "The Palestinian Authority becomes the 123rd member of the International Criminal Court. The move gives the court jurisdiction over alleged crimes in Palestinian territories. Israel and the United States opposed the Palestinians' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki said it was a move toward greater justice." # The below article tests that we don't add any hypotheses outside of the top n_beams IRAN_ARTICLE = " (CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger. Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a letter to the Iranian leadership warning them away from a deal. The debate that has already begun since the announcement of the new framework will likely result in more heat than light. It will not be helped by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: . The most misleading assertion, despite universal rejection by experts, is that the negotiations' objective at the outset was the total elimination of any nuclear program in Iran. That is the position of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it had been, there would have been no Iranian team at the negotiating table. 
Rather, the objective has always been to structure an agreement or series of agreements so that Iran could not covertly develop a nuclear arsenal before the United States and its allies could respond. The new framework has exceeded expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite sharp accusations by some in the United States and its allies, Iran denies having such a program, and U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's continued cooperation with International Atomic Energy Agency inspections is further evidence on this point, and we'll know even more about Iran's program in the coming months and years because of the deal. In fact, the inspections provisions that are part of this agreement are designed to protect against any covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter warning that a deal might be killed by Congress or a future president). This of course is not the case. The talks were between Iran and the five permanent members of the U.N. Security Council (United States, United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the agreement should be a formal treaty requiring the Senate to \"advise and consent.\" But the issue is not suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement with Iran will not be so balanced. The restrictions and obligations in the final framework agreement will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally some insist that any agreement must address Iranian missile programs, human rights violations or support for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in the negotiations would be a poison pill. 
This agreement should be judged on its merits and on how it affects the security of our negotiating partners and allies, including Israel. Those judgments should be fact-based, not based on questionable assertions or dubious assumptions." EXPECTED_SUMMARY_IRAN = "The U.S. and its negotiating partners reached a very strong framework agreement with Iran. Peter Bergen: The debate that has already begun will likely result in more heat than light. He says the agreement limits Iran's nuclear program in such a way as to effectively block it from building a nuclear weapon. Bergen says the most important aim of a nuclear deal is preventing a nuclear Iran." ARTICLE_SUBWAY = ' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false instrument for filing in the first degree," referring to her false statements on the 2010 marriage license application, according to court documents. Prosecutors said the marriages were part of an immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages. Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted. The case was referred to the Bronx District Attorney\'s Office by Immigration and Customs Enforcement and the Department of Homeland Security\'s Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt, Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18.' EXPECTED_SUMMARY_SUBWAY = "Liana Barrientos has been married 10 times, sometimes within two weeks of each other. Prosecutors say the marriages were part of an immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx. She was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the subway." 
dct = tok( [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY], max_length=1024, truncation_strategy="only_first", padding="longest", truncation=True, return_tensors="tf", ) self.assertEqual(1024, dct["input_ids"].shape[1]) hypotheses_batch = hf.generate( input_ids=dct["input_ids"], attention_mask=dct["attention_mask"], ) assert hypotheses_batch[:, 1].numpy().tolist() == [0, 0, 0, 0] # test force_bos_token_to_be_generated decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False) expected_batch = [ EXPECTED_SUMMARY_FRANCE, EXPECTED_SUMMARY_SHORTER, EXPECTED_SUMMARY_IRAN, EXPECTED_SUMMARY_SUBWAY, ] assert decoded == expected_batch @cached_property def tok(self): return BartTokenizer.from_pretrained("facebook/bart-large") @slow @require_tf class FasterTFBartModelIntegrationTests(unittest.TestCase): """These tests are useful for debugging since they operate on a model with 1 encoder layer and 1 decoder layer.""" @cached_property def tok(self): return BartTokenizer.from_pretrained("facebook/bart-large") @cached_property def xsum_1_1_model(self): return TFBartForConditionalGeneration.from_pretrained("sshleifer/distilbart-xsum-1-1") def test_xsum_1_1_generation(self): model = self.xsum_1_1_model assert model.model.decoder.embed_tokens._layer == model.model.shared ARTICLE = 'The Palestinian Authority officially became the 123rd member of the International Criminal Court on Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based. The Palestinians signed the ICC\'s founding Rome Statute in January, when they also accepted its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the situation in Palestinian territories, paving the way for possible war crimes investigations against Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and the United States, neither of which is an ICC member, opposed the Palestinians\' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday\'s ceremony, said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the world is also a step closer to ending a long era of impunity and injustice," he said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should immediately end their pressure, and countries that support universal acceptance of the court\'s treaty should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the group. 
"What\'s objectionable is the attempts to undermine international justice, not Palestine\'s decision to join a treaty to which over 100 countries around the world are members." In January, when the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we do not believe that it is eligible to join the ICC," the State Department said in a statement. It urged the warring sides to resolve their differences through direct negotiations. "We will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality." The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry will include alleged war crimes committed since June. The International Criminal Court was set up in 2002 to prosecute genocide, crimes against humanity and war crimes.' dct = self.tok(ARTICLE, return_tensors="tf") generated_ids = model.generate(**dct, num_beams=4) result = self.tok.batch_decode(generated_ids, skip_special_tokens=True)[0] assert ( result == " The International Criminal Court (ICC) has announced that it has been announced by the International Criminal court." ) def test_xsum_1_1_batch_generation(self): batch = self.tok( [ 'The Palestinian Authority officially became the 123rd member of the International Criminal Court on Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based. The Palestinians signed the ICC\'s founding Rome Statute in January, when they also accepted its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the situation in Palestinian territories, paving the way for possible war crimes investigations against Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and the United States, neither of which is an ICC member, opposed the Palestinians\' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday\'s ceremony, said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the world is also a step closer to ending a long era of impunity and injustice," he said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a State Party to the Statute. 
These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should immediately end their pressure, and countries that support universal acceptance of the court\'s treaty should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the group. "What\'s objectionable is the attempts to undermine international justice, not Palestine\'s decision to join a treaty to which over 100 countries around the world are members." In January, when the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we do not believe that it is eligible to join the ICC," the State Department said in a statement. It urged the warring sides to resolve their differences through direct negotiations. "We will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality." The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry will include alleged war crimes committed since June. The International Criminal Court was set up in 2002 to prosecute genocide, crimes against humanity and war crimes.', 'The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a phone at the wreckage site. The two publications described the supposed video, but did not post it on their websites. The publications said that they watched the video, which was found by a source close to the investigation. "One can hear cries of \'My God\' in several languages," Paris Match reported. "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt, editor-in-chief of Bild online. An official with France\'s accident investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. 
Jean-Marc Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said, but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working hand-in-hand with investigators. But none of the cell phones found so far have been sent to the institute, Menichini said. Asked whether staff involved in the search could have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered cell phones from the crash site after Bild and Paris Match published their reports. "That is something we did not know before. ... Overall we can say many things of the investigation weren\'t revealed by the investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the controls of Germanwings Flight 9525, which he\'s accused of deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa said, included medical documents he submitted in connection with resuming his flight training. The announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz\'s battle with depression, allowed him to continue training and ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was sharing the information and documents -- including training and medical records -- with public prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the past week to recover human remains and plane debris scattered across a steep mountainside. He saw the crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no visible human remains were left at the site but recovery teams would keep searching. French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested. In the meantime, the recovery of the victims\' personal belongings will start Wednesday, Menichini said. Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew on board. Check out the latest from our correspondents . The details about Lubitz\'s correspondence with the flight school during his training were among several developments as investigators continued to delve into what caused the crash and Lubitz\'s possible motive for downing the jet. 
A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent psychotherapy before he got his pilot\'s license. Kumpa emphasized there\'s no evidence suggesting Lubitz was suicidal or acting aggressively before the crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to lose his pilot\'s license, a European government official briefed on the investigation told CNN on Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being considered. Another source, a law enforcement official briefed on the investigation, also told CNN that authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly because of his medical problems. Lubitz\'s girlfriend told investigators he had seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had psychological issues, the European government official said. But no matter what details emerge about his previous mental health struggles, there\'s more to the story, said Brian Russell, a forensic psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact that maybe they weren\'t going to keep doing their job and they\'re upset about that and so they\'re suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to also take that rage and turn it outward on 149 other people who had nothing to do with the person\'s problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight 9525? CNN\'s Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura Smith-Spark wrote from London. CNN\'s Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.', ], return_tensors="tf", padding="longest", truncation=True, ) generated_ids = self.xsum_1_1_model.generate(**batch, num_beams=4) result = self.tok.batch_decode(generated_ids, skip_special_tokens=True) assert ( result[0] == " The International Criminal Court (ICC) has announced that it has been announced by the International Criminal court." ) assert ( result[1] == " An investigation into the crash that killed at least 10 people in the French capital has been released by the French police investigating the crash." ) def test_encoder_equiv(self): batch = self.tok( [ 'The Palestinian Authority officially became the 123rd member of the International Criminal Court on Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based. The Palestinians signed the ICC\'s founding Rome Statute in January, when they also accepted its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the situation in Palestinian territories, paving the way for possible war crimes investigations against Israelis. 
As members of the court, Palestinians may be subject to counter-charges as well. Israel and the United States, neither of which is an ICC member, opposed the Palestinians\' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday\'s ceremony, said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the world is also a step closer to ending a long era of impunity and injustice," he said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should immediately end their pressure, and countries that support universal acceptance of the court\'s treaty should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the group. "What\'s objectionable is the attempts to undermine international justice, not Palestine\'s decision to join a treaty to which over 100 countries around the world are members." In January, when the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we do not believe that it is eligible to join the ICC," the State Department said in a statement. It urged the warring sides to resolve their differences through direct negotiations. "We will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality." The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry will include alleged war crimes committed since June. The International Criminal Court was set up in 2002 to prosecute genocide, crimes against humanity and war crimes.', 'The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a phone at the wreckage site. 
The two publications described the supposed video, but did not post it on their websites. The publications said that they watched the video, which was found by a source close to the investigation. "One can hear cries of \'My God\' in several languages," Paris Match reported. "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt, editor-in-chief of Bild online. An official with France\'s accident investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said, but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working hand-in-hand with investigators. But none of the cell phones found so far have been sent to the institute, Menichini said. Asked whether staff involved in the search could have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered cell phones from the crash site after Bild and Paris Match published their reports. "That is something we did not know before. ... Overall we can say many things of the investigation weren\'t revealed by the investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the controls of Germanwings Flight 9525, which he\'s accused of deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa said, included medical documents he submitted in connection with resuming his flight training. The announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz\'s battle with depression, allowed him to continue training and ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was sharing the information and documents -- including training and medical records -- with public prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the past week to recover human remains and plane debris scattered across a steep mountainside. He saw the crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no visible human remains were left at the site but recovery teams would keep searching. 
French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested. In the meantime, the recovery of the victims\' personal belongings will start Wednesday, Menichini said. Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew on board. Check out the latest from our correspondents . The details about Lubitz\'s correspondence with the flight school during his training were among several developments as investigators continued to delve into what caused the crash and Lubitz\'s possible motive for downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent psychotherapy before he got his pilot\'s license. Kumpa emphasized there\'s no evidence suggesting Lubitz was suicidal or acting aggressively before the crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to lose his pilot\'s license, a European government official briefed on the investigation told CNN on Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being considered. Another source, a law enforcement official briefed on the investigation, also told CNN that authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly because of his medical problems. Lubitz\'s girlfriend told investigators he had seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had psychological issues, the European government official said. But no matter what details emerge about his previous mental health struggles, there\'s more to the story, said Brian Russell, a forensic psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact that maybe they weren\'t going to keep doing their job and they\'re upset about that and so they\'re suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to also take that rage and turn it outward on 149 other people who had nothing to do with the person\'s problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight 9525? CNN\'s Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura Smith-Spark wrote from London. CNN\'s Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.', ], return_tensors="tf", padding="longest", truncation=True, ) features = self.xsum_1_1_model.get_encoder()(**batch, return_dict=True).last_hidden_state import numpy as np expected = np.array([[-0.0828, -0.0251, -0.0674], [0.1277, 0.3311, -0.0255], [0.2613, -0.0840, -0.2763]]) assert np.allclose(features[0, :3, :3].numpy(), expected, atol=1e-3)
[ "tensorflow.clip_by_value", "tensorflow.fill", "tensorflow.concat", "tensorflow.constant", "tensorflow.math.not_equal", "tensorflow.keras.Input", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.Tensor", "tensorflow.keras.layers.Dense", "tensorflow.ones", "tensorflow.keras.Model", "tensorflow.keras.optimizers.Adam", "tensorflow.debugging.assert_near", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "numpy.array" ]
tests/test_modeling_tf_bart.py
[(28, 'transformers.is_tf_available', 'is_tf_available', ([], {}), False, 'from transformers import is_tf_available\n'), (228, 'tensorflow.constant', 'tf.constant', (['tok_lst'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.concat', 'tf.concat', (['[input_ids, eos_tensor]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['input_ids', '(3)', '(self.vocab_size + 1)'], {}), True, 'import tensorflow as tf\n'), (63, 'transformers.BartConfig', 'BartConfig', ([], {'vocab_size': 'self.vocab_size', 'd_model': 'self.hidden_size', 'encoder_layers': 'self.num_hidden_layers', 'decoder_layers': 'self.num_hidden_layers', 'encoder_attention_heads': 'self.num_attention_heads', 'decoder_attention_heads': 'self.num_attention_heads', 'encoder_ffn_dim': 'self.intermediate_size', 'decoder_ffn_dim': 'self.intermediate_size', 'dropout': 'self.hidden_dropout_prob', 'attention_dropout': 'self.attention_probs_dropout_prob', 'max_position_embeddings': 'self.max_position_embeddings', 'eos_token_ids': '[2]', 'bos_token_id': 'self.bos_token_id', 'pad_token_id': 'self.pad_token_id', 'decoder_start_token_id': 'self.pad_token_id'}), False, 'from transformers import BartConfig, TFBartForConditionalGeneration, TFBartModel\n'), (100, 'transformers.is_tf_available', 'is_tf_available', ([], {}), False, 'from transformers import is_tf_available\n'), (101, 'transformers.is_tf_available', 'is_tf_available', ([], {}), False, 'from transformers import is_tf_available\n'), (119, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(3e-05)', 'epsilon': '(1e-08)', 'clipnorm': '(1.0)'}), True, 'import tensorflow as tf\n'), (120, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', (['"""accuracy"""'], {}), True, 'import tensorflow as tf\n'), (144, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[input_ids]', 'outputs': '[outputs]'}), True, 'import tensorflow as tf\n'), (165, 'transformers.BartConfig', 'BartConfig', ([], {'vocab_size': 'self.vocab_size', 'd_model': '(24)', 'encoder_layers': '(2)', 'decoder_layers': '(2)', 'encoder_attention_heads': '(2)', 'decoder_attention_heads': '(2)', 'encoder_ffn_dim': '(32)', 'decoder_ffn_dim': '(32)', 'max_position_embeddings': '(48)', 'eos_token_id': '(2)', 'pad_token_id': '(1)', 'bos_token_id': '(0)', 'return_dict': '(True)', 'decoder_start_token_id': '(2)'}), False, 'from transformers import BartConfig, TFBartForConditionalGeneration, TFBartModel\n'), (186, 'transformers.TFBartForConditionalGeneration', 'TFBartForConditionalGeneration', (['config'], {}), False, 'from transformers import BartConfig, TFBartForConditionalGeneration, TFBartModel\n'), (192, 'transformers.BartConfig', 'BartConfig', ([], {'vocab_size': '(10)', 'd_model': '(24)', 'encoder_layers': '(2)', 'decoder_layers': '(2)', 'encoder_attention_heads': '(2)', 'decoder_attention_heads': '(2)', 'encoder_ffn_dim': '(32)', 'decoder_ffn_dim': '(32)', 'max_position_embeddings': '(48)', 'return_dict': '(True)'}), False, 'from transformers import BartConfig, TFBartForConditionalGeneration, TFBartModel\n'), (204, 'transformers.TFBartForConditionalGeneration', 'TFBartForConditionalGeneration', (['config'], {}), False, 'from transformers import BartConfig, 
TFBartForConditionalGeneration, TFBartModel\n'), (205, 'tensorflow.fill', 'tf.fill', (['(7, 2)', '(4)'], {}), True, 'import tensorflow as tf\n'), (206, 'tensorflow.fill', 'tf.fill', (['(7, 7)', '(6)'], {}), True, 'import tensorflow as tf\n'), (217, 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['a', 'b'], {'atol': 'atol'}), True, 'import tensorflow as tf\n'), (238, 'transformers.TFBartModel.from_pretrained', 'TFBartModel.from_pretrained', (['"""facebook/bart-large"""'], {'from_pt': '(True)'}), False, 'from transformers import BartConfig, TFBartForConditionalGeneration, TFBartModel\n'), (245, 'tensorflow.Tensor', 'tf.Tensor', (['[[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -\n 2.1845]]'], {}), True, 'import tensorflow as tf\n'), (251, 'transformers.TFBartForConditionalGeneration.from_pretrained', 'TFBartForConditionalGeneration.from_pretrained', (['"""facebook/bart-large-cnn"""'], {'from_pt': '(True)'}), False, 'from transformers import BartConfig, TFBartForConditionalGeneration, TFBartModel\n'), (293, 'transformers.tokenization_bart.BartTokenizer.from_pretrained', 'BartTokenizer.from_pretrained', (['"""facebook/bart-large"""'], {}), False, 'from transformers.tokenization_bart import BartTokenizer\n'), (303, 'transformers.tokenization_bart.BartTokenizer.from_pretrained', 'BartTokenizer.from_pretrained', (['"""facebook/bart-large"""'], {}), False, 'from transformers.tokenization_bart import BartTokenizer\n'), (307, 'transformers.TFBartForConditionalGeneration.from_pretrained', 'TFBartForConditionalGeneration.from_pretrained', (['"""sshleifer/distilbart-xsum-1-1"""'], {}), False, 'from transformers import BartConfig, TFBartForConditionalGeneration, TFBartModel\n'), (355, 'numpy.array', 'np.array', (['[[-0.0828, -0.0251, -0.0674], [0.1277, 0.3311, -0.0255], [0.2613, -0.084, -\n 0.2763]]'], {}), True, 'import numpy as np\n'), (59, 'tensorflow.constant', 'tf.constant', (['([2] * self.batch_size)'], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.math.not_equal', 'tf.math.not_equal', (['input_ids', 'config.pad_token_id'], {}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'batch_shape': '(2, 2000)', 'name': '"""decoder_input_ids"""', 'dtype': '"""int32"""'}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'batch_shape': '(2, 2000)', 'name': '"""input_ids"""', 'dtype': '"""int32"""'}), True, 'import tensorflow as tf\n'), (133, 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), False, 'import tempfile\n'), (141, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'activation': '"""softmax"""', 'name': '"""outputs"""'}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.ones', 'tf.ones', (['(4, 1)'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (248, 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['output[:, :3, :3]', 'expected_slice'], {'atol': 'TOLERANCE'}), True, 'import tensorflow as tf\n')]
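For reference, the generation pattern these TF-BART tests exercise can be reproduced outside the test harness. A minimal sketch, assuming the public transformers TF API and the checkpoint names listed in the api_extract above ("facebook/bart-large" tokenizer, "sshleifer/distilbart-xsum-1-1" model); the exact summary strings asserted in the tests depend on the checkpoint version, so only the call pattern is shown:

from transformers import BartTokenizer, TFBartForConditionalGeneration

# Tokenizer and model checkpoints as they appear in the record above.
tok = BartTokenizer.from_pretrained("facebook/bart-large")
model = TFBartForConditionalGeneration.from_pretrained("sshleifer/distilbart-xsum-1-1")

def summarize(articles):
    # Batch-encode, padding to the longest article and truncating to the
    # model's maximum input length, as the tests above do.
    batch = tok(articles, return_tensors="tf", padding="longest", truncation=True)
    # Beam search with num_beams=4 mirrors the tests' generate() calls.
    generated_ids = model.generate(**batch, num_beams=4)
    return tok.batch_decode(generated_ids, skip_special_tokens=True)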
BrentJiang/tensorflow
d84330946a9c49ea7970a5f83784b5ec52112ad1
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tools for deserializing `Function`s.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import re from tensorflow.core.framework import function_pb2 from tensorflow.python.eager import def_function from tensorflow.python.eager import function as function_lib from tensorflow.python.framework import function_def_to_graph as function_def_lib from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_spec from tensorflow.python.ops import resource_variable_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import nested_structure_coder from tensorflow.python.util import compat from tensorflow.python.util import nest def _is_tensor(t): return isinstance(t, (ops.Tensor, resource_variable_ops.ResourceVariable)) def _inputs_compatible(args, stored_inputs): """Checks whether function arguments are compatible with parameters.""" if len(args) != len(stored_inputs): return False for arg, stored_input in zip(args, stored_inputs): if not function_lib.is_same_structure(arg, stored_input): return False flattened_arg = nest.flatten(arg) flattened_stored_input = nest.flatten(stored_input) for a, b in zip(flattened_arg, flattened_stored_input): if _is_tensor(a): if not isinstance(b, tensor_spec.TensorSpec): return False if a.dtype != b.dtype or not b.shape.is_compatible_with(a.shape): return False else: if a != b: return False return True def _deserialize_function_spec(function_spec_proto, coder): """Deserialize a FunctionSpec object from its proto representation.""" fullargspec = coder.decode_proto(function_spec_proto.fullargspec) is_method = function_spec_proto.is_method args_to_prepend = coder.decode_proto(function_spec_proto.args_to_prepend) kwargs_to_include = coder.decode_proto(function_spec_proto.kwargs_to_include) input_signature = coder.decode_proto(function_spec_proto.input_signature) return function_lib.FunctionSpec(fullargspec, is_method, args_to_prepend, kwargs_to_include, input_signature) # TODO(allenl): The fact that we can't derive ConcreteFunction calling # conventions from the serialized input spec right now is unfortunate. Merging # these would be good, maybe by adding TensorSpec names to cache keys so renamed # keyword arguments would yield different ConcreteFunctions. def setup_bare_concrete_function(saved_bare_concrete_function, concrete_functions): """Makes a restored bare concrete function callable.""" # Bare concrete functions accept only flat lists of Tensors with unique # names. 
concrete_function = concrete_functions[ saved_bare_concrete_function.concrete_function_name] # pylint: disable=protected-access concrete_function._arg_keywords = ( saved_bare_concrete_function.argument_keywords) concrete_function._num_positional_args = ( saved_bare_concrete_function.allowed_positional_arguments) # pylint: enable=protected-access concrete_function.add_to_graph() return concrete_function class RestoredFunction(def_function.Function): """Wrapper class for a function that has been restored from saved state. See `def_function.Function`. """ def __init__(self, python_function, name, function_spec, concrete_functions): # TODO(mdan): We may enable autograph once exceptions are supported. super(RestoredFunction, self).__init__( python_function, name, autograph=False) self._concrete_functions = concrete_functions # This does not propagate to stateful and stateless functions of the # RestoredFunction, which will have seen only defunned # restored_function_body(*args, **kwargs). That's why we have to # canonicalize inputs inside restored_function_body. self._function_spec = function_spec def _list_all_concrete_functions_for_serialization(self): return self._concrete_functions def recreate_function(saved_function, concrete_functions): """Creates a `Function` from a `SavedFunction`. Args: saved_function: `SavedFunction` proto. concrete_functions: map from function name to `ConcreteFunction`. Returns: A `Function`. """ # TODO(andresp): Construct a `Function` with the cache populated # instead of creating a new `Function` backed by a Python layer to # glue things together. Current approach is nesting functions deeper for each # serialization cycle. coder = nested_structure_coder.StructureCoder() function_spec = _deserialize_function_spec(saved_function.function_spec, coder) def restored_function_body(*args, **kwargs): """Calls a restored function.""" # TODO(allenl): Functions saved with input_signatures should revive with # input_signatures. try: canonicalized_inputs = function_spec.canonicalize_function_inputs( *args, **kwargs) except ValueError as e: raise ValueError( "Cannot canonicalize input args %r and kwargs %r. Error: %r." % (args, kwargs, e)) debug_considered_signatures = [] for concrete_function_name in saved_function.concrete_functions: function_obj = concrete_functions[concrete_function_name] canonicalized_original_inputs = ( function_obj.graph.structured_input_signature) debug_considered_signatures.append(canonicalized_original_inputs) if _inputs_compatible(canonicalized_inputs, canonicalized_original_inputs): flattened_inputs = nest.flatten(canonicalized_inputs) filtered_inputs = [t for t in flattened_inputs if _is_tensor(t)] result = function_obj._call_flat(filtered_inputs) # pylint: disable=protected-access if isinstance(result, ops.Operation): return None return result raise AssertionError( "Could not find matching function to call for canonicalized inputs %r. " "Only existing signatures are %r." % (canonicalized_inputs, debug_considered_signatures)) concrete_function_objects = [] for concrete_function_name in saved_function.concrete_functions: concrete_function_objects.append(concrete_functions[concrete_function_name]) return RestoredFunction(restored_function_body, restored_function_body.__name__, function_spec, concrete_function_objects) def load_function_def_library(library): """Load a set of functions as concrete functions without captured inputs. Functions names are manipulated during load such that they do not overlap with previously created ones. 
Args: library: FunctionDefLibrary proto message. Returns: Map of original function names in the library to instances of `ConcreteFunction` without captured inputs. Raises: ValueError: if functions dependencies have a cycle. """ functions = {} for fdef in _sort_function_defs(library): copy = _fix_fdef(fdef, functions) func_graph = function_def_lib.function_def_to_graph(copy) for dep in _list_function_deps(fdef): functions[dep].add_to_graph(func_graph) func = function_lib.ConcreteFunction(func_graph) func.add_to_graph() functions[fdef.signature.name] = func # Also register the gradients in the current root context. with ops.init_scope(): func._register_gradient() # pylint: disable=protected-access return functions def _sort_function_defs(library): """Return a topologic sort of FunctionDefs in a library.""" edges = collections.defaultdict(list) in_count = collections.defaultdict(lambda: 0) for fdef in library.function: for dep in _list_function_deps(fdef): edges[dep].append(fdef.signature.name) in_count[fdef.signature.name] += 1 ready = [ fdef.signature.name for fdef in library.function if in_count[fdef.signature.name] == 0 ] output = [] while ready: node = ready.pop() output.append(node) for dest in edges[node]: in_count[dest] -= 1 if not in_count[dest]: ready.append(dest) if len(output) != len(library.function): failed_to_resolve = sorted(set(in_count.keys()) - set(output)) raise ValueError("There is a cyclic-dependency between functions. ", "Could not resolve %r." % (failed_to_resolve,)) reverse = {fdef.signature.name: fdef for fdef in library.function} return [reverse[x] for x in output] def _fix_fdef(orig_fdef, functions): """Fixes a FunctionDef proto to be loaded in current context. In particular, when loading a function library into an eager context, one must rename the functions to avoid conflicts with existent functions. Args: orig_fdef: FunctionDef proto to fix. It is not modified. functions: map from function name to a ConcreteFunction instance. Returns: A fixed copy of the original FunctionDef. """ fdef = function_pb2.FunctionDef() fdef.CopyFrom(orig_fdef) for node_def in fdef.node_def: if "_gradient_op_type" in node_def.attr: if node_def.op in ["StatefulPartitionedCall", "PartitionedCall"]: # TODO(andresp): This code assumes that the gradient registered for this # function call is the default gradient for the function and not a # custom one. fname = node_def.attr["f"].func.name node_def.attr["_gradient_op_type"].s = compat.as_bytes( functions[fname]._gradient_name) # pylint: disable=protected-access else: logging.warning("Importing a function (%s) with ops with custom " "gradients. Will likely fail if a gradient is " "requested.", fdef.signature.name) for _, attr_value in node_def.attr.items(): if attr_value.func.name: attr_value.func.name = functions[attr_value.func.name].name # TODO(b/124205571): Avoid accidental sharing and destruction of restored # resources. For now drop "shared_name" when loading functions to avoid # sharing. if "shared_name" in node_def.attr: del node_def.attr["shared_name"] fdef.signature.name = _clean_function_name(fdef.signature.name) return fdef def _list_function_deps(fdef): # TODO(andresp): Recurse into list attributes and into NameAttrList attrs both # when listing deps and when fixing them. `function_def_to_graph` also # requires fixes. 
deps = set() for node_def in fdef.node_def: for _, attr_value in node_def.attr.items(): if attr_value.WhichOneof("value") == "func": deps.add(attr_value.func.name) return deps def _clean_function_name(name): """Vanity function to keep the function names comprehensible.""" # Note: each time a function is wrapped into `function_lib.ConcreteFunction` # its name becomes "__inference_<orig>_xyz". match = re.search(r"^__inference_(.*)_\d+$", name) if match: return match.group(1) else: return name
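A standalone sketch of the Kahn-style topological sort that _sort_function_defs implements above, with plain strings standing in for FunctionDef protos (the deps mapping here is hypothetical example data, not part of the source):

import collections

deps = {"f": set(), "g": {"f"}, "h": {"f", "g"}}  # h calls f and g; g calls f

edges = collections.defaultdict(list)
in_count = collections.defaultdict(lambda: 0)
for name, called in deps.items():
    for dep in called:
        edges[dep].append(name)   # edge from callee to caller
        in_count[name] += 1       # number of unresolved dependencies

ready = [n for n in deps if in_count[n] == 0]
output = []
while ready:
    node = ready.pop()
    output.append(node)
    for dest in edges[node]:
        in_count[dest] -= 1
        if not in_count[dest]:
            ready.append(dest)

# Callees always precede their callers; a non-empty remainder would mean a
# cycle, which the source function reports as a ValueError.
assert output == ["f", "g", "h"]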
[ "tensorflow.core.framework.function_pb2.FunctionDef", "tensorflow.python.eager.function.is_same_structure", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.framework.ops.init_scope", "tensorflow.python.saved_model.nested_structure_coder.StructureCoder", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.framework.function_def_to_graph.function_def_to_graph", "tensorflow.python.eager.function.FunctionSpec", "tensorflow.python.eager.function.ConcreteFunction", "tensorflow.python.util.nest.flatten" ]
tensorflow/python/saved_model/function_deserialization.py
[(72, 'tensorflow.python.eager.function.FunctionSpec', 'function_lib.FunctionSpec', (['fullargspec', 'is_method', 'args_to_prepend', 'kwargs_to_include', 'input_signature'], {}), True, 'from tensorflow.python.eager import function as function_lib\n'), (133, 'tensorflow.python.saved_model.nested_structure_coder.StructureCoder', 'nested_structure_coder.StructureCoder', ([], {}), False, 'from tensorflow.python.saved_model import nested_structure_coder\n'), (219, 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), False, 'import collections\n'), (220, 'collections.defaultdict', 'collections.defaultdict', (['(lambda : 0)'], {}), False, 'import collections\n'), (263, 'tensorflow.core.framework.function_pb2.FunctionDef', 'function_pb2.FunctionDef', ([], {}), False, 'from tensorflow.core.framework import function_pb2\n'), (308, 're.search', 're.search', (['"""^__inference_(.*)_\\\\d+$"""', 'name'], {}), False, 'import re\n'), (50, 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['arg'], {}), False, 'from tensorflow.python.util import nest\n'), (51, 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['stored_input'], {}), False, 'from tensorflow.python.util import nest\n'), (202, 'tensorflow.python.framework.function_def_to_graph.function_def_to_graph', 'function_def_lib.function_def_to_graph', (['copy'], {}), True, 'from tensorflow.python.framework import function_def_to_graph as function_def_lib\n'), (205, 'tensorflow.python.eager.function.ConcreteFunction', 'function_lib.ConcreteFunction', (['func_graph'], {}), True, 'from tensorflow.python.eager import function as function_lib\n'), (47, 'tensorflow.python.eager.function.is_same_structure', 'function_lib.is_same_structure', (['arg', 'stored_input'], {}), True, 'from tensorflow.python.eager import function as function_lib\n'), (211, 'tensorflow.python.framework.ops.init_scope', 'ops.init_scope', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (158, 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['canonicalized_inputs'], {}), False, 'from tensorflow.python.util import nest\n'), (272, 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['functions[fname]._gradient_name'], {}), False, 'from tensorflow.python.util import compat\n'), (275, 'tensorflow.python.platform.tf_logging.warning', 'logging.warning', (['"""Importing a function (%s) with ops with custom gradients. Will likely fail if a gradient is requested."""', 'fdef.signature.name'], {}), True, 'from tensorflow.python.platform import tf_logging as logging\n')]
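The _clean_function_name helper at the end of the file above strips the "__inference_<name>_<id>" wrapper that each round of function_lib.ConcreteFunction wrapping prepends. A quick illustration of the same regex:

import re

def clean(name):
    match = re.search(r"^__inference_(.*)_\d+$", name)
    return match.group(1) if match else name

assert clean("__inference_my_func_123") == "my_func"
assert clean("plain_name") == "plain_name"  # unwrapped names pass through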
joaromi/Spiking-RetinaNet
2c190bff17ac2f3a08fa1f5e0f8f6c38af2c3f1e
# -*- coding: utf-8 -*- """Functions common to input model parsers. The core of this module is an abstract base class extracts an input model written in some neural network library and prepares it for further processing in the SNN toolbox. .. autosummary:: :nosignatures: AbstractModelParser The idea is to make all further steps in the conversion/simulation pipeline independent of the original model format. Other functions help navigate through the network in order to explore network connectivity and layer attributes: .. autosummary:: :nosignatures: get_type has_weights get_fanin get_fanout get_inbound_layers get_inbound_layers_with_params get_inbound_layers_without_params get_outbound_layers get_outbound_activation @author: rbodo """ import json import pickle from abc import abstractmethod from tensorflow import keras import numpy as np import snntoolbox.simulation.backends.custom_layers as custom_layers IS_CHANNELS_FIRST = keras.backend.image_data_format() == 'channels_first' class AbstractModelParser: """Abstract base class for neural network model parsers. Parameters ---------- input_model The input network object. config: configparser.Configparser Contains the toolbox configuration for a particular experiment. Attributes ---------- input_model: dict The input network object. config: configparser.Configparser Contains the toolbox configuration for a particular experiment. _layer_list: list[dict] A list where each entry is a dictionary containing layer specifications. Obtained by calling `parse`. Used to build new, parsed Keras model. _layer_dict: dict Maps the layer names of the specific input model library to our standard names (currently Keras). parsed_model: keras.models.Model The parsed model. """ def __init__(self, input_model, config): self.input_model = input_model self.config = config self._layer_list = [] self._layer_dict = {} self.parsed_model = None def parse_subnet(self, layers, idx, prev_out_idx=None, in_layers=None, out_layers=None, repair=None, special_relu=[]): name_map = {} inserted_flatten = False out_links = [None]*len(out_layers) is_output = False need_rewire = False include_activation = False if prev_out_idx is not None and in_layers is not None: if len(prev_out_idx) != len(in_layers): raise ValueError("prev_out_idx and in_layers must be the same size.") relu_pred = eval(self.config.get('custom', 'relu_pred')) if relu_pred: act_pred = 'relu' else: act_pred = 'linear' snn_layers = eval(self.config.get('restrictions', 'snn_layers')) for k_,layer in enumerate(layers): layer_type = self.get_type(layer) transform_Conv = self.config.getboolean('input', 'norm_conv') is_output = layer in out_layers if repair is not None: need_rewire = layer in repair # Absorb BatchNormalization layer into parameters of previous layer if layer_type == 'BatchNormalization': parameters_bn = list(self.get_batchnorm_parameters(layer)) parameters_bn, axis = parameters_bn[:-1], parameters_bn[-1] inbound = self.get_inbound_layers_with_parameters(layer) assert len(inbound) == 1, \ "Could not find unique layer with parameters " \ "preceeding BatchNorm layer." 
prev_layer = inbound[0] prev_layer_idx = name_map[str(id(prev_layer))] parameters = list( self._layer_list[prev_layer_idx]['parameters']) prev_layer_type = self.get_type(prev_layer) print("Absorbing batch-normalization parameters into " + "parameters of previous {}.".format(prev_layer_type)) _depthwise_conv_names = ['DepthwiseConv2D', 'SparseDepthwiseConv2D'] _sparse_names = ['Sparse', 'SparseConv2D', 'SparseDepthwiseConv2D'] is_depthwise = prev_layer_type in _depthwise_conv_names is_sparse = prev_layer_type in _sparse_names if is_sparse: args = [parameters[0], parameters[2]] + parameters_bn else: args = parameters[:2] + parameters_bn kwargs = { 'axis': axis, 'image_data_format': keras.backend.image_data_format(), 'is_depthwise': is_depthwise} params_to_absorb = absorb_bn_parameters(*args, **kwargs) if is_sparse: # Need to also save the mask associated with sparse layer. params_to_absorb += (parameters[1],) self._layer_list[prev_layer_idx]['parameters'] = \ params_to_absorb if layer_type == 'GlobalAveragePooling2D': print("Replacing GlobalAveragePooling by AveragePooling " "plus Flatten.") _layer_type = 'AveragePooling2D' axis = 2 if IS_CHANNELS_FIRST else 1 self._layer_list.append( {'layer_type': _layer_type, 'name': self.get_name(layer, idx, _layer_type), 'pool_size': (layer.input_shape[axis: axis + 2]), 'inbound': self.get_inbound_names(layer, name_map), 'strides': [1, 1]}) name_map[_layer_type + str(idx)] = idx idx += 1 _layer_type = 'Flatten' num_str = self.format_layer_idx(idx) shape_str = str(np.prod(layer.output_shape[1:])) self._layer_list.append( {'name': num_str + _layer_type + '_' + shape_str, 'layer_type': _layer_type, 'inbound': [self._layer_list[-1]['name']]}) name_map[_layer_type + str(idx)] = idx idx += 1 inserted_flatten = True if layer_type == 'Add': print("Replacing Add layer by NormAdd.") # _layer_type = 'Normalizable_Add' self._layer_list.append({ 'layer_type': 'NormAdd', 'name': self.get_name(layer, idx), 'inbound': self.get_inbound_names(layer, name_map), }) name_map[str(id(layer))] = idx if is_output: # get output layers references for i,out_layer in enumerate(out_layers): if layer==out_layer: out_links[i]=idx idx += 1 continue if layer_type=='Activation': activation_str = self.get_activation(layer) if activation_str == 'softmax' and \ self.config.getboolean('conversion', 'softmax_to_relu'): activation = 'relu' print("Replaced softmax by relu activation function.") elif activation_str == 'linear' and self.get_type(layer) == 'Dense' \ and self.config.getboolean('conversion', 'append_softmax', fallback=False): activation = 'softmax' print("Added softmax.") else: activation = activation_str print("Using activation {}.".format(activation_str)) include_activation=True if layer_type=='ReLU': activation = 'relu' include_activation=True if include_activation: include_activation=False if layer in special_relu: shape = layer.output_shape h, w, c = shape[1:] _layer_type = 'Conv2D' num_str = self.format_layer_idx(idx) shape_str = '{}x{}x{}'.format(*shape[1:]) weights = np.zeros([1, 1, c, c]) for k in range(c): weights[:, :, k, k] = 1 self._layer_list.append({ 'name': num_str + 'ACTIV_' + _layer_type,# + '_' + shape_str, 'layer_type': _layer_type, 'inbound': [self._layer_list[-1]['name']], 'filters': c, 'activation': 'relu', 'parameters': (weights, np.zeros(c)), 'kernel_size': 1}) name_map[str(id(layer))] = idx idx += 1 else: self._layer_list[-1]['activation'] = activation if layer_type not in snn_layers: print("Skipping layer {}.".format(layer_type)) continue print("Parsing 
layer {}.".format(layer_type)) if layer_type == 'MaxPooling2D' and \ self.config.getboolean('conversion', 'max2avg_pool'): print("Replacing max by average pooling.") layer_type = 'AveragePooling2D' if prev_out_idx is None or layer not in in_layers: if need_rewire: inbound = [self._layer_list[-1]['name']] else: inbound = self.get_inbound_names(layer, name_map) else: inbound = [] for j,in_layer in enumerate(in_layers): if layer==in_layer: inbound.append(self._layer_list[prev_out_idx[j]]['name']) attributes = self.initialize_attributes(layer) if layer_type == 'Conv2D' and transform_Conv: print("Replacing Conv2D layer by NormConv2D.") layer_type = 'NormConv2D' attributes.update({'parameters': list(layer.get_weights())}) attributes.update({'layer_type': layer_type, 'name': self.get_name(layer, idx), 'inbound': inbound}) if layer_type == 'UpSampling2D': attributes.update({'size': layer.size}) if layer_type == 'Dense': self.parse_dense(layer, attributes) if layer_type == 'Sparse': self.parse_sparse(layer, attributes) if layer_type in {'Conv1D', 'Conv2D'}: attributes = self.parse_convolution(layer, attributes) if layer_type == 'SparseConv2D': self.parse_sparse_convolution(layer, attributes) if layer_type == 'DepthwiseConv2D': self.parse_depthwiseconvolution(layer, attributes) if layer_type == 'SparseDepthwiseConv2D': self.parse_sparse_depthwiseconvolution(layer, attributes) if layer_type in ['Sparse', 'SparseConv2D', 'SparseDepthwiseConv2D']: weights, bias, mask = attributes['parameters'] weights, bias = modify_parameter_precision( weights, bias, self.config, attributes) attributes['parameters'] = (weights, bias, mask) #self.absorb_activation(layer, attributes) if layer_type in {'Dense', 'Conv1D', 'Conv2D', 'DepthwiseConv2D'}: weights, bias = attributes['parameters'] weights, bias = modify_parameter_precision( weights, bias, self.config, attributes) attributes['parameters'] = (weights, bias) #self.absorb_activation(layer, attributes) if 'Pooling' in layer_type: self.parse_pooling(layer, attributes) if layer_type == 'Concatenate': self.parse_concatenate(layer, attributes) self._layer_list.append(attributes) # Map layer index to layer id. Needed for inception modules. name_map[str(id(layer))] = idx if is_output: # get output layers references for i,out_layer in enumerate(out_layers): if layer==out_layer: out_links[i]=idx idx += 1 print('') return idx,out_links def parse(self): """Extract the essential information about a neural network. This method serves to abstract the conversion process of a network from the language the input model was built in (e.g. Keras or Lasagne). The methods iterates over all layers of the input model and writes the layer specifications and parameters into `_layer_list`. The keys are chosen in accordance with Keras layer attributes to facilitate instantiation of a new, parsed Keras model (done in a later step by `build_parsed_model`). This function applies several simplifications and adaptations to prepare the model for conversion to spiking. These modifications include: - Removing layers only used during training (Dropout, BatchNormalization, ...) - Absorbing the parameters of BatchNormalization layers into the parameters of the preceeding layer. This does not affect performance because batch-norm-parameters are constant at inference time. - Removing ReLU activation layers, because their function is inherent to the spike generation mechanism. The information which nonlinearity was used in the original model is preserved in the ``activation`` key in `_layer_list`. 
If the output layer employs the softmax function, a spiking version is used when testing the SNN in INIsim or MegaSim simulators. - Inserting a Flatten layer between Conv and FC layers, if the input model did not explicitly include one. """ layers = self.get_layer_iterable() snn_layers = eval(self.config.get('restrictions', 'snn_layers')) name_map = {} idx = 0 inserted_flatten = False for layer in layers: layer_type = self.get_type(layer) # Absorb BatchNormalization layer into parameters of previous layer if layer_type == 'BatchNormalization': parameters_bn = list(self.get_batchnorm_parameters(layer)) parameters_bn, axis = parameters_bn[:-1], parameters_bn[-1] inbound = self.get_inbound_layers_with_parameters(layer) assert len(inbound) == 1, \ "Could not find unique layer with parameters " \ "preceeding BatchNorm layer." prev_layer = inbound[0] prev_layer_idx = name_map[str(id(prev_layer))] parameters = list( self._layer_list[prev_layer_idx]['parameters']) prev_layer_type = self.get_type(prev_layer) print("Absorbing batch-normalization parameters into " + "parameters of previous {}.".format(prev_layer_type)) _depthwise_conv_names = ['DepthwiseConv2D', 'SparseDepthwiseConv2D'] _sparse_names = ['Sparse', 'SparseConv2D', 'SparseDepthwiseConv2D'] is_depthwise = prev_layer_type in _depthwise_conv_names is_sparse = prev_layer_type in _sparse_names if is_sparse: args = [parameters[0], parameters[2]] + parameters_bn else: args = parameters[:2] + parameters_bn kwargs = { 'axis': axis, 'image_data_format': keras.backend.image_data_format(), 'is_depthwise': is_depthwise} params_to_absorb = absorb_bn_parameters(*args, **kwargs) if is_sparse: # Need to also save the mask associated with sparse layer. params_to_absorb += (parameters[1],) self._layer_list[prev_layer_idx]['parameters'] = \ params_to_absorb if layer_type == 'GlobalAveragePooling2D': print("Replacing GlobalAveragePooling by AveragePooling " "plus Flatten.") _layer_type = 'AveragePooling2D' axis = 2 if IS_CHANNELS_FIRST else 1 self._layer_list.append( {'layer_type': _layer_type, 'name': self.get_name(layer, idx, _layer_type), 'pool_size': (layer.input_shape[axis: axis + 2]), 'inbound': self.get_inbound_names(layer, name_map), 'strides': [1, 1]}) name_map[_layer_type + str(idx)] = idx idx += 1 _layer_type = 'Flatten' num_str = self.format_layer_idx(idx) shape_str = str(np.prod(layer.output_shape[1:])) self._layer_list.append( {'name': num_str + _layer_type + '_' + shape_str, 'layer_type': _layer_type, 'inbound': [self._layer_list[-1]['name']]}) name_map[_layer_type + str(idx)] = idx idx += 1 inserted_flatten = True if layer_type == 'Add': print("Replacing Add layer by Concatenate plus Conv.") shape = layer.output_shape if IS_CHANNELS_FIRST: axis = 1 c, h, w = shape[1:] shape_str = '{}x{}x{}'.format(2 * c, h, w) else: axis = -1 h, w, c = shape[1:] shape_str = '{}x{}x{}'.format(h, w, 2 * c) _layer_type = 'Concatenate' num_str = self.format_layer_idx(idx) self._layer_list.append({ 'layer_type': _layer_type, 'name': num_str + _layer_type + '_' + shape_str, 'inbound': self.get_inbound_names(layer, name_map), 'axis': axis}) name_map[_layer_type + str(idx)] = idx idx += 1 _layer_type = 'Conv2D' num_str = self.format_layer_idx(idx) shape_str = '{}x{}x{}'.format(*shape[1:]) weights = np.zeros([1, 1, 2 * c, c]) for k in range(c): weights[:, :, k::c, k] = 1 self._layer_list.append({ 'name': num_str + _layer_type + '_' + shape_str, 'layer_type': _layer_type, 'inbound': [self._layer_list[-1]['name']], 'filters': c, 'activation': 'relu', # Default 
nonlinearity of SNN 'parameters': (weights, np.zeros(c)), 'kernel_size': 1}) name_map[str(id(layer))] = idx idx += 1 if layer_type not in snn_layers: print("Skipping layer {}.".format(layer_type)) continue if not inserted_flatten: inserted_flatten = self.try_insert_flatten(layer, idx, name_map) idx += inserted_flatten print("Parsing layer {}.".format(layer_type)) if layer_type == 'MaxPooling2D' and \ self.config.getboolean('conversion', 'max2avg_pool'): print("Replacing max by average pooling.") layer_type = 'AveragePooling2D' # If we inserted a layer, need to set the right inbound layer here. if inserted_flatten: inbound = [self._layer_list[-1]['name']] inserted_flatten = False else: inbound = self.get_inbound_names(layer, name_map) attributes = self.initialize_attributes(layer) attributes.update({'layer_type': layer_type, 'name': self.get_name(layer, idx), 'inbound': inbound}) if layer_type == 'Dense': self.parse_dense(layer, attributes) if layer_type == 'Sparse': self.parse_sparse(layer, attributes) if layer_type in {'Conv1D', 'Conv2D'}: self.parse_convolution(layer, attributes) if layer_type == 'SparseConv2D': self.parse_sparse_convolution(layer, attributes) if layer_type == 'DepthwiseConv2D': self.parse_depthwiseconvolution(layer, attributes) if layer_type == 'SparseDepthwiseConv2D': self.parse_sparse_depthwiseconvolution(layer, attributes) if layer_type in ['Sparse', 'SparseConv2D', 'SparseDepthwiseConv2D']: weights, bias, mask = attributes['parameters'] weights, bias = modify_parameter_precision( weights, bias, self.config, attributes) attributes['parameters'] = (weights, bias, mask) self.absorb_activation(layer, attributes) if layer_type in {'Dense', 'Conv1D', 'Conv2D', 'DepthwiseConv2D'}: weights, bias = attributes['parameters'] weights, bias = modify_parameter_precision( weights, bias, self.config, attributes) attributes['parameters'] = (weights, bias) self.absorb_activation(layer, attributes) if 'Pooling' in layer_type: self.parse_pooling(layer, attributes) if layer_type == 'Concatenate': self.parse_concatenate(layer, attributes) self._layer_list.append(attributes) # Map layer index to layer id. Needed for inception modules. name_map[str(id(layer))] = idx idx += 1 print('') @abstractmethod def get_layer_iterable(self): """Get an iterable over the layers of the network. Returns ------- layers: list """ pass @abstractmethod def get_type(self, layer): """Get layer class name. Returns ------- layer_type: str Layer class name. """ pass @abstractmethod def get_batchnorm_parameters(self, layer): """Get the parameters of a batch-normalization layer. Returns ------- mean, var_eps_sqrt_inv, gamma, beta, axis: tuple """ pass def get_inbound_layers_with_parameters(self, layer): """Iterate until inbound layers are found that have parameters. Parameters ---------- layer: Layer Returns ------- : list List of inbound layers. """ inbound = layer while True: inbound = self.get_inbound_layers(inbound) if len(inbound) == 1: inbound = inbound[0] if self.has_weights(inbound): return [inbound] else: result = [] for inb in inbound: if self.has_weights(inb): result.append(inb) else: result += self.get_inbound_layers_with_parameters(inb) return result def get_inbound_names(self, layer, name_map): """Get names of inbound layers. Parameters ---------- layer: Layer name_map: dict Maps the name of a layer to the `id` of the layer object. Returns ------- : list The names of inbound layers. 
""" inbound = self.get_inbound_layers(layer) for ib in range(len(inbound)): for _ in range(len(self.layers_to_skip)): if self.get_type(inbound[ib]) in self.layers_to_skip: inbound[ib] = self.get_inbound_layers(inbound[ib])[0] else: break if len(self._layer_list) == 0 or \ any([self.get_type(inb) == 'InputLayer' for inb in inbound]): return [self.input_layer_name] else: inb_idxs = [name_map[str(id(inb))] for inb in inbound] return [self._layer_list[i]['name'] for i in inb_idxs] @abstractmethod def get_inbound_layers(self, layer): """Get inbound layers of ``layer``. Returns ------- inbound: Sequence """ pass @property def layers_to_skip(self): """ Return a list of layer names that should be skipped during conversion to a spiking network. Returns ------- self._layers_to_skip: List[str] """ # Todo: We should get this list from some central place like the # ``config_defaults`` file. return ['BatchNormalization', 'Activation', 'Dropout', 'ReLU', 'ActivityRegularization', 'GaussianNoise'] @abstractmethod def has_weights(self, layer): """Return ``True`` if ``layer`` has weights.""" pass def initialize_attributes(self, layer=None): """ Return a dictionary that will be used to collect all attributes of a layer. This dictionary can then be used to instantiate a new parsed layer. """ return {} @abstractmethod def get_input_shape(self): """Get the input shape of a network, not including batch size. Returns ------- input_shape: tuple Input shape. """ pass def get_batch_input_shape(self): """Get the input shape of a network, including batch size. Returns ------- batch_input_shape: tuple Batch input shape. """ input_shape = tuple(self.get_input_shape()) batch_size = self.config.getint('simulation', 'batch_size') return (batch_size,) + input_shape def get_name(self, layer, idx, layer_type=None): """Create a name for a ``layer``. The format is <layer_num><layer_type>_<layer_shape>. >>> # Name of first convolution layer with 32 feature maps and >>> # dimension 64x64: "00Conv2D_32x64x64" >>> # Name of final dense layer with 100 units: "06Dense_100" Parameters ---------- layer: Layer. idx: int Layer index. layer_type: Optional[str] Type of layer. Returns ------- name: str Layer name. """ if layer_type is None: layer_type = self.get_type(layer) # try: # output_shape = self.get_output_shape(layer) # shape_string = ["{}x".format(x) for x in output_shape[1:]] # shape_string[0] = "_" + shape_string[0] # shape_string[-1] = shape_string[-1][:-1] # shape_string = "".join(shape_string) # except: # shape_string = "MULT" num_str = self.format_layer_idx(idx) return num_str + layer_type #+ shape_string def format_layer_idx(self, idx): """Pad the layer index with the appropriate amount of zeros. The number of zeros used for padding is determined by the maximum index (i.e. the number of layers in the network). Parameters ---------- idx: int Layer index. Returns ------- num_str: str Zero-padded layer index. """ max_idx = len(self.input_model.layers) return str(idx).zfill(len(str(max_idx))) @abstractmethod def get_output_shape(self, layer): """Get output shape of a ``layer``. Parameters ---------- layer Layer. Returns ------- output_shape: Sized Output shape of ``layer``. 
""" pass def try_insert_flatten(self, layer, idx, name_map): output_shape = self.get_output_shape(layer) previous_layers = self.get_inbound_layers(layer) prev_layer_output_shape = self.get_output_shape(previous_layers[0]) if len(output_shape) < len(prev_layer_output_shape) and \ self.get_type(layer) not in {'Flatten', 'Reshape'} and \ self.get_type(previous_layers[0]) != 'InputLayer': assert len(previous_layers) == 1, \ "Layer to flatten must be unique." print("Inserting layer Flatten.") num_str = self.format_layer_idx(idx) shape_string = str(np.prod(prev_layer_output_shape[1:])) self._layer_list.append({ 'name': num_str + 'Flatten_' + shape_string, 'layer_type': 'Flatten', 'inbound': self.get_inbound_names(layer, name_map)}) name_map['Flatten' + str(idx)] = idx return True else: return False @abstractmethod def parse_dense(self, layer, attributes): """Parse a fully-connected layer. Parameters ---------- layer: Layer. attributes: dict The layer attributes as key-value pairs in a dict. """ pass @abstractmethod def parse_convolution(self, layer, attributes): """Parse a convolutional layer. Parameters ---------- layer: Layer. attributes: dict The layer attributes as key-value pairs in a dict. """ pass @abstractmethod def parse_depthwiseconvolution(self, layer, attributes): """Parse a depthwise convolution layer. Parameters ---------- layer: Layer. attributes: dict The layer attributes as key-value pairs in a dict. """ pass def parse_sparse(self, layer, attributes): pass def parse_sparse_convolution(self, layer, attributes): pass def parse_sparse_depthwiseconvolution(self, layer, attributes): pass @abstractmethod def parse_pooling(self, layer, attributes): """Parse a pooling layer. Parameters ---------- layer: Layer. attributes: dict The layer attributes as key-value pairs in a dict. """ pass def absorb_activation(self, layer, attributes): """Detect what activation is used by the layer. Sometimes the Dense or Conv layer specifies its activation directly, sometimes it is followed by a dedicated Activation layer (possibly with BatchNormalization in between). Here we try to find such an activation layer, and add this information to the Dense/Conv layer itself. The separate Activation layer can then be removed. Parameters ---------- layer: Layer. attributes: dict The layer attributes as key-value pairs in a dict. """ activation_str = self.get_activation(layer) outbound = layer for _ in range(3): outbound = list(self.get_outbound_layers(outbound)) if len(outbound) != 1: break else: outbound = outbound[0] if self.get_type(outbound) == 'Activation': activation_str = self.get_activation(outbound) break # Todo: Take into account relu parameters. if self.get_type(outbound) == 'ReLU': print("Parsing ReLU parameters not yet implemented.") activation_str = 'relu' break try: self.get_activation(outbound) break except AttributeError: pass activation, activation_str = get_custom_activation(activation_str) if activation_str == 'softmax' and \ self.config.getboolean('conversion', 'softmax_to_relu'): activation = 'relu' print("Replaced softmax by relu activation function.") elif activation_str == 'linear' and self.get_type(layer) == 'Dense' \ and self.config.getboolean('conversion', 'append_softmax', fallback=False): activation = 'softmax' print("Added softmax.") else: print("Using activation {}.".format(activation_str)) attributes['activation'] = activation @abstractmethod def get_activation(self, layer): """Get the activation string of an activation ``layer``. 
Parameters ---------- layer Layer Returns ------- activation: str String indicating the activation of the ``layer``. """ pass @abstractmethod def get_outbound_layers(self, layer): """Get outbound layers of ``layer``. Parameters ---------- layer: Layer. Returns ------- outbound: list Outbound layers of ``layer``. """ pass @abstractmethod def parse_concatenate(self, layer, attributes): """Parse a concatenation layer. Parameters ---------- layer: Layer. attributes: dict The layer attributes as key-value pairs in a dict. """ pass def build_parsed_model(self): """Create a Keras model suitable for conversion to SNN. This method uses the specifications in `_layer_list` to build a Keras model. The resulting model contains all essential information about the original network, independently of the model library in which the original network was built (e.g. Caffe). Returns ------- parsed_model: keras.models.Model A Keras model, functionally equivalent to `input_model`. """ img_input = keras.layers.Input( batch_shape=self.get_batch_input_shape(), name=self.input_layer_name) parsed_layers = {self.input_layer_name: img_input} print("Building parsed model...\n") for layer in self._layer_list: # Replace 'parameters' key with Keras key 'weights' if 'parameters' in layer: layer['weights'] = layer.pop('parameters') # Add layer layer_type = layer.pop('layer_type') if hasattr(keras.layers, layer_type): parsed_layer = getattr(keras.layers, layer_type) else: import keras_rewiring parsed_layer = getattr(keras_rewiring.sparse_layer, layer_type) inbound = [parsed_layers[inb] for inb in layer.pop('inbound')] if len(inbound) == 1: inbound = inbound[0] check_for_custom_activations(layer) parsed_layers[layer['name']] = parsed_layer(**layer)(inbound) print("Compiling parsed model...\n") self.parsed_model = keras.models.Model(img_input, parsed_layers[ self._layer_list[-1]['name']]) # Optimizer and loss do not matter because we only do inference. top_k = lambda x, y: keras.metrics.top_k_categorical_accuracy( x, y, self.config.getint('simulation', 'top_k')) self.parsed_model.compile('sgd', 'categorical_crossentropy', ['accuracy', top_k]) # Todo: Enable adding custom metric via self.input_model.metrics. 
self.parsed_model.summary() return self.parsed_model def build_parsed_RNet(self, loss_fn, optimizer, metrics=None): img_input = keras.layers.Input( batch_shape=self.get_batch_input_shape(), name=self.input_layer_name) parsed_layers = {self.input_layer_name: img_input} print("Building parsed model...\n") for layer in self._layer_list: # Replace 'parameters' key with Keras key 'weights' if 'parameters' in layer: layer['weights'] = layer.pop('parameters') # Add layer layer_type = layer.pop('layer_type') if hasattr(keras.layers, layer_type): parsed_layer = getattr(keras.layers, layer_type) elif hasattr(custom_layers, layer_type): parsed_layer = getattr(custom_layers, layer_type) else: import keras_rewiring parsed_layer = getattr(keras_rewiring.sparse_layer, layer_type) inbound = [parsed_layers[inb] for inb in layer.pop('inbound')] if len(inbound) == 1: inbound = inbound[0] check_for_custom_activations(layer) parsed_layers[layer['name']] = parsed_layer(**layer)(inbound) print("Compiling parsed model...\n") self.parsed_model = keras.models.Model(img_input, parsed_layers[ self._layer_list[-1]['name']]) if metrics is not None: self.parsed_model.compile(loss=loss_fn, optimizer=optimizer, metrics=metrics) else: self.parsed_model.compile(loss=loss_fn, optimizer=optimizer) self.parsed_model.summary() return self.parsed_model def evaluate(self, batch_size, num_to_test, x_test=None, y_test=None, dataflow=None): """Evaluate parsed Keras model. Can use either numpy arrays ``x_test, y_test`` containing the test samples, or generate them with a dataflow (``keras.ImageDataGenerator.flow_from_directory`` object). Parameters ---------- batch_size: int Batch size num_to_test: int Number of samples to test x_test: Optional[np.ndarray] y_test: Optional[np.ndarray] dataflow: keras.ImageDataGenerator.flow_from_directory """ assert (x_test is not None and y_test is not None or dataflow is not None), "No test samples provided." if x_test is not None: score = self.parsed_model.evaluate(x_test, y_test, batch_size, verbose=0) else: steps = int(num_to_test / batch_size) score = self.parsed_model.evaluate(dataflow, steps=steps) print("Top-1 accuracy: {:.2%}".format(score[1])) print("Top-5 accuracy: {:.2%}\n".format(score[2])) return score @property def input_layer_name(self): return 'input' def absorb_bn_parameters(weight, bias, mean, var_eps_sqrt_inv, gamma, beta, axis, image_data_format, is_depthwise=False): """ Absorb the parameters of a batch-normalization layer into the previous layer. """ axis = weight.ndim + axis if axis < 0 else axis print("Using BatchNorm axis {}.".format(axis)) # Map batch norm axis from layer dimension space to kernel dimension space. # Assumes that kernels are shaped like # [height, width, num_input_channels, num_output_channels], # and layers like [batch_size, channels, height, width] or # [batch_size, height, width, channels].
if weight.ndim == 4: channel_axis = 2 if is_depthwise else 3 if image_data_format == 'channels_first': layer2kernel_axes_map = [None, channel_axis, 0, 1] else: layer2kernel_axes_map = [None, 0, 1, channel_axis] axis = layer2kernel_axes_map[axis] broadcast_shape = [1] * weight.ndim broadcast_shape[axis] = weight.shape[axis] var_eps_sqrt_inv = np.reshape(var_eps_sqrt_inv, broadcast_shape) gamma = np.reshape(gamma, broadcast_shape) beta = np.reshape(beta, broadcast_shape) bias = np.reshape(bias, broadcast_shape) mean = np.reshape(mean, broadcast_shape) bias_bn = np.ravel(beta + (bias - mean) * gamma * var_eps_sqrt_inv) weight_bn = weight * gamma * var_eps_sqrt_inv return weight_bn, bias_bn def modify_parameter_precision(weights, biases, config, attributes): if config.getboolean('cell', 'binarize_weights'): from snntoolbox.utils.utils import binarize print("Binarizing weights.") weights = binarize(weights) elif config.getboolean('cell', 'quantize_weights'): assert 'Qm.f' in attributes, \ "In the [cell] section of the configuration file, " \ "'quantize_weights' was set to True. For this to " \ "work, the layer needs to specify the fixed point " \ "number format 'Qm.f'." from snntoolbox.utils.utils import reduce_precision m, f = attributes.get('Qm.f') print("Quantizing weights to Q{}.{}.".format(m, f)) weights = reduce_precision(weights, m, f) if attributes.get('quantize_bias', False): biases = reduce_precision(biases, m, f) # These attributes are not needed any longer and would not be # understood by Keras when building the parsed model. attributes.pop('quantize_bias', None) attributes.pop('Qm.f', None) return weights, biases def padding_string(pad, pool_size): """Get string defining the border mode. Parameters ---------- pad: tuple[int] Zero-padding in x- and y-direction. pool_size: list[int] Size of kernel. Returns ------- padding: str Border mode identifier. """ if isinstance(pad, str): return pad if pad == (0, 0): padding = 'valid' elif pad == (pool_size[0] // 2, pool_size[1] // 2): padding = 'same' elif pad == (pool_size[0] - 1, pool_size[1] - 1): padding = 'full' else: raise NotImplementedError( "Padding {} could not be interpreted as any of the ".format(pad) + "supported border modes 'valid', 'same' or 'full'.") return padding def load_parameters(filepath): """Load all layer parameters from an HDF5 file.""" import h5py f = h5py.File(filepath, 'r') params = [] for k in sorted(f.keys()): params.append(np.array(f.get(k))) f.close() return params def save_parameters(params, filepath, fileformat='h5'): """Save all layer parameters to an HDF5 file.""" if fileformat == 'pkl': pickle.dump(params, open(filepath + '.pkl', str('wb'))) else: import h5py with h5py.File(filepath, mode='w') as f: for i, p in enumerate(params): if i < 10: j = '00' + str(i) elif i < 100: j = '0' + str(i) else: j = str(i) f.create_dataset('param_' + j, data=p) def has_weights(layer): """Return ``True`` if layer has weights. Parameters ---------- layer : keras.layers.Layer Keras layer Returns ------- : bool ``True`` if layer has weights. """ return len(layer.weights) def get_inbound_layers_with_params(layer): """Iterate until inbound layers are found that have parameters. Parameters ---------- layer: keras.layers.Layer Layer Returns ------- : list List of inbound layers. 
""" inbound = layer while True: inbound = get_inbound_layers(inbound) if len(inbound) == 1: inbound = inbound[0] if has_weights(inbound): return [inbound] else: result = [] for inb in inbound: if has_weights(inb): result.append(inb) else: result += get_inbound_layers_with_params(inb) return result def get_inbound_layers_without_params(layer): """Return inbound layers. Parameters ---------- layer: Keras.layers A Keras layer. Returns ------- : list[Keras.layers] List of inbound layers. """ return [layer for layer in get_inbound_layers(layer) if not has_weights(layer)] def get_inbound_layers(layer): """Return inbound layers. Parameters ---------- layer: Keras.layers A Keras layer. Returns ------- : list[Keras.layers] List of inbound layers. """ try: # noinspection PyProtectedMember inbound_layers = layer._inbound_nodes[0].inbound_layers except AttributeError: # For Keras backward-compatibility. inbound_layers = layer.inbound_nodes[0].inbound_layers if not isinstance(inbound_layers, (list, tuple)): inbound_layers = [inbound_layers] return inbound_layers def get_outbound_layers(layer): """Return outbound layers. Parameters ---------- layer: Keras.layers A Keras layer. Returns ------- : list[Keras.layers] List of outbound layers. """ try: # noinspection PyProtectedMember outbound_nodes = layer._outbound_nodes except AttributeError: # For Keras backward-compatibility. outbound_nodes = layer.outbound_nodes return [on.outbound_layer for on in outbound_nodes] def get_outbound_activation(layer): """ Iterate over 2 outbound layers to find an activation layer. If there is no activation layer, take the activation of the current layer. Parameters ---------- layer: Union[keras.layers.Conv2D, keras.layers.Dense] Layer Returns ------- activation: str Name of outbound activation type. """ activation = layer.activation.__name__ outbound = layer for _ in range(2): outbound = get_outbound_layers(outbound) if len(outbound) == 1 and get_type(outbound[0]) == 'Activation': activation = outbound[0].activation.__name__ return activation def get_fanin(layer): """ Return fan-in of a neuron in ``layer``. Parameters ---------- layer: Subclass[keras.layers.Layer] Layer. Returns ------- fanin: int Fan-in. """ layer_type = get_type(layer) if 'Conv' in layer_type: ax = 1 if IS_CHANNELS_FIRST else -1 fanin = np.prod(layer.kernel_size) * layer.input_shape[ax] elif 'Dense' in layer_type: fanin = layer.input_shape[1] elif 'Pool' in layer_type: fanin = 0 else: fanin = 0 return fanin def get_fanout(layer, config): """ Return fan-out of a neuron in ``layer``. Parameters ---------- layer: Subclass[keras.layers.Layer] Layer. config: configparser.ConfigParser Settings. Returns ------- fanout: Union[int, ndarray] Fan-out. The fan-out of a neuron projecting onto a convolution layer varies between neurons in a feature map if the stride of the convolution layer is greater than unity. In this case, return an array of the same shape as the layer. """ from snntoolbox.simulation.utils import get_spiking_outbound_layers # In branched architectures like GoogLeNet, we have to consider multiple # outbound layers. 
next_layers = get_spiking_outbound_layers(layer, config) fanout = 0 for next_layer in next_layers: if 'Conv' in next_layer.name and not has_stride_unity(next_layer): shape = layer.output_shape if 'Input' in get_type(layer): shape = fix_input_layer_shape(shape) fanout = np.zeros(shape[1:]) break for next_layer in next_layers: if 'Dense' in next_layer.name: fanout += next_layer.units elif 'Pool' in next_layer.name: fanout += 1 elif 'DepthwiseConv' in next_layer.name: if has_stride_unity(next_layer): fanout += np.prod(next_layer.kernel_size) else: fanout += get_fanout_array(layer, next_layer, True) elif 'Conv' in next_layer.name: if has_stride_unity(next_layer): fanout += np.prod(next_layer.kernel_size) * next_layer.filters else: fanout += get_fanout_array(layer, next_layer) return fanout def has_stride_unity(layer): """Return `True` if the strides in all dimensions of a ``layer`` are 1.""" return all([s == 1 for s in layer.strides]) def get_fanout_array(layer_pre, layer_post, is_depthwise_conv=False): """ Return an array of the same shape as ``layer_pre``, where each entry gives the number of outgoing connections of a neuron. In convolution layers where the post-synaptic layer has stride > 1, the fan-out varies between neurons. """ ax = 1 if IS_CHANNELS_FIRST else 0 nx = layer_post.output_shape[2 + ax] # Width of feature map ny = layer_post.output_shape[1 + ax] # Height of feature map nz = layer_post.output_shape[ax] # Number of channels kx, ky = layer_post.kernel_size # Width and height of kernel px = int((kx - 1) / 2) if layer_post.padding == 'same' else 0 py = int((ky - 1) / 2) if layer_post.padding == 'same' else 0 sx = layer_post.strides[1] sy = layer_post.strides[0] shape = layer_pre.output_shape if 'Input' in get_type(layer_pre): shape = fix_input_layer_shape(shape) fanout = np.zeros(shape[1:]) for y_pre in range(fanout.shape[0 + ax]): y_post = [int((y_pre + py) / sy)] wy = (y_pre + py) % sy i = 1 while wy + i * sy < ky: y = y_post[0] - i if 0 <= y < ny: y_post.append(y) i += 1 for x_pre in range(fanout.shape[1 + ax]): x_post = [int((x_pre + px) / sx)] wx = (x_pre + px) % sx i = 1 while wx + i * sx < kx: x = x_post[0] - i if 0 <= x < nx: x_post.append(x) i += 1 if ax: fanout[:, y_pre, x_pre] = len(x_post) * len(y_post) else: fanout[y_pre, x_pre, :] = len(x_post) * len(y_post) if not is_depthwise_conv: fanout *= nz return fanout def get_type(layer): """Get type of Keras layer. Parameters ---------- layer: Keras.layers.Layer Keras layer. Returns ------- : str Layer type. """ return layer.__class__.__name__ def get_quantized_activation_function_from_string(activation_str): """ Parse a string describing the activation of a layer, and return the corresponding activation function. Parameters ---------- activation_str : str Describes activation. Returns ------- activation : functools.partial Activation function. Examples -------- >>> f = get_quantized_activation_function_from_string('relu_Q1.15') >>> f functools.partial(<function reduce_precision at 0x7f919af92b70>, f='15', m='1') >>> print(f.__name__) relu_Q1.15 """ # TODO: We implicitly assume relu activation function here. Change this to # allow for general activation functions with reduced precision. 
from functools import partial from snntoolbox.utils.utils import quantized_relu m, f = map(int, activation_str[activation_str.index('_Q') + 2:].split('.')) activation = partial(quantized_relu, m=m, f=f) activation.__name__ = activation_str return activation def get_clamped_relu_from_string(activation_str): from snntoolbox.utils.utils import ClampedReLU threshold, max_value = map(eval, activation_str.split('_')[-2:]) activation = ClampedReLU(threshold, max_value) return activation def get_noisy_softplus_from_string(activation_str): from snntoolbox.utils.utils import NoisySoftplus k, sigma = map(eval, activation_str.split('_')[-2:]) activation = NoisySoftplus(k, sigma) return activation def get_custom_activation(activation_str): """ If ``activation_str`` describes a custom activation function, import this function from `snntoolbox.utils.utils` and return it. If custom activation function is not found or implemented, return the ``activation_str`` in place of the activation function. Parameters ---------- activation_str : str Describes activation. Returns ------- activation : Activation function. activation_str : str Describes activation. """ if activation_str == 'binary_sigmoid': from snntoolbox.utils.utils import binary_sigmoid activation = binary_sigmoid elif activation_str == 'binary_tanh': from snntoolbox.utils.utils import binary_tanh activation = binary_tanh elif '_Q' in activation_str: activation = get_quantized_activation_function_from_string( activation_str) elif 'clamped_relu' in activation_str: activation = get_clamped_relu_from_string(activation_str) elif 'NoisySoftplus' in activation_str: from snntoolbox.utils.utils import NoisySoftplus activation = NoisySoftplus else: activation = activation_str return activation, activation_str def assemble_custom_dict(*args): assembly = [] for arg in args: assembly += arg.items() return dict(assembly) def get_custom_layers_dict(filepath=None): """ Import all implemented custom layers so they can be used when loading a Keras model. Parameters ---------- filepath : Optional[str] Path to json file containing additional custom objects. """ from snntoolbox.utils.utils import is_module_installed custom_layers = {} if is_module_installed('keras_rewiring'): from keras_rewiring import Sparse, SparseConv2D, SparseDepthwiseConv2D from keras_rewiring.optimizers import NoisySGD custom_layers.update({'Sparse': Sparse, 'SparseConv2D': SparseConv2D, 'SparseDepthwiseConv2D': SparseDepthwiseConv2D, 'NoisySGD': NoisySGD}) if filepath is not None and filepath != '': with open(filepath) as f: kwargs = json.load(f) custom_layers.update(kwargs) return custom_layers def get_custom_activations_dict(filepath=None): """ Import all implemented custom activation functions so they can be used when loading a Keras model. Parameters ---------- filepath : Optional[str] Path to json file containing additional custom objects. """ from snntoolbox.utils.utils import binary_sigmoid, binary_tanh, \ ClampedReLU, LimitedReLU, NoisySoftplus # Todo: We should be able to load a different activation for each layer. # Need to remove this hack: activation_str = 'relu_Q1.4' activation = get_quantized_activation_function_from_string(activation_str) custom_objects = { 'binary_sigmoid': binary_sigmoid, 'binary_tanh': binary_tanh, # Todo: This should work regardless of the specific attributes of the # ClampedReLU class used during training. 
'clamped_relu': ClampedReLU(), 'LimitedReLU': LimitedReLU, 'relu6': LimitedReLU({'max_value': 6}), activation_str: activation, 'Noisy_Softplus': NoisySoftplus, 'precision': precision, 'activity_regularizer': keras.regularizers.l1} if filepath is not None and filepath != '': with open(filepath) as f: kwargs = json.load(f) for key in kwargs: if 'LimitedReLU' in key: custom_objects[key] = LimitedReLU(kwargs[key]) return custom_objects def check_for_custom_activations(layer_attributes): """ Check if the layer contains a custom activation function, and deal with it appropriately. Parameters ---------- layer_attributes: dict A dictionary containing the attributes of the layer. """ if 'activation' not in layer_attributes.keys(): return def precision(y_true, y_pred): """Precision metric. Computes the precision, a metric for multi-label classification of how many selected items are relevant. Only computes a batch-wise average of precision. """ import tensorflow.keras.backend as k true_positives = k.sum(k.round(k.clip(y_true * y_pred, 0, 1))) predicted_positives = k.sum(k.round(k.clip(y_pred, 0, 1))) return true_positives / (predicted_positives + k.epsilon()) def fix_input_layer_shape(shape): """ The ``tf.keras.models.load_model`` function introduced a bug that wraps the input tensors and shapes in a single-entry list, i.e. ``output_shape == [(None, 1, 28, 28)]``. Thus we have to apply ``[0]`` here. """ if len(shape) == 1: return shape[0] return shape
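# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal numpy
# check of the batch-norm folding performed by `absorb_bn_parameters` above.
# For a Dense layer (weight of ndim 2, channel axis 1) it verifies that
#     Dense -> BatchNorm  ==  Dense with folded parameters,
# i.e. W' = W * gamma / sqrt(var + eps) and
#      b' = beta + (b - mean) * gamma / sqrt(var + eps).
# All names defined below are local to this example.
def _check_bn_folding():
    rng = np.random.RandomState(0)
    n_in, n_out, eps = 4, 3, 1e-3
    w, b = rng.randn(n_in, n_out), rng.randn(n_out)
    mean, var = rng.randn(n_out), rng.rand(n_out) + 0.5
    gamma, beta = rng.randn(n_out), rng.randn(n_out)
    inv = 1.0 / np.sqrt(var + eps)

    w_bn, b_bn = absorb_bn_parameters(w, b, mean, inv, gamma, beta, 1,
                                      'channels_last')

    x = rng.randn(5, n_in)
    y_ref = gamma * (x.dot(w) + b - mean) * inv + beta  # Dense, then BN
    y_folded = x.dot(w_bn) + b_bn                       # folded Dense only
    assert np.allclose(y_ref, y_folded)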
[ "tensorflow.keras.backend.image_data_format", "tensorflow.keras.models.Model", "numpy.reshape", "tensorflow.keras.backend.clip", "numpy.prod", "numpy.ravel", "tensorflow.keras.backend.epsilon", "numpy.zeros" ]
snntoolbox/parsing/utils.py
[(44, 'tensorflow.keras.backend.image_data_format', 'keras.backend.image_data_format', ([], {}), False, 'from tensorflow import keras\n'), (1195, 'numpy.reshape', 'np.reshape', (['var_eps_sqrt_inv', 'broadcast_shape'], {}), True, 'import numpy as np\n'), (1196, 'numpy.reshape', 'np.reshape', (['gamma', 'broadcast_shape'], {}), True, 'import numpy as np\n'), (1197, 'numpy.reshape', 'np.reshape', (['beta', 'broadcast_shape'], {}), True, 'import numpy as np\n'), (1198, 'numpy.reshape', 'np.reshape', (['bias', 'broadcast_shape'], {}), True, 'import numpy as np\n'), (1199, 'numpy.reshape', 'np.reshape', (['mean', 'broadcast_shape'], {}), True, 'import numpy as np\n'), (1200, 'numpy.ravel', 'np.ravel', (['(beta + (bias - mean) * gamma * var_eps_sqrt_inv)'], {}), True, 'import numpy as np\n'), (1271, 'h5py.File', 'h5py.File', (['filepath', '"""r"""'], {}), False, 'import h5py\n'), (1507, 'snntoolbox.simulation.utils.get_spiking_outbound_layers', 'get_spiking_outbound_layers', (['layer', 'config'], {}), False, 'from snntoolbox.simulation.utils import get_spiking_outbound_layers\n'), (1563, 'numpy.zeros', 'np.zeros', (['shape[1:]'], {}), True, 'import numpy as np\n'), (1650, 'functools.partial', 'partial', (['quantized_relu'], {'m': 'm', 'f': 'f'}), False, 'from functools import partial\n'), (1662, 'snntoolbox.utils.utils.ClampedReLU', 'ClampedReLU', (['threshold', 'max_value'], {}), False, 'from snntoolbox.utils.utils import binary_sigmoid, binary_tanh, ClampedReLU, LimitedReLU, NoisySoftplus\n'), (1672, 'snntoolbox.utils.utils.NoisySoftplus', 'NoisySoftplus', (['k', 'sigma'], {}), False, 'from snntoolbox.utils.utils import NoisySoftplus\n'), (1741, 'snntoolbox.utils.utils.is_module_installed', 'is_module_installed', (['"""keras_rewiring"""'], {}), False, 'from snntoolbox.utils.utils import is_module_installed\n'), (1069, 'tensorflow.keras.models.Model', 'keras.models.Model', (['img_input', "parsed_layers[self._layer_list[-1]['name']]"], {}), False, 'from tensorflow import keras\n'), (1110, 'tensorflow.keras.models.Model', 'keras.models.Model', (['img_input', "parsed_layers[self._layer_list[-1]['name']]"], {}), False, 'from tensorflow import keras\n'), (1210, 'snntoolbox.utils.utils.binarize', 'binarize', (['weights'], {}), False, 'from snntoolbox.utils.utils import binarize\n'), (1745, 'snntoolbox.simulation.backends.custom_layers.update', 'custom_layers.update', (["{'Sparse': Sparse, 'SparseConv2D': SparseConv2D, 'SparseDepthwiseConv2D':\n SparseDepthwiseConv2D, 'NoisySGD': NoisySGD}"], {}), True, 'import snntoolbox.simulation.backends.custom_layers as custom_layers\n'), (1783, 'snntoolbox.utils.utils.ClampedReLU', 'ClampedReLU', ([], {}), False, 'from snntoolbox.utils.utils import binary_sigmoid, binary_tanh, ClampedReLU, LimitedReLU, NoisySoftplus\n'), (1785, 'snntoolbox.utils.utils.LimitedReLU', 'LimitedReLU', (["{'max_value': 6}"], {}), False, 'from snntoolbox.utils.utils import binary_sigmoid, binary_tanh, ClampedReLU, LimitedReLU, NoisySoftplus\n'), (1220, 'snntoolbox.utils.utils.reduce_precision', 'reduce_precision', (['weights', 'm', 'f'], {}), False, 'from snntoolbox.utils.utils import reduce_precision\n'), (1289, 'h5py.File', 'h5py.File', (['filepath'], {'mode': '"""w"""'}), False, 'import h5py\n'), (1470, 'numpy.prod', 'np.prod', (['layer.kernel_size'], {}), True, 'import numpy as np\n'), (1514, 'numpy.zeros', 'np.zeros', (['shape[1:]'], {}), True, 'import numpy as np\n'), (1752, 'json.load', 'json.load', (['f'], {}), False, 'import json\n'), (1753, 
'snntoolbox.simulation.backends.custom_layers.update', 'custom_layers.update', (['kwargs'], {}), True, 'import snntoolbox.simulation.backends.custom_layers as custom_layers\n'), (1793, 'json.load', 'json.load', (['f'], {}), False, 'import json\n'), (1827, 'tensorflow.keras.backend.clip', 'k.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), True, 'import tensorflow.keras.backend as k\n'), (1828, 'tensorflow.keras.backend.clip', 'k.clip', (['y_pred', '(0)', '(1)'], {}), True, 'import tensorflow.keras.backend as k\n'), (1829, 'tensorflow.keras.backend.epsilon', 'k.epsilon', ([], {}), True, 'import tensorflow.keras.backend as k\n'), (465, 'numpy.zeros', 'np.zeros', (['[1, 1, 2 * c, c]'], {}), True, 'import numpy as np\n'), (835, 'numpy.prod', 'np.prod', (['prev_layer_output_shape[1:]'], {}), True, 'import numpy as np\n'), (1222, 'snntoolbox.utils.utils.reduce_precision', 'reduce_precision', (['biases', 'm', 'f'], {}), False, 'from snntoolbox.utils.utils import reduce_precision\n'), (1797, 'snntoolbox.utils.utils.LimitedReLU', 'LimitedReLU', (['kwargs[key]'], {}), False, 'from snntoolbox.utils.utils import binary_sigmoid, binary_tanh, ClampedReLU, LimitedReLU, NoisySoftplus\n'), (141, 'tensorflow.keras.backend.image_data_format', 'keras.backend.image_data_format', ([], {}), False, 'from tensorflow import keras\n'), (168, 'numpy.prod', 'np.prod', (['layer.output_shape[1:]'], {}), True, 'import numpy as np\n'), (223, 'numpy.zeros', 'np.zeros', (['[1, 1, c, c]'], {}), True, 'import numpy as np\n'), (406, 'tensorflow.keras.backend.image_data_format', 'keras.backend.image_data_format', ([], {}), False, 'from tensorflow import keras\n'), (433, 'numpy.prod', 'np.prod', (['layer.output_shape[1:]'], {}), True, 'import numpy as np\n'), (1524, 'numpy.prod', 'np.prod', (['next_layer.kernel_size'], {}), True, 'import numpy as np\n'), (474, 'numpy.zeros', 'np.zeros', (['c'], {}), True, 'import numpy as np\n'), (232, 'numpy.zeros', 'np.zeros', (['c'], {}), True, 'import numpy as np\n'), (1529, 'numpy.prod', 'np.prod', (['next_layer.kernel_size'], {}), True, 'import numpy as np\n')]
minai2020/tensorflow
a6b4474186c906c09cd29b1a7d4d0af714c261e4
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for image preprocessing layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.compat import compat from tensorflow.python.framework import errors from tensorflow.python.framework import test_util as tf_test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.layers.preprocessing import image_preprocessing from tensorflow.python.keras.utils.generic_utils import CustomObjectScope from tensorflow.python.ops import gen_stateful_random_ops from tensorflow.python.ops import image_ops_impl as image_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import stateless_random_ops from tensorflow.python.platform import test @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class ResizingTest(keras_parameterized.TestCase): def _run_test(self, kwargs, expected_height, expected_width): np.random.seed(1337) num_samples = 2 orig_height = 5 orig_width = 8 channels = 3 kwargs.update({'height': expected_height, 'width': expected_width}) with tf_test_util.use_gpu(): testing_utils.layer_test( image_preprocessing.Resizing, kwargs=kwargs, input_shape=(num_samples, orig_height, orig_width, channels), expected_output_shape=(None, expected_height, expected_width, channels)) @parameterized.named_parameters( ('down_sample_bilinear_2_by_2', {'interpolation': 'bilinear'}, 2, 2), ('down_sample_bilinear_3_by_2', {'interpolation': 'bilinear'}, 3, 2), ('down_sample_nearest_2_by_2', {'interpolation': 'nearest'}, 2, 2), ('down_sample_nearest_3_by_2', {'interpolation': 'nearest'}, 3, 2), ('down_sample_area_2_by_2', {'interpolation': 'area'}, 2, 2), ('down_sample_area_3_by_2', {'interpolation': 'area'}, 3, 2)) def test_down_sampling(self, kwargs, expected_height, expected_width): with CustomObjectScope({'Resizing': image_preprocessing.Resizing}): self._run_test(kwargs, expected_height, expected_width) @parameterized.named_parameters( ('up_sample_bilinear_10_by_12', {'interpolation': 'bilinear'}, 10, 12), ('up_sample_bilinear_12_by_12', {'interpolation': 'bilinear'}, 12, 12), ('up_sample_nearest_10_by_12', {'interpolation': 'nearest'}, 10, 12), ('up_sample_nearest_12_by_12', {'interpolation': 'nearest'}, 12, 12), ('up_sample_area_10_by_12', {'interpolation': 'area'}, 10, 12), ('up_sample_area_12_by_12', {'interpolation': 'area'}, 12, 12)) def test_up_sampling(self, kwargs, expected_height, expected_width): with CustomObjectScope({'Resizing': image_preprocessing.Resizing}): self._run_test(kwargs, expected_height, expected_width) @parameterized.named_parameters( ('reshape_bilinear_10_by_4', {'interpolation': 'bilinear'}, 10, 4)) def test_reshaping(self, kwargs, expected_height, 
expected_width): with CustomObjectScope({'Resizing': image_preprocessing.Resizing}): self._run_test(kwargs, expected_height, expected_width) def test_invalid_interpolation(self): with self.assertRaises(NotImplementedError): image_preprocessing.Resizing(5, 5, 'invalid_interpolation') def test_config_with_custom_name(self): layer = image_preprocessing.Resizing(5, 5, name='image_preproc') config = layer.get_config() layer_1 = image_preprocessing.Resizing.from_config(config) self.assertEqual(layer_1.name, layer.name) def get_numpy_center_crop(images, expected_height, expected_width): orig_height = images.shape[1] orig_width = images.shape[2] height_start = int((orig_height - expected_height) / 2) width_start = int((orig_width - expected_width) / 2) height_end = height_start + expected_height width_end = width_start + expected_width return images[:, height_start:height_end, width_start:width_end, :] @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class CenterCropTest(keras_parameterized.TestCase): def _run_test(self, expected_height, expected_width): np.random.seed(1337) num_samples = 2 orig_height = 5 orig_width = 8 channels = 3 kwargs = {'height': expected_height, 'width': expected_width} input_images = np.random.random( (num_samples, orig_height, orig_width, channels)).astype(np.float32) expected_output = get_numpy_center_crop( input_images, expected_height, expected_width) with tf_test_util.use_gpu(): testing_utils.layer_test( image_preprocessing.CenterCrop, kwargs=kwargs, input_shape=(num_samples, orig_height, orig_width, channels), input_data=input_images, expected_output=expected_output, expected_output_shape=(None, expected_height, expected_width, channels)) @parameterized.named_parameters( ('center_crop_3_by_4', 3, 4), ('center_crop_3_by_2', 3, 2)) def test_center_crop_aligned(self, expected_height, expected_width): with CustomObjectScope({'CenterCrop': image_preprocessing.CenterCrop}): self._run_test(expected_height, expected_width) @parameterized.named_parameters( ('center_crop_4_by_5', 4, 5), ('center_crop_4_by_3', 4, 3)) def test_center_crop_mis_aligned(self, expected_height, expected_width): with CustomObjectScope({'CenterCrop': image_preprocessing.CenterCrop}): self._run_test(expected_height, expected_width) @parameterized.named_parameters( ('center_crop_4_by_6', 4, 6), ('center_crop_3_by_2', 3, 2)) def test_center_crop_half_mis_aligned(self, expected_height, expected_width): with CustomObjectScope({'CenterCrop': image_preprocessing.CenterCrop}): self._run_test(expected_height, expected_width) @parameterized.named_parameters( ('center_crop_5_by_12', 5, 12), ('center_crop_10_by_8', 10, 8), ('center_crop_10_by_12', 10, 12)) def test_invalid_center_crop(self, expected_height, expected_width): with self.assertRaisesRegexp(errors.InvalidArgumentError, r'assertion failed'): self._run_test(expected_height, expected_width) def test_config_with_custom_name(self): layer = image_preprocessing.CenterCrop(5, 5, name='image_preproc') config = layer.get_config() layer_1 = image_preprocessing.CenterCrop.from_config(config) self.assertEqual(layer_1.name, layer.name) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class RandomCropTest(keras_parameterized.TestCase): def _run_test(self, expected_height, expected_width): np.random.seed(1337) num_samples = 2 orig_height = 5 orig_width = 8 channels = 3 kwargs = {'height': expected_height, 'width': expected_width} with tf_test_util.use_gpu(): testing_utils.layer_test( image_preprocessing.RandomCrop, kwargs=kwargs, 
input_shape=(num_samples, orig_height, orig_width, channels), expected_output_shape=(None, expected_height, expected_width, channels)) @parameterized.named_parameters( ('random_crop_5_by_12', 5, 12), ('random_crop_10_by_8', 10, 8), ('random_crop_10_by_12', 10, 12)) def test_invalid_random_crop(self, expected_height, expected_width): with self.assertRaises(errors.InvalidArgumentError): with CustomObjectScope({'RandomCrop': image_preprocessing.RandomCrop}): self._run_test(expected_height, expected_width) def test_training_with_mock(self): if test.is_built_with_rocm(): # TODO(rocm): # re-enable this test once ROCm adds support for # the StatefulUniformFullInt Op (on the GPU) self.skipTest('Feature not supported on ROCm') np.random.seed(1337) height, width = 3, 4 height_offset = np.random.randint(low=0, high=3) width_offset = np.random.randint(low=0, high=5) mock_offset = [0, height_offset, width_offset, 0] with test.mock.patch.object( stateless_random_ops, 'stateless_random_uniform', return_value=mock_offset): with tf_test_util.use_gpu(): layer = image_preprocessing.RandomCrop(height, width) inp = np.random.random((12, 5, 8, 3)) actual_output = layer(inp, training=1) expected_output = inp[:, height_offset:(height_offset + height), width_offset:(width_offset + width), :] self.assertAllClose(expected_output, actual_output) @parameterized.named_parameters( ('random_crop_4_by_6', 4, 6), ('random_crop_3_by_2', 3, 2)) def test_random_crop_output_shape(self, expected_height, expected_width): if test.is_built_with_rocm(): # TODO(rocm): # re-enable this test once ROCm adds support for # the StatefulUniformFullInt Op (on the GPU) self.skipTest('Feature not supported on ROCm') with CustomObjectScope({'RandomCrop': image_preprocessing.RandomCrop}): self._run_test(expected_height, expected_width) def test_predicting_with_mock_longer_height(self): np.random.seed(1337) height, width = 3, 3 inp = np.random.random((12, 10, 6, 3)) with tf_test_util.use_gpu(): layer = image_preprocessing.RandomCrop(height, width) actual_output = layer(inp, training=0) resized_inp = image_ops.resize_images_v2( inp, size=[5, 3]) expected_output = resized_inp[:, 1:4, :, :] self.assertAllClose(expected_output, actual_output) def test_predicting_with_mock_longer_width(self): np.random.seed(1337) height, width = 4, 6 inp = np.random.random((12, 8, 16, 3)) with tf_test_util.use_gpu(): layer = image_preprocessing.RandomCrop(height, width) actual_output = layer(inp, training=0) resized_inp = image_ops.resize_images_v2( inp, size=[4, 8]) expected_output = resized_inp[:, :, 1:7, :] self.assertAllClose(expected_output, actual_output) def test_config_with_custom_name(self): layer = image_preprocessing.RandomCrop(5, 5, name='image_preproc') config = layer.get_config() layer_1 = image_preprocessing.RandomCrop.from_config(config) self.assertEqual(layer_1.name, layer.name) class RescalingTest(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_rescaling_base(self): kwargs = {'scale': 0.004} testing_utils.layer_test( image_preprocessing.Rescaling, kwargs=kwargs, input_shape=(2, 5, 6, 3), expected_output_shape=(None, 5, 6, 3)) @tf_test_util.run_v2_only def test_rescaling_correctness_float(self): layer = image_preprocessing.Rescaling(0.004) inputs = random_ops.random_uniform((2, 4, 5, 3)) outputs = layer(inputs) self.assertAllClose(outputs.numpy(), inputs.numpy() * 0.004) @tf_test_util.run_v2_only def test_rescaling_correctness_int(self): layer = image_preprocessing.Rescaling(0.004) inputs = 
random_ops.random_uniform((2, 4, 5, 3), 0, 100, dtype='int32') outputs = layer(inputs) self.assertEqual(outputs.dtype.name, 'float32') self.assertAllClose(outputs.numpy(), inputs.numpy() * 0.004) def test_config_with_custom_name(self): layer = image_preprocessing.Rescaling(0.5, name='rescaling') config = layer.get_config() layer_1 = image_preprocessing.Rescaling.from_config(config) self.assertEqual(layer_1.name, layer.name) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class RandomFlipTest(keras_parameterized.TestCase): def _run_test(self, mode, expected_output=None, mock_random=None): np.random.seed(1337) num_samples = 2 orig_height = 5 orig_width = 8 channels = 3 if mock_random is None: mock_random = [1 for _ in range(num_samples)] mock_random = np.reshape(mock_random, [2, 1, 1, 1]) inp = np.random.random((num_samples, orig_height, orig_width, channels)) if expected_output is None: expected_output = inp if mode == 'horizontal' or mode == 'horizontal_and_vertical': expected_output = np.flip(expected_output, axis=1) if mode == 'vertical' or mode == 'horizontal_and_vertical': expected_output = np.flip(expected_output, axis=2) with test.mock.patch.object( random_ops, 'random_uniform', return_value=mock_random): with tf_test_util.use_gpu(): layer = image_preprocessing.RandomFlip(mode) actual_output = layer(inp, training=1) self.assertAllClose(expected_output, actual_output) @parameterized.named_parameters( ('random_flip_horizontal', 'horizontal'), ('random_flip_vertical', 'vertical'), ('random_flip_both', 'horizontal_and_vertical')) def test_random_flip(self, mode): with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}): self._run_test(mode) def test_random_flip_horizontal_half(self): with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}): np.random.seed(1337) mock_random = [1, 0] mock_random = np.reshape(mock_random, [2, 1, 1, 1]) input_images = np.random.random((2, 5, 8, 3)).astype(np.float32) expected_output = input_images.copy() expected_output[0, :, :, :] = np.flip(input_images[0, :, :, :], axis=0) self._run_test('horizontal', expected_output, mock_random) def test_random_flip_vertical_half(self): with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}): np.random.seed(1337) mock_random = [1, 0] mock_random = np.reshape(mock_random, [2, 1, 1, 1]) input_images = np.random.random((2, 5, 8, 3)).astype(np.float32) expected_output = input_images.copy() expected_output[0, :, :, :] = np.flip(input_images[0, :, :, :], axis=1) self._run_test('vertical', expected_output, mock_random) def test_random_flip_inference(self): with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}): input_images = np.random.random((2, 5, 8, 3)).astype(np.float32) expected_output = input_images with tf_test_util.use_gpu(): layer = image_preprocessing.RandomFlip() actual_output = layer(input_images, training=0) self.assertAllClose(expected_output, actual_output) def test_random_flip_default(self): with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}): input_images = np.random.random((2, 5, 8, 3)).astype(np.float32) expected_output = np.flip(np.flip(input_images, axis=1), axis=2) mock_random = [1, 1] mock_random = np.reshape(mock_random, [2, 1, 1, 1]) with test.mock.patch.object( random_ops, 'random_uniform', return_value=mock_random): with self.cached_session(use_gpu=True): layer = image_preprocessing.RandomFlip() actual_output = layer(input_images, training=1) self.assertAllClose(expected_output, actual_output) 
@tf_test_util.run_v2_only def test_config_with_custom_name(self): layer = image_preprocessing.RandomFlip(name='image_preproc') config = layer.get_config() layer_1 = image_preprocessing.RandomFlip.from_config(config) self.assertEqual(layer_1.name, layer.name) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class RandomContrastTest(keras_parameterized.TestCase): def _run_test(self, lower, upper, expected_output=None, mock_random=None): np.random.seed(1337) num_samples = 2 orig_height = 5 orig_width = 8 channels = 3 if mock_random is None: mock_random = 0.2 inp = np.random.random((num_samples, orig_height, orig_width, channels)) if expected_output is None: # reduce mean on height. inp_mean = np.mean(inp, axis=1, keepdims=True) # reduce mean on width. inp_mean = np.mean(inp_mean, axis=2, keepdims=True) expected_output = (inp - inp_mean) * mock_random + inp_mean with test.mock.patch.object( random_ops, 'random_uniform', return_value=mock_random): with tf_test_util.use_gpu(): layer = image_preprocessing.RandomContrast((lower, upper)) actual_output = layer(inp, training=True) self.assertAllClose(expected_output, actual_output) @parameterized.named_parameters( ('random_contrast_2_by_5', 0.2, 0.5), ('random_contrast_2_by_13', 0.2, 1.3), ('random_contrast_5_by_2', 0.5, 0.2)) def test_random_contrast(self, lower, upper): with CustomObjectScope( {'RandomContrast': image_preprocessing.RandomContrast}): self._run_test(lower, upper) @parameterized.named_parameters( ('random_contrast_amplitude_2', 0.2), ('random_contrast_amplitude_5', 0.5)) def test_random_contrast_amplitude(self, amplitude): with CustomObjectScope( {'RandomContrast': image_preprocessing.RandomContrast}): input_images = np.random.random((2, 5, 8, 3)) with tf_test_util.use_gpu(): layer = image_preprocessing.RandomContrast(amplitude) layer(input_images) def test_random_contrast_inference(self): with CustomObjectScope( {'RandomContrast': image_preprocessing.RandomContrast}): input_images = np.random.random((2, 5, 8, 3)).astype(np.float32) expected_output = input_images with tf_test_util.use_gpu(): layer = image_preprocessing.RandomContrast((0.1, 0.2)) actual_output = layer(input_images, training=False) self.assertAllClose(expected_output, actual_output) def test_random_contrast_int_dtype(self): with CustomObjectScope( {'RandomContrast': image_preprocessing.RandomContrast}): input_images = np.random.randint(low=0, high=255, size=(2, 5, 8, 3)) with tf_test_util.use_gpu(): layer = image_preprocessing.RandomContrast((0.1, 0.2)) layer(input_images) def test_random_contrast_invalid_bounds(self): with self.assertRaises(ValueError): image_preprocessing.RandomContrast((-0.1, .5)) with self.assertRaises(ValueError): image_preprocessing.RandomContrast((1.1, .5)) with self.assertRaises(ValueError): image_preprocessing.RandomContrast((0.1, -0.2)) @tf_test_util.run_v2_only def test_config_with_custom_name(self): layer = image_preprocessing.RandomContrast((.5, .6), name='image_preproc') config = layer.get_config() layer_1 = image_preprocessing.RandomContrast.from_config(config) self.assertEqual(layer_1.name, layer.name) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class RandomTranslationTest(keras_parameterized.TestCase): def _run_test(self, height_factor, width_factor): np.random.seed(1337) num_samples = 2 orig_height = 5 orig_width = 8 channels = 3 kwargs = {'height_factor': height_factor, 'width_factor': width_factor} with tf_test_util.use_gpu(): testing_utils.layer_test( image_preprocessing.RandomTranslation, 
kwargs=kwargs, input_shape=(num_samples, orig_height, orig_width, channels), expected_output_shape=(None, orig_height, orig_width, channels)) @parameterized.named_parameters( ('random_translate_4_by_6', .4, .6), ('random_translate_3_by_2', .3, .2), ('random_translate_tuple_factor', (.5, .4), (.2, .3))) def test_random_translation(self, height_factor, width_factor): self._run_test(height_factor, width_factor) def test_random_translation_negative_lower(self): mock_offset = np.random.random((12, 1)) with test.mock.patch.object( gen_stateful_random_ops, 'stateful_uniform', return_value=mock_offset): with self.cached_session(use_gpu=True): layer = image_preprocessing.RandomTranslation((-0.2, .3), .4) layer_2 = image_preprocessing.RandomTranslation((0.2, .3), .4) inp = np.random.random((12, 5, 8, 3)).astype(np.float32) actual_output = layer(inp, training=1) actual_output_2 = layer_2(inp, training=1) self.assertAllClose(actual_output, actual_output_2) def test_random_translation_inference(self): with CustomObjectScope( {'RandomTranslation': image_preprocessing.RandomTranslation}): input_images = np.random.random((2, 5, 8, 3)).astype(np.float32) expected_output = input_images with tf_test_util.use_gpu(): layer = image_preprocessing.RandomTranslation(.5, .5) actual_output = layer(input_images, training=0) self.assertAllClose(expected_output, actual_output) @tf_test_util.run_v2_only def test_config_with_custom_name(self): layer = image_preprocessing.RandomTranslation(.5, .6, name='image_preproc') config = layer.get_config() layer_1 = image_preprocessing.RandomTranslation.from_config(config) self.assertEqual(layer_1.name, layer.name) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class RandomTransformTest(keras_parameterized.TestCase): def _run_random_transform_with_mock(self, transform_matrix, expected_output, mode, interpolation='bilinear'): inp = np.arange(15).reshape((1, 5, 3, 1)).astype(np.float32) with self.cached_session(use_gpu=True): output = image_preprocessing.transform( inp, transform_matrix, fill_mode=mode, interpolation=interpolation) self.assertAllClose(expected_output, output) def test_random_translation_reflect(self): # reflected output is (dcba|abcd|dcba) if compat.forward_compatible(2020, 3, 25): # Test down shift by 1. # pyformat: disable expected_output = np.asarray( [[0., 1., 2.], [0., 1., 2.], [3., 4., 5.], [6., 7., 8], [9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32) # pyformat: enable transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]]) self._run_random_transform_with_mock(transform_matrix, expected_output, 'reflect') # Test up shift by 1. # pyformat: disable expected_output = np.asarray( [[3., 4., 5.], [6., 7., 8], [9., 10., 11.], [12., 13., 14.], [12., 13., 14.]]).reshape((1, 5, 3, 1)).astype(np.float32) # pyformat: enable transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]]) self._run_random_transform_with_mock(transform_matrix, expected_output, 'reflect') # Test left shift by 1. # reflected output is (dcba|abcd|dcba) # pyformat: disable expected_output = np.asarray( [[1., 2., 2.], [4., 5., 5.], [7., 8., 8.], [10., 11., 11.], [13., 14., 14.]]).reshape((1, 5, 3, 1)).astype(np.float32) # pyformat: enable transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]]) self._run_random_transform_with_mock(transform_matrix, expected_output, 'reflect') # Test right shift by 1. 
# pyformat: disable expected_output = np.asarray( [[0., 0., 1.], [3., 3., 4], [6., 6., 7.], [9., 9., 10.], [12., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32) # pyformat: enable transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]]) self._run_random_transform_with_mock(transform_matrix, expected_output, 'reflect') def test_random_translation_wrap(self): # wrapped output is (abcd|abcd|abcd) if compat.forward_compatible(2020, 3, 25): # Test down shift by 1. # pyformat: disable expected_output = np.asarray( [[12., 13., 14.], [0., 1., 2.], [3., 4., 5.], [6., 7., 8], [9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32) # pyformat: enable transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]]) self._run_random_transform_with_mock(transform_matrix, expected_output, 'wrap') # Test up shift by 1. # pyformat: disable expected_output = np.asarray( [[3., 4., 5.], [6., 7., 8], [9., 10., 11.], [12., 13., 14.], [0., 1., 2.]]).reshape((1, 5, 3, 1)).astype(np.float32) # pyformat: enable transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]]) self._run_random_transform_with_mock(transform_matrix, expected_output, 'wrap') # Test left shift by 1. # pyformat: disable expected_output = np.asarray( [[1., 2., 0.], [4., 5., 3.], [7., 8., 6.], [10., 11., 9.], [13., 14., 12.]]).reshape((1, 5, 3, 1)).astype(np.float32) # pyformat: enable transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]]) self._run_random_transform_with_mock(transform_matrix, expected_output, 'wrap') # Test right shift by 1. # pyformat: disable expected_output = np.asarray( [[2., 0., 1.], [5., 3., 4], [8., 6., 7.], [11., 9., 10.], [14., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32) # pyformat: enable transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]]) self._run_random_transform_with_mock(transform_matrix, expected_output, 'wrap') def test_random_translation_constant(self): # constant output is (0000|abcd|0000) if compat.forward_compatible(2020, 3, 25): # Test down shift by 1. # pyformat: disable expected_output = np.asarray( [[0., 0., 0.], [0., 1., 2.], [3., 4., 5.], [6., 7., 8], [9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32) # pyformat: enable transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]]) self._run_random_transform_with_mock(transform_matrix, expected_output, 'constant') # Test up shift by 1. # pyformat: disable expected_output = np.asarray( [[3., 4., 5.], [6., 7., 8], [9., 10., 11.], [12., 13., 14.], [0., 0., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32) # pyformat: enable transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]]) self._run_random_transform_with_mock(transform_matrix, expected_output, 'constant') # Test left shift by 1. # pyformat: disable expected_output = np.asarray( [[1., 2., 0.], [4., 5., 0.], [7., 8., 0.], [10., 11., 0.], [13., 14., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32) # pyformat: enable transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]]) self._run_random_transform_with_mock(transform_matrix, expected_output, 'constant') # Test right shift by 1.
      # pyformat: disable
      expected_output = np.asarray(
          [[0., 0., 1.],
           [0., 3., 4.],
           [0., 6., 7.],
           [0., 9., 10.],
           [0., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
      # pyformat: enable
      transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
      self._run_random_transform_with_mock(transform_matrix, expected_output,
                                           'constant')

  def test_random_translation_nearest_interpolation(self):
    # nearest output is (aaaa|abcd|dddd)
    if compat.forward_compatible(2020, 3, 25):
      # Test down shift by 1.
      # pyformat: disable
      expected_output = np.asarray(
          [[0., 0., 0.],
           [0., 1., 2.],
           [3., 4., 5.],
           [6., 7., 8.],
           [9., 10., 11.]]).reshape((1, 5, 3, 1)).astype(np.float32)
      # pyformat: enable
      transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
      self._run_random_transform_with_mock(
          transform_matrix, expected_output, mode='constant',
          interpolation='nearest')

      # Test up shift by 1.
      # pyformat: disable
      expected_output = np.asarray(
          [[3., 4., 5.],
           [6., 7., 8.],
           [9., 10., 11.],
           [12., 13., 14.],
           [0., 0., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32)
      # pyformat: enable
      transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
      self._run_random_transform_with_mock(
          transform_matrix, expected_output, mode='constant',
          interpolation='nearest')

      # Test left shift by 1.
      # pyformat: disable
      expected_output = np.asarray(
          [[1., 2., 0.],
           [4., 5., 0.],
           [7., 8., 0.],
           [10., 11., 0.],
           [13., 14., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32)
      # pyformat: enable
      transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
      self._run_random_transform_with_mock(
          transform_matrix, expected_output, mode='constant',
          interpolation='nearest')

      # Test right shift by 1.
      # pyformat: disable
      expected_output = np.asarray(
          [[0., 0., 1.],
           [0., 3., 4.],
           [0., 6., 7.],
           [0., 9., 10.],
           [0., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
      # pyformat: enable
      transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
      self._run_random_transform_with_mock(
          transform_matrix, expected_output, mode='constant',
          interpolation='nearest')


@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomRotationTest(keras_parameterized.TestCase):

  def _run_test(self, factor):
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    kwargs = {'factor': factor}
    with tf_test_util.use_gpu():
      testing_utils.layer_test(
          image_preprocessing.RandomRotation,
          kwargs=kwargs,
          input_shape=(num_samples, orig_height, orig_width, channels),
          expected_output_shape=(None, orig_height, orig_width, channels))

  @parameterized.named_parameters(('random_rotate_4', .4),
                                  ('random_rotate_3', .3),
                                  ('random_rotate_tuple_factor', (.5, .4)))
  def test_random_rotation(self, factor):
    self._run_test(factor)

  def test_random_rotation_inference(self):
    with CustomObjectScope(
        {'RandomRotation': image_preprocessing.RandomRotation}):
      input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
      expected_output = input_images
      with tf_test_util.use_gpu():
        layer = image_preprocessing.RandomRotation(.5)
        actual_output = layer(input_images, training=0)
        self.assertAllClose(expected_output, actual_output)

  @tf_test_util.run_v2_only
  def test_config_with_custom_name(self):
    layer = image_preprocessing.RandomRotation(.5, name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.RandomRotation.from_config(config)
    self.assertEqual(layer_1.name, layer.name)


@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomZoomTest(keras_parameterized.TestCase):

  def _run_test(self, height_factor, width_factor):
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    kwargs = {'height_factor': height_factor, 'width_factor': width_factor}
    with tf_test_util.use_gpu():
      testing_utils.layer_test(
          image_preprocessing.RandomZoom,
          kwargs=kwargs,
          input_shape=(num_samples, orig_height, orig_width, channels),
          expected_output_shape=(None, orig_height, orig_width, channels))

  @parameterized.named_parameters(
      ('random_zoom_4_by_6', .4, .6), ('random_zoom_2_by_3', .2, .3),
      ('random_zoom_tuple_factor', (.4, .5), (.2, .3)))
  def test_random_zoom_in(self, height_factor, width_factor):
    self._run_test(height_factor, width_factor)

  @parameterized.named_parameters(
      ('random_zoom_4_by_6', 1.4, 1.6), ('random_zoom_2_by_3', 1.2, 1.3),
      ('random_zoom_tuple_factor', (1.4, 1.5), (1.2, 1.3)))
  def test_random_zoom_out(self, height_factor, width_factor):
    self._run_test(height_factor, width_factor)

  def test_random_zoom_invalid_factor(self):
    with self.assertRaises(ValueError):
      image_preprocessing.RandomZoom((.5, .4), .2)
    with self.assertRaises(ValueError):
      image_preprocessing.RandomZoom(.2, (.5, .4))

  def test_random_zoom_inference(self):
    with CustomObjectScope({'RandomZoom': image_preprocessing.RandomZoom}):
      input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
      expected_output = input_images
      with tf_test_util.use_gpu():
        layer = image_preprocessing.RandomZoom(.5, .5)
        actual_output = layer(input_images, training=0)
        self.assertAllClose(expected_output, actual_output)

  @tf_test_util.run_v2_only
  def test_config_with_custom_name(self):
    layer = image_preprocessing.RandomZoom(.5, .6, name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.RandomZoom.from_config(config)
    self.assertEqual(layer_1.name, layer.name)


@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomHeightTest(keras_parameterized.TestCase):

  def _run_test(self, factor):
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    with tf_test_util.use_gpu():
      img = np.random.random((num_samples, orig_height, orig_width, channels))
      layer = image_preprocessing.RandomHeight(factor)
      img_out = layer(img, training=True)
      self.assertEqual(img_out.shape[0], 2)
      self.assertEqual(img_out.shape[2], 8)
      self.assertEqual(img_out.shape[3], 3)

  @parameterized.named_parameters(('random_height_4_by_6', (.4, .6)),
                                  ('random_height_3_by_2', (.3, 1.2)),
                                  ('random_height_3', .3))
  def test_random_height_basic(self, factor):
    self._run_test(factor)

  def test_valid_random_height(self):
    # need (maxval - minval) * rnd + minval = 0.6
    mock_factor = 0
    with test.mock.patch.object(
        gen_stateful_random_ops, 'stateful_uniform',
        return_value=mock_factor):
      with tf_test_util.use_gpu():
        img = np.random.random((12, 5, 8, 3))
        layer = image_preprocessing.RandomHeight(.4)
        img_out = layer(img, training=True)
        self.assertEqual(img_out.shape[1], 3)

  def test_random_height_invalid_factor(self):
    with self.assertRaises(ValueError):
      image_preprocessing.RandomHeight((-1.5, .4))

  def test_random_height_inference(self):
    with CustomObjectScope({'RandomHeight': image_preprocessing.RandomHeight}):
      input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
      expected_output = input_images
      with tf_test_util.use_gpu():
        layer = image_preprocessing.RandomHeight(.5)
        actual_output = layer(input_images, training=0)
        self.assertAllClose(expected_output, actual_output)

  @tf_test_util.run_v2_only
  def test_config_with_custom_name(self):
    layer = image_preprocessing.RandomHeight(.5, name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.RandomHeight.from_config(config)
    self.assertEqual(layer_1.name, layer.name)


@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomWidthTest(keras_parameterized.TestCase):

  def _run_test(self, factor):
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    with tf_test_util.use_gpu():
      img = np.random.random((num_samples, orig_height, orig_width, channels))
      layer = image_preprocessing.RandomWidth(factor)
      img_out = layer(img, training=True)
      self.assertEqual(img_out.shape[0], 2)
      self.assertEqual(img_out.shape[1], 5)
      self.assertEqual(img_out.shape[3], 3)

  @parameterized.named_parameters(('random_width_4_by_6', (.4, .6)),
                                  ('random_width_3_by_2', (.3, 1.2)),
                                  ('random_width_3', .3))
  def test_random_width_basic(self, factor):
    self._run_test(factor)

  def test_valid_random_width(self):
    # need (maxval - minval) * rnd + minval = 0.6
    mock_factor = 0
    with test.mock.patch.object(
        gen_stateful_random_ops, 'stateful_uniform',
        return_value=mock_factor):
      with tf_test_util.use_gpu():
        img = np.random.random((12, 8, 5, 3))
        layer = image_preprocessing.RandomWidth(.4)
        img_out = layer(img, training=True)
        self.assertEqual(img_out.shape[2], 3)

  def test_random_width_invalid_factor(self):
    with self.assertRaises(ValueError):
      image_preprocessing.RandomWidth((-1.5, .4))

  def test_random_width_inference(self):
    with CustomObjectScope({'RandomWidth': image_preprocessing.RandomWidth}):
      input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
      expected_output = input_images
      with tf_test_util.use_gpu():
        layer = image_preprocessing.RandomWidth(.5)
        actual_output = layer(input_images, training=0)
        self.assertAllClose(expected_output, actual_output)

  @tf_test_util.run_v2_only
  def test_config_with_custom_name(self):
    layer = image_preprocessing.RandomWidth(.5, name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.RandomWidth.from_config(config)
    self.assertEqual(layer_1.name, layer.name)


if __name__ == '__main__':
  test.main()
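A brief usage sketch (not part of the test file above) of the image-preprocessing layers these tests exercise, using the public experimental namespace they shipped under around TF 2.3; namespace and factor values are assumptions for illustration:

import numpy as np
import tensorflow as tf

# Toy batch matching the (2, 5, 8, 3) shapes used throughout the tests.
images = np.random.random((2, 5, 8, 3)).astype(np.float32)

augment = tf.keras.Sequential([
    tf.keras.layers.experimental.preprocessing.RandomRotation(0.5),
    tf.keras.layers.experimental.preprocessing.RandomZoom(0.5, 0.5),
])

# Augmentation only applies in training mode; with training=False the
# layers act as identity maps, which is what the *_inference tests assert.
augmented = augment(images, training=True)
unchanged = augment(images, training=False)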
[ "numpy.asarray", "tensorflow.python.compat.compat.forward_compatible", "tensorflow.python.keras.utils.generic_utils.CustomObjectScope", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.CenterCrop", "tensorflow.python.platform.test.is_built_with_rocm", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomFlip.from_config", "numpy.mean", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.Resizing", "tensorflow.python.ops.image_ops_impl.resize_images_v2", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.CenterCrop.from_config", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomHeight", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomRotation.from_config", "numpy.random.randint", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomTranslation", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomCrop", "numpy.reshape", "numpy.arange", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomZoom.from_config", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomCrop.from_config", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomWidth", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.transform", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.Rescaling", "tensorflow.python.platform.test.main", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomWidth.from_config", "tensorflow.python.keras.keras_parameterized.run_all_keras_modes", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomContrast.from_config", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomTranslation.from_config", "tensorflow.python.keras.testing_utils.layer_test", "numpy.flip", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomRotation", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomHeight.from_config", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.Resizing.from_config", "numpy.random.random", "numpy.random.seed", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.Rescaling.from_config", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomZoom", "tensorflow.python.platform.test.mock.patch.object", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomFlip", "tensorflow.python.framework.test_util.use_gpu", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomContrast" ]
tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py
[(38, 'tensorflow.python.keras.keras_parameterized.run_all_keras_modes', 'keras_parameterized.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), False, 'from tensorflow.python.keras import keras_parameterized\n'), (105, 'tensorflow.python.keras.keras_parameterized.run_all_keras_modes', 'keras_parameterized.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), False, 'from tensorflow.python.keras import keras_parameterized\n'), (166, 'tensorflow.python.keras.keras_parameterized.run_all_keras_modes', 'keras_parameterized.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), False, 'from tensorflow.python.keras import keras_parameterized\n'), (291, 'tensorflow.python.keras.keras_parameterized.run_all_keras_modes', 'keras_parameterized.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), False, 'from tensorflow.python.keras import keras_parameterized\n'), (375, 'tensorflow.python.keras.keras_parameterized.run_all_keras_modes', 'keras_parameterized.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), False, 'from tensorflow.python.keras import keras_parameterized\n'), (460, 'tensorflow.python.keras.keras_parameterized.run_all_keras_modes', 'keras_parameterized.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), False, 'from tensorflow.python.keras import keras_parameterized\n'), (513, 'tensorflow.python.keras.keras_parameterized.run_all_keras_modes', 'keras_parameterized.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), False, 'from tensorflow.python.keras import keras_parameterized\n'), (757, 'tensorflow.python.keras.keras_parameterized.run_all_keras_modes', 'keras_parameterized.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), False, 'from tensorflow.python.keras import keras_parameterized\n'), (798, 'tensorflow.python.keras.keras_parameterized.run_all_keras_modes', 'keras_parameterized.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), False, 'from tensorflow.python.keras import keras_parameterized\n'), (851, 'tensorflow.python.keras.keras_parameterized.run_all_keras_modes', 'keras_parameterized.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), False, 'from tensorflow.python.keras import keras_parameterized\n'), (906, 'tensorflow.python.keras.keras_parameterized.run_all_keras_modes', 'keras_parameterized.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), False, 'from tensorflow.python.keras import keras_parameterized\n'), (56, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('down_sample_bilinear_2_by_2', {'interpolation': 'bilinear'}, 2, 2)", "('down_sample_bilinear_3_by_2', {'interpolation': 'bilinear'}, 3, 2)", "('down_sample_nearest_2_by_2', {'interpolation': 'nearest'}, 2, 2)", "('down_sample_nearest_3_by_2', {'interpolation': 'nearest'}, 3, 2)", "('down_sample_area_2_by_2', {'interpolation': 'area'}, 2, 2)", "('down_sample_area_3_by_2', {'interpolation': 'area'}, 3, 2)"], {}), False, 'from absl.testing import parameterized\n'), (67, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('up_sample_bilinear_10_by_12', {'interpolation': 'bilinear'}, 10, 12)", "('up_sample_bilinear_12_by_12', {'interpolation': 'bilinear'}, 12, 12)", "('up_sample_nearest_10_by_12', {'interpolation': 'nearest'}, 10, 12)", "('up_sample_nearest_12_by_12', {'interpolation': 'nearest'}, 12, 12)", "('up_sample_area_10_by_12', {'interpolation': 'area'}, 10, 12)", "('up_sample_area_12_by_12', {'interpolation': 'area'}, 12, 12)"], {}), False, 'from absl.testing import 
parameterized\n'), (78, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('reshape_bilinear_10_by_4', {'interpolation': 'bilinear'}, 10, 4)"], {}), False, 'from absl.testing import parameterized\n'), (129, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('center_crop_3_by_4', 3, 4)", "('center_crop_3_by_2', 3, 2)"], {}), False, 'from absl.testing import parameterized\n'), (136, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('center_crop_4_by_5', 4, 5)", "('center_crop_4_by_3', 4, 3)"], {}), False, 'from absl.testing import parameterized\n'), (143, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('center_crop_4_by_6', 4, 6)", "('center_crop_3_by_2', 3, 2)"], {}), False, 'from absl.testing import parameterized\n'), (150, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('center_crop_5_by_12', 5, 12)", "('center_crop_10_by_8', 10, 8)", "('center_crop_10_by_12', 10, 12)"], {}), False, 'from absl.testing import parameterized\n'), (184, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('random_crop_5_by_12', 5, 12)", "('random_crop_10_by_8', 10, 8)", "('random_crop_10_by_12', 10, 12)"], {}), False, 'from absl.testing import parameterized\n'), (215, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('random_crop_4_by_6', 4, 6)", "('random_crop_3_by_2', 3, 2)"], {}), False, 'from absl.testing import parameterized\n'), (260, 'tensorflow.python.keras.keras_parameterized.run_all_keras_modes', 'keras_parameterized.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), False, 'from tensorflow.python.keras import keras_parameterized\n'), (317, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('random_flip_horizontal', 'horizontal')", "('random_flip_vertical', 'vertical')", "('random_flip_both', 'horizontal_and_vertical')"], {}), False, 'from absl.testing import parameterized\n'), (404, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('random_contrast_2_by_5', 0.2, 0.5)", "('random_contrast_2_by_13', 0.2, 1.3)", "('random_contrast_5_by_2', 0.5, 0.2)"], {}), False, 'from absl.testing import parameterized\n'), (413, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('random_contrast_amplitude_2', 0.2)", "('random_contrast_amplitude_5', 0.5)"], {}), False, 'from absl.testing import parameterized\n'), (477, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('random_translate_4_by_6', 0.4, 0.6)", "('random_translate_3_by_2', 0.3, 0.2)", "('random_translate_tuple_factor', (0.5, 0.4), (0.2, 0.3))"], {}), False, 'from absl.testing import parameterized\n'), (774, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('random_rotate_4', 0.4)", "('random_rotate_3', 0.3)", "('random_rotate_tuple_factor', (0.5, 0.4))"], {}), False, 'from absl.testing import parameterized\n'), (815, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('random_zoom_4_by_6', 0.4, 0.6)", "('random_zoom_2_by_3', 0.2, 0.3)", "('random_zoom_tuple_factor', (0.4, 0.5), (0.2, 0.3))"], {}), False, 'from absl.testing import parameterized\n'), (821, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('random_zoom_4_by_6', 1.4, 1.6)", "('random_zoom_2_by_3', 1.2, 1.3)", 
"('random_zoom_tuple_factor', (1.4, 1.5), (1.2, 1.3))"], {}), False, 'from absl.testing import parameterized\n'), (868, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('random_height_4_by_6', (0.4, 0.6))", "('random_height_3_by_2', (0.3, 1.2))", "('random_height_3', 0.3)"], {}), False, 'from absl.testing import parameterized\n'), (923, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('random_width_4_by_6', (0.4, 0.6))", "('random_width_3_by_2', (0.3, 1.2))", "('random_width_3', 0.3)"], {}), False, 'from absl.testing import parameterized\n'), (962, 'tensorflow.python.platform.test.main', 'test.main', ([], {}), False, 'from tensorflow.python.platform import test\n'), (42, 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), True, 'import numpy as np\n'), (89, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.Resizing', 'image_preprocessing.Resizing', (['(5)', '(5)'], {'name': '"""image_preproc"""'}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (91, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.Resizing.from_config', 'image_preprocessing.Resizing.from_config', (['config'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (109, 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), True, 'import numpy as np\n'), (160, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.CenterCrop', 'image_preprocessing.CenterCrop', (['(5)', '(5)'], {'name': '"""image_preproc"""'}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (162, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.CenterCrop.from_config', 'image_preprocessing.CenterCrop.from_config', (['config'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (170, 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), True, 'import numpy as np\n'), (194, 'tensorflow.python.platform.test.is_built_with_rocm', 'test.is_built_with_rocm', ([], {}), False, 'from tensorflow.python.platform import test\n'), (199, 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), True, 'import numpy as np\n'), (201, 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(3)'}), True, 'import numpy as np\n'), (202, 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(5)'}), True, 'import numpy as np\n'), (219, 'tensorflow.python.platform.test.is_built_with_rocm', 'test.is_built_with_rocm', ([], {}), False, 'from tensorflow.python.platform import test\n'), (228, 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), True, 'import numpy as np\n'), (230, 'numpy.random.random', 'np.random.random', (['(12, 10, 6, 3)'], {}), True, 'import numpy as np\n'), (240, 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), True, 'import numpy as np\n'), (242, 'numpy.random.random', 'np.random.random', (['(12, 8, 16, 3)'], {}), True, 'import numpy as np\n'), (252, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomCrop', 'image_preprocessing.RandomCrop', (['(5)', '(5)'], {'name': '"""image_preproc"""'}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (254, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomCrop.from_config', 'image_preprocessing.RandomCrop.from_config', (['config'], {}), False, 'from 
tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (263, 'tensorflow.python.keras.testing_utils.layer_test', 'testing_utils.layer_test', (['image_preprocessing.Rescaling'], {'kwargs': 'kwargs', 'input_shape': '(2, 5, 6, 3)', 'expected_output_shape': '(None, 5, 6, 3)'}), False, 'from tensorflow.python.keras import testing_utils\n'), (271, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.Rescaling', 'image_preprocessing.Rescaling', (['(0.004)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (272, 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(2, 4, 5, 3)'], {}), False, 'from tensorflow.python.ops import random_ops\n'), (278, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.Rescaling', 'image_preprocessing.Rescaling', (['(0.004)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (279, 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(2, 4, 5, 3)', '(0)', '(100)'], {'dtype': '"""int32"""'}), False, 'from tensorflow.python.ops import random_ops\n'), (285, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.Rescaling', 'image_preprocessing.Rescaling', (['(0.5)'], {'name': '"""rescaling"""'}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (287, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.Rescaling.from_config', 'image_preprocessing.Rescaling.from_config', (['config'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (295, 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), True, 'import numpy as np\n'), (303, 'numpy.random.random', 'np.random.random', (['(num_samples, orig_height, orig_width, channels)'], {}), True, 'import numpy as np\n'), (369, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomFlip', 'image_preprocessing.RandomFlip', ([], {'name': '"""image_preproc"""'}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (371, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomFlip.from_config', 'image_preprocessing.RandomFlip.from_config', (['config'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (383, 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), True, 'import numpy as np\n'), (390, 'numpy.random.random', 'np.random.random', (['(num_samples, orig_height, orig_width, channels)'], {}), True, 'import numpy as np\n'), (454, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomContrast', 'image_preprocessing.RandomContrast', (['(0.5, 0.6)'], {'name': '"""image_preproc"""'}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (456, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomContrast.from_config', 'image_preprocessing.RandomContrast.from_config', (['config'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (464, 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), True, 'import numpy as np\n'), (484, 'numpy.random.random', 'np.random.random', (['(12, 1)'], {}), True, 'import numpy as np\n'), (507, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomTranslation', 'image_preprocessing.RandomTranslation', (['(0.5)', '(0.6)'], {'name': 
'"""image_preproc"""'}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (509, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomTranslation.from_config', 'image_preprocessing.RandomTranslation.from_config', (['config'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (530, 'tensorflow.python.compat.compat.forward_compatible', 'compat.forward_compatible', (['(2020)', '(3)', '(25)'], {}), False, 'from tensorflow.python.compat import compat\n'), (587, 'tensorflow.python.compat.compat.forward_compatible', 'compat.forward_compatible', (['(2020)', '(3)', '(25)'], {}), False, 'from tensorflow.python.compat import compat\n'), (643, 'tensorflow.python.compat.compat.forward_compatible', 'compat.forward_compatible', (['(2020)', '(3)', '(25)'], {}), False, 'from tensorflow.python.compat import compat\n'), (699, 'tensorflow.python.compat.compat.forward_compatible', 'compat.forward_compatible', (['(2020)', '(3)', '(25)'], {}), False, 'from tensorflow.python.compat import compat\n'), (761, 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), True, 'import numpy as np\n'), (792, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomRotation', 'image_preprocessing.RandomRotation', (['(0.5)'], {'name': '"""image_preproc"""'}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (794, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomRotation.from_config', 'image_preprocessing.RandomRotation.from_config', (['config'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (802, 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), True, 'import numpy as np\n'), (845, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomZoom', 'image_preprocessing.RandomZoom', (['(0.5)', '(0.6)'], {'name': '"""image_preproc"""'}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (847, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomZoom.from_config', 'image_preprocessing.RandomZoom.from_config', (['config'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (855, 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), True, 'import numpy as np\n'), (900, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomHeight', 'image_preprocessing.RandomHeight', (['(0.5)'], {'name': '"""image_preproc"""'}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (902, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomHeight.from_config', 'image_preprocessing.RandomHeight.from_config', (['config'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (910, 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), True, 'import numpy as np\n'), (955, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomWidth', 'image_preprocessing.RandomWidth', (['(0.5)'], {'name': '"""image_preproc"""'}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (957, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomWidth.from_config', 'image_preprocessing.RandomWidth.from_config', (['config'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (48, 
'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (49, 'tensorflow.python.keras.testing_utils.layer_test', 'testing_utils.layer_test', (['image_preprocessing.Resizing'], {'kwargs': 'kwargs', 'input_shape': '(num_samples, orig_height, orig_width, channels)', 'expected_output_shape': '(None, expected_height, expected_width, channels)'}), False, 'from tensorflow.python.keras import testing_utils\n'), (64, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'Resizing': image_preprocessing.Resizing}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (75, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'Resizing': image_preprocessing.Resizing}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (81, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'Resizing': image_preprocessing.Resizing}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (86, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.Resizing', 'image_preprocessing.Resizing', (['(5)', '(5)', '"""invalid_interpolation"""'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (119, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (120, 'tensorflow.python.keras.testing_utils.layer_test', 'testing_utils.layer_test', (['image_preprocessing.CenterCrop'], {'kwargs': 'kwargs', 'input_shape': '(num_samples, orig_height, orig_width, channels)', 'input_data': 'input_images', 'expected_output': 'expected_output', 'expected_output_shape': '(None, expected_height, expected_width, channels)'}), False, 'from tensorflow.python.keras import testing_utils\n'), (133, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'CenterCrop': image_preprocessing.CenterCrop}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (140, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'CenterCrop': image_preprocessing.CenterCrop}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (147, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'CenterCrop': image_preprocessing.CenterCrop}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (176, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (177, 'tensorflow.python.keras.testing_utils.layer_test', 'testing_utils.layer_test', (['image_preprocessing.RandomCrop'], {'kwargs': 'kwargs', 'input_shape': '(num_samples, orig_height, orig_width, channels)', 'expected_output_shape': '(None, expected_height, expected_width, channels)'}), False, 'from tensorflow.python.keras import testing_utils\n'), (204, 'tensorflow.python.platform.test.mock.patch.object', 'test.mock.patch.object', (['stateless_random_ops', '"""stateless_random_uniform"""'], {'return_value': 'mock_offset'}), False, 'from tensorflow.python.platform import test\n'), (224, 
'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomCrop': image_preprocessing.RandomCrop}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (231, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (232, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomCrop', 'image_preprocessing.RandomCrop', (['height', 'width'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (234, 'tensorflow.python.ops.image_ops_impl.resize_images_v2', 'image_ops.resize_images_v2', (['inp'], {'size': '[5, 3]'}), True, 'from tensorflow.python.ops import image_ops_impl as image_ops\n'), (243, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (244, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomCrop', 'image_preprocessing.RandomCrop', (['height', 'width'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (246, 'tensorflow.python.ops.image_ops_impl.resize_images_v2', 'image_ops.resize_images_v2', (['inp'], {'size': '[4, 8]'}), True, 'from tensorflow.python.ops import image_ops_impl as image_ops\n'), (302, 'numpy.reshape', 'np.reshape', (['mock_random', '[2, 1, 1, 1]'], {}), True, 'import numpy as np\n'), (310, 'tensorflow.python.platform.test.mock.patch.object', 'test.mock.patch.object', (['random_ops', '"""random_uniform"""'], {'return_value': 'mock_random'}), False, 'from tensorflow.python.platform import test\n'), (322, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomFlip': image_preprocessing.RandomFlip}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (326, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomFlip': image_preprocessing.RandomFlip}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (327, 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), True, 'import numpy as np\n'), (329, 'numpy.reshape', 'np.reshape', (['mock_random', '[2, 1, 1, 1]'], {}), True, 'import numpy as np\n'), (332, 'numpy.flip', 'np.flip', (['input_images[(0), :, :, :]'], {'axis': '(0)'}), True, 'import numpy as np\n'), (336, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomFlip': image_preprocessing.RandomFlip}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (337, 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), True, 'import numpy as np\n'), (339, 'numpy.reshape', 'np.reshape', (['mock_random', '[2, 1, 1, 1]'], {}), True, 'import numpy as np\n'), (342, 'numpy.flip', 'np.flip', (['input_images[(0), :, :, :]'], {'axis': '(1)'}), True, 'import numpy as np\n'), (346, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomFlip': image_preprocessing.RandomFlip}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (355, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomFlip': image_preprocessing.RandomFlip}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import 
CustomObjectScope\n'), (359, 'numpy.reshape', 'np.reshape', (['mock_random', '[2, 1, 1, 1]'], {}), True, 'import numpy as np\n'), (393, 'numpy.mean', 'np.mean', (['inp'], {'axis': '(1)', 'keepdims': '(True)'}), True, 'import numpy as np\n'), (395, 'numpy.mean', 'np.mean', (['inp_mean'], {'axis': '(2)', 'keepdims': '(True)'}), True, 'import numpy as np\n'), (397, 'tensorflow.python.platform.test.mock.patch.object', 'test.mock.patch.object', (['random_ops', '"""random_uniform"""'], {'return_value': 'mock_random'}), False, 'from tensorflow.python.platform import test\n'), (409, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomContrast': image_preprocessing.RandomContrast}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (417, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomContrast': image_preprocessing.RandomContrast}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (419, 'numpy.random.random', 'np.random.random', (['(2, 5, 8, 3)'], {}), True, 'import numpy as np\n'), (425, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomContrast': image_preprocessing.RandomContrast}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (435, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomContrast': image_preprocessing.RandomContrast}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (437, 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(255)', 'size': '(2, 5, 8, 3)'}), True, 'import numpy as np\n'), (444, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomContrast', 'image_preprocessing.RandomContrast', (['(-0.1, 0.5)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (447, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomContrast', 'image_preprocessing.RandomContrast', (['(1.1, 0.5)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (450, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomContrast', 'image_preprocessing.RandomContrast', (['(0.1, -0.2)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (470, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (471, 'tensorflow.python.keras.testing_utils.layer_test', 'testing_utils.layer_test', (['image_preprocessing.RandomTranslation'], {'kwargs': 'kwargs', 'input_shape': '(num_samples, orig_height, orig_width, channels)', 'expected_output_shape': '(None, orig_height, orig_width, channels)'}), False, 'from tensorflow.python.keras import testing_utils\n'), (485, 'tensorflow.python.platform.test.mock.patch.object', 'test.mock.patch.object', (['gen_stateful_random_ops', '"""stateful_uniform"""'], {'return_value': 'mock_offset'}), False, 'from tensorflow.python.platform import test\n'), (496, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomTranslation': image_preprocessing.RandomTranslation}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (523, 
'tensorflow.python.keras.layers.preprocessing.image_preprocessing.transform', 'image_preprocessing.transform', (['inp', 'transform_matrix'], {'fill_mode': 'mode', 'interpolation': 'interpolation'}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (540, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (553, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (567, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (580, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (597, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (610, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (623, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (636, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (653, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (666, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (679, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (692, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (709, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (723, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (737, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (751, 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (767, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (768, 'tensorflow.python.keras.testing_utils.layer_test', 'testing_utils.layer_test', (['image_preprocessing.RandomRotation'], {'kwargs': 'kwargs', 'input_shape': '(num_samples, orig_height, orig_width, channels)', 'expected_output_shape': '(None, orig_height, orig_width, channels)'}), False, 'from tensorflow.python.keras import testing_utils\n'), (781, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomTranslation': image_preprocessing.RandomRotation}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (808, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (809, 'tensorflow.python.keras.testing_utils.layer_test', 'testing_utils.layer_test', (['image_preprocessing.RandomZoom'], {'kwargs': 'kwargs', 'input_shape': '(num_samples, orig_height, orig_width, channels)', 'expected_output_shape': '(None, orig_height, orig_width, channels)'}), False, 'from tensorflow.python.keras import testing_utils\n'), (829, 
'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomZoom', 'image_preprocessing.RandomZoom', (['(0.5, 0.4)', '(0.2)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (831, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomZoom', 'image_preprocessing.RandomZoom', (['(0.2)', '(0.5, 0.4)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (834, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomZoom': image_preprocessing.RandomZoom}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (860, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (861, 'numpy.random.random', 'np.random.random', (['(num_samples, orig_height, orig_width, channels)'], {}), True, 'import numpy as np\n'), (862, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomHeight', 'image_preprocessing.RandomHeight', (['factor'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (877, 'tensorflow.python.platform.test.mock.patch.object', 'test.mock.patch.object', (['gen_stateful_random_ops', '"""stateful_uniform"""'], {'return_value': 'mock_factor'}), False, 'from tensorflow.python.platform import test\n'), (887, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomHeight', 'image_preprocessing.RandomHeight', (['(-1.5, 0.4)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (890, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomHeight': image_preprocessing.RandomHeight}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (915, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (916, 'numpy.random.random', 'np.random.random', (['(num_samples, orig_height, orig_width, channels)'], {}), True, 'import numpy as np\n'), (917, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomWidth', 'image_preprocessing.RandomWidth', (['factor'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (932, 'tensorflow.python.platform.test.mock.patch.object', 'test.mock.patch.object', (['gen_stateful_random_ops', '"""stateful_uniform"""'], {'return_value': 'mock_factor'}), False, 'from tensorflow.python.platform import test\n'), (942, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomWidth', 'image_preprocessing.RandomWidth', (['(-1.5, 0.4)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (945, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomWidth': image_preprocessing.RandomWidth}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import CustomObjectScope\n'), (115, 'numpy.random.random', 'np.random.random', (['(num_samples, orig_height, orig_width, channels)'], {}), True, 'import numpy as np\n'), (190, 'tensorflow.python.keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'RandomCrop': image_preprocessing.RandomCrop}"], {}), False, 'from tensorflow.python.keras.utils.generic_utils import 
CustomObjectScope\n'), (207, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (208, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomCrop', 'image_preprocessing.RandomCrop', (['height', 'width'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (209, 'numpy.random.random', 'np.random.random', (['(12, 5, 8, 3)'], {}), True, 'import numpy as np\n'), (307, 'numpy.flip', 'np.flip', (['expected_output'], {'axis': '(1)'}), True, 'import numpy as np\n'), (309, 'numpy.flip', 'np.flip', (['expected_output'], {'axis': '(2)'}), True, 'import numpy as np\n'), (312, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (313, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomFlip', 'image_preprocessing.RandomFlip', (['mode'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (349, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (350, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomFlip', 'image_preprocessing.RandomFlip', ([], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (357, 'numpy.flip', 'np.flip', (['input_images'], {'axis': '(1)'}), True, 'import numpy as np\n'), (360, 'tensorflow.python.platform.test.mock.patch.object', 'test.mock.patch.object', (['random_ops', '"""random_uniform"""'], {'return_value': 'mock_random'}), False, 'from tensorflow.python.platform import test\n'), (399, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (400, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomContrast', 'image_preprocessing.RandomContrast', (['(lower, upper)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (420, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (421, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomContrast', 'image_preprocessing.RandomContrast', (['amplitude'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (429, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (430, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomContrast', 'image_preprocessing.RandomContrast', (['(0.1, 0.2)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (438, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (439, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomContrast', 'image_preprocessing.RandomContrast', (['(0.1, 0.2)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (488, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomTranslation', 
'image_preprocessing.RandomTranslation', (['(-0.2, 0.3)', '(0.4)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (489, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomTranslation', 'image_preprocessing.RandomTranslation', (['(0.2, 0.3)', '(0.4)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (500, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (501, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomTranslation', 'image_preprocessing.RandomTranslation', (['(0.5)', '(0.5)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (785, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (786, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomRotation', 'image_preprocessing.RandomRotation', (['(0.5)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (838, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (839, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomZoom', 'image_preprocessing.RandomZoom', (['(0.5)', '(0.5)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (879, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (880, 'numpy.random.random', 'np.random.random', (['(12, 5, 8, 3)'], {}), True, 'import numpy as np\n'), (881, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomHeight', 'image_preprocessing.RandomHeight', (['(0.4)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (893, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (894, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomHeight', 'image_preprocessing.RandomHeight', (['(0.5)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (934, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (935, 'numpy.random.random', 'np.random.random', (['(12, 8, 5, 3)'], {}), True, 'import numpy as np\n'), (936, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomWidth', 'image_preprocessing.RandomWidth', (['(0.4)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (948, 'tensorflow.python.framework.test_util.use_gpu', 'tf_test_util.use_gpu', ([], {}), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), (949, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomWidth', 'image_preprocessing.RandomWidth', (['(0.5)'], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (330, 'numpy.random.random', 'np.random.random', (['(2, 5, 8, 3)'], {}), True, 'import numpy as np\n'), (340, 
'numpy.random.random', 'np.random.random', (['(2, 5, 8, 3)'], {}), True, 'import numpy as np\n'), (347, 'numpy.random.random', 'np.random.random', (['(2, 5, 8, 3)'], {}), True, 'import numpy as np\n'), (356, 'numpy.random.random', 'np.random.random', (['(2, 5, 8, 3)'], {}), True, 'import numpy as np\n'), (363, 'tensorflow.python.keras.layers.preprocessing.image_preprocessing.RandomFlip', 'image_preprocessing.RandomFlip', ([], {}), False, 'from tensorflow.python.keras.layers.preprocessing import image_preprocessing\n'), (427, 'numpy.random.random', 'np.random.random', (['(2, 5, 8, 3)'], {}), True, 'import numpy as np\n'), (498, 'numpy.random.random', 'np.random.random', (['(2, 5, 8, 3)'], {}), True, 'import numpy as np\n'), (783, 'numpy.random.random', 'np.random.random', (['(2, 5, 8, 3)'], {}), True, 'import numpy as np\n'), (836, 'numpy.random.random', 'np.random.random', (['(2, 5, 8, 3)'], {}), True, 'import numpy as np\n'), (891, 'numpy.random.random', 'np.random.random', (['(2, 5, 8, 3)'], {}), True, 'import numpy as np\n'), (946, 'numpy.random.random', 'np.random.random', (['(2, 5, 8, 3)'], {}), True, 'import numpy as np\n'), (490, 'numpy.random.random', 'np.random.random', (['(12, 5, 8, 3)'], {}), True, 'import numpy as np\n'), (521, 'numpy.arange', 'np.arange', (['(15)'], {}), True, 'import numpy as np\n'), (533, 'numpy.asarray', 'np.asarray', (['[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8], [9.0, \n 10.0, 11]]'], {}), True, 'import numpy as np\n'), (546, 'numpy.asarray', 'np.asarray', (['[[3.0, 4.0, 5.0], [6.0, 7.0, 8], [9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [\n 12.0, 13.0, 14.0]]'], {}), True, 'import numpy as np\n'), (560, 'numpy.asarray', 'np.asarray', (['[[1.0, 2.0, 2.0], [4.0, 5.0, 5.0], [7.0, 8.0, 8.0], [10.0, 11.0, 11.0], [\n 13.0, 14.0, 14.0]]'], {}), True, 'import numpy as np\n'), (573, 'numpy.asarray', 'np.asarray', (['[[0.0, 0.0, 1.0], [3.0, 3.0, 4], [6.0, 6.0, 7.0], [9.0, 9.0, 10.0], [12.0, \n 12.0, 13.0]]'], {}), True, 'import numpy as np\n'), (590, 'numpy.asarray', 'np.asarray', (['[[12.0, 13.0, 14.0], [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8], [9.0,\n 10.0, 11]]'], {}), True, 'import numpy as np\n'), (603, 'numpy.asarray', 'np.asarray', (['[[3.0, 4.0, 5.0], [6.0, 7.0, 8], [9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [\n 0.0, 1.0, 2.0]]'], {}), True, 'import numpy as np\n'), (616, 'numpy.asarray', 'np.asarray', (['[[1.0, 2.0, 0.0], [4.0, 5.0, 3.0], [7.0, 8.0, 6.0], [10.0, 11.0, 9.0], [\n 13.0, 14.0, 12.0]]'], {}), True, 'import numpy as np\n'), (629, 'numpy.asarray', 'np.asarray', (['[[2.0, 0.0, 1.0], [5.0, 3.0, 4], [8.0, 6.0, 7.0], [11.0, 9.0, 10.0], [14.0,\n 12.0, 13.0]]'], {}), True, 'import numpy as np\n'), (646, 'numpy.asarray', 'np.asarray', (['[[0.0, 0.0, 0.0], [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8], [9.0, \n 10.0, 11]]'], {}), True, 'import numpy as np\n'), (659, 'numpy.asarray', 'np.asarray', (['[[3.0, 4.0, 5.0], [6.0, 7.0, 8], [9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [\n 0.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (672, 'numpy.asarray', 'np.asarray', (['[[1.0, 2.0, 0.0], [4.0, 5.0, 0.0], [7.0, 8.0, 0.0], [10.0, 11.0, 0.0], [\n 13.0, 14.0, 0.0]]'], {}), True, 'import numpy as np\n'), (685, 'numpy.asarray', 'np.asarray', (['[[0.0, 0.0, 1.0], [0.0, 3.0, 4], [0.0, 6.0, 7.0], [0.0, 9.0, 10.0], [0.0, \n 12.0, 13.0]]'], {}), True, 'import numpy as np\n'), (702, 'numpy.asarray', 'np.asarray', (['[[0.0, 0.0, 0.0], [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8], [9.0, \n 10.0, 11]]'], {}), True, 'import numpy as np\n'), (716, 
'numpy.asarray', 'np.asarray', (['[[3.0, 4.0, 5.0], [6.0, 7.0, 8], [9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [\n 0.0, 0.0, 0.0]]'], {}), True, 'import numpy as np\n'), (730, 'numpy.asarray', 'np.asarray', (['[[1.0, 2.0, 0.0], [4.0, 5.0, 0.0], [7.0, 8.0, 0.0], [10.0, 11.0, 0.0], [\n 13.0, 14.0, 0.0]]'], {}), True, 'import numpy as np\n'), (744, 'numpy.asarray', 'np.asarray', (['[[0.0, 0.0, 1.0], [0.0, 3.0, 4], [0.0, 6.0, 7.0], [0.0, 9.0, 10.0], [0.0, \n 12.0, 13.0]]'], {}), True, 'import numpy as np\n')]
bva99/PINN-KS
da2de335c3d8740341e1caf9af21b904528fb3a9
import tensorflow as tf
from .fflayer import ffLayer


class Network:
    """
    Build a physics informed neural network (PINN) model
    for the Kuramoto-Sivashinsky equation.
    """

    @classmethod
    def build(cls, num_inputs=2, layers=None, activation=tf.nn.tanh,
              sig_t=[1], sig_x=[1], num_outputs=1, name="DenseNN"):
        """
        Build a PINN model for the Kuramoto-Sivashinsky equation
        with input shape (t, x) and output shape u(t, x).

        Parameters:
            num_inputs: int
                Number of input variables. Default is 2 for (t, x).
            layers: array_like
                List of length equal to the number of hidden layers,
                with the number of nodes for each of them.
            activation: str or tensorflow activation object
                Activation function in hidden layers. Default is tanh.
            sig_t: array_like of ints
                Standard deviations for the time-domain Fourier feature layer.
            sig_x: array_like of ints
                Standard deviations for the spatial-domain Fourier feature layer.
            num_outputs: int
                Number of output variables. Default is 1 for u(t, x).
            name : str
                Name of the neural network. Default is "DenseNN".

        Returns:
            keras network model.
        """
        if layers is None:
            layers = [40, 40, 40, 40]
        # input layer
        inputs = tf.keras.layers.Input(shape=(num_inputs,), name="t_x")
        # separate time and space
        t = inputs[:, 0:1]
        x = inputs[:, 1:2]
        # Fourier feature layer for time
        t = ffLayer(input_dim=1, m=layers[0], sig=sig_t,
                    name="Time_Fourier_features")(t)
        # Fourier feature layer for space
        x = ffLayer(input_dim=1, m=layers[0], sig=sig_x,
                    name="Space_Fourier_features")(x)
        # dense neural network, shared by the time and space streams
        fnn = tf.keras.models.Sequential()
        assert len(sig_t) == len(sig_x)
        # each ffLayer emits 2*m features per standard deviation;
        # note that Input expects a shape tuple, hence the trailing comma
        fnn.add(tf.keras.layers.Input(shape=(layers[0]*2*len(sig_t),)))
        # hidden layers
        for layer in layers:
            fnn.add(tf.keras.layers.Dense(layer, activation=activation,
                                          kernel_initializer='he_uniform',
                                          bias_initializer='he_uniform'))
        # forward pass for time and space
        t = fnn(t)
        x = fnn(x)
        # point-wise multiplication layer for a merge
        tx = tf.multiply(t, x)
        # output layer
        outputs = tf.keras.layers.Dense(
            num_outputs, kernel_initializer='glorot_uniform')(tx)
        return tf.keras.models.Model(inputs=inputs, outputs=outputs, name=name)
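A short usage sketch (not part of the repository): it assumes the companion ffLayer module in lib/fflayer.py is importable as a package, and the factor values are illustrative:

import numpy as np
from lib.network import Network  # per the file_path below

# Two Fourier-feature scales per domain; values chosen for illustration.
model = Network.build(layers=[40, 40, 40, 40], sig_t=[1, 10], sig_x=[1, 10])

# Forward pass on a batch of 128 collocation points (t, x).
tx = np.random.rand(128, 2).astype("float32")
u = model(tx)  # shape (128, 1): the predicted u(t, x)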
[ "tensorflow.multiply", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.Input" ]
lib/network.py
[(44, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(num_inputs,)', 'name': '"""t_x"""'}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (65, 'tensorflow.multiply', 'tf.multiply', (['t', 'x'], {}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': 'name'}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_outputs'], {'kernel_initializer': '"""glorot_uniform"""'}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['layer'], {'activation': 'activation', 'kernel_initializer': '"""he_uniform"""', 'bias_initializer': '"""he_uniform"""'}), True, 'import tensorflow as tf\n')]
calebmcarr/vgantf
953b98b85747ca1987c395587f47e825217fdedd
''' Name : encoder.py Author : Caleb Carr Description : TensorFlow implementation of the Encoder in Generating Videos with Scene Dynamics from Columbia. Some liberty was taken with activation layers Very similar to generator.py but functions are changed for input size License : GNU V3 Date : 30MAY2020 ''' import tensorflow as tf from tensorflow.keras import layers from tensorflow.keras import models from tensorflow.keras import activations import numpy as np def encoder(): '''turns a 64x64, 3 channel image to a 4x4, 512 channel image''' in_shape = (64,64,3) enc = tf.keras.Sequential() enc.add(layers.Dense(64*64,use_bias=False,input_shape=in_shape)) enc.add(layers.BatchNormalization()) # encode 64x64 down to 4x4 enc.add(layers.Conv2D(64,4,strides=2,use_bias=False,padding='same')) enc.add(layers.Conv2D(128,4,strides=2,use_bias=False,padding='same')) enc.add(layers.Conv2D(256,4,strides=2,use_bias=False,padding='same')) enc.add(layers.Conv2D(512,4,strides=2,use_bias=False,padding='same')) #this result will then be sent off to a modified Generator return enc def foreground(): # kernal size (4,4,4) = 4 # stride = (2,2,2) = 2 # use Conv3DTranspose for upsampling in_shape = (4,4,1) fg = tf.keras.Sequential() fg.add(layers.Dense(4*4,use_bias=False,input_shape=in_shape)) fg.add(layers.BatchNormalization()) #fg.add(activations.tanh()) fg.add(layers.Reshape((4,4,1,1))) #firt layer uses a (2,4,4) convolution; creates (4x4x2) from 100 dim Noise with 512 channels fg.add(layers.Conv3DTranspose(512,(2,4,4),strides=(1,1,2),use_bias=False,padding='same')) #outputs 8x8x4 with 256 channels fg.add(layers.Conv3DTranspose(256,4,strides=2,use_bias=False,padding='same')) #outputs 16x16x8 with 128 channels fg.add(layers.Conv3DTranspose(128,4,strides=2,use_bias=False,padding='same')) #outputs 32x32x16 with 64 channels fg.add(layers.Conv3DTranspose(128,4,strides=2,use_bias=False,padding='same')) #outputs forground: 64x64x32 with 3 channels fg.add(layers.Conv3DTranspose(3,4,strides=2,use_bias=False,padding='same',activation='tanh')) return fg def fg_mask(fg): mask = tf.keras.models.clone_model(fg) mask.add(layers.Conv3DTranspose(1,4,strides=1,use_bias=False,padding='same',activation='sigmoid')) return mask def background(): in_shape = (4,4,1) bg = tf.keras.Sequential() bg.add(layers.Dense(4*4,use_bias=False,input_shape=in_shape)) bg.add(layers.BatchNormalization()) #fg.add(activations.tanh()) bg.add(layers.Reshape((4,4,1,1))) #firt layer uses a (2,4,4) convolution; creates (4x4x2) from 100 dim Noise with 512 channels bg.add(layers.Conv3DTranspose(512,(2,4,4),strides=(1,1,2),use_bias=False,padding='same')) #outputs 8x8x4 with 256 channels bg.add(layers.Conv3DTranspose(256,4,strides=(2,2,1),use_bias=False,padding='same')) #outputs 16x16x8 with 128 channels bg.add(layers.Conv3DTranspose(128,4,strides=(2,2,1),use_bias=False,padding='same')) #outputs 32x32x16 with 64 channels bg.add(layers.Conv3DTranspose(128,4,strides=(2,2,1),use_bias=False,padding='same')) #outputs forground: 64x64x32 with 3 channels bg.add(layers.Conv3DTranspose(3,4,strides=(2,2,1),use_bias=False,padding='same',activation='tanh')) return bg def video(m,f,b): '''Computes two-stream arch. 
    to get generated video'''
    p1 = (m*f)
    p2 = (1-m)*b
    video = p1+p2
    return video

'''
#---TEST CASING---
#generate models
enc = encoder()
fg_model = foreground()
bg_model = background()
mask = fg_mask(fg_model)

#create noise tensor (a batch of one 64x64 RGB image, matching encoder()'s input)
noise = tf.random.normal([1,64,64,3])

#get encoded tensor from noise
gen_encoding = enc(noise,training=False)

#use this encoding to create the generated video
#note: enc outputs (1,4,4,512) while foreground()/background() expect (4,4,1),
#so the encoding would still need to be projected down to one channel
gen_fg_vid = fg_model(gen_encoding,training=False)
gen_mask = mask(gen_encoding,training=False)
gen_bg_vid = bg_model(gen_encoding,training=False)
vid = video(gen_mask,gen_fg_vid,gen_bg_vid)
'''
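A minimal sketch (not part of the original file) of the compositing that video() performs; the shapes are illustrative assumptions, with the one-channel mask broadcasting over the RGB streams and a single static background frame broadcasting across time:

import tensorflow as tf

m = tf.random.uniform([1, 64, 64, 32, 1])  # sigmoid-style mask in [0,1], one channel
f = tf.random.normal([1, 64, 64, 32, 3])   # foreground video stream
b = tf.random.normal([1, 64, 64, 1, 3])    # static background frame, broadcast across time
vid = m*f + (1-m)*b                        # the two-stream formula from video()
print(vid.shape)                           # (1, 64, 64, 32, 3)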
[ "tensorflow.keras.models.clone_model", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.Sequential", "tensorflow.keras.layers.Conv3DTranspose", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Reshape" ]
src/encoder.py
[(22, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (69, 'tensorflow.keras.models.clone_model', 'tf.keras.models.clone_model', (['fg'], {}), True, 'import tensorflow as tf\n'), (75, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (23, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64 * 64)'], {'use_bias': '(False)', 'input_shape': 'in_shape'}), False, 'from tensorflow.keras import layers\n'), (24, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), False, 'from tensorflow.keras import layers\n'), (27, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(4)'], {'strides': '(2)', 'use_bias': '(False)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (29, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(128)', '(4)'], {'strides': '(2)', 'use_bias': '(False)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (31, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(256)', '(4)'], {'strides': '(2)', 'use_bias': '(False)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (33, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(512)', '(4)'], {'strides': '(2)', 'use_bias': '(False)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (44, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(4 * 4)'], {'use_bias': '(False)', 'input_shape': 'in_shape'}), False, 'from tensorflow.keras import layers\n'), (45, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), False, 'from tensorflow.keras import layers\n'), (48, 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(4, 4, 1, 1)'], {}), False, 'from tensorflow.keras import layers\n'), (51, 'tensorflow.keras.layers.Conv3DTranspose', 'layers.Conv3DTranspose', (['(512)', '(2, 4, 4)'], {'strides': '(1, 1, 2)', 'use_bias': '(False)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (55, 'tensorflow.keras.layers.Conv3DTranspose', 'layers.Conv3DTranspose', (['(256)', '(4)'], {'strides': '(2)', 'use_bias': '(False)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (58, 'tensorflow.keras.layers.Conv3DTranspose', 'layers.Conv3DTranspose', (['(128)', '(4)'], {'strides': '(2)', 'use_bias': '(False)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (61, 'tensorflow.keras.layers.Conv3DTranspose', 'layers.Conv3DTranspose', (['(128)', '(4)'], {'strides': '(2)', 'use_bias': '(False)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (64, 'tensorflow.keras.layers.Conv3DTranspose', 'layers.Conv3DTranspose', (['(3)', '(4)'], {'strides': '(2)', 'use_bias': '(False)', 'padding': '"""same"""', 'activation': '"""tanh"""'}), False, 'from tensorflow.keras import layers\n'), (70, 'tensorflow.keras.layers.Conv3DTranspose', 'layers.Conv3DTranspose', (['(1)', '(4)'], {'strides': '(1)', 'use_bias': '(False)', 'padding': '"""same"""', 'activation': '"""sigmoid"""'}), False, 'from tensorflow.keras import layers\n'), (76, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(4 * 4)'], {'use_bias': '(False)', 'input_shape': 'in_shape'}), False, 'from tensorflow.keras import layers\n'), (77, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], 
{}), False, 'from tensorflow.keras import layers\n'), (80, 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(4, 4, 1, 1)'], {}), False, 'from tensorflow.keras import layers\n'), (83, 'tensorflow.keras.layers.Conv3DTranspose', 'layers.Conv3DTranspose', (['(512)', '(2, 4, 4)'], {'strides': '(1, 1, 2)', 'use_bias': '(False)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (87, 'tensorflow.keras.layers.Conv3DTranspose', 'layers.Conv3DTranspose', (['(256)', '(4)'], {'strides': '(2, 2, 1)', 'use_bias': '(False)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (90, 'tensorflow.keras.layers.Conv3DTranspose', 'layers.Conv3DTranspose', (['(128)', '(4)'], {'strides': '(2, 2, 1)', 'use_bias': '(False)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (93, 'tensorflow.keras.layers.Conv3DTranspose', 'layers.Conv3DTranspose', (['(128)', '(4)'], {'strides': '(2, 2, 1)', 'use_bias': '(False)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (96, 'tensorflow.keras.layers.Conv3DTranspose', 'layers.Conv3DTranspose', (['(3)', '(4)'], {'strides': '(2, 2, 1)', 'use_bias': '(False)', 'padding': '"""same"""', 'activation': '"""tanh"""'}), False, 'from tensorflow.keras import layers\n')]
Lornatang/TensorFlow2-tutorials
df5bc050e9941f5be23ff9ff826744b18664bb8b
# Copyright 2019 ChangyuLiu Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# Import TensorFlow into your program:
import tensorflow as tf

from tensorflow.python.keras.layers import Dense, Conv2D, Flatten
from tensorflow.python.keras import Model

# define epochs
EPOCHS = 5

# Load and prepare the MNIST dataset.
mnist = tf.keras.datasets.mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Add a channels dimension
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]

# Use tf.data to batch and shuffle the dataset.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(10000).batch(64)
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(64)


# Build the tf.keras model using the Keras model subclassing API:
class CNN(Model):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = Conv2D(32, 3, activation=tf.nn.relu)
        self.flatten = Flatten()
        self.d1 = Dense(128, activation=tf.nn.relu)
        self.d2 = Dense(10, activation=tf.nn.softmax)

    def call(self, x, **kwargs):
        x = self.conv1(x)
        x = self.flatten(x)
        x = self.d1(x)
        return self.d2(x)


model = CNN()

# Choose an optimizer and loss function for training.
loss_op = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()

# Select metrics to measure the loss and the accuracy of the model.
# These metrics accumulate the values over epochs and then print the overall result.
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')


# Use tf.GradientTape to train the model.
@tf.function
def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_op(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    train_loss(loss)
    train_accuracy(labels, predictions)


# Test the model.
@tf.function
def test_step(images, labels):
    predictions = model(images)
    t_loss = loss_op(labels, predictions)

    test_loss(t_loss)
    test_accuracy(labels, predictions)


# training
def train():
    for epoch in range(EPOCHS):
        for images, labels in train_dataset:
            train_step(images, labels)

        for test_images, test_labels in test_dataset:
            test_step(test_images, test_labels)

        # loss is reported as-is; accuracy is scaled to a percentage,
        # matching the sample output below
        print(f"Epoch {epoch+1}, "
              f"Loss: {train_loss.result():.6f}, "
              f"Accuracy: {train_accuracy.result() * 100:.4f}%, "
              f"Test Loss: {test_loss.result():.6f}, "
              f"Test Accuracy: {test_accuracy.result() * 100:.4f}%.")


# main func
if __name__ == '__main__':
    train()

# The image classifier is now trained to ~98% accuracy on this dataset.
# ==============================================================================
# Epoch 1, Loss: 0.151985, Accuracy: 95.5033%,
# Test Loss: 0.073297, Test Accuracy: 97.6999%.
# Epoch 2, Loss: 0.097963, Accuracy: 97.0883%,
# Test Loss: 0.065212, Test Accuracy: 97.9150%.
# Epoch 3, Loss: 0.072738, Accuracy: 97.8533%,
# Test Loss: 0.063016, Test Accuracy: 97.9833%.
# Epoch 4, Loss: 0.057954, Accuracy: 98.2820%,
# Test Loss: 0.061889, Test Accuracy: 98.0650%.
# Epoch 5, Loss: 0.048282, Accuracy: 98.5673%,
# Test Loss: 0.061678, Test Accuracy: 98.1159%.
# ==============================================================================
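For comparison, a minimal sketch (not part of the original tutorial) of the same training run through Keras' built-in loop instead of the custom tf.function steps; it reuses the CNN class, EPOCHS, and the tf.data pipelines defined above, under a separate model name to avoid shadowing the global used by train_step:

keras_model = CNN()
keras_model.compile(optimizer=tf.keras.optimizers.Adam(),
                    loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
# fit() iterates the already-batched datasets and resets its metrics each epoch
keras_model.fit(train_dataset, epochs=EPOCHS, validation_data=test_dataset)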
[ "tensorflow.python.keras.layers.Flatten", "tensorflow.python.keras.layers.Dense", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.keras.optimizers.Adam", "tensorflow.python.keras.layers.Conv2D", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.keras.metrics.Mean", "tensorflow.GradientTape" ]
Primary_tutorial/Quickstart/mnist-advanced.py
[(57, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""train_loss"""'}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""train_accuracy"""'}), True, 'import tensorflow as tf\n'), (65, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""test_loss"""'}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""test_accuracy"""'}), True, 'import tensorflow as tf\n'), (35, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_test, y_test)'], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.python.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3)'], {'activation': 'tf.nn.relu'}), False, 'from tensorflow.python.keras.layers import Dense, Conv2D, Flatten\n'), (43, 'tensorflow.python.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.python.keras.layers import Dense, Conv2D, Flatten\n'), (44, 'tensorflow.python.keras.layers.Dense', 'Dense', (['(128)'], {'activation': 'tf.nn.relu'}), False, 'from tensorflow.python.keras.layers import Dense, Conv2D, Flatten\n'), (45, 'tensorflow.python.keras.layers.Dense', 'Dense', (['(10)'], {'activation': 'tf.nn.softmax'}), False, 'from tensorflow.python.keras.layers import Dense, Conv2D, Flatten\n'), (72, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_train, y_train)'], {}), True, 'import tensorflow as tf\n')]
sanixa/blind_MI
d0fafe56ea998a5b18fd1ace2176d434d7d04ca7
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation,Conv2D, MaxPooling2D,Flatten import tensorflow as tf from tensorflow.keras.applications import ResNet50, ResNet101, VGG16, VGG19, DenseNet121 def create_ResNet50_model(input_shape, num_classes): model = tf.keras.Sequential([ ResNet50(include_top=False, weights='imagenet', input_shape=input_shape), GlobalAveragePooling2D(), Dense(num_classes), Activation("softmax") ]) model.summary() return model def create_ResNet101_model(input_shape, num_classes): model = tf.keras.Sequential([ ResNet101(include_top=False, weights='imagenet', input_shape=input_shape), GlobalAveragePooling2D(), Dense(num_classes), Activation("softmax") ]) model.summary() return model def create_VGG16_model(input_shape, num_classes): model = tf.keras.Sequential([ VGG16(include_top=False, weights='imagenet', input_shape=input_shape), GlobalAveragePooling2D(), Dense(num_classes), Activation("softmax") ]) model.summary() return model def create_VGG19_model(input_shape, num_classes): model = tf.keras.Sequential([ VGG19(include_top=False, weights='imagenet', input_shape=input_shape), GlobalAveragePooling2D(), Dense(num_classes), Activation("softmax") ]) model.summary() return model def create_DenseNet121_model(input_shape, num_classes): model = tf.keras.Sequential([ DenseNet121(include_top=False, weights='imagenet', input_shape=input_shape), GlobalAveragePooling2D(), Dense(num_classes), Activation("softmax") ]) model.summary() return model def create_CNN_model(input_shape, num_classes): model = tf.keras.Sequential([ Conv2D(32, (3, 3), activation='relu', input_shape=input_shape), Conv2D(32, (3, 3), activation='relu'), MaxPooling2D(pool_size=(2, 2)), Flatten(), Dense(128, activation='relu'), Dense(num_classes), Activation('softmax') ]) model.summary() return model def create_Dense_3_layer_model(input_shape, num_classes): model = tf.keras.Sequential([ Dense(512, activation='relu', input_shape=input_shape), Dense(256, activation='relu'), Dense(128, activation='relu'), Dense(num_classes), Activation('softmax') ]) model.summary() return model def create_Dense_4_layer_model(input_shape, num_classes): model = tf.keras.Sequential([ Dense(1024, activation='relu', input_shape=input_shape), Dense(512, activation='relu'), Dense(256, activation='relu'), Dense(128, activation='relu'), Dense(num_classes), Activation('softmax') ]) model.summary() return model def create_Dense_5_layer_model(input_shape, num_classes): model = tf.keras.Sequential([ Dense(2048, activation='relu', input_shape=input_shape), Dense(1024, activation='relu'), Dense(512, activation='relu'), Dense(256, activation='relu'), Dense(128, activation='relu'), Dense(num_classes), Activation('softmax') ]) model.summary() return model def create_Dense_6_layer_model(input_shape, num_classes): model = tf.keras.Sequential([ Dense(4096, activation='relu', input_shape=input_shape), Dense(2048, activation='relu'), Dense(1024, activation='relu'), Dense(512, activation='relu'), Dense(256, activation='relu'), Dense(128, activation='relu'), Dense(num_classes), Activation('softmax') ]) model.summary() return model def create_Dense_7_layer_model(input_shape, num_classes): model = tf.keras.Sequential([ Dense(8192, activation='relu', input_shape=input_shape), Dense(4096, activation='relu'), Dense(2048, activation='relu'), Dense(1024, activation='relu'), Dense(512, activation='relu'), Dense(256, activation='relu'), Dense(128, activation='relu'), Dense(num_classes), Activation('softmax') ]) model.summary() return model
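A minimal usage sketch (not part of the original file) for these factory functions; the input shape, class count, and label format are illustrative assumptions:

from ModelUtil import create_CNN_model  # this module, assuming it is on the import path

model = create_CNN_model(input_shape=(32, 32, 3), num_classes=10)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # one-hot labels, to match the softmax head
              metrics=['accuracy'])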
[ "tensorflow.keras.layers.GlobalAveragePooling2D", "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.applications.VGG19", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.applications.ResNet50", "tensorflow.keras.applications.ResNet101", "tensorflow.keras.applications.VGG16", "tensorflow.keras.applications.DenseNet121", "tensorflow.keras.layers.Flatten" ]
ModelUtil.py
[(8, 'tensorflow.keras.applications.ResNet50', 'ResNet50', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_shape': 'input_shape'}), False, 'from tensorflow.keras.applications import ResNet50, ResNet101, VGG16, VGG19, DenseNet121\n'), (11, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (12, 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (13, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (21, 'tensorflow.keras.applications.ResNet101', 'ResNet101', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_shape': 'input_shape'}), False, 'from tensorflow.keras.applications import ResNet50, ResNet101, VGG16, VGG19, DenseNet121\n'), (24, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (25, 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (26, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (34, 'tensorflow.keras.applications.VGG16', 'VGG16', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_shape': 'input_shape'}), False, 'from tensorflow.keras.applications import ResNet50, ResNet101, VGG16, VGG19, DenseNet121\n'), (37, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (38, 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (39, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (47, 'tensorflow.keras.applications.VGG19', 'VGG19', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_shape': 'input_shape'}), False, 'from tensorflow.keras.applications import ResNet50, ResNet101, VGG16, VGG19, DenseNet121\n'), (50, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (51, 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (52, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (60, 'tensorflow.keras.applications.DenseNet121', 'DenseNet121', ([], {'include_top': '(False)', 'weights': 
'"""imagenet"""', 'input_shape': 'input_shape'}), False, 'from tensorflow.keras.applications import ResNet50, ResNet101, VGG16, VGG19, DenseNet121\n'), (63, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (64, 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (65, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (73, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'input_shape': 'input_shape'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (74, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (75, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (76, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (77, 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (78, 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (79, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (87, 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""', 'input_shape': 'input_shape'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (88, 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (89, 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (90, 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (91, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (99, 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""', 'input_shape': 'input_shape'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (100, 
'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (101, 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (102, 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (103, 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (104, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (112, 'tensorflow.keras.layers.Dense', 'Dense', (['(2048)'], {'activation': '"""relu"""', 'input_shape': 'input_shape'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (113, 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (114, 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (115, 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (116, 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (117, 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (118, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (126, 'tensorflow.keras.layers.Dense', 'Dense', (['(4096)'], {'activation': '"""relu"""', 'input_shape': 'input_shape'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (127, 'tensorflow.keras.layers.Dense', 'Dense', (['(2048)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (128, 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (129, 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (130, 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import 
GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (131, 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (132, 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (133, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (141, 'tensorflow.keras.layers.Dense', 'Dense', (['(8192)'], {'activation': '"""relu"""', 'input_shape': 'input_shape'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (142, 'tensorflow.keras.layers.Dense', 'Dense', (['(4096)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (143, 'tensorflow.keras.layers.Dense', 'Dense', (['(2048)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (144, 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (145, 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (146, 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (147, 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (148, 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n'), (149, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Activation, Conv2D, MaxPooling2D, Flatten\n')]
jsteggink/transformers
90178b0cefe94fef258a39cff5019b5ec150597b
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF general model utils.""" import functools import inspect import os import re import warnings from typing import Dict, List, Optional, Union import h5py import numpy as np import tensorflow as tf from tensorflow.python.keras import backend as K from tensorflow.python.keras.saving import hdf5_format from .configuration_utils import PretrainedConfig from .file_utils import ( DUMMY_INPUTS, TF2_WEIGHTS_NAME, WEIGHTS_NAME, ModelOutput, PushToHubMixin, cached_path, hf_bucket_url, is_offline_mode, is_remote_url, ) from .generation_tf_utils import TFGenerationMixin from .tokenization_utils_base import BatchEncoding from .utils import logging logger = logging.get_logger(__name__) tf_logger = tf.get_logger() TFModelInputType = Union[ List[tf.Tensor], List[np.ndarray], Dict[str, tf.Tensor], Dict[str, np.ndarray], np.ndarray, tf.Tensor ] class TFModelUtilsMixin: """ A few utilities for :obj:`tf.keras.Model`, to be used as a mixin. """ def num_parameters(self, only_trainable: bool = False) -> int: """ Get the number of (optionally, trainable) parameters in the model. Args: only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to return only the number of trainable parameters Returns: :obj:`int`: The number of parameters. """ if only_trainable: return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables)) else: return self.count_params() def keras_serializable(cls): """ Decorate a Keras Layer class to support Keras serialization. This is done by: 1. Adding a :obj:`transformers_config` dict to the Keras config dictionary in :obj:`get_config` (called by Keras at serialization time. 2. Wrapping :obj:`__init__` to accept that :obj:`transformers_config` dict (passed by Keras at deserialization time) and convert it to a config object for the actual layer initializer. 3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not need to be supplied in :obj:`custom_objects` in the call to :obj:`tf.keras.models.load_model`. Args: cls (a :obj:`tf.keras.layers.Layers subclass`): Typically a :obj:`TF.MainLayer` class in this project, in general must accept a :obj:`config` argument to its initializer. Returns: The same class object, with modifications for Keras deserialization. 
""" initializer = cls.__init__ config_class = getattr(cls, "config_class", None) if config_class is None: raise AttributeError("Must set `config_class` to use @keras_serializable") @functools.wraps(initializer) def wrapped_init(self, *args, **kwargs): config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None) if isinstance(config, dict): config = config_class.from_dict(config) initializer(self, config, *args, **kwargs) elif isinstance(config, PretrainedConfig): if len(args) > 0: initializer(self, *args, **kwargs) else: initializer(self, config, *args, **kwargs) else: raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)") self._config = config self._kwargs = kwargs cls.__init__ = wrapped_init if not hasattr(cls, "get_config"): raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses") if hasattr(cls.get_config, "_is_default"): def get_config(self): cfg = super(cls, self).get_config() cfg["config"] = self._config.to_dict() cfg.update(self._kwargs) return cfg cls.get_config = get_config cls._keras_serializable = True if hasattr(tf.keras.utils, "register_keras_serializable"): cls = tf.keras.utils.register_keras_serializable()(cls) return cls class TFCausalLanguageModelingLoss: """ Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) # make sure only labels that are not equal to -100 affect the loss active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) class TFQuestionAnsweringLoss: """ Loss function suitable for question answering. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) start_loss = loss_fn(labels["start_position"], logits[0]) end_loss = loss_fn(labels["end_position"], logits[1]) return (start_loss + end_loss) / 2.0 class TFTokenClassificationLoss: """ Loss function suitable for token classification. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) # make sure only labels that are not equal to -100 # are taken into account as loss if tf.math.reduce_any(labels == -1): warnings.warn("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.") active_loss = tf.reshape(labels, (-1,)) != -1 else: active_loss = tf.reshape(labels, (-1,)) != -100 reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) class TFSequenceClassificationLoss: """ Loss function suitable for sequence classification. 
""" def compute_loss(self, labels, logits): if len(shape_list(logits)) == 1 or shape_list(logits)[1] == 1: loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE) else: loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) return loss_fn(labels, logits) class TFMultipleChoiceLoss(TFSequenceClassificationLoss): """Loss function suitable for multiple choice tasks.""" class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss): """ Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ class TFNextSentencePredictionLoss: """ Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) # make sure only labels that are not equal to -100 # are taken into account as loss next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss) next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss) return loss_fn(next_sentence_label, next_sentence_reduced_logits) def booleans_processing(config, **kwargs): """ Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or graph) Args: config (:class:`~transformers.PretrainedConfig`): The config of the running model. **kwargs: The boolean parameters Returns: A dictionary with the proper values for each boolean """ final_booleans = {} if tf.executing_eagerly(): final_booleans["output_attentions"] = ( kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions ) final_booleans["output_hidden_states"] = ( kwargs["output_hidden_states"] if kwargs["output_hidden_states"] is not None else config.output_hidden_states ) final_booleans["return_dict"] = ( kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict ) if "use_cache" in kwargs: final_booleans["use_cache"] = kwargs["use_cache"] if kwargs["use_cache"] is not None else config.use_cache else: if ( kwargs["output_attentions"] is not None or kwargs["output_hidden_states"] is not None or ("use_cache" in kwargs and kwargs["use_cache"] is not None) ): tf_logger.warning( "The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model." "They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)." ) final_booleans["output_attentions"] = config.output_attentions final_booleans["output_hidden_states"] = config.output_hidden_states if kwargs["return_dict"] is not None: tf_logger.warning( "The parameter `return_dict` cannot be set in graph mode and will always be set to `True`." ) final_booleans["return_dict"] = True if "use_cache" in kwargs: final_booleans["use_cache"] = config.use_cache return final_booleans def input_processing(func, config, input_ids, **kwargs): """ Process the input of each TensorFlow model including the booleans. 
    In case of a list of symbolic inputs, each input has to be named according to the parameter names, i.e.
    `input_ids = tf.keras.Input(shape=(128,), dtype='int32', name="input_ids")`, otherwise the order of the tensors
    will not be guaranteed during the training.

    Args:
        func (:obj:`callable`):
            The callable function of the TensorFlow model.
        config (:class:`~transformers.PretrainedConfig`):
            The config of the running model.
        **kwargs:
            The inputs of the model.

    Returns:
        A dictionary mapping each parameter name of :obj:`func` to its processed value (including the booleans).
    """
    signature = dict(inspect.signature(func).parameters)
    signature.pop("kwargs", None)
    signature.pop("self", None)
    parameter_names = list(signature.keys())
    output = {}
    allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray)

    if "inputs" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
            FutureWarning,
        )

        output["input_ids"] = kwargs["kwargs_call"].pop("inputs")

    if "decoder_cached_states" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
            FutureWarning,
        )
        output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states")

    if len(kwargs["kwargs_call"]) > 0:
        raise ValueError(
            f"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}."
        )

    kwargs.pop("kwargs_call")

    for k, v in kwargs.items():
        if isinstance(v, allowed_types) or v is None:
            output[k] = v
        else:
            raise ValueError(f"Data of type {type(v)} is not allowed; only {allowed_types} is accepted for {k}.")

    if isinstance(input_ids, (tuple, list)):
        for i, input in enumerate(input_ids):
            # EagerTensors don't allow use of the .name property, so we check for a real Tensor
            if type(input) == tf.Tensor:
                # Tensor names always have the pattern `name:id`, so we check only the
                # `name` part
                tensor_name = input.name.split(":")[0]

                if tensor_name in parameter_names:
                    output[tensor_name] = input
                else:
                    output[parameter_names[i]] = input
            elif isinstance(input, allowed_types) or input is None:
                output[parameter_names[i]] = input
            else:
                raise ValueError(
                    f"Data of type {type(input)} is not allowed; only {allowed_types} is accepted for {parameter_names[i]}."
                )
    elif isinstance(input_ids, (dict, BatchEncoding)):
        if "inputs" in input_ids:
            warnings.warn(
                "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
                FutureWarning,
            )

            output["input_ids"] = input_ids.pop("inputs")

        if "decoder_cached_states" in input_ids:
            warnings.warn(
                "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            output["past_key_values"] = input_ids.pop("decoder_cached_states")

        for k, v in dict(input_ids).items():
            if isinstance(v, allowed_types) or v is None:
                output[k] = v
            elif k not in parameter_names and "args" not in parameter_names:
                logger.warning(
                    f"The parameter {k} does not belong to the parameter list {parameter_names} and will be ignored."
                )
                continue
            else:
                raise ValueError(f"Data of type {type(v)} is not allowed; only {allowed_types} is accepted for {k}.")
    else:
        if isinstance(input_ids, tf.Tensor) or input_ids is None:
            output[parameter_names[0]] = input_ids
        else:
            raise ValueError(
                f"Data of type {type(input_ids)} is not allowed; only {allowed_types} is accepted for {parameter_names[0]}."
) for name in parameter_names: if name not in list(output.keys()) and name != "args": output[name] = kwargs.pop(name, signature[name].default) # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs) # So to respect the proper output we have to add this exception if "args" in output: if output["args"] is not None and type(output["args"]) == tf.Tensor: tensor_name = output["args"].name.split(":")[0] output[tensor_name] = output["args"] else: # `args` in this case is always the first parameter, then `input_ids` output["input_ids"] = output["args"] del output["args"] if "kwargs" in output: del output["kwargs"] boolean_dict = { k: v for k, v in output.items() if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"] } output.update( booleans_processing( config=config, **boolean_dict, ) ) return output def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): """ Detect missing and unexpected layers and load the TF weights accordingly to their names and shapes. Args: model (:obj:`tf.keras.models.Model`): The model to load the weights into. resolved_archive_file (:obj:`str`): The location of the H5 file. ignore_mismatched_sizes (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to ignore weights with shapes that don't match between the checkpoint of the model. Returns: Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the mismatched layers. """ missing_layers = [] unexpected_layers = [] mismatched_layers = [] # Read the H5 file with h5py.File(resolved_archive_file, "r") as f: # Retrieve the name of each layer from the H5 file saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names")) # Find the missing layers from the high level list of layers missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name) # Find the unexpected layers from the high level list of layers unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers])) saved_weight_names_set = set() symbolic_weights_names = set() weight_value_tuples = [] # Compute missing and unexpected sub layers # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...] 
for layer in model.layers: # if layer_name from the H5 file belongs to the layers from the instantiated model if layer.name in saved_h5_model_layers_name: # Get the H5 layer object from its name h5_layer_object = f[layer.name] # Get all the weights as a list from the layer object symbolic_weights = layer.trainable_weights + layer.non_trainable_weights saved_weights = {} # Create a dict from the H5 saved model that looks like {"weight_name": weight_value} # And a set with only the names for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"): # TF names always start with the model name so we ignore it name = "/".join(weight_name.split("/")[1:]) if _prefix is not None: name = _prefix + "/" + name saved_weights[name] = np.asarray(h5_layer_object[weight_name]) # Add the updated name to the final list for computing missing/unexpected values saved_weight_names_set.add(name) # Loop over each weights from the instantiated model and compare with the weights from the H5 file for symbolic_weight in symbolic_weights: # TF names always start with the model name so we ignore it if _prefix is not None: delimeter = len(_prefix.split("/")) symbolic_weight_name = "/".join( symbolic_weight.name.split("/")[:delimeter] + symbolic_weight.name.split("/")[delimeter + 1 :] ) else: symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:]) # here we check if the current weight is among the weights from the H5 file # If yes, get the weight_value of the corresponding weight from the H5 file # If not, make the value to None saved_weight_value = saved_weights.get(symbolic_weight_name, None) # Add the updated name to the final list for computing missing/unexpected values symbolic_weights_names.add(symbolic_weight_name) # If the current weight is found if saved_weight_value is not None: # Check if the shape of the current weight and the one from the H5 file are different if K.int_shape(symbolic_weight) != saved_weight_value.shape: # If yes we reshape the weight from the H5 file accordingly to the current weight # If the two shapes are not compatible we raise an issue try: array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight)) except ValueError as e: if ignore_mismatched_sizes: mismatched_layers.append( (symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight)) ) continue else: raise e else: array = saved_weight_value # We create the tuple that will be loaded and add it to the final list weight_value_tuples.append((symbolic_weight, array)) # Load all the weights K.batch_set_value(weight_value_tuples) # Compute the missing and unexpected layers missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set)) unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names)) return missing_layers, unexpected_layers, mismatched_layers def init_copy_embeddings(old_embeddings, new_num_tokens): r""" This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be kept or not. 
Example: - if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4] - mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1] - if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5] - mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4] """ old_num_tokens, old_embedding_dim = shape_list(old_embeddings) size_diff = new_num_tokens - old_num_tokens # initialize new embeddings # Copy token embeddings from the previous ones if tf.math.greater(size_diff, 0): # if the new size is greater than the old one, we extend the current embeddings with a padding until getting new size # and we create a mask to properly identify the padded values and be replaced by the values of the newly created # embeddings current_weights = tf.pad( old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1 ) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True) mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False) else: # if the new size if lower than the old one, we take the current embeddings until the new size current_weights = tf.slice( old_embeddings.value(), tf.convert_to_tensor([0, 0]), tf.convert_to_tensor([new_num_tokens, old_embedding_dim]), ) mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True) return mask, current_weights class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin): r""" Base class for all TF models. :class:`~transformers.TFPreTrainedModel` takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to: * resize the input embeddings, * prune heads in the self-attention heads. Class attributes (overridden by derived classes): - **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture. - **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. """ config_class = None base_model_prefix = "" # a list of re pattern of tensor names to ignore from the model when loading the model weights # (and avoid unnecessary warnings). _keys_to_ignore_on_load_missing = None # a list of re pattern of tensor names to ignore from the weights when loading the model weights # (and avoid unnecessary warnings). _keys_to_ignore_on_load_unexpected = None _requires_load_weight_prefix = False @property def dummy_inputs(self) -> Dict[str, tf.Tensor]: """ Dummy inputs to build the network. Returns: :obj:`Dict[str, tf.Tensor]`: The dummy inputs. """ return { "input_ids": tf.constant(DUMMY_INPUTS), } def __init__(self, config, *inputs, **kwargs): super().__init__(*inputs, **kwargs) if not isinstance(config, PretrainedConfig): raise ValueError( f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " "`PretrainedConfig`. To create a model from a pretrained model use " f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`" ) # Save config and origin of the pretrained weights if given in model self.config = config self.name_or_path = config.name_or_path @classmethod def _from_config(cls, config, **kwargs): """ All context managers that the model should be initialized under go here. 
""" return cls(config, **kwargs) @tf.function( input_signature=[ { "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), } ] ) def serving(self, inputs): """ Method used for serving the model. Args: inputs (:obj:`Dict[str, tf.Tensor]`): The input of the saved model as a dictionary of tensors. """ output = self.call(inputs) return self.serving_output(output) def serving_output(output): """ Prepare the output of the saved model. Each model must implement this function. Args: output (:obj:`~transformers.TFBaseModelOutput`): The output returned by the model. """ raise NotImplementedError def get_input_embeddings(self) -> tf.keras.layers.Layer: """ Returns the model's input embeddings layer. Returns: :obj:`tf.Variable`: The embeddings layer mapping vocabulary to hidden states. """ main_layer = getattr(self, self.base_model_prefix, self) if main_layer is not self: return main_layer.get_input_embeddings() else: raise NotImplementedError def set_input_embeddings(self, value): """ Set model's input embeddings Args: value (:obj:`tf.Variable`): The new weights mapping hidden states to vocabulary. """ main_layer = getattr(self, self.base_model_prefix) if main_layer is None: raise NotImplementedError("The model does not implements the base_model_prefix attribute.") try: main_layer.set_input_embeddings(value) except AttributeError: logger.info("Building the model") self(self.dummy_inputs) main_layer.set_input_embeddings(value) def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]: """ Returns the model's output embeddings Returns: :obj:`tf.Variable`: The new weights mapping vocabulary to hidden states. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() return lm_head.get_output_embeddings() return None # Overwrite for models with output embeddings def set_output_embeddings(self, value): """ Set model's output embeddings Args: value (:obj:`tf.Variable`): The new weights mapping hidden states to vocabulary. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: lm_head.set_output_embeddings(value) except AttributeError: logger.info("Building the model") self(self.dummy_inputs) lm_head.set_output_embeddings(value) def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]: """ Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the embeddings Return: :obj:`tf.keras.layers.Layer`: The layer that handles the bias, None if not an LM model. """ warnings.warn( "The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning ) return self.get_lm_head() def get_prefix_bias_name(self) -> Union[None, str]: """ Get the concatenated _prefix name of the bias from the model name to the parent layer Return: :obj:`str`: The _prefix name of the bias. """ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return None def get_bias(self) -> Union[None, Dict[str, tf.Variable]]: """ Dict of bias attached to an LM head. The key represents the name of the bias attribute. Return: :obj:`tf.Variable`: The weights representing the bias, None if not an LM model. 
""" if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: return lm_head.get_bias() except AttributeError: self(self.dummy_inputs) return lm_head.get_bias() return None def set_bias(self, value): """ Set all the bias in the LM head. Args: value (:obj:`Dict[tf.Variable]`): All the new bias attached to an LM head. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: lm_head.set_bias(value) except AttributeError: self(self.dummy_inputs) lm_head.set_bias(value) def get_lm_head(self) -> tf.keras.layers.Layer: """ The LM Head layer. This method must be overwritten by all the models that have a lm head. Return: :obj:`tf.keras.layers.Layer`: The LM head layer if the model has one, None if not. """ return None def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable: """ Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`. Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method. Arguments: new_num_tokens (:obj:`int`, `optional`): The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens :obj:`tf.Variable` module of the model without doing anything. Return: :obj:`tf.Variable`: Pointer to the input tokens Embeddings Module of the model. """ if new_num_tokens is None or new_num_tokens == self.config.vocab_size: return self._get_word_embedding_weight(self.get_input_embeddings()) model_embeds = self._resize_token_embeddings(new_num_tokens) # Update base model and current model config self.config.vocab_size = new_num_tokens return model_embeds def _get_word_embedding_weight(model, embedding_layer): embeds = getattr(embedding_layer, "weight", None) if embeds is not None: return embeds embeds = getattr(embedding_layer, "decoder", None) if embeds is not None: return embeds # The reason why the attributes don't exist might be # because the model is not built, so retry getting # the argument after building the model model(model.dummy_inputs) embeds = getattr(embedding_layer, "weight", None) if embeds is not None: return embeds embeds = getattr(embedding_layer, "decoder", None) if embeds is not None: return embeds return None def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings()) new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) # if word embeddings are not tied, make sure that lm head bias is resized as well if self.get_bias() is not None: old_lm_head_bias = self.get_bias() new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) self.set_bias(new_lm_head_bias) # if word embeddings are not tied, make sure that lm head decoder is resized as well if self.get_output_embeddings() is not None: old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings()) new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens) self.set_output_embeddings(new_lm_head_decoder) self.set_input_embeddings(new_embeddings) return self.get_input_embeddings() def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens): """ Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end. 
Reducing the size will remove vectors from the end Args: old_lm_head_bias (:obj:`tf.Variable`): Old lm head bias to be resized. new_num_tokens (:obj:`int`, `optional`): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns None Return: :obj:`tf.Variable`: Pointer to the resized bias. """ new_lm_head_bias = {} for attr, weight in old_lm_head_bias.items(): first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight) size_diff = new_num_tokens - old_num_tokens final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens] # initialize new bias if tf.math.greater(size_diff, 0): padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]] current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy] bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True) bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False) else: slice_from = [0] if first_dim is None else [0, 0] current_bias = tf.slice( weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape) ) bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True) new_bias = self.add_weight( shape=final_shape, initializer="zeros", trainable=True, name=weight.name.split(":")[0], ) init_bias = tf.where(bias_mask, current_bias, new_bias.value()) new_bias.assign(init_bias) new_lm_head_bias[attr] = new_bias return new_lm_head_bias def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens): """ Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_lm_head_decoder (:obj:`tf.Variable`): Old lm head decoder to be resized. new_num_tokens (:obj:`int`, `optional`): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns None Return: :obj:`tf.Variable`: Pointer to the resized decoder or None if the output embeddings are different from the input ones. """ new_lm_head_decoder = old_lm_head_decoder is_input_output_equals = tf.reduce_any( self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder ) if old_lm_head_decoder is not None and not is_input_output_equals: old_embedding_dim = shape_list(old_lm_head_decoder)[1] decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens) new_lm_head_decoder = self.add_weight( shape=(new_num_tokens, old_embedding_dim), initializer="zeros", trainable=True, name=old_lm_head_decoder.name.split(":")[0], ) init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value()) new_lm_head_decoder.assign(init_decoder) return new_lm_head_decoder def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable: """ Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_embeddings (:obj:`tf.Variable`): Old embeddings to be resized. 
new_num_tokens (:obj:`int`, `optional`): New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens :obj:`tf.Variable`` module of the model without doing anything. Return: :obj:`tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if :obj:`new_num_tokens` is :obj:`None` """ old_embedding_dim = shape_list(old_embeddings)[1] init_range = getattr(self.config, "initializer_range", 0.02) embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens) new_embeddings = self.add_weight( name=old_embeddings.name.split(":")[0], shape=[new_num_tokens, old_embedding_dim], initializer=get_initializer(init_range), dtype=tf.float32, ) init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value()) new_embeddings.assign(init_embeddings) return new_embeddings def prune_heads(self, heads_to_prune): """ Prunes heads of the base model. Arguments: heads_to_prune (:obj:`Dict[int, List[int]]`): Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. """ raise NotImplementedError def save_pretrained(self, save_directory, saved_model=False, version=1, push_to_hub=False, **kwargs): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the :func:`~transformers.TFPreTrainedModel.from_pretrained` class method. Arguments: save_directory (:obj:`str`): Directory to which to save. Will be created if it doesn't exist. saved_model (:obj:`bool`, `optional`, defaults to :obj:`False`): If the model has to be saved in saved model format as well or not. version (:obj:`int`, `optional`, defaults to 1): The version of the saved model. A saved model needs to be versioned in order to be properly loaded by TensorFlow Serving as detailed in the official documentation https://www.tensorflow.org/tfx/serving/serving_basic push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to push your model to the Hugging Face model hub after saving it. .. warning:: Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with :obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory instead. kwargs: Additional key word arguments passed along to the :meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method. 
""" if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return if push_to_hub: commit_message = kwargs.pop("commit_message", None) repo = self._create_or_get_repo(save_directory, **kwargs) os.makedirs(save_directory, exist_ok=True) if saved_model: saved_model_dir = os.path.join(save_directory, "saved_model", str(version)) self.save(saved_model_dir, include_optimizer=False, signatures=self.serving) logger.info(f"Saved model created in {saved_model_dir}") # Save configuration file self.config.architectures = [self.__class__.__name__[2:]] self.config.save_pretrained(save_directory) # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME) self.save_weights(output_model_file) logger.info(f"Model weights saved in {output_model_file}") if push_to_hub: url = self._push_to_hub(repo, commit_message=commit_message) logger.info(f"Model pushed to the hub in this commit: {url}") @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiate a pretrained TF 2.0 model from a pre-trained model configuration. The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path (:obj:`str`, `optional`): Can be either: - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``. - A path to a `directory` containing model weights saved using :func:`~transformers.TFPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - A path or url to a `PyTorch state_dict save file` (e.g, ``./pt_model/pytorch_model.bin``). In this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards. - :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``). model_args (sequence of positional arguments, `optional`): All remaning positional arguments will be passed to the underlying model's ``__init__`` method. config (:obj:`Union[PretrainedConfig, str]`, `optional`): Can be either: - an instance of a class derived from :class:`~transformers.PretrainedConfig`, - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`. Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - The model is a model provided by the library (loaded with the `model id` string of a pretrained model). - The model was saved using :func:`~transformers.TFPreTrainedModel.save_pretrained` and is reloaded by supplying the save directory. - The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. 
from_pt: (:obj:`bool`, `optional`, defaults to :obj:`False`): Load the model weights from a PyTorch state_dict save file (see docstring of ``pretrained_model_name_or_path`` argument). ignore_mismatched_size (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels). cache_dir (:obj:`str`, `optional`): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies: (:obj:`Dict[str, str], `optional`): A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`): Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to only look at local files (e.g., not try doanloading the model). use_auth_token (:obj:`str` or `bool`, `optional`): The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`). revision(:obj:`str`, `optional`, defaults to :obj:`"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any identifier allowed by git. mirror(:obj:`str`, `optional`): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. kwargs (remaining dictionary of keyword arguments, `optional`): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., :obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. .. note:: Passing :obj:`use_auth_token=True` is required when you want to use a private model. 
Examples:: >>> from transformers import BertConfig, TFBertModel >>> # Download model and configuration from huggingface.co and cache. >>> model = TFBertModel.from_pretrained('bert-base-uncased') >>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable). >>> model = TFBertModel.from_pretrained('./test/saved_model/') >>> # Update configuration during loading. >>> model = TFBertModel.from_pretrained('bert-base-uncased', output_attentions=True) >>> assert model.config.output_attentions == True >>> # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable). >>> config = BertConfig.from_json_file('./pt_model/my_pt_model_config.json') >>> model = TFBertModel.from_pretrained('./pt_model/my_pytorch_model.bin', from_pt=True, config=config) """ config = kwargs.pop("config", None) cache_dir = kwargs.pop("cache_dir", None) from_pt = kwargs.pop("from_pt", False) ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) local_files_only = kwargs.pop("local_files_only", False) use_auth_token = kwargs.pop("use_auth_token", None) revision = kwargs.pop("revision", None) mirror = kwargs.pop("mirror", None) load_weight_prefix = kwargs.pop("load_weight_prefix", None) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True # Load config if we don't provide a configuration if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained( config_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs, ) else: model_kwargs = kwargs # Load model if pretrained_model_name_or_path is not None: if os.path.isdir(pretrained_model_name_or_path): if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint in priority if from_pt archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): # Load from a TF 2.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) else: raise EnvironmentError( f"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME]} found in directory " f"{pretrained_model_name_or_path} or `from_pt` set to False" ) elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path elif os.path.isfile(pretrained_model_name_or_path + ".index"): archive_file = pretrained_model_name_or_path + ".index" else: archive_file = hf_bucket_url( pretrained_model_name_or_path, filename=(WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME), 
revision=revision, mirror=mirror, ) try: # Load from URL or cache if already cached resolved_archive_file = cached_path( archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, ) except EnvironmentError as err: logger.error(err) msg = ( f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n" f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co./models'\n\n" f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {TF2_WEIGHTS_NAME}, {WEIGHTS_NAME}.\n\n" ) raise EnvironmentError(msg) if resolved_archive_file == archive_file: logger.info(f"loading weights file {archive_file}") else: logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}") else: resolved_archive_file = None config.name_or_path = pretrained_model_name_or_path # composed models, *e.g.* TFRag, require special treatment when it comes to loading # pre-trained weights. if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None: model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name") # Instantiate model. model = cls(config, *model_args, **model_kwargs) if from_pt: from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model # Load from a PyTorch checkpoint return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True) # we might need to extend the variable scope for composite models if load_weight_prefix is not None: with tf.compat.v1.variable_scope(load_weight_prefix): model(model.dummy_inputs) # build the network with dummy inputs else: model(model.dummy_inputs) # build the network with dummy inputs assert os.path.isfile(resolved_archive_file), f"Error retrieving file {resolved_archive_file}" # 'by_name' allow us to do transfer learning by skipping/adding layers # see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357 try: missing_keys, unexpected_keys, mismatched_keys = load_tf_weights( model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=load_weight_prefix, ) except OSError: raise OSError( "Unable to load weights from h5 file. " "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. " ) model(model.dummy_inputs) # Make sure restore ops are run if cls._keys_to_ignore_on_load_missing is not None: for pat in cls._keys_to_ignore_on_load_missing: missing_keys = [k for k in missing_keys if re.search(pat, k) is None] if cls._keys_to_ignore_on_load_unexpected is not None: for pat in cls._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning( f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when " f"initializing {model.__class__.__name__}: {unexpected_keys}\n" f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task " f"or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n" f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect " f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." ) else: logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: logger.warning( f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} " f"and are newly initialized: {missing_keys}\n" f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) elif len(mismatched_keys) == 0: logger.warning( f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n" f"If your task is similar to the task the model of the checkpoint was trained on, " f"you can already use {model.__class__.__name__} for predictions without further training." ) if len(mismatched_keys) > 0: mismatched_warning = "\n".join( [ f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" for key, shape1, shape2 in mismatched_keys ] ) logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} " f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n" f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) if output_loading_info: loading_info = { "missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "mismatched_keys": mismatched_keys, } return model, loading_info return model class TFConv1D(tf.keras.layers.Layer): """ 1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2). Basically works like a linear layer but the weights are transposed. Args: nf (:obj:`int`): The number of output features. nx (:obj:`int`): The number of input features. initializer_range (:obj:`float`, `optional`, defaults to 0.02): The standard deviation to use to initialize the weights. kwargs: Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`. """ def __init__(self, nf, nx, initializer_range=0.02, **kwargs): super().__init__(**kwargs) self.nf = nf self.nx = nx self.initializer_range = initializer_range def build(self, input_shape): self.weight = self.add_weight( "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range) ) self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer()) def call(self, x): bz, sl = shape_list(x)[:2] x = tf.reshape(x, [-1, self.nx]) x = tf.matmul(x, self.weight) + self.bias x = tf.reshape(x, [bz, sl, self.nf]) return x class TFSharedEmbeddings(tf.keras.layers.Layer): r""" Construct shared token embeddings. The weights of the embedding layer is usually shared with the weights of the linear decoder when doing language modeling. Args: vocab_size (:obj:`int`): The size of the vocabulary, e.g., the number of unique tokens. hidden_size (:obj:`int`): The size of the embedding vectors. initializer_range (:obj:`float`, `optional`): The standard deviation to use when initializing the weights. If no value is provided, it will default to :math:`1/\sqrt{hidden\_size}`. 
kwargs: Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`. """ def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.initializer_range = hidden_size ** -0.5 if initializer_range is None else initializer_range def build(self, input_shape): """ Build shared token embedding layer Shared weights logic adapted from https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ self.weight = self.add_weight( "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range) ) super().build(input_shape) def get_config(self): config = { "vocab_size": self.vocab_size, "hidden_size": self.hidden_size, "initializer_range": self.initializer_range, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor: """ Get token embeddings of inputs or decode final hidden state. Args: inputs (:obj:`tf.Tensor`): In embedding mode, should be an int64 tensor with shape :obj:`[batch_size, length]`. In linear mode, should be a float tensor with shape :obj:`[batch_size, length, hidden_size]`. mode (:obj:`str`, defaults to :obj:`"embedding"`): A valid value is either :obj:`"embedding"` or :obj:`"linear"`, the first one indicates that the layer should be used as an embedding layer, the second one that the layer should be used as a linear decoder. Returns: :obj:`tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape :obj:`[batch_size, length, embedding_size]`. In linear mode, the output is a float32 with shape :obj:`[batch_size, length, vocab_size]`. Raises: ValueError: if :obj:`mode` is not valid. Shared weights logic is adapted from `here <https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24>`__. """ if mode == "embedding": return self._embedding(inputs) elif mode == "linear": return self._linear(inputs) else: raise ValueError(f"mode {mode} is not valid.") def _embedding(self, input_ids): """Applies embedding based on inputs tensor.""" return tf.gather(self.weight, input_ids) def _linear(self, inputs): """ Computes logits by running inputs through a linear layer. Args: inputs: A float32 tensor with shape [..., hidden_size] Returns: float32 tensor with shape [..., vocab_size]. """ first_dims = shape_list(inputs)[:-1] x = tf.reshape(inputs, [-1, self.hidden_size]) logits = tf.matmul(x, self.weight, transpose_b=True) return tf.reshape(logits, first_dims + [self.vocab_size]) class TFSequenceSummary(tf.keras.layers.Layer): """ Compute a single vector summary of a sequence hidden states. Args: config (:class:`~transformers.PretrainedConfig`): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_type** (:obj:`str`) -- The method to use to make this summary. 
Accepted values are: - :obj:`"last"` -- Take the last token hidden state (like XLNet) - :obj:`"first"` -- Take the first token hidden state (like Bert) - :obj:`"mean"` -- Take the mean of all tokens hidden states - :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) - :obj:`"attn"` -- Not implemented now, use multi-head attention - **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to :obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`). - **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the output, another string or :obj:`None` will add no activation. - **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (:obj:`float`)-- Optional dropout probability after the projection and activation. initializer_range (:obj:`float`, defaults to 0.02): The standard deviation to use to initialize the weights. kwargs: Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`. """ def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs): super().__init__(**kwargs) self.summary_type = config.summary_type if hasattr(config, "summary_use_proj") else "last" if self.summary_type == "attn": # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj if self.has_summary: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = tf.keras.layers.Dense( num_classes, kernel_initializer=get_initializer(initializer_range), name="summary" ) self.has_activation = hasattr(config, "summary_activation") and config.summary_activation == "tanh" if self.has_activation: self.activation = tf.keras.activations.tanh self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0 if self.has_first_dropout: self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout) self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0 if self.has_last_dropout: self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout) def call(self, inputs, cls_index=None, training=False): if not isinstance(inputs, (dict, tuple, list)): hidden_states = inputs elif isinstance(inputs, (tuple, list)): hidden_states = inputs[0] cls_index = inputs[1] if len(inputs) > 1 else None assert len(inputs) <= 2, "Too many inputs." else: hidden_states = inputs.get("hidden_states") cls_index = inputs.get("cls_index", None) if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = tf.reduce_mean(hidden_states, axis=1) elif self.summary_type == "cls_index": hidden_shape = shape_list(hidden_states) # e.g. 
[batch, num choices, seq length, hidden dims] if cls_index is None: cls_index = tf.fill( hidden_shape[:-2], hidden_shape[-2] - 1 ) # A tensor full of shape [batch] or [batch, num choices] full of sequence length cls_shape = shape_list(cls_index) if len(cls_shape) <= len(hidden_shape) - 2: cls_index = tf.expand_dims(cls_index, axis=-1) # else: # cls_index = cls_index[..., tf.newaxis] # cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2) output = tf.squeeze( output, axis=len(hidden_shape) - 2 ) # shape of output: (batch, num choices, hidden_size) elif self.summary_type == "attn": raise NotImplementedError if self.has_first_dropout: output = self.first_dropout(output, training=training) if self.has_summary: output = self.summary(output) if self.has_activation: output = self.activation(output) if self.has_last_dropout: output = self.last_dropout(output, training=training) return output def shape_list(tensor: tf.Tensor) -> List[int]: """ Deal with dynamic shape in tensorflow cleanly. Args: tensor (:obj:`tf.Tensor`): The tensor we want the shape of. Returns: :obj:`List[int]`: The shape of the tensor as a list. """ dynamic = tf.shape(tensor) if tensor.shape == tf.TensorShape(None): return dynamic static = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(static)] def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal: """ Creates a :obj:`tf.initializers.TruncatedNormal` with the given range. Args: initializer_range (`float`, defaults to 0.02): Standard deviation of the initializer range. Returns: :obj:`tf.initializers.TruncatedNormal`: The truncated normal initializer. """ return tf.keras.initializers.TruncatedNormal(stddev=initializer_range) class TFWrappedEmbeddings: """ this class wraps a the TFSharedEmbeddingTokens layer into a python 'no-keras-layer' class to avoid problem with weight restoring. Also it makes sure that the layer is called from the correct scope to avoid problem with saving/storing the correct weights """ def __init__(self, layer, abs_scope_name=None): self._layer = layer self._abs_scope_name = abs_scope_name def call(self, inputs, mode="embedding"): if self._abs_scope_name is None: return self._layer.call(inputs, mode) # if an abs scope name is given to the embedding variable, call variable from absolute scope with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name: with tf.name_scope(abs_scope_name.original_name_scope): return self._layer.call(inputs, mode) def __call__(self, inputs, mode="embedding"): if self._abs_scope_name is None: return self._layer(inputs, mode) # if an abs scope name is given to the embedding variable, call variable from absolute scope with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name: with tf.name_scope(abs_scope_name.original_name_scope): return self._layer(inputs, mode)
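As a quick illustration of the two standalone helpers defined at the end of the file above, here is a minimal usage sketch (not part of the original source): it assumes `shape_list` and `get_initializer` from `modeling_tf_utils` are in scope, and shows why `shape_list` is needed when dimensions are only known at graph-execution time.

import tensorflow as tf

# Build a layer whose kernel uses the truncated-normal initializer helper.
dense = tf.keras.layers.Dense(8, kernel_initializer=get_initializer(0.02))

@tf.function(input_signature=[tf.TensorSpec((None, None, 4), tf.float32)])
def flatten_project(x):
    # Inside tf.function the first two dims are dynamic: tensor.shape gives
    # [None, None, 4], but shape_list returns [<scalar Tensor>, <scalar Tensor>, 4],
    # so the result can be fed straight into tf.reshape.
    batch, length, hidden = shape_list(x)
    out = dense(tf.reshape(x, [batch * length, hidden]))
    return tf.reshape(out, [batch, length, 8])

This mixed static/dynamic return value is the reason nearly every TF model in the file computes shapes via `shape_list` rather than `tensor.shape` directly.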
[ "tensorflow.convert_to_tensor", "numpy.asarray", "tensorflow.python.keras.backend.int_shape", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.python.keras.saving.hdf5_format.load_attributes_from_hdf5_group", "tensorflow.rank", "tensorflow.math.reduce_any", "tensorflow.gather", "tensorflow.name_scope", "tensorflow.keras.initializers.TruncatedNormal", "tensorflow.compat.v1.variable_scope", "tensorflow.matmul", "tensorflow.TensorShape", "tensorflow.executing_eagerly", "tensorflow.fill", "tensorflow.shape", "tensorflow.keras.losses.MeanSquaredError", "tensorflow.zeros_initializer", "tensorflow.python.keras.backend.batch_set_value", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.get_logger", "tensorflow.math.greater", "tensorflow.keras.layers.Dropout", "tensorflow.TensorSpec" ]
src/transformers/modeling_tf_utils.py
[(49, 'tensorflow.get_logger', 'tf.get_logger', ([], {}), True, 'import tensorflow as tf\n'), (105, 'functools.wraps', 'functools.wraps', (['initializer'], {}), False, 'import functools\n'), (273, 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), True, 'import tensorflow as tf\n'), (555, 'tensorflow.python.keras.backend.batch_set_value', 'K.batch_set_value', (['weight_value_tuples'], {}), True, 'from tensorflow.python.keras import backend as K\n'), (582, 'tensorflow.math.greater', 'tf.math.greater', (['size_diff', '(0)'], {}), True, 'import tensorflow as tf\n'), (1659, 'tensorflow.shape', 'tf.shape', (['tensor'], {}), True, 'import tensorflow as tf\n'), (1679, 'tensorflow.keras.initializers.TruncatedNormal', 'tf.keras.initializers.TruncatedNormal', ([], {'stddev': 'initializer_range'}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (195, 'tensorflow.math.reduce_any', 'tf.math.reduce_any', (['(labels == -1)'], {}), True, 'import tensorflow as tf\n'), (245, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (339, 'warnings.warn', 'warnings.warn', (['"""The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (347, 'warnings.warn', 'warnings.warn', (['"""The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (474, 'h5py.File', 'h5py.File', (['resolved_archive_file', '"""r"""'], {}), False, 'import h5py\n'), (766, 'warnings.warn', 'warnings.warn', (['"""The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (778, 'warnings.warn', 'warnings.warn', (['"""The method get_prefix_bias_name is deprecated. 
Please use `get_bias` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (1059, 'os.path.isfile', 'os.path.isfile', (['save_directory'], {}), False, 'import os\n'), (1067, 'os.makedirs', 'os.makedirs', (['save_directory'], {'exist_ok': '(True)'}), False, 'import os\n'), (1079, 'os.path.join', 'os.path.join', (['save_directory', 'TF2_WEIGHTS_NAME'], {}), False, 'import os\n'), (1320, 'os.path.isfile', 'os.path.isfile', (['resolved_archive_file'], {}), False, 'import os\n'), (1427, 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, self.nx]'], {}), True, 'import tensorflow as tf\n'), (1430, 'tensorflow.reshape', 'tf.reshape', (['x', '[bz, sl, self.nf]'], {}), True, 'import tensorflow as tf\n'), (1514, 'tensorflow.gather', 'tf.gather', (['self.weight', 'input_ids'], {}), True, 'import tensorflow as tf\n'), (1527, 'tensorflow.reshape', 'tf.reshape', (['inputs', '[-1, self.hidden_size]'], {}), True, 'import tensorflow as tf\n'), (1528, 'tensorflow.matmul', 'tf.matmul', (['x', 'self.weight'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (1530, 'tensorflow.reshape', 'tf.reshape', (['logits', '(first_dims + [self.vocab_size])'], {}), True, 'import tensorflow as tf\n'), (1661, 'tensorflow.TensorShape', 'tf.TensorShape', (['None'], {}), True, 'import tensorflow as tf\n'), (139, 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (196, 'warnings.warn', 'warnings.warn', (['"""Using `-1` to mask the loss for the token is deprecated. 
Please use `-100` instead."""'], {}), False, 'import warnings\n'), (201, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (213, 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (215, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (250, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (251, 'tensorflow.reshape', 'tf.reshape', (['logits', '(-1, 2)'], {}), True, 'import tensorflow as tf\n'), (252, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (331, 'inspect.signature', 'inspect.signature', (['func'], {}), False, 'import inspect\n'), (476, 'tensorflow.python.keras.saving.hdf5_format.load_attributes_from_hdf5_group', 'hdf5_format.load_attributes_from_hdf5_group', (['f', '"""layer_names"""'], {}), False, 'from tensorflow.python.keras.saving import hdf5_format\n'), (587, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[0, size_diff], [0, 0]]'], {}), True, 'import tensorflow as tf\n'), (590, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[num_tokens_to_copy, 1]'], {}), True, 'import tensorflow as tf\n'), (591, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[0, size_diff], [0, 0]]'], {}), True, 'import tensorflow as tf\n'), (596, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[0, 0]'], {}), True, 'import tensorflow as tf\n'), (597, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[new_num_tokens, old_embedding_dim]'], {}), True, 'import tensorflow as tf\n'), (599, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[new_num_tokens, 1]'], {}), True, 'import tensorflow as tf\n'), (640, 'tensorflow.constant', 'tf.constant', (['DUMMY_INPUTS'], {}), True, 'import tensorflow as tf\n'), (920, 'tensorflow.math.greater', 'tf.math.greater', (['size_diff', '(0)'], {}), True, 'import tensorflow as tf\n'), (1246, 'os.path.isdir', 'os.path.isdir', (['pretrained_model_name_or_path'], {}), False, 'import os\n'), (1428, 'tensorflow.matmul', 'tf.matmul', (['x', 'self.weight'], {}), True, 'import tensorflow as tf\n'), (1591, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.summary_first_dropout'], {}), True, 'import tensorflow as tf\n'), (1595, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.summary_last_dropout'], {}), True, 'import tensorflow as tf\n'), (1698, 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['self._abs_scope_name'], {'auxiliary_name_scope': '(False)'}), True, 'import tensorflow as tf\n'), (1707, 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['self._abs_scope_name'], {'auxiliary_name_scope': '(False)'}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (386, 'warnings.warn', 'warnings.warn', (['"""The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (394, 'warnings.warn', 'warnings.warn', (['"""The 
`decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (500, 'tensorflow.python.keras.saving.hdf5_format.load_attributes_from_hdf5_group', 'hdf5_format.load_attributes_from_hdf5_group', (['h5_layer_object', '"""weight_names"""'], {}), False, 'from tensorflow.python.keras.saving import hdf5_format\n'), (1315, 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['load_weight_prefix'], {}), True, 'import tensorflow as tf\n'), (1422, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (1699, 'tensorflow.name_scope', 'tf.name_scope', (['abs_scope_name.original_name_scope'], {}), True, 'import tensorflow as tf\n'), (1708, 'tensorflow.name_scope', 'tf.name_scope', (['abs_scope_name.original_name_scope'], {}), True, 'import tensorflow as tf\n'), (507, 'numpy.asarray', 'np.asarray', (['h5_layer_object[weight_name]'], {}), True, 'import numpy as np\n'), (665, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None)', 'tf.int32'], {'name': '"""input_ids"""'}), True, 'import tensorflow as tf\n'), (666, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None)', 'tf.int32'], {'name': '"""attention_mask"""'}), True, 'import tensorflow as tf\n'), (667, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None)', 'tf.int32'], {'name': '"""token_type_ids"""'}), True, 'import tensorflow as tf\n'), (915, 'tensorflow.rank', 'tf.rank', (['weight'], {}), True, 'import tensorflow as tf\n'), (922, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['padding_shape'], {}), True, 'import tensorflow as tf\n'), (925, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['mask_shape'], {}), True, 'import tensorflow as tf\n'), (926, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['padding_shape'], {}), True, 'import tensorflow as tf\n'), (930, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['slice_from'], {}), True, 'import tensorflow as tf\n'), (930, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['final_shape'], {}), True, 'import tensorflow as tf\n'), (932, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['final_shape'], {}), True, 'import tensorflow as tf\n'), (1249, 'os.path.join', 'os.path.join', (['pretrained_model_name_or_path', 'WEIGHTS_NAME'], {}), False, 'import os\n'), (1258, 'os.path.isfile', 'os.path.isfile', (['pretrained_model_name_or_path'], {}), False, 'import os\n'), (1260, 'os.path.isfile', 'os.path.isfile', (["(pretrained_model_name_or_path + '.index')"], {}), False, 'import os\n'), (1613, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['hidden_states'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (1247, 'os.path.join', 'os.path.join', (['pretrained_model_name_or_path', 'WEIGHTS_NAME'], {}), False, 'import os\n'), (1250, 'os.path.join', 'os.path.join', (['pretrained_model_name_or_path', 'TF2_WEIGHTS_NAME'], {}), False, 'import os\n'), (1252, 'os.path.join', 'os.path.join', (['pretrained_model_name_or_path', 'TF2_WEIGHTS_NAME'], {}), False, 'import os\n'), (535, 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['symbolic_weight'], {}), True, 'from tensorflow.python.keras import backend as K\n'), (1340, 're.search', 're.search', (['pat', 'k'], {}), False, 'import re\n'), (1344, 're.search', 're.search', (['pat', 'k'], {}), False, 'import re\n'), (1617, 'tensorflow.fill', 'tf.fill', (['hidden_shape[:-2]', '(hidden_shape[-2] - 1)'], {}), True, 
'import tensorflow as tf\n'), (1622, 'tensorflow.expand_dims', 'tf.expand_dims', (['cls_index'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (539, 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['symbolic_weight'], {}), True, 'from tensorflow.python.keras import backend as K\n'), (543, 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['symbolic_weight'], {}), True, 'from tensorflow.python.keras import backend as K\n')]
WeiHao97/DIVA
88dec4a4b881fef34ee2775d6a68a4d32849a5d7
import numpy as np import os os.environ["CUDA_VISIBLE_DEVICES"]="0" os.environ['TF_DETERMINISTIC_OPS'] = '1' import tensorflow as tf from tensorflow.python.framework.ops import enable_eager_execution enable_eager_execution() from tensorflow import keras from tensorflow.keras import layers import tensorflow_datasets as tfds import tensorflow_model_optimization as tfmot from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras import backend as K import time def normalize(x): # utility function to normalize a tensor by its L2 norm return x / (K.sqrt(K.mean(K.square(x))) + 1e-5) # Quantization spec for Batchnormalization layer class DefaultBNQuantizeConfig(tfmot.quantization.keras.QuantizeConfig): def get_weights_and_quantizers(self, layer): return [] def get_activations_and_quantizers(self, layer): return [] def set_quantize_weights(self, layer, quantize_weights): pass def set_quantize_activations(self, layer, quantize_activations): pass def get_output_quantizers(self, layer): return [tfmot.quantization.keras.quantizers.MovingAverageQuantizer(num_bits=8, per_axis=False, symmetric=False, narrow_range=False)] def get_config(self): return {} # Quantization spec (null) for concat layer class NoOpQuantizeConfig(tfmot.quantization.keras.QuantizeConfig): """Use this config object if the layer has nothing to be quantized for quantization aware training.""" def get_weights_and_quantizers(self, layer): return [] def get_activations_and_quantizers(self, layer): return [] def set_quantize_weights(self, layer, quantize_weights): pass def set_quantize_activations(self, layer, quantize_activations): pass def get_output_quantizers(self, layer): # Does not quantize output, since we return an empty list. return [] def get_config(self): return {} # Quantization spec func for DenseNet def apply_quantization(layer): if 'bn' in layer.name: return tfmot.quantization.keras.quantize_annotate_layer(layer,DefaultBNQuantizeConfig()) elif 'concat' in layer.name: return tfmot.quantization.keras.quantize_annotate_layer(layer,NoOpQuantizeConfig()) else: return tfmot.quantization.keras.quantize_annotate_layer(layer) # hyper-parameters BATCH_SIZE = 50 c = 1 grad_iterations = 20 step = 1 epsilon = 8 mode = 'm' # 'm' for MobileNet, 'r' for ResNet, 'd' for DenseNet img_rows, img_cols, num_channel = 224 ,224, 3 # input image dimensions #Load Dataset es = {'file_name': tf.TensorSpec(shape=(), dtype=tf.string, name=None), 'image': tf.TensorSpec(shape=(img_rows, img_cols, num_channel), dtype=tf.float32, name=None), 'label': tf.TensorSpec(shape=(), dtype=tf.int64, name=None)} mydataset = tf.data.experimental.load("../../datasets/ImageNet/quantization/3kImages/",es).batch(BATCH_SIZE).prefetch(1) # Construct models if mode == 'm': model_ = tf.keras.applications.MobileNet(input_shape= (img_rows, img_cols,3)) q_model = tfmot.quantization.keras.quantize_model(model_) model = tf.keras.applications.MobileNet(input_shape= (img_rows, img_cols,3)) d_model = tf.keras.applications.MobileNet(input_tensor = q_model.input) model.load_weights("../../weights/fp_model_40_mobilenet.h5")# load model weight q_model.load_weights("../../weights/q_model_40_mobilenet.h5") d_model.load_weights("../../weights/distilled_fp_model_40_mobilenet.h5") model.trainable = False q_model.trainable = False d_model.trainable = False preprocess = tf.keras.applications.mobilenet.preprocess_input decode = tf.keras.applications.mobilenet.decode_predictions net = 'mobile' elif mode == 'r': model_ = ResNet50(input_shape= (img_rows, img_cols,3)) q_model = 
tfmot.quantization.keras.quantize_model(model_) model = ResNet50(input_shape= (img_rows, img_cols,3)) d_model = ResNet50(input_tensor = q_model.input) model.load_weights("../../weights/fp_model_40_resnet50.h5")# load model weight q_model.load_weights("../../weights/q_model_40_resnet50.h5") d_model.load_weights("../../weights/distilled_fp_model_40_resnet50.h5") model.trainable = False q_model.trainable = False d_model.trainable = False preprocess = tf.keras.applications.resnet.preprocess_input decode = tf.keras.applications.resnet.decode_predictions net = 'res' else: model_ = tf.keras.applications.DenseNet121(input_shape=(img_rows, img_cols,3)) # Create a base model base_model = model_ # Helper function uses `quantize_annotate_layer` to annotate that only the # Dense layers should be quantized. LastValueQuantizer = tfmot.quantization.keras.quantizers.LastValueQuantizer MovingAverageQuantizer = tfmot.quantization.keras.quantizers.MovingAverageQuantizer # Use `tf.keras.models.clone_model` to apply `apply_quantization_to_dense` # to the layers of the model. annotated_model = tf.keras.models.clone_model( base_model, clone_function=apply_quantization, ) with tfmot.quantization.keras.quantize_scope({'DefaultBNQuantizeConfig': DefaultBNQuantizeConfig, 'NoOpQuantizeConfig': NoOpQuantizeConfig}): q_model = tfmot.quantization.keras.quantize_apply(annotated_model) model = tf.keras.applications.DenseNet121(input_shape= (img_rows, img_cols,3)) d_model = tf.keras.applications.DenseNet121(input_tensor = q_model.input) model.load_weights("../../weights/fp_model_40_densenet121.h5")# load model weight q_model.load_weights("../../weights/q_model_40_densenet121.h5") d_model.load_weights("../../weights/distilled_fp_model_40_densenet121.h5") model.trainable = False q_model.trainable = False d_model.trainable = False preprocess = tf.keras.applications.densenet.preprocess_input decode = tf.keras.applications.densenet.decode_predictions net = 'dense' # DIVA attack for top-1 def second(image,label): orig_img = tf.identity(image) input_image = tf.identity(image) # Compute clean prediction and aquire labels orig_logist = tf.identity(model.predict(preprocess(input_image)[None,...]) ) orig_label = np.argmax(orig_logist[0]) quant_logist = tf.identity(q_model.predict(preprocess(input_image)[None,...])) quant_label = np.argmax(quant_logist[0]) d_logist = tf.identity(d_model.predict(preprocess(input_image)[None,...])) d_label = np.argmax(d_logist[0]) # Check for unqualified input if orig_label != quant_label or orig_label != d_label: print(orig_label) return -2,-2,-2,-2,-2 if orig_label != label: return -3,-3,-3,-3,-3 # Initialize attack to 0 A = 0 start_time = time.time() for iters in range(0,grad_iterations): # Compute loss with tf.GradientTape() as g: g.watch(input_image) loss1 = K.mean(d_model(preprocess(input_image)[None,...], training = False)[..., orig_label]) loss2 = K.mean(q_model(preprocess(input_image)[None,...], training = False)[..., orig_label]) final_loss = K.mean(loss1 - c*loss2) # Compute attack grads = normalize(g.gradient(final_loss, input_image)) adv_image = input_image + tf.sign(grads) * step A = tf.clip_by_value(adv_image - orig_img, -epsilon, epsilon) input_image = tf.clip_by_value(orig_img + A, 0, 255) test_image = preprocess(input_image)[None,...] 
# Compute new predictions pred1, pred2= d_model.predict(test_image), q_model.predict(test_image) label1, label2 = np.argmax(pred1[0]), np.argmax(pred2[0]) pred3 = model.predict(test_image) label3 = np.argmax(pred3[0]) if not label1 == label2: if label1 == orig_label and decode(pred1, top=1)[0][0][2] > 0.6: # If successfully fool the quantized model but not the distilled fp model # also the conf score is higher than 0.6 # time to generate the successful attack total_time = time.time() - start_time gen_img_deprocessed = input_image# adversarial image orig_img_deprocessed = orig_img # original image A = (gen_img_deprocessed - orig_img_deprocessed).numpy() # attack #Since the final goal for the attack is to keep undetected by the original model #its still a failure if the original model mispredicted the label if label3 != orig_label: return -1, -1, -1, gen_img_deprocessed, A norm = np.max(np.abs(A)) # adversarial distance return total_time, norm, iters, gen_img_deprocessed, A gen_img_deprocessed = input_image # generated non-adversarial image orig_img_deprocessed = orig_img # original image A = (gen_img_deprocessed - orig_img_deprocessed).numpy() # differences return -1, -1, -1, gen_img_deprocessed, A # Top-k evaluation def topk(model_pred, qmodel_pred, k): preds = decode(model_pred, top=k) qpreds = decode(qmodel_pred, top=1)[0][0][1] for pred in preds[0]: if pred[1] == qpreds: return True return False # DIVA attack for top-k def secondk(image,k): orig_img = tf.identity(image) input_image = tf.identity(image) # Compute clean prediction and aquire labels orig_logist = tf.identity(model.predict(preprocess(input_image)[None,...]) ) orig_label = np.argmax(orig_logist[0]) quant_logist = tf.identity(q_model.predict(preprocess(input_image)[None,...])) quant_label = np.argmax(quant_logist[0]) d_logist = tf.identity(d_model.predict(preprocess(input_image)[None,...])) d_label = np.argmax(d_logist[0]) # Check for unqualified input if orig_label != quant_label or orig_label != d_label: return -2,-2,-2,-2,-2 # Initialize attack to 0 A = 0 start_time = time.time() for iters in range(0,grad_iterations): # Compute loss with tf.GradientTape() as g: g.watch(input_image) loss1 = K.mean(d_model(preprocess(input_image)[None,...], training = False)[..., orig_label]) loss2 = K.mean(q_model(preprocess(input_image)[None,...], training = False)[..., orig_label]) final_loss = K.mean(loss1 - c*loss2) # Compute attack grads = normalize(g.gradient(final_loss, input_image)) adv_image = input_image + tf.sign(grads) * step A = tf.clip_by_value(adv_image - orig_img, -epsilon, epsilon) input_image = tf.clip_by_value(orig_img + A, 0, 255) test_image = preprocess(input_image)[None,...] 
        # Compute new predictions
        pred1, pred2 = d_model.predict(test_image), q_model.predict(test_image)
        label1, label2 = np.argmax(pred1[0]), np.argmax(pred2[0])
        pred3 = model.predict(test_image)
        label3 = np.argmax(pred3[0])

        if not topk(pred1, pred2, k):
            if label1 == orig_label and decode(pred1, top=1)[0][0][2] > 0.6:
                # If we successfully fool the quantized model but not the distilled fp model,
                # and the conf score is higher than 0.6:
                # time to generate the successful attack
                total_time = time.time() - start_time

                gen_img_deprocessed = input_image  # adversarial image
                orig_img_deprocessed = orig_img  # original image
                A = (gen_img_deprocessed - orig_img_deprocessed).numpy()  # attack

                # Since the final goal of the attack is to stay undetected by the original model,
                # it's still a failure if the original model mispredicted the label
                if label3 == orig_label and not topk(pred3, pred2, k):
                    norm = np.max(np.abs(A))  # adversarial distance
                    return total_time, norm, iters, gen_img_deprocessed, A
                else:
                    return -1, -1, -1, gen_img_deprocessed, A

    gen_img_deprocessed = input_image  # generated non-adversarial image
    orig_img_deprocessed = orig_img  # original image
    A = (gen_img_deprocessed - orig_img_deprocessed).numpy()  # differences

    return -1, -1, -1, gen_img_deprocessed, A

def calc_normal_success(method, methodk, ds, folderName='', filterName='', dataName='', dataFolder='', locald=''):
    total = 0  # number of images seen
    badimg = 0  # number of unqualified images
    count = 0  # number of successful top-1 attacks
    top5 = 0  # number of successful top-5 attacks
    timeStore = []  # time to generate the top-1 attack
    advdistStore = []  # adversarial distance for the top-1 attack
    stepsStore = []  # steps taken to generate the top-1 attack
    timeStorek = []  # time to generate the top-k (k=5) attack
    advdistStorek = []  # adversarial distance for the top-k attack
    stepsStorek = []  # steps taken to generate the top-k attack
    failure = 0  # number of failed attacks

    for i, features in enumerate(ds):
        images = features['image']
        labels = features['label']

        for j, image in enumerate(images):
            label = labels[j].numpy()

            # attempt the top-1 attack
            # NOTE: `time` here shadows the time module inside this function's scope
            time, advdist, steps, gen, A = method(image, label)
            total += 1

            # if the attack failed
            if time == -1:
                print("Didn't find anything")
                # np.save(locald + 'failure/' + folderName+"/"+dataName+str(failure)+"@"+str(total)+".npy", gen)
                # np.save(locald + 'failure/' + filterName+"/"+dataName+str(failure)+"@"+str(total)+".npy", A)
                failure += 1
                continue

            # if it's a bad image
            if time == -2:
                badimg += 1
                total -= 1
                failure += 1
                print("Bad Image", badimg)
                continue

            # if it's an incorrect image
            if time == -3:
                badimg += 1
                total -= 1
                failure += 1
                print("Incorrect Image", badimg)
                continue

            count += 1  # top-1 succeeded
            # np.save(locald+folderName+"/"+dataName+str(count)+"@"+str(total)+".npy", gen)
            # np.save(locald+filterName+"/"+dataName+str(count)+"@"+str(total)+".npy", A)

            print("Number seen:", total)
            print("No. worked:", count)
            print("No. topk:", top5)
            print("Bad Image:", badimg)

            timeStore.append(time)
            advdistStore.append(advdist)
            stepsStore.append(steps)

            # with open(locald+dataFolder+"/"+dataName+'_time_data.csv', 'a') as f:
            #     f.write(str(time) + ", ")
            # with open(locald+dataFolder+"/"+dataName+'_advdist_data.csv', 'a') as f:
            #     f.write(str(advdist) + ", ")
            # with open(locald+dataFolder+"/"+dataName+'_steps_data.csv', 'a') as f:
            #     f.write(str(steps) + ", ")

            # attempt the top-5 attack
            print("starting k search")
            time, advdist, steps, gen, A = methodk(image, 5)

            # if the attack failed
            if time == -1:
                print("Didn't find anything in K")
                # np.save(locald + 'failure/' + folderName+"/"+dataName+"k"+str(failure)+".npy", gen)
                # np.save(locald + 'failure/' + filterName+"/"+ dataName+"k"+str(failure)+".npy", A)
                continue

            # if it's a bad image
            if time == -2:
                print("Bad Image in K", badimg)
                continue

            top5 += 1
            # np.save(locald+folderName+"/"+dataName+"k"+str(count)+".npy", gen)
            # np.save(locald+filterName+"/"+dataName+"k"+str(count)+".npy", A)

            timeStorek.append(time)
            advdistStorek.append(advdist)
            stepsStorek.append(steps)

            # with open(locald+dataFolder+"/"+dataName+'_timek_data.csv', 'a') as f:
            #     f.write(str(time) + ", ")
            # with open(locald+dataFolder+"/"+dataName+'_advdistk_data.csv', 'a') as f:
            #     f.write(str(advdist) + ", ")
            # with open(locald+dataFolder+"/"+dataName+'_stepsk_data.csv', 'a') as f:
            #     f.write(str(steps) + ", ")

    print("Number seen:", total)
    print("No. worked:", count)
    print("No. topk:", top5)
    print("Bad Image:", badimg)

calc_normal_success(second, secondk, mydataset,
                    folderName=net + 'net_imagenet_images_second',
                    filterName=net + 'net_imagenet_filters_second',
                    dataName='second',
                    dataFolder=net + 'net_imagenet_data_second',
                    locald='./results/SemiBB/' + net + 'net_c1/' + net + 'net/')
[ "tensorflow.python.framework.ops.enable_eager_execution", "tensorflow.keras.applications.MobileNet", "tensorflow.keras.applications.resnet50.ResNet50", "tensorflow.clip_by_value", "tensorflow.sign", "tensorflow.keras.models.clone_model", "tensorflow.data.experimental.load", "numpy.abs", "tensorflow.identity", "tensorflow.keras.backend.square", "numpy.argmax", "tensorflow.keras.backend.mean", "tensorflow.keras.applications.DenseNet121", "tensorflow.TensorSpec", "tensorflow.GradientTape" ]
quantization/ImageNet/semiBBattack.py
[(7, 'tensorflow.python.framework.ops.enable_eager_execution', 'enable_eager_execution', ([], {}), False, 'from tensorflow.python.framework.ops import enable_eager_execution\n'), (82, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '()', 'dtype': 'tf.string', 'name': 'None'}), True, 'import tensorflow as tf\n'), (83, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(img_rows, img_cols, num_channel)', 'dtype': 'tf.float32', 'name': 'None'}), True, 'import tensorflow as tf\n'), (84, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '()', 'dtype': 'tf.int64', 'name': 'None'}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.keras.applications.MobileNet', 'tf.keras.applications.MobileNet', ([], {'input_shape': '(img_rows, img_cols, 3)'}), True, 'import tensorflow as tf\n'), (90, 'tensorflow_model_optimization.quantization.keras.quantize_model', 'tfmot.quantization.keras.quantize_model', (['model_'], {}), True, 'import tensorflow_model_optimization as tfmot\n'), (91, 'tensorflow.keras.applications.MobileNet', 'tf.keras.applications.MobileNet', ([], {'input_shape': '(img_rows, img_cols, 3)'}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.keras.applications.MobileNet', 'tf.keras.applications.MobileNet', ([], {'input_tensor': 'q_model.input'}), True, 'import tensorflow as tf\n'), (155, 'tensorflow.identity', 'tf.identity', (['image'], {}), True, 'import tensorflow as tf\n'), (156, 'tensorflow.identity', 'tf.identity', (['image'], {}), True, 'import tensorflow as tf\n'), (160, 'numpy.argmax', 'np.argmax', (['orig_logist[0]'], {}), True, 'import numpy as np\n'), (162, 'numpy.argmax', 'np.argmax', (['quant_logist[0]'], {}), True, 'import numpy as np\n'), (164, 'numpy.argmax', 'np.argmax', (['d_logist[0]'], {}), True, 'import numpy as np\n'), (176, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (240, 'tensorflow.identity', 'tf.identity', (['image'], {}), True, 'import tensorflow as tf\n'), (241, 'tensorflow.identity', 'tf.identity', (['image'], {}), True, 'import tensorflow as tf\n'), (245, 'numpy.argmax', 'np.argmax', (['orig_logist[0]'], {}), True, 'import numpy as np\n'), (247, 'numpy.argmax', 'np.argmax', (['quant_logist[0]'], {}), True, 'import numpy as np\n'), (249, 'numpy.argmax', 'np.argmax', (['d_logist[0]'], {}), True, 'import numpy as np\n'), (257, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (104, 'tensorflow.keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'input_shape': '(img_rows, img_cols, 3)'}), False, 'from tensorflow.keras.applications.resnet50 import ResNet50\n'), (105, 'tensorflow_model_optimization.quantization.keras.quantize_model', 'tfmot.quantization.keras.quantize_model', (['model_'], {}), True, 'import tensorflow_model_optimization as tfmot\n'), (106, 'tensorflow.keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'input_shape': '(img_rows, img_cols, 3)'}), False, 'from tensorflow.keras.applications.resnet50 import ResNet50\n'), (107, 'tensorflow.keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'input_tensor': 'q_model.input'}), False, 'from tensorflow.keras.applications.resnet50 import ResNet50\n'), (120, 'tensorflow.keras.applications.DenseNet121', 'tf.keras.applications.DenseNet121', ([], {'input_shape': '(img_rows, img_cols, 3)'}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.keras.models.clone_model', 'tf.keras.models.clone_model', (['base_model'], {'clone_function': 'apply_quantization'}), True, 'import tensorflow as tf\n'), (139, 
'tensorflow.keras.applications.DenseNet121', 'tf.keras.applications.DenseNet121', ([], {'input_shape': '(img_rows, img_cols, 3)'}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.keras.applications.DenseNet121', 'tf.keras.applications.DenseNet121', ([], {'input_tensor': 'q_model.input'}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(adv_image - orig_img)', '(-epsilon)', 'epsilon'], {}), True, 'import tensorflow as tf\n'), (191, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(orig_img + A)', '(0)', '(255)'], {}), True, 'import tensorflow as tf\n'), (198, 'numpy.argmax', 'np.argmax', (['pred3[0]'], {}), True, 'import numpy as np\n'), (271, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(adv_image - orig_img)', '(-epsilon)', 'epsilon'], {}), True, 'import tensorflow as tf\n'), (272, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(orig_img + A)', '(0)', '(255)'], {}), True, 'import tensorflow as tf\n'), (279, 'numpy.argmax', 'np.argmax', (['pred3[0]'], {}), True, 'import numpy as np\n'), (34, 'tensorflow_model_optimization.quantization.keras.quantizers.MovingAverageQuantizer', 'tfmot.quantization.keras.quantizers.MovingAverageQuantizer', ([], {'num_bits': '(8)', 'per_axis': '(False)', 'symmetric': '(False)', 'narrow_range': '(False)'}), True, 'import tensorflow_model_optimization as tfmot\n'), (70, 'tensorflow_model_optimization.quantization.keras.quantize_annotate_layer', 'tfmot.quantization.keras.quantize_annotate_layer', (['layer'], {}), True, 'import tensorflow_model_optimization as tfmot\n'), (136, 'tensorflow_model_optimization.quantization.keras.quantize_scope', 'tfmot.quantization.keras.quantize_scope', (["{'DefaultBNQuantizeConfig': DefaultBNQuantizeConfig, 'NoOpQuantizeConfig':\n NoOpQuantizeConfig}"], {}), True, 'import tensorflow_model_optimization as tfmot\n'), (137, 'tensorflow_model_optimization.quantization.keras.quantize_apply', 'tfmot.quantization.keras.quantize_apply', (['annotated_model'], {}), True, 'import tensorflow_model_optimization as tfmot\n'), (181, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (185, 'tensorflow.keras.backend.mean', 'K.mean', (['(loss1 - c * loss2)'], {}), True, 'from tensorflow.keras import backend as K\n'), (196, 'numpy.argmax', 'np.argmax', (['pred1[0]'], {}), True, 'import numpy as np\n'), (196, 'numpy.argmax', 'np.argmax', (['pred2[0]'], {}), True, 'import numpy as np\n'), (262, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (266, 'tensorflow.keras.backend.mean', 'K.mean', (['(loss1 - c * loss2)'], {}), True, 'from tensorflow.keras import backend as K\n'), (277, 'numpy.argmax', 'np.argmax', (['pred1[0]'], {}), True, 'import numpy as np\n'), (277, 'numpy.argmax', 'np.argmax', (['pred2[0]'], {}), True, 'import numpy as np\n'), (85, 'tensorflow.data.experimental.load', 'tf.data.experimental.load', (['"""../../datasets/ImageNet/quantization/3kImages/"""', 'es'], {}), True, 'import tensorflow as tf\n'), (189, 'tensorflow.sign', 'tf.sign', (['grads'], {}), True, 'import tensorflow as tf\n'), (270, 'tensorflow.sign', 'tf.sign', (['grads'], {}), True, 'import tensorflow as tf\n'), (18, 'tensorflow.keras.backend.square', 'K.square', (['x'], {}), True, 'from tensorflow.keras import backend as K\n'), (206, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (217, 'numpy.abs', 'np.abs', (['A'], {}), True, 'import numpy as np\n'), (287, 'time.time', 'time.time', ([], {}), False, 'import 
time\n'), (295, 'numpy.abs', 'np.abs', (['A'], {}), True, 'import numpy as np\n')]
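
The heart of `second` and `secondk` in the record above is a projected gradient step: ascend on final_loss = loss1 - c*loss2 (raise the distilled full-precision model's confidence while lowering the quantized model's), then clip the perturbation to an epsilon ball and the pixels back to [0, 255]. A minimal self-contained sketch of that loop follows; the toy scoring function stands in for the real models, the RMS normalization is assumed to match the script's `normalize` helper, and the epsilon/step/c values are illustrative, not the script's settings.

import tensorflow as tf

epsilon, step, c = 8.0, 1.0, 1.0                       # assumed hyperparameters
orig_img = tf.random.uniform((32, 32, 3), 0.0, 255.0)  # toy stand-in for an input image
input_image = tf.identity(orig_img)

def toy_score(x):
    # differentiable stand-in for model(...)[..., orig_label]
    return tf.reduce_mean(tf.sin(x / 255.0))

for _ in range(10):
    with tf.GradientTape() as g:
        g.watch(input_image)
        # maximize the full-precision score, minimize the quantized score
        final_loss = toy_score(input_image) - c * toy_score(0.99 * input_image)
    grads = g.gradient(final_loss, input_image)
    grads = grads / (tf.sqrt(tf.reduce_mean(tf.square(grads))) + 1e-5)  # RMS-normalize
    adv_image = input_image + tf.sign(grads) * step
    A = tf.clip_by_value(adv_image - orig_img, -epsilon, epsilon)  # stay in the eps-ball
    input_image = tf.clip_by_value(orig_img + A, 0.0, 255.0)       # keep valid pixel range
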
lisy14liz/importance-sampling
15040a3c4435735e9b0155d3d0228909bd4e47b7
#
# Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <[email protected]>
#

from functools import partial

from blinker import signal
import tensorflow as tf
from tensorflow.keras.layers import Dense, Embedding, Flatten, Input, LSTM, Masking, \
    concatenate
from tensorflow.keras.models import Model
import numpy as np


def _get_dataset_length(dset, default=1):
    """Return the dataset's training data length and in case the dataset is
    uncountable return a default value."""
    try:
        return len(dset.train_data)
    except RuntimeError:
        return default


class BaseSampler(object):
    """BaseSampler denotes the interface for all the samplers.

    Samplers should provide the rest of the program with data points to
    train on and corresponding relative weights."""
    def __init__(self, dataset, reweighting):
        self.dataset = dataset
        self.reweighting = reweighting

    def _slice_data(self, x, y, idxs):
        if isinstance(x, (list, tuple)):
            return [xi[idxs] for xi in x], y[idxs]
        else:
            return x[idxs], y[idxs]

    def _send_messages(self, idxs, xy, w, predicted_scores):
        signal("is.sample").send({
            "idxs": idxs,
            "xy": xy,
            "w": w,
            "predicted_scores": predicted_scores
        })

    def _get_samples_with_scores(self, batch_size):
        """Child classes should implement this method.

        Arguments
        ---------
        batch_size: int
            Return at least that many samples

        Return
        ------
        idxs: array
            The indices of some samples in the dataset
        scores: array or None
            The predicted importance scores for the corresponding idxs or
            None for uniform sampling
        xy: tuple or None
            Optionally return the data for the corresponding idxs
        """
        raise NotImplementedError()

    def sample(self, batch_size):
        # Get the importance scores of some samples
        idxs1, scores, xy = self._get_samples_with_scores(batch_size)

        # Sample from the available ones (guard against uniform sampling,
        # where scores is None, and against an all-zero score vector)
        if scores is not None and abs(scores.sum()) < 1e-5:
            scores = np.ones(scores.shape)
        p = scores / scores.sum() if scores is not None else None
        idxs2 = np.random.choice(len(idxs1), batch_size, p=p)
        w = self.reweighting.sample_weights(idxs2, scores)

        # Make sure we have the data
        if xy is None:
            xy = self.dataset.train_data[idxs1[idxs2]]
        else:
            x, y = xy
            xy = self._slice_data(x, y, idxs2)

        scores = scores[idxs2] if scores is not None else np.ones(batch_size)
        self._send_messages(idxs1[idxs2], xy, w, scores)

        return idxs1[idxs2], xy, w

    def update(self, idxs, results):
        pass


class UniformSampler(BaseSampler):
    """UniformSampler is the simplest possible sampler which samples the
    dataset uniformly."""
    def __init__(self, dataset, reweighting):
        super(UniformSampler, self).__init__(dataset, reweighting)
        # Basically if we don't know the length the indices don't matter so
        # sample batch_size 0s.
        self.idxs = np.arange(_get_dataset_length(self.dataset, default=1))

    def _get_samples_with_scores(self, batch_size):
        return (
            self.idxs,
            None,
            None
        )


class ModelSampler(BaseSampler):
    """ModelSampler uses a model to score the samples and then performs
    importance sampling based on those scores.

    It can be used to implement several training pipelines where the scoring
    model is separately trained or is sampled from the main model or is the
    main model."""
    def __init__(self, dataset, reweighting, model, large_batch=1024,
                 forward_batch_size=128):
        self.model = model
        self.large_batch = large_batch
        self.forward_batch_size = forward_batch_size
        self.N = _get_dataset_length(dataset, default=1)

        super(ModelSampler, self).__init__(dataset, reweighting)

    def _get_samples_with_scores(self, batch_size):
        assert batch_size < self.large_batch

        # Sample a large number of points in random and score them
        idxs = np.random.choice(self.N, self.large_batch)
        x, y = self.dataset.train_data[idxs]
        scores = self.model.score(x, y, batch_size=self.forward_batch_size)

        return (
            idxs,
            scores,
            (x, y)
        )


class CacheSampler(BaseSampler):
    """CacheSampler uses the recent outputs of the model to determine the
    importance of the samples"""
    def __init__(self, dataset, reweighting, staleness=3, cache_prob=0.5,
                 smooth=0.2):
        # Necessary state for implementing the cache sampler
        self._N = N = len(dataset.train_data)
        self._idxs = np.arange(N)
        self._scores = np.ones((N,))
        self._cache = {}
        self._score_sum = 0.0

        # Configuration
        self._staleness = staleness
        self._alpha = cache_prob / (1 - cache_prob)
        self._smooth = smooth

        super(CacheSampler, self).__init__(dataset, reweighting)

    def _get_samples_with_scores(self, batch_size):
        return (
            self._idxs,
            self._scores,
            None
        )

    def update(self, idxs, x):
        # Add the new scores to the cache
        for idx, xi in zip(idxs, x):
            self._cache[idx] = (self._smooth + xi, 0)

        # Remove the stale values (copy the keys so entries can be deleted
        # while iterating)
        self._score_sum = 0.0
        keys = list(self._cache.keys())
        for k in keys:
            item = self._cache[k]
            if item[1] > self._staleness:
                self._scores[k] = 1
                del self._cache[k]
            else:
                self._cache[k] = (item[0], item[1] + 1)
                self._score_sum += item[0]

        # Recompute the scores for sampling
        N = self._N
        S = self._score_sum
        a = self._alpha
        for k, (s, _) in self._cache.items():
            self._scores[k] = 1 + a * N * s / S


class LSTMSampler(BaseSampler):
    """Use an LSTM to predict the loss based on the previous losses of each
    sample

    Arguments
    ---------
    dataset: The dataset we want to sample from
    presample: int
               Presample that many data points using uniform sampling to
               decrease the complexity
    history: int
             How many scores per data point to keep in history
    smooth: float
            Depending on whether we are using adaptive smoothing or additive
            we either add smooth*mean or simply smooth to each importance
            before sampling
    log: bool
         Do the regression in log space
    adaptive_smooth: bool
                     Smooth adaptively based on the mean of the scores
    forget: float
            A float less than one used to calculate the mean of the scores
    """
    def __init__(self, dataset, reweighting, presample=2048, history=10,
                 log=False):
        # Initialize the history for every sample
        init = 1.0
        if log:
            init = np.log(init)
        self.history = np.zeros((len(dataset.train_data), history, 1))
        self.history[:, 0, 0] = init
        self.cnts = np.ones(len(dataset.train_data), dtype=np.uint8)

        # Keep some member variables
        self.presample = presample
        self.log = log

        # Create our LSTM model
        x00 = Input(shape=(history, 1))
        x10 = Input(shape=(1,))
        x0 = Masking(mask_value=0.0)(x00)
        x0 = LSTM(32, return_sequences=True, unroll=True)(x0)
        x0 = LSTM(32, unroll=True)(x0)
        x1 = Embedding(dataset.output_size, 32)(x10)
        x1 = Flatten()(x1)
        x = concatenate([x0, x1])
        y = Dense(1)(x)
        self.model = Model(inputs=[x00, x10], outputs=y)
        self.model.compile(optimizer="adam", loss="mse")

        super(LSTMSampler, self).__init__(dataset, reweighting)

    def _to_ids(self, y):
        try:
            if y.shape[1] > 1:
                return np.expand_dims(y.argmax(axis=1), -1)
        except:
            pass
        return y

    def _get_samples_with_scores(self, batch_size):
        """Use the LSTM to predict the loss of each sample"""
        # Presample so that we do not run the LSTM for the whole dataset
        idxs = np.random.choice(len(self.history), self.presample)
        x, y = self.dataset.train_data[idxs]

        # Predict, normalize and sample
        scores = self.model.predict(
            [self.history[idxs], self._to_ids(y)],
            batch_size=1024
        ).ravel()

        # Perform the regression in logspace if needed
        if self.log:
            np.exp(scores, scores)
        else:
            np.maximum(scores, 1e-6, scores)

        return (
            idxs,
            scores,
            (x, y)
        )

    def update(self, idxs, x):
        # Fetch the classes for the regression
        _, y = self.dataset.train_data[idxs]

        # If we are doing the regression in logspace
        if self.log:
            x = np.log(x)

        # Train the lstm so that it can predict x given the history
        self.model.train_on_batch([self.history[idxs], self._to_ids(y)], x)

        # Update the history to include x
        full = idxs[self.cnts[idxs] == self.history.shape[1]]
        self.history[full] = np.roll(self.history[full], -1, axis=1)
        self.cnts[full] -= 1
        self.history[idxs, self.cnts[idxs], :1] = x
        self.cnts[idxs] += 1


class PerClassGaussian(BaseSampler):
    """Fit a Gaussian per class to predict the losses"""
    def __init__(self, dataset, reweighting, alpha=0.9, presample=2048):
        self.alpha = alpha
        self.presample = presample
        self.mu = np.ones(dataset.output_size) * np.log(dataset.output_size)
        self.variance = np.ones(dataset.output_size)

        super(PerClassGaussian, self).__init__(dataset, reweighting)

    def _get_samples_with_scores(self, batch_size):
        # Presample so that we do not need to compute everything
        # on the whole dataset
        idxs = np.random.choice(len(self.dataset.train_data), self.presample)
        x, y = self.dataset.train_data[idxs]
        yis = y.ravel() if y.shape[1] == 1 else y.argmax(axis=1)

        # Compute the sampling probs for each of the above presampled
        # data points
        scores = self.mu + np.sqrt(np.maximum(self.variance - self.mu**2, 0))
        scores = scores[yis]

        return (
            idxs,
            scores,
            (x, y)
        )

    def update(self, idxs, x):
        # Fetch the classes in order to model per class information
        _, y = self.dataset.train_data[idxs]
        yis = y.ravel() if y.shape[1] == 1 else y.argmax(axis=1)

        # Update the mean and variance one by one
        # TODO: Improve the following implementation
        for xi, yi in zip(x, yis):
            d = (1.0 - self.alpha) * xi
            self.mu[yi] = self.alpha * self.mu[yi] + d
            self.variance[yi] = self.alpha * self.variance[yi] + d * xi


class LSTMComparisonSampler(BaseSampler):
    """Compare LSTM and Model scores on a fixed presampled subset of the
    training data"""
    def __init__(self, dataset, lstm, model, subset=1024):
        self._idxs = np.random.choice(len(dataset.train_data), subset)
        self._x, self._y = dataset.train_data[self._idxs]
        self.lstm = lstm
        self.model = model

    def _get_samples_with_scores(self, batch_size):
        s1 = self.model.model.score(self._x, self._y)
        s2 = self.lstm.model.predict(
            [self.lstm.history[self._idxs], self.lstm._to_ids(self._y)],
            batch_size=1024
        ).ravel()
        signal("is.lstm_comparison_sampler.scores").send(zip(s1, s2))

        return self.lstm._get_samples_with_scores(batch_size)

    def update(self, idxs, x):
        return self.lstm.update(idxs, x)


class SamplerDecorator(BaseSampler):
    """Just decorate another sampler.

    Arguments
    ---------
    sampler: BaseSampler
             The sampler being decorated
    """
    def __init__(self, sampler):
        self.sampler = sampler

        super(SamplerDecorator, self).__init__(
            sampler.dataset,
            sampler.reweighting
        )

    def _get_samples_with_scores(self, batch_size):
        raise NotImplementedError()

    def update(self, idxs, results):
        self.sampler.update(idxs, results)

    @property
    def model(self):
        """Expose the model attribute of the decorated sampler if one
        exists."""
        return self.sampler.model


class AdditiveSmoothingSampler(SamplerDecorator):
    """Add a constant to all the importance scores in order to smooth them
    towards uniform

    Arguments
    ---------
    sampler: BaseSampler
             The sampler being decorated
    c: float
       A constant to add to every importance score
    """
    def __init__(self, sampler, c=1.0):
        self.c = c

        super(AdditiveSmoothingSampler, self).__init__(sampler)

    def _get_samples_with_scores(self, batch_size):
        idxs, scores, xy = self.sampler._get_samples_with_scores(batch_size)

        return (
            idxs,
            scores + self.c,
            xy
        )


class AdaptiveAdditiveSmoothingSampler(SamplerDecorator):
    """Add a percentage of the moving average of the predicted importance
    scores to smooth them towards uniform.

    Arguments
    ---------
    sampler: BaseSampler
             The sampler being decorated
    percentage: float
                Multiplied by the moving average of the importance scores to
                add to each score to smooth it
    forget: float
            Used to compute the exponential moving average
            mu = forget * mu + (1-forget) * mu_new
    """
    def __init__(self, sampler, percentage=0.5, forget=0.9):
        self.percentage = percentage
        self.forget = forget
        self.mu = 1.0  # it could be 0, it doesn't really matter

        super(AdaptiveAdditiveSmoothingSampler, self).__init__(sampler)

    def _get_samples_with_scores(self, batch_size):
        idxs, scores, xy = self.sampler._get_samples_with_scores(batch_size)

        self.mu = self.forget * self.mu + (1 - self.forget) * scores.mean()

        return (
            idxs,
            scores + self.percentage * self.mu,
            xy
        )


class PowerSmoothingSampler(SamplerDecorator):
    """Raise the importance scores to a power (less than 1) to smooth them
    towards uniform.

    Arguments
    ---------
    sampler: BaseSampler
             The sampler being decorated
    power: float
           The power to raise the scores to
    """
    def __init__(self, sampler, power=0.5):
        assert 0 <= power <= 1

        self.power = power

        super(PowerSmoothingSampler, self).__init__(sampler)

    def _get_samples_with_scores(self, batch_size):
        idxs, scores, xy = self.sampler._get_samples_with_scores(batch_size)

        return (
            idxs,
            scores**self.power,
            xy
        )


class ConditionalStartSampler(SamplerDecorator):
    """ConditionalStartSampler samples uniformly until a condition is met.

    Arguments
    ---------
    sampler: BaseSampler
             The sampler to be decorated
    condition: Condition
               Decide if we should start importance sampling or not
    """
    def __init__(self, sampler, condition):
        # create a uniform sampler to sample from when the condition is not
        # met
        self.uniform = UniformSampler(sampler.dataset, sampler.reweighting)
        self.condition = condition
        self.debug_count_satisfied = 0
        self.debug_count = 0

        super(ConditionalStartSampler, self).__init__(sampler)

    def _get_samples_with_scores(self, batch_size):
        self.debug_count += 1
        if self.condition.satisfied:
            idxs, scores, xy = \
                self.sampler._get_samples_with_scores(batch_size)
            self.condition.update(scores)
            self.debug_count_satisfied += 1
            # print('self.debug_count,self.debug_count_satisfied: ',self.debug_count,self.debug_count_satisfied)
        else:
            idxs, scores, xy = \
                self.uniform._get_samples_with_scores(batch_size)
            if scores is None:
                scores = np.ones(len(idxs))
            # print('self.debug_count,self.debug_count_satisfied: ',self.debug_count,self.debug_count_satisfied)

        return (
            idxs,
            scores,
            xy
        )

    def update(self, idxs, scores):
        if not self.condition.previously_satisfied:
            self.condition.update(scores)
        self.sampler.update(idxs, scores)


class Condition(object):
    """An interface for use with the ConditionalStartSampler."""
    @property
    def satisfied(self):
        raise NotImplementedError()

    @property
    def previously_satisfied(self):
        pass  # not necessary

    def update(self, scores):
        pass  # not necessary


class WarmupCondition(Condition):
    """Wait 'warmup' iterations before using importance sampling.

    Arguments
    ---------
    warmup: int
            The number of iterations to wait before starting importance
            sampling
    """
    def __init__(self, warmup=100):
        self._warmup = warmup
        self._iters = 0

    @property
    def satisfied(self):
        return self._iters > self._warmup

    def update(self, scores):
        self._iters += 1


class ExpCondition(Condition):
    """Assume that the scores are created by an exponential distribution and
    sample only if lambda is larger than a threshold.

    Arguments
    ---------
    lambda_th: float
               When lambda > lambda_th start importance sampling
    momentum: float
              The momentum to compute the exponential moving average of
              lambda
    """
    def __init__(self, lambda_th=2.0, momentum=0.9):
        self._lambda_th = lambda_th
        self._lambda = 0.0
        self._previous_lambda = 0.0
        self._momentum = momentum

    @property
    def satisfied(self):
        self._previous_lambda = self._lambda
        return self._lambda > self._lambda_th

    @property
    def previously_satisfied(self):
        return self._previous_lambda > self._lambda_th

    def update(self, scores):
        self._lambda = (
            self._momentum * self._lambda +
            (1 - self._momentum) / scores.mean()
        )


class TotalVariationCondition(Condition):
    """Sample from the decorated sampler if the TV of the scores with the
    uniform distribution is larger than a given value.

    Arguments
    ---------
    tv_th: float
           When tv > tv_th start importance sampling
    momentum: float
              The momentum to compute the exponential moving average of tv
    """
    def __init__(self, tv_th=0.5, momentum=0.9):
        self._tv_th = tv_th
        self._tv = 0.0
        self._previous_tv = 0.0
        self._momentum = momentum

    @property
    def satisfied(self):
        self._previous_tv = self._tv
        return self._tv > self._tv_th

    @property
    def previously_satisfied(self):
        return self._previous_tv > self._tv_th

    def update(self, scores):
        self._previous_tv = self._tv
        new_tv = 0.5 * np.abs(scores/scores.sum() - 1.0/len(scores)).sum()
        self._tv = (
            self._momentum * self._tv +
            (1 - self._momentum) * new_tv
        )


class VarianceReductionCondition(Condition):
    """Sample with importance sampling when the variance reduction is larger
    than a threshold. The variance reduction is measured in units of batch
    size increase.

    Arguments
    ---------
    vr_th: float
           When vr > vr_th start importance sampling
    momentum: float
              The momentum to compute the exponential moving average of vr
    """
    def __init__(self, vr_th=1.2, momentum=0.9):
        self._vr_th = vr_th
        self._vr = 0.0
        self._previous_vr = 0.0
        self._momentum = momentum

    @property
    def variance_reduction(self):
        return self._vr

    @property
    def satisfied(self):
        self._previous_vr = self._vr
        # if self._vr > self._vr_th:
        # if True:
        #     print('self._vr,self._vr_th',self._vr,self._vr_th)
        return self._vr > self._vr_th

    @property
    def previously_satisfied(self):
        return self._previous_vr > self._vr_th

    def update(self, scores):
        # print('scores',scores)
        u = 1.0 / scores.shape[0]
        S = tf.math.reduce_sum(scores)
        if S == 0:
            g = np.array(u)
        else:
            g = scores / S
        # new_vr = 1.0 / np.sqrt(1 - ((g-u)**2).sum()/(g**2).sum())
        new_vr = 1.0 / np.sqrt(1 - tf.math.reduce_sum((g-u)**2)/tf.math.reduce_sum(g**2))
        self._vr = (
            self._momentum * self._vr +
            (1 - self._momentum) * new_vr
        )


class ConstantVarianceSampler(BaseSampler):
    """ConstantVarianceSampler uses the VarianceReductionCondition to sample
    less and less points but keep the variance of the gradients constant.

    Arguments
    ---------
    dataset: The BaseDataset implementation to sample from
    reweighting: Compute the weights to make the sampling unbiased etc.
    model: Used to compute the importance for importance sampling
    backward_time: The slowdown factor of the backward pass in comparison to
                   the forward pass
    extra_samples: Sample that much more than suggested by the predicted
                   variance reduction to account for the approximation
    """
    def __init__(self, dataset, reweighting, model, backward_time=2.0,
                 extra_samples=0.2):
        self.condition = VarianceReductionCondition(
            1.0 / ((backward_time / (1 + backward_time)) - extra_samples)
        )
        self.model = model
        self.extra_samples = extra_samples
        self.N = _get_dataset_length(dataset, default=1)

        super(ConstantVarianceSampler, self).__init__(dataset, reweighting)

    def sample(self, batch_size):
        # Sample batch size uniformly at random
        idxs = np.random.choice(self.N, batch_size)
        idxs2 = np.arange(len(idxs))
        x, y = self.dataset.train_data[idxs]
        scores = np.ones(len(idxs))
        w = np.ones((len(idxs), self.reweighting.weight_size))

        # This means that we can get a speedup by backpropagating less
        if self.condition.satisfied:
            f = 1.0 / self.condition.variance_reduction + self.extra_samples
            N = int(f * batch_size)
            scores = self.model.score(x, y, batch_size=batch_size)
            p = scores / scores.sum()
            idxs2 = np.random.choice(len(idxs), N, p=p)
            w = self.reweighting.sample_weights(idxs2, scores)
            x, y = self._slice_data(x, y, idxs2)
            self.condition.update(scores)

        self._send_messages(idxs[idxs2], (x, y), w, scores[idxs2])

        return (
            idxs[idxs2],
            (x, y),
            w
        )

    def update(self, idxs, scores):
        if not self.condition.previously_satisfied:
            self.condition.update(scores)


class ConstantTimeSampler(BaseSampler):
    """ConstantTimeSampler uses the VarianceReductionCondition to increase
    the quality of the gradients while keeping the time per iteration
    constant.

    Arguments
    ---------
    """
    def __init__(self, dataset, reweighting, model, backward_time=2.0,
                 tau_th=2.0, ratio=0.5, min_a=0.2):
        self.condition = VarianceReductionCondition(tau_th)
        self.backward_time = backward_time
        self.ratio = ratio
        self.min_a = min_a
        self.model = model
        self.N = _get_dataset_length(dataset, default=1)

        super(ConstantTimeSampler, self).__init__(dataset, reweighting)

    def sample(self, batch_size):
        # Check whether the condition is satisfied so that we can sample with
        # importance instead of uniformly
        if self.condition.satisfied:
            # compute the available time
            t = (1.0 + self.backward_time) * batch_size
            # compute the minimum forward-backward batch
            a = max(
                self.min_a * batch_size,
                batch_size / self.condition.variance_reduction
            )
            # compute the maximum scored samples
            B = t - (1.0 + self.backward_time) * a
            # split the difference according to ratio keeping the time fixed
            B = int(batch_size + (B - batch_size) * self.ratio)
            a = int((t - B) / (1.0 + self.backward_time))

            # do the hippy shake
            idxs = np.random.choice(self.N, B)
            x, y = self.dataset.train_data[idxs]
            scores = self.model.score(x, y, batch_size=batch_size)
            p = scores / scores.sum()
            idxs2 = np.random.choice(B, a, p=p)
            w = self.reweighting.sample_weights(idxs2, scores)
            x, y = self._slice_data(x, y, idxs2)
            self.condition.update(scores)
        else:
            idxs = np.random.choice(self.N, batch_size)
            idxs2 = np.arange(len(idxs))
            x, y = self.dataset.train_data[idxs]
            scores = np.ones(len(idxs))
            w = np.ones((len(idxs), self.reweighting.weight_size))

        self._send_messages(idxs[idxs2], (x, y), w, scores[idxs2])

        return (
            idxs[idxs2],
            (x, y),
            w
        )

    def update(self, idxs, scores):
        if not self.condition.previously_satisfied:
            self.condition.update(scores)


class HistorySampler(ModelSampler):
    """HistorySampler uses the history of the loss to perform importance
    sampling.

    Arguments
    ---------
    dataset: The dataset to sample from
    reweighting: The reweighting scheme
    model: The model to be used for scoring
    recompute: Compute the loss for the whole dataset every recompute batches
    """
    def __init__(self, dataset, reweighting, model, forward_batch_size=128,
                 recompute=2):
        super(HistorySampler, self).__init__(
            dataset,
            reweighting,
            model,
            forward_batch_size=forward_batch_size
        )

        # The configuration of HistorySampler
        self.recompute = recompute

        # Mutable variables holding the state of the sampler
        self._batch = 0
        self._scores = np.ones((len(dataset.train_data),))
        self._unseen = np.ones(len(dataset.train_data), dtype=bool)
        self._seen = np.zeros_like(self._unseen)

    def _get_samples_with_scores(self, batch_size):
        return (
            np.arange(len(self._scores)),
            self._scores,
            None
        )

    def update(self, idxs, results):
        # Update the scores of the seen samples
        self._scores[idxs] = results.ravel()
        self._unseen[idxs] = False
        self._seen[idxs] = True
        self._scores[self._unseen] = self._scores[self._seen].mean()

        # Recompute all the scores if needed
        self._batch += 1
        if self._batch % self.recompute == 0:
            for i in range(0, len(self.dataset.train_data), 1024*64):
                x, y = self.dataset.train_data[i:i+1024*64]
                self._scores[i:i+1024*64] = self.model.score(
                    x, y,
                    batch_size=self.forward_batch_size
                ).ravel()
            self._seen[:] = True
            self._unseen[:] = False


class OnlineBatchSelectionSampler(ModelSampler):
    """OnlineBatchSelection is the online batch creation method by
    Loshchilov & Hutter.

    See 'Online Batch Selection for Faster Training of Neural Networks'.

    Arguments
    ---------
    dataset: The dataset to sample from
    reweighting: The reweighting scheme
    model: The model to be used for scoring
    steps_per_epoch: int
                     How many batches to create before considering that an
                     epoch has passed
    recompute: int
               Recompute the scores after r minibatches seen
    s_e: tuple
         Used to compute the sampling probabilities from the ranking
    n_epochs: int
              The number of epochs, used to compute the sampling
              probabilities
    """
    def __init__(self, dataset, reweighting, model, large_batch=1024,
                 forward_batch_size=128, steps_per_epoch=300, recompute=2,
                 s_e=(1, 1), n_epochs=1):
        super(OnlineBatchSelectionSampler, self).__init__(
            dataset,
            reweighting,
            model,
            large_batch=large_batch,
            forward_batch_size=forward_batch_size
        )

        # The configuration of OnlineBatchSelection
        self.steps_per_epoch = steps_per_epoch
        self.recompute = recompute
        self.s_e = s_e
        self.n_epochs = n_epochs

        # Mutable variables to be updated
        self._batch = 0
        self._epoch = 0
        self._raw_scores = np.ones((len(dataset.train_data),))
        self._scores = np.ones_like(self._raw_scores)
        self._ranks = np.arange(len(dataset.train_data))

    def _get_samples_with_scores(self, batch_size):
        return (
            np.arange(len(self._ranks)),
            self._scores,
            None
        )

    def update(self, idxs, results):
        # Compute the current epoch and the current batch
        self._batch += 1
        self._epoch = 1 + self._batch // self.steps_per_epoch

        # Add the new scores to the raw_scores
        self._raw_scores[idxs] = results.ravel()

        # if it is a new epoch
        if self._batch % self.steps_per_epoch == 0:
            # For the very first batch or every 'recompute' epochs compute
            # the loss across the whole dataset
            if self.recompute > 0 and self._epoch % self.recompute == 0:
                # Extra internal batch size so that we do not load too much
                # data into memory
                scores = []
                for i in range(0, len(self.dataset.train_data), 1024*64):
                    x, y = self.dataset.train_data[i:i+1024*64]
                    scores.append(self.model.score(
                        x, y, batch_size=self.forward_batch_size
                    ))
                self._raw_scores[:] = np.hstack(scores)

            # Sort and recompute the ranks
            N = len(self.dataset.train_data)
            self._ranks[self._raw_scores.argsort()] = np.arange(N)[::-1]

            # Recompute the sample scores from the ranks
            s_e0, s_eend = self.s_e
            n_epochs = self.n_epochs
            s = s_e0 * np.exp(np.log(s_eend/s_e0)/n_epochs) ** self._epoch
            s = 1.0 / np.exp(np.log(s)/N)
            self._scores = s**self._ranks


class SCSGSampler(BaseSampler):
    """Implement [1] using the SVRG model wrapper.

    SCSG is an online version of SVRG especially made for problems with
    difficult to compute batch gradients.

    [1]: Nonconvex Finite-Sum Optimization Via SCSG Methods

    Arguments
    ---------
    dataset: The dataset to sample from
    reweighting: In case we need to reweigh the samples (could be none in
                 this case)
    model: The model wrapper (must implement the interface of SVRGWrapper)
    B: The initial large batch to sample (if None fall back to SVRG)
    B_over_b: The number of minibatches in an iteration
    B_rate: B*B_rate^(epoch) gives B for every epoch
    """
    def __init__(self, dataset, reweighting, model, B=1024, B_over_b=32,
                 B_rate=1.0):
        self.N = _get_dataset_length(dataset, default=1)
        self.B = B or len(dataset.train_data)
        self.B_over_b = B_over_b
        self.B_rate = B_rate
        self._iteration = 0
        self._idxs = np.arange(self.N)
        self._model = model

        super(SCSGSampler, self).__init__(dataset, reweighting)

    def _get_samples_with_scores(self, batch_size):
        if self._iteration % self.B_over_b == 0:
            self._compute_batch_gradient(batch_size)
            self.B *= self.B_rate
            self.B = min(self.B, self.N) if self.N > 1 else self.B
        self._iteration += 1

        return (self._idxs, None, None)

    def _compute_batch_gradient(self, batch_size):
        def batch_gen():
            np.random.shuffle(self._idxs)
            for s in range(0, int(self.B), batch_size):
                yield self.dataset.train_data[self._idxs[s:s+batch_size]]
        self._model.update_grad(batch_gen())
[ "numpy.zeros_like", "numpy.exp", "numpy.roll", "numpy.hstack", "numpy.ones_like", "tensorflow.keras.layers.Embedding", "numpy.arange", "tensorflow.math.reduce_sum", "tensorflow.keras.layers.Flatten", "numpy.log", "tensorflow.keras.layers.Masking", "numpy.random.choice", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "numpy.array", "numpy.maximum", "tensorflow.keras.layers.concatenate", "numpy.random.shuffle", "numpy.ones", "tensorflow.keras.layers.LSTM", "tensorflow.keras.layers.Input" ]
importance_sampling/samplers.py
[(132, 'numpy.random.choice', 'np.random.choice', (['self.N', 'self.large_batch'], {}), True, 'import numpy as np\n'), (150, 'numpy.arange', 'np.arange', (['N'], {}), True, 'import numpy as np\n'), (151, 'numpy.ones', 'np.ones', (['(N,)'], {}), True, 'import numpy as np\n'), (232, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(history, 1)'}), False, 'from tensorflow.keras.layers import Dense, Embedding, Flatten, Input, LSTM, Masking, concatenate\n'), (233, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(1,)'}), False, 'from tensorflow.keras.layers import Dense, Embedding, Flatten, Input, LSTM, Masking, concatenate\n'), (239, 'tensorflow.keras.layers.concatenate', 'concatenate', (['[x0, x1]'], {}), False, 'from tensorflow.keras.layers import Dense, Embedding, Flatten, Input, LSTM, Masking, concatenate\n'), (241, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[x00, x10]', 'outputs': 'y'}), False, 'from tensorflow.keras.models import Model\n'), (290, 'numpy.roll', 'np.roll', (['self.history[full]', '(-1)'], {'axis': '(1)'}), True, 'import numpy as np\n'), (302, 'numpy.ones', 'np.ones', (['dataset.output_size'], {}), True, 'import numpy as np\n'), (661, 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['scores'], {}), True, 'import tensorflow as tf\n'), (701, 'numpy.random.choice', 'np.random.choice', (['self.N', 'batch_size'], {}), True, 'import numpy as np\n'), (822, 'numpy.zeros_like', 'np.zeros_like', (['self._unseen'], {}), True, 'import numpy as np\n'), (893, 'numpy.ones_like', 'np.ones_like', (['self._raw_scores'], {}), True, 'import numpy as np\n'), (964, 'numpy.arange', 'np.arange', (['self.N'], {}), True, 'import numpy as np\n'), (75, 'numpy.ones', 'np.ones', (['scores.shape'], {}), True, 'import numpy as np\n'), (87, 'numpy.ones', 'np.ones', (['batch_size'], {}), True, 'import numpy as np\n'), (222, 'numpy.log', 'np.log', (['init'], {}), True, 'import numpy as np\n'), (234, 'tensorflow.keras.layers.Masking', 'Masking', ([], {'mask_value': '(0.0)'}), False, 'from tensorflow.keras.layers import Dense, Embedding, Flatten, Input, LSTM, Masking, concatenate\n'), (235, 'tensorflow.keras.layers.LSTM', 'LSTM', (['(32)'], {'return_sequences': '(True)', 'unroll': '(True)'}), False, 'from tensorflow.keras.layers import Dense, Embedding, Flatten, Input, LSTM, Masking, concatenate\n'), (236, 'tensorflow.keras.layers.LSTM', 'LSTM', (['(32)'], {'unroll': '(True)'}), False, 'from tensorflow.keras.layers import Dense, Embedding, Flatten, Input, LSTM, Masking, concatenate\n'), (237, 'tensorflow.keras.layers.Embedding', 'Embedding', (['dataset.output_size', '(32)'], {}), False, 'from tensorflow.keras.layers import Dense, Embedding, Flatten, Input, LSTM, Masking, concatenate\n'), (238, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Dense, Embedding, Flatten, Input, LSTM, Masking, concatenate\n'), (240, 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {}), False, 'from tensorflow.keras.layers import Dense, Embedding, Flatten, Input, LSTM, Masking, concatenate\n'), (267, 'numpy.exp', 'np.exp', (['scores', 'scores'], {}), True, 'import numpy as np\n'), (269, 'numpy.maximum', 'np.maximum', (['scores', '(1e-06)', 'scores'], {}), True, 'import numpy as np\n'), (283, 'numpy.log', 'np.log', (['x'], {}), True, 'import numpy as np\n'), (301, 'numpy.ones', 'np.ones', (['dataset.output_size'], {}), True, 'import numpy as np\n'), (301, 'numpy.log', 'np.log', (['dataset.output_size'], {}), True, 'import numpy as np\n'), 
(663, 'numpy.array', 'np.array', (['u'], {}), True, 'import numpy as np\n'), (767, 'numpy.random.choice', 'np.random.choice', (['self.N', 'B'], {}), True, 'import numpy as np\n'), (771, 'numpy.random.choice', 'np.random.choice', (['B', 'a'], {'p': 'p'}), True, 'import numpy as np\n'), (776, 'numpy.random.choice', 'np.random.choice', (['self.N', 'batch_size'], {}), True, 'import numpy as np\n'), (980, 'numpy.random.shuffle', 'np.random.shuffle', (['self._idxs'], {}), True, 'import numpy as np\n'), (41, 'blinker.signal', 'signal', (['"""is.sample"""'], {}), False, 'from blinker import signal\n'), (315, 'numpy.maximum', 'np.maximum', (['(self.variance - self.mu ** 2)', '(0)'], {}), True, 'import numpy as np\n'), (352, 'blinker.signal', 'signal', (['"""is.lstm_comparison_sampler.scores"""'], {}), False, 'from blinker import signal\n'), (924, 'numpy.hstack', 'np.hstack', (['scores'], {}), True, 'import numpy as np\n'), (928, 'numpy.arange', 'np.arange', (['N'], {}), True, 'import numpy as np\n'), (667, 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['((g - u) ** 2)'], {}), True, 'import tensorflow as tf\n'), (667, 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['(g ** 2)'], {}), True, 'import tensorflow as tf\n'), (934, 'numpy.log', 'np.log', (['s'], {}), True, 'import numpy as np\n'), (933, 'numpy.log', 'np.log', (['(s_eend / s_e0)'], {}), True, 'import numpy as np\n')]
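
In the samplers above, unbiasedness is restored by reweighting: a sample drawn with probability g_i instead of 1/B gets its gradient rescaled, and VarianceReductionCondition gates the switch away from uniform sampling. A small NumPy sketch of the variance-reduction estimate from its update() and of the standard 1/(B*g_i) importance weights; the weight formula is the textbook one, while this file delegates it to the reweighting object, so treat that part as an assumption.

import numpy as np

scores = np.array([0.1, 0.5, 1.0, 2.4])  # per-sample importance scores
B = len(scores)
g = scores / scores.sum()                # sampling distribution over the batch
u = 1.0 / B                              # uniform probability

# predicted speedup, mirroring VarianceReductionCondition.update
vr = 1.0 / np.sqrt(1.0 - ((g - u) ** 2).sum() / (g ** 2).sum())

idxs = np.random.choice(B, size=2, p=g)  # importance-sample a mini-batch
w = 1.0 / (B * g[idxs])                  # weights that keep the gradient estimate unbiased
print(vr, idxs, w)
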
zhanglei1949/federatedSpeechRecognition
e8a9a1761e309c2aef93a629b7e82742afc798f3
'''
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Lambda, BatchNormalization, Conv1D, GRU, TimeDistributed, Activation, Dense, Flatten
from keras.optimizers import SGD, RMSprop
from keras.callbacks import ModelCheckpoint
from keras.losses import categorical_crossentropy
'''
import os

from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Lambda, BatchNormalization, Conv1D, GRU, TimeDistributed, Activation, Dense, Flatten
from tensorflow.keras.optimizers import SGD, RMSprop
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.losses import categorical_crossentropy


def cnn_output_length(input_length, filter_size, padding, stride, dilation=1):
    '''
    Calculates output length based on the input sample.

    NOTE: Used only for architectures with Conv1D layers.

    :param:
        input_length - Integer, length of the input data (Example: input.shape[0])
        filter_size - Integer, kernel_size of the Conv layer
        padding - String, padding version on the Conv layer ("same" or "valid")
        stride - Integer, Conv layer strides size
        dilation - Integer
    '''
    if input_length is None:
        return None

    assert padding in {'same', 'valid'}

    dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
    if padding == 'same':
        output_length = input_length
    else:
        output_length = input_length - dilated_filter_size + 1

    return (output_length + stride - 1) // stride


def ctc_loss(args):
    '''
    More info on CTC: https://towardsdatascience.com/intuitively-understanding-connectionist-temporal-classification-3797e43a86c

    Creates CTC (Connectionist Temporal Classification) loss for a speech_to_text model approach.

    :params:
        args - List of params: predictions, labels, input_len and labels_len

    :returns:
        calculated CTC loss based on args.
    '''
    predictions, labels, input_len, labels_len = args
    return K.ctc_batch_cost(labels, predictions, input_len, labels_len)


def categorical_loss(args):
    '''
    Creates categorical crossentropy loss for a classification based model.

    :params:
        args - List of params: predictions, labels

    :returns:
        calculated categorical_crossentropy loss based on args.
    '''
    predictions, labels = args
    return categorical_crossentropy(labels, predictions)


def add_ctc_loss(model):
    '''
    Adds CTC loss to a model.

    :params:
        model - Keras Model object

    :returns:
        model - Keras Model object with CTC loss added
    '''
    # Creates placeholder/Input layer for labels
    labels = Input(name='labels', shape=(None,), dtype='float32')
    # Creates placeholder/Input layer for lengths of input features (time steps)
    input_lens = Input(name='input_length', shape=(1,), dtype='int64')
    # Creates placeholder/Input layer for lengths of labels/targets (in our case number of characters in a target word)
    labels_lens = Input(name='label_length', shape=(1,), dtype='int64')

    # Create lambda function around model outputs based on label lengths
    outputs = Lambda(model.output_length)(input_lens)

    # Add CTC loss to the input model
    loss = Lambda(ctc_loss, output_shape=(1,), name='ctc')([model.output, labels, outputs, labels_lens])

    # Create new model instance with all new placeholders/input layers and loss as the output
    model = Model(inputs=[model.input, labels, input_lens, labels_lens], outputs=loss)
    return model


def add_categorical_loss(model, number_of_classes):
    '''
    Adds categorical_crossentropy loss to a model.

    :params:
        model - Keras Model object
        number_of_classes - Integer, number of classes in a dataset (number of words in this case)

    :returns:
        model - Keras Model object with categorical_crossentropy loss added
    '''
    # Creates placeholder/Input layer for labels in one_hot_encoded form
    labels = Input(name='labels', shape=(number_of_classes,), dtype='float32')

    # Add categorical_crossentropy loss to the input model
    loss = Lambda(categorical_loss, output_shape=(1,), name='categorical_crossentropy')([model.output, labels])

    # Create new model instance with all new placeholders/input layers and loss as the output
    model = Model(inputs=[model.input, labels], outputs=loss)
    return model
[ "tensorflow.keras.layers.Lambda", "tensorflow.keras.models.Model", "tensorflow.keras.losses.categorical_crossentropy", "tensorflow.keras.backend.ctc_batch_cost", "tensorflow.keras.layers.Input" ]
utils/model_utils.py
[(57, 'tensorflow.keras.backend.ctc_batch_cost', 'K.ctc_batch_cost', (['labels', 'predictions', 'input_len', 'labels_len'], {}), True, 'from tensorflow.keras import backend as K\n'), (71, 'tensorflow.keras.losses.categorical_crossentropy', 'categorical_crossentropy', (['labels', 'predictions'], {}), False, 'from tensorflow.keras.losses import categorical_crossentropy\n'), (84, 'tensorflow.keras.layers.Input', 'Input', ([], {'name': '"""labels"""', 'shape': '(None,)', 'dtype': '"""float32"""'}), False, 'from tensorflow.keras.layers import Input, Lambda, BatchNormalization, Conv1D, GRU, TimeDistributed, Activation, Dense, Flatten\n'), (86, 'tensorflow.keras.layers.Input', 'Input', ([], {'name': '"""input_length"""', 'shape': '(1,)', 'dtype': '"""int64"""'}), False, 'from tensorflow.keras.layers import Input, Lambda, BatchNormalization, Conv1D, GRU, TimeDistributed, Activation, Dense, Flatten\n'), (88, 'tensorflow.keras.layers.Input', 'Input', ([], {'name': '"""label_length"""', 'shape': '(1,)', 'dtype': '"""int64"""'}), False, 'from tensorflow.keras.layers import Input, Lambda, BatchNormalization, Conv1D, GRU, TimeDistributed, Activation, Dense, Flatten\n'), (97, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[model.input, labels, input_lens, labels_lens]', 'outputs': 'loss'}), False, 'from tensorflow.keras.models import Model\n'), (115, 'tensorflow.keras.layers.Input', 'Input', ([], {'name': '"""labels"""', 'shape': '(number_of_classes,)', 'dtype': '"""float32"""'}), False, 'from tensorflow.keras.layers import Input, Lambda, BatchNormalization, Conv1D, GRU, TimeDistributed, Activation, Dense, Flatten\n'), (121, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[model.input, labels]', 'outputs': 'loss'}), False, 'from tensorflow.keras.models import Model\n'), (91, 'tensorflow.keras.layers.Lambda', 'Lambda', (['model.output_length'], {}), False, 'from tensorflow.keras.layers import Input, Lambda, BatchNormalization, Conv1D, GRU, TimeDistributed, Activation, Dense, Flatten\n'), (94, 'tensorflow.keras.layers.Lambda', 'Lambda', (['ctc_loss'], {'output_shape': '(1,)', 'name': '"""ctc"""'}), False, 'from tensorflow.keras.layers import Input, Lambda, BatchNormalization, Conv1D, GRU, TimeDistributed, Activation, Dense, Flatten\n'), (118, 'tensorflow.keras.layers.Lambda', 'Lambda', (['categorical_loss'], {'output_shape': '(1,)', 'name': '"""categorical_crossentropy"""'}), False, 'from tensorflow.keras.layers import Input, Lambda, BatchNormalization, Conv1D, GRU, TimeDistributed, Activation, Dense, Flatten\n')]
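
The two `add_*_loss` helpers above follow the Keras loss-as-output pattern: a Lambda computes the loss inside the graph, so the wrapped model is typically compiled with an identity-style objective and dummy targets. The training script is not part of this record, so that wiring is an assumption; here is a minimal sketch with a hypothetical tiny base model.

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, Lambda
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.models import Model

n_classes = 4
inp = Input(shape=(8,))
base = Model(inputs=inp, outputs=Dense(n_classes, activation='softmax')(inp))

# wrap the base model so the loss itself becomes the output, as in add_categorical_loss
labels = Input(name='labels', shape=(n_classes,), dtype='float32')
loss = Lambda(lambda args: categorical_crossentropy(args[1], args[0]),
              output_shape=(1,))([base.output, labels])
trainer = Model(inputs=[base.input, labels], outputs=loss)

# the model's output already is the loss, so the compiled objective passes it through
trainer.compile(optimizer='adam', loss=lambda y_true, y_pred: y_pred)

x = np.random.rand(16, 8).astype('float32')
y = tf.keras.utils.to_categorical(np.random.randint(0, n_classes, 16), n_classes)
trainer.fit([x, y], np.zeros(16), epochs=1, verbose=0)
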
veltzerdoron/quants
68a153e2ada2b9b0c223cef61d863378f836ebef
# Auto Encoder (AE) approach

# imports

import unittest

# my class imports

from keras.layers import MaxPooling1D, UpSampling1D
from keras.losses import CategoricalCrossentropy
from tensorflow.python.keras.models import Sequential

from quants.quantifiers import *
from quants.classifiers import SingleLabelClassifier, MultiLabelClassifier, CNNClassifier, AEClassifier

# keras and TF imports
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.python.keras.layers import *

print("TensorFlow version: ", tf.__version__)
print(tf.config.list_physical_devices(device_type='GPU'))
print(tf.config.list_logical_devices())
print("Keras backend: ", tf.compat.v1.keras.backend.backend())

# gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.2)
# sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
# tf.compat.v1.keras.backend.set_session(sess)


class CNNAEClassifier(AEClassifier, CNNClassifier, SingleLabelClassifier):
    """ CNN Auto Encoder based classifier, classifies a single quantifier """

    def build(self):
        """ Convolutional classifier model builder method """
        model = []
        # build the same AE model for each quantifier
        for _ in self._quantifier_names:
            qmodel = Sequential()
            # encoding
            qmodel.add(Input(name='input', shape=(Quantifier.scene_len, len(symbols))))
            qmodel.add(Conv1D(filters=100, kernel_size=1, trainable=False, use_bias=False, name='conv'))
            qmodel.add(MaxPooling1D(pool_size=2, padding='same'))
            # qmodel.add(Conv1D(100, 5, padding='same', activation='relu'))
            # qmodel.add(MaxPooling1D(pool_size=2, padding='same'))
            # decoding
            # qmodel.add(Conv1DTranspose(100, 1, padding='same', activation='relu'))
            # qmodel.add(UpSampling1D(2))
            # qmodel.add(Conv1DTranspose(100, 5, padding='same', activation='relu'))
            qmodel.add(UpSampling1D(2))
            qmodel.add(Conv1D(filters=len(symbols), kernel_size=1, padding='same', activation='sigmoid'))
            qmodel.compile(loss='mse', optimizer='adam')
            model.append(qmodel)
        return model


class DenseAEClassifier(AEClassifier, MultiLabelClassifier):
    """ Dense Auto Encoder based classifier, classifies a single quantifier """

    def build(self):
        """ Dense classifier model builder method """
        model = []
        # build the same AE model for each quantifier
        for _ in self._quantifier_names:
            # encoding
            qmodel = Sequential()
            qmodel.add(Input(name='input', shape=(Quantifier.scene_len,)))
            qmodel.add(Dense(250, name="dense2", activation='relu'))
            qmodel.add(Dense(150, name="dense3", activation='sigmoid'))
            # model.add(Dense(50, name="dense4", activation='sigmoid'))
            # decoding
            # model.add(Dense(150, name="dense5", activation='sigmoid'))
            qmodel.add(Dense(250, name="dense6", activation='relu'))
            qmodel.add(Dense(Quantifier.scene_len, name="dense8", activation='relu'))
            # inputs outputs
            qmodel.compile(loss='mse', optimizer='adam')
            model.append(qmodel)
        return model


class CNNSoftmaxClassifier(CNNClassifier, SingleLabelClassifier):
    """ Convolutional softmax classifier class, classifies a list of quantifiers """

    def build(self):
        const_initializer = tf.keras.initializers.Constant(1.)
        # input layer
        scene = Input(name='input', shape=(Quantifier.scene_len, len(symbols)))
        # conv
        conv = Conv1D(filters=self._num_kernels, kernel_size=1,
                      kernel_initializer=const_initializer,
                      trainable=False, use_bias=False, name='conv')(scene)
        # split the conv output into per-kernel feature maps
        splitters = tf.split(conv, self._num_kernels, axis=2, name='split')
        # flats
        flats = [Flatten(name='flat_{i}'.format(i=i))(splitters[i])
                 for i in range(self._num_kernels)]
        # dropouts after convolutions
        dropouts = [Dropout(rate=0.15, name='dropout_{i}'.format(i=i))(flats[i])
                    for i in range(self._num_kernels)]
        # single neuron summarizers
        denses = [Dense(1, kernel_initializer=const_initializer,
                        use_bias=False, trainable=False,
                        # activation='relu',
                        name='dense_{i}'.format(i=i))(dropouts[i])
                  for i in range(self._num_kernels)]
        # merge feature extractors
        merge = tf.concat(denses, axis=1, name='concatenate')
        # softmax layer
        softmax = Dense(len(self._quantifier_names),
                        kernel_initializer=const_initializer,
                        use_bias=False, trainable=True,
                        activation='softmax', name="softmax")(merge)
        # inputs outputs
        model = Model(inputs=scene, outputs=softmax)
        # set weights
        conv = model.get_layer('conv')
        conv.set_weights([np.array([self._kernels]).transpose().reshape(1, 4, self._num_kernels)])
        print(conv.get_weights())
        # compile model
        model.compile(loss='categorical_crossentropy', optimizer='adam',
                      metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])
        return model


class CNNMultiLabelClassifier(CNNClassifier, MultiLabelClassifier):
    """ Convolutional MultiLabel classifier class, classifies a list of quantifiers """

    def build(self):
        const_initializer = tf.keras.initializers.Constant(1.)
        # input layer
        scene = Input(name='input', shape=(Quantifier.scene_len, len(symbols)))
        # conv
        conv = Conv1D(filters=self._num_kernels, kernel_size=1,
                      kernel_initializer=const_initializer,
                      trainable=False, use_bias=False, name='conv')(scene)
        # split the conv output into per-kernel feature maps
        splitters = tf.split(conv, self._num_kernels, axis=2, name='split')
        # flats
        flats = [Flatten(name='flat_{i}'.format(i=i))(splitters[i])
                 for i in range(self._num_kernels)]
        # dropouts after convolutions
        dropouts = [Dropout(rate=0.15, name='dropout_{i}'.format(i=i))(flats[i])
                    for i in range(self._num_kernels)]
        # single neuron summarizers
        denses = [Dense(1, kernel_initializer=const_initializer,
                        use_bias=False, trainable=False,
                        # activation='relu',
                        name='dense_{i}'.format(i=i))(dropouts[i])
                  for i in range(self._num_kernels)]
        # merge feature extractors
        merge = tf.concat(denses, axis=1, name='concatenate')
        # multi-label layer
        multilabel = Dense(len(self._quantifier_names),
                           kernel_initializer=const_initializer,
                           use_bias=False, trainable=True,
                           activation='sigmoid', name="multi-label")(merge)
        # inputs outputs
        model = Model(inputs=scene, outputs=multilabel)
        # set weights
        conv = model.get_layer('conv')
        conv.set_weights([np.array([self._kernels]).transpose().reshape(1, len(symbols), self._num_kernels)])
        print(conv.get_weights())
        # compile model
        model.compile(loss='mse', optimizer='adam',
                      metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])
        return model


class TestClassifiers(unittest.TestCase):
    def test_CNN_single_label_classifier(self):
        quantifiers = [No(), All(), Some(), Most()]
        # quantifiers = [Between(2, 50), All()]
        kernels = [[1, 0, 0, 0], [1, -1, 0, 0], [0, 1, 0, 0]]
        # kernels = [[1, -1, 0, 0], [0, 1, 0, 0]]
        classifier = CNNSoftmaxClassifier(kernels=kernels, quantifiers=quantifiers)
        classifier.learn(epochs=15, batch_size=1, max_len=100, verbose=1)

    def test_CNN_multi_label_classifier(self):
        quantifiers = [No(), All(), Most(), Some()]
        # quantifiers = [Between(2, 50), All()]
        kernels = [[1, 0, 0, 0], [1, -1, 0, 0], [0, 1, 0, 0]]
        # kernels = [[1, -1, 0, 0], [0, 1, 0, 0]]
        classifier = CNNMultiLabelClassifier(kernels=kernels, quantifiers=quantifiers, other=True)
        classifier.learn(epochs=25, batch_size=1, max_len=100, verbose=1)

    def test_Monotonicity(self):
        most_quantifiers = [Most(), Some()]
        kernels = [[1, -1, 0, 0], [0, 1, 0, 0]]
        CNNSoftmaxClassifier(kernels=kernels, quantifiers=most_quantifiers).learn(epochs=10, verbose=1)

    def test_Every(self):
        all_quantifiers = [All2()]
        kernels = [[1, -1, 0, 0], [0, 1, 0, 0]]
        CNNSoftmaxClassifier(kernels=kernels, quantifiers=all_quantifiers, other=True).learn(
            epochs=10, batch_size=100, max_len=100, verbose=1,
            contrastive_quantifiers=[Most()])

    def test_Dense_AE_classifier(self):
        DenseAEClassifier(quantifiers=[Most(), All()]).learn(batch_size=1, epochs=10, verbose=1)

    def test_CNN_AE_classifier(self):
        kernels = [[1, -1, 0, 0], [0, 1, 0, 0]]
        CNNAEClassifier(quantifiers=[Most(), All()], kernels=kernels).learn(batch_size=1, epochs=10, verbose=1)


if __name__ == '__main__':
    unittest.main()
[ "tensorflow.keras.initializers.Constant", "tensorflow.compat.v1.keras.backend.backend", "tensorflow.keras.metrics.Recall", "tensorflow.concat", "tensorflow.keras.models.Model", "tensorflow.config.list_logical_devices", "tensorflow.python.keras.models.Sequential", "tensorflow.config.list_physical_devices", "tensorflow.keras.metrics.Precision", "tensorflow.split" ]
tests/test_classifiers.py
[(22, 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', ([], {'device_type': '"""GPU"""'}), True, 'import tensorflow as tf\n'), (23, 'tensorflow.config.list_logical_devices', 'tf.config.list_logical_devices', ([], {}), True, 'import tensorflow as tf\n'), (24, 'tensorflow.compat.v1.keras.backend.backend', 'tf.compat.v1.keras.backend.backend', ([], {}), True, 'import tensorflow as tf\n'), (239, 'unittest.main', 'unittest.main', ([], {}), False, 'import unittest\n'), (99, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (109, 'tensorflow.split', 'tf.split', (['conv', 'self._num_kernels'], {'axis': '(2)', 'name': '"""split"""'}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.concat', 'tf.concat', (['denses'], {'axis': '(1)', 'name': '"""concatenate"""'}), True, 'import tensorflow as tf\n'), (134, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'scene', 'outputs': 'softmax'}), False, 'from tensorflow.keras.models import Model\n'), (151, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.split', 'tf.split', (['conv', 'self._num_kernels'], {'axis': '(2)', 'name': '"""split"""'}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.concat', 'tf.concat', (['denses'], {'axis': '(1)', 'name': '"""concatenate"""'}), True, 'import tensorflow as tf\n'), (186, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'scene', 'outputs': 'multilabel'}), False, 'from tensorflow.keras.models import Model\n'), (41, 'tensorflow.python.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.python.keras.models import Sequential\n'), (76, 'tensorflow.python.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.python.keras.models import Sequential\n'), (48, 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)', 'padding': '"""same"""'}), False, 'from keras.layers import MaxPooling1D, UpSampling1D\n'), (56, 'keras.layers.UpSampling1D', 'UpSampling1D', (['(2)'], {}), False, 'from keras.layers import MaxPooling1D, UpSampling1D\n'), (141, 'tensorflow.keras.metrics.Precision', 'tf.keras.metrics.Precision', ([], {}), True, 'import tensorflow as tf\n'), (141, 'tensorflow.keras.metrics.Recall', 'tf.keras.metrics.Recall', ([], {}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.keras.metrics.Precision', 'tf.keras.metrics.Precision', ([], {}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.keras.metrics.Recall', 'tf.keras.metrics.Recall', ([], {}), True, 'import tensorflow as tf\n')]
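
The frozen kernel_size=1 convolutions in the classifiers above act as per-position linear probes over the one-hot symbol channels, so summing one kernel's response over a scene counts symbols: [0, 1, 0, 0] yields the count of symbol 1, and [1, -1, 0, 0] the difference count(0) - count(1). A NumPy sketch of that counting view; the 4-symbol alphabet matches the kernel width used in the tests, but the encoding itself is an assumption about quants.quantifiers.

import numpy as np

scene = np.array([0, 1, 1, 3, 2, 1])    # symbol ids making up one scene
onehot = np.eye(4)[scene]               # shape (scene_len, 4), like the CNN input

k_count1 = np.array([0, 1, 0, 0])       # responds to symbol 1 only
k_diff01 = np.array([1, -1, 0, 0])      # responds with count(0) - count(1)

print((onehot @ k_count1).sum())        # 3: symbol 1 appears three times
print((onehot @ k_diff01).sum())        # -2: one 0 minus three 1s
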
ishine/TensorFlowTTS-1
dd04992f2b05d2845f862f86cfae006b91e3e870
# -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train FastSpeech2."""

import sys

sys.path.append(".")
sys.path.append("/content/TensorFlowTTS/examples/")

from tensorflow_tts.optimizers import AdamWeightDecay
from tensorflow_tts.optimizers import WarmUp
from tensorflow_tts.models import TFFastSpeech2
from tensorflow_tts.configs import FastSpeech2Config
from tensorflow_tts.losses import GANCritic, GanLoss
from fastspeech2.fastspeech2_dataset import CharactorDurationF0EnergyMelDataset
from fastspeech.train_fastspeech import FastSpeechTrainer
from tqdm import tqdm
import tensorflow_tts
import yaml
import tensorflow as tf
import numpy as np
import argparse
import logging
import os
import torch.optim as optim


class FastSpeech2Trainer(FastSpeechTrainer):
    """FastSpeech2 Trainer class based on FastSpeechTrainer."""

    def __init__(self, config, steps=0, epochs=0, is_mixed_precision=False):
        """Initialize trainer.

        Args:
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            config (dict): Config dict loaded from yaml format configuration file.
            is_mixed_precision (bool): Use mixed precision or not.
        """
        super(FastSpeech2Trainer, self).__init__(
            steps=steps,
            epochs=epochs,
            config=config,
            is_mixed_precision=is_mixed_precision,
        )
        # define metrics to aggregate data and log them with tf.summary
        self.list_metrics_name = [
            "duration_loss",
            "f0_loss",
            "energy_loss",
            "mel_loss_before",
            "mel_loss_after",
            "gan_loss",
        ]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()

        self.config = config
        self.enc = GANCritic().to('cuda').double().train()
        self.opt_enc = optim.Adam(self.enc.parameters(), lr=1e-3, betas=(0.0, 0.9))

    def compile(self, model, optimizer):
        super().compile(model, optimizer)
        self.mse = tf.keras.losses.MeanSquaredError()
        self.mae = tf.keras.losses.MeanAbsoluteError()

    def _train_step(self, batch):
        """Train model one step."""
        charactor, duration, f0, energy, mel, bound, prom = batch
        self._one_step_fastspeech2(charactor, duration, f0, energy, mel, bound, prom)

        # update counts
        self.steps += 1
        self.tqdm.update(1)
        self._check_train_finish()

    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[
            tf.TensorSpec([None, None], dtype=tf.int32),
            tf.TensorSpec([None, None], dtype=tf.int32),
            tf.TensorSpec([None, None], dtype=tf.float32),
            tf.TensorSpec([None, None], dtype=tf.float32),
            tf.TensorSpec([None, None, 80], dtype=tf.float32),
            tf.TensorSpec([None, None], dtype=tf.float32),
            tf.TensorSpec([None, None], dtype=tf.float32),
        ],
    )
    def _one_step_fastspeech2(self, charactor, duration, f0, energy, mel, bound, prom):
        with tf.GradientTape() as tape:
            (
                mel_before,
                mel_after,
                duration_outputs,
                f0_outputs,
                energy_outputs,
            ) = self.model(
                charactor,
                attention_mask=tf.math.not_equal(charactor, 0),
                speaker_ids=tf.zeros(shape=[tf.shape(mel)[0]]),
                duration_gts=duration,
                f0_gts=f0,
                energy_gts=energy,
                bounds=bound,
                proms=prom,
                training=True,
            )
            log_duration = tf.math.log(tf.cast(tf.math.add(duration, 1), tf.float32))
            duration_loss = self.mse(log_duration, duration_outputs)
            f0_loss = self.mse(f0, f0_outputs)
            energy_loss = self.mse(energy, energy_outputs)
            mel_loss_before = self.mae(mel, mel_before)
            mel_loss_after = self.mae(mel, mel_after)
            tf.config.run_functions_eagerly(True)
            gan_loss = GanLoss(self.enc, self.opt_enc, mel, mel_after, 16)
            loss = (
                duration_loss
                + f0_loss
                + energy_loss
                + mel_loss_before
                + mel_loss_after
                - gan_loss
            )

            if self.is_mixed_precision:
                scaled_loss = self.optimizer.get_scaled_loss(loss)

        if self.is_mixed_precision:
            scaled_gradients = tape.gradient(
                scaled_loss, self.model.trainable_variables
            )
            gradients = self.optimizer.get_unscaled_gradients(scaled_gradients)
        else:
            gradients = tape.gradient(loss, self.model.trainable_variables)
        self.optimizer.apply_gradients(
            zip(gradients, self.model.trainable_variables), 5.0
        )

        # accumulate loss into metrics
        self.train_metrics["duration_loss"].update_state(duration_loss)
        self.train_metrics["f0_loss"].update_state(f0_loss)
        self.train_metrics["energy_loss"].update_state(energy_loss)
        self.train_metrics["mel_loss_before"].update_state(mel_loss_before)
        self.train_metrics["mel_loss_after"].update_state(mel_loss_after)
        self.train_metrics["gan_loss"].update_state(gan_loss)

    def _eval_epoch(self):
        """Evaluate model one epoch."""
        logging.info(f"(Steps: {self.steps}) Start evaluation.")

        # calculate loss for each batch
        for eval_steps_per_epoch, batch in enumerate(
            tqdm(self.eval_data_loader, desc="[eval]"), 1
        ):
            # eval one step
            charactor, duration, f0, energy, mel, bound, prom = batch
            self._eval_step(charactor, duration, f0, energy, mel, bound, prom)

            if eval_steps_per_epoch <= self.config["num_save_intermediate_results"]:
                # save intermediate results
                self.generate_and_save_intermediate_result(batch)

        logging.info(
            f"(Steps: {self.steps}) Finished evaluation "
            f"({eval_steps_per_epoch} steps per epoch)."
        )

        # average loss
        for key in self.eval_metrics.keys():
            logging.info(
                f"(Steps: {self.steps}) eval_{key} = {self.eval_metrics[key].result():.4f}."
            )

        # record
        self._write_to_tensorboard(self.eval_metrics, stage="eval")

        # reset
        self.reset_states_eval()

    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[
            tf.TensorSpec([None, None], dtype=tf.int32),
            tf.TensorSpec([None, None], dtype=tf.int32),
            tf.TensorSpec([None, None], dtype=tf.float32),
            tf.TensorSpec([None, None], dtype=tf.float32),
            tf.TensorSpec([None, None, 80], dtype=tf.float32),
            tf.TensorSpec([None, None], dtype=tf.float32),
            tf.TensorSpec([None, None], dtype=tf.float32),
        ],
    )
    def _eval_step(self, charactor, duration, f0, energy, mel, bound, prom):
        """Evaluate model one step."""
        (
            mel_before,
            mel_after,
            duration_outputs,
            f0_outputs,
            energy_outputs,
        ) = self.model(
            charactor,
            attention_mask=tf.math.not_equal(charactor, 0),
            speaker_ids=tf.zeros(shape=[tf.shape(mel)[0]]),
            duration_gts=duration,
            f0_gts=f0,
            energy_gts=energy,
            bounds=bound,
            proms=prom,
            training=False,
        )
        log_duration = tf.math.log(tf.cast(tf.math.add(duration, 1), tf.float32))
        duration_loss = self.mse(log_duration, duration_outputs)
        f0_loss = self.mse(f0, f0_outputs)
        energy_loss = self.mse(energy, energy_outputs)
        mel_loss_before = self.mae(mel, mel_before)
        mel_loss_after = self.mae(mel, mel_after)
        # critic loss, mirroring the training step
        gan_loss = GanLoss(self.enc, self.opt_enc, mel, mel_after, 16)

        # accumulate loss into metrics
        self.eval_metrics["duration_loss"].update_state(duration_loss)
        self.eval_metrics["f0_loss"].update_state(f0_loss)
        self.eval_metrics["energy_loss"].update_state(energy_loss)
        self.eval_metrics["mel_loss_before"].update_state(mel_loss_before)
        self.eval_metrics["mel_loss_after"].update_state(mel_loss_after)
        self.eval_metrics["gan_loss"].update_state(gan_loss)

    def _check_log_interval(self):
        """Log to tensorboard."""
        if self.steps % self.config["log_interval_steps"] == 0:
            for metric_name in self.list_metrics_name:
                logging.info(
                    f"(Step: {self.steps}) train_{metric_name} = {self.train_metrics[metric_name].result():.4f}."
                )
            self._write_to_tensorboard(self.train_metrics, stage="train")

            # reset
            self.reset_states_train()

    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[
            tf.TensorSpec([None, None], dtype=tf.int32),
            tf.TensorSpec([None, None], dtype=tf.int32),
            tf.TensorSpec([None, None], dtype=tf.float32),
            tf.TensorSpec([None, None], dtype=tf.float32),
            tf.TensorSpec([None, None, 80], dtype=tf.float32),
            tf.TensorSpec([None, None], dtype=tf.float32),
            tf.TensorSpec([None, None], dtype=tf.float32),
        ],
    )
    def predict(self, charactor, duration, f0, energy, mel, bound, prom):
        """Predict."""
        mel_before, mel_after, _, _, _ = self.model(
            charactor,
            attention_mask=tf.math.not_equal(charactor, 0),
            speaker_ids=tf.zeros(shape=[tf.shape(mel)[0]]),
            duration_gts=duration,
            f0_gts=f0,
            energy_gts=energy,
            bounds=bound,
            proms=prom,
            training=False,
        )
        return mel_before, mel_after

    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result."""
        import matplotlib.pyplot as plt

        # unpack input.
        charactor, duration, f0, energy, mel, bound, prom = batch

        # predict with tf.function.
        masked_mel_before, masked_mel_after = self.predict(
            charactor, duration, f0, energy, mel, bound, prom
        )

        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        for idx, (mel_gt, mel_pred_before, mel_pred_after) in enumerate(
            zip(mel, masked_mel_before, masked_mel_after), 1
        ):
            mel_gt = tf.reshape(mel_gt, (-1, 80)).numpy()  # [length, 80]
            mel_pred_before = tf.reshape(
                mel_pred_before, (-1, 80)
            ).numpy()  # [length, 80]
            mel_pred_after = tf.reshape(
                mel_pred_after, (-1, 80)
            ).numpy()  # [length, 80]

            # plot figure and save it
            figname = os.path.join(dirname, f"{idx}.png")
            fig = plt.figure(figsize=(10, 8))
            ax1 = fig.add_subplot(311)
            ax2 = fig.add_subplot(312)
            ax3 = fig.add_subplot(313)
            im = ax1.imshow(np.rot90(mel_gt), aspect="auto", interpolation="none")
            ax1.set_title("Target Mel-Spectrogram")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax1)
            ax2.set_title("Predicted Mel-before-Spectrogram")
            im = ax2.imshow(
                np.rot90(mel_pred_before), aspect="auto", interpolation="none"
            )
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax2)
            ax3.set_title("Predicted Mel-after-Spectrogram")
            im = ax3.imshow(
                np.rot90(mel_pred_after), aspect="auto", interpolation="none"
            )
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax3)
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()


def main():
    """Run training process."""
    tf.config.run_functions_eagerly(True)
    parser = argparse.ArgumentParser(
        description="Train FastSpeech (See detail in tensorflow_tts/bin/train-fastspeech.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm-mels for train or raw."
    )
    parser.add_argument(
        "--f0-stat",
        default="./dump/stats_f0.npy",
        type=str,
        required=True,
        help="f0-stat path.",
    )
    parser.add_argument(
        "--energy-stat",
        default="./dump/stats_energy.npy",
        type=str,
        required=True,
        help="energy-stat path.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    args = parser.parse_args()

    # set mixed precision config
    if args.mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})

    args.mixed_precision = bool(args.mixed_precision)
    args.use_norm = bool(args.use_norm)

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        raise ValueError("Please specify --valid-dir")

    # load and save config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    # get dataset
    if config["remove_short_samples"]:
        mel_length_threshold = config["mel_length_threshold"]
    else:
        mel_length_threshold = None

    if config["format"] == "npy":
        charactor_query = "*-ids.npy"
        mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
        duration_query = "*-durations.npy"
        f0_query = "*-raw-f0.npy"
        energy_query = "*-raw-energy.npy"
    else:
        raise ValueError("Only npy are supported.")

    # define train/valid dataset
    train_dataset = CharactorDurationF0EnergyMelDataset(
        root_dir=args.train_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        duration_query=duration_query,
        f0_query=f0_query,
        energy_query=energy_query,
        f0_stat=args.f0_stat,
        energy_stat=args.energy_stat,
        mel_length_threshold=mel_length_threshold,
        return_utt_id=False,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"],
    )
    # print(list(train_dataset)[0])
    valid_dataset = CharactorDurationF0EnergyMelDataset(
        root_dir=args.dev_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        duration_query=duration_query,
        f0_query=f0_query,
        energy_query=energy_query,
        f0_stat=args.f0_stat,
        energy_stat=args.energy_stat,
        mel_length_threshold=mel_length_threshold,
        return_utt_id=False,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"],
    )

    fastspeech = TFFastSpeech2(config=FastSpeech2Config(**config["fastspeech_params"]))
    fastspeech._build()
    fastspeech.summary()

    # define trainer
    trainer = FastSpeech2Trainer(
        config=config, steps=0, epochs=0, is_mixed_precision=args.mixed_precision
    )

    # AdamW for fastspeech
    learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
        decay_steps=config["optimizer_params"]["decay_steps"],
        end_learning_rate=config["optimizer_params"]["end_learning_rate"],
    )

    learning_rate_fn = WarmUp(
        initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
        decay_schedule_fn=learning_rate_fn,
        warmup_steps=int(
            config["train_max_steps"] * config["optimizer_params"]["warmup_proportion"]
        ),
    )

    optimizer = AdamWeightDecay(
        learning_rate=learning_rate_fn,
        weight_decay_rate=config["optimizer_params"]["weight_decay"],
        beta_1=0.9,
        beta_2=0.98,
        epsilon=1e-6,
        exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
    )

    # compile trainer
    trainer.compile(model=fastspeech, optimizer=optimizer)

    # start training
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")


if __name__ == "__main__":
    main()
[ "numpy.rot90", "tensorflow.math.add", "tensorflow.keras.optimizers.schedules.PolynomialDecay", "matplotlib.pyplot.tight_layout", "tensorflow.shape", "tensorflow.keras.losses.MeanSquaredError", "matplotlib.pyplot.figure", "tensorflow.reshape", "tensorflow.config.run_functions_eagerly", "matplotlib.pyplot.savefig", "tensorflow.GradientTape", "tensorflow.config.optimizer.set_experimental_options", "matplotlib.pyplot.close", "tensorflow.math.not_equal", "tensorflow.TensorSpec", "tensorflow.keras.losses.MeanAbsoluteError" ]
examples/fastspeech2/train_fastspeech2Gan.py
[(18, 'sys.path.append', 'sys.path.append', (['"""."""'], {}), False, 'import sys\n'), (19, 'sys.path.append', 'sys.path.append', (['"""/content/TensorFlowTTS/examples/"""'], {}), False, 'import sys\n'), (336, 'tensorflow.config.run_functions_eagerly', 'tf.config.run_functions_eagerly', (['(True)'], {}), True, 'import tensorflow as tf\n'), (338, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train FastSpeech (See detail in tensorflow_tts/bin/train-fastspeech.py)"""'}), False, 'import argparse\n'), (507, 'tensorflow.keras.optimizers.schedules.PolynomialDecay', 'tf.keras.optimizers.schedules.PolynomialDecay', ([], {'initial_learning_rate': "config['optimizer_params']['initial_learning_rate']", 'decay_steps': "config['optimizer_params']['decay_steps']", 'end_learning_rate': "config['optimizer_params']['end_learning_rate']"}), True, 'import tensorflow as tf\n'), (521, 'tensorflow_tts.optimizers.AdamWeightDecay', 'AdamWeightDecay', ([], {'learning_rate': 'learning_rate_fn', 'weight_decay_rate': "config['optimizer_params']['weight_decay']", 'beta_1': '(0.9)', 'beta_2': '(0.98)', 'epsilon': '(1e-06)', 'exclude_from_weight_decay': "['LayerNorm', 'layer_norm', 'bias']"}), False, 'from tensorflow_tts.optimizers import AdamWeightDecay\n'), (85, 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.keras.losses.MeanAbsoluteError', 'tf.keras.losses.MeanAbsoluteError', ([], {}), True, 'import tensorflow as tf\n'), (166, 'logging.info', 'logging.info', (['f"""(Steps: {self.steps}) Start evaluation."""'], {}), False, 'import logging\n'), (180, 'logging.info', 'logging.info', (['f"""(Steps: {self.steps}) Finished evaluation ({eval_steps_per_epoch} steps per epoch)."""'], {}), False, 'import logging\n'), (296, 'os.path.join', 'os.path.join', (["self.config['outdir']", 'f"""predictions/{self.steps}steps"""'], {}), False, 'import os\n'), (399, 'tensorflow.config.optimizer.set_experimental_options', 'tf.config.optimizer.set_experimental_options', (["{'auto_mixed_precision': True}"], {}), True, 'import tensorflow as tf\n'), (406, 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'stream': 'sys.stdout', 'format': '"""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"""'}), False, 'import logging\n'), (426, 'os.path.exists', 'os.path.exists', (['args.outdir'], {}), False, 'import os\n'), (427, 'os.makedirs', 'os.makedirs', (['args.outdir'], {}), False, 'import os\n'), (437, 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.Loader'}), False, 'import yaml\n'), (441, 'yaml.dump', 'yaml.dump', (['config', 'f'], {'Dumper': 'yaml.Dumper'}), False, 'import yaml\n'), (443, 'logging.info', 'logging.info', (['f"""{key} = {value}"""'], {}), False, 'import logging\n'), (112, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (136, 'tensorflow.config.run_functions_eagerly', 'tf.config.run_functions_eagerly', (['(True)'], {}), True, 'import tensorflow as tf\n'), (137, 'tensorflow_tts.losses.GanLoss', 'GanLoss', (['self.enc', 'self.opt_enc', 'mel', 'mel_after', '(16)'], {}), False, 'from tensorflow_tts.losses import GANCritic, GanLoss\n'), (170, 'tqdm.tqdm', 'tqdm', (['self.eval_data_loader'], {'desc': '"""[eval]"""'}), False, 'from tqdm import tqdm\n'), (297, 'os.path.exists', 'os.path.exists', (['dirname'], {}), False, 'import os\n'), (298, 'os.makedirs', 'os.makedirs', (['dirname'], {}), False, 'import os\n'), (312, 
'os.path.join', 'os.path.join', (['dirname', 'f"""{idx}.png"""'], {}), False, 'import os\n'), (313, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), True, 'import matplotlib.pyplot as plt\n'), (330, 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (331, 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname'], {}), True, 'import matplotlib.pyplot as plt\n'), (332, 'matplotlib.pyplot.close', 'plt.close', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (412, 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'stream': 'sys.stdout', 'format': '"""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"""'}), False, 'import logging\n'), (418, 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARN', 'stream': 'sys.stdout', 'format': '"""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"""'}), False, 'import logging\n'), (423, 'logging.warning', 'logging.warning', (['"""Skip DEBUG/INFO messages"""'], {}), False, 'import logging\n'), (440, 'os.path.join', 'os.path.join', (['args.outdir', '"""config.yml"""'], {}), False, 'import os\n'), (461, 'fastspeech2.fastspeech2_dataset.CharactorDurationF0EnergyMelDataset', 'CharactorDurationF0EnergyMelDataset', ([], {'root_dir': 'args.train_dir', 'charactor_query': 'charactor_query', 'mel_query': 'mel_query', 'duration_query': 'duration_query', 'f0_query': 'f0_query', 'energy_query': 'energy_query', 'f0_stat': 'args.f0_stat', 'energy_stat': 'args.energy_stat', 'mel_length_threshold': 'mel_length_threshold', 'return_utt_id': '(False)'}), False, 'from fastspeech2.fastspeech2_dataset import CharactorDurationF0EnergyMelDataset\n'), (480, 'fastspeech2.fastspeech2_dataset.CharactorDurationF0EnergyMelDataset', 'CharactorDurationF0EnergyMelDataset', ([], {'root_dir': 'args.dev_dir', 'charactor_query': 'charactor_query', 'mel_query': 'mel_query', 'duration_query': 'duration_query', 'f0_query': 'f0_query', 'energy_query': 'energy_query', 'f0_stat': 'args.f0_stat', 'energy_stat': 'args.energy_stat', 'mel_length_threshold': 'mel_length_threshold', 'return_utt_id': '(False)'}), False, 'from fastspeech2.fastspeech2_dataset import CharactorDurationF0EnergyMelDataset\n'), (497, 'tensorflow_tts.configs.FastSpeech2Config', 'FastSpeech2Config', ([], {}), False, 'from tensorflow_tts.configs import FastSpeech2Config\n'), (543, 'logging.info', 'logging.info', (['f"""Successfully saved checkpoint @ {trainer.steps}steps."""'], {}), False, 'import logging\n'), (102, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (103, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None, 80]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (107, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (219, 'tensorflow.math.not_equal', 'tf.math.not_equal', (['charactor', '(0)'], {}), 
True, 'import tensorflow as tf\n'), (228, 'tensorflow.math.add', 'tf.math.add', (['duration', '(1)'], {}), True, 'import tensorflow as tf\n'), (200, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (201, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (203, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (204, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None, 80]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (205, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (206, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (272, 'tensorflow.math.not_equal', 'tf.math.not_equal', (['charactor', '(0)'], {}), True, 'import tensorflow as tf\n'), (259, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (260, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (261, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (262, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (263, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None, 80]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (264, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (265, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, None]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (317, 'numpy.rot90', 'np.rot90', (['mel_gt'], {}), True, 'import numpy as np\n'), (322, 'numpy.rot90', 'np.rot90', (['mel_pred_before'], {}), True, 'import numpy as np\n'), (327, 'numpy.rot90', 'np.rot90', (['mel_pred_after'], {}), True, 'import numpy as np\n'), (538, 'os.path.join', 'os.path.join', (["config['outdir']", '"""checkpoints/"""'], {}), False, 'import os\n'), (121, 'tensorflow.math.not_equal', 'tf.math.not_equal', (['charactor', '(0)'], {}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.math.add', 'tf.math.add', (['duration', '(1)'], {}), True, 'import tensorflow as tf\n'), (303, 'tensorflow.reshape', 'tf.reshape', (['mel_gt', '(-1, 80)'], {}), True, 'import tensorflow as tf\n'), (304, 'tensorflow.reshape', 'tf.reshape', (['mel_pred_before', '(-1, 80)'], {}), True, 'import tensorflow as tf\n'), (307, 'tensorflow.reshape', 'tf.reshape', (['mel_pred_after', '(-1, 80)'], {}), True, 'import tensorflow as tf\n'), (76, 'tensorflow_tts.losses.GANCritic', 'GANCritic', ([], {}), False, 'from tensorflow_tts.losses import GANCritic, GanLoss\n'), (220, 'tensorflow.shape', 'tf.shape', (['mel'], {}), True, 'import tensorflow as tf\n'), (273, 'tensorflow.shape', 'tf.shape', (['mel'], {}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.shape', 'tf.shape', (['mel'], {}), True, 'import tensorflow as tf\n')]
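The optimizer block at the end of the script above wraps a PolynomialDecay schedule in tensorflow_tts's WarmUp before handing it to AdamWeightDecay. As a minimal sketch of what such a wrapper does, here is a linear warm-up implemented against stock Keras only; the hyper-parameter values stand in for config["optimizer_params"], and the real WarmUp class may differ in detail.

import tensorflow as tf


class LinearWarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear warm-up followed by an arbitrary decay schedule (illustrative)."""

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.decay_schedule_fn = decay_schedule_fn
        self.warmup_steps = warmup_steps

    def __call__(self, step):
        step = tf.cast(step, tf.float32)
        warmup_steps = tf.cast(self.warmup_steps, tf.float32)
        # Ramp linearly to the target rate, then defer to the decay schedule.
        warmup_lr = self.initial_learning_rate * (step / warmup_steps)
        return tf.cond(
            step < warmup_steps,
            lambda: warmup_lr,
            lambda: self.decay_schedule_fn(step - warmup_steps),
        )


# Hypothetical hyper-parameters standing in for config["optimizer_params"].
decay = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=1e-3, decay_steps=150_000, end_learning_rate=5e-5
)
schedule = LinearWarmUp(1e-3, decay, warmup_steps=4_000)
optimizer = tf.keras.optimizers.Adam(learning_rate=schedule)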
ustyuzhaninky/OSAR-keras
0eacf8d1e49d6e9a0f9ec82799169c4720e67ac2
# coding=utf-8
# Copyright 2020 Konstantin Ustyuzhanin.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Konstantin Ustyuzhanin
#

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
from tensorflow.python.framework import tensor_shape
from tensorflow.keras import backend as K
from tensorflow.python.ops import nn

__all__ = ['HelixMemory']


class HelixMemory(tf.keras.layers.Layer):
    """Helix memory unit.

    # Arguments
        batch_size: int > 0. Maximum batch size.
        memory_len: int > 0. Maximum memory length.
        n_turns: int >= `compression_rate` + 1. Number of helix turns.
        compression_rate: int > 0. Rate of compression for old memories.
            WARNING: `sequence_length` should be divisible by
            `compression_rate` at least `n_turns` times.
        mode: ['avg', 'max', 'conv'] - mode of compression (default - 'avg').
            - 'avg': Average 1d pooling;
            - 'max': Max pooling 1d;
            - 'conv': 1d convolution with a filter.
            WARNING: with `mode='conv'` the layer is trainable.

    # Input shape
        3D tensor with shape: `(batch_size, sequence_length, output_dim)`.

    # Output shape
        3D tensor with shape: `(batch_size, compression_rate^n_turns + memory_len, output_dim)`.

    # References
        - [Transformer-XL](https://arxiv.org/pdf/1901.02860.pdf)
        - [Compressive Transformer](https://arxiv.org/abs/1911.05507.pdf)

    """

    def __init__(
            self,
            batch_size,
            memory_len,
            n_turns,
            compression_rate=2,
            mode='avg',
            initializer='glorot_uniform',
            regularizer='l2',
            constraint=None,
            **kwargs):
        kwargs['batch_size'] = batch_size
        super(HelixMemory, self).__init__(**kwargs)

        if n_turns <= compression_rate:
            raise AttributeError('Value of `n_turns` should be at least `compression_rate`+1')
        if not mode.lower() in ['avg', 'max', 'conv']:
            raise AttributeError(f'Mode type `{mode}` is not supported.')
        self.supports_masking = True
        self.stateful = True

        self.batch_size = batch_size
        self.memory_len = memory_len
        self.n_turns = n_turns + 1
        self.compression_rate = compression_rate
        self.mode = mode.lower()

        self.initializer = tf.keras.initializers.get(initializer)
        self.regularizer = tf.keras.regularizers.get(regularizer)
        self.constraint = tf.keras.constraints.get(constraint)

        self.k = 0
        self.memory = None

    def build(self, input_shape):
        output_dim = input_shape[-1]
        n_conv = sum(pow(self.compression_rate, i) for i in range(1, self.n_turns))
        self.memory = self.add_weight(
            shape=(self.batch_size, self.memory_len + n_conv, output_dim),
            initializer='glorot_uniform',
            trainable=False,
            name=f'{self.name}-memory',
        )
        if self.mode == 'conv':
            _out_channels = tf.cast(
                pow(self.compression_rate, self.n_turns - 1), tf.int32)
            self.filters = self.add_weight(
                name=f'{self.name}-filter',
                shape=[self.compression_rate, output_dim, output_dim],
                dtype=tf.float32,
                initializer=self.initializer,
                regularizer=self.regularizer,
                constraint=self.constraint,
            )
        self.built = True
        super(HelixMemory, self).build(input_shape)

    def compute_output_shape(self, input_shape):
        return self.memory.shape

    def compute_mask(self, inputs, mask=None):
        if mask is None:
            return None
        return mask[0]

    def _compress(self, inputs):
        # padded_input = K.tile(inputs, [1, 1, inputs.shape[1]])
        output_dim = inputs.shape[-1]
        rate = inputs.shape[1] if inputs.shape[1] < self.compression_rate else self.compression_rate
        if inputs.shape[1] < self.compression_rate:
            inputs = K.tile(
                inputs, (1, self.compression_rate // inputs.shape[1], 1))

        if self.mode == 'conv':
            compressed = nn.conv1d(inputs,
                                   self.filters,
                                   stride=rate,
                                   padding='VALID',
                                   name='compressed_conv1d')
        elif self.mode == 'avg':
            compressed = nn.avg_pool1d(inputs, rate, rate, padding='VALID')
        elif self.mode == 'max':
            compressed = nn.max_pool1d(inputs, rate, rate, padding='VALID')

        return compressed

    def _helix(self, inputs):
        output_dim = inputs.shape[-1]
        n_long_mem = sum(pow(self.compression_rate, i)
                         for i in range(1, self.n_turns + 1 - self.k))
        turn_length = pow(self.compression_rate, self.n_turns - self.k)
        add_length = inputs.shape[1] - n_long_mem
        # turn_start = inputs.shape[1] - turn_length - add_length

        # Turn extraction, compression, slice and build
        helix_k_turn_old = inputs[:, -turn_length - add_length:-add_length, :]
        compression = self._compress(helix_k_turn_old)
        compression_length = compression.shape[1]
        other_helix = inputs[:, :-turn_length - add_length, :]
        new_other_helix = K.concatenate(
            [other_helix, compression],
            axis=1,
        )
        helix_k_turn_prep = inputs[:, -turn_length:, :]

        return new_other_helix, helix_k_turn_prep

    def call(self, inputs, **kwargs):
        self.k = 0
        if len(inputs.shape) < 3:
            raise ValueError(
                'The dimension of the input vector'
                ' should be at least 3D: `(batch_size, timesteps, features)`')

        if tensor_shape.dimension_value(inputs.shape[-1]) is None:
            raise ValueError('The last dimension of the first tensor of the inputs'
                             'should be defined. Found `None`.')

        batch_size = inputs.shape[0]
        output_dim = inputs.shape[2]
        seq_len = inputs.shape[1]
        long_mem_end = sum(pow(self.compression_rate, i) for i in range(1, self.n_turns))
        short_mem_start = pow(self.compression_rate, self.n_turns)

        # Build new memory
        new_memory = K.concatenate([self.memory, inputs], axis=1)

        # Separating short and long-term memories
        short_memory = new_memory[:, -self.memory_len:, :]
        long_memory = new_memory[:, :-self.memory_len, :]

        # Shrinking fallout part for the zero turn of the helix
        long_memory = long_memory[:, :-seq_len, :]
        fallout = short_memory[:, -seq_len:, :]

        sh_fallout = self._compress(fallout)
        long_memory = K.concatenate(
            (long_memory, sh_fallout),
            axis=1,
        )

        new_helix = long_memory

        def body(new_helix):
            self.k += 1
            new_helix, helix_part = self._helix(new_helix)
            # Building the helix
            return new_helix, helix_part

        for i in range(1, self.n_turns):
            # Updating the helix
            new_helix, helix_part = body(new_helix)

            # Re-joining the updated helix turn with the rest of the memory
            if i == 1:
                new_mem = K.concatenate([
                    helix_part,
                    short_memory,
                ], axis=1)
            elif i == self.n_turns - 1:
                new_mem = K.concatenate([
                    helix_part,
                    new_mem,
                ], axis=1)
            else:
                new_mem = K.concatenate([
                    helix_part,
                    new_mem,
                ], axis=1)

        self.k = 0
        self.add_update(K.update(self.memory, new_mem))

        return new_mem

    def get_config(self):
        config = {
            'initializer': tf.keras.initializers.serialize(self.initializer),
            'regularizer': tf.keras.regularizers.serialize(self.regularizer),
            'constraint': tf.keras.constraints.serialize(self.constraint),
            'compression_rate': self.compression_rate,
            'mode': self.mode.lower(),
            'memory_len': self.memory_len,
            'n_turns': self.n_turns,
            'batch_size': self.batch_size,
        }
        base_config = super(HelixMemory, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
[ "tensorflow.keras.backend.tile", "tensorflow.python.ops.nn.conv1d", "tensorflow.python.ops.nn.avg_pool1d", "tensorflow.keras.constraints.get", "tensorflow.keras.constraints.serialize", "tensorflow.keras.regularizers.get", "tensorflow.keras.initializers.serialize", "tensorflow.keras.backend.concatenate", "tensorflow.keras.regularizers.serialize", "tensorflow.python.ops.nn.max_pool1d", "tensorflow.python.framework.tensor_shape.dimension_value", "tensorflow.keras.backend.update", "tensorflow.keras.initializers.get" ]
OSAR/helix_memory.py
[(91, 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', (['initializer'], {}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.keras.regularizers.get', 'tf.keras.regularizers.get', (['regularizer'], {}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.keras.constraints.get', 'tf.keras.constraints.get', (['constraint'], {}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.keras.backend.concatenate', 'K.concatenate', (['[other_helix, compression]'], {'axis': '(1)'}), True, 'from tensorflow.keras import backend as K\n'), (199, 'tensorflow.keras.backend.concatenate', 'K.concatenate', (['[self.memory, inputs]'], {'axis': '(1)'}), True, 'from tensorflow.keras import backend as K\n'), (211, 'tensorflow.keras.backend.concatenate', 'K.concatenate', (['(long_memory, sh_fallout)'], {'axis': '(1)'}), True, 'from tensorflow.keras import backend as K\n'), (140, 'tensorflow.keras.backend.tile', 'K.tile', (['inputs', '(1, self.compression_rate // inputs.shape[1], 1)'], {}), True, 'from tensorflow.keras import backend as K\n'), (144, 'tensorflow.python.ops.nn.conv1d', 'nn.conv1d', (['inputs', 'self.filters'], {'stride': 'rate', 'padding': '"""VALID"""', 'name': '"""compressed_conv1d"""'}), False, 'from tensorflow.python.ops import nn\n'), (187, 'tensorflow.python.framework.tensor_shape.dimension_value', 'tensor_shape.dimension_value', (['inputs.shape[-1]'], {}), False, 'from tensorflow.python.framework import tensor_shape\n'), (249, 'tensorflow.keras.backend.update', 'K.update', (['self.memory', 'new_mem'], {}), True, 'from tensorflow.keras import backend as K\n'), (254, 'tensorflow.keras.initializers.serialize', 'tf.keras.initializers.serialize', (['self.initializer'], {}), True, 'import tensorflow as tf\n'), (255, 'tensorflow.keras.regularizers.serialize', 'tf.keras.regularizers.serialize', (['self.regularizer'], {}), True, 'import tensorflow as tf\n'), (256, 'tensorflow.keras.constraints.serialize', 'tf.keras.constraints.serialize', (['self.constraint'], {}), True, 'import tensorflow as tf\n'), (149, 'tensorflow.python.ops.nn.avg_pool1d', 'nn.avg_pool1d', (['inputs', 'rate', 'rate'], {'padding': '"""VALID"""'}), False, 'from tensorflow.python.ops import nn\n'), (230, 'tensorflow.keras.backend.concatenate', 'K.concatenate', (['[helix_part, short_memory]'], {'axis': '(1)'}), True, 'from tensorflow.keras import backend as K\n'), (152, 'tensorflow.python.ops.nn.max_pool1d', 'nn.max_pool1d', (['inputs', 'rate', 'rate'], {'padding': '"""VALID"""'}), False, 'from tensorflow.python.ops import nn\n'), (236, 'tensorflow.keras.backend.concatenate', 'K.concatenate', (['[helix_part, new_mem]'], {'axis': '(1)'}), True, 'from tensorflow.keras import backend as K\n'), (242, 'tensorflow.keras.backend.concatenate', 'K.concatenate', (['[helix_part, new_mem]'], {'axis': '(1)'}), True, 'from tensorflow.keras import backend as K\n')]
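A minimal usage sketch for the layer above, assuming it is importable as OSAR.helix_memory.HelixMemory per the file path in this record. The sizes are chosen so the sequence length is divisible by compression_rate the required number of times, as the docstring warns; run eagerly, the call returns the concatenation of the compressed helix turns with the short-term memory.

import tensorflow as tf
from OSAR.helix_memory import HelixMemory  # path as recorded above

BATCH, SEQ_LEN, DIM = 4, 8, 16

memory = HelixMemory(
    batch_size=BATCH,
    memory_len=32,
    n_turns=3,           # must exceed compression_rate
    compression_rate=2,
    mode="avg",          # 'avg', 'max', or 'conv' ('conv' adds trainable filters)
)

inputs = tf.random.normal((BATCH, SEQ_LEN, DIM))
out = memory(inputs)  # stateful: also updates the layer's internal memory
print(out.shape)      # (BATCH, memory rows, DIM); exact length follows the docstring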
Shivam9091/GamestonkTerminal
0368a3b25ab574d3e19ddbddaab0128716dbe61b
""" Neural Networks View""" __docformat__ = "numpy" from typing import List, Any import traceback import numpy as np import pandas as pd from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.models import Sequential from tensorflow.keras.layers import ( LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten, ) from tensorflow.keras.optimizers import ( Adam, Adamax, Adagrad, Adadelta, Ftrl, Nadam, RMSprop, SGD, ) from gamestonk_terminal.helper_funcs import get_next_stock_market_days from gamestonk_terminal.common.prediction_techniques.pred_helper import ( prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction, ) from gamestonk_terminal import config_neural_network_models as cfg_nn_models optimizers = { "Adam": Adam, "Adagrad": Adagrad, "Adadelta": Adadelta, "Adamax": Adamax, "Ftrl": Ftrl, "Nadam": Nadam, "Rmsprop": RMSprop, "Ggd": SGD, } if cfg_nn_models.Early_Stop_Patience: es = EarlyStopping(monitor="val_loss", patience=cfg_nn_models.Early_Stop_Patience) else: # Set patience to very long value es = EarlyStopping(monitor="val_loss", patience=1000) def build_neural_network_model( Recurrent_Neural_Network: List[Any], n_inputs: int, n_days: int ) -> Sequential: """ Builds neural net from config_neural_network_models.py Parameters ---------- Recurrent_Neural_Network: List[Any] List of layers with parameters as a dictionary in the file n_inputs: int Number of days that will be fed into the NN n_days: int Number of days the NN wants to predict Returns ------- model: Sequential Keras sequential model with layers from the file """ model = Sequential() for idx_layer, d_layer in enumerate(Recurrent_Neural_Network): # Recurrent Neural Network if str(*d_layer) == "SimpleRNN": # Is this the input layer? If so, define input_shape if idx_layer == 0: model.add(SimpleRNN(**d_layer["SimpleRNN"], input_shape=(n_inputs, 1))) # Is this the last output layer? If so, set units to prediction days elif idx_layer == (len(Recurrent_Neural_Network) - 1): model.add(SimpleRNN(**d_layer["SimpleRNN"], units=n_days)) else: model.add(SimpleRNN(**d_layer["SimpleRNN"])) # Long-Short Term-Memory elif str(*d_layer) == "LSTM": # Is this the input layer? If so, define input_shape if idx_layer == 0: model.add(LSTM(**d_layer["LSTM"], input_shape=(n_inputs, 1))) # Is this the last output layer? If so, set units to prediction days elif idx_layer == (len(Recurrent_Neural_Network) - 1): model.add(LSTM(**d_layer["LSTM"], units=n_days)) else: model.add(LSTM(**d_layer["LSTM"])) # Dense (Simple Neuron) elif str(*d_layer) == "Dense": # Is this the input layer? If so, define input_shape if idx_layer == 0: model.add(Dense(**d_layer["Dense"], input_dim=n_inputs)) # Is this the last output layer? 
If so, set units to prediction days elif idx_layer == (len(Recurrent_Neural_Network) - 1): model.add(Dense(**d_layer["Dense"], units=n_days)) else: model.add(Dense(**d_layer["Dense"])) # Conv1D Layer elif str(*d_layer) == "Conv1D": if idx_layer == 0: model.add(Conv1D(**d_layer["Conv1D"], input_shape=(n_inputs, 1))) else: model.add(Conv1D(**d_layer["Conv1D"])) # Max Pooling Layer for after Conv Layer elif str(*d_layer) == "MaxPool1D": model.add(MaxPool1D(**d_layer["MaxPool1D"])) # Allow for if user wants to do average pooling elif str(*d_layer) == "AvgPool1D": model.add(AvgPool1D(**d_layer["AvgPool1D"])) # Dropout (Regularization) elif str(*d_layer) == "Dropout": model.add(Dropout(**d_layer["Dropout"])) # Flatten layer for Convolutions elif str(*d_layer) == "Flatten": model.add(Flatten()) else: print(f"Incorrect neuron type: {str(*d_layer)}") return model def mlp(other_args: List[str], s_ticker: str, df_stock: pd.DataFrame): """ Train a multi-layer perceptron model Parameters ---------- other_args: List[str] Argparse Arguments s_ticker: str Ticker df_stock: pd.DataFrame Loaded stock dataframe """ try: ns_parser = parse_args( prog="mlp", description="""Multi-Layered-Perceptron. """, other_args=other_args, ) if not ns_parser: return ( X_train, X_valid, y_train, y_valid, _, _, _, y_dates_valid, forecast_data_input, dates_forecast_input, scaler, is_error, ) = prepare_scale_train_valid_test(df_stock["Adj Close"], ns_parser) if is_error: return print( f"Training on {X_train.shape[0]} sequences of length {X_train.shape[1]}. Using {X_valid.shape[0]} sequences " f" of length {X_valid.shape[1]} for validation. Model will run {ns_parser.n_loops} loops" ) future_dates = get_next_stock_market_days( dates_forecast_input[-1], n_next_days=ns_parser.n_days ) preds = np.zeros((ns_parser.n_loops, X_valid.shape[0], ns_parser.n_days)) forecast_data = np.zeros((ns_parser.n_loops, ns_parser.n_days)) for i in range(ns_parser.n_loops): # Build Neural Network model model = build_neural_network_model( cfg_nn_models.Long_Short_Term_Memory, ns_parser.n_inputs, ns_parser.n_days, ) model.compile( optimizer=optimizers[cfg_nn_models.Optimizer]( learning_rate=ns_parser.lr ), loss=cfg_nn_models.Loss, ) model.fit( X_train.reshape(X_train.shape[0], X_train.shape[1], 1), y_train, epochs=ns_parser.n_epochs, verbose=True, batch_size=ns_parser.n_batch_size, validation_data=( X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1), y_valid, ), callbacks=[es], ) preds[i] = model.predict( X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1) ).reshape(X_valid.shape[0], ns_parser.n_days) forecast_data[i] = forecast( forecast_data_input, future_dates, model, scaler ).values.flat forecast_data_df = pd.DataFrame(forecast_data.T, index=future_dates) if ns_parser.n_loops > 1: forecast_data_df["Median"] = forecast_data_df.median(axis=1) print_pretty_prediction( forecast_data_df["Median"], df_stock["Adj Close"].values[-1] ) else: print_pretty_prediction( forecast_data_df[0], df_stock["Adj Close"].values[-1] ) plot_data_predictions( df_stock, np.median(preds, axis=0), y_valid, y_dates_valid, scaler, f"MLP Model on {s_ticker}", forecast_data_df, ns_parser.n_loops, ) print("") except Exception as e: print(e) traceback.print_exc() print("") finally: restore_env() def rnn(other_args: List[str], s_ticker: str, df_stock: pd.DataFrame): """ Train a Recurrent Neural Network (rnn) Parameters ---------- other_args:List[str] Argparse arguments s_ticker: str Stock ticker df_stock: pd.DataFrame Dataframe of stock prices """ try: ns_parser = parse_args( 
prog="rnn", description="""Recurrent Neural Network. """, other_args=other_args, ) if not ns_parser: return ( X_train, X_valid, y_train, y_valid, _, _, _, y_dates_valid, forecast_data_input, dates_forecast_input, scaler, is_error, ) = prepare_scale_train_valid_test(df_stock["Adj Close"], ns_parser) if is_error: return print( f"Training on {X_train.shape[0]} sequences of length {X_train.shape[1]}. Using {X_valid.shape[0]} sequences " f" of length {X_valid.shape[1]} for validation. Model will run {ns_parser.n_loops} loops" ) future_dates = get_next_stock_market_days( dates_forecast_input[-1], n_next_days=ns_parser.n_days ) preds = np.zeros((ns_parser.n_loops, X_valid.shape[0], ns_parser.n_days)) forecast_data = np.zeros((ns_parser.n_loops, ns_parser.n_days)) for i in range(ns_parser.n_loops): # Build Neural Network model model = build_neural_network_model( cfg_nn_models.Long_Short_Term_Memory, ns_parser.n_inputs, ns_parser.n_days, ) model.compile( optimizer=optimizers[cfg_nn_models.Optimizer]( learning_rate=ns_parser.lr ), loss=cfg_nn_models.Loss, ) model.fit( X_train.reshape(X_train.shape[0], X_train.shape[1], 1), y_train, epochs=ns_parser.n_epochs, verbose=True, batch_size=ns_parser.n_batch_size, validation_data=( X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1), y_valid, ), callbacks=[es], ) preds[i] = model.predict( X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1) ).reshape(X_valid.shape[0], ns_parser.n_days) forecast_data[i] = forecast( forecast_data_input, future_dates, model, scaler ).values.flat forecast_data_df = pd.DataFrame(forecast_data.T, index=future_dates) if ns_parser.n_loops > 1: forecast_data_df["Median"] = forecast_data_df.median(axis=1) print_pretty_prediction( forecast_data_df["Median"], df_stock["Adj Close"].values[-1] ) else: print_pretty_prediction( forecast_data_df[0], df_stock["Adj Close"].values[-1] ) plot_data_predictions( df_stock, np.median(preds, axis=0), y_valid, y_dates_valid, scaler, f"RNN Model on {s_ticker}", forecast_data_df, ns_parser.n_loops, ) print("") except Exception as e: print(e) traceback.print_exc() print("") finally: restore_env() def lstm(other_args: List[str], s_ticker: str, df_stock: pd.DataFrame): """ Train a Long-Short-Term-Memory Neural Net (lstm) Parameters ---------- other_args:List[str] Argparse arguments s_ticker: str Stock ticker df_stock: pd.DataFrame Dataframe of stock prices """ try: ns_parser = parse_args( prog="lstm", description="""Long-Short Term Memory. """, other_args=other_args, ) if not ns_parser: return ( X_train, X_valid, y_train, y_valid, _, _, _, y_dates_valid, forecast_data_input, dates_forecast_input, scaler, is_error, ) = prepare_scale_train_valid_test(df_stock["Adj Close"], ns_parser) if is_error: return print( f"Training on {X_train.shape[0]} sequences of length {X_train.shape[1]}. Using {X_valid.shape[0]} sequences " f" of length {X_valid.shape[1]} for validation. 
Model will run {ns_parser.n_loops} loops" ) future_dates = get_next_stock_market_days( dates_forecast_input[-1], n_next_days=ns_parser.n_days ) preds = np.zeros((ns_parser.n_loops, X_valid.shape[0], ns_parser.n_days)) forecast_data = np.zeros((ns_parser.n_loops, ns_parser.n_days)) for i in range(ns_parser.n_loops): # Build Neural Network model model = build_neural_network_model( cfg_nn_models.Long_Short_Term_Memory, ns_parser.n_inputs, ns_parser.n_days, ) model.compile( optimizer=optimizers[cfg_nn_models.Optimizer]( learning_rate=ns_parser.lr ), loss=cfg_nn_models.Loss, ) model.fit( X_train.reshape(X_train.shape[0], X_train.shape[1], 1), y_train, epochs=ns_parser.n_epochs, verbose=True, batch_size=ns_parser.n_batch_size, validation_data=( X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1), y_valid, ), callbacks=[es], ) preds[i] = model.predict( X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1) ).reshape(X_valid.shape[0], ns_parser.n_days) forecast_data[i] = forecast( forecast_data_input, future_dates, model, scaler ).values.flat forecast_data_df = pd.DataFrame(forecast_data.T, index=future_dates) if ns_parser.n_loops > 1: forecast_data_df["Median"] = forecast_data_df.median(axis=1) print_pretty_prediction( forecast_data_df["Median"], df_stock["Adj Close"].values[-1] ) else: print_pretty_prediction( forecast_data_df[0], df_stock["Adj Close"].values[-1] ) plot_data_predictions( df_stock, np.median(preds, axis=0), y_valid, y_dates_valid, scaler, f"LSTM Model on {s_ticker}", forecast_data_df, ns_parser.n_loops, ) print("") except Exception as e: print(e) traceback.print_exc() print("") finally: restore_env() def conv1d(other_args: List[str], s_ticker: str, df_stock: pd.DataFrame): """ Train a 1D Convolutional Neural Net (1D CNN) Parameters ---------- other_args:List[str] Argparse arguments s_ticker: str Stock ticker df_stock: pd.DataFrame Dataframe of stock prices """ try: ns_parser = parse_args( prog="conv1d", description="""1D CNN.""", other_args=other_args, ) if not ns_parser: return ( X_train, X_valid, y_train, y_valid, _, _, _, y_dates_valid, forecast_data_input, dates_forecast_input, scaler, is_error, ) = prepare_scale_train_valid_test(df_stock["Adj Close"], ns_parser) if is_error: return print( f"Training on {X_train.shape[0]} sequences of length {X_train.shape[1]}. Using {X_valid.shape[0]} sequences " f" of length {X_valid.shape[1]} for validation. 
Model will run {ns_parser.n_loops} loops" ) future_dates = get_next_stock_market_days( dates_forecast_input[-1], n_next_days=ns_parser.n_days ) preds = np.zeros((ns_parser.n_loops, X_valid.shape[0], ns_parser.n_days)) forecast_data = np.zeros((ns_parser.n_loops, ns_parser.n_days)) for i in range(ns_parser.n_loops): # Build Neural Network model model = build_neural_network_model( cfg_nn_models.Convolutional, ns_parser.n_inputs, ns_parser.n_days, ) model.compile( optimizer=optimizers[cfg_nn_models.Optimizer]( learning_rate=ns_parser.lr ), loss=cfg_nn_models.Loss, ) model.fit( X_train.reshape(X_train.shape[0], X_train.shape[1], 1), y_train, epochs=ns_parser.n_epochs, verbose=True, batch_size=ns_parser.n_batch_size, validation_data=( X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1), y_valid, ), callbacks=[es], ) preds[i] = model.predict( X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1) ).reshape(X_valid.shape[0], ns_parser.n_days) forecast_data[i] = forecast( forecast_data_input, future_dates, model, scaler ).values.flat forecast_data_df = pd.DataFrame(forecast_data.T, index=future_dates) if ns_parser.n_loops > 1: forecast_data_df["Median"] = forecast_data_df.median(axis=1) print_pretty_prediction( forecast_data_df["Median"], df_stock["Adj Close"].values[-1] ) else: print_pretty_prediction( forecast_data_df[0], df_stock["Adj Close"].values[-1] ) plot_data_predictions( df_stock, np.median(preds, axis=0), y_valid, y_dates_valid, scaler, f"Conv1D Model on {s_ticker}", forecast_data_df, ns_parser.n_loops, ) print("") except Exception as e: print(e) traceback.print_exc() print("") finally: restore_env()
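build_neural_network_model above consumes a list of single-key dictionaries from config_neural_network_models.py, one per layer, keyed by the Keras layer name and carrying that layer's keyword arguments. A hypothetical list in that format is shown below to illustrate the contract; the entries in the real config module may differ.

from gamestonk_terminal.common.prediction_techniques.neural_networks_view import (
    build_neural_network_model,
)

# Illustrative layer list: the builder injects input_shape=(n_inputs, 1) into
# the first layer and units=n_days into the last one.
Long_Short_Term_Memory = [
    {"LSTM": {"units": 25, "activation": "tanh", "return_sequences": True}},
    {"LSTM": {"units": 15, "activation": "tanh", "return_sequences": False}},
    {"Dense": {"activation": "relu"}},
]

model = build_neural_network_model(Long_Short_Term_Memory, n_inputs=40, n_days=5)
model.compile(optimizer="adam", loss="mse")
model.summary()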
[ "tensorflow.keras.layers.MaxPool1D", "tensorflow.keras.layers.SimpleRNN", "tensorflow.keras.layers.Dense", "numpy.median", "tensorflow.keras.layers.Conv1D", "pandas.DataFrame", "tensorflow.keras.layers.LSTM", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.AvgPool1D", "tensorflow.keras.layers.Dropout", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.models.Sequential", "numpy.zeros" ]
gamestonk_terminal/common/prediction_techniques/neural_networks_view.py
[(54, 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': 'cfg_nn_models.Early_Stop_Patience'}), False, 'from tensorflow.keras.callbacks import EarlyStopping\n'),
(57, 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(1000)'}), False, 'from tensorflow.keras.callbacks import EarlyStopping\n'),
(80, 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras.models import Sequential\n'),
(154, 'gamestonk_terminal.common.prediction_techniques.pred_helper.parse_args', 'parse_args', ([], {'prog': '"""mlp"""', 'description': '"""Multi-Layered-Perceptron. """', 'other_args': 'other_args'}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(174, 'gamestonk_terminal.common.prediction_techniques.pred_helper.prepare_scale_train_valid_test', 'prepare_scale_train_valid_test', (["df_stock['Adj Close']", 'ns_parser'], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(181, 'gamestonk_terminal.helper_funcs.get_next_stock_market_days', 'get_next_stock_market_days', (['dates_forecast_input[-1]'], {'n_next_days': 'ns_parser.n_days'}), False, 'from gamestonk_terminal.helper_funcs import get_next_stock_market_days\n'),
(185, 'numpy.zeros', 'np.zeros', (['(ns_parser.n_loops, X_valid.shape[0], ns_parser.n_days)'], {}), True, 'import numpy as np\n'),
(186, 'numpy.zeros', 'np.zeros', (['(ns_parser.n_loops, ns_parser.n_days)'], {}), True, 'import numpy as np\n'),
(221, 'pandas.DataFrame', 'pd.DataFrame', (['forecast_data.T'], {'index': 'future_dates'}), True, 'import pandas as pd\n'),
(249, 'gamestonk_terminal.common.prediction_techniques.pred_helper.restore_env', 'restore_env', ([], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(266, 'gamestonk_terminal.common.prediction_techniques.pred_helper.parse_args', 'parse_args', ([], {'prog': '"""rnn"""', 'description': '"""Recurrent Neural Network. """', 'other_args': 'other_args'}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(286, 'gamestonk_terminal.common.prediction_techniques.pred_helper.prepare_scale_train_valid_test', 'prepare_scale_train_valid_test', (["df_stock['Adj Close']", 'ns_parser'], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(293, 'gamestonk_terminal.helper_funcs.get_next_stock_market_days', 'get_next_stock_market_days', (['dates_forecast_input[-1]'], {'n_next_days': 'ns_parser.n_days'}), False, 'from gamestonk_terminal.helper_funcs import get_next_stock_market_days\n'),
(297, 'numpy.zeros', 'np.zeros', (['(ns_parser.n_loops, X_valid.shape[0], ns_parser.n_days)'], {}), True, 'import numpy as np\n'),
(298, 'numpy.zeros', 'np.zeros', (['(ns_parser.n_loops, ns_parser.n_days)'], {}), True, 'import numpy as np\n'),
(332, 'pandas.DataFrame', 'pd.DataFrame', (['forecast_data.T'], {'index': 'future_dates'}), True, 'import pandas as pd\n'),
(359, 'gamestonk_terminal.common.prediction_techniques.pred_helper.restore_env', 'restore_env', ([], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(375, 'gamestonk_terminal.common.prediction_techniques.pred_helper.parse_args', 'parse_args', ([], {'prog': '"""lstm"""', 'description': '"""Long-Short Term Memory. """', 'other_args': 'other_args'}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(395, 'gamestonk_terminal.common.prediction_techniques.pred_helper.prepare_scale_train_valid_test', 'prepare_scale_train_valid_test', (["df_stock['Adj Close']", 'ns_parser'], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(402, 'gamestonk_terminal.helper_funcs.get_next_stock_market_days', 'get_next_stock_market_days', (['dates_forecast_input[-1]'], {'n_next_days': 'ns_parser.n_days'}), False, 'from gamestonk_terminal.helper_funcs import get_next_stock_market_days\n'),
(406, 'numpy.zeros', 'np.zeros', (['(ns_parser.n_loops, X_valid.shape[0], ns_parser.n_days)'], {}), True, 'import numpy as np\n'),
(407, 'numpy.zeros', 'np.zeros', (['(ns_parser.n_loops, ns_parser.n_days)'], {}), True, 'import numpy as np\n'),
(443, 'pandas.DataFrame', 'pd.DataFrame', (['forecast_data.T'], {'index': 'future_dates'}), True, 'import pandas as pd\n'),
(471, 'gamestonk_terminal.common.prediction_techniques.pred_helper.restore_env', 'restore_env', ([], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(487, 'gamestonk_terminal.common.prediction_techniques.pred_helper.parse_args', 'parse_args', ([], {'prog': '"""conv1d"""', 'description': '"""1D CNN."""', 'other_args': 'other_args'}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(507, 'gamestonk_terminal.common.prediction_techniques.pred_helper.prepare_scale_train_valid_test', 'prepare_scale_train_valid_test', (["df_stock['Adj Close']", 'ns_parser'], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(514, 'gamestonk_terminal.helper_funcs.get_next_stock_market_days', 'get_next_stock_market_days', (['dates_forecast_input[-1]'], {'n_next_days': 'ns_parser.n_days'}), False, 'from gamestonk_terminal.helper_funcs import get_next_stock_market_days\n'),
(518, 'numpy.zeros', 'np.zeros', (['(ns_parser.n_loops, X_valid.shape[0], ns_parser.n_days)'], {}), True, 'import numpy as np\n'),
(519, 'numpy.zeros', 'np.zeros', (['(ns_parser.n_loops, ns_parser.n_days)'], {}), True, 'import numpy as np\n'),
(555, 'pandas.DataFrame', 'pd.DataFrame', (['forecast_data.T'], {'index': 'future_dates'}), True, 'import pandas as pd\n'),
(583, 'gamestonk_terminal.common.prediction_techniques.pred_helper.restore_env', 'restore_env', ([], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(224, 'gamestonk_terminal.common.prediction_techniques.pred_helper.print_pretty_prediction', 'print_pretty_prediction', (["forecast_data_df['Median']", "df_stock['Adj Close'].values[-1]"], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(228, 'gamestonk_terminal.common.prediction_techniques.pred_helper.print_pretty_prediction', 'print_pretty_prediction', (['forecast_data_df[0]', "df_stock['Adj Close'].values[-1]"], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(233, 'numpy.median', 'np.median', (['preds'], {'axis': '(0)'}), True, 'import numpy as np\n'),
(245, 'traceback.print_exc', 'traceback.print_exc', ([], {}), False, 'import traceback\n'),
(335, 'gamestonk_terminal.common.prediction_techniques.pred_helper.print_pretty_prediction', 'print_pretty_prediction', (["forecast_data_df['Median']", "df_stock['Adj Close'].values[-1]"], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(339, 'gamestonk_terminal.common.prediction_techniques.pred_helper.print_pretty_prediction', 'print_pretty_prediction', (['forecast_data_df[0]', "df_stock['Adj Close'].values[-1]"], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(344, 'numpy.median', 'np.median', (['preds'], {'axis': '(0)'}), True, 'import numpy as np\n'),
(355, 'traceback.print_exc', 'traceback.print_exc', ([], {}), False, 'import traceback\n'),
(446, 'gamestonk_terminal.common.prediction_techniques.pred_helper.print_pretty_prediction', 'print_pretty_prediction', (["forecast_data_df['Median']", "df_stock['Adj Close'].values[-1]"], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(450, 'gamestonk_terminal.common.prediction_techniques.pred_helper.print_pretty_prediction', 'print_pretty_prediction', (['forecast_data_df[0]', "df_stock['Adj Close'].values[-1]"], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(455, 'numpy.median', 'np.median', (['preds'], {'axis': '(0)'}), True, 'import numpy as np\n'),
(467, 'traceback.print_exc', 'traceback.print_exc', ([], {}), False, 'import traceback\n'),
(558, 'gamestonk_terminal.common.prediction_techniques.pred_helper.print_pretty_prediction', 'print_pretty_prediction', (["forecast_data_df['Median']", "df_stock['Adj Close'].values[-1]"], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(562, 'gamestonk_terminal.common.prediction_techniques.pred_helper.print_pretty_prediction', 'print_pretty_prediction', (['forecast_data_df[0]', "df_stock['Adj Close'].values[-1]"], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(567, 'numpy.median', 'np.median', (['preds'], {'axis': '(0)'}), True, 'import numpy as np\n'),
(579, 'traceback.print_exc', 'traceback.print_exc', ([], {}), False, 'import traceback\n'),
(87, 'tensorflow.keras.layers.SimpleRNN', 'SimpleRNN', ([], {'input_shape': '(n_inputs, 1)'}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten\n'),
(217, 'gamestonk_terminal.common.prediction_techniques.pred_helper.forecast', 'forecast', (['forecast_data_input', 'future_dates', 'model', 'scaler'], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(328, 'gamestonk_terminal.common.prediction_techniques.pred_helper.forecast', 'forecast', (['forecast_data_input', 'future_dates', 'model', 'scaler'], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(439, 'gamestonk_terminal.common.prediction_techniques.pred_helper.forecast', 'forecast', (['forecast_data_input', 'future_dates', 'model', 'scaler'], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(551, 'gamestonk_terminal.common.prediction_techniques.pred_helper.forecast', 'forecast', (['forecast_data_input', 'future_dates', 'model', 'scaler'], {}), False, 'from gamestonk_terminal.common.prediction_techniques.pred_helper import prepare_scale_train_valid_test, forecast, plot_data_predictions, parse_args, restore_env, print_pretty_prediction\n'),
(90, 'tensorflow.keras.layers.SimpleRNN', 'SimpleRNN', ([], {'units': 'n_days'}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten\n'),
(92, 'tensorflow.keras.layers.SimpleRNN', 'SimpleRNN', ([], {}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten\n'),
(98, 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'input_shape': '(n_inputs, 1)'}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten\n'),
(101, 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'units': 'n_days'}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten\n'),
(103, 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten\n'),
(109, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'input_dim': 'n_inputs'}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten\n'),
(112, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'n_days'}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten\n'),
(114, 'tensorflow.keras.layers.Dense', 'Dense', ([], {}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten\n'),
(119, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'input_shape': '(n_inputs, 1)'}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten\n'),
(121, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten\n'),
(124, 'tensorflow.keras.layers.MaxPool1D', 'MaxPool1D', ([], {}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten\n'),
(127, 'tensorflow.keras.layers.AvgPool1D', 'AvgPool1D', ([], {}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten\n'),
(130, 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten\n'),
(133, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Conv1D, MaxPool1D, AvgPool1D, Flatten\n')]
Gurpreet-Singh121/transformers
669e3c50c98ad5b506555a551d2ecbf72ceb3c99
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF general model utils.""" import functools import inspect import os import pickle import re import warnings from typing import Dict, List, Optional, Union import h5py import numpy as np import tensorflow as tf from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine import data_adapter from tensorflow.python.keras.engine.keras_tensor import KerasTensor from tensorflow.python.keras.saving import hdf5_format from huggingface_hub import Repository, list_repo_files from .configuration_utils import PretrainedConfig from .file_utils import ( DUMMY_INPUTS, TF2_WEIGHTS_NAME, WEIGHTS_NAME, ModelOutput, PushToHubMixin, cached_path, copy_func, hf_bucket_url, is_offline_mode, is_remote_url, ) from .generation_tf_utils import TFGenerationMixin from .modeling_tf_outputs import TFSeq2SeqLMOutput from .tokenization_utils_base import BatchEncoding from .utils import logging logger = logging.get_logger(__name__) tf_logger = tf.get_logger() TFModelInputType = Union[ List[tf.Tensor], List[np.ndarray], List[KerasTensor], Dict[str, tf.Tensor], Dict[str, np.ndarray], Dict[str, KerasTensor], tf.Tensor, np.ndarray, KerasTensor, ] def dummy_loss(y_true, y_pred): return tf.reduce_mean(y_pred) class TFModelUtilsMixin: """ A few utilities for `tf.keras.Model`, to be used as a mixin. """ def num_parameters(self, only_trainable: bool = False) -> int: """ Get the number of (optionally, trainable) parameters in the model. Args: only_trainable (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of trainable parameters Returns: `int`: The number of parameters. """ if only_trainable: return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables)) else: return self.count_params() def keras_serializable(cls): """ Decorate a Keras Layer class to support Keras serialization. This is done by: 1. Adding a `transformers_config` dict to the Keras config dictionary in `get_config` (called by Keras at serialization time. 2. Wrapping `__init__` to accept that `transformers_config` dict (passed by Keras at deserialization time) and convert it to a config object for the actual layer initializer. 3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not need to be supplied in `custom_objects` in the call to `tf.keras.models.load_model`. Args: cls (a `tf.keras.layers.Layers subclass`): Typically a `TF.MainLayer` class in this project, in general must accept a `config` argument to its initializer. Returns: The same class object, with modifications for Keras deserialization. 
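
    Example (a minimal sketch; `MyConfig` and `TFMyMainLayer` are hypothetical names, not classes from this
    module):

    ```python
    @keras_serializable
    class TFMyMainLayer(tf.keras.layers.Layer):
        config_class = MyConfig  # required by the decorator

        def __init__(self, config, **kwargs):
            super().__init__(**kwargs)
            self.hidden_size = config.hidden_size
    ```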
""" initializer = cls.__init__ config_class = getattr(cls, "config_class", None) if config_class is None: raise AttributeError("Must set `config_class` to use @keras_serializable") @functools.wraps(initializer) def wrapped_init(self, *args, **kwargs): config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None) if isinstance(config, dict): config = config_class.from_dict(config) initializer(self, config, *args, **kwargs) elif isinstance(config, PretrainedConfig): if len(args) > 0: initializer(self, *args, **kwargs) else: initializer(self, config, *args, **kwargs) else: raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)") self._config = config self._kwargs = kwargs cls.__init__ = wrapped_init if not hasattr(cls, "get_config"): raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses") if hasattr(cls.get_config, "_is_default"): def get_config(self): cfg = super(cls, self).get_config() cfg["config"] = self._config.to_dict() cfg.update(self._kwargs) return cfg cls.get_config = get_config cls._keras_serializable = True if hasattr(tf.keras.utils, "register_keras_serializable"): cls = tf.keras.utils.register_keras_serializable()(cls) return cls class TFCausalLanguageModelingLoss: """ Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token. <Tip> Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. </Tip> """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) # make sure only labels that are not equal to -100 affect the loss active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) class TFQuestionAnsweringLoss: """ Loss function suitable for question answering. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) start_loss = loss_fn(labels["start_position"], logits[0]) end_loss = loss_fn(labels["end_position"], logits[1]) return (start_loss + end_loss) / 2.0 class TFTokenClassificationLoss: """ Loss function suitable for token classification. <Tip> Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. </Tip> """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) # make sure only labels that are not equal to -100 # are taken into account as loss if tf.math.reduce_any(labels == -1): warnings.warn("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.") active_loss = tf.reshape(labels, (-1,)) != -1 else: active_loss = tf.reshape(labels, (-1,)) != -100 reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) class TFSequenceClassificationLoss: """ Loss function suitable for sequence classification. 
""" def compute_loss(self, labels, logits): if len(shape_list(logits)) == 1 or shape_list(logits)[1] == 1: loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE) else: loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) return loss_fn(labels, logits) class TFMultipleChoiceLoss: """Loss function suitable for multiple choice tasks.""" def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) return loss_fn(labels, logits) class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss): """ Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens. <Tip> Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. </Tip> """ class TFNextSentencePredictionLoss: """ Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence. <Tip> Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. </Tip> """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) # make sure only labels that are not equal to -100 # are taken into account as loss next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss) next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss) return loss_fn(next_sentence_label, next_sentence_reduced_logits) def booleans_processing(config, **kwargs): """ Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or graph) Args: config ([`PretrainedConfig`]): The config of the running model. **kwargs: The boolean parameters Returns: A dictionary with the proper values for each boolean """ final_booleans = {} if tf.executing_eagerly(): final_booleans["output_attentions"] = ( kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions ) final_booleans["output_hidden_states"] = ( kwargs["output_hidden_states"] if kwargs["output_hidden_states"] is not None else config.output_hidden_states ) final_booleans["return_dict"] = ( kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict ) if "use_cache" in kwargs: final_booleans["use_cache"] = ( kwargs["use_cache"] if kwargs["use_cache"] is not None else getattr(config, "use_cache", None) ) else: if ( kwargs["output_attentions"] not in (None, config.output_attentions) or kwargs["output_hidden_states"] not in (None, config.output_hidden_states) or ("use_cache" in kwargs and kwargs["use_cache"] not in (None, config.use_cache)) ): tf_logger.warning( "The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model. " "They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)." ) final_booleans["output_attentions"] = config.output_attentions final_booleans["output_hidden_states"] = config.output_hidden_states if kwargs.get("return_dict", None) not in (None, True): tf_logger.warning( "The parameter `return_dict` cannot be set in graph mode and will always be set to `True`." 
            )
            final_booleans["return_dict"] = True

        if "use_cache" in kwargs:
            final_booleans["use_cache"] = getattr(config, "use_cache", None)

    return final_booleans


def input_processing(func, config, input_ids, **kwargs):
    """
    Process the input of each TensorFlow model, including the booleans. In case of a list of symbolic inputs, each
    input has to be named according to its parameter name, i.e. `input_ids = tf.keras.Input(shape=(128,),
    dtype='int32', name="input_ids")`, otherwise the order of the tensors is not guaranteed during training.

    Args:
        func (`callable`):
            The callable function of the TensorFlow model.
        config ([`PretrainedConfig`]):
            The config of the running model.
        **kwargs:
            The inputs of the model.

    Returns:
        A dictionary mapping each parameter name to its processed input, with the boolean arguments resolved against
        the config by [`booleans_processing`].
    """
    signature = dict(inspect.signature(func).parameters)
    signature.pop("kwargs", None)
    signature.pop("self", None)
    parameter_names = list(signature.keys())
    output = {}
    allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray, KerasTensor)

    if "inputs" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
            FutureWarning,
        )

        output["input_ids"] = kwargs["kwargs_call"].pop("inputs")

    if "decoder_cached_states" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
            FutureWarning,
        )
        output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states")

    if "past" in kwargs["kwargs_call"] and "past_key_values" in kwargs:
        warnings.warn(
            "The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
            FutureWarning,
        )
        kwargs["past_key_values"] = kwargs["kwargs_call"].pop("past")
    elif "past_key_values" in kwargs["kwargs_call"] and "past" in kwargs:
        kwargs["past"] = kwargs["kwargs_call"].pop("past_key_values")

    if len(kwargs["kwargs_call"]) > 0:
        raise ValueError(
            f"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}."
        )

    kwargs.pop("kwargs_call")

    for k, v in kwargs.items():
        if isinstance(v, allowed_types) or v is None:
            output[k] = v
        else:
            raise ValueError(f"Data of type {type(v)} is not allowed; only {allowed_types} are accepted for {k}.")

    if isinstance(input_ids, (tuple, list)):
        for i, input in enumerate(input_ids):
            # EagerTensors don't allow to use the .name property so we check for a real Tensor
            if type(input) == tf.Tensor:
                # Tensor names always have the pattern `name:id`, so we check only the
                # `name` part
                tensor_name = input.name.split(":")[0]

                if tensor_name in parameter_names:
                    output[tensor_name] = input
                else:
                    output[parameter_names[i]] = input
            elif isinstance(input, allowed_types) or input is None:
                output[parameter_names[i]] = input
            else:
                raise ValueError(
                    f"Data of type {type(input)} is not allowed; only {allowed_types} are accepted for {parameter_names[i]}."
                )
    elif isinstance(input_ids, (dict, BatchEncoding)):
        if "inputs" in input_ids:
            warnings.warn(
                "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
                FutureWarning,
            )

            output["input_ids"] = input_ids.pop("inputs")

        if "decoder_cached_states" in input_ids:
            warnings.warn(
                "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            output["past_key_values"] = input_ids.pop("decoder_cached_states")

        for k, v in dict(input_ids).items():
            if isinstance(v, allowed_types) or v is None:
                output[k] = v
            elif k not in parameter_names and "args" not in parameter_names:
                logger.warning(
                    f"The parameter {k} does not belong to the parameter list {parameter_names} and will be ignored."
                )
                continue
            else:
                raise ValueError(f"Data of type {type(v)} is not allowed; only {allowed_types} are accepted for {k}.")
    else:
        if isinstance(input_ids, (tf.Tensor, KerasTensor)) or input_ids is None:
            output[parameter_names[0]] = input_ids
        else:
            raise ValueError(
                f"Data of type {type(input_ids)} is not allowed; only {allowed_types} are accepted for {parameter_names[0]}."
            )

    for name in parameter_names:
        if name not in list(output.keys()) and name != "args":
            output[name] = kwargs.pop(name, signature[name].default)

    # When creating a SavedModel, TF calls the method with LayerCall.__call__(args, **kwargs),
    # so we have to handle that case here to keep the proper output
    if "args" in output:
        if output["args"] is not None and type(output["args"]) == tf.Tensor:
            tensor_name = output["args"].name.split(":")[0]
            output[tensor_name] = output["args"]
        else:
            # `args` in this case is always the first parameter, then `input_ids`
            output["input_ids"] = output["args"]

        del output["args"]

    if "kwargs" in output:
        del output["kwargs"]

    boolean_dict = {
        k: v
        for k, v in output.items()
        if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
    }

    output.update(
        booleans_processing(
            config=config,
            **boolean_dict,
        )
    )

    return output


def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
    """
    Detect missing and unexpected layers and load the TF weights according to their names and shapes.

    Args:
        model (`tf.keras.models.Model`):
            The model to load the weights into.
        resolved_archive_file (`str`):
            The location of the H5 file.
        ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
            Whether or not to ignore weights with shapes that don't match between the checkpoint and the model.

    Returns:
        Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the
        mismatched layers.
    """
    missing_layers = []
    unexpected_layers = []
    mismatched_layers = []

    # Read the H5 file
    with h5py.File(resolved_archive_file, "r") as f:
        # Retrieve the name of each layer from the H5 file
        saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names"))

        # Find the missing layers from the high level list of layers
        missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name)

        # Find the unexpected layers from the high level list of layers
        unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers]))
        saved_weight_names_set = set()
        symbolic_weights_names = set()
        weight_value_tuples = []

        # Compute missing and unexpected sub layers
        # Store the weights in a list of tuples that looks like [(weight_object, value_of_weight),...]
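        # Note on matching (descriptive only): both the H5 names and the model's symbolic weight names are
        # normalized below by stripping the leading top-level model name (and honoring `_prefix` when given),
        # so weights are compared on the remainder of their `scope/variable` paths.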
        for layer in model.layers:
            # if layer_name from the H5 file belongs to the layers from the instantiated model
            if layer.name in saved_h5_model_layers_name:
                # Get the H5 layer object from its name
                h5_layer_object = f[layer.name]
                # Get all the weights as a list from the layer object
                symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
                saved_weights = {}

                # Create a dict from the H5 saved model that looks like {"weight_name": weight_value}
                # And a set with only the names
                for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"):
                    # TF names always start with the model name so we ignore it
                    name = "/".join(weight_name.split("/")[1:])

                    if _prefix is not None:
                        name = _prefix + "/" + name

                    saved_weights[name] = np.asarray(h5_layer_object[weight_name])

                    # Add the updated name to the final list for computing missing/unexpected values
                    saved_weight_names_set.add(name)

                # Loop over each weight from the instantiated model and compare it with the weights from the H5 file
                for symbolic_weight in symbolic_weights:
                    # TF names always start with the model name so we ignore it
                    if _prefix is not None:
                        delimiter = len(_prefix.split("/"))
                        symbolic_weight_name = "/".join(
                            symbolic_weight.name.split("/")[:delimiter]
                            + symbolic_weight.name.split("/")[delimiter + 1 :]
                        )
                    else:
                        symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:])

                    # Here we check if the current weight is among the weights from the H5 file
                    # If yes, get the weight_value of the corresponding weight from the H5 file
                    # If not, set the value to None
                    saved_weight_value = saved_weights.get(symbolic_weight_name, None)

                    # Add the updated name to the final list for computing missing/unexpected values
                    symbolic_weights_names.add(symbolic_weight_name)

                    # If the current weight is found
                    if saved_weight_value is not None:
                        # Check if the shape of the current weight and the one from the H5 file are different
                        if K.int_shape(symbolic_weight) != saved_weight_value.shape:
                            # If yes, we reshape the weight from the H5 file according to the current weight
                            # If the two shapes are not compatible, we raise an issue
                            try:
                                array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
                            except ValueError as e:
                                if ignore_mismatched_sizes:
                                    mismatched_layers.append(
                                        (symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight))
                                    )
                                    continue
                                else:
                                    raise e
                        else:
                            array = saved_weight_value

                        # We create the tuple that will be loaded and add it to the final list
                        weight_value_tuples.append((symbolic_weight, array))

    # Load all the weights
    K.batch_set_value(weight_value_tuples)

    # Compute the missing and unexpected layers
    missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set))
    unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names))

    return missing_layers, unexpected_layers, mismatched_layers


def init_copy_embeddings(old_embeddings, new_num_tokens):
    r"""
    This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case
    new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be
    kept or not.
Example: - if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4] - mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1] - if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5] - mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4] """ old_num_tokens, old_embedding_dim = shape_list(old_embeddings) size_diff = new_num_tokens - old_num_tokens # initialize new embeddings # Copy token embeddings from the previous ones if tf.math.greater(size_diff, 0): # if the new size is greater than the old one, we extend the current embeddings with a padding until getting new size # and we create a mask to properly identify the padded values and be replaced by the values of the newly created # embeddings current_weights = tf.pad( old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1 ) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True) mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False) else: # if the new size if lower than the old one, we take the current embeddings until the new size current_weights = tf.slice( old_embeddings.value(), tf.convert_to_tensor([0, 0]), tf.convert_to_tensor([new_num_tokens, old_embedding_dim]), ) mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True) return mask, current_weights class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin): r""" Base class for all TF models. [`TFPreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to: - resize the input embeddings, - prune heads in the self-attention heads. Class attributes (overridden by derived classes): - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class for this model architecture. - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP models, `pixel_values` for vision models and `input_values` for speech models). """ config_class = None base_model_prefix = "" main_input_name = "input_ids" # a list of re pattern of tensor names to ignore from the model when loading the model weights # (and avoid unnecessary warnings). _keys_to_ignore_on_load_missing = None # a list of re pattern of tensor names to ignore from the weights when loading the model weights # (and avoid unnecessary warnings). _keys_to_ignore_on_load_unexpected = None _requires_load_weight_prefix = False @property def dummy_inputs(self) -> Dict[str, tf.Tensor]: """ Dummy inputs to build the network. Returns: `Dict[str, tf.Tensor]`: The dummy inputs. """ return { "input_ids": tf.constant(DUMMY_INPUTS), } @property def framework(self) -> str: """ :str: Identifies that this is a TensorFlow model. """ return "tf" def __init__(self, config, *inputs, **kwargs): super().__init__(*inputs, **kwargs) if not isinstance(config, PretrainedConfig): raise ValueError( f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " "`PretrainedConfig`. 
To create a model from a pretrained model use "
                f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        # Save config and origin of the pretrained weights if given in model
        self.config = config
        self.name_or_path = config.name_or_path

    def get_config(self):
        return self.config.to_dict()

    @classmethod
    def from_config(cls, config, **kwargs):
        if isinstance(config, PretrainedConfig):
            return cls._from_config(config, **kwargs)
        return cls._from_config(cls.config_class.from_dict(config, **kwargs))

    @classmethod
    def _from_config(cls, config, **kwargs):
        """
        All context managers that the model should be initialized under go here.
        """
        return cls(config, **kwargs)

    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
                "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
            }
        ]
    )
    def serving(self, inputs):
        """
        Method used for serving the model.

        Args:
            inputs (`Dict[str, tf.Tensor]`):
                The input of the saved model as a dictionary of tensors.
        """
        output = self.call(inputs)

        return self.serving_output(output)

    def serving_output(self, output):
        """
        Prepare the output of the saved model. Each model must implement this function.

        Args:
            output ([`TFBaseModelOutput`]):
                The output returned by the model.
        """
        raise NotImplementedError

    def get_input_embeddings(self) -> tf.keras.layers.Layer:
        """
        Returns the model's input embeddings layer.

        Returns:
            `tf.Variable`: The embeddings layer mapping vocabulary to hidden states.
        """
        main_layer = getattr(self, self.base_model_prefix, self)

        if main_layer is not self:
            return main_layer.get_input_embeddings()
        else:
            raise NotImplementedError

    def _save_checkpoint(self, checkpoint_dir, epoch):
        if not os.path.isdir(checkpoint_dir):
            os.mkdir(checkpoint_dir)
        # We avoid tf.train.checkpoint or saving weights in TF format, even though that includes optimizer
        # state for us, because it requires special handling for objects like custom losses, which we use
        # internally and which users are likely to use too
        weights_path = os.path.join(checkpoint_dir, "weights.h5")
        self.save_weights(weights_path)
        extra_data = {"epoch": epoch, "optimizer_state": self.optimizer.get_weights()}
        extra_data_path = os.path.join(checkpoint_dir, "extra_data.pickle")
        with open(extra_data_path, "wb") as f:
            pickle.dump(extra_data, f)

    def load_repo_checkpoint(self, repo_path_or_name):
        """
        Loads a saved checkpoint (model weights and optimizer state) from a repo. Returns the current epoch count when
        the checkpoint was made.

        Args:
            repo_path_or_name (`str`):
                Can either be a repository name for your model in the Hub or a path to a local folder (in which case
                the repository will have the name of that local folder).

        Returns:
            `dict`: A dictionary of extra metadata from the checkpoint, most commonly an "epoch" count.
        """
        if getattr(self, "optimizer", None) is None:
            raise RuntimeError(
                "Checkpoint loading failed as no optimizer is attached to the model. "
                "This is most likely caused by the model not being compiled."
) if not os.path.isdir(repo_path_or_name): # If this isn't a local path, check that the remote repo exists and has a checkpoint in it repo_files = list_repo_files(repo_path_or_name) for file in ("checkpoint/weights.h5", "checkpoint/extra_data.pickle"): if file not in repo_files: raise FileNotFoundError(f"Repo {repo_path_or_name} does not contain checkpoint file {file}!") if "/" not in repo_path_or_name: model_id = repo_path_or_name repo_path_or_name = self.get_full_repo_name(repo_path_or_name) else: model_id = repo_path_or_name.split("/")[-1] repo = Repository(model_id, clone_from=f"https://huggingface.co./{repo_path_or_name}") local_dir = repo.local_dir else: local_dir = repo_path_or_name # Now make sure the repo actually has a checkpoint in it. checkpoint_dir = os.path.join(local_dir, "checkpoint") weights_file = os.path.join(checkpoint_dir, "weights.h5") if not os.path.isfile(weights_file): raise FileNotFoundError(f"Could not find checkpoint file weights.h5 in repo {repo_path_or_name}!") extra_data_file = os.path.join(checkpoint_dir, "extra_data.pickle") if not os.path.isfile(extra_data_file): raise FileNotFoundError(f"Could not find checkpoint file extra_data.pickle in repo {repo_path_or_name}!") # Assuming the repo is real and we got a checkpoint, load the weights and the optimizer state into the model. # The optimizer state includes the iteration count, so learning rate schedules should resume as normal too. self.load_weights(weights_file) with open(extra_data_file, "rb") as f: extra_data = pickle.load(f) self.optimizer.set_weights(extra_data["optimizer_state"]) # Finally, return the epoch number from the checkpoint. This isn't a property of the model, so we can't # set it directly, but the user can pass it to fit(). return {"epoch": extra_data["epoch"]} def compile( self, optimizer="rmsprop", loss="passthrough", metrics=None, loss_weights=None, weighted_metrics=None, run_eagerly=None, steps_per_execution=None, **kwargs ): """ This is a thin wrapper that sets the model's loss output head as the loss if the user does not specify a loss function themselves. """ if loss == "passthrough": logger.warning( "No loss specified in compile() - the model's internal loss computation will be used as the " "loss. Don't panic - this is a common way to train TensorFlow models in Transformers! " "Please ensure your labels are passed as keys in the input dict so that they are " "accessible to the model during the forward pass. To disable this behaviour, please pass a " "loss argument, or explicitly pass loss=None if you do not want your model to compute a loss." ) loss = {"loss": dummy_loss} super().compile( optimizer=optimizer, loss=loss, metrics=metrics, loss_weights=loss_weights, weighted_metrics=weighted_metrics, run_eagerly=run_eagerly, steps_per_execution=steps_per_execution, **kwargs, ) def train_step(self, data): """ A modification of Keras's default train_step that cleans up the printed metrics when we use a dummy loss. """ # These are the only transformations `Model.fit` applies to user-input # data when a `tf.data.Dataset` is provided. 
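        # Reminder: when the model was compiled with the default "passthrough" loss, the labels usually travel
        # inside the input dict (e.g. x["labels"]) and the forward pass computes the loss itself; the unpacking
        # below keeps Keras's loss and metric bookkeeping working in that setup.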
data = data_adapter.expand_1d(data) x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data) # These next two lines differ from the base method - they avoid issues when the labels are in # the input dict (and loss is computed internally) if y is None and "labels" in x: y = x["labels"] # Stops confusion with metric computations elif y is None and "input_ids" in x: # Just make any kind of dummy array to make loss work y = tf.zeros(tf.shape(x["input_ids"])[0], dtype=tf.int64) # Run forward pass. with tf.GradientTape() as tape: y_pred = self(x, training=True) loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses) # Run backwards pass. self.optimizer.minimize(loss, self.trainable_variables, tape=tape) # When y_pred is a ModelOutput and y is a tf.Tensor the metrics update # should be done only with the relevant ModelOutput param that is # considered by the loss. if isinstance(y_pred, TFSeq2SeqLMOutput) and isinstance(y, tf.Tensor): y_pred = y_pred["logits"] self.compiled_metrics.update_state(y, y_pred, sample_weight) # Collect metrics to return return_metrics = {} for metric in self.metrics: result = metric.result() if isinstance(result, dict): return_metrics.update(result) else: return_metrics[metric.name] = result # These next two lines are also not in the base method - they correct the displayed metrics # when we're using a dummy loss, to avoid a bogus "loss_loss" value being shown. if "loss" in return_metrics and "loss_loss" in return_metrics: del return_metrics["loss_loss"] return return_metrics def test_step(self, data): """ A modification of Keras's default test_step that cleans up the printed metrics when we use a dummy loss. """ data = data_adapter.expand_1d(data) x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data) # These next two lines differ from the base method - they avoid issues when the labels are in # the input dict (and loss is computed internally) if y is None and "labels" in x: y = x["labels"] # Stops confusion with metric computations elif y is None and "input_ids" in x: # Just make any kind of dummy array to make loss work y = tf.zeros(tf.shape(x["input_ids"])[0], dtype=tf.int64) y_pred = self(x, training=False) self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses) # Updates stateful loss metrics. if isinstance(y_pred, TFSeq2SeqLMOutput) and isinstance(y, tf.Tensor): y_pred = y_pred["logits"] self.compiled_metrics.update_state(y, y_pred, sample_weight) # Collect metrics to return return_metrics = {} for metric in self.metrics: result = metric.result() if isinstance(result, dict): return_metrics.update(result) else: return_metrics[metric.name] = result # These next two lines are also not in the base method - they correct the displayed metrics # when we're using a dummy loss, to avoid a bogus "loss_loss" value being shown. if "loss" in return_metrics and "loss_loss" in return_metrics: del return_metrics["loss_loss"] return return_metrics def create_model_card( self, output_dir, model_name: str, language: Optional[str] = None, license: Optional[str] = None, tags: Optional[str] = None, finetuned_from: Optional[str] = None, tasks: Optional[str] = None, dataset_tags: Optional[Union[str, List[str]]] = None, dataset: Optional[Union[str, List[str]]] = None, dataset_args: Optional[Union[str, List[str]]] = None, ): # Avoids a circular import by doing this when necessary. 
from .modelcard import TrainingSummary training_summary = TrainingSummary.from_keras( self, keras_history=self.history, language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args, ) model_card = training_summary.to_model_card() with open(os.path.join(output_dir, "README.md"), "w") as f: f.write(model_card) def set_input_embeddings(self, value): """ Set model's input embeddings Args: value (`tf.Variable`): The new weights mapping hidden states to vocabulary. """ main_layer = getattr(self, self.base_model_prefix) if main_layer is None: raise NotImplementedError("The model does not implements the base_model_prefix attribute.") try: main_layer.set_input_embeddings(value) except AttributeError: logger.info("Building the model") self(self.dummy_inputs) main_layer.set_input_embeddings(value) def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]: """ Returns the model's output embeddings Returns: `tf.Variable`: The new weights mapping vocabulary to hidden states. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: return lm_head.get_output_embeddings() except AttributeError: logger.info("Building the model") self(self.dummy_inputs) return lm_head().get_output_embeddings() return None # Overwrite for models with output embeddings def set_output_embeddings(self, value): """ Set model's output embeddings Args: value (`tf.Variable`): The new weights mapping hidden states to vocabulary. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: lm_head.set_output_embeddings(value) except AttributeError: logger.info("Building the model") self(self.dummy_inputs) lm_head.set_output_embeddings(value) def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]: """ Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the embeddings Return: `tf.keras.layers.Layer`: The layer that handles the bias, None if not an LM model. """ warnings.warn( "The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning ) return self.get_lm_head() def get_prefix_bias_name(self) -> Union[None, str]: """ Get the concatenated _prefix name of the bias from the model name to the parent layer Return: `str`: The _prefix name of the bias. """ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return None def get_bias(self) -> Union[None, Dict[str, tf.Variable]]: """ Dict of bias attached to an LM head. The key represents the name of the bias attribute. Return: `tf.Variable`: The weights representing the bias, None if not an LM model. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: return lm_head.get_bias() except AttributeError: self(self.dummy_inputs) return lm_head.get_bias() return None def set_bias(self, value): """ Set all the bias in the LM head. Args: value (`Dict[tf.Variable]`): All the new bias attached to an LM head. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: lm_head.set_bias(value) except AttributeError: self(self.dummy_inputs) lm_head.set_bias(value) def get_lm_head(self) -> tf.keras.layers.Layer: """ The LM Head layer. This method must be overwritten by all the models that have a lm head. Return: `tf.keras.layers.Layer`: The LM head layer if the model has one, None if not. 
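
        Example (a sketch of a typical override in a model with an MLM head; `self.mlm.predictions` is a
        hypothetical attribute path):

        ```python
        def get_lm_head(self) -> tf.keras.layers.Layer:
            return self.mlm.predictions
        ```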
""" return None def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable: """ Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. Arguments: new_num_tokens (`int`, *optional*): The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `tf.Variable` module of the model without doing anything. Return: `tf.Variable`: Pointer to the input tokens Embeddings Module of the model. """ if new_num_tokens is None or new_num_tokens == self.config.vocab_size: return self._get_word_embedding_weight(self.get_input_embeddings()) model_embeds = self._resize_token_embeddings(new_num_tokens) # Update base model and current model config self.config.vocab_size = new_num_tokens return model_embeds def _get_word_embedding_weight(model, embedding_layer): embeds = getattr(embedding_layer, "weight", None) if embeds is not None: return embeds embeds = getattr(embedding_layer, "decoder", None) if embeds is not None: return embeds # The reason why the attributes don't exist might be # because the model is not built, so retry getting # the argument after building the model model(model.dummy_inputs) embeds = getattr(embedding_layer, "weight", None) if embeds is not None: return embeds embeds = getattr(embedding_layer, "decoder", None) if embeds is not None: return embeds return None def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings()) new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) # if word embeddings are not tied, make sure that lm head bias is resized as well if self.get_bias() is not None: old_lm_head_bias = self.get_bias() new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) self.set_bias(new_lm_head_bias) # if word embeddings are not tied, make sure that lm head decoder is resized as well if self.get_output_embeddings() is not None: old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings()) new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens) self.set_output_embeddings(new_lm_head_decoder) self.set_input_embeddings(new_embeddings) return self.get_input_embeddings() def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens): """ Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_lm_head_bias (`tf.Variable`): Old lm head bias to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns None Return: `tf.Variable`: Pointer to the resized bias. 
""" new_lm_head_bias = {} for attr, weight in old_lm_head_bias.items(): first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight) size_diff = new_num_tokens - old_num_tokens final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens] # initialize new bias if tf.math.greater(size_diff, 0): padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]] current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy] bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True) bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False) else: slice_from = [0] if first_dim is None else [0, 0] current_bias = tf.slice( weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape) ) bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True) new_bias = self.add_weight( shape=final_shape, initializer="zeros", trainable=True, name=weight.name.split(":")[0], ) init_bias = tf.where(bias_mask, current_bias, new_bias.value()) new_bias.assign(init_bias) new_lm_head_bias[attr] = new_bias return new_lm_head_bias def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens): """ Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_lm_head_decoder (`tf.Variable`): Old lm head decoder to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns None Return: `tf.Variable`: Pointer to the resized decoder or None if the output embeddings are different from the input ones. """ new_lm_head_decoder = old_lm_head_decoder is_input_output_equals = tf.reduce_any( self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder ) if old_lm_head_decoder is not None and not is_input_output_equals: old_embedding_dim = shape_list(old_lm_head_decoder)[1] decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens) new_lm_head_decoder = self.add_weight( shape=(new_num_tokens, old_embedding_dim), initializer="zeros", trainable=True, name=old_lm_head_decoder.name.split(":")[0], ) init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value()) new_lm_head_decoder.assign(init_decoder) return new_lm_head_decoder def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable: """ Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_embeddings (`tf.Variable`): Old embeddings to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens ``tf.Variable``` module of the model without doing anything. 
        Return:
            `tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if `new_num_tokens` is
            `None`
        """
        old_embedding_dim = shape_list(old_embeddings)[1]
        init_range = getattr(self.config, "initializer_range", 0.02)
        embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens)
        new_embeddings = self.add_weight(
            name=old_embeddings.name.split(":")[0],
            shape=[new_num_tokens, old_embedding_dim],
            initializer=get_initializer(init_range),
            dtype=tf.float32,
        )
        init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value())

        new_embeddings.assign(init_embeddings)

        return new_embeddings

    def prune_heads(self, heads_to_prune):
        """
        Prunes heads of the base model.

        Arguments:
            heads_to_prune (`Dict[int, List[int]]`):
                Dictionary with keys being selected layer indices (`int`) and associated values being the list of
                heads to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0
                and 2 on layer 1 and heads 2 and 3 on layer 2.
        """
        raise NotImplementedError

    def save_pretrained(self, save_directory, saved_model=False, version=1, push_to_hub=False, **kwargs):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        [`~TFPreTrainedModel.from_pretrained`] class method.

        Arguments:
            save_directory (`str`):
                Directory to which to save. Will be created if it doesn't exist.
            saved_model (`bool`, *optional*, defaults to `False`):
                Whether or not the model should also be saved in the TensorFlow SavedModel format.
            version (`int`, *optional*, defaults to 1):
                The version of the saved model. A saved model needs to be versioned in order to be properly loaded by
                TensorFlow Serving, as detailed in the official documentation
                https://www.tensorflow.org/tfx/serving/serving_basic
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it.

                <Tip warning={true}>

                Using `push_to_hub=True` will synchronize the repository you are pushing to with `save_directory`,
                which requires `save_directory` to be a local clone of the repo you are pushing to if it's an existing
                folder. Pass along `temp_dir=True` to use a temporary directory instead.

                </Tip>

            kwargs:
                Additional keyword arguments passed along to the [`~file_utils.PushToHubMixin.push_to_hub`] method.
        """
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            repo = self._create_or_get_repo(save_directory, **kwargs)

        os.makedirs(save_directory, exist_ok=True)

        if saved_model:
            saved_model_dir = os.path.join(save_directory, "saved_model", str(version))
            self.save(saved_model_dir, include_optimizer=False, signatures=self.serving)
            logger.info(f"Saved model created in {saved_model_dir}")

        # Save configuration file
        self.config.architectures = [self.__class__.__name__[2:]]
        self.config.save_pretrained(save_directory)

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME)
        self.save_weights(output_model_file)
        logger.info(f"Model weights saved in {output_model_file}")

        if push_to_hub:
            url = self._push_to_hub(repo, commit_message=commit_message)
            logger.info(f"Model pushed to the hub in this commit: {url}")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Instantiate a pretrained TF 2.0 model from a pre-trained model configuration.

        The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.

        The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
        weights are discarded.

        Parameters:
            pretrained_model_name_or_path (`str`, *optional*):
                Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under
                      a user or organization name, like `dbmdz/bert-base-german-cased`.
                    - A path to a *directory* containing model weights saved using
                      [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *PyTorch state_dict save file* (e.g., `./pt_model/pytorch_model.bin`). In
                      this case, `from_pt` should be set to `True` and a configuration object should be provided as
                      `config` argument. This loading path is slower than converting the PyTorch model to a
                      TensorFlow model using the provided conversion scripts and loading the TensorFlow model
                      afterwards.
                    - `None` if you are both providing the configuration and state dictionary (resp. with keyword
                      arguments `config` and `state_dict`).
            model_args (sequence of positional arguments, *optional*):
                All remaining positional arguments will be passed to the underlying model's `__init__` method.
            config (`Union[PretrainedConfig, str]`, *optional*):
                Can be either:

                    - an instance of a class derived from [`PretrainedConfig`],
                    - a string valid as input to [`~PretrainedConfig.from_pretrained`].

                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the *model id* string of a pretrained
                      model).
                    - The model was saved using [`~TFPreTrainedModel.save_pretrained`] and is reloaded by supplying
                      the save directory.
                    - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
                      configuration JSON file named *config.json* is found in the directory.
            from_pt (`bool`, *optional*, defaults to `False`):
                Load the model weights from a PyTorch state_dict save file (see docstring of
                `pretrained_model_name_or_path` argument).
            ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
                Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
                as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
                checkpoint with 3 labels).
            cache_dir (`str`, *optional*):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
                file exists.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error
                messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, will use the token
                generated when running `transformers-cli login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            mirror (`str`, *optional*):
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety
                of the mirror. Please refer to the mirror site for more information.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to update the configuration object (after it has been loaded) and initiate the model
                (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
                automatically loaded:

                    - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
                      underlying model's `__init__` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, `kwargs` will be first passed to the configuration class
                      initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
                      corresponds to a configuration attribute will be used to override said attribute with the
                      supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
                      will be passed to the underlying model's `__init__` function.

        <Tip>

        Passing `use_auth_token=True` is required when you want to use a private model.

        </Tip>

        Examples:

        ```python
        >>> from transformers import BertConfig, TFBertModel

        >>> # Download model and configuration from huggingface.co and cache.
        >>> model = TFBertModel.from_pretrained("bert-base-uncased")
        >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
        >>> model = TFBertModel.from_pretrained("./test/saved_model/")
        >>> # Update configuration during loading.
        >>> model = TFBertModel.from_pretrained("bert-base-uncased", output_attentions=True)
        >>> assert model.config.output_attentions == True
        >>> # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable).
>>> config = BertConfig.from_json_file("./pt_model/my_pt_model_config.json") >>> model = TFBertModel.from_pretrained("./pt_model/my_pytorch_model.bin", from_pt=True, config=config) ```""" config = kwargs.pop("config", None) cache_dir = kwargs.pop("cache_dir", None) from_pt = kwargs.pop("from_pt", False) ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) local_files_only = kwargs.pop("local_files_only", False) use_auth_token = kwargs.pop("use_auth_token", None) revision = kwargs.pop("revision", None) mirror = kwargs.pop("mirror", None) load_weight_prefix = kwargs.pop("load_weight_prefix", None) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True # Load config if we don't provide a configuration if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained( config_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs, ) else: model_kwargs = kwargs # Load model if pretrained_model_name_or_path is not None: if os.path.isdir(pretrained_model_name_or_path): if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint in priority if from_pt archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): # Load from a TF 2.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) else: raise EnvironmentError( f"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME]} found in directory " f"{pretrained_model_name_or_path} or `from_pt` set to False" ) elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path elif os.path.isfile(pretrained_model_name_or_path + ".index"): archive_file = pretrained_model_name_or_path + ".index" else: archive_file = hf_bucket_url( pretrained_model_name_or_path, filename=(WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME), revision=revision, mirror=mirror, ) try: # Load from URL or cache if already cached resolved_archive_file = cached_path( archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, ) except EnvironmentError as err: logger.error(err) msg = ( f"Can't load weights for '{pretrained_model_name_or_path}'. 
Make sure that:\n\n" f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co./models'\n" f" (make sure '{pretrained_model_name_or_path}' is not a path to a local directory with something else, in that case)\n\n" f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {TF2_WEIGHTS_NAME}, {WEIGHTS_NAME}.\n\n" ) raise EnvironmentError(msg) if resolved_archive_file == archive_file: logger.info(f"loading weights file {archive_file}") else: logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}") else: resolved_archive_file = None config.name_or_path = pretrained_model_name_or_path # composed models, *e.g.* TFRag, require special treatment when it comes to loading # pre-trained weights. if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None: model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name") # Instantiate model. model = cls(config, *model_args, **model_kwargs) if from_pt: from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model # Load from a PyTorch checkpoint return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True) # we might need to extend the variable scope for composite models if load_weight_prefix is not None: with tf.compat.v1.variable_scope(load_weight_prefix): model(model.dummy_inputs) # build the network with dummy inputs else: model(model.dummy_inputs) # build the network with dummy inputs assert os.path.isfile(resolved_archive_file), f"Error retrieving file {resolved_archive_file}" # 'by_name' allow us to do transfer learning by skipping/adding layers # see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357 try: missing_keys, unexpected_keys, mismatched_keys = load_tf_weights( model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=load_weight_prefix, ) except OSError as e: try: with open(resolved_archive_file) as f: if f.read().startswith("version"): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please install " "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder " "you cloned." ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise OSError( "Unable to load weights from h5 file. " "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. " ) model(model.dummy_inputs) # Make sure restore ops are run if cls._keys_to_ignore_on_load_missing is not None: for pat in cls._keys_to_ignore_on_load_missing: missing_keys = [k for k in missing_keys if re.search(pat, k) is None] if cls._keys_to_ignore_on_load_unexpected is not None: for pat in cls._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning( f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when " f"initializing {model.__class__.__name__}: {unexpected_keys}\n" f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task " f"or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
                f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
                f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
            )
        else:
            logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n")

        if len(missing_keys) > 0:
            logger.warning(
                f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized: {missing_keys}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )
        elif len(mismatched_keys) == 0:
            logger.warning(
                f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
                f"If your task is similar to the task the model of the checkpoint was trained on, "
                f"you can already use {model.__class__.__name__} for predictions without further training."
            )
        if len(mismatched_keys) > 0:
            mismatched_warning = "\n".join(
                [
                    f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
                    for key, shape1, shape2 in mismatched_keys
                ]
            )
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )

        if output_loading_info:
            loading_info = {
                "missing_keys": missing_keys,
                "unexpected_keys": unexpected_keys,
                "mismatched_keys": mismatched_keys,
            }

            return model, loading_info

        return model


# To update the docstring, we need to copy the method, otherwise we change the original docstring.
TFPreTrainedModel.push_to_hub = copy_func(TFPreTrainedModel.push_to_hub)
TFPreTrainedModel.push_to_hub.__doc__ = TFPreTrainedModel.push_to_hub.__doc__.format(
    object="model", object_class="TFAutoModel", object_files="model checkpoint"
)


class TFConv1D(tf.keras.layers.Layer):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2). Basically works like
    a linear layer but the weights are transposed.

    Args:
        nf (`int`):
            The number of output features.
        nx (`int`):
            The number of input features.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation to use to initialize the weights.
        kwargs:
            Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`.
    """

    def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.nf = nf
        self.nx = nx
        self.initializer_range = initializer_range

    def build(self, input_shape):
        self.weight = self.add_weight(
            "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
        )
        self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())

    def call(self, x):
        bz, sl = shape_list(x)[:2]

        x = tf.reshape(x, [-1, self.nx])
        x = tf.matmul(x, self.weight) + self.bias

        x = tf.reshape(x, [bz, sl, self.nf])

        return x


class TFSharedEmbeddings(tf.keras.layers.Layer):
    r"""
    Construct shared token embeddings.

    The weights of the embedding layer are usually shared with the weights of the linear decoder when doing language
    modeling.
Args: vocab_size (`int`): The size of the vocabulary, e.g., the number of unique tokens. hidden_size (`int`): The size of the embedding vectors. initializer_range (`float`, *optional*): The standard deviation to use when initializing the weights. If no value is provided, it will default to \\(1/\sqrt{hidden\_size}\\). kwargs: Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`. """ def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.initializer_range = hidden_size ** -0.5 if initializer_range is None else initializer_range def build(self, input_shape): """ Build shared token embedding layer Shared weights logic adapted from https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ self.weight = self.add_weight( "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range) ) super().build(input_shape) def get_config(self): config = { "vocab_size": self.vocab_size, "hidden_size": self.hidden_size, "initializer_range": self.initializer_range, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor: """ Get token embeddings of inputs or decode final hidden state. Args: inputs (`tf.Tensor`): In embedding mode, should be an int64 tensor with shape `[batch_size, length]`. In linear mode, should be a float tensor with shape `[batch_size, length, hidden_size]`. mode (`str`, defaults to `"embedding"`): A valid value is either `"embedding"` or `"linear"`, the first one indicates that the layer should be used as an embedding layer, the second one that the layer should be used as a linear decoder. Returns: `tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape `[batch_size, length, embedding_size]`. In linear mode, the output is a float32 with shape `[batch_size, length, vocab_size]`. Raises: ValueError: if `mode` is not valid. Shared weights logic is adapted from [here](https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24). """ if mode == "embedding": return self._embedding(inputs) elif mode == "linear": return self._linear(inputs) else: raise ValueError(f"mode {mode} is not valid.") def _embedding(self, input_ids): """Applies embedding based on inputs tensor.""" return tf.gather(self.weight, input_ids) def _linear(self, inputs): """ Computes logits by running inputs through a linear layer. Args: inputs: A float32 tensor with shape [..., hidden_size] Returns: float32 tensor with shape [..., vocab_size]. """ first_dims = shape_list(inputs)[:-1] x = tf.reshape(inputs, [-1, self.hidden_size]) logits = tf.matmul(x, self.weight, transpose_b=True) return tf.reshape(logits, first_dims + [self.vocab_size]) class TFSequenceSummary(tf.keras.layers.Layer): """ Compute a single vector summary of a sequence hidden states. Args: config ([`PretrainedConfig`]): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_type** (`str`) -- The method to use to make this summary. 
Accepted values are: - `"last"` -- Take the last token hidden state (like XLNet) - `"first"` -- Take the first token hidden state (like Bert) - `"mean"` -- Take the mean of all tokens hidden states - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) - `"attn"` -- Not implemented now, use multi-head attention - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes (otherwise to `config.hidden_size`). - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, another string or `None` will add no activation. - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation. initializer_range (`float`, defaults to 0.02): The standard deviation to use to initialize the weights. kwargs: Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`. """ def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs): super().__init__(**kwargs) self.summary_type = config.summary_type if hasattr(config, "summary_use_proj") else "last" if self.summary_type == "attn": # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj if self.has_summary: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = tf.keras.layers.Dense( num_classes, kernel_initializer=get_initializer(initializer_range), name="summary" ) self.has_activation = hasattr(config, "summary_activation") and config.summary_activation == "tanh" if self.has_activation: self.activation = tf.keras.activations.tanh self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0 if self.has_first_dropout: self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout) self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0 if self.has_last_dropout: self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout) def call(self, inputs, cls_index=None, training=False): if not isinstance(inputs, (dict, tuple, list)): hidden_states = inputs elif isinstance(inputs, (tuple, list)): hidden_states = inputs[0] cls_index = inputs[1] if len(inputs) > 1 else None assert len(inputs) <= 2, "Too many inputs." else: hidden_states = inputs.get("hidden_states") cls_index = inputs.get("cls_index", None) if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = tf.reduce_mean(hidden_states, axis=1) elif self.summary_type == "cls_index": hidden_shape = shape_list(hidden_states) # e.g. 
[batch, num choices, seq length, hidden dims]
            if cls_index is None:
                cls_index = tf.fill(
                    hidden_shape[:-2], hidden_shape[-2] - 1
                )  # A tensor of shape [batch] or [batch, num choices] filled with the sequence length
            cls_shape = shape_list(cls_index)
            if len(cls_shape) <= len(hidden_shape) - 2:
                cls_index = tf.expand_dims(cls_index, axis=-1)
            # else:
            #     cls_index = cls_index[..., tf.newaxis]
            #     cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),))
            # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dims of hidden_states
            output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
            output = tf.squeeze(
                output, axis=len(hidden_shape) - 2
            )  # shape of output: (batch, num choices, hidden_size)
        elif self.summary_type == "attn":
            raise NotImplementedError

        if self.has_first_dropout:
            output = self.first_dropout(output, training=training)

        if self.has_summary:
            output = self.summary(output)

        if self.has_activation:
            output = self.activation(output)

        if self.has_last_dropout:
            output = self.last_dropout(output, training=training)

        return output


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """
    Deal with dynamic shapes in TensorFlow cleanly.

    Args:
        tensor (`tf.Tensor` or `np.ndarray`): The tensor we want the shape of.

    Returns:
        `List[int]`: The shape of the tensor as a list.
    """
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal:
    """
    Creates a `tf.initializers.TruncatedNormal` with the given range.

    Args:
        initializer_range (`float`, defaults to 0.02): Standard deviation of the initializer range.

    Returns:
        `tf.initializers.TruncatedNormal`: The truncated normal initializer.
    """
    return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)


class TFWrappedEmbeddings:
    """
    This class wraps the TFSharedEmbeddings layer in a plain Python (non-Keras-layer) class to avoid problems with
    weight restoring. It also makes sure that the layer is called from the correct scope to avoid problems with
    saving/storing the correct weights.
    """

    def __init__(self, layer, abs_scope_name=None):
        self._layer = layer
        self._abs_scope_name = abs_scope_name

    def call(self, inputs, mode="embedding"):
        if self._abs_scope_name is None:
            return self._layer.call(inputs, mode)

        # if an abs scope name is given to the embedding variable, call the variable from the absolute scope
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer.call(inputs, mode)

    def __call__(self, inputs, mode="embedding"):
        if self._abs_scope_name is None:
            return self._layer(inputs, mode)

        # if an abs scope name is given to the embedding variable, call the variable from the absolute scope
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer(inputs, mode)
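The helpers at the end of this file (`shape_list`, `get_initializer`, `TFSharedEmbeddings`) are self-contained, so a minimal sketch of how they fit together can be run directly against the definitions above. The import path below matches the file_path of this record (`src/transformers/modeling_tf_utils.py`) but assumes a transformers version that still ships these helpers there.

```python
# Minimal sketch, assuming these helpers are still exported from
# transformers.modeling_tf_utils (otherwise paste the definitions above).
import tensorflow as tf
from transformers.modeling_tf_utils import TFSharedEmbeddings, get_initializer, shape_list

emb = TFSharedEmbeddings(vocab_size=100, hidden_size=16)
ids = tf.constant([[1, 2, 3], [4, 5, 6]])        # (batch=2, length=3)

vectors = emb(ids, mode="embedding")             # (2, 3, 16): rows of the shared weight
logits = emb(vectors, mode="linear")             # (2, 3, 100): same weight, transposed
print(shape_list(vectors), shape_list(logits))   # [2, 3, 16] [2, 3, 100]

# get_initializer returns a TruncatedNormal initializer with the given stddev
w = get_initializer(0.02)(shape=(4, 4))
```

Note how the `linear` mode reuses the very same `weight` variable with `transpose_b=True`, which is exactly the weight tying between the input embedding and the LM decoder that the class docstring describes.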
[ "tensorflow.convert_to_tensor", "numpy.asarray", "tensorflow.python.keras.backend.int_shape", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.python.keras.saving.hdf5_format.load_attributes_from_hdf5_group", "tensorflow.rank", "tensorflow.python.keras.engine.data_adapter.expand_1d", "tensorflow.python.keras.engine.data_adapter.unpack_x_y_sample_weight", "tensorflow.math.reduce_any", "tensorflow.gather", "tensorflow.name_scope", "tensorflow.keras.initializers.TruncatedNormal", "tensorflow.compat.v1.variable_scope", "tensorflow.matmul", "tensorflow.TensorShape", "tensorflow.executing_eagerly", "tensorflow.fill", "tensorflow.shape", "tensorflow.keras.losses.MeanSquaredError", "tensorflow.zeros_initializer", "tensorflow.GradientTape", "tensorflow.python.keras.backend.batch_set_value", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.get_logger", "tensorflow.math.greater", "tensorflow.keras.layers.Dropout", "tensorflow.TensorSpec" ]
src/transformers/modeling_tf_utils.py
[(56, 'tensorflow.get_logger', 'tf.get_logger', ([], {}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['y_pred'], {}), True, 'import tensorflow as tf\n'), (124, 'functools.wraps', 'functools.wraps', (['initializer'], {}), False, 'import functools\n'), (305, 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), True, 'import tensorflow as tf\n'), (598, 'tensorflow.python.keras.backend.batch_set_value', 'K.batch_set_value', (['weight_value_tuples'], {}), True, 'from tensorflow.python.keras import backend as K\n'), (625, 'tensorflow.math.greater', 'tf.math.greater', (['size_diff', '(0)'], {}), True, 'import tensorflow as tf\n'), (1957, 'tensorflow.shape', 'tf.shape', (['tensor'], {}), True, 'import tensorflow as tf\n'), (1977, 'tensorflow.keras.initializers.TruncatedNormal', 'tf.keras.initializers.TruncatedNormal', ([], {'stddev': 'initializer_range'}), True, 'import tensorflow as tf\n'), (174, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (211, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (216, 'tensorflow.math.reduce_any', 'tf.math.reduce_any', (['(labels == -1)'], {}), True, 'import tensorflow as tf\n'), (247, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (277, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (373, 'warnings.warn', 'warnings.warn', (['"""The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (381, 'warnings.warn', 'warnings.warn', (['"""The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (388, 'warnings.warn', 'warnings.warn', (['"""The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (517, 'h5py.File', 'h5py.File', (['resolved_archive_file', '"""r"""'], {}), False, 'import h5py\n'), (776, 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""weights.h5"""'], {}), False, 'import os\n'), (779, 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""extra_data.pickle"""'], {}), False, 'import os\n'), (818, 'os.path.join', 'os.path.join', (['local_dir', '"""checkpoint"""'], {}), False, 'import os\n'), (819, 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""weights.h5"""'], {}), False, 'import os\n'), (822, 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""extra_data.pickle"""'], {}), False, 'import os\n'), (878, 
'tensorflow.python.keras.engine.data_adapter.expand_1d', 'data_adapter.expand_1d', (['data'], {}), False, 'from tensorflow.python.keras.engine import data_adapter\n'), (879, 'tensorflow.python.keras.engine.data_adapter.unpack_x_y_sample_weight', 'data_adapter.unpack_x_y_sample_weight', (['data'], {}), False, 'from tensorflow.python.keras.engine import data_adapter\n'), (917, 'tensorflow.python.keras.engine.data_adapter.expand_1d', 'data_adapter.expand_1d', (['data'], {}), False, 'from tensorflow.python.keras.engine import data_adapter\n'), (918, 'tensorflow.python.keras.engine.data_adapter.unpack_x_y_sample_weight', 'data_adapter.unpack_x_y_sample_weight', (['data'], {}), False, 'from tensorflow.python.keras.engine import data_adapter\n'), (1044, 'warnings.warn', 'warnings.warn', (['"""The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (1056, 'warnings.warn', 'warnings.warn', (['"""The method get_prefix_bias_name is deprecated. Please use `get_bias` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (1336, 'os.path.isfile', 'os.path.isfile', (['save_directory'], {}), False, 'import os\n'), (1344, 'os.makedirs', 'os.makedirs', (['save_directory'], {'exist_ok': '(True)'}), False, 'import os\n'), (1356, 'os.path.join', 'os.path.join', (['save_directory', 'TF2_WEIGHTS_NAME'], {}), False, 'import os\n'), (1599, 'os.path.isfile', 'os.path.isfile', (['resolved_archive_file'], {}), False, 'import os\n'), (1724, 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, self.nx]'], {}), True, 'import tensorflow as tf\n'), (1727, 'tensorflow.reshape', 'tf.reshape', (['x', '[bz, sl, self.nf]'], {}), True, 'import tensorflow as tf\n'), (1811, 'tensorflow.gather', 'tf.gather', (['self.weight', 'input_ids'], {}), True, 'import tensorflow as tf\n'), (1824, 'tensorflow.reshape', 'tf.reshape', (['inputs', '[-1, self.hidden_size]'], {}), True, 'import tensorflow as tf\n'), (1825, 'tensorflow.matmul', 'tf.matmul', (['x', 'self.weight'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (1827, 'tensorflow.reshape', 'tf.reshape', (['logits', '(first_dims + [self.vocab_size])'], {}), True, 'import tensorflow as tf\n'), (1959, 'tensorflow.TensorShape', 'tf.TensorShape', (['None'], {}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (180, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (217, 'warnings.warn', 'warnings.warn', (['"""Using `-1` to mask the loss for the token is deprecated. 
Please use `-100` instead."""'], {}), False, 'import warnings\n'), (222, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (236, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (282, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (283, 'tensorflow.reshape', 'tf.reshape', (['logits', '(-1, 2)'], {}), True, 'import tensorflow as tf\n'), (284, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (365, 'inspect.signature', 'inspect.signature', (['func'], {}), False, 'import inspect\n'), (519, 'tensorflow.python.keras.saving.hdf5_format.load_attributes_from_hdf5_group', 'hdf5_format.load_attributes_from_hdf5_group', (['f', '"""layer_names"""'], {}), False, 'from tensorflow.python.keras.saving import hdf5_format\n'), (630, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[0, size_diff], [0, 0]]'], {}), True, 'import tensorflow as tf\n'), (633, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[num_tokens_to_copy, 1]'], {}), True, 'import tensorflow as tf\n'), (634, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[0, size_diff], [0, 0]]'], {}), True, 'import tensorflow as tf\n'), (639, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[0, 0]'], {}), True, 'import tensorflow as tf\n'), (640, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[new_num_tokens, old_embedding_dim]'], {}), True, 'import tensorflow as tf\n'), (642, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[new_num_tokens, 1]'], {}), True, 'import tensorflow as tf\n'), (687, 'tensorflow.constant', 'tf.constant', (['DUMMY_INPUTS'], {}), True, 'import tensorflow as tf\n'), (771, 'os.path.isdir', 'os.path.isdir', (['checkpoint_dir'], {}), False, 'import os\n'), (772, 'os.mkdir', 'os.mkdir', (['checkpoint_dir'], {}), False, 'import os\n'), (781, 'pickle.dump', 'pickle.dump', (['extra_data', 'f'], {}), False, 'import pickle\n'), (801, 'os.path.isdir', 'os.path.isdir', (['repo_path_or_name'], {}), False, 'import os\n'), (803, 'huggingface_hub.list_repo_files', 'list_repo_files', (['repo_path_or_name'], {}), False, 'from huggingface_hub import Repository, list_repo_files\n'), (812, 'huggingface_hub.Repository', 'Repository', (['model_id'], {'clone_from': 'f"""https://huggingface.co./{repo_path_or_name}"""'}), False, 'from huggingface_hub import Repository, list_repo_files\n'), (820, 'os.path.isfile', 'os.path.isfile', (['weights_file'], {}), False, 'import os\n'), (823, 'os.path.isfile', 'os.path.isfile', (['extra_data_file'], {}), False, 'import os\n'), (830, 'pickle.load', 'pickle.load', (['f'], {}), False, 'import pickle\n'), (888, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (1197, 'tensorflow.math.greater', 'tf.math.greater', (['size_diff', '(0)'], {}), True, 'import tensorflow as tf\n'), (1524, 'os.path.isdir', 'os.path.isdir', (['pretrained_model_name_or_path'], {}), False, 'import os\n'), (1725, 'tensorflow.matmul', 'tf.matmul', (['x', 'self.weight'], {}), True, 'import tensorflow as tf\n'), (1886, 'tensorflow.keras.layers.Dropout', 
'tf.keras.layers.Dropout', (['config.summary_first_dropout'], {}), True, 'import tensorflow as tf\n'), (1890, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.summary_last_dropout'], {}), True, 'import tensorflow as tf\n'), (1996, 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['self._abs_scope_name'], {'auxiliary_name_scope': '(False)'}), True, 'import tensorflow as tf\n'), (2005, 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['self._abs_scope_name'], {'auxiliary_name_scope': '(False)'}), True, 'import tensorflow as tf\n'), (218, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (429, 'warnings.warn', 'warnings.warn', (['"""The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (437, 'warnings.warn', 'warnings.warn', (['"""The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (543, 'tensorflow.python.keras.saving.hdf5_format.load_attributes_from_hdf5_group', 'hdf5_format.load_attributes_from_hdf5_group', (['h5_layer_object', '"""weight_names"""'], {}), False, 'from tensorflow.python.keras.saving import hdf5_format\n'), (976, 'os.path.join', 'os.path.join', (['output_dir', '"""README.md"""'], {}), False, 'import os\n'), (1594, 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['load_weight_prefix'], {}), True, 'import tensorflow as tf\n'), (1719, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (1997, 'tensorflow.name_scope', 'tf.name_scope', (['abs_scope_name.original_name_scope'], {}), True, 'import tensorflow as tf\n'), (2006, 'tensorflow.name_scope', 'tf.name_scope', (['abs_scope_name.original_name_scope'], {}), True, 'import tensorflow as tf\n'), (550, 'numpy.asarray', 'np.asarray', (['h5_layer_object[weight_name]'], {}), True, 'import numpy as np\n'), (728, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None)', 'tf.int32'], {'name': '"""input_ids"""'}), True, 'import tensorflow as tf\n'), (729, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None)', 'tf.int32'], {'name': '"""attention_mask"""'}), True, 'import tensorflow as tf\n'), (730, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None)', 'tf.int32'], {'name': '"""token_type_ids"""'}), True, 'import tensorflow as tf\n'), (1192, 'tensorflow.rank', 'tf.rank', (['weight'], {}), True, 'import tensorflow as tf\n'), (1199, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['padding_shape'], {}), True, 'import tensorflow as tf\n'), (1202, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['mask_shape'], {}), True, 'import tensorflow as tf\n'), (1203, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['padding_shape'], {}), True, 'import tensorflow as tf\n'), (1207, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['slice_from'], {}), True, 'import tensorflow as tf\n'), (1207, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['final_shape'], {}), True, 'import tensorflow as tf\n'), (1209, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['final_shape'], {}), True, 'import tensorflow as tf\n'), (1527, 'os.path.join', 'os.path.join', 
(['pretrained_model_name_or_path', 'WEIGHTS_NAME'], {}), False, 'import os\n'), (1536, 'os.path.isfile', 'os.path.isfile', (['pretrained_model_name_or_path'], {}), False, 'import os\n'), (1538, 'os.path.isfile', 'os.path.isfile', (["(pretrained_model_name_or_path + '.index')"], {}), False, 'import os\n'), (1908, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['hidden_states'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (886, 'tensorflow.shape', 'tf.shape', (["x['input_ids']"], {}), True, 'import tensorflow as tf\n'), (925, 'tensorflow.shape', 'tf.shape', (["x['input_ids']"], {}), True, 'import tensorflow as tf\n'), (1525, 'os.path.join', 'os.path.join', (['pretrained_model_name_or_path', 'WEIGHTS_NAME'], {}), False, 'import os\n'), (1528, 'os.path.join', 'os.path.join', (['pretrained_model_name_or_path', 'TF2_WEIGHTS_NAME'], {}), False, 'import os\n'), (1530, 'os.path.join', 'os.path.join', (['pretrained_model_name_or_path', 'TF2_WEIGHTS_NAME'], {}), False, 'import os\n'), (578, 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['symbolic_weight'], {}), True, 'from tensorflow.python.keras import backend as K\n'), (1630, 're.search', 're.search', (['pat', 'k'], {}), False, 'import re\n'), (1634, 're.search', 're.search', (['pat', 'k'], {}), False, 'import re\n'), (1912, 'tensorflow.fill', 'tf.fill', (['hidden_shape[:-2]', '(hidden_shape[-2] - 1)'], {}), True, 'import tensorflow as tf\n'), (1917, 'tensorflow.expand_dims', 'tf.expand_dims', (['cls_index'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (582, 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['symbolic_weight'], {}), True, 'from tensorflow.python.keras import backend as K\n'), (586, 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['symbolic_weight'], {}), True, 'from tensorflow.python.keras import backend as K\n')]
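Before moving on to the next record, a quick round trip with the `save_pretrained`/`from_pretrained` pair documented in the code above; the model id comes from the docstring examples, and the directory name is only illustrative.

```python
# Hypothetical round trip; the directory name is illustrative.
from transformers import TFBertModel

model = TFBertModel.from_pretrained("bert-base-uncased")    # download + cache
model.save_pretrained("./my_model_directory/")              # writes config.json plus the TF2 weights file
reloaded = TFBertModel.from_pretrained("./my_model_directory/")
```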
th2l/E2EMAL
5f4f4a58dfb00f7062994b852a73f8e222a9bf62
import tensorflow as tf
import math
import numpy as np
import random, time
import tabulate
import copy


def set_gpu_growth_or_cpu(use_cpu=False, write_info=False):
    gpus = tf.config.list_physical_devices('GPU')
    if gpus:
        if use_cpu:
            if write_info:
                print("Use CPU")
            # hide GPU 0; on a single-GPU machine this leaves no visible GPUs,
            # so TensorFlow falls back to the CPU
            tf.config.set_visible_devices(gpus[1:], 'GPU')
        else:
            try:
                for gpu in gpus:
                    tf.config.experimental.set_memory_growth(gpu, True)
                if write_info:
                    logical_gpus = tf.config.experimental.list_logical_devices('GPU')
                    print(len(gpus), "Physical GPUs, ", len(logical_gpus), " Logical GPUs")
                    print('Use GPU')
            except RuntimeError as e:
                print(e)
    else:
        print("Running on CPU, please check GPU drivers, CUDA, ...")
    tf.get_logger().setLevel('INFO')


def set_seed(seed, reset_session=False):
    if reset_session:
        tf.keras.backend.clear_session()
    random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)


class VerboseFitCallBack(tf.keras.callbacks.Callback):
    def __init__(self, print_lr=False):
        # pass self so that the Keras Callback base class is actually initialized
        super(VerboseFitCallBack, self).__init__()
        self.columns = None
        self.st_time = 0
        self.print_lr = print_lr

    def on_epoch_begin(self, epoch, logs=None):
        self.st_time = time.time()

    def on_epoch_end(self, epoch, logs=None):
        cus_logs = copy.deepcopy(logs)
        cus_logs.pop('batch', None)
        cus_logs.pop('size', None)
        current_header = list(cus_logs.keys())
        if 'lr' in current_header:
            lr_index = current_header.index('lr')
        else:
            lr_index = len(current_header)
        if self.columns is None:
            self.columns = current_header[:lr_index] + current_header[lr_index + 1:] + ['time']
            if self.print_lr and tf.executing_eagerly():
                self.columns = ['ep', 'lr'] + self.columns
            else:
                self.columns = ['ep', ] + self.columns
            # for col_index in range(len(self.columns)):
            #     if len(self.columns[col_index]) > 10:
            #         self.columns[col_index] = self.columns[col_index][:10]
        logs_values = list(cus_logs.values())
        if self.print_lr and tf.executing_eagerly():
            # Get the current learning rate (handles both plain values and schedules)
            current_lr = tf.keras.backend.get_value(self.model.optimizer.lr)
            try:
                current_step = tf.cast(self.model.optimizer.iterations, tf.float32)
                current_lr = float(current_lr(current_step))
                # current_lr = tf.cast(current_lr(current_step), tf.float32)
            except:
                current_lr = float(current_lr)
        time_ep = time.time() - self.st_time
        if self.print_lr and tf.executing_eagerly():
            current_values = [epoch + 1, current_lr] + logs_values[:lr_index] + logs_values[lr_index + 1:] + [time_ep]
        else:
            current_values = [epoch + 1, ] + logs_values[:lr_index] + logs_values[lr_index + 1:] + [time_ep]
        table = tabulate.tabulate([current_values], self.columns, tablefmt='simple', floatfmt='10.6g')
        if epoch % 40 == 0:
            table = table.split('\n')
            table = '\n'.join([table[1]] + table)
        else:
            table = table.split('\n')[2]
        print(table)


class CusLRScheduler(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, initial_learning_rate, min_lr, lr_start_warmup=0., warmup_steps=10, num_constant=0, T_max=20,
                 num_half_cycle=1., name=None):
        super(CusLRScheduler, self).__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.num_constant = num_constant
        self.T_max = T_max
        self.lr_start_warmup = lr_start_warmup
        self.min_lr = min_lr
        self.num_half_cycle = num_half_cycle
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "CusLRScheduler") as name:
            initial_learning_rate = tf.convert_to_tensor(self.initial_learning_rate, name="initial_learning_rate")
            dtype = initial_learning_rate.dtype
            min_lr = tf.cast(self.min_lr, dtype)
            lr_start_warmup = tf.cast(self.lr_start_warmup, dtype)
            step_cf = tf.cast(step, dtype)

            # linear warmup from lr_start_warmup to initial_learning_rate
            wm_steps = tf.cast(self.warmup_steps, dtype=dtype)
            warmup_ratio = tf.where(tf.less_equal(step_cf, wm_steps), step_cf / wm_steps, 0.0)
            use_warmup_lr = tf.where(tf.less_equal(step_cf, wm_steps), 1.0, 0.0)
            warmup_lr = use_warmup_lr * (lr_start_warmup + warmup_ratio * (initial_learning_rate - lr_start_warmup))

            # hold at initial_learning_rate for num_constant steps after warmup
            num_constant = tf.cast(self.num_constant, dtype=dtype)
            constant_lr = tf.where(
                tf.logical_and(tf.less_equal(step_cf - wm_steps, num_constant), use_warmup_lr < 1),
                initial_learning_rate, 0.0)

            # cosine decay from initial_learning_rate down to min_lr until T_max
            t_max = tf.cast(self.T_max, dtype)
            use_cosine_lr = tf.where(tf.logical_and(tf.less_equal(step_cf, t_max),
                                                    tf.less(wm_steps + num_constant, step_cf)), 1.0, 0.0)
            pi_val = tf.cast(tf.constant(math.pi), dtype)
            num_half_cycle = tf.cast(self.num_half_cycle, dtype)
            cosine_lr = tf.where(use_cosine_lr > 0.,
                                 min_lr + (initial_learning_rate - min_lr) * (1 + tf.cos(
                                     pi_val * num_half_cycle * (step_cf - wm_steps - num_constant) / (
                                             t_max - wm_steps - num_constant))) / 2, 0.)

            # stay at min_lr after T_max
            use_min_lr = tf.where(tf.less_equal(t_max, step_cf), min_lr, 0.0)

            return use_min_lr + cosine_lr + constant_lr + warmup_lr

    def get_config(self):
        ret_config = {'initial_learning_rate': self.initial_learning_rate,
                      'min_lr': self.min_lr,
                      'lr_start_warmup': self.lr_start_warmup,
                      'warmup_steps': self.warmup_steps,
                      'num_constant': self.num_constant,
                      'T_max': self.T_max,
                      'num_half_cycle': self.num_half_cycle,
                      'name': self.name}
        return ret_config


class MultiModalLoss(tf.keras.layers.Layer):
    """Adapted from https://github.com/yaringal/multi-task-learning-example"""

    # note: the default is a loss *instance* (not the class), so it can be
    # called directly as loss_function(y_true, y_pred)
    def __init__(self, num_outputs=4, loss_function=tf.keras.losses.CategoricalCrossentropy(), trainable=True,
                 **kwargs):
        self.num_outputs = num_outputs
        self.loss_func = loss_function
        self.trainable = trainable
        self.config = {'num_outputs': num_outputs, 'loss_function': loss_function, 'trainable': trainable}
        super(MultiModalLoss, self).__init__(**kwargs)

    def build(self, input_shape=None):
        self.log_vars = []
        for idx in range(self.num_outputs):
            self.log_vars += [self.add_weight(name='log_var_{}'.format(idx), shape=(1,),
                                              initializer=tf.keras.initializers.Constant(0.),
                                              trainable=self.trainable)]
        super(MultiModalLoss, self).build(input_shape)

    def multi_loss(self, ys_true, ys_pred):
        assert len(ys_true) == self.num_outputs and len(ys_pred) == self.num_outputs
        loss = 0
        for y_true, y_pred, log_var in zip(ys_true, ys_pred, self.log_vars):
            prec = tf.exp(-log_var[0])
            loss = loss + prec * self.loss_func(y_true, y_pred)
        loss = loss + tf.reduce_mean(self.log_vars)
        return loss

    def call(self, inputs):
        ys_true = inputs[:self.num_outputs]
        ys_pred = inputs[self.num_outputs:]
        loss = self.multi_loss(ys_true, ys_pred)
        self.add_loss(loss, inputs=inputs)
        # Only return last prediction
        return inputs[-1]

    def get_config(self):
        return self.config
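A minimal sketch of how `CusLRScheduler` behaves, with illustrative hyperparameters: the rate warms up linearly for `warmup_steps`, holds at `initial_learning_rate` for `num_constant` steps, cosine-decays until `T_max`, and stays at `min_lr` afterwards.

```python
import tensorflow as tf

# Illustrative values; any optimizer accepting a LearningRateSchedule works.
sched = CusLRScheduler(initial_learning_rate=1e-3, min_lr=1e-5,
                       lr_start_warmup=0.0, warmup_steps=10,
                       num_constant=5, T_max=50)

for step in [0, 5, 10, 15, 30, 49, 60]:
    print(step, float(sched(step)))   # warmup -> constant -> cosine -> min_lr

opt = tf.keras.optimizers.Adam(learning_rate=sched)
```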
[ "tensorflow.convert_to_tensor", "tensorflow.cast", "tensorflow.keras.backend.clear_session", "tensorflow.config.list_physical_devices", "tensorflow.random.set_seed", "tensorflow.config.experimental.set_memory_growth", "tensorflow.name_scope", "tensorflow.config.set_visible_devices", "tensorflow.executing_eagerly", "tensorflow.config.experimental.list_logical_devices", "tensorflow.less", "tensorflow.keras.backend.get_value", "tensorflow.less_equal", "tensorflow.exp", "tensorflow.keras.initializers.Constant", "tensorflow.constant", "tensorflow.cos", "numpy.random.seed", "tensorflow.reduce_mean", "tensorflow.get_logger" ]
src/MERC2020/utils.py
[(10, 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), True, 'import tensorflow as tf\n'), (35, 'random.seed', 'random.seed', (['seed'], {}), False, 'import random, time\n'), (36, 'numpy.random.seed', 'np.random.seed', (['seed'], {}), True, 'import numpy as np\n'), (37, 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), True, 'import tensorflow as tf\n'), (48, 'time.time', 'time.time', ([], {}), False, 'import random, time\n'), (51, 'copy.deepcopy', 'copy.deepcopy', (['logs'], {}), False, 'import copy\n'), (89, 'tabulate.tabulate', 'tabulate.tabulate', (['[current_values]', 'self.columns'], {'tablefmt': '"""simple"""', 'floatfmt': '"""10.6g"""'}), False, 'import tabulate\n'), (15, 'tensorflow.config.set_visible_devices', 'tf.config.set_visible_devices', (['gpus[1:]', '"""GPU"""'], {}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.get_logger', 'tf.get_logger', ([], {}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), True, 'import tensorflow as tf\n'), (75, 'tensorflow.keras.backend.get_value', 'tf.keras.backend.get_value', (['self.model.optimizer.lr'], {}), True, 'import tensorflow as tf\n'), (83, 'time.time', 'time.time', ([], {}), False, 'import random, time\n'), (84, 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.name_scope', 'tf.name_scope', (["(self.name or 'CusLRScheduler')"], {}), True, 'import tensorflow as tf\n'), (114, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.initial_learning_rate'], {'name': '"""initial_learning_rate"""'}), True, 'import tensorflow as tf\n'), (117, 'tensorflow.cast', 'tf.cast', (['self.min_lr', 'dtype'], {}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.cast', 'tf.cast', (['self.lr_start_warmup', 'dtype'], {}), True, 'import tensorflow as tf\n'), (120, 'tensorflow.cast', 'tf.cast', (['step', 'dtype'], {}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.cast', 'tf.cast', (['self.warmup_steps'], {'dtype': 'dtype'}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.cast', 'tf.cast', (['self.num_constant'], {'dtype': 'dtype'}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.cast', 'tf.cast', (['self.T_max', 'dtype'], {}), True, 'import tensorflow as tf\n'), (136, 'tensorflow.cast', 'tf.cast', (['self.num_half_cycle', 'dtype'], {}), True, 'import tensorflow as tf\n'), (175, 'tensorflow.exp', 'tf.exp', (['(-log_var[0])'], {}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.log_vars'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.cast', 'tf.cast', (['self.model.optimizer.iterations', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (123, 'tensorflow.less_equal', 'tf.less_equal', (['step_cf', 'wm_steps'], {}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.less_equal', 'tf.less_equal', (['step_cf', 'wm_steps'], {}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.constant', 'tf.constant', (['math.pi'], {}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.less_equal', 'tf.less_equal', (['t_max', 'step_cf'], {}), True, 'import tensorflow as tf\n'), (19, 'tensorflow.config.experimental.set_memory_growth', 
'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), True, 'import tensorflow as tf\n'), (21, 'tensorflow.config.experimental.list_logical_devices', 'tf.config.experimental.list_logical_devices', (['"""GPU"""'], {}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.less_equal', 'tf.less_equal', (['(step_cf - wm_steps)', 'num_constant'], {}), True, 'import tensorflow as tf\n'), (134, 'tensorflow.less_equal', 'tf.less_equal', (['step_cf', 't_max'], {}), True, 'import tensorflow as tf\n'), (134, 'tensorflow.less', 'tf.less', (['(wm_steps + num_constant)', 'step_cf'], {}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (137, 'tensorflow.cos', 'tf.cos', (['(pi_val * num_half_cycle * (step_cf - wm_steps - num_constant) / (t_max -\n wm_steps - num_constant))'], {}), True, 'import tensorflow as tf\n')]
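A short usage sketch for the helpers in this file; `model`, `x`, and `y` below are placeholders for whatever Keras model and data the training script builds.

```python
# Hypothetical wiring; model, x, and y are placeholders.
set_gpu_growth_or_cpu(use_cpu=False, write_info=True)
set_seed(100100, reset_session=True)

model.fit(x, y, epochs=100, verbose=0,
          callbacks=[VerboseFitCallBack(print_lr=True)])  # one tabulated row per epoch
```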
CMU-IDS-2020/fp-carambola-of-evolution
440bd33a1f1cc40f22a32a0db80279649e1036e6
import re
# import time
import nltk
import pickle
import string
import numpy as np
import pandas as pd
import altair as alt
import streamlit as st
import tensorflow as tf
from sklearn.manifold import TSNE
from tensorflow.keras import layers
import tensorflow_datasets as tfds
from scipy.spatial.distance import cosine

HEIGHT = 200
WIDTH = 200
BUFFER_SIZE = 10000
BATCH_SIZE = 64
SEED = 100100

# for processing text
# (nltk resources live under subfolders such as "corpora/" and "tokenizers/",
# so the lookups below use the full resource paths)
try:
    nltk.data.find("corpora/stopwords")
except:
    nltk.download("stopwords", quiet=True)
try:
    nltk.data.find("corpora/wordnet")
except:
    nltk.download("wordnet", quiet=True)
try:
    nltk.data.find("tokenizers/punkt")
except:
    nltk.download("punkt", quiet=True)
try:
    nltk.data.find('taggers/averaged_perceptron_tagger')
except:
    nltk.download('averaged_perceptron_tagger', quiet=True)

lemmatizer = nltk.stem.wordnet.WordNetLemmatizer()
english_stopwords = set(nltk.corpus.stopwords.words('english'))


@st.cache(allow_output_mutation=True)
def process_text(text):
    def get_pos(tag):
        if tag.startswith("J"):
            return "a"
        elif tag.startswith("V"):
            return "v"
        elif tag.startswith("R"):
            return "r"
        else:
            return "n"

    text = text.replace("<br />", "")
    text = text.replace("\'", "'")
    text = re.sub(r"'s", "", text.lower())
    text = re.sub(r"([a-z0-9]+)'([^s])", r"\1\2", text)
    text = re.sub(rf"[^{string.ascii_letters}0-9]", " ", text)

    tokenized = []
    for token in nltk.word_tokenize(text):
        token, tag = nltk.pos_tag([token])[0]
        t = lemmatizer.lemmatize(token, pos=get_pos(tag))
        if t not in english_stopwords and len(t) > 1:
            tokenized.append(t)

    return " ".join(tokenized)


def predict(model, sentences):
    return model.predict(np.array([process_text(s) for s in sentences]))


def probability(x):
    return np.round(np.abs(2 * (1 / (1 + np.exp(-x)) - 0.5)), 2)


class bcolors:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    GREEN = '\033[92m'
    WARNING = '\033[93m'
    RED = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


@st.cache(allow_output_mutation=True)
def load_data(file_name):
    data = pd.read_csv(file_name, index_col=None, sep="\t")
    return data


@st.cache(allow_output_mutation=True)
def load_pickled(file_name):
    with open(file_name, "rb") as r_obj:
        return pickle.load(r_obj, encoding="utf-8")


@st.cache(allow_output_mutation=True)
def load_embedding(file_name):
    return np.load(file_name, allow_pickle=True)


# @st.cache(allow_output_mutation=True)
def load_main_model():
    # dataset = tfds.load('imdb_reviews', as_supervised=True)
    X_train = load_pickled("v.pkl")

    VOCAB_SIZE = 10000
    encoder = tf.keras.layers.experimental.preprocessing.TextVectorization(
        max_tokens=VOCAB_SIZE)
    encoder.adapt(X_train)

    model = tf.keras.Sequential([
        encoder,
        tf.keras.layers.Embedding(
            input_dim=len(encoder.get_vocabulary()) + 2,
            output_dim=64,
            # Use masking to handle the variable sequence lengths
            mask_zero=True),
        tf.keras.layers.LSTM(64),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(1)
    ])
    model.compile()
    return model


@st.cache(allow_output_mutation=True)
def get_ixs(n_train, n_neighbor):
    return np.random.choice(range(n_train), n_neighbor, replace=False)


def color_text(text, model):
    def make_colored_text(text, p, probs):
        if abs(p / max(np.abs(probs))) < 0.1:
            return f"<span style='color:grey; opacity:0.3'>{text}</span>"
        elif p < 0:
            return f"<span style='color:red; opacity:{abs(p / max(np.abs(probs))) + 0.2}'>{text}</span>"
        else:
            return f"<span style='color:green; opacity:{abs(p / max(np.abs(probs))) + 0.2}'>{text}</span>"

    # score growing prefixes of the text; the impact of each word is the
    # change in the model score after adding it
    tokens = text.split()
    probs = [0]
    for k in range(0, len(tokens)):
        proc_text = process_text(" ".join(tokens[:k + 1]))
        if proc_text == "":
            probs.append(probs[-1])
        else:
            probs.append(
                predict(model, np.array([proc_text]))[0][0]
            )
    fin_prob = probs[-1]
    probs = np.diff(probs)
    colored_texts = [make_colored_text(token, p, probs)
                     for token, p in zip(tokens, probs)]
    return " ".join(colored_texts), fin_prob


st.markdown("<h1 style='text-align: center;'>Explaining Recurrent Neural Networks</h1>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: right; color: gray;'>Made by Clay and Ihor</h3>", unsafe_allow_html=True)

st.write('In this interactive app you will be able to explore why an RNN produces one output or another and what in the input makes a difference for the model. ' +
         'The model that you will explore is a simple RNN that was built on the IMDB reviews binary classification dataset (positive or negative review).')
st.write('The model consists of an embedding layer, an LSTM, a single hidden dense layer with 64 ReLU neurons, and a dense output layer with a single neuron.')
st.write('The proposed framework is illustrated on a deliberately simple model; however, it is general and can trivially be extended to larger and more complex models.')

main_df = load_data("combined_sentiment_labelled.tsv")
main_model = load_main_model()
colored = load_pickled('colored.txt')


def sen2vec(x):
    x = np.array([[process_text(xx[0])] for xx in x])
    return main_model.get_layer(index=1)(main_model.get_layer(index=0)(x))


sen2vec_model = tf.keras.Sequential([
    main_model.get_layer(index=0),
    main_model.get_layer(index=1),
    main_model.get_layer(index=2),
    main_model.get_layer(index=4)
])

sen2vec_model_interm = tf.keras.Sequential([
    main_model.get_layer(index=0),
    main_model.get_layer(index=1),
    main_model.get_layer(index=2)
])

# n_neighbor = st.slider(
#     "Choose the number of neighboring reviews to find",
#     min_value=50, max_value=len(main_df), value=50, step=50
# )
n_neighbor = 500  # fixed value
# ixs = get_ixs(len(main_df), n_neighbor)
ixs = list(range(n_neighbor))

main_df = main_df.iloc[ixs, :]
# embedding = embedding[ixs, :]

st.markdown('## Inference:')
st.write('Firstly, let\'s try to get some insight into how the model works by observing which words contribute to the decision to output positive or negative.')
st.write('Below you see five sampled reviews from the example dataset with the prediction, the confidence of the prediction, and visualized word impacts. ' +
         'Color represents a positive (green), negative (red) or neutral (grey) impact on the model\'s prediction, namely how the model\'s prediction and confidence changed after seeing that word. ' +
         'Opacity represents the strength of the impact - the higher the opacity, the more impact that word had!')


def sample_inference():
    idx = np.random.randint(0, len(colored), size=5)
    st.markdown(f'---------------------------')
    for i in idx:
        st.markdown(colored[i], unsafe_allow_html=True)
        st.markdown(f'---------------------------')


if st.button('Sample more reviews'):
    sample_inference()
else:
    sample_inference()

st.markdown('## Training:')
st.write('Now let\'s see how the model arrived at such a decision by visualizing its training process. ' +
         'In this part we will be working with a single sentence. Type your own or click the button to sample a random one!')

if st.button('Sample random review'):
    review = main_df.iloc[np.random.randint(0, len(main_df))].text
    text = st.text_input("Or type your review!", review)
else:
    text = st.text_input("Or type your review!", "This application is really cool and authors are great!")

if text != "":
    st.write('Firstly, we will provide the same type of visualization for the review over several epochs. ' +
             'Observe the patterns in the changes of the model\'s confidence and how each word impacts the prediction.')
    sentences = np.append(main_df["text"].values, text)
    st.markdown(f'---------------------------')
    for i in range(0, 11, 2):
        main_model.load_weights(f"epoch{i}/")
        pred = color_text(text, model=main_model)
        st.markdown(f"Epoch {i}" + " | " + ("NEG" if pred[1] < 0 else "POS") + " | " + str(probability(pred[1])) + " | " + pred[0],
                    unsafe_allow_html=True)
    st.markdown(f'---------------------------')

    st.write('Now let\'s visualize the feature space and how it is transformed as it passes through the model\'s layers.')
    st.write('The leftmost plot is the learned sentence embedding, ' +
             'the middle one is the output of the embeddings passed through the LSTM, ' +
             'and the rightmost one is the LSTM output passed through the dense layer.')
    st.write('Charts are interactive in two ways - the review text and probability are shown on cursor hover, and it\'s possible to select a subset of the data by dragging a rectangle with the mouse.')
    st.write('Note that all feature spaces are originally high-dimensional; we approximate them for visualization with Isomap.')

    # for i in range(0, 11, 2):
    for i in [0, 4, 10]:
        st.markdown(f'#### Epoch {i}')
        isomap_raw = load_pickled(f'./embedding/isomap-raw-{i}.pkl')
        isomap_intermediate = load_pickled(f'./embedding/isomap-intermediate-{i}.pkl')
        isomap_proc = load_pickled(f'./embedding/isomap-proc-{i}.pkl')

        probs = predict(main_model, sentences).reshape(-1).round(2)
        labels = ['Positive' if x else 'Negative' for x in (probs.reshape(-1) > 0)]
        labels[-1] = "User"

        raw_emb_text = isomap_raw.transform(
            sen2vec([[text]]).numpy().mean(axis=1)
        )
        isomap_raw_emb = np.append(isomap_raw.embedding_, raw_emb_text, axis=0)

        intermediate_emb_text = isomap_intermediate.transform(
            predict(sen2vec_model_interm, [text])
        )
        isomap_intermediate_emb = np.append(isomap_intermediate.embedding_, intermediate_emb_text, axis=0)

        proc_emb_text = isomap_proc.transform(predict(sen2vec_model, [text]))
        isomap_proc_emb = np.append(isomap_proc.embedding_, proc_emb_text, axis=0)

        plot_data = pd.DataFrame({
            'x_raw': isomap_raw_emb[:, 0],
            'y_raw': isomap_raw_emb[:, 1],
            'x_interm': isomap_intermediate_emb[:, 0],
            'y_interm': isomap_intermediate_emb[:, 1],
            'x_proc': isomap_proc_emb[:, 0],
            'y_proc': isomap_proc_emb[:, 1],
            'sentence': sentences,
            'opacity': np.abs(probs),
            'prob': probability(probs).astype(str),
            'pred': labels})

        selector_embs = alt.selection_interval(empty='all', encodings=['x', 'y'])

        words_tsned = alt.Chart(plot_data).mark_circle(size=200).encode(
            x = 'x_raw',
            y = 'y_raw',
            tooltip =[alt.Tooltip('sentence'), alt.Tooltip('prob')],
            color = alt.Color('pred', scale=alt.Scale(domain=['Negative', 'Positive', 'User'],
                                                      range=['red', 'green', 'blue']),
                              legend=alt.Legend(symbolOpacity=1)),
            opacity=alt.condition(selector_embs, 'opacity', alt.value(0.05), legend=None)
        ).properties(
            title='Raw sentences',
            height=HEIGHT,
            width=WIDTH
        ).add_selection(
            selector_embs
        )

        interm_tsned = alt.Chart(plot_data).mark_circle(size=200).encode(
            x = 'x_interm',
            y = 'y_interm',
            tooltip =[alt.Tooltip('sentence'),
alt.Tooltip('prob')], color = alt.Color('pred', scale=alt.Scale(domain=['Negative', 'Positive', 'User'], range=['red', 'green', 'blue']), legend=alt.Legend(symbolOpacity=1)), opacity=alt.condition(selector_embs, 'opacity', alt.value(0.05), legend=None) ).properties( title='Intermediate state sentences', height=HEIGHT, width=WIDTH ).add_selection( selector_embs ) sentences_tsned = alt.Chart(plot_data).mark_circle(size=200).encode( x = 'x_proc', y = 'y_proc', tooltip =[alt.Tooltip('sentence'), alt.Tooltip('prob')], color = alt.Color('pred', scale=alt.Scale(domain=['Negative', 'Positive', 'User'], range=['red', 'green', 'blue']), legend=alt.Legend(symbolOpacity=1)), opacity=alt.condition(selector_embs, 'opacity', alt.value(0.05), legend=None) ).properties( title='Processed sentences', height=HEIGHT, width=WIDTH ).add_selection( selector_embs ) st.altair_chart(words_tsned | interm_tsned | sentences_tsned)
[ "tensorflow.keras.layers.Dropout", "pandas.read_csv", "numpy.abs", "tensorflow.keras.layers.experimental.preprocessing.TextVectorization", "tensorflow.keras.layers.Dense", "numpy.append", "tensorflow.keras.layers.LSTM", "numpy.diff", "numpy.load", "numpy.array", "numpy.exp" ]
streamlit_app.py
[(42, 'nltk.stem.wordnet.WordNetLemmatizer', 'nltk.stem.wordnet.WordNetLemmatizer', ([], {}), False, 'import nltk\n'), (45, 'streamlit.cache', 'st.cache', ([], {'allow_output_mutation': '(True)'}), True, 'import streamlit as st\n'), (94, 'streamlit.cache', 'st.cache', ([], {'allow_output_mutation': '(True)'}), True, 'import streamlit as st\n'), (99, 'streamlit.cache', 'st.cache', ([], {'allow_output_mutation': '(True)'}), True, 'import streamlit as st\n'), (104, 'streamlit.cache', 'st.cache', ([], {'allow_output_mutation': '(True)'}), True, 'import streamlit as st\n'), (136, 'streamlit.cache', 'st.cache', ([], {'allow_output_mutation': '(True)'}), True, 'import streamlit as st\n'), (165, 'streamlit.markdown', 'st.markdown', (['"""<h1 style=\'text-align: center;\'>Explaining Recurrent Neural Networks</h1>"""'], {'unsafe_allow_html': '(True)'}), True, 'import streamlit as st\n'), (167, 'streamlit.markdown', 'st.markdown', (['"""<h3 style=\'text-align: right; color: gray;\'>Made by Clay and Ihor</h3>"""'], {'unsafe_allow_html': '(True)'}), True, 'import streamlit as st\n'), (169, 'streamlit.write', 'st.write', (["('In this interactive app you will be able to explore why RNN produce one or another output and what makes difference for the model in the input. '\n +\n 'The model that you will explore is a simple RNN that was built on IMDB reviews binary classification dataset (positive or negative review).'\n )"], {}), True, 'import streamlit as st\n'), (171, 'streamlit.write', 'st.write', (['"""The model consists from embedding layer, LSTM, single hidden dense layer with 64 neurons with ReLu activation and dense output layer with a single neuron."""'], {}), True, 'import streamlit as st\n'), (173, 'streamlit.write', 'st.write', (['"""Proposed framework is illustrated on specific considerably simple model, however, it is general and can trivially be extended to larger and more complex models."""'], {}), True, 'import streamlit as st\n'), (212, 'streamlit.markdown', 'st.markdown', (['"""## Inference:"""'], {}), True, 'import streamlit as st\n'), (214, 'streamlit.write', 'st.write', (['"""Firstly, let\'s try to get some insight how model works by observing which words contribute to decision whether to output positive or negative."""'], {}), True, 'import streamlit as st\n'), (215, 'streamlit.write', 'st.write', (["(\n 'Below you see five sampled reviews from the example dataset with prediction, confidence of the prediction and visualized word impacts. '\n +\n 'Color represents positive (green), negative (red) or neutral (grey) impact on models prediction, namely how models prediction and confidence changed after seeing that word. '\n +\n 'Opacity represents strength of impact - the higher the opacity, the more impact that word had!'\n )"], {}), True, 'import streamlit as st\n'), (226, 'streamlit.button', 'st.button', (['"""Sample another reviews"""'], {}), True, 'import streamlit as st\n'), (235, 'streamlit.markdown', 'st.markdown', (['"""## Training:"""'], {}), True, 'import streamlit as st\n'), (237, 'streamlit.write', 'st.write', (['("Now let\'s see how model arrived at such decision by visualizing its training process. "\n +\n \'In this part we will be working with a single sentence. 
Type your own or click a button to sample a random one!\'\n )'], {}), True, 'import streamlit as st\n'), (240, 'streamlit.button', 'st.button', (['"""Sample random review"""'], {}), True, 'import streamlit as st\n'), (26, 'nltk.data.find', 'nltk.data.find', (['"""stopwords"""'], {}), False, 'import nltk\n'), (30, 'nltk.data.find', 'nltk.data.find', (['"""wordnet"""'], {}), False, 'import nltk\n'), (34, 'nltk.data.find', 'nltk.data.find', (['"""punkt"""'], {}), False, 'import nltk\n'), (38, 'nltk.data.find', 'nltk.data.find', (['"""averaged_perceptron_tagger"""'], {}), False, 'import nltk\n'), (43, 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""english"""'], {}), False, 'import nltk\n'), (61, 're.sub', 're.sub', (['"""([a-z0-9]+)\'([^s])"""', '"""\\\\1\\\\2"""', 'text'], {}), False, 'import re\n'), (62, 're.sub', 're.sub', (['f"""[^{string.ascii_letters}0-9]"""', '""" """', 'text'], {}), False, 'import re\n'), (66, 'nltk.word_tokenize', 'nltk.word_tokenize', (['text'], {}), False, 'import nltk\n'), (96, 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'index_col': 'None', 'sep': '"""\t"""'}), True, 'import pandas as pd\n'), (106, 'numpy.load', 'np.load', (['file_name'], {'allow_pickle': '(True)'}), True, 'import numpy as np\n'), (114, 'tensorflow.keras.layers.experimental.preprocessing.TextVectorization', 'tf.keras.layers.experimental.preprocessing.TextVectorization', ([], {'max_tokens': 'VOCAB_SIZE'}), True, 'import tensorflow as tf\n'), (160, 'numpy.diff', 'np.diff', (['probs'], {}), True, 'import numpy as np\n'), (221, 'streamlit.markdown', 'st.markdown', (['f"""---------------------------"""'], {}), True, 'import streamlit as st\n'), (224, 'streamlit.markdown', 'st.markdown', (['f"""---------------------------"""'], {}), True, 'import streamlit as st\n'), (242, 'streamlit.text_input', 'st.text_input', (['"""Or type your review!"""', 'review'], {}), True, 'import streamlit as st\n'), (244, 'streamlit.text_input', 'st.text_input', (['"""Or type your review!"""', '"""This application is really cool and authors are great!"""'], {}), True, 'import streamlit as st\n'), (248, 'streamlit.write', 'st.write', (["('Firstly, we will provide same type of visualization for the review over several epochs. 
'\n +\n 'Observe the patterns in changes of the models confidence and how each word impacts the prediction.'\n )"], {}), True, 'import streamlit as st\n'), (251, 'numpy.append', 'np.append', (["main_df['text'].values", 'text'], {}), True, 'import numpy as np\n'), (252, 'streamlit.markdown', 'st.markdown', (['f"""---------------------------"""'], {}), True, 'import streamlit as st\n'), (261, 'streamlit.markdown', 'st.markdown', (['f"""---------------------------"""'], {}), True, 'import streamlit as st\n'), (263, 'streamlit.write', 'st.write', (['"""Now let\'s visualize feature space and how it is transformed while being passed through models layers."""'], {}), True, 'import streamlit as st\n'), (264, 'streamlit.write', 'st.write', (["('The leftmost plot is learned sentence embedding, ' +\n 'the middle one is output of embeddings being passed through LSTM ' +\n 'and the rightmost one is the output of LSTM output being passed through dense layer.'\n )"], {}), True, 'import streamlit as st\n'), (267, 'streamlit.write', 'st.write', (['"""Charts are interactive in two ways - review text and probability will be shown on cursor hover and it\'s possible to select only subset of data by dragging a rectangle with mouse."""'], {}), True, 'import streamlit as st\n'), (268, 'streamlit.write', 'st.write', (['"""Note that originally all feature spaces are of high dimensionality and we approximate them for visualization with Isomap."""'], {}), True, 'import streamlit as st\n'), (28, 'nltk.download', 'nltk.download', (['"""stopwords"""'], {'quiet': '(True)'}), False, 'import nltk\n'), (32, 'nltk.download', 'nltk.download', (['"""wordnet"""'], {'quiet': '(True)'}), False, 'import nltk\n'), (36, 'nltk.download', 'nltk.download', (['"""punkt"""'], {'quiet': '(True)'}), False, 'import nltk\n'), (40, 'nltk.download', 'nltk.download', (['"""averaged_perceptron_tagger"""'], {'quiet': '(True)'}), False, 'import nltk\n'), (102, 'pickle.load', 'pickle.load', (['r_obj'], {'encoding': '"""utf-8"""'}), False, 'import pickle\n'), (223, 'streamlit.markdown', 'st.markdown', (['colored[i]'], {'unsafe_allow_html': '(True)'}), True, 'import streamlit as st\n'), (272, 'streamlit.markdown', 'st.markdown', (['f"""#### Epoch {i}"""'], {}), True, 'import streamlit as st\n'), (286, 'numpy.append', 'np.append', (['isomap_raw.embedding_', 'raw_emb_text'], {'axis': '(0)'}), True, 'import numpy as np\n'), (291, 'numpy.append', 'np.append', (['isomap_intermediate.embedding_', 'intermediate_emb_text'], {'axis': '(0)'}), True, 'import numpy as np\n'), (296, 'numpy.append', 'np.append', (['isomap_proc.embedding_', 'proc_emb_text'], {'axis': '(0)'}), True, 'import numpy as np\n'), (309, 'altair.selection_interval', 'alt.selection_interval', ([], {'empty': '"""all"""', 'encodings': "['x', 'y']"}), True, 'import altair as alt\n'), (356, 'streamlit.altair_chart', 'st.altair_chart', (['(words_tsned | interm_tsned | sentences_tsned)'], {}), True, 'import streamlit as st\n'), (67, 'nltk.pos_tag', 'nltk.pos_tag', (['[token]'], {}), False, 'import nltk\n'), (125, 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['(64)'], {}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""'}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), 
(129, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), True, 'import tensorflow as tf\n'), (305, 'numpy.abs', 'np.abs', (['probs'], {}), True, 'import numpy as np\n'), (142, 'numpy.abs', 'np.abs', (['probs'], {}), True, 'import numpy as np\n'), (79, 'numpy.exp', 'np.exp', (['(-x)'], {}), True, 'import numpy as np\n'), (157, 'numpy.array', 'np.array', (['[proc_text]'], {}), True, 'import numpy as np\n'), (314, 'altair.Tooltip', 'alt.Tooltip', (['"""sentence"""'], {}), True, 'import altair as alt\n'), (314, 'altair.Tooltip', 'alt.Tooltip', (['"""prob"""'], {}), True, 'import altair as alt\n'), (317, 'altair.value', 'alt.value', (['(0.05)'], {}), True, 'import altair as alt\n'), (329, 'altair.Tooltip', 'alt.Tooltip', (['"""sentence"""'], {}), True, 'import altair as alt\n'), (329, 'altair.Tooltip', 'alt.Tooltip', (['"""prob"""'], {}), True, 'import altair as alt\n'), (332, 'altair.value', 'alt.value', (['(0.05)'], {}), True, 'import altair as alt\n'), (344, 'altair.Tooltip', 'alt.Tooltip', (['"""sentence"""'], {}), True, 'import altair as alt\n'), (344, 'altair.Tooltip', 'alt.Tooltip', (['"""prob"""'], {}), True, 'import altair as alt\n'), (347, 'altair.value', 'alt.value', (['(0.05)'], {}), True, 'import altair as alt\n'), (145, 'numpy.abs', 'np.abs', (['probs'], {}), True, 'import numpy as np\n'), (147, 'numpy.abs', 'np.abs', (['probs'], {}), True, 'import numpy as np\n'), (311, 'altair.Chart', 'alt.Chart', (['plot_data'], {}), True, 'import altair as alt\n'), (315, 'altair.Scale', 'alt.Scale', ([], {'domain': "['Negative', 'Positive', 'User']", 'range': "['red', 'green', 'blue']"}), True, 'import altair as alt\n'), (316, 'altair.Legend', 'alt.Legend', ([], {'symbolOpacity': '(1)'}), True, 'import altair as alt\n'), (326, 'altair.Chart', 'alt.Chart', (['plot_data'], {}), True, 'import altair as alt\n'), (330, 'altair.Scale', 'alt.Scale', ([], {'domain': "['Negative', 'Positive', 'User']", 'range': "['red', 'green', 'blue']"}), True, 'import altair as alt\n'), (331, 'altair.Legend', 'alt.Legend', ([], {'symbolOpacity': '(1)'}), True, 'import altair as alt\n'), (341, 'altair.Chart', 'alt.Chart', (['plot_data'], {}), True, 'import altair as alt\n'), (345, 'altair.Scale', 'alt.Scale', ([], {'domain': "['Negative', 'Positive', 'User']", 'range': "['red', 'green', 'blue']"}), True, 'import altair as alt\n'), (346, 'altair.Legend', 'alt.Legend', ([], {'symbolOpacity': '(1)'}), True, 'import altair as alt\n')]
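The color_text routine in streamlit_app.py above attributes sentiment to individual words by re-scoring successive prefixes of the review and taking first differences of the scores. A minimal standalone sketch of that prefix-difference idea, where score_fn is an assumed stand-in for any scalar text scorer (it is not part of the original app):

import numpy as np

def word_impacts(text, score_fn):
    """Attribute the change in score to each word via successive prefixes."""
    tokens = text.split()
    prefix_scores = [0.0]  # score of the empty prefix
    for k in range(len(tokens)):
        prefix_scores.append(float(score_fn(" ".join(tokens[:k + 1]))))
    # The impact of token k is how much appending it moved the score.
    return list(zip(tokens, np.diff(prefix_scores)))

# Toy usage with a lexicon-based scorer:
toy = {"great": 1.0, "cool": 0.5, "boring": -1.0}
print(word_impacts("really cool but boring",
                   lambda t: sum(toy.get(w, 0.0) for w in t.split())))

This per-word difference is exactly the quantity the app maps to color (sign) and opacity (magnitude).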
zurutech/anomaly-toolbox
ee772898b66b8be86cfa300334fb8cf7b826dc4d
# Copyright 2021 Zuru Tech HK Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Trainer for the GANomaly model.""" import json from pathlib import Path from typing import Dict, Set, Union import tensorflow as tf import tensorflow.keras as k from anomaly_toolbox.datasets.dataset import AnomalyDetectionDataset from anomaly_toolbox.losses.ganomaly import AdversarialLoss, generator_bce from anomaly_toolbox.models.ganomaly import Decoder, Discriminator, Encoder from anomaly_toolbox.trainers.trainer import Trainer class GANomaly(Trainer): """GANomaly Trainer.""" def __init__( self, dataset: AnomalyDetectionDataset, hps: Dict, summary_writer: tf.summary.SummaryWriter, log_dir: Path, ): """Initialize GANomaly Networks.""" super().__init__( dataset=dataset, hps=hps, summary_writer=summary_writer, log_dir=log_dir ) n_channels = dataset.channels # Discriminator self.discriminator = Discriminator(n_channels=n_channels, l2_penalty=0.2) # Generator (aka Decoder) self.generator = Decoder( n_channels=n_channels, latent_space_dimension=self._hps["latent_vector_size"], l2_penalty=0.2, ) # Encoder self.encoder = Encoder( n_channels=n_channels, latent_space_dimension=self._hps["latent_vector_size"], l2_penalty=0.2, ) fake_batch_size = (1, 32, 32, n_channels) self.discriminator(tf.zeros(fake_batch_size)) self.discriminator.summary() self.encoder(tf.zeros(fake_batch_size)) self.encoder.summary() fake_latent_vector = (1, self._hps["latent_vector_size"]) self.generator(tf.zeros(fake_latent_vector)) self.generator.summary() # Losses self._mse = k.losses.MeanSquaredError() self._mae = k.losses.MeanAbsoluteError() # Optimizers self.optimizer_ge = k.optimizers.Adam( learning_rate=hps["learning_rate"], beta_1=0.5, beta_2=0.999 ) self.optimizer_d = k.optimizers.Adam( learning_rate=hps["learning_rate"], beta_1=0.5, beta_2=0.999 ) # Training Metrics self.epoch_g_loss_avg = k.metrics.Mean(name="epoch_generator_loss") self.epoch_d_loss_avg = k.metrics.Mean(name="epoch_discriminator_loss") self.epoch_e_loss_avg = k.metrics.Mean(name="epoch_encoder_loss") self._auc_rc = k.metrics.AUC(name="auc_rc", curve="PR", num_thresholds=500) self._auc_roc = k.metrics.AUC(name="auc_roc", curve="ROC", num_thresholds=500) self.keras_metrics = { metric.name: metric for metric in [ self.epoch_d_loss_avg, self.epoch_g_loss_avg, self.epoch_e_loss_avg, self._auc_rc, self._auc_roc, ] } self._minmax = AdversarialLoss(from_logits=True) # Flatten op self._flatten = k.layers.Flatten() @staticmethod def hyperparameters() -> Set[str]: """List of the hyperparameters name used by the trainer.""" return { "learning_rate", "latent_vector_size", "adversarial_loss_weight", "contextual_loss_weight", "enc_loss_weight", } def train( self, epochs: int, adversarial_loss_weight: float, contextual_loss_weight: float, enc_loss_weight: float, step_log_frequency: int = 100, ): best_auc_rc, best_auc_roc = -1, -1 for epoch in range(epochs): for batch in self._dataset.train_normal: x, _ = batch # Perform the train step g_z, g_ex, d_loss, g_loss, e_loss = 
self.train_step(
                    x,
                    adversarial_loss_weight,
                    contextual_loss_weight,
                    enc_loss_weight,
                )

                # Update the loss metrics
                self.epoch_g_loss_avg.update_state(g_loss)
                self.epoch_d_loss_avg.update_state(d_loss)
                self.epoch_e_loss_avg.update_state(e_loss)

                step = self.optimizer_d.iterations.numpy()
                learning_rate = self.optimizer_ge.learning_rate.numpy()
                if tf.equal(tf.math.mod(step, step_log_frequency), 0):
                    with self._summary_writer.as_default():
                        tf.summary.scalar("learning_rate", learning_rate, step=step)
                        tf.summary.image(
                            "x/g_z/g_ex",
                            tf.concat([x, g_z, g_ex], axis=2),
                            step=step,
                        )
                        tf.summary.scalar(
                            "d_loss",
                            self.epoch_d_loss_avg.result(),
                            step=step,
                        )
                        tf.summary.scalar(
                            "g_loss",
                            self.epoch_g_loss_avg.result(),
                            step=step,
                        )
                        tf.summary.scalar(
                            "e_loss",
                            self.epoch_e_loss_avg.result(),
                            step=step,
                        )

                        tf.print(
                            "Step ",
                            step,
                            ". d_loss: ",
                            self.epoch_d_loss_avg.result(),
                            ", g_loss: ",
                            self.epoch_g_loss_avg.result(),
                            ", e_loss: ",
                            self.epoch_e_loss_avg.result(),
                            ", lr: ",
                            learning_rate,
                        )

            # Epoch end
            tf.print(epoch, "Epoch completed")

            # Model selection
            self._auc_rc.reset_state()
            self._auc_roc.reset_state()
            for batch in self._dataset.validation:
                x, labels_test = batch
                anomaly_scores = self._compute_anomaly_scores(
                    x, self.encoder, self.generator
                )
                self._auc_rc.update_state(labels_test, anomaly_scores)
                self._auc_roc.update_state(labels_test, anomaly_scores)

            # Save the model when AUC-RC is the best
            current_auc_rc = self._auc_rc.result()
            if best_auc_rc < current_auc_rc:
                tf.print("Best AUC-RC on validation set: ", current_auc_rc)

                # Replace the best
                best_auc_rc = current_auc_rc

                base_path = self._log_dir / "results" / "auc_rc"
                self.generator.save(str(base_path / "generator"), overwrite=True)
                self.encoder.save(str(base_path / "encoder"), overwrite=True)
                self.discriminator.save(
                    str(base_path / "discriminator"), overwrite=True
                )

                with open(base_path / "validation.json", "w") as fp:
                    json.dump(
                        {
                            "value": float(best_auc_rc),
                        },
                        fp,
                    )

            # Save the model when AUC-ROC is the best
            current_auc_roc = self._auc_roc.result()
            if best_auc_roc < current_auc_roc:
                tf.print("Best AUC-ROC on validation set: ", current_auc_roc)

                # Replace the best
                best_auc_roc = current_auc_roc

                base_path = self._log_dir / "results" / "auc_roc"
                self.generator.save(str(base_path / "generator"), overwrite=True)
                self.encoder.save(str(base_path / "encoder"), overwrite=True)
                self.discriminator.save(
                    str(base_path / "discriminator"), overwrite=True
                )

                with open(base_path / "validation.json", "w") as fp:
                    json.dump(
                        {
                            # Fixed: store the AUC-ROC value here (this block
                            # previously wrote best_auc_rc by mistake)
                            "value": float(best_auc_roc),
                        },
                        fp,
                    )

            # Reset metrics, or the data will keep accruing and become an
            # average over ALL the epochs
            self._reset_keras_metrics()

    @tf.function
    def train_step(
        self,
        x,
        adversarial_loss_weight: float,
        contextual_loss_weight: float,
        enc_loss_weight: float,
    ):
        """Single training step."""
        # Random noise
        z = tf.random.normal((tf.shape(x)[0], self._hps["latent_vector_size"]))

        with tf.GradientTape(persistent=True) as tape:
            # Generator reconstruction from random noise
            g_z = self.generator(z, training=True)
# Discriminator on real data
            d_x, _ = self.discriminator(x, training=True)

            # Reconstruct real data after encoding
            e_x = self.encoder(x, training=True)
            g_ex = self.generator(e_x, training=True)

            # Discriminator on the reconstructed real data g_ex
            d_gex, _ = self.discriminator(inputs=g_ex, training=True)

            # Encode the reconstructed real data g_ex
            e_gex = self.encoder(g_ex, training=True)

            # Discriminator loss
            # d_loss = self._minmax(d_x_features, d_gex_features)
            d_loss = self._minmax(d_x, d_gex)

            # Generator loss
            # adversarial_loss = losses.adversarial_loss_fm(d_f_x, d_f_x_hat)
            bce_g_loss = generator_bce(g_ex, from_logits=True)

            l1_loss = self._mae(x, g_ex)  # Contextual loss
            e_loss = self._mse(e_x, e_gex)  # Encoder loss

            g_loss = (
                adversarial_loss_weight * bce_g_loss
                + contextual_loss_weight * l1_loss
                + enc_loss_weight * e_loss
            )

        d_grads = tape.gradient(d_loss, self.discriminator.trainable_variables)
        g_grads = tape.gradient(g_loss, self.generator.trainable_variables)
        del tape

        self.optimizer_ge.apply_gradients(
            zip(
                g_grads,
                self.generator.trainable_variables
                + self.encoder.trainable_variables,
            )
        )
        self.optimizer_d.apply_gradients(
            zip(d_grads, self.discriminator.trainable_variables)
        )

        # NOTE: If d_loss = self._minmax(d_x_features, d_gex_features), the dense
        # layer would raise a "gradients do not exist for variables" warning.
        # To suppress it, apply only the gradients that exist:
        # self.optimizer_d.apply_gradients(
        #     (grad, var)
        #     for (grad, var) in zip(d_grads, self.discriminator.trainable_variables)
        #     if grad is not None
        # )

        return (
            g_z,
            g_ex,
            d_loss,
            g_loss,
            e_loss,
        )

    def test(self, base_path: Union[Path, None] = None):
        """
        Test the model on all the meaningful metrics.

        Args:
            base_path: the path to use for loading the models. If None, the default is used.
        """

        # Loop over every "best model" for every metric used in model selection
        for metric in ["auc_rc", "auc_roc"]:
            if not base_path:
                base_path = self._log_dir / "results" / metric
            encoder_path = base_path / "encoder"
            generator_path = base_path / "generator"

            # Load the best models to use as the model here
            encoder = k.models.load_model(encoder_path)
            encoder.summary()
            generator = k.models.load_model(generator_path)
            generator.summary()

            # Reset both metric states so results do not leak across iterations
            # (previously only the AUPRC state was reset here)
            self._auc_rc.reset_state()
            self._auc_roc.reset_state()

            tf.print("Using the best model selected via ", metric)
            # Test on the test dataset
            for batch in self._dataset.test:
                x, labels_test = batch

                # Get the anomaly scores
                anomaly_scores = self._compute_anomaly_scores(x, encoder, generator)
                self._auc_rc.update_state(labels_test, anomaly_scores)
                self._auc_roc.update_state(labels_test, anomaly_scores)

            auc_rc = self._auc_rc.result()
            auc_roc = self._auc_roc.result()

            tf.print("Get AUC-RC: ", auc_rc)
            tf.print("Get AUC-ROC: ", auc_roc)

            base_path = self._log_dir / "results" / metric
            result = {
                "auc_roc": {
                    "value": float(auc_roc),
                },
                "auc_rc": {
                    "value": float(auc_rc),
                },
            }
            # Write the file
            with open(base_path / "test.json", "w") as fp:
                json.dump(result, fp)

    def _compute_anomaly_scores(
        self, x: tf.Tensor, encoder: k.Model, generator: k.Model
    ) -> tf.Tensor:
        """
        Compute the anomaly scores as indicated in the GANomaly paper
        https://arxiv.org/abs/1805.06725.

        Args:
            x: The batch of data to use to calculate the anomaly scores.
            encoder: The encoder used to embed both the input and its reconstruction.
            generator: The generator used to reconstruct the input from its embedding.

        Returns:
            The anomaly scores on the input batch, [0, 1] normalized.
""" # Get the generator reconstruction of a decoded input data e_x = encoder(x, training=False) g_ex = generator(e_x, training=False) # Encode the generated g_ex e_gex = encoder(g_ex, training=False) # Get the anomaly scores normalized_anomaly_scores, _ = tf.linalg.normalize( tf.norm( self._flatten(tf.abs(e_x - e_gex)), axis=1, keepdims=False, ) ) return normalized_anomaly_scores
[ "tensorflow.keras.models.load_model", "tensorflow.concat", "tensorflow.summary.scalar", "tensorflow.zeros", "tensorflow.shape", "tensorflow.keras.losses.MeanSquaredError", "tensorflow.keras.metrics.AUC", "tensorflow.GradientTape", "tensorflow.keras.optimizers.Adam", "tensorflow.math.mod", "tensorflow.abs", "tensorflow.print", "tensorflow.keras.layers.Flatten", "tensorflow.keras.metrics.Mean", "tensorflow.keras.losses.MeanAbsoluteError" ]
src/anomaly_toolbox/trainers/ganomaly.py
[(48, 'anomaly_toolbox.models.ganomaly.Discriminator', 'Discriminator', ([], {'n_channels': 'n_channels', 'l2_penalty': '(0.2)'}), False, 'from anomaly_toolbox.models.ganomaly import Decoder, Discriminator, Encoder\n'), (51, 'anomaly_toolbox.models.ganomaly.Decoder', 'Decoder', ([], {'n_channels': 'n_channels', 'latent_space_dimension': "self._hps['latent_vector_size']", 'l2_penalty': '(0.2)'}), False, 'from anomaly_toolbox.models.ganomaly import Decoder, Discriminator, Encoder\n'), (58, 'anomaly_toolbox.models.ganomaly.Encoder', 'Encoder', ([], {'n_channels': 'n_channels', 'latent_space_dimension': "self._hps['latent_vector_size']", 'l2_penalty': '(0.2)'}), False, 'from anomaly_toolbox.models.ganomaly import Decoder, Discriminator, Encoder\n'), (76, 'tensorflow.keras.losses.MeanSquaredError', 'k.losses.MeanSquaredError', ([], {}), True, 'import tensorflow.keras as k\n'), (77, 'tensorflow.keras.losses.MeanAbsoluteError', 'k.losses.MeanAbsoluteError', ([], {}), True, 'import tensorflow.keras as k\n'), (80, 'tensorflow.keras.optimizers.Adam', 'k.optimizers.Adam', ([], {'learning_rate': "hps['learning_rate']", 'beta_1': '(0.5)', 'beta_2': '(0.999)'}), True, 'import tensorflow.keras as k\n'), (83, 'tensorflow.keras.optimizers.Adam', 'k.optimizers.Adam', ([], {'learning_rate': "hps['learning_rate']", 'beta_1': '(0.5)', 'beta_2': '(0.999)'}), True, 'import tensorflow.keras as k\n'), (88, 'tensorflow.keras.metrics.Mean', 'k.metrics.Mean', ([], {'name': '"""epoch_generator_loss"""'}), True, 'import tensorflow.keras as k\n'), (89, 'tensorflow.keras.metrics.Mean', 'k.metrics.Mean', ([], {'name': '"""epoch_discriminator_loss"""'}), True, 'import tensorflow.keras as k\n'), (90, 'tensorflow.keras.metrics.Mean', 'k.metrics.Mean', ([], {'name': '"""epoch_encoder_loss"""'}), True, 'import tensorflow.keras as k\n'), (92, 'tensorflow.keras.metrics.AUC', 'k.metrics.AUC', ([], {'name': '"""auc_rc"""', 'curve': '"""PR"""', 'num_thresholds': '(500)'}), True, 'import tensorflow.keras as k\n'), (93, 'tensorflow.keras.metrics.AUC', 'k.metrics.AUC', ([], {'name': '"""auc_roc"""', 'curve': '"""ROC"""', 'num_thresholds': '(500)'}), True, 'import tensorflow.keras as k\n'), (106, 'anomaly_toolbox.losses.ganomaly.AdversarialLoss', 'AdversarialLoss', ([], {'from_logits': '(True)'}), False, 'from anomaly_toolbox.losses.ganomaly import AdversarialLoss, generator_bce\n'), (109, 'tensorflow.keras.layers.Flatten', 'k.layers.Flatten', ([], {}), True, 'import tensorflow.keras as k\n'), (65, 'tensorflow.zeros', 'tf.zeros', (['fake_batch_size'], {}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.zeros', 'tf.zeros', (['fake_batch_size'], {}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.zeros', 'tf.zeros', (['fake_latent_vector'], {}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.print', 'tf.print', (['epoch', '"""Epoch completed"""'], {}), True, 'import tensorflow as tf\n'), (264, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), True, 'import tensorflow as tf\n'), (287, 'anomaly_toolbox.losses.ganomaly.generator_bce', 'generator_bce', (['g_ex'], {'from_logits': '(True)'}), False, 'from anomaly_toolbox.losses.ganomaly import AdversarialLoss, generator_bce\n'), (344, 'tensorflow.keras.models.load_model', 'k.models.load_model', (['encoder_path'], {}), True, 'import tensorflow.keras as k\n'), (346, 'tensorflow.keras.models.load_model', 'k.models.load_model', (['generator_path'], {}), True, 'import tensorflow.keras as k\n'), (351, 'tensorflow.print', 'tf.print', (['"""Using the best 
model selected via """', 'metric'], {}), True, 'import tensorflow as tf\n'), (363, 'tensorflow.print', 'tf.print', (['"""Get AUC-RC: """', 'auc_rc'], {}), True, 'import tensorflow as tf\n'), (364, 'tensorflow.print', 'tf.print', (['"""Get AUC-ROC: """', 'auc_roc'], {}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.print', 'tf.print', (['"""Best AUC-RC on validation set: """', 'current_auc_rc'], {}), True, 'import tensorflow as tf\n'), (229, 'tensorflow.print', 'tf.print', (['"""Best AUC-ROC on validation set: """', 'current_auc_roc'], {}), True, 'import tensorflow as tf\n'), (377, 'json.dump', 'json.dump', (['result', 'fp'], {}), False, 'import json\n'), (152, 'tensorflow.math.mod', 'tf.math.mod', (['step', 'step_log_frequency'], {}), True, 'import tensorflow as tf\n'), (261, 'tensorflow.shape', 'tf.shape', (['x'], {}), True, 'import tensorflow as tf\n'), (403, 'tensorflow.abs', 'tf.abs', (['(e_x - e_gex)'], {}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'learning_rate'], {'step': 'step'}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.concat', 'tf.concat', (['[x, g_z, g_ex]'], {'axis': '(2)'}), True, 'import tensorflow as tf\n')]
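_compute_anomaly_scores in the trainer above implements the GANomaly score ||E(x) - E(G(E(x)))||, normalized over the batch. A NumPy sketch of the same computation, assuming enc and gen are plain callables standing in for the trained encoder and generator (not the actual Keras models):

import numpy as np

def ganomaly_scores(x, enc, gen):
    """Distance between the encoding of x and the encoding of its
    reconstruction G(E(x)), normalized over the whole batch."""
    e_x = enc(x)              # encode the input
    e_gex = enc(gen(e_x))     # re-encode the reconstruction
    diff = (e_x - e_gex).reshape(len(x), -1)
    scores = np.linalg.norm(diff, axis=1)   # per-sample L2 distance
    # Divide by the norm of the whole score vector, as tf.linalg.normalize does
    return scores / np.linalg.norm(scores)

Because each score is divided by the norm of the full score vector, every result lands in [0, 1], which matches the docstring above.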
microprediction/successor
80f61a59c93d45cff2851f8048fda5378bd05c4c
# Borrowed from timemachines

def nonecast(x, fill_value=0.0):
    """Replace None entries in a list with fill_value; pass None through."""
    if x is None:
        return None
    return [xj if xj is not None else fill_value for xj in x]


def wrap(x):
    """Ensure x is a list of float."""
    if x is None:
        return None
    elif isinstance(x, (float, int)):
        return [float(x)]
    else:
        return list(x)


def skater_model_suffix(skater_name, k, n_input):
    return skater_name + '_' + str(k) + '_' + str(n_input)


from tensorflow import keras

# TODO: Move into package shared by sklearned, maybe
def keras_optimizer_from_name(opt_name, learning_rate):
    """Instantiate a Keras optimizer from its class name."""
    if opt_name == 'SGD':
        return keras.optimizers.SGD(learning_rate=learning_rate)
    elif opt_name == 'RMSprop':
        return keras.optimizers.RMSprop(learning_rate=learning_rate)
    elif opt_name == 'Adam':
        return keras.optimizers.Adam(learning_rate=learning_rate)
    elif opt_name == 'Adagrad':
        return keras.optimizers.Adagrad(learning_rate=learning_rate)
    elif opt_name == 'Adadelta':
        # Fixed: this branch previously returned Adagrad for the 'Adadelta' name
        return keras.optimizers.Adadelta(learning_rate=learning_rate)
    elif opt_name == 'Adamax':
        return keras.optimizers.Adamax(learning_rate=learning_rate)
    elif opt_name == 'Nadam':
        return keras.optimizers.Nadam(learning_rate=learning_rate)
    elif opt_name == 'Ftrl':
        return keras.optimizers.Ftrl(learning_rate=learning_rate)
    else:
        raise ValueError('Unknown optimizer name: ' + opt_name)
[ "tensorflow.keras.optimizers.Nadam", "tensorflow.keras.optimizers.Ftrl", "tensorflow.keras.optimizers.Adamax", "tensorflow.keras.optimizers.RMSprop", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.optimizers.Adagrad", "tensorflow.keras.optimizers.SGD" ]
successor/conventions.py
[(28, 'tensorflow.keras.optimizers.SGD', 'keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate'}), False, 'from tensorflow import keras\n'), (30, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'learning_rate': 'learning_rate'}), False, 'from tensorflow import keras\n'), (32, 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), False, 'from tensorflow import keras\n'), (34, 'tensorflow.keras.optimizers.Adagrad', 'keras.optimizers.Adagrad', ([], {'learning_rate': 'learning_rate'}), False, 'from tensorflow import keras\n'), (36, 'tensorflow.keras.optimizers.Adadelta', 'keras.optimizers.Adadelta', ([], {'learning_rate': 'learning_rate'}), False, 'from tensorflow import keras\n'), (38, 'tensorflow.keras.optimizers.Adamax', 'keras.optimizers.Adamax', ([], {'learning_rate': 'learning_rate'}), False, 'from tensorflow import keras\n'), (40, 'tensorflow.keras.optimizers.Nadam', 'keras.optimizers.Nadam', ([], {'learning_rate': 'learning_rate'}), False, 'from tensorflow import keras\n'), (42, 'tensorflow.keras.optimizers.Ftrl', 'keras.optimizers.Ftrl', ([], {'learning_rate': 'learning_rate'}), False, 'from tensorflow import keras\n')]
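A quick usage sketch for keras_optimizer_from_name, assuming the module is importable as successor.conventions per the file path above:

from successor.conventions import keras_optimizer_from_name

opt = keras_optimizer_from_name('Adam', learning_rate=1e-3)
print(type(opt).__name__)  # -> Adam

# Names outside the supported set fail loudly:
try:
    keras_optimizer_from_name('AdamW', learning_rate=1e-3)
except ValueError as err:
    print(err)  # -> Unknown optimizer name: AdamW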
m-bizhani/Digital-rock-image-processing
4d3914dcfa1f814b953e6ce7e97a198f861f8e3a
import tensorflow as tf
import os
from model.DIDN import get_model
from tensorflow.keras.optimizers import Adam
from common.losses import *
from common.lr_scheduler import *
from common.metrics import *
from functools import wraps
import time

tf.keras.backend.clear_session()

gpu = tf.test.gpu_device_name()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_VISIBLE_DEVICES'] = gpu
print(tf.__version__)

# TODO: load data
def _get_dataset():
    """Return the train, validation, and test tf.data.Dataset objects."""
    # Stub: wire in the actual data pipeline here.
    raise NotImplementedError("Dataset loading is not implemented yet.")

# The datasets used below; training and evaluation fail loudly until
# _get_dataset is implemented.
train_ds, val_ds, test_ds = _get_dataset()

model = get_model(input_shape=(128, 128, 1))
model.summary()

content_loss_22 = ContentLoss(criterion='l2', output_layer=22, before_act=True)  # Output layer 22 or 54
content_loss_54 = ContentLoss(criterion='l2', output_layer=54, before_act=True)  # Output layer 22 or 54
pixel_loss_l1 = PixelLoss(criterion='l1')  # MeanAbsoluteError
pixel_loss_l2 = PixelLoss(criterion='l2')  # MeanSquaredError
ssim_loss = SSIM_loss()

# Weighted content, l1, and l2 loss
loss_weights = [1.0, 0.2, 0.2]

def loss_func(y_true, y_pred):
    """Content loss from the VGG19 model combined with l1 and l2 terms."""
    c_loss = content_loss_22(y_true, y_pred)
    l1 = tf.keras.losses.mean_absolute_error(y_true, y_pred)
    l2 = tf.keras.losses.mean_squared_error(y_true, y_pred)
    total_loss = loss_weights[0] * c_loss + loss_weights[1] * l1 + loss_weights[2] * l2
    return total_loss

learning_rate_fn = MultiStepLR(1e-4, [5000, 10000, 15000, 30000, 50000], 0.5)

model.compile(optimizer=Adam(learning_rate=learning_rate_fn),
              loss=pixel_loss_l1,
              metrics=[PSNR, ssim, mssim, 'mse', 'mae'])

C = [
    tf.keras.callbacks.CSVLogger('DIDN_l1.csv', append=True),
    tf.keras.callbacks.ModelCheckpoint('DIDN_l1.h5', save_best_only=True, verbose=1),
    tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=30, verbose=1)
]

def execution_time(func):
    """Decorator that reports how long the wrapped function took."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        results = func(*args, **kwargs)
        end = time.time()
        print(f'\n Total execution time for {func.__name__} is {end - start}s')
        return results
    return wrapper

@execution_time
def train(epoch=200):
    if gpu:
        print('GPU training')
        with tf.device(gpu):
            model.fit(train_ds, epochs=epoch, validation_data=val_ds, callbacks=C)
    else:
        print('CPU training')
        model.fit(train_ds, epochs=epoch, validation_data=val_ds, callbacks=C)
    return model

model = train()

with tf.device(gpu):
    # predict() returns the denoised images; evaluate() would return metrics
    I_pred = model.predict(test_ds)

print(I_pred.shape)
[ "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.device", "tensorflow.test.gpu_device_name", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.backend.clear_session", "tensorflow.keras.losses.mean_absolute_error", "tensorflow.keras.losses.mean_squared_error", "tensorflow.keras.callbacks.CSVLogger", "tensorflow.keras.callbacks.EarlyStopping" ]
Denoising.py
[(12, 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), True, 'import tensorflow as tf\n'), (14, 'tensorflow.test.gpu_device_name', 'tf.test.gpu_device_name', ([], {}), True, 'import tensorflow as tf\n'), (30, 'model.DIDN.get_model', 'get_model', ([], {'input_shape': '(128, 128, 1)'}), False, 'from model.DIDN import get_model\n'), (52, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate_fn'}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.keras.losses.mean_absolute_error', 'tf.keras.losses.mean_absolute_error', (['y_true', 'y_pred'], {}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.keras.losses.mean_squared_error', 'tf.keras.losses.mean_squared_error', (['y_true', 'y_pred'], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.keras.callbacks.CSVLogger', 'tf.keras.callbacks.CSVLogger', (['"""DIDN_l1.csv"""'], {'append': '(True)'}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['"""DIDN_l1.h5"""'], {'save_best_only': '(True)', 'verbose': '(1)'}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(30)', 'verbose': '(1)'}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.test.gpu_device_name', 'tf.test.gpu_device_name', ([], {}), True, 'import tensorflow as tf\n'), (68, 'functools.wraps', 'wraps', (['func'], {}), False, 'from functools import wraps\n'), (92, 'tensorflow.device', 'tf.device', (['gpu'], {}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': 'learning_rate_fn'}), False, 'from tensorflow.keras.optimizers import Adam\n'), (70, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (72, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (81, 'tensorflow.device', 'tf.device', (['gpu'], {}), True, 'import tensorflow as tf\n')]
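MultiStepLR above comes from the repo's common.lr_scheduler module, which is not shown here. Assuming it multiplies the initial rate by the given factor at each listed boundary (the usual multi-step convention), a Keras-native equivalent would be:

import tensorflow as tf

initial_lr, gamma = 1e-4, 0.5
boundaries = [5000, 10000, 15000, 30000, 50000]
values = [initial_lr * gamma ** k for k in range(len(boundaries) + 1)]
# 1e-4 until step 5000, then halved at each subsequent boundary
learning_rate_fn = tf.keras.optimizers.schedules.PiecewiseConstantDecay(boundaries, values)
print([float(learning_rate_fn(s)) for s in (0, 5001, 10001, 60000)])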
puruBHU/tf-keras-stacked-hourglass-keypoint-detection
56707252501c73b2bf2aac8fff3e22760fd47dca
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os, sys, argparse import cv2 from PIL import Image import numpy as np import time from timeit import default_timer as timer from tensorflow.keras.models import Model, load_model import tensorflow.keras.backend as K from hourglass.model import get_hourglass_model from hourglass.postprocess import post_process_heatmap, post_process_heatmap_single from common.data_utils import preprocess_image from common.utils import get_classes, get_skeleton, render_skeleton, optimize_tf_gpu os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' default_config = { "num_stacks": 2, "mobile" : False, "tiny" : False, "model_image_size": (256, 256), "num_channels": 256, "conf_threshold": 0.1, "classes_path": 'configs/mpii_classes.txt', "skeleton_path": None, "weights_path": 'weights/hourglass_mobile.h5', "gpu_num" : 1, } class Hourglass(object): _defaults = default_config @classmethod def get_defaults(cls, n): if n in cls._defaults: return cls._defaults[n] else: return "Unrecognized attribute name '" + n + "'" def __init__(self, **kwargs): super(Hourglass, self).__init__() self.__dict__.update(self._defaults) # set up default values self.__dict__.update(kwargs) # and update with user overrides if self.skeleton_path: self.skeleton_lines = get_skeleton(self.skeleton_path) else: self.skeleton_lines = None self.class_names = get_classes(self.classes_path) self.hourglass_model = self._generate_model() K.set_learning_phase(0) def _generate_model(self): '''to generate the bounding boxes''' weights_path = os.path.expanduser(self.weights_path) assert weights_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.' num_classes = len(self.class_names) # update param for tiny model if self.tiny is True: self.num_channels = 128 # construct model and load weights. 
hourglass_model = get_hourglass_model(num_classes, self.num_stacks, self.num_channels, input_size=self.model_image_size, mobile=self.mobile) hourglass_model.load_weights(weights_path, by_name=True)#, skip_mismatch=True) hourglass_model.summary() return hourglass_model def detect_image(self, image): image_data = preprocess_image(image, self.model_image_size) image_size = image.size scale = (image_size[0] * 1.0 / self.model_image_size[0], image_size[1] * 1.0 / self.model_image_size[1]) start = time.time() keypoints = self.predict(image_data) end = time.time() print("Inference time: {:.8f}s".format(end - start)) # rescale keypoints back to origin image size keypoints_dict = dict() for i, keypoint in enumerate(keypoints): keypoints_dict[self.class_names[i]] = (keypoint[0] * scale[0] * 4, keypoint[1] * scale[1] * 4, keypoint[2]) # draw the keypoint skeleton on image image_array = np.array(image, dtype='uint8') image_array = render_skeleton(image_array, keypoints_dict, self.skeleton_lines, self.conf_threshold) return Image.fromarray(image_array) def predict(self, image_data): # get final predict heatmap heatmap = self.hourglass_model.predict(image_data)[-1] heatmap = heatmap[0] # parse out predicted keypoint from heatmap keypoints = post_process_heatmap(heatmap) return keypoints def dump_model_file(self, output_model_file): # Dump out the final heatmap output model as inference model, # since we don't need the intermediate heatmap in inference stage model = Model(inputs=self.hourglass_model.input, outputs=self.hourglass_model.outputs[-1]) model.save(output_model_file) def detect_video(hourglass, video_path, output_path=""): import cv2 vid = cv2.VideoCapture(0 if video_path == '0' else video_path) if not vid.isOpened(): raise IOError("Couldn't open webcam or video") video_FourCC = cv2.VideoWriter_fourcc(*'XVID') if video_path == '0' else int(vid.get(cv2.CAP_PROP_FOURCC)) video_fps = vid.get(cv2.CAP_PROP_FPS) video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))) isOutput = True if output_path != "" else False if isOutput: print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size)) out = cv2.VideoWriter(output_path, video_FourCC, (5. if video_path == '0' else video_fps), video_size) accum_time = 0 curr_fps = 0 fps = "FPS: ??" prev_time = timer() while True: return_value, frame = vid.read() image = Image.fromarray(frame) image = hourglass.detect_image(image) result = np.asarray(image) curr_time = timer() exec_time = curr_time - prev_time prev_time = curr_time accum_time = accum_time + exec_time curr_fps = curr_fps + 1 if accum_time > 1: accum_time = accum_time - 1 fps = "FPS: " + str(curr_fps) curr_fps = 0 cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.50, color=(255, 0, 0), thickness=2) cv2.namedWindow("result", cv2.WINDOW_NORMAL) cv2.imshow("result", result) if isOutput: out.write(result) if cv2.waitKey(1) & 0xFF == ord('q'): break # Release everything if job is finished vid.release() if isOutput: out.release() cv2.destroyAllWindows() def detect_img(hourglass): while True: img = input('Input image filename:') try: image = Image.open(img) except: print('Open Error! 
Try again!') continue else: r_image = hourglass.detect_image(image) r_image.show() if __name__ == "__main__": # class Hourglass defines the default value, so suppress any default here parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS, description='demo or dump out Hourglass h5 model') ''' Command line options ''' parser.add_argument( '--num_stacks', type=int, help='num of stacks, default ' + str(Hourglass.get_defaults("num_stacks")) ) parser.add_argument( '--mobile', default=False, action="store_true", help='use depthwise conv in hourglass, default ' + str(Hourglass.get_defaults("mobile")) ) parser.add_argument( '--tiny', default=False, action="store_true", help='tiny network for speed, feature channel=128, default ' + str(Hourglass.get_defaults("tiny")) ) parser.add_argument( '--model_image_size', type=str, help='model image input size as <num>x<num>, default ' + str(Hourglass.get_defaults("model_image_size")[0])+'x'+str(Hourglass.get_defaults("model_image_size")[1]), default=str(Hourglass.get_defaults("model_image_size")[0])+'x'+str(Hourglass.get_defaults("model_image_size")[1]) ) parser.add_argument( '--weights_path', type=str, help='path to model weight file, default ' + Hourglass.get_defaults("weights_path") ) parser.add_argument( '--classes_path', type=str, required=False, help='path to keypoint class definitions, default ' + Hourglass.get_defaults("classes_path") ) parser.add_argument( '--skeleton_path', type=str, required=False, help='path to keypoint skeleton definitions, default ' + str(Hourglass.get_defaults("skeleton_path")) ) parser.add_argument( '--conf_threshold', type=float, help='confidence threshold, default ' + str(Hourglass.get_defaults("conf_threshold")) ) parser.add_argument( '--image', default=False, action="store_true", help='Image detection mode, will ignore all positional arguments' ) ''' Command line positional arguments -- for video detection mode ''' parser.add_argument( "--input", nargs='?', type=str,required=False,default='./path2your_video', help = "Video input path" ) parser.add_argument( "--output", nargs='?', type=str, default="", help = "[Optional] Video output path" ) ''' Command line positional arguments -- for model dump ''' parser.add_argument( '--dump_model', default=False, action="store_true", help='Dump out training model to inference model' ) parser.add_argument( '--output_model_file', type=str, help='output inference model file' ) args = parser.parse_args() # param parse if args.model_image_size: height, width = args.model_image_size.split('x') args.model_image_size = (int(height), int(width)) # get wrapped inference object hourglass = Hourglass(**vars(args)) if args.dump_model: """ Dump out training model to inference model """ if not args.output_model_file: raise ValueError('output model file is not specified') print('Dumping out training model to inference model') hourglass.dump_model_file(args.output_model_file) sys.exit() if args.image: """ Image detection mode, disregard any remaining command line arguments """ print("Image detection mode") if "input" in args: print(" Ignoring remaining command line arguments: " + args.input + "," + args.output) detect_img(hourglass) elif "input" in args: detect_video(hourglass, args.input, args.output) else: print("Must specify at least video_input_path. See usage with --help.")
[ "numpy.asarray", "tensorflow.keras.backend.set_learning_phase", "numpy.array", "tensorflow.keras.models.Model" ]
demo.py
[(113, 'cv2.VideoCapture', 'cv2.VideoCapture', (["(0 if video_path == '0' else video_path)"], {}), False, 'import cv2\n'), (127, 'timeit.default_timer', 'timer', ([], {}), True, 'from timeit import default_timer as timer\n'), (154, 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), False, 'import cv2\n'), (174, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'argument_default': 'argparse.SUPPRESS', 'description': '"""demo or dump out Hourglass h5 model"""'}), False, 'import os, sys, argparse\n'), (51, 'common.utils.get_classes', 'get_classes', (['self.classes_path'], {}), False, 'from common.utils import get_classes, get_skeleton, render_skeleton, optimize_tf_gpu\n'), (53, 'tensorflow.keras.backend.set_learning_phase', 'K.set_learning_phase', (['(0)'], {}), True, 'import tensorflow.keras.backend as K\n'), (57, 'os.path.expanduser', 'os.path.expanduser', (['self.weights_path'], {}), False, 'import os, sys, argparse\n'), (67, 'hourglass.model.get_hourglass_model', 'get_hourglass_model', (['num_classes', 'self.num_stacks', 'self.num_channels'], {'input_size': 'self.model_image_size', 'mobile': 'self.mobile'}), False, 'from hourglass.model import get_hourglass_model\n'), (74, 'common.data_utils.preprocess_image', 'preprocess_image', (['image', 'self.model_image_size'], {}), False, 'from common.data_utils import preprocess_image\n'), (79, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (81, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (90, 'numpy.array', 'np.array', (['image'], {'dtype': '"""uint8"""'}), True, 'import numpy as np\n'), (91, 'common.utils.render_skeleton', 'render_skeleton', (['image_array', 'keypoints_dict', 'self.skeleton_lines', 'self.conf_threshold'], {}), False, 'from common.utils import get_classes, get_skeleton, render_skeleton, optimize_tf_gpu\n'), (93, 'PIL.Image.fromarray', 'Image.fromarray', (['image_array'], {}), False, 'from PIL import Image\n'), (100, 'hourglass.postprocess.post_process_heatmap', 'post_process_heatmap', (['heatmap'], {}), False, 'from hourglass.postprocess import post_process_heatmap, post_process_heatmap_single\n'), (107, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'self.hourglass_model.input', 'outputs': 'self.hourglass_model.outputs[-1]'}), False, 'from tensorflow.keras.models import Model, load_model\n'), (116, 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), False, 'import cv2\n'), (123, 'cv2.VideoWriter', 'cv2.VideoWriter', (['output_path', 'video_FourCC', "(5.0 if video_path == '0' else video_fps)", 'video_size'], {}), False, 'import cv2\n'), (130, 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), False, 'from PIL import Image\n'), (132, 'numpy.asarray', 'np.asarray', (['image'], {}), True, 'import numpy as np\n'), (133, 'timeit.default_timer', 'timer', ([], {}), True, 'from timeit import default_timer as timer\n'), (142, 'cv2.putText', 'cv2.putText', (['result'], {'text': 'fps', 'org': '(3, 15)', 'fontFace': 'cv2.FONT_HERSHEY_SIMPLEX', 'fontScale': '(0.5)', 'color': '(255, 0, 0)', 'thickness': '(2)'}), False, 'import cv2\n'), (144, 'cv2.namedWindow', 'cv2.namedWindow', (['"""result"""', 'cv2.WINDOW_NORMAL'], {}), False, 'import cv2\n'), (145, 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'result'], {}), False, 'import cv2\n'), (259, 'sys.exit', 'sys.exit', ([], {}), False, 'import os, sys, argparse\n'), (48, 'common.utils.get_skeleton', 'get_skeleton', (['self.skeleton_path'], {}), False, 'from common.utils import get_classes, get_skeleton, 
render_skeleton, optimize_tf_gpu\n'), (162, 'PIL.Image.open', 'Image.open', (['img'], {}), False, 'from PIL import Image\n'), (148, 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), False, 'import cv2\n')]
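In detect_image above, keypoints are multiplied by scale * 4 because the hourglass heatmaps are predicted at one quarter of the model's input resolution. post_process_heatmap itself is not shown; a minimal per-channel argmax version of heatmap parsing (an assumption for illustration, not the repo's exact implementation) looks like:

import numpy as np

def parse_heatmap(heatmap):
    """heatmap: (H/4, W/4, num_keypoints) array of confidences.
    Returns one (x, y, confidence) triple per keypoint channel."""
    keypoints = []
    for c in range(heatmap.shape[-1]):
        channel = heatmap[..., c]
        y, x = np.unravel_index(np.argmax(channel), channel.shape)
        keypoints.append((int(x), int(y), float(channel[y, x])))
    return keypoints

# detect_image then rescales each (x, y) back to the original image:
# (x * scale_w * 4, y * scale_h * 4, confidence)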
Jss7268/magenta
10e0b2c50baaa01a9c942ed3334b5b2cca761bef
# Copyright 2020 Jack Spencer Smith. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from dotmap import DotMap from tensorflow.keras import backend as K from tensorflow.keras.layers import * from tensorflow.keras.models import Model from magenta.models.polyamp import constants, sequence_prediction_util from magenta.models.polyamp.accuracy_util import multi_track_present_accuracy_wrapper, \ multi_track_prf_wrapper, \ single_track_present_accuracy_wrapper from magenta.models.polyamp.layer_util import NoteCroppingsToPianorolls from magenta.models.polyamp.loss_util import full_model_loss_wrapper from magenta.models.polyamp.timbre_dataset_util import NoteCropping def get_default_hparams(): return { 'full_learning_rate': 2e-4, 'multiple_instruments_threshold': 0.6, 'use_all_instruments': False, 'melodic_trainable': True, 'family_recall_weight': [1.0, 1.4, 1.4, .225, .05, 1.2, 1., 1.3, .45, 1.0, 1.0, .7], } class FullModel: def __init__(self, melodic_model, timbre_model, hparams): if hparams is None: hparams = DotMap(get_default_hparams()) self.hparams = hparams self.melodic_model: Model = melodic_model self.timbre_model: Model = timbre_model def sequence_to_note_croppings(self, sequence): """ Converts a NoteSequence Proto to a list of note_croppings :param sequence: NoteSequence to convert :return: list of note_croppings generated from sequence """ note_croppings = [] for note in sequence.notes: note_croppings.append(NoteCropping(pitch=note.pitch, start_idx=note.start_time * self.hparams.sample_rate, end_idx=note.end_time * self.hparams.sample_rate)) if len(note_croppings) == 0: note_croppings.append(NoteCropping( pitch=-1e+7, start_idx=-1e+7, end_idx=-1e+7 )) return note_croppings def get_croppings(self, input_list): """ Convert frame predictions into a sequence. Pad so all batches have same nof notes. 
:param input_list: frames, onsets, offsets :return: Tensor of padded cropping lists (padded with large negative numbers) """ batched_frame_predictions, batched_onset_predictions, batched_offset_predictions = \ input_list croppings_list = [] for batch_idx in range(K.int_shape(batched_frame_predictions)[0]): frame_predictions = batched_frame_predictions[batch_idx] onset_predictions = batched_onset_predictions[batch_idx] offset_predictions = batched_offset_predictions[batch_idx] sequence = sequence_prediction_util.predict_sequence( frame_predictions=frame_predictions, onset_predictions=onset_predictions, offset_predictions=offset_predictions, velocity_values=None, min_pitch=constants.MIN_MIDI_PITCH, hparams=self.hparams) croppings_list.append(self.sequence_to_note_croppings(sequence)) padded = tf.keras.preprocessing.sequence.pad_sequences(croppings_list, padding='post', dtype='int64', value=-1e+7) return tf.convert_to_tensor(padded) def get_full_model(self): """Build the Full Model architecture.""" spec_512 = Input(shape=(None, constants.SPEC_BANDS, 1), name='melodic_spec') spec_256 = Input(shape=(None, constants.SPEC_BANDS, 1), name='timbre_spec') present_instruments = Input(shape=(self.hparams.timbre_num_classes,)) # Maybe freeze the layers of the Melodic Model. self.melodic_model.trainable = self.hparams.melodic_trainable frame_probs, onset_probs, offset_probs = self.melodic_model.call([spec_512]) stop_gradient_layer = Lambda(lambda x: K.stop_gradient(x)) frame_predictions = stop_gradient_layer( frame_probs > self.hparams.predict_frame_threshold) generous_onset_predictions = stop_gradient_layer( onset_probs > self.hparams.predict_onset_threshold) offset_predictions = stop_gradient_layer( offset_probs > self.hparams.predict_offset_threshold) note_croppings = Lambda(self.get_croppings, output_shape=(None, 3), dynamic=True, dtype='int64')( [frame_predictions, generous_onset_predictions, offset_predictions]) timbre_probs = self.timbre_model.call([spec_256, note_croppings]) expand_dims = Lambda(lambda x_list: K.expand_dims(x_list[0], axis=x_list[1])) float_cast = Lambda(lambda x: K.cast_to_floatx(x)) pianoroll = Lambda(lambda x: tf.repeat(K.expand_dims(x), self.hparams.timbre_num_classes, -1), output_shape=(None, constants.MIDI_PITCHES, self.hparams.timbre_num_classes), dynamic=True)(frame_probs) timbre_pianoroll = NoteCroppingsToPianorolls(self.hparams, dynamic=True, )( [stop_gradient_layer(note_croppings), timbre_probs, stop_gradient_layer(pianoroll)]) expanded_present_instruments = float_cast(expand_dims([expand_dims([ (present_instruments), -2]), -2])) present_pianoroll = ( Multiply(name='apply_present')([timbre_pianoroll, expanded_present_instruments])) pianoroll_no_gradient = stop_gradient_layer(present_pianoroll) # Roll the pianoroll to get instrument predictions for offsets # which is normally where we stop the pianoroll fill. rolled_pianoroll = Lambda(lambda x: tf.roll(x, 1, axis=-3))(pianoroll_no_gradient) expanded_frames = expand_dims([frame_probs, -1]) expanded_onsets = expand_dims([onset_probs, -1]) expanded_offsets = expand_dims([offset_probs, -1]) # Use the last channel for instrument-agnostic midi. 
broadcasted_frames = Concatenate(name='multi_frames')( [present_pianoroll, expanded_frames]) broadcasted_onsets = Concatenate(name='multi_onsets')( [present_pianoroll, expanded_onsets]) broadcasted_offsets = Concatenate(name='multi_offsets')( [rolled_pianoroll, expanded_offsets]) losses = { 'multi_frames': full_model_loss_wrapper(self.hparams, self.hparams.frames_true_weighing), 'multi_onsets': full_model_loss_wrapper(self.hparams, self.hparams.onsets_true_weighing), 'multi_offsets': full_model_loss_wrapper(self.hparams, self.hparams.offsets_true_weighing), } accuracies = { 'multi_frames': [ multi_track_present_accuracy_wrapper( self.hparams.predict_frame_threshold, multiple_instruments_threshold=self.hparams.multiple_instruments_threshold), single_track_present_accuracy_wrapper( self.hparams.predict_frame_threshold), multi_track_prf_wrapper( self.hparams.predict_frame_threshold, multiple_instruments_threshold=self.hparams.multiple_instruments_threshold, print_report=True, hparams=self.hparams) ], 'multi_onsets': [ multi_track_present_accuracy_wrapper( self.hparams.predict_onset_threshold, multiple_instruments_threshold=self.hparams.multiple_instruments_threshold), single_track_present_accuracy_wrapper( self.hparams.predict_onset_threshold), multi_track_prf_wrapper( self.hparams.predict_onset_threshold, multiple_instruments_threshold=self.hparams.multiple_instruments_threshold, print_report=True, hparams=self.hparams) ], 'multi_offsets': [ multi_track_present_accuracy_wrapper( self.hparams.predict_offset_threshold, multiple_instruments_threshold=self.hparams.multiple_instruments_threshold), single_track_present_accuracy_wrapper( self.hparams.predict_offset_threshold), multi_track_prf_wrapper( self.hparams.predict_offset_threshold, multiple_instruments_threshold=self.hparams.multiple_instruments_threshold, hparams=self.hparams) ] } return Model(inputs=[spec_512, spec_256, present_instruments], outputs=[broadcasted_frames, broadcasted_onsets, broadcasted_offsets]), losses, accuracies
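A note on the padding step in `get_croppings` above: `tf.keras.preprocessing.sequence.pad_sequences` turns the ragged per-example cropping lists into one dense tensor, filling short lists with a large negative sentinel. A minimal standalone sketch with made-up croppings (shapes and values here are illustrative only, not model output):

import tensorflow as tf

# Two hypothetical per-example cropping lists of unequal length; each
# entry mirrors NoteCropping's (pitch, start_idx, end_idx) fields.
croppings_list = [
    [(60, 0, 1600), (64, 800, 2400)],  # two notes
    [(67, 400, 1200)],                 # one note
]
# Short lists are padded with -1e+7 so downstream code can spot padding.
padded = tf.keras.preprocessing.sequence.pad_sequences(
    croppings_list, padding='post', dtype='int64', value=-1e+7)
print(padded.shape)  # (2, 2, 3); the second row ends in a sentinel entry
print(tf.convert_to_tensor(padded).dtype)  # int64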
[ "tensorflow.convert_to_tensor", "tensorflow.keras.models.Model", "tensorflow.keras.backend.int_shape", "tensorflow.roll", "tensorflow.keras.backend.expand_dims", "tensorflow.keras.backend.cast_to_floatx", "tensorflow.keras.backend.stop_gradient", "tensorflow.keras.preprocessing.sequence.pad_sequences" ]
magenta/models/polyamp/full_model.py
[(90, 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'tf.keras.preprocessing.sequence.pad_sequences', (['croppings_list'], {'padding': '"""post"""', 'dtype': '"""int64"""', 'value': '(-10000000.0)'}), True, 'import tensorflow as tf\n'), (94, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['padded'], {}), True, 'import tensorflow as tf\n'), (81, 'magenta.models.polyamp.sequence_prediction_util.predict_sequence', 'sequence_prediction_util.predict_sequence', ([], {'frame_predictions': 'frame_predictions', 'onset_predictions': 'onset_predictions', 'offset_predictions': 'offset_predictions', 'velocity_values': 'None', 'min_pitch': 'constants.MIN_MIDI_PITCH', 'hparams': 'self.hparams'}), False, 'from magenta.models.polyamp import constants, sequence_prediction_util\n'), (133, 'magenta.models.polyamp.layer_util.NoteCroppingsToPianorolls', 'NoteCroppingsToPianorolls', (['self.hparams'], {'dynamic': '(True)'}), False, 'from magenta.models.polyamp.layer_util import NoteCroppingsToPianorolls\n'), (162, 'magenta.models.polyamp.loss_util.full_model_loss_wrapper', 'full_model_loss_wrapper', (['self.hparams', 'self.hparams.frames_true_weighing'], {}), False, 'from magenta.models.polyamp.loss_util import full_model_loss_wrapper\n'), (164, 'magenta.models.polyamp.loss_util.full_model_loss_wrapper', 'full_model_loss_wrapper', (['self.hparams', 'self.hparams.onsets_true_weighing'], {}), False, 'from magenta.models.polyamp.loss_util import full_model_loss_wrapper\n'), (166, 'magenta.models.polyamp.loss_util.full_model_loss_wrapper', 'full_model_loss_wrapper', (['self.hparams', 'self.hparams.offsets_true_weighing'], {}), False, 'from magenta.models.polyamp.loss_util import full_model_loss_wrapper\n'), (208, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[spec_512, spec_256, present_instruments]', 'outputs': '[broadcasted_frames, broadcasted_onsets, broadcasted_offsets]'}), False, 'from tensorflow.keras.models import Model\n'), (56, 'magenta.models.polyamp.timbre_dataset_util.NoteCropping', 'NoteCropping', ([], {'pitch': 'note.pitch', 'start_idx': '(note.start_time * self.hparams.sample_rate)', 'end_idx': '(note.end_time * self.hparams.sample_rate)'}), False, 'from magenta.models.polyamp.timbre_dataset_util import NoteCropping\n'), (60, 'magenta.models.polyamp.timbre_dataset_util.NoteCropping', 'NoteCropping', ([], {'pitch': '(-10000000.0)', 'start_idx': '(-10000000.0)', 'end_idx': '(-10000000.0)'}), False, 'from magenta.models.polyamp.timbre_dataset_util import NoteCropping\n'), (77, 'tensorflow.keras.backend.int_shape', 'K.int_shape', (['batched_frame_predictions'], {}), True, 'from tensorflow.keras import backend as K\n'), (106, 'tensorflow.keras.backend.stop_gradient', 'K.stop_gradient', (['x'], {}), True, 'from tensorflow.keras import backend as K\n'), (122, 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['x_list[0]'], {'axis': 'x_list[1]'}), True, 'from tensorflow.keras import backend as K\n'), (123, 'tensorflow.keras.backend.cast_to_floatx', 'K.cast_to_floatx', (['x'], {}), True, 'from tensorflow.keras import backend as K\n'), (172, 'magenta.models.polyamp.accuracy_util.multi_track_present_accuracy_wrapper', 'multi_track_present_accuracy_wrapper', (['self.hparams.predict_frame_threshold'], {'multiple_instruments_threshold': 'self.hparams.multiple_instruments_threshold'}), False, 'from magenta.models.polyamp.accuracy_util import multi_track_present_accuracy_wrapper, multi_track_prf_wrapper, single_track_present_accuracy_wrapper\n'), (175, 
'magenta.models.polyamp.accuracy_util.single_track_present_accuracy_wrapper', 'single_track_present_accuracy_wrapper', (['self.hparams.predict_frame_threshold'], {}), False, 'from magenta.models.polyamp.accuracy_util import multi_track_present_accuracy_wrapper, multi_track_prf_wrapper, single_track_present_accuracy_wrapper\n'), (177, 'magenta.models.polyamp.accuracy_util.multi_track_prf_wrapper', 'multi_track_prf_wrapper', (['self.hparams.predict_frame_threshold'], {'multiple_instruments_threshold': 'self.hparams.multiple_instruments_threshold', 'print_report': '(True)', 'hparams': 'self.hparams'}), False, 'from magenta.models.polyamp.accuracy_util import multi_track_present_accuracy_wrapper, multi_track_prf_wrapper, single_track_present_accuracy_wrapper\n'), (184, 'magenta.models.polyamp.accuracy_util.multi_track_present_accuracy_wrapper', 'multi_track_present_accuracy_wrapper', (['self.hparams.predict_onset_threshold'], {'multiple_instruments_threshold': 'self.hparams.multiple_instruments_threshold'}), False, 'from magenta.models.polyamp.accuracy_util import multi_track_present_accuracy_wrapper, multi_track_prf_wrapper, single_track_present_accuracy_wrapper\n'), (187, 'magenta.models.polyamp.accuracy_util.single_track_present_accuracy_wrapper', 'single_track_present_accuracy_wrapper', (['self.hparams.predict_onset_threshold'], {}), False, 'from magenta.models.polyamp.accuracy_util import multi_track_present_accuracy_wrapper, multi_track_prf_wrapper, single_track_present_accuracy_wrapper\n'), (189, 'magenta.models.polyamp.accuracy_util.multi_track_prf_wrapper', 'multi_track_prf_wrapper', (['self.hparams.predict_onset_threshold'], {'multiple_instruments_threshold': 'self.hparams.multiple_instruments_threshold', 'print_report': '(True)', 'hparams': 'self.hparams'}), False, 'from magenta.models.polyamp.accuracy_util import multi_track_present_accuracy_wrapper, multi_track_prf_wrapper, single_track_present_accuracy_wrapper\n'), (196, 'magenta.models.polyamp.accuracy_util.multi_track_present_accuracy_wrapper', 'multi_track_present_accuracy_wrapper', (['self.hparams.predict_offset_threshold'], {'multiple_instruments_threshold': 'self.hparams.multiple_instruments_threshold'}), False, 'from magenta.models.polyamp.accuracy_util import multi_track_present_accuracy_wrapper, multi_track_prf_wrapper, single_track_present_accuracy_wrapper\n'), (199, 'magenta.models.polyamp.accuracy_util.single_track_present_accuracy_wrapper', 'single_track_present_accuracy_wrapper', (['self.hparams.predict_offset_threshold'], {}), False, 'from magenta.models.polyamp.accuracy_util import multi_track_present_accuracy_wrapper, multi_track_prf_wrapper, single_track_present_accuracy_wrapper\n'), (201, 'magenta.models.polyamp.accuracy_util.multi_track_prf_wrapper', 'multi_track_prf_wrapper', (['self.hparams.predict_offset_threshold'], {'multiple_instruments_threshold': 'self.hparams.multiple_instruments_threshold', 'hparams': 'self.hparams'}), False, 'from magenta.models.polyamp.accuracy_util import multi_track_present_accuracy_wrapper, multi_track_prf_wrapper, single_track_present_accuracy_wrapper\n'), (147, 'tensorflow.roll', 'tf.roll', (['x', '(1)'], {'axis': '(-3)'}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['x'], {}), True, 'from tensorflow.keras import backend as K\n')]
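One pattern worth calling out from `get_full_model` above: probabilities are thresholded into boolean masks and routed through a stop-gradient Lambda, so the cropping step cannot backpropagate into the melodic model. A small sketch of just that mechanism (the threshold and values are made up):

import tensorflow as tf
from tensorflow.keras import backend as K

probs = tf.constant([[0.2, 0.9, 0.7]])
stop_gradient_layer = tf.keras.layers.Lambda(lambda x: K.stop_gradient(x))
# The comparison yields booleans; stop_gradient cuts any gradient path.
predictions = stop_gradient_layer(probs > 0.5)
print(predictions.numpy())  # [[False  True  True]]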
zli117/Evolution
b5be1552338fa57b9a3e4743c8e917e30d2caada
import os
from abc import ABC
from abc import abstractmethod
from dataclasses import dataclass
from multiprocessing import Manager
from multiprocessing import Pool
from multiprocessing import Process
from queue import Queue
from typing import Dict, Any, Tuple, Generator, List

import numpy as np
import tensorflow as tf
from sklearn.model_selection import KFold
from tensorflow import keras
from tensorflow.python.client import device_lib

from evolution.encoding.base import Edge
from evolution.train.progress_observer import ProgressObserver


class BaseTrainer(ABC):
    @abstractmethod
    def train_and_eval(self, higher_level_model: Edge, name: str,
                       observers: Tuple[ProgressObserver, ...] = ()) -> float:
        pass

    @abstractmethod
    def optimizer_factory(self) -> keras.optimizers.Optimizer:
        pass


# Since TensorFlow will allocate more memory than specified in
# per_process_gpu_memory_fraction, we need to shrink the allocation fraction.
MEMORY_SHRINK_FACTOR = 0.65


@dataclass
class Params(object):
    x_train: np.array
    x_valid: np.array
    y_train: np.array
    y_valid: np.array
    edge: Edge
    cv_idx: int
    log_dir: str
    device: str
    memory_fraction: float
    name: str
    progress_queue: 'Queue[Tuple[str, int, int, int]]'


@dataclass
class ParallelTrainer(BaseTrainer):
    k_folds: int
    num_process: int
    x_train: np.array
    y_train: np.array
    x_valid: np.array
    y_valid: np.array
    fit_args: Dict[str, Any]
    loss: Any
    metrics: Any
    log_dir: str

    def __post_init__(self) -> None:
        super().__init__()

    @staticmethod
    def _get_device_info_worker(devices: List[Any]) -> None:
        devices.extend(device_lib.list_local_devices())

    def _get_device_info(self) -> List[Any]:
        manager = Manager()
        devices: List[Any] = manager.list()
        # Since this is a CUDA call, doing it in the parent process would hang
        # the sub-processes. The fix is to run the CUDA call in a separate process.
See: # https://github.com/tensorflow/tensorflow/issues/8220 process = Process(target=self._get_device_info_worker, args=(devices,)) process.start() process.join() return devices def _param_generator(self, edge: Edge, name: str, gpus: List[Any], cpus: List[Any], queue: Queue) -> Generator[Params, None, None]: kf = KFold(n_splits=self.k_folds) total_gpu_memory = sum([device.memory_limit for device in gpus]) if total_gpu_memory == 0: device_allocation: List[Tuple[str, float]] = [(str(device.name), 0) for device in cpus] else: gpu_process_count = [ int(self.num_process * device.memory_limit / total_gpu_memory) for device in gpus] device_allocation = [ (str(device.name), MEMORY_SHRINK_FACTOR / count) for device, count in zip(gpus, gpu_process_count)] for i, index in enumerate(kf.split(self.x_train)): train_idx, valid_idx = index x_train: np.array = self.x_train[train_idx] x_valid: np.array = self.x_train[valid_idx] y_train: np.array = self.y_train[train_idx] y_valid: np.array = self.y_train[valid_idx] dev_name, allocation = device_allocation[i % len( device_allocation)] yield Params(x_train=x_train, x_valid=x_valid, y_train=y_train, y_valid=y_valid, edge=edge, cv_idx=i, log_dir=os.path.join(os.path.join(self.log_dir, name), 'cv_%d' % i), device=dev_name, memory_fraction=allocation, name=name, progress_queue=queue) def _worker(self, params: Params) -> float: gpu_options = tf.compat.v1.GPUOptions( per_process_gpu_memory_fraction=params.memory_fraction) with tf.device(params.device): with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto( gpu_options=gpu_options)) as sess: tf.compat.v1.keras.backend.set_session(sess) input_tensor = keras.Input(shape=params.x_train.shape[1:]) out = params.edge.build(input_tensor) model = keras.Model(inputs=input_tensor, outputs=out) model.compile(loss=self.loss, optimizer=self.optimizer_factory(), metrics=[self.metrics]) tensor_board = keras.callbacks.TensorBoard( batch_size=10, write_graph=True, log_dir=params.log_dir) total_epochs = self.fit_args.get('epochs', 1) progress_callback = keras.callbacks.LambdaCallback( on_epoch_end=lambda epoch, logs: params.progress_queue.put( (params.name, epoch, params.cv_idx, total_epochs))) model.fit(params.x_train, params.y_train, validation_data=(params.x_valid, params.y_valid), callbacks=[tensor_board, progress_callback], **self.fit_args) _, test_metrics = model.evaluate(self.x_valid, self.y_valid, verbose=0) return test_metrics def _run_train_pool(self, edge: Edge, name: str, history: List[float], progress_queue: Queue) -> None: devices = self._get_device_info() available_gpus = [device for device in devices if device.device_type == 'GPU'] available_cpus = [device for device in devices if device.device_type == 'CPU'] with Pool(self.num_process) as pool: history.extend(pool.map(self._worker, self._param_generator(edge, name, available_gpus, available_cpus, progress_queue))) progress_queue.put(None) def train_and_eval(self, edge: Edge, name: str, observers: Tuple[ProgressObserver, ...] = ()) -> float: manager = Manager() history: List[float] = manager.list() queue: Queue = manager.Queue() process = Process(target=self._run_train_pool, args=(edge, name, history, queue)) process.start() for name, epoch, cv_idx, total_epoch in iter(queue.get, None): for observer in observers: observer.on_progress(name, cv_idx, epoch, self.k_folds, total_epoch) process.join() return sum(history) / len(history) def optimizer_factory(self) -> keras.optimizers.Optimizer: return keras.optimizers.Adam()
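`_param_generator` above leans on `sklearn.model_selection.KFold`, which yields only index arrays that are then used to slice the training set. A tiny sketch of that split pattern on toy data (the array below is a hypothetical stand-in for `x_train`):

import numpy as np
from sklearn.model_selection import KFold

x = np.arange(10).reshape(10, 1)  # toy stand-in for x_train
kf = KFold(n_splits=5)
for i, (train_idx, valid_idx) in enumerate(kf.split(x)):
    # Each fold holds out a contiguous 20% slice for validation.
    print(i, train_idx, valid_idx)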
[ "tensorflow.device", "tensorflow.compat.v1.ConfigProto", "tensorflow.compat.v1.GPUOptions", "tensorflow.python.client.device_lib.list_local_devices", "tensorflow.keras.Input", "tensorflow.compat.v1.keras.backend.set_session", "sklearn.model_selection.KFold", "tensorflow.keras.Model", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.callbacks.TensorBoard" ]
evolution/train/trainer.py
[(74, 'multiprocessing.Manager', 'Manager', ([], {}), False, 'from multiprocessing import Manager\n'), (80, 'multiprocessing.Process', 'Process', ([], {'target': 'self._get_device_info_worker', 'args': '(devices,)'}), False, 'from multiprocessing import Process\n'), (90, 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'self.k_folds'}), False, 'from sklearn.model_selection import KFold\n'), (120, 'tensorflow.compat.v1.GPUOptions', 'tf.compat.v1.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'params.memory_fraction'}), True, 'import tensorflow as tf\n'), (174, 'multiprocessing.Manager', 'Manager', ([], {}), False, 'from multiprocessing import Manager\n'), (177, 'multiprocessing.Process', 'Process', ([], {'target': 'self._run_train_pool', 'args': '(edge, name, history, queue)'}), False, 'from multiprocessing import Process\n'), (188, 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), False, 'from tensorflow import keras\n'), (71, 'tensorflow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', ([], {}), False, 'from tensorflow.python.client import device_lib\n'), (122, 'tensorflow.device', 'tf.device', (['params.device'], {}), True, 'import tensorflow as tf\n'), (164, 'multiprocessing.Pool', 'Pool', (['self.num_process'], {}), False, 'from multiprocessing import Pool\n'), (125, 'tensorflow.compat.v1.keras.backend.set_session', 'tf.compat.v1.keras.backend.set_session', (['sess'], {}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': 'params.x_train.shape[1:]'}), False, 'from tensorflow import keras\n'), (129, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'input_tensor', 'outputs': 'out'}), False, 'from tensorflow import keras\n'), (134, 'tensorflow.keras.callbacks.TensorBoard', 'keras.callbacks.TensorBoard', ([], {'batch_size': '(10)', 'write_graph': '(True)', 'log_dir': 'params.log_dir'}), False, 'from tensorflow import keras\n'), (123, 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {'gpu_options': 'gpu_options'}), True, 'import tensorflow as tf\n'), (114, 'os.path.join', 'os.path.join', (['self.log_dir', 'name'], {}), False, 'import os\n')]
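The per-GPU memory split in `_param_generator` is plain arithmetic: worker processes are apportioned to GPUs in proportion to their memory, and each worker's fraction is shrunk by MEMORY_SHRINK_FACTOR to leave headroom for TensorFlow's over-allocation. A sketch with two hypothetical 8 GB GPUs:

MEMORY_SHRINK_FACTOR = 0.65
num_process = 4
memory_limits = [8e9, 8e9]  # hypothetical GPU memory limits, in bytes
total = sum(memory_limits)
counts = [int(num_process * m / total) for m in memory_limits]  # [2, 2]
fractions = [MEMORY_SHRINK_FACTOR / c for c in counts]
print(fractions)  # [0.325, 0.325] -> per_process_gpu_memory_fraction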
eupston/Deepbox
cdb0ba9a3de3a81672a9fc3610e60da9773d9ddd
#!/usr/bin/env python3 """Convert a Keras model to frugally-deep format. """ #TODO make frugally-deep submodule and use convertModel.py from there import base64 import datetime import hashlib import json import sys import numpy as np from tensorflow.keras import backend as K from tensorflow.keras.layers import Input, Embedding from tensorflow.keras.models import Model, load_model __author__ = "Tobias Hermann" __copyright__ = "Copyright 2017, Tobias Hermann" __license__ = "MIT" __maintainer__ = "Tobias Hermann, https://github.com/Dobiasd/frugally-deep" __email__ = "[email protected]" STORE_FLOATS_HUMAN_READABLE = False def transform_input_kernel(kernel): """Transforms weights of a single CuDNN input kernel into the regular Keras format.""" return kernel.T.reshape(kernel.shape, order='F') def transform_recurrent_kernel(kernel): """Transforms weights of a single CuDNN recurrent kernel into the regular Keras format.""" return kernel.T def transform_kernels(kernels, n_gates, transform_func): """ Transforms CuDNN kernel matrices (either LSTM or GRU) into the regular Keras format. Parameters ---------- kernels : numpy.ndarray Composite matrix of input or recurrent kernels. n_gates : int Number of recurrent unit gates, 3 for GRU, 4 for LSTM. transform_func: function(numpy.ndarray) Function to apply to each input or recurrent kernel. Returns ------- numpy.ndarray Transformed composite matrix of input or recurrent kernels in C-contiguous layout. """ return np.require(np.hstack([transform_func(kernel) for kernel in np.hsplit(kernels, n_gates)]), requirements='C') def transform_bias(bias): """Transforms bias weights of an LSTM layer into the regular Keras format.""" return np.sum(np.split(bias, 2, axis=0), axis=0) def write_text_file(path, text): """Write a string to a file""" with open(path, "w") as text_file: print(text, file=text_file) def int_or_none(value): """Leave None values as is, convert everything else to int""" if value is None: return value return int(value) def keras_shape_to_fdeep_tensor_shape(raw_shape): """Convert a keras shape to an fdeep shape""" return singleton_list_to_value(raw_shape)[1:] def get_layer_input_shape_tensor_shape(layer): """Convert layer input shape to an fdeep shape""" return keras_shape_to_fdeep_tensor_shape(layer.input_shape) def show_tensor(tens): """Serialize 3-tensor to a dict""" return { 'shape': tens.shape[1:], 'values': encode_floats(tens.flatten()) } def get_model_input_layers(model): """Works for different Keras version.""" if hasattr(model, '_input_layers'): return model._input_layers if hasattr(model, 'input_layers'): return model.input_layers raise ValueError('can not get (_)input_layers from model') def measure_predict(model, data_in): """Returns output and duration in seconds""" start_time = datetime.datetime.now() data_out = model.predict(data_in) end_time = datetime.datetime.now() duration = end_time - start_time print('Forward pass took {} s.'.format(duration.total_seconds())) return data_out, duration.total_seconds() def replace_none_with(value, shape): """Replace every None with a fixed value.""" return tuple(list(map(lambda x: x if x is not None else value, shape))) def are_embedding_layer_positions_ok_for_testing(model): """ Test data can only be generated if all embeddings layers are positioned directly behind the input nodes """ def embedding_layer_names(model): layers = model.layers result = set() for layer in layers: if isinstance(layer, Embedding): result.add(layer.name) layer_type = type(layer).__name__ if layer_type in ['Model', 
'Sequential', 'Functional']:
                result = result.union(embedding_layer_names(layer))
        return result

    def embedding_layer_names_at_input_nodes(model):
        result = set()
        for input_layer in get_model_input_layers(model):
            if input_layer._outbound_nodes and isinstance(
                    input_layer._outbound_nodes[0].outbound_layer, Embedding):
                result.add(input_layer._outbound_nodes[0].outbound_layer.name)
        return set(result)

    return embedding_layer_names(model) == embedding_layer_names_at_input_nodes(model)


def gen_test_data(model):
    """Generate data for model verification test."""

    def set_shape_idx_0_to_1_if_none(shape):
        """Change first element in tuple to 1."""
        if shape[0] is not None:
            return shape
        shape_lst = list(shape)
        shape_lst[0] = 1
        shape = tuple(shape_lst)
        return shape

    def generate_input_data(input_layer):
        """Random data fitting the input shape of a layer."""
        if input_layer._outbound_nodes and isinstance(
                input_layer._outbound_nodes[0].outbound_layer, Embedding):
            random_fn = lambda size: np.random.randint(
                0, input_layer._outbound_nodes[0].outbound_layer.input_dim, size)
        else:
            random_fn = np.random.normal
        try:
            shape = input_layer.batch_input_shape
        except AttributeError:
            shape = input_layer.input_shape
        return random_fn(
            size=replace_none_with(32, set_shape_idx_0_to_1_if_none(singleton_list_to_value(shape)))).astype(np.float32)

    assert are_embedding_layer_positions_ok_for_testing(
        model), "Test data can only be generated if embedding layers are positioned directly after input nodes."

    data_in = list(map(generate_input_data, get_model_input_layers(model)))

    warm_up_runs = 3
    test_runs = 5
    for i in range(warm_up_runs):
        if i == 0:
            # Store the results of the first call for the test;
            # this is because the states of recurrent layers are 0.
            # model.reset_states() cannot be called in some cases in Keras without an error:
# an error occurs when a recurrent layer is stateful and the initial state is passed as input.
            data_out_test, duration = measure_predict(model, data_in)
        else:
            measure_predict(model, data_in)
    duration_sum = 0
    print('Starting performance measurements.')
    for _ in range(test_runs):
        data_out, duration = measure_predict(model, data_in)
        duration_sum = duration_sum + duration
    duration_avg = duration_sum / test_runs
    print('Forward pass took {} s on average.'.format(duration_avg))
    return {
        'inputs': list(map(show_tensor, as_list(data_in))),
        'outputs': list(map(show_tensor, as_list(data_out_test)))
    }


def split_every(size, seq):
    """Split a sequence into chunks of the given size."""
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))


def encode_floats(arr):
    """Serialize a sequence of floats."""
    if STORE_FLOATS_HUMAN_READABLE:
        return arr.flatten().tolist()
    return list(split_every(1024, base64.b64encode(arr).decode('ascii')))


def prepare_filter_weights_conv_2d(weights):
    """Change dimension order of 2d filter weights to the one used in fdeep"""
    assert len(weights.shape) == 4
    return np.moveaxis(weights, [0, 1, 2, 3], [1, 2, 3, 0]).flatten()


def prepare_filter_weights_slice_conv_2d(weights):
    """Change dimension order of 2d filter weights to the one used in fdeep"""
    assert len(weights.shape) == 4
    return np.moveaxis(weights, [0, 1, 2, 3], [1, 2, 0, 3]).flatten()


def prepare_filter_weights_conv_1d(weights):
    """Change dimension order of 1d filter weights to the one used in fdeep"""
    assert len(weights.shape) == 3
    return np.moveaxis(weights, [0, 1, 2], [1, 2, 0]).flatten()


def show_conv_1d_layer(layer):
    """Serialize Conv1D layer to dict"""
    weights = layer.get_weights()
    assert len(weights) == 1 or len(weights) == 2
    assert len(weights[0].shape) == 3
    weights_flat = prepare_filter_weights_conv_1d(weights[0])
    assert layer.padding in ['valid', 'same', 'causal']
    assert len(layer.input_shape) == 3
    assert layer.input_shape[0] in {None, 1}
    result = {
        'weights': encode_floats(weights_flat)
    }
    if len(weights) == 2:
        bias = weights[1]
        result['bias'] = encode_floats(bias)
    return result


def show_conv_2d_layer(layer):
    """Serialize Conv2D layer to dict"""
    weights = layer.get_weights()
    assert len(weights) == 1 or len(weights) == 2
    assert len(weights[0].shape) == 4
    weights_flat = prepare_filter_weights_conv_2d(weights[0])
    assert layer.padding in ['valid', 'same']
    assert len(layer.input_shape) == 4
    assert layer.input_shape[0] in {None, 1}
    result = {
        'weights': encode_floats(weights_flat)
    }
    if len(weights) == 2:
        bias = weights[1]
        result['bias'] = encode_floats(bias)
    return result


def show_separable_conv_2d_layer(layer):
    """Serialize SeparableConv2D layer to dict"""
    weights = layer.get_weights()
    assert layer.depth_multiplier == 1
    assert len(weights) == 2 or len(weights) == 3
    assert len(weights[0].shape) == 4
    assert len(weights[1].shape) == 4

    # probably incorrect for depth_multiplier > 1?
slice_weights = prepare_filter_weights_slice_conv_2d(weights[0]) stack_weights = prepare_filter_weights_conv_2d(weights[1]) assert layer.padding in ['valid', 'same'] assert len(layer.input_shape) == 4 assert layer.input_shape[0] in {None, 1} result = { 'slice_weights': encode_floats(slice_weights), 'stack_weights': encode_floats(stack_weights), } if len(weights) == 3: bias = weights[2] result['bias'] = encode_floats(bias) return result def show_depthwise_conv_2d_layer(layer): """Serialize DepthwiseConv2D layer to dict""" weights = layer.get_weights() assert layer.depth_multiplier == 1 assert len(weights) in [1, 2] assert len(weights[0].shape) == 4 # probably incorrect for depth_multiplier > 1? slice_weights = prepare_filter_weights_slice_conv_2d(weights[0]) assert layer.padding in ['valid', 'same'] assert len(layer.input_shape) == 4 assert layer.input_shape[0] in {None, 1} result = { 'slice_weights': encode_floats(slice_weights), } if len(weights) == 2: bias = weights[1] result['bias'] = encode_floats(bias) return result def show_batch_normalization_layer(layer): """Serialize batch normalization layer to dict""" moving_mean = K.get_value(layer.moving_mean) moving_variance = K.get_value(layer.moving_variance) result = {} result['moving_mean'] = encode_floats(moving_mean) result['moving_variance'] = encode_floats(moving_variance) if layer.center: beta = K.get_value(layer.beta) result['beta'] = encode_floats(beta) if layer.scale: gamma = K.get_value(layer.gamma) result['gamma'] = encode_floats(gamma) return result def show_dense_layer(layer): """Serialize dense layer to dict""" weights = layer.get_weights() assert len(weights) == 1 or len(weights) == 2 assert len(weights[0].shape) == 2 weights_flat = weights[0].flatten() result = { 'weights': encode_floats(weights_flat) } if len(weights) == 2: bias = weights[1] result['bias'] = encode_floats(bias) return result def show_prelu_layer(layer): """Serialize prelu layer to dict""" weights = layer.get_weights() assert len(weights) == 1 weights_flat = weights[0].flatten() result = { 'alpha': encode_floats(weights_flat) } return result def show_relu_layer(layer): """Serialize relu layer to dict""" assert layer.negative_slope == 0 assert layer.threshold == 0 return {} def show_embedding_layer(layer): """Serialize Embedding layer to dict""" weights = layer.get_weights() assert len(weights) == 1 result = { 'weights': encode_floats(weights[0]) } return result def show_lstm_layer(layer): """Serialize LSTM layer to dict""" assert not layer.go_backwards assert not layer.unroll weights = layer.get_weights() if isinstance(layer.input, list): assert len(layer.input) in [1, 3] assert len(weights) == 2 or len(weights) == 3 result = {'weights': encode_floats(weights[0]), 'recurrent_weights': encode_floats(weights[1])} if len(weights) == 3: result['bias'] = encode_floats(weights[2]) return result def show_gru_layer(layer): """Serialize GRU layer to dict""" assert not layer.go_backwards assert not layer.unroll assert not layer.return_state weights = layer.get_weights() assert len(weights) == 2 or len(weights) == 3 result = {'weights': encode_floats(weights[0]), 'recurrent_weights': encode_floats(weights[1])} if len(weights) == 3: result['bias'] = encode_floats(weights[2]) return result def transform_cudnn_weights(input_weights, recurrent_weights, n_gates): return transform_kernels(input_weights, n_gates, transform_input_kernel), \ transform_kernels(recurrent_weights, n_gates, transform_recurrent_kernel) def show_cudnn_lstm_layer(layer): """Serialize a GPU-trained 
LSTM layer to dict""" weights = layer.get_weights() if isinstance(layer.input, list): assert len(layer.input) in [1, 3] assert len(weights) == 3 # CuDNN LSTM always has a bias n_gates = 4 input_weights, recurrent_weights = transform_cudnn_weights(weights[0], weights[1], n_gates) result = {'weights': encode_floats(input_weights), 'recurrent_weights': encode_floats(recurrent_weights), 'bias': encode_floats(transform_bias(weights[2]))} return result def show_cudnn_gru_layer(layer): """Serialize a GPU-trained GRU layer to dict""" weights = layer.get_weights() assert len(weights) == 3 # CuDNN GRU always has a bias n_gates = 3 input_weights, recurrent_weights = transform_cudnn_weights(weights[0], weights[1], n_gates) result = {'weights': encode_floats(input_weights), 'recurrent_weights': encode_floats(recurrent_weights), 'bias': encode_floats(weights[2])} return result def get_transform_func(layer): """Returns functions that can be applied to layer weights to transform them into the standard Keras format, if applicable.""" if layer.__class__.__name__ in ['CuDNNGRU', 'CuDNNLSTM']: if layer.__class__.__name__ == 'CuDNNGRU': n_gates = 3 elif layer.__class__.__name__ == 'CuDNNLSTM': n_gates = 4 input_transform_func = lambda kernels: transform_kernels(kernels, n_gates, transform_input_kernel) recurrent_transform_func = lambda kernels: transform_kernels(kernels, n_gates, transform_recurrent_kernel) else: input_transform_func = lambda kernels: kernels recurrent_transform_func = lambda kernels: kernels if layer.__class__.__name__ == 'CuDNNLSTM': bias_transform_func = transform_bias else: bias_transform_func = lambda bias: bias return input_transform_func, recurrent_transform_func, bias_transform_func def show_bidirectional_layer(layer): """Serialize Bidirectional layer to dict""" forward_weights = layer.forward_layer.get_weights() assert len(forward_weights) == 2 or len(forward_weights) == 3 forward_input_transform_func, forward_recurrent_transform_func, forward_bias_transform_func = get_transform_func( layer.forward_layer) backward_weights = layer.backward_layer.get_weights() assert len(backward_weights) == 2 or len(backward_weights) == 3 backward_input_transform_func, backward_recurrent_transform_func, backward_bias_transform_func = get_transform_func( layer.backward_layer) result = {'forward_weights': encode_floats(forward_input_transform_func(forward_weights[0])), 'forward_recurrent_weights': encode_floats(forward_recurrent_transform_func(forward_weights[1])), 'backward_weights': encode_floats(backward_input_transform_func(backward_weights[0])), 'backward_recurrent_weights': encode_floats(backward_recurrent_transform_func(backward_weights[1]))} if len(forward_weights) == 3: result['forward_bias'] = encode_floats(forward_bias_transform_func(forward_weights[2])) if len(backward_weights) == 3: result['backward_bias'] = encode_floats(backward_bias_transform_func(backward_weights[2])) return result def show_input_layer(layer): """Serialize input layer to dict""" assert not layer.sparse return {} def show_softmax_layer(layer): """Serialize softmax layer to dict""" assert layer.axis == -1 def show_reshape_layer(layer): """Serialize reshape layer to dict""" for dim_size in layer.target_shape: assert dim_size != -1, 'Reshape inference not supported' def get_layer_functions_dict(): return { 'Conv1D': show_conv_1d_layer, 'Conv2D': show_conv_2d_layer, 'SeparableConv2D': show_separable_conv_2d_layer, 'DepthwiseConv2D': show_depthwise_conv_2d_layer, 'BatchNormalization': show_batch_normalization_layer, 'Dense': 
show_dense_layer, 'PReLU': show_prelu_layer, 'ReLU': show_relu_layer, 'Embedding': show_embedding_layer, 'LSTM': show_lstm_layer, 'GRU': show_gru_layer, 'CuDNNLSTM': show_cudnn_lstm_layer, 'CuDNNGRU': show_cudnn_gru_layer, 'Bidirectional': show_bidirectional_layer, 'TimeDistributed': show_time_distributed_layer, 'Input': show_input_layer, 'Softmax': show_softmax_layer } def show_time_distributed_layer(layer): show_layer_functions = get_layer_functions_dict() config = layer.get_config() class_name = config['layer']['class_name'] if class_name in show_layer_functions: if len(layer.input_shape) == 3: input_shape_new = (layer.input_shape[0], layer.input_shape[2]) elif len(layer.input_shape) == 4: input_shape_new = (layer.input_shape[0], layer.input_shape[2], layer.input_shape[3]) elif len(layer.input_shape) == 5: input_shape_new = (layer.input_shape[0], layer.input_shape[2], layer.input_shape[3], layer.input_shape[4]) elif len(layer.input_shape) == 6: input_shape_new = (layer.input_shape[0], layer.input_shape[2], layer.input_shape[3], layer.input_shape[4], layer.input_shape[5]) else: raise Exception('Wrong input shape') layer_function = show_layer_functions[class_name] attributes = dir(layer.layer) class CopiedLayer: pass copied_layer = CopiedLayer() for attr in attributes: try: if attr not in ['input_shape', '__class__']: setattr(copied_layer, attr, getattr(layer.layer, attr)) elif attr == 'input_shape': setattr(copied_layer, 'input_shape', input_shape_new) except Exception: continue setattr(copied_layer, "output_shape", getattr(layer, "output_shape")) return layer_function(copied_layer) else: return None def get_dict_keys(d): """Return keys of a dictionary""" return [key for key in d] def merge_two_disjunct_dicts(x, y): """Given two dicts, merge them into a new dict as a shallow copy. No Key is allowed to be present in both dictionaries. 
""" assert set(get_dict_keys(x)).isdisjoint(get_dict_keys(y)) z = x.copy() z.update(y) return z def is_ascii(some_string): """Check if a string only contains ascii characters""" try: some_string.encode('ascii') except UnicodeEncodeError: return False else: return True def get_all_weights(model, prefix): """Serialize all weights of the models layers""" show_layer_functions = get_layer_functions_dict() result = {} layers = model.layers assert K.image_data_format() == 'channels_last' for layer in layers: layer_type = type(layer).__name__ name = prefix + layer.name assert is_ascii(name) if name in result: raise ValueError('duplicate layer name ' + name) if layer_type in ['Model', 'Sequential', 'Functional']: result = merge_two_disjunct_dicts(result, get_all_weights(layer, name + '_')) else: if hasattr(layer, 'data_format'): if layer_type in ['AveragePooling1D', 'MaxPooling1D', 'AveragePooling2D', 'MaxPooling2D', 'GlobalAveragePooling1D', 'GlobalMaxPooling1D', 'GlobalAveragePooling2D', 'GlobalMaxPooling2D']: assert layer.data_format == 'channels_last' or layer.data_format == 'channels_first' else: assert layer.data_format == 'channels_last' show_func = show_layer_functions.get(layer_type, None) shown_layer = None if show_func: shown_layer = show_func(layer) if shown_layer: result[name] = shown_layer if show_func and layer_type == 'TimeDistributed': if name not in result: result[name] = {} result[name]['td_input_len'] = encode_floats(np.array([len(layer.input_shape) - 1], dtype=np.float32)) result[name]['td_output_len'] = encode_floats(np.array([len(layer.output_shape) - 1], dtype=np.float32)) return result def get_model_name(model): """Return .name or ._name or 'dummy_model_name'""" if hasattr(model, 'name'): return model.name if hasattr(model, '_name'): return model._name return 'dummy_model_name' def convert_sequential_to_model(model): """Convert a sequential model to the underlying functional format""" if type(model).__name__ == 'Sequential': name = get_model_name(model) if hasattr(model, '_inbound_nodes'): inbound_nodes = model._inbound_nodes elif hasattr(model, 'inbound_nodes'): inbound_nodes = model.inbound_nodes else: raise ValueError('can not get (_)inbound_nodes from model') input_layer = Input(batch_shape=model.layers[0].input_shape) prev_layer = input_layer for layer in model.layers: layer._inbound_nodes = [] prev_layer = layer(prev_layer) funcmodel = Model([input_layer], [prev_layer], name=name) model = funcmodel if hasattr(model, '_inbound_nodes'): model._inbound_nodes = inbound_nodes elif hasattr(model, 'inbound_nodes'): model.inbound_nodes = inbound_nodes assert model.layers for i in range(len(model.layers)): layer_type = type(model.layers[i]).__name__ if layer_type in ['Model', 'Sequential', 'Functional']: # "model.layers[i] = ..." would not overwrite the layer. 
model._layers[i] = convert_sequential_to_model(model.layers[i]) return model def offset_conv2d_eval(depth, padding, x): """Perform a conv2d on x with a given padding""" kernel = K.variable(value=np.array([[[[1]] + [[0]] * (depth - 1)]]), dtype='float32') return K.conv2d(x, kernel, strides=(3, 3), padding=padding) def offset_sep_conv2d_eval(depth, padding, x): """Perform a separable conv2d on x with a given padding""" depthwise_kernel = K.variable(value=np.array([[[[1]] * depth]]), dtype='float32') pointwise_kernel = K.variable(value=np.array([[[[1]] + [[0]] * (depth - 1)]]), dtype='float32') return K.separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(3, 3), padding=padding) def conv2d_offset_max_pool_eval(_, padding, x): """Perform a max pooling operation on x""" return K.pool2d(x, (1, 1), strides=(3, 3), padding=padding, pool_mode='max') def conv2d_offset_average_pool_eval(_, padding, x): """Perform an average pooling operation on x""" return K.pool2d(x, (1, 1), strides=(3, 3), padding=padding, pool_mode='avg') def check_operation_offset(depth, eval_f, padding): """Check if backend used an offset while placing the filter e.g. during a convolution. TensorFlow is inconsistent in doing so depending on the type of operation, the used device (CPU/GPU) and the input depth. """ in_arr = np.array([[[[i] * depth for i in range(6)]]]) input_data = K.variable(value=in_arr, dtype='float32') output = eval_f(depth, padding, input_data) result = K.eval(output).flatten().tolist() assert result in [[0, 3], [1, 4]] return result == [1, 4] def get_shapes(tensors): """Return shapes of a list of tensors""" return [t['shape'] for t in tensors] def calculate_hash(model): layers = model.layers hash_m = hashlib.sha256() for layer in layers: for weights in layer.get_weights(): assert isinstance(weights, np.ndarray) hash_m.update(weights.tobytes()) hash_m.update(layer.name.encode('ascii')) return hash_m.hexdigest() def as_list(value_or_values): """Leave lists untouched, convert non-list types to a singleton list""" if isinstance(value_or_values, list): return value_or_values return [value_or_values] def singleton_list_to_value(value_or_values): """ Leaves non-list values untouched. Raises an Exception in case the input list does not have exactly one element. """ if isinstance(value_or_values, list): assert len(value_or_values) == 1 return value_or_values[0] return value_or_values def model_to_fdeep_json(model, no_tests=False): """Convert any Keras model to the frugally-deep model format.""" # Force creation of underlying functional model. # see: https://github.com/fchollet/keras/issues/8136 # Loss and optimizer type do not matter, since we do not train the model. 
model.compile(loss='mse', optimizer='sgd') model = convert_sequential_to_model(model) test_data = None if no_tests else gen_test_data(model) json_output = {} print('Converting model architecture.') json_output['architecture'] = json.loads(model.to_json()) json_output['image_data_format'] = K.image_data_format() json_output['input_shapes'] = list(map(get_layer_input_shape_tensor_shape, get_model_input_layers(model))) json_output['output_shapes'] = list(map(keras_shape_to_fdeep_tensor_shape, as_list(model.output_shape))) if test_data: json_output['tests'] = [test_data] print('Converting model weights.') json_output['trainable_params'] = get_all_weights(model, '') print('Done converting model weights.') print('Calculating model hash.') json_output['hash'] = calculate_hash(model) print('Model conversion finished.') return json_output def convert(in_path, out_path, no_tests=False): """Convert any (h5-)stored Keras model to the frugally-deep model format.""" print('loading {}'.format(in_path)) model = load_model(in_path) json_output = model_to_fdeep_json(model, no_tests) print('writing {}'.format(out_path)) write_text_file(out_path, json.dumps( json_output, allow_nan=False, indent=2, sort_keys=True)) def main(): """Parse command line and convert model.""" usage = 'usage: [Keras model in HDF5 format] [output path] (--no-tests)' # todo: Use ArgumentParser instead. if len(sys.argv) not in [3, 4]: print(usage) sys.exit(1) in_path = sys.argv[1] out_path = sys.argv[2] no_tests = False if len(sys.argv) == 4: if sys.argv[3] not in ['--no-tests']: print(usage) sys.exit(1) if sys.argv[3] == '--no-tests': no_tests = True convert(in_path, out_path, no_tests) if __name__ == "__main__": main()
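The `prepare_filter_weights_*` helpers above are pure axis permutations. A quick check, with an arbitrary kernel shape, of the Conv2D reordering from Keras's (height, width, in_channels, out_channels) layout to the out-channels-first layout fdeep expects:

import numpy as np

w = np.zeros((3, 3, 8, 16), dtype=np.float32)  # (h, w, in, out) in Keras
reordered = np.moveaxis(w, [0, 1, 2, 3], [1, 2, 3, 0])
print(reordered.shape)  # (16, 3, 3, 8): output channels lead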
[ "tensorflow.keras.models.load_model", "numpy.split", "tensorflow.keras.backend.conv2d", "tensorflow.keras.layers.Input", "tensorflow.keras.backend.image_data_format", "tensorflow.keras.models.Model", "tensorflow.keras.backend.get_value", "numpy.hsplit", "tensorflow.keras.backend.separable_conv2d", "tensorflow.keras.backend.eval", "numpy.random.randint", "tensorflow.keras.backend.pool2d", "numpy.moveaxis", "numpy.array", "tensorflow.keras.backend.variable" ]
ModelCreation/fdeepConvertModel.py
[(101, 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), False, 'import datetime\n'), (103, 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), False, 'import datetime\n'), (313, 'tensorflow.keras.backend.get_value', 'K.get_value', (['layer.moving_mean'], {}), True, 'from tensorflow.keras import backend as K\n'), (314, 'tensorflow.keras.backend.get_value', 'K.get_value', (['layer.moving_variance'], {}), True, 'from tensorflow.keras import backend as K\n'), (676, 'tensorflow.keras.backend.conv2d', 'K.conv2d', (['x', 'kernel'], {'strides': '(3, 3)', 'padding': 'padding'}), True, 'from tensorflow.keras import backend as K\n'), (685, 'tensorflow.keras.backend.separable_conv2d', 'K.separable_conv2d', (['x', 'depthwise_kernel', 'pointwise_kernel'], {'strides': '(3, 3)', 'padding': 'padding'}), True, 'from tensorflow.keras import backend as K\n'), (691, 'tensorflow.keras.backend.pool2d', 'K.pool2d', (['x', '(1, 1)'], {'strides': '(3, 3)', 'padding': 'padding', 'pool_mode': '"""max"""'}), True, 'from tensorflow.keras import backend as K\n'), (696, 'tensorflow.keras.backend.pool2d', 'K.pool2d', (['x', '(1, 1)'], {'strides': '(3, 3)', 'padding': 'padding', 'pool_mode': '"""avg"""'}), True, 'from tensorflow.keras import backend as K\n'), (706, 'tensorflow.keras.backend.variable', 'K.variable', ([], {'value': 'in_arr', 'dtype': '"""float32"""'}), True, 'from tensorflow.keras import backend as K\n'), (720, 'hashlib.sha256', 'hashlib.sha256', ([], {}), False, 'import hashlib\n'), (762, 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), True, 'from tensorflow.keras import backend as K\n'), (784, 'tensorflow.keras.models.load_model', 'load_model', (['in_path'], {}), False, 'from tensorflow.keras.models import Model, load_model\n'), (56, 'numpy.split', 'np.split', (['bias', '(2)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (319, 'tensorflow.keras.backend.get_value', 'K.get_value', (['layer.beta'], {}), True, 'from tensorflow.keras import backend as K\n'), (322, 'tensorflow.keras.backend.get_value', 'K.get_value', (['layer.gamma'], {}), True, 'from tensorflow.keras import backend as K\n'), (600, 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), True, 'from tensorflow.keras import backend as K\n'), (652, 'tensorflow.keras.layers.Input', 'Input', ([], {'batch_shape': 'model.layers[0].input_shape'}), False, 'from tensorflow.keras.layers import Input, Embedding\n'), (657, 'tensorflow.keras.models.Model', 'Model', (['[input_layer]', '[prev_layer]'], {'name': 'name'}), False, 'from tensorflow.keras.models import Model, load_model\n'), (787, 'json.dumps', 'json.dumps', (['json_output'], {'allow_nan': '(False)', 'indent': '(2)', 'sort_keys': '(True)'}), False, 'import json\n'), (799, 'sys.exit', 'sys.exit', (['(1)'], {}), False, 'import sys\n'), (213, 'numpy.moveaxis', 'np.moveaxis', (['weights', '[0, 1, 2, 3]', '[1, 2, 3, 0]'], {}), True, 'import numpy as np\n'), (219, 'numpy.moveaxis', 'np.moveaxis', (['weights', '[0, 1, 2, 3]', '[1, 2, 0, 3]'], {}), True, 'import numpy as np\n'), (225, 'numpy.moveaxis', 'np.moveaxis', (['weights', '[0, 1, 2]', '[1, 2, 0]'], {}), True, 'import numpy as np\n'), (674, 'numpy.array', 'np.array', (['[[[[1]] + [[0]] * (depth - 1)]]'], {}), True, 'import numpy as np\n'), (681, 'numpy.array', 'np.array', (['[[[[1]] * depth]]'], {}), True, 'import numpy as np\n'), (683, 'numpy.array', 'np.array', (['[[[[1]] + [[0]] * (depth - 1)]]'], {}), True, 'import numpy as np\n'), (808, 'sys.exit', 'sys.exit', 
(['(1)'], {}), False, 'import sys\n'), (158, 'numpy.random.randint', 'np.random.randint', (['(0)', 'input_layer._outbound_nodes[0].outbound_layer.input_dim', 'size'], {}), True, 'import numpy as np\n'), (51, 'numpy.hsplit', 'np.hsplit', (['kernels', 'n_gates'], {}), True, 'import numpy as np\n'), (207, 'base64.b64encode', 'base64.b64encode', (['arr'], {}), False, 'import base64\n'), (708, 'tensorflow.keras.backend.eval', 'K.eval', (['output'], {}), True, 'from tensorflow.keras import backend as K\n')]
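`encode_floats` above serializes the raw float32 bytes as base64 and then chunks the ASCII string into 1024-character pieces. A round-trip sketch with two sample values (NumPy arrays expose the buffer protocol, so `base64.b64encode` accepts them directly, as the converter itself relies on):

import base64
import numpy as np

arr = np.array([1.0, 2.5], dtype=np.float32)
encoded = base64.b64encode(arr).decode('ascii')
decoded = np.frombuffer(base64.b64decode(encoded), dtype=np.float32)
print(decoded)  # [1.  2.5]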
kokimishev/TensorflowTTS
234db13ccf0eb53bd7373c409514fb092588a755
# -*- coding: utf-8 -*- # Copyright 2020 The FastSpeech Authors, The HuggingFace Inc. team and Minh Nguyen (@dathudeptrai) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tensorflow Model modules for FastSpeech.""" import numpy as np import tensorflow as tf def get_initializer(initializer_range=0.02): """Creates a `tf.initializers.truncated_normal` with the given range. Args: initializer_range: float, initializer range for stddev. Returns: TruncatedNormal initializer with stddev = `initializer_range`. """ return tf.keras.initializers.TruncatedNormal(stddev=initializer_range) def gelu(x): """Gaussian Error Linear unit.""" cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0))) return x * cdf def gelu_new(x): """Smoother gaussian Error Linear Unit.""" cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) return x * cdf def swish(x): """Swish activation function.""" return x * tf.sigmoid(x) def mish(x): return x * tf.math.tanh(tf.math.softplus(x)) ACT2FN = { "identity": tf.keras.layers.Activation("linear"), "tanh": tf.keras.layers.Activation("tanh"), "gelu": tf.keras.layers.Activation(gelu), "relu": tf.keras.activations.relu, "swish": tf.keras.layers.Activation(swish), "gelu_new": tf.keras.layers.Activation(gelu_new), "mish": tf.keras.layers.Activation(mish), } class TFFastSpeechEmbeddings(tf.keras.layers.Layer): """Construct charactor/phoneme/positional/speaker embeddings.""" def __init__(self, config, **kwargs): """Init variables.""" super().__init__(**kwargs) self.vocab_size = config.vocab_size self.hidden_size = config.hidden_size self.initializer_range = config.initializer_range self.config = config self.position_embeddings = tf.keras.layers.Embedding( config.max_position_embeddings + 1, config.hidden_size, weights=[self._sincos_embedding()], name="position_embeddings", trainable=False, ) if config.n_speakers > 1: self.encoder_speaker_embeddings = tf.keras.layers.Embedding( config.n_speakers, config.hidden_size, embeddings_initializer=get_initializer(self.initializer_range), name="speaker_embeddings", ) self.speaker_fc = tf.keras.layers.Dense( units=config.hidden_size, name="speaker_fc" ) def build(self, input_shape): """Build shared charactor/phoneme embedding layers.""" with tf.name_scope("charactor_embeddings"): self.charactor_embeddings = self.add_weight( "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) super().build(input_shape) def call(self, inputs, training=False): """Get charactor embeddings of inputs. Args: 1. charactor, Tensor (int32) shape [batch_size, length]. 2. speaker_id, Tensor (int32) shape [batch_size] Returns: Tensor (float32) shape [batch_size, length, embedding_size]. 
""" return self._embedding(inputs, training=training) def _embedding(self, inputs, training=False): """Applies embedding based on inputs tensor.""" input_ids, speaker_ids = inputs input_shape = tf.shape(input_ids) seq_length = input_shape[1] position_ids = tf.range(1, seq_length + 1, dtype=tf.int32)[tf.newaxis, :] # create embeddings inputs_embeds = tf.gather(self.charactor_embeddings, input_ids) position_embeddings = self.position_embeddings(position_ids) # sum embedding embeddings = inputs_embeds + position_embeddings if self.config.n_speakers > 1: speaker_embeddings = self.encoder_speaker_embeddings(speaker_ids) speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings)) # extended speaker embeddings extended_speaker_features = speaker_features[:, tf.newaxis, :] embeddings += extended_speaker_features return embeddings def _sincos_embedding(self): position_enc = np.array( [ [ pos / np.power(10000, 2.0 * (i // 2) / self.hidden_size) for i in range(self.hidden_size) ] for pos in range(self.config.max_position_embeddings + 1) ] ) position_enc[:, 0::2] = np.sin(position_enc[:, 0::2]) position_enc[:, 1::2] = np.cos(position_enc[:, 1::2]) # pad embedding. position_enc[0] = 0.0 return position_enc class TFFastSpeechSelfAttention(tf.keras.layers.Layer): """Self attention module for fastspeech.""" def __init__(self, config, **kwargs): """Init variables.""" super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.output_attentions = config.output_attentions self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query", ) self.key = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key", ) self.value = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value", ) self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x, batch_size): """Transpose to calculate attention scores.""" x = tf.reshape( x, (batch_size, -1, self.num_attention_heads, self.attention_head_size) ) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, inputs, training=False): """Call logic.""" hidden_states, attention_mask = inputs batch_size = tf.shape(hidden_states)[0] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(tf.shape(key_layer)[-1], tf.float32) # scale attention_scores attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # extended_attention_masks for self attention encoder. 
extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] extended_attention_mask = tf.cast(extended_attention_mask, tf.float32) extended_attention_mask = (1.0 - extended_attention_mask) * -1e9 attention_scores = attention_scores + extended_attention_mask # Normalize the attention scores to probabilities. attention_probs = tf.nn.softmax(attention_scores, axis=-1) attention_probs = self.dropout(attention_probs, training=training) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape(context_layer, (batch_size, -1, self.all_head_size)) outputs = ( (context_layer, attention_probs) if self.output_attentions else (context_layer,) ) return outputs class TFFastSpeechSelfOutput(tf.keras.layers.Layer): """Fastspeech output of self attention module.""" def __init__(self, config, **kwargs): """Init variables.""" super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name="LayerNorm" ) self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, inputs, training=False): """Call logic.""" hidden_states, input_tensor = inputs hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TFFastSpeechAttention(tf.keras.layers.Layer): """Fastspeech attention module.""" def __init__(self, config, **kwargs): """Init variables.""" super().__init__(**kwargs) self.self_attention = TFFastSpeechSelfAttention(config, name="self") self.dense_output = TFFastSpeechSelfOutput(config, name="output") def call(self, inputs, training=False): input_tensor, attention_mask = inputs self_outputs = self.self_attention( [input_tensor, attention_mask], training=training ) attention_output = self.dense_output( [self_outputs[0], input_tensor], training=training ) masked_attention_output = attention_output * tf.cast( tf.expand_dims(attention_mask, 2), dtype=tf.float32 ) outputs = (masked_attention_output,) + self_outputs[ 1: ] # add attentions if we output them return outputs class TFFastSpeechIntermediate(tf.keras.layers.Layer): """Intermediate representation module.""" def __init__(self, config, **kwargs): """Init variables.""" super().__init__(**kwargs) self.conv1d_1 = tf.keras.layers.Conv1D( config.intermediate_size, kernel_size=config.intermediate_kernel_size, kernel_initializer=get_initializer(config.initializer_range), padding="same", name="conv1d_1", ) self.conv1d_2 = tf.keras.layers.Conv1D( config.hidden_size, kernel_size=config.intermediate_kernel_size, kernel_initializer=get_initializer(config.initializer_range), padding="same", name="conv1d_2", ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def call(self, inputs): """Call logic.""" hidden_states, attention_mask = inputs hidden_states = self.conv1d_1(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.conv1d_2(hidden_states) masked_hidden_states = hidden_states * tf.cast( tf.expand_dims(attention_mask, 2), dtype=tf.float32 ) return masked_hidden_states class TFFastSpeechOutput(tf.keras.layers.Layer): """Output module.""" def __init__(self, config, **kwargs): """Init variables.""" 
super().__init__(**kwargs) self.LayerNorm = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name="LayerNorm" ) self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, inputs, training=False): """Call logic.""" hidden_states, input_tensor = inputs hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TFFastSpeechLayer(tf.keras.layers.Layer): """Fastspeech module (FFT module on the paper).""" def __init__(self, config, **kwargs): """Init variables.""" super().__init__(**kwargs) self.attention = TFFastSpeechAttention(config, name="attention") self.intermediate = TFFastSpeechIntermediate(config, name="intermediate") self.bert_output = TFFastSpeechOutput(config, name="output") def call(self, inputs, training=False): """Call logic.""" hidden_states, attention_mask = inputs attention_outputs = self.attention( [hidden_states, attention_mask], training=training ) attention_output = attention_outputs[0] intermediate_output = self.intermediate( [attention_output, attention_mask], training=training ) layer_output = self.bert_output( [intermediate_output, attention_output], training=training ) masked_layer_output = layer_output * tf.cast( tf.expand_dims(attention_mask, 2), dtype=tf.float32 ) outputs = (masked_layer_output,) + attention_outputs[ 1: ] # add attentions if we output them return outputs class TFFastSpeechEncoder(tf.keras.layers.Layer): """Fast Speech encoder module.""" def __init__(self, config, **kwargs): """Init variables.""" super().__init__(**kwargs) self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.layer = [ TFFastSpeechLayer(config, name="layer_._{}".format(i)) for i in range(config.num_hidden_layers) ] def call(self, inputs, training=False): """Call logic.""" hidden_states, attention_mask = inputs all_hidden_states = () all_attentions = () for _, layer_module in enumerate(self.layer): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( [hidden_states, attention_mask], training=training ) hidden_states = layer_outputs[0] if self.output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) return outputs # outputs, (hidden states), (attentions) class TFFastSpeechDecoder(TFFastSpeechEncoder): """Fast Speech decoder module.""" def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.config = config # create decoder positional embedding self.decoder_positional_embeddings = tf.keras.layers.Embedding( config.max_position_embeddings + 1, config.hidden_size, weights=[self._sincos_embedding()], name="position_embeddings", trainable=False, ) if config.n_speakers > 1: self.decoder_speaker_embeddings = tf.keras.layers.Embedding( config.n_speakers, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name="speaker_embeddings", ) self.speaker_fc = tf.keras.layers.Dense( units=config.hidden_size, name="speaker_fc" ) def call(self, inputs, training=False): hidden_states, speaker_ids, encoder_mask, decoder_pos = inputs # calculate new hidden states. 
        hidden_states = hidden_states + self.decoder_positional_embeddings(decoder_pos)

        if self.config.n_speakers > 1:
            speaker_embeddings = self.decoder_speaker_embeddings(speaker_ids)
            speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))
            # extended speaker embeddings
            extended_speaker_features = speaker_features[:, tf.newaxis, :]
            hidden_states += extended_speaker_features

        return super().call([hidden_states, encoder_mask], training=training)

    def _sincos_embedding(self):
        position_enc = np.array(
            [
                [
                    pos / np.power(10000, 2.0 * (i // 2) / self.config.hidden_size)
                    for i in range(self.config.hidden_size)
                ]
                for pos in range(self.config.max_position_embeddings + 1)
            ]
        )

        position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])
        position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])

        # pad embedding.
        position_enc[0] = 0.0

        return position_enc


class TFTacotronPostnet(tf.keras.layers.Layer):
    """Tacotron-2 postnet."""

    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.conv_batch_norm = []
        for i in range(config.n_conv_postnet):
            conv = tf.keras.layers.Conv1D(
                filters=config.postnet_conv_filters
                if i < config.n_conv_postnet - 1
                else config.num_mels,
                kernel_size=config.postnet_conv_kernel_sizes,
                padding="same",
                name="conv_._{}".format(i),
            )
            batch_norm = tf.keras.layers.BatchNormalization(
                name="batch_norm_._{}".format(i)
            )
            self.conv_batch_norm.append((conv, batch_norm))
        self.dropout = tf.keras.layers.Dropout(
            rate=config.postnet_dropout_rate, name="dropout"
        )
        self.activation = [tf.nn.tanh] * (config.n_conv_postnet - 1) + [tf.identity]

    def call(self, inputs, training=False):
        """Call logic."""
        outputs, mask = inputs
        extended_mask = tf.cast(tf.expand_dims(mask, axis=2), tf.float32)
        for i, (conv, bn) in enumerate(self.conv_batch_norm):
            outputs = conv(outputs)
            outputs = bn(outputs)
            outputs = self.activation[i](outputs)
            outputs = self.dropout(outputs, training=training)
        return outputs * extended_mask


class TFFastSpeechDurationPredictor(tf.keras.layers.Layer):
    """FastSpeech duration predictor module."""

    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.conv_layers = []
        for i in range(config.num_duration_conv_layers):
            self.conv_layers.append(
                tf.keras.layers.Conv1D(
                    config.duration_predictor_filters,
                    config.duration_predictor_kernel_sizes,
                    padding="same",
                    name="conv_._{}".format(i),
                )
            )
            self.conv_layers.append(
                tf.keras.layers.LayerNormalization(
                    epsilon=config.layer_norm_eps, name="LayerNorm_._{}".format(i)
                )
            )
            self.conv_layers.append(tf.keras.layers.Activation(tf.nn.relu6))
            self.conv_layers.append(
                tf.keras.layers.Dropout(config.duration_predictor_dropout_probs)
            )
        self.conv_layers_sequence = tf.keras.Sequential(self.conv_layers)
        self.output_layer = tf.keras.layers.Dense(1)

    def call(self, inputs, training=False):
        """Call logic."""
        encoder_hidden_states, attention_mask = inputs
        attention_mask = tf.cast(tf.expand_dims(attention_mask, 2), tf.float32)

        # mask encoder hidden states
        masked_encoder_hidden_states = encoder_hidden_states * attention_mask

        # pass through the conv stack
        outputs = self.conv_layers_sequence(masked_encoder_hidden_states)
        outputs = self.output_layer(outputs)
        masked_outputs = outputs * attention_mask
        return tf.squeeze(tf.nn.relu6(masked_outputs), -1)  # relu6 keeps values non-negative.
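# Illustrative aside (a minimal sketch of the masking pattern used in the
# attention modules above, with made-up values): a 0/1 padding mask becomes an
# additive bias of -1e9 on padded positions, which drives their post-softmax
# probability to ~0.
#
#   mask = tf.constant([[1.0, 1.0, 0.0]])            # last position is padding
#   bias = (1.0 - mask) * -1e9                       # [[0., 0., -1e9]]
#   probs = tf.nn.softmax(tf.zeros([1, 3]) + bias)   # -> [[0.5, 0.5, 0.0]]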
class TFFastSpeechLengthRegulator(tf.keras.layers.Layer):
    """FastSpeech length regulator module."""

    def __init__(self, config, enable_tflite_convertible=False, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.config = config
        self.enable_tflite_convertible = enable_tflite_convertible

    def call(self, inputs, training=False):
        """Call logic.

        Args:
            1. encoder_hidden_states, Tensor (float32) shape [batch_size, length, hidden_size]
            2. durations_gt, Tensor (float32/int32) shape [batch_size, length]
        """
        encoder_hidden_states, durations_gt = inputs
        outputs, encoder_masks = self._length_regulator(
            encoder_hidden_states, durations_gt
        )
        return outputs, encoder_masks

    def _length_regulator(self, encoder_hidden_states, durations_gt):
        """Length regulator logic."""
        sum_durations = tf.reduce_sum(durations_gt, axis=-1)  # [batch_size]
        max_durations = tf.reduce_max(sum_durations)

        input_shape = tf.shape(encoder_hidden_states)
        batch_size = input_shape[0]
        hidden_size = input_shape[-1]

        # initialize output hidden states and encoder masking.
        if self.enable_tflite_convertible:
            # The batch size is 1 at inference time, so we don't have to use
            # a `tf.while_loop` with a 3-D output tensor.
            repeats = durations_gt[0]
            real_length = tf.reduce_sum(repeats)
            pad_size = max_durations - real_length
            # masks : [max_durations]
            masks = tf.sequence_mask([real_length], max_durations, dtype=tf.int32)
            repeat_encoder_hidden_states = tf.repeat(
                encoder_hidden_states[0], repeats=repeats, axis=0
            )
            repeat_encoder_hidden_states = tf.expand_dims(
                tf.pad(repeat_encoder_hidden_states, [[0, pad_size], [0, 0]]), 0
            )  # [1, max_durations, hidden_size]

            outputs = repeat_encoder_hidden_states
            encoder_masks = masks
        else:
            outputs = tf.zeros(shape=[0, max_durations, hidden_size], dtype=tf.float32)
            encoder_masks = tf.zeros(shape=[0, max_durations], dtype=tf.int32)

            def condition(
                i,
                batch_size,
                outputs,
                encoder_masks,
                encoder_hidden_states,
                durations_gt,
                max_durations,
            ):
                return tf.less(i, batch_size)

            def body(
                i,
                batch_size,
                outputs,
                encoder_masks,
                encoder_hidden_states,
                durations_gt,
                max_durations,
            ):
                repeats = durations_gt[i]
                real_length = tf.reduce_sum(repeats)
                pad_size = max_durations - real_length
                masks = tf.sequence_mask([real_length], max_durations, dtype=tf.int32)
                repeat_encoder_hidden_states = tf.repeat(
                    encoder_hidden_states[i], repeats=repeats, axis=0
                )
                repeat_encoder_hidden_states = tf.expand_dims(
                    tf.pad(repeat_encoder_hidden_states, [[0, pad_size], [0, 0]]), 0
                )  # [1, max_durations, hidden_size]
                outputs = tf.concat([outputs, repeat_encoder_hidden_states], axis=0)
                encoder_masks = tf.concat([encoder_masks, masks], axis=0)
                return [
                    i + 1,
                    batch_size,
                    outputs,
                    encoder_masks,
                    encoder_hidden_states,
                    durations_gt,
                    max_durations,
                ]

            # initialize iteration i.
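            # Note: the tf.while_loop below needs explicit shape_invariants
            # because `outputs` and `encoder_masks` grow along the batch axis
            # on every iteration (via tf.concat), so their shapes are not
            # loop-invariant.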
            i = tf.constant(0, dtype=tf.int32)
            _, _, outputs, encoder_masks, _, _, _ = tf.while_loop(
                condition,
                body,
                [
                    i,
                    batch_size,
                    outputs,
                    encoder_masks,
                    encoder_hidden_states,
                    durations_gt,
                    max_durations,
                ],
                shape_invariants=[
                    i.get_shape(),
                    batch_size.get_shape(),
                    tf.TensorShape([None, None, self.config.hidden_size]),
                    tf.TensorShape([None, None]),
                    encoder_hidden_states.get_shape(),
                    durations_gt.get_shape(),
                    max_durations.get_shape(),
                ],
            )

        return outputs, encoder_masks


class TFFastSpeech(tf.keras.Model):
    """TF FastSpeech module."""

    def __init__(self, config, **kwargs):
        """Init layers for fastspeech."""
        super().__init__(**kwargs)
        self.embeddings = TFFastSpeechEmbeddings(config, name="embeddings")
        self.encoder = TFFastSpeechEncoder(config, name="encoder")
        self.duration_predictor = TFFastSpeechDurationPredictor(
            config, name="duration_predictor"
        )
        self.length_regulator = TFFastSpeechLengthRegulator(
            config, name="length_regulator"
        )
        self.decoder = TFFastSpeechDecoder(config, name="decoder")
        self.mel_dense = tf.keras.layers.Dense(units=config.num_mels, name="mel_before")
        self.postnet = TFTacotronPostnet(config=config, name="postnet")

    def _build(self):
        """Dummy input for building model."""
        # fake inputs
        input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], tf.int32)
        attention_mask = tf.convert_to_tensor(
            [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32
        )
        speaker_ids = tf.convert_to_tensor([0], tf.int32)
        duration_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32)
        self(input_ids, attention_mask, speaker_ids, duration_gts)

    def call(
        self, input_ids, attention_mask, speaker_ids, duration_gts, training=False
    ):
        """Call logic."""
        embedding_output = self.embeddings([input_ids, speaker_ids], training=training)
        encoder_output = self.encoder(
            [embedding_output, attention_mask], training=training
        )
        last_encoder_hidden_states = encoder_output[0]

        # The duration predictor consumes last_encoder_hidden_states here; you
        # could feed it more encoder hidden-state layers than just the last one.
        duration_outputs = self.duration_predictor(
            [last_encoder_hidden_states, attention_mask]
        )  # [batch_size, length]

        length_regulator_outputs, encoder_masks = self.length_regulator(
            [last_encoder_hidden_states, duration_gts], training=training
        )

        # create decoder positional embedding
        decoder_pos = tf.range(
            1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32
        )
        masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks

        decoder_output = self.decoder(
            [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
            training=training,
        )
        last_decoder_hidden_states = decoder_output[0]

        # You could also sum or concatenate hidden states from more than one
        # decoder layer here.
        mel_before = self.mel_dense(last_decoder_hidden_states)
        mel_after = (
            self.postnet([mel_before, encoder_masks], training=training) + mel_before
        )

        outputs = (mel_before, mel_after, duration_outputs)
        return outputs

    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[
            tf.TensorSpec(shape=[None, None], dtype=tf.int32),
            tf.TensorSpec(shape=[None, None], dtype=tf.bool),
            tf.TensorSpec(shape=[None,], dtype=tf.int32),
            tf.TensorSpec(shape=[None,], dtype=tf.float32),
        ],
    )
    def inference(self, input_ids, attention_mask, speaker_ids, speed_ratios):
        """Call logic."""
        embedding_output = self.embeddings([input_ids, speaker_ids], training=False)
        encoder_output = self.encoder(
            [embedding_output, attention_mask], training=False
        )
        last_encoder_hidden_states = encoder_output[0]

        # The duration predictor consumes last_encoder_hidden_states here; you
        # could feed it more encoder hidden-state layers than just the last one.
        duration_outputs = self.duration_predictor(
            [last_encoder_hidden_states, attention_mask]
        )  # [batch_size, length]
        duration_outputs = tf.math.exp(duration_outputs) - 1.0

        if speed_ratios is None:
            speed_ratios = tf.convert_to_tensor(np.array([1.0]), dtype=tf.float32)

        duration_outputs = tf.cast(
            tf.math.round(duration_outputs * speed_ratios), tf.int32
        )

        length_regulator_outputs, encoder_masks = self.length_regulator(
            [last_encoder_hidden_states, duration_outputs], training=False
        )

        # create decoder positional embedding
        decoder_pos = tf.range(
            1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32
        )
        masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks

        decoder_output = self.decoder(
            [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
            training=False,
        )
        last_decoder_hidden_states = decoder_output[0]

        # You could also sum or concatenate hidden states from more than one
        # decoder layer here.
        mel_before = self.mel_dense(last_decoder_hidden_states)
        mel_after = (
            self.postnet([mel_before, encoder_masks], training=False) + mel_before
        )

        outputs = (mel_before, mel_after, duration_outputs)
        return outputs

    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[
            tf.TensorSpec(shape=[1, None], dtype=tf.int32),
            tf.TensorSpec(shape=[1, None], dtype=tf.bool),
            tf.TensorSpec(shape=[1,], dtype=tf.int32),
            tf.TensorSpec(shape=[1,], dtype=tf.float32),
        ],
    )
    def inference_tflite(self, input_ids, attention_mask, speaker_ids, speed_ratios):
        """Call logic."""
        embedding_output = self.embeddings([input_ids, speaker_ids], training=False)
        encoder_output = self.encoder(
            [embedding_output, attention_mask], training=False
        )
        last_encoder_hidden_states = encoder_output[0]

        # The duration predictor consumes last_encoder_hidden_states here; you
        # could feed it more encoder hidden-state layers than just the last one.
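        # Note: the predictor's output is on a log scale; the
        # tf.math.exp(duration_outputs) - 1.0 below inverts a log(1 + d)
        # encoding, recovering non-negative frame counts before rounding.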
        duration_outputs = self.duration_predictor(
            [last_encoder_hidden_states, attention_mask]
        )  # [batch_size, length]
        duration_outputs = tf.math.exp(duration_outputs) - 1.0

        if speed_ratios is None:
            speed_ratios = tf.convert_to_tensor(np.array([1.0]), dtype=tf.float32)

        duration_outputs = tf.cast(
            tf.math.round(duration_outputs * speed_ratios), tf.int32
        )

        length_regulator_outputs, encoder_masks = self.length_regulator(
            [last_encoder_hidden_states, duration_outputs], training=False
        )

        # create decoder positional embedding
        decoder_pos = tf.range(
            1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32
        )
        masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks

        decoder_output = self.decoder(
            [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
            training=False,
        )
        last_decoder_hidden_states = decoder_output[0]

        # You could also sum or concatenate hidden states from more than one
        # decoder layer here.
        mel_before = self.mel_dense(last_decoder_hidden_states)
        mel_after = (
            self.postnet([mel_before, encoder_masks], training=False) + mel_before
        )

        outputs = (mel_before, mel_after, duration_outputs)
        return outputs
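A minimal inference sketch for the model above (illustrative only: `FastSpeechConfig` stands in for the config object defined elsewhere in TensorFlowTTS, the token ids are arbitrary, and a real run would restore trained weights first):

import tensorflow as tf

config = FastSpeechConfig()  # hypothetical config instance (defined elsewhere)
model = TFFastSpeech(config, name="fastspeech")
model._build()  # builds the weights by running the dummy inputs above
# model.load_weights("fastspeech.h5")  # in practice, restore trained weights

input_ids = tf.convert_to_tensor([[13, 7, 42, 5, 2]], tf.int32)
attention_mask = tf.math.not_equal(input_ids, 0)  # bool mask, per the input_signature
speaker_ids = tf.convert_to_tensor([0], tf.int32)
speed_ratios = tf.convert_to_tensor([1.0], tf.float32)  # >1.0 stretches durations

mel_before, mel_after, durations = model.inference(
    input_ids, attention_mask, speaker_ids, speed_ratios
)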
[ "tensorflow.keras.layers.LayerNormalization", "tensorflow.convert_to_tensor", "tensorflow.concat", "numpy.sqrt", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.keras.Sequential", "tensorflow.pad", "numpy.sin", "tensorflow.gather", "tensorflow.math.softplus", "tensorflow.name_scope", "tensorflow.keras.initializers.TruncatedNormal", "tensorflow.matmul", "tensorflow.TensorShape", "tensorflow.shape", "tensorflow.less", "tensorflow.keras.layers.Dense", "numpy.power", "tensorflow.pow", "tensorflow.math.exp", "tensorflow.math.round", "numpy.array", "tensorflow.sequence_mask", "tensorflow.reduce_max", "tensorflow.nn.softmax", "tensorflow.keras.layers.Activation", "tensorflow.transpose", "tensorflow.range", "tensorflow.math.sqrt", "tensorflow.nn.relu6", "tensorflow.constant", "tensorflow.reshape", "tensorflow.sigmoid", "numpy.cos", "tensorflow.expand_dims", "tensorflow.repeat", "tensorflow.keras.layers.Dropout", "tensorflow.TensorSpec" ]
tensorflow_tts/models/fastspeech.py
[(31, 'tensorflow.keras.initializers.TruncatedNormal', 'tf.keras.initializers.TruncatedNormal', ([], {'stddev': 'initializer_range'}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""linear"""'], {}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""tanh"""'], {}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['gelu'], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['swish'], {}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['gelu_new'], {}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['mish'], {}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.sigmoid', 'tf.sigmoid', (['x'], {}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.shape', 'tf.shape', (['input_ids'], {}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.gather', 'tf.gather', (['self.charactor_embeddings', 'input_ids'], {}), True, 'import tensorflow as tf\n'), (153, 'numpy.sin', 'np.sin', (['position_enc[:, 0::2]'], {}), True, 'import numpy as np\n'), (154, 'numpy.cos', 'np.cos', (['position_enc[:, 1::2]'], {}), True, 'import numpy as np\n'), (194, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.attention_probs_dropout_prob'], {}), True, 'import tensorflow as tf\n'), (198, 'tensorflow.reshape', 'tf.reshape', (['x', '(batch_size, -1, self.num_attention_heads, self.attention_head_size)'], {}), True, 'import tensorflow as tf\n'), (201, 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[0, 2, 1, 3]'}), True, 'import tensorflow as tf\n'), (216, 'tensorflow.matmul', 'tf.matmul', (['query_layer', 'key_layer'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (228, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['attention_scores'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (231, 'tensorflow.matmul', 'tf.matmul', (['attention_probs', 'value_layer'], {}), True, 'import tensorflow as tf\n'), (232, 'tensorflow.transpose', 'tf.transpose', (['context_layer'], {'perm': '[0, 2, 1, 3]'}), True, 'import tensorflow as tf\n'), (233, 'tensorflow.reshape', 'tf.reshape', (['context_layer', '(batch_size, -1, self.all_head_size)'], {}), True, 'import tensorflow as tf\n'), (254, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': 'config.layer_norm_eps', 'name': '"""LayerNorm"""'}), True, 'import tensorflow as tf\n'), (257, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.hidden_dropout_prob'], {}), True, 'import tensorflow as tf\n'), (341, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': 'config.layer_norm_eps', 'name': '"""LayerNorm"""'}), True, 'import tensorflow as tf\n'), (344, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.hidden_dropout_prob'], {}), True, 'import tensorflow as tf\n'), (484, 'numpy.sin', 'np.sin', (['position_enc[:, 0::2]'], {}), True, 'import numpy as np\n'), (485, 'numpy.cos', 'np.cos', (['position_enc[:, 1::2]'], {}), True, 'import numpy as np\n'), (513, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'config.postnet_dropout_rate', 'name': '"""dropout"""'}), True, 'import tensorflow as 
tf\n'), (555, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['self.conv_layers'], {}), True, 'import tensorflow as tf\n'), (556, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), True, 'import tensorflow as tf\n'), (596, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['durations_gt'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (597, 'tensorflow.reduce_max', 'tf.reduce_max', (['sum_durations'], {}), True, 'import tensorflow as tf\n'), (599, 'tensorflow.shape', 'tf.shape', (['encoder_hidden_states'], {}), True, 'import tensorflow as tf\n'), (710, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'config.num_mels', 'name': '"""mel_before"""'}), True, 'import tensorflow as tf\n'), (716, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (717, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (720, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[0]', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (721, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.math.softplus', 'tf.math.softplus', (['x'], {}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'config.hidden_size', 'name': '"""speaker_fc"""'}), True, 'import tensorflow as tf\n'), (98, 'tensorflow.name_scope', 'tf.name_scope', (['"""charactor_embeddings"""'], {}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.range', 'tf.range', (['(1)', '(seq_length + 1)'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.shape', 'tf.shape', (['hidden_states'], {}), True, 'import tensorflow as tf\n'), (218, 'tensorflow.math.sqrt', 'tf.math.sqrt', (['dk'], {}), True, 'import tensorflow as tf\n'), (223, 'tensorflow.cast', 'tf.cast', (['extended_attention_mask', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (454, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'config.hidden_size', 'name': '"""speaker_fc"""'}), True, 'import tensorflow as tf\n'), (521, 'tensorflow.expand_dims', 'tf.expand_dims', (['mask'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (561, 'tensorflow.expand_dims', 'tf.expand_dims', (['attention_mask', '(2)'], {}), True, 'import tensorflow as tf\n'), (570, 'tensorflow.nn.relu6', 'tf.nn.relu6', (['masked_outputs'], {}), True, 'import tensorflow as tf\n'), (608, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['repeats'], {}), True, 'import tensorflow as tf\n'), (611, 'tensorflow.sequence_mask', 'tf.sequence_mask', (['[real_length]', 'max_durations'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (612, 'tensorflow.repeat', 'tf.repeat', (['encoder_hidden_states[0]'], {'repeats': 'repeats', 'axis': '(0)'}), True, 'import tensorflow as tf\n'), (622, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[0, max_durations, hidden_size]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (623, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[0, max_durations]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (668, 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (748, 'tensorflow.expand_dims', 'tf.expand_dims', (['decoder_pos', '(0)'], {}), 
True, 'import tensorflow as tf\n'), (787, 'tensorflow.math.exp', 'tf.math.exp', (['duration_outputs'], {}), True, 'import tensorflow as tf\n'), (793, 'tensorflow.math.round', 'tf.math.round', (['(duration_outputs * speed_ratios)'], {}), True, 'import tensorflow as tf\n'), (804, 'tensorflow.expand_dims', 'tf.expand_dims', (['decoder_pos', '(0)'], {}), True, 'import tensorflow as tf\n'), (843, 'tensorflow.math.exp', 'tf.math.exp', (['duration_outputs'], {}), True, 'import tensorflow as tf\n'), (849, 'tensorflow.math.round', 'tf.math.round', (['(duration_outputs * speed_ratios)'], {}), True, 'import tensorflow as tf\n'), (860, 'tensorflow.expand_dims', 'tf.expand_dims', (['decoder_pos', '(0)'], {}), True, 'import tensorflow as tf\n'), (217, 'tensorflow.shape', 'tf.shape', (['key_layer'], {}), True, 'import tensorflow as tf\n'), (288, 'tensorflow.expand_dims', 'tf.expand_dims', (['attention_mask', '(2)'], {}), True, 'import tensorflow as tf\n'), (330, 'tensorflow.expand_dims', 'tf.expand_dims', (['attention_mask', '(2)'], {}), True, 'import tensorflow as tf\n'), (380, 'tensorflow.expand_dims', 'tf.expand_dims', (['attention_mask', '(2)'], {}), True, 'import tensorflow as tf\n'), (551, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['tf.nn.relu6'], {}), True, 'import tensorflow as tf\n'), (553, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.duration_predictor_dropout_probs'], {}), True, 'import tensorflow as tf\n'), (616, 'tensorflow.pad', 'tf.pad', (['repeat_encoder_hidden_states', '[[0, pad_size], [0, 0]]'], {}), True, 'import tensorflow as tf\n'), (634, 'tensorflow.less', 'tf.less', (['i', 'batch_size'], {}), True, 'import tensorflow as tf\n'), (646, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['repeats'], {}), True, 'import tensorflow as tf\n'), (648, 'tensorflow.sequence_mask', 'tf.sequence_mask', (['[real_length]', 'max_durations'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (649, 'tensorflow.repeat', 'tf.repeat', (['encoder_hidden_states[i]'], {'repeats': 'repeats', 'axis': '(0)'}), True, 'import tensorflow as tf\n'), (655, 'tensorflow.concat', 'tf.concat', (['[outputs, repeat_encoder_hidden_states]'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (656, 'tensorflow.concat', 'tf.concat', (['[encoder_masks, masks]'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (790, 'numpy.array', 'np.array', (['[1.0]'], {}), True, 'import numpy as np\n'), (768, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, None]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (769, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, None]', 'dtype': 'tf.bool'}), True, 'import tensorflow as tf\n'), (770, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (771, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (846, 'numpy.array', 'np.array', (['[1.0]'], {}), True, 'import numpy as np\n'), (824, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[1, None]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (825, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[1, None]', 'dtype': 'tf.bool'}), True, 'import tensorflow as tf\n'), (826, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[1]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (827, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[1]', 
'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.math.sqrt', 'tf.math.sqrt', (['(2.0)'], {}), True, 'import tensorflow as tf\n'), (42, 'numpy.sqrt', 'np.sqrt', (['(2 / np.pi)'], {}), True, 'import numpy as np\n'), (653, 'tensorflow.pad', 'tf.pad', (['repeat_encoder_hidden_states', '[[0, pad_size], [0, 0]]'], {}), True, 'import tensorflow as tf\n'), (746, 'tensorflow.shape', 'tf.shape', (['length_regulator_outputs'], {}), True, 'import tensorflow as tf\n'), (802, 'tensorflow.shape', 'tf.shape', (['length_regulator_outputs'], {}), True, 'import tensorflow as tf\n'), (858, 'tensorflow.shape', 'tf.shape', (['length_regulator_outputs'], {}), True, 'import tensorflow as tf\n'), (146, 'numpy.power', 'np.power', (['(10000)', '(2.0 * (i // 2) / self.hidden_size)'], {}), True, 'import numpy as np\n'), (477, 'numpy.power', 'np.power', (['(10000)', '(2.0 * (i // 2) / self.config.hidden_size)'], {}), True, 'import numpy as np\n'), (684, 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, None, self.config.hidden_size]'], {}), True, 'import tensorflow as tf\n'), (685, 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, None]'], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.pow', 'tf.pow', (['x', '(3)'], {}), True, 'import tensorflow as tf\n')]
eichinflo/lit
b46c0cac34baa571242637b53b78cfd69de536d0
# Lint as: python3
"""Wrapper for fine-tuned HuggingFace models in LIT."""

import os
import re
import threading
from typing import Optional, Dict, List, Iterable

import attr
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
from lit_nlp.examples.models import model_utils
from lit_nlp.lib import utils
import numpy as np
import tensorflow as tf
import transformers

JsonDict = lit_types.JsonDict
Spec = lit_types.Spec


@attr.s(auto_attribs=True, kw_only=True)
class GlueModelConfig(object):
  """Config options for a GlueModel."""
  # Preprocessing options
  max_seq_length: int = 128
  inference_batch_size: int = 32
  # Input options
  text_a_name: str = "sentence1"
  text_b_name: Optional[str] = "sentence2"  # set to None for single-segment
  label_name: str = "label"
  # Output options
  labels: Optional[List[str]] = None  # set to None for regression
  null_label_idx: Optional[int] = None
  compute_grads: bool = True  # if True, compute and return gradients.


class GlueModel(lit_model.Model):
  """GLUE benchmark model, using Keras/TF2 and Huggingface Transformers.

  This is a general-purpose classification or regression model. It works for
  one- or two-segment input, and predicts either a multiclass label or
  a regression score. See GlueModelConfig for available options.

  This implements the LIT API for inference (e.g. input_spec(), output_spec(),
  and predict()), but also provides a train() method to run fine-tuning.

  This is a full-featured implementation, which includes embeddings, attention,
  gradients, as well as support for the different input and output types above.
  For a more minimal example, see ../simple_tf2_demo.py.
  """

  @property
  def is_regression(self) -> bool:
    return self.config.labels is None

  def __init__(self, model_name_or_path="bert-base-uncased", **config_kw):
    self.config = GlueModelConfig(**config_kw)
    self._load_model(model_name_or_path)
    self._lock = threading.Lock()

  def _load_model(self, model_name_or_path):
    """Load model. Can be overridden for testing."""
    self.tokenizer = transformers.AutoTokenizer.from_pretrained(
        model_name_or_path)
    self.vocab = self.tokenizer.convert_ids_to_tokens(
        range(len(self.tokenizer)))
    model_config = transformers.AutoConfig.from_pretrained(
        model_name_or_path,
        num_labels=1 if self.is_regression else len(self.config.labels),
        return_dict=False,  # default for training; overridden for predict
    )
    self.model = model_utils.load_pretrained(
        transformers.TFAutoModelForSequenceClassification,
        model_name_or_path,
        config=model_config)

  def _get_tokens(self, ex: JsonDict, field_name: str) -> List[str]:
    with self._lock:
      return (ex.get("tokens_" + field_name) or
              self.tokenizer.tokenize(ex[field_name]))

  def _preprocess(self, inputs: Iterable[JsonDict]) -> Dict[str, tf.Tensor]:
    # Use pretokenized input if available.
    tokens_a = [self._get_tokens(ex, self.config.text_a_name) for ex in inputs]
    tokens_b = None
    if self.config.text_b_name:
      tokens_b = [
          self._get_tokens(ex, self.config.text_b_name) for ex in inputs
      ]
    # Use custom tokenizer call to make sure we don't mangle pre-split
    # wordpieces in pretokenized input.
    encoded_input = model_utils.batch_encode_pretokenized(
        self.tokenizer,
        tokens_a,
        tokens_b,
        max_length=self.config.max_seq_length)
    return encoded_input

  def _make_dataset(self, inputs: Iterable[JsonDict]) -> tf.data.Dataset:
    """Make a tf.data.Dataset from inputs in LIT format."""
    encoded_input = self._preprocess(inputs)
    if self.is_regression:
      labels = tf.constant([ex[self.config.label_name] for ex in inputs],
                           dtype=tf.float32)
    else:
      labels = tf.constant([
          self.config.labels.index(ex[self.config.label_name])
          for ex in inputs
      ],
                           dtype=tf.int64)
    # encoded_input is actually a transformers.BatchEncoding
    # object, which tf.data.Dataset doesn't like. Convert to a regular dict.
    return tf.data.Dataset.from_tensor_slices((dict(encoded_input), labels))

  def train(self,
            train_inputs: List[JsonDict],
            validation_inputs: List[JsonDict],
            learning_rate=2e-5,
            batch_size=32,
            num_epochs=3,
            keras_callbacks=None):
    """Run fine-tuning."""
    train_dataset = self._make_dataset(train_inputs).shuffle(128).batch(
        batch_size).repeat(-1)
    # Use larger batch for validation since inference is about 1/2 memory usage
    # of backprop.
    eval_batch_size = 2 * batch_size
    validation_dataset = self._make_dataset(validation_inputs).batch(
        eval_batch_size)

    # Prepare model for training.
    opt = tf.keras.optimizers.Adam(learning_rate=learning_rate, epsilon=1e-08)
    if self.is_regression:
      loss = tf.keras.losses.MeanSquaredError()
      metric = tf.keras.metrics.RootMeanSquaredError("rmse")
    else:
      loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
      metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")
    self.model.compile(optimizer=opt, loss=loss, metrics=[metric])

    steps_per_epoch = len(train_inputs) // batch_size
    validation_steps = len(validation_inputs) // eval_batch_size
    history = self.model.fit(
        train_dataset,
        epochs=num_epochs,
        steps_per_epoch=steps_per_epoch,
        validation_data=validation_dataset,
        validation_steps=validation_steps,
        callbacks=keras_callbacks,
        verbose=2)
    return history

  def save(self, path: str):
    """Save model weights and tokenizer info.

    To re-load, pass the path to the constructor instead of the name of a
    base model.

    Args:
      path: directory to save to. Will write several files here.
    """
    if not os.path.isdir(path):
      os.mkdir(path)
    self.tokenizer.save_pretrained(path)
    self.model.save_pretrained(path)

  def _segment_slicers(self, tokens: List[str]):
    """Slicers along the tokens dimension for each segment.

    For tokens ['[CLS]', a0, a1, ..., '[SEP]', b0, b1, ..., '[SEP]'],
    we want to get the slices [a0, a1, ...] and [b0, b1, ...]

    Args:
      tokens: <string>[num_tokens], including special tokens

    Returns:
      (slicer_a, slicer_b), slice objects
    """
    try:
      split_point = tokens.index(self.tokenizer.sep_token)
    except ValueError:
      split_point = len(tokens) - 1
    slicer_a = slice(1, split_point)  # start after [CLS]
    slicer_b = slice(split_point + 1, len(tokens) - 1)  # end before last [SEP]
    return slicer_a, slicer_b

  def _postprocess(self, output: Dict[str, np.ndarray]):
    """Per-example postprocessing, on NumPy output."""
    ntok = output.pop("ntok")
    output["tokens"] = self.tokenizer.convert_ids_to_tokens(
        output.pop("input_ids")[:ntok])

    # Tokens for each segment, individually.
    slicer_a, slicer_b = self._segment_slicers(output["tokens"])
    output["tokens_" + self.config.text_a_name] = output["tokens"][slicer_a]
    if self.config.text_b_name:
      output["tokens_" + self.config.text_b_name] = output["tokens"][slicer_b]

    # Embeddings for each segment, individually.
output["input_embs_" + self.config.text_a_name] = ( output["input_embs"][slicer_a]) if self.config.text_b_name: output["input_embs_" + self.config.text_b_name] = ( output["input_embs"][slicer_b]) # Gradients for each segment, individually. if self.config.compute_grads: output["token_grad_" + self.config.text_a_name] = output["input_emb_grad"][slicer_a] if self.config.text_b_name: output["token_grad_" + self.config.text_b_name] = output["input_emb_grad"][slicer_b] if self.is_regression: output["grad_class"] = None # pytype: disable=container-type-mismatch else: # Return the label corresponding to the class index used for gradients. output["grad_class"] = self.config.labels[output["grad_class"]] # Gradients for the CLS token. output["cls_grad"] = output["input_emb_grad"][0] # Remove "input_emb_grad" since it's not in the output spec. del output["input_emb_grad"] # Process attention. for key in output: if not re.match(r"layer_(\d+)/attention", key): continue # Select only real tokens, since most of this matrix is padding. # <float32>[num_heads, max_seq_length, max_seq_length] # -> <float32>[num_heads, num_tokens, num_tokens] output[key] = output[key][:, :ntok, :ntok].transpose((0, 2, 1)) # Make a copy of this array to avoid memory leaks, since NumPy otherwise # keeps a pointer around that prevents the source array from being GCed. output[key] = output[key].copy() # pytype: disable=attribute-error return output def _scatter_embs(self, passed_input_embs, input_embs, batch_indices, offsets): """Scatters custom passed embeddings into the default model embeddings. Args: passed_input_embs: <tf.float32>[num_scatter_tokens], the custom passed embeddings to be scattered into the default model embeddings. input_embs: the default model embeddings. batch_indices: the indices of the embeddings to replace in the format (batch_index, sequence_index). offsets: the offset from which to scatter the custom embedding (number of tokens from the start of the sequence). Returns: The default model embeddings with scattered custom embeddings. """ # <float32>[scatter_batch_size, num_tokens, emb_size] filtered_embs = [emb for emb in passed_input_embs if emb is not None] # Prepares update values that should be scattered in, i.e. one for each # of the (scatter_batch_size * num_tokens) word embeddings. # <np.float32>[scatter_batch_size * num_tokens, emb_size] updates = np.concatenate(filtered_embs) # Prepares indices in format (batch_index, sequence_index) for all # values that should be scattered in, i.e. one for each of the # (scatter_batch_size * num_tokens) word embeddings. scatter_indices = [] for (batch_index, sentence_embs, offset) in zip(batch_indices, filtered_embs, offsets): for (token_index, emb) in enumerate(sentence_embs): scatter_indices.append([batch_index, token_index + offset]) # Scatters passed word embeddings into embeddings gathered from tokens. # <tf.float32>[batch_size, num_tokens + num_special_tokens, emb_size] return tf.tensor_scatter_nd_update(input_embs, scatter_indices, updates) def scatter_all_embeddings(self, inputs, input_embs): """Scatters custom passed embeddings for text segment inputs. Args: inputs: the model inputs, which contain any custom embeddings to scatter. input_embs: the default model embeddings. Returns: The default model embeddings with scattered custom embeddings. """ # Gets batch indices of any word embeddings that were passed for text_a. 
    passed_input_embs_a = [ex.get("input_embs_" + self.config.text_a_name)
                           for ex in inputs]
    batch_indices_a = [index for (index, emb) in enumerate(
        passed_input_embs_a) if emb is not None]

    # If word embeddings were passed in for text_a, scatter them into the
    # embeddings, gathered from the input ids. 1 is passed in as the offset
    # for each, since text_a starts at index 1, after the [CLS] token.
    if batch_indices_a:
      input_embs = self._scatter_embs(
          passed_input_embs_a, input_embs, batch_indices_a,
          offsets=np.ones(len(batch_indices_a), dtype=np.int64))

    if self.config.text_b_name:
      # Gets batch indices of any word embeddings that were passed for text_b.
      passed_input_embs_b = [ex.get("input_embs_" + self.config.text_b_name)
                             for ex in inputs]
      batch_indices_b = [
          index for (index, emb) in enumerate(passed_input_embs_b)
          if emb is not None]

      # If word embeddings were also passed in for text_b, scatter them into
      # the embeddings gathered from the input ids. The offsets are the
      # [lengths of the corresponding text_a embeddings] + 2, since text_b
      # starts after [CLS] [text_a tokens] [SEP]. (This assumes that text_b
      # embeddings will only be passed together with text_a embeddings.)
      if batch_indices_b:
        lengths = np.array([len(embed) for embed in passed_input_embs_a
                            if embed is not None])
        input_embs = self._scatter_embs(
            passed_input_embs_b, input_embs, batch_indices_b,
            offsets=(lengths + 2))
    return input_embs

  ##
  # LIT API implementation
  def max_minibatch_size(self):
    return self.config.inference_batch_size

  def get_embedding_table(self):
    return self.vocab, self.model.bert.embeddings.word_embeddings.numpy()

  def predict_minibatch(self, inputs: Iterable[JsonDict]):
    # Use watch_accessed_variables to save memory by having the tape do nothing
    # if we don't need gradients.
    with tf.GradientTape(
        watch_accessed_variables=self.config.compute_grads) as tape:
      encoded_input = self._preprocess(inputs)

      # Gathers word embeddings from BERT model embedding layer using input ids
      # of the tokens.
      input_ids = encoded_input["input_ids"]
      word_embeddings = self.model.bert.embeddings.word_embeddings
      # <tf.float32>[batch_size, num_tokens, emb_size]
      input_embs = tf.gather(word_embeddings, input_ids)

      # Scatter in any passed in embeddings.
      # <tf.float32>[batch_size, num_tokens, emb_size]
      input_embs = self.scatter_all_embeddings(inputs, input_embs)

      tape.watch(input_embs)  # Watch input_embs for gradient calculation.

      model_inputs = encoded_input.copy()
      model_inputs["input_ids"] = None
      out: transformers.modeling_tf_outputs.TFSequenceClassifierOutput = \
          self.model(model_inputs,
                     inputs_embeds=input_embs,
                     training=False,
                     output_hidden_states=True,
                     output_attentions=True,
                     return_dict=True)

      batched_outputs = {
          "input_ids": encoded_input["input_ids"],
          "ntok": tf.reduce_sum(encoded_input["attention_mask"], axis=1),
          "cls_emb": out.hidden_states[-1][:, 0],  # last layer, first token
          "input_embs": input_embs,
      }

      # First entry is embeddings, then output from each transformer layer.
      assert len(out.hidden_states) == self.model.config.num_hidden_layers + 1
      # <float32>[batch_size, num_tokens, 1]
      token_mask = tf.expand_dims(
          tf.cast(encoded_input["attention_mask"], tf.float32), axis=2)
      # <float32>[batch_size, 1]
      denom = tf.reduce_sum(token_mask, axis=1)
      for i, layer_output in enumerate(out.hidden_states):
        # layer_output is <float32>[batch_size, num_tokens, emb_dim]
        # average over tokens to get <float32>[batch_size, emb_dim]
        batched_outputs[f"layer_{i}/avg_emb"] = tf.reduce_sum(
            layer_output * token_mask, axis=1) / denom

      assert len(out.attentions) == self.model.config.num_hidden_layers
      for i, layer_attention in enumerate(out.attentions):
        batched_outputs[f"layer_{i+1}/attention"] = layer_attention

      if self.is_regression:
        # <tf.float32>[batch_size]
        batched_outputs["score"] = tf.squeeze(out.logits, axis=-1)
        scalar_pred_for_gradients = batched_outputs["score"]
      else:
        # <tf.float32>[batch_size, num_labels]
        batched_outputs["probas"] = tf.nn.softmax(out.logits, axis=-1)

        # If a class for the gradients has been specified in the input,
        # calculate gradients for that class. Otherwise, calculate gradients
        # for the arg_max class.
        arg_max = tf.math.argmax(batched_outputs["probas"], axis=-1).numpy()
        grad_classes = [ex.get("grad_class", arg_max[i])
                        for (i, ex) in enumerate(inputs)]
        # Convert the class names to indices if needed.
        grad_classes = [self.config.labels.index(label)
                        if isinstance(label, str) else label
                        for label in grad_classes]

        gather_indices = list(enumerate(grad_classes))
        # <tf.float32>[batch_size]
        scalar_pred_for_gradients = tf.gather_nd(batched_outputs["probas"],
                                                 gather_indices)
        if self.config.compute_grads:
          batched_outputs["grad_class"] = tf.convert_to_tensor(grad_classes)

    # Request gradients after the tape is run.
    # Note: embs[0] includes position and segment encodings, as well as subword
    # embeddings.
    if self.config.compute_grads:
      # <tf.float32>[batch_size, num_tokens, emb_dim]
      batched_outputs["input_emb_grad"] = tape.gradient(
          scalar_pred_for_gradients, input_embs)

    detached_outputs = {k: v.numpy() for k, v in batched_outputs.items()}
    # Sequence of dicts, one per example.
    unbatched_outputs = utils.unbatch_preds(detached_outputs)
    return map(self._postprocess, unbatched_outputs)

  def input_spec(self) -> Spec:
    ret = {}
    ret[self.config.text_a_name] = lit_types.TextSegment()
    ret["tokens_" + self.config.text_a_name] = lit_types.Tokens(
        parent=self.config.text_a_name, required=False)
    if self.config.text_b_name:
      ret[self.config.text_b_name] = lit_types.TextSegment()
      ret["tokens_" + self.config.text_b_name] = lit_types.Tokens(
          parent=self.config.text_b_name, required=False)
    if self.is_regression:
      ret[self.config.label_name] = lit_types.RegressionScore(required=False)
    else:
      ret[self.config.label_name] = lit_types.CategoryLabel(
          required=False, vocab=self.config.labels)
    # The input_embs_ and grad_class fields are used for Integrated Gradients.
ret["input_embs_" + self.config.text_a_name] = lit_types.TokenEmbeddings( align="tokens", required=False) if self.config.text_b_name: ret["input_embs_" + self.config.text_b_name] = lit_types.TokenEmbeddings( align="tokens", required=False) ret["grad_class"] = lit_types.CategoryLabel(required=False, vocab=self.config.labels) return ret def output_spec(self) -> Spec: ret = {"tokens": lit_types.Tokens()} ret["tokens_" + self.config.text_a_name] = lit_types.Tokens( parent=self.config.text_a_name) if self.config.text_b_name: ret["tokens_" + self.config.text_b_name] = lit_types.Tokens( parent=self.config.text_b_name) if self.is_regression: ret["score"] = lit_types.RegressionScore(parent=self.config.label_name) else: ret["probas"] = lit_types.MulticlassPreds( parent=self.config.label_name, vocab=self.config.labels, null_idx=self.config.null_label_idx) ret["cls_emb"] = lit_types.Embeddings() # Average embeddings, one per layer including embeddings. for i in range(1 + self.model.config.num_hidden_layers): ret[f"layer_{i}/avg_emb"] = lit_types.Embeddings() ret["cls_grad"] = lit_types.Gradients( grad_for="cls_emb", grad_target_field_key="grad_class") # The input_embs_ and grad_class fields are used for Integrated Gradients. ret["input_embs_" + self.config.text_a_name] = lit_types.TokenEmbeddings( align="tokens_" + self.config.text_a_name) if self.config.text_b_name: ret["input_embs_" + self.config.text_b_name] = lit_types.TokenEmbeddings( align="tokens_" + self.config.text_b_name) # Gradients, if requested. if self.config.compute_grads: ret["grad_class"] = lit_types.CategoryLabel(required=False, vocab=self.config.labels) ret["token_grad_" + self.config.text_a_name] = lit_types.TokenGradients( align="tokens_" + self.config.text_a_name, grad_for="input_embs_" + self.config.text_a_name, grad_target_field_key="grad_class") if self.config.text_b_name: ret["token_grad_" + self.config.text_b_name] = lit_types.TokenGradients( align="tokens_" + self.config.text_b_name, grad_for="input_embs_" + self.config.text_b_name, grad_target_field_key="grad_class") # Attention heads, one field for each layer. for i in range(self.model.config.num_hidden_layers): ret[f"layer_{i+1}/attention"] = lit_types.AttentionHeads( align_in="tokens", align_out="tokens") return ret class SST2Model(GlueModel): """Classification model on SST-2.""" def __init__(self, *args, **kw): super().__init__( *args, text_a_name="sentence", text_b_name=None, labels=["0", "1"], null_label_idx=0, **kw) class MNLIModel(GlueModel): """Classification model on MultiNLI.""" def __init__(self, *args, **kw): super().__init__( *args, text_a_name="premise", text_b_name="hypothesis", labels=["entailment", "neutral", "contradiction"], **kw) class STSBModel(GlueModel): """Regression model on STS-B.""" def __init__(self, *args, **kw): super().__init__( *args, text_a_name="sentence1", text_b_name="sentence2", labels=None, **kw) class ToxicityModel(GlueModel): """Classification model on Jigsaw Toxicity Dataset.""" def __init__(self, *args, **kw): super().__init__( *args, text_a_name="sentence", text_b_name=None, labels=["non-toxic", "toxic"], null_label_idx=0, **kw)
[ "tensorflow.convert_to_tensor", "tensorflow.math.argmax", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.gather_nd", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.losses.MeanSquaredError", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.squeeze", "tensorflow.tensor_scatter_nd_update", "numpy.concatenate", "tensorflow.gather", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.metrics.RootMeanSquaredError", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.GradientTape" ]
lit_nlp/examples/models/glue_models.py
[(22, 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)', 'kw_only': '(True)'}), False, 'import attr\n'), (62, 'threading.Lock', 'threading.Lock', ([], {}), False, 'import threading\n'), (66, 'transformers.AutoTokenizer.from_pretrained', 'transformers.AutoTokenizer.from_pretrained', (['model_name_or_path'], {}), False, 'import transformers\n'), (75, 'lit_nlp.examples.models.model_utils.load_pretrained', 'model_utils.load_pretrained', (['transformers.TFAutoModelForSequenceClassification', 'model_name_or_path'], {'config': 'model_config'}), False, 'from lit_nlp.examples.models import model_utils\n'), (95, 'lit_nlp.examples.models.model_utils.batch_encode_pretokenized', 'model_utils.batch_encode_pretokenized', (['self.tokenizer', 'tokens_a', 'tokens_b'], {'max_length': 'self.config.max_seq_length'}), False, 'from lit_nlp.examples.models import model_utils\n'), (134, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate', 'epsilon': '(1e-08)'}), True, 'import tensorflow as tf\n'), (264, 'numpy.concatenate', 'np.concatenate', (['filtered_embs'], {}), True, 'import numpy as np\n'), (277, 'tensorflow.tensor_scatter_nd_update', 'tf.tensor_scatter_nd_update', (['input_embs', 'scatter_indices', 'updates'], {}), True, 'import tensorflow as tf\n'), (419, 'lit_nlp.lib.utils.unbatch_preds', 'utils.unbatch_preds', (['detached_outputs'], {}), False, 'from lit_nlp.lib import utils\n'), (424, 'lit_nlp.api.types.TextSegment', 'lit_types.TextSegment', ([], {}), True, 'from lit_nlp.api import types as lit_types\n'), (425, 'lit_nlp.api.types.Tokens', 'lit_types.Tokens', ([], {'parent': 'self.config.text_a_name', 'required': '(False)'}), True, 'from lit_nlp.api import types as lit_types\n'), (437, 'lit_nlp.api.types.TokenEmbeddings', 'lit_types.TokenEmbeddings', ([], {'align': '"""tokens"""', 'required': '(False)'}), True, 'from lit_nlp.api import types as lit_types\n'), (442, 'lit_nlp.api.types.CategoryLabel', 'lit_types.CategoryLabel', ([], {'required': '(False)', 'vocab': 'self.config.labels'}), True, 'from lit_nlp.api import types as lit_types\n'), (448, 'lit_nlp.api.types.Tokens', 'lit_types.Tokens', ([], {'parent': 'self.config.text_a_name'}), True, 'from lit_nlp.api import types as lit_types\n'), (460, 'lit_nlp.api.types.Embeddings', 'lit_types.Embeddings', ([], {}), True, 'from lit_nlp.api import types as lit_types\n'), (465, 'lit_nlp.api.types.Gradients', 'lit_types.Gradients', ([], {'grad_for': '"""cls_emb"""', 'grad_target_field_key': '"""grad_class"""'}), True, 'from lit_nlp.api import types as lit_types\n'), (469, 'lit_nlp.api.types.TokenEmbeddings', 'lit_types.TokenEmbeddings', ([], {'align': "('tokens_' + self.config.text_a_name)"}), True, 'from lit_nlp.api import types as lit_types\n'), (106, 'tensorflow.constant', 'tf.constant', (['[ex[self.config.label_name] for ex in inputs]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (136, 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), True, 'import tensorflow as tf\n'), (137, 'tensorflow.keras.metrics.RootMeanSquaredError', 'tf.keras.metrics.RootMeanSquaredError', (['"""rmse"""'], {}), True, 'import tensorflow as tf\n'), (139, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', (['"""accuracy"""'], {}), True, 'import 
tensorflow as tf\n'), (164, 'os.path.isdir', 'os.path.isdir', (['path'], {}), False, 'import os\n'), (165, 'os.mkdir', 'os.mkdir', (['path'], {}), False, 'import os\n'), (335, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'watch_accessed_variables': 'self.config.compute_grads'}), True, 'import tensorflow as tf\n'), (344, 'tensorflow.gather', 'tf.gather', (['word_embeddings', 'input_ids'], {}), True, 'import tensorflow as tf\n'), (372, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['token_mask'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (428, 'lit_nlp.api.types.TextSegment', 'lit_types.TextSegment', ([], {}), True, 'from lit_nlp.api import types as lit_types\n'), (429, 'lit_nlp.api.types.Tokens', 'lit_types.Tokens', ([], {'parent': 'self.config.text_b_name', 'required': '(False)'}), True, 'from lit_nlp.api import types as lit_types\n'), (432, 'lit_nlp.api.types.RegressionScore', 'lit_types.RegressionScore', ([], {'required': '(False)'}), True, 'from lit_nlp.api import types as lit_types\n'), (434, 'lit_nlp.api.types.CategoryLabel', 'lit_types.CategoryLabel', ([], {'required': '(False)', 'vocab': 'self.config.labels'}), True, 'from lit_nlp.api import types as lit_types\n'), (440, 'lit_nlp.api.types.TokenEmbeddings', 'lit_types.TokenEmbeddings', ([], {'align': '"""tokens"""', 'required': '(False)'}), True, 'from lit_nlp.api import types as lit_types\n'), (447, 'lit_nlp.api.types.Tokens', 'lit_types.Tokens', ([], {}), True, 'from lit_nlp.api import types as lit_types\n'), (451, 'lit_nlp.api.types.Tokens', 'lit_types.Tokens', ([], {'parent': 'self.config.text_b_name'}), True, 'from lit_nlp.api import types as lit_types\n'), (454, 'lit_nlp.api.types.RegressionScore', 'lit_types.RegressionScore', ([], {'parent': 'self.config.label_name'}), True, 'from lit_nlp.api import types as lit_types\n'), (456, 'lit_nlp.api.types.MulticlassPreds', 'lit_types.MulticlassPreds', ([], {'parent': 'self.config.label_name', 'vocab': 'self.config.labels', 'null_idx': 'self.config.null_label_idx'}), True, 'from lit_nlp.api import types as lit_types\n'), (463, 'lit_nlp.api.types.Embeddings', 'lit_types.Embeddings', ([], {}), True, 'from lit_nlp.api import types as lit_types\n'), (472, 'lit_nlp.api.types.TokenEmbeddings', 'lit_types.TokenEmbeddings', ([], {'align': "('tokens_' + self.config.text_b_name)"}), True, 'from lit_nlp.api import types as lit_types\n'), (477, 'lit_nlp.api.types.CategoryLabel', 'lit_types.CategoryLabel', ([], {'required': '(False)', 'vocab': 'self.config.labels'}), True, 'from lit_nlp.api import types as lit_types\n'), (479, 'lit_nlp.api.types.TokenGradients', 'lit_types.TokenGradients', ([], {'align': "('tokens_' + self.config.text_a_name)", 'grad_for': "('input_embs_' + self.config.text_a_name)", 'grad_target_field_key': '"""grad_class"""'}), True, 'from lit_nlp.api import types as lit_types\n'), (491, 'lit_nlp.api.types.AttentionHeads', 'lit_types.AttentionHeads', ([], {'align_in': '"""tokens"""', 'align_out': '"""tokens"""'}), True, 'from lit_nlp.api import types as lit_types\n'), (229, 're.match', 're.match', (['"""layer_(\\\\d+)/attention"""', 'key'], {}), False, 'import re\n'), (361, 'tensorflow.reduce_sum', 'tf.reduce_sum', (["encoded_input['attention_mask']"], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (370, 'tensorflow.cast', 'tf.cast', (["encoded_input['attention_mask']", 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (385, 'tensorflow.squeeze', 'tf.squeeze', (['out.logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (389, 
'tensorflow.nn.softmax', 'tf.nn.softmax', (['out.logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (404, 'tensorflow.gather_nd', 'tf.gather_nd', (["batched_outputs['probas']", 'gather_indices'], {}), True, 'import tensorflow as tf\n'), (484, 'lit_nlp.api.types.TokenGradients', 'lit_types.TokenGradients', ([], {'align': "('tokens_' + self.config.text_b_name)", 'grad_for': "('input_embs_' + self.config.text_b_name)", 'grad_target_field_key': '"""grad_class"""'}), True, 'from lit_nlp.api import types as lit_types\n'), (376, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(layer_output * token_mask)'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (407, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['grad_classes'], {}), True, 'import tensorflow as tf\n'), (394, 'tensorflow.math.argmax', 'tf.math.argmax', (["batched_outputs['probas']"], {'axis': '(-1)'}), True, 'import tensorflow as tf\n')]
voxmenthe/keras-io
7165ea10a913de1857cbaa81a90c4443a699c726
""" Title: Training & evaluation with the built-in methods Author: [fchollet](https://twitter.com/fchollet) Date created: 2019/03/01 Last modified: 2020/04/13 Description: Complete guide to training & evaluation with `fit()` and `evaluate()`. """ """ ## Setup """ import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers """ ## Introduction This guide covers training, evaluation, and prediction (inference) models when using built-in APIs for training & validation (such as `model.fit()`, `model.evaluate()`, `model.predict()`). If you are interested in leveraging `fit()` while specifying your own training step function, see the guide ["customizing what happens in `fit()`"](/guides/customizing_what_happens_in_fit/). If you are interested in writing your own training & evaluation loops from scratch, see the guide ["writing a training loop from scratch"](/guides/writing_a_training_loop_from_scratch/). In general, whether you are using built-in loops or writing your own, model training & evaluation works strictly in the same way across every kind of Keras model -- Sequential models, models built with the Functional API, and models written from scratch via model subclassing. This guide doesn't cover distributed training. For distributed training, see our [guide to multi-gpu & distributed training](https://keras.io/guides/distributed_training/). """ """ ## API overview: a first end-to-end example When passing data to the built-in training loops of a model, you should either use **NumPy arrays** (if your data is small and fits in memory) or **`tf.data Dataset` objects**. In the next few paragraphs, we'll use the MNIST dataset as NumPy arrays, in order to demonstrate how to use optimizers, losses, and metrics. Let's consider the following model (here, we build in with the Functional API, but it could be a Sequential model or a subclassed model as well): """ inputs = keras.Input(shape=(784,), name="digits") x = layers.Dense(64, activation="relu", name="dense_1")(inputs) x = layers.Dense(64, activation="relu", name="dense_2")(x) outputs = layers.Dense(10, activation="softmax", name="predictions")(x) model = keras.Model(inputs=inputs, outputs=outputs) """ Here's what the typical end-to-end workflow looks like, consisting of: - Training - Validation on a holdout set generated from the original training data - Evaluation on the test data We'll use MNIST data for this example. """ (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Preprocess the data (these are NumPy arrays) x_train = x_train.reshape(60000, 784).astype("float32") / 255 x_test = x_test.reshape(10000, 784).astype("float32") / 255 y_train = y_train.astype("float32") y_test = y_test.astype("float32") # Reserve 10,000 samples for validation x_val = x_train[-10000:] y_val = y_train[-10000:] x_train = x_train[:-10000] y_train = y_train[:-10000] """ We specify the training configuration (optimizer, loss, metrics): """ model.compile( optimizer=keras.optimizers.RMSprop(), # Optimizer # Loss function to minimize loss=keras.losses.SparseCategoricalCrossentropy(), # List of metrics to monitor metrics=[keras.metrics.SparseCategoricalAccuracy()], ) """ We call `fit()`, which will train the model by slicing the data into "batches" of size "batch_size", and repeatedly iterating over the entire dataset for a given number of "epochs". 
""" print("Fit model on training data") history = model.fit( x_train, y_train, batch_size=64, epochs=2, # We pass some validation for # monitoring validation loss and metrics # at the end of each epoch validation_data=(x_val, y_val), ) """ The returned "history" object holds a record of the loss values and metric values during training: """ history.history """ We evaluate the model on the test data via `evaluate()`: """ # Evaluate the model on the test data using `evaluate` print("Evaluate on test data") results = model.evaluate(x_test, y_test, batch_size=128) print("test loss, test acc:", results) # Generate predictions (probabilities -- the output of the last layer) # on new data using `predict` print("Generate predictions for 3 samples") predictions = model.predict(x_test[:3]) print("predictions shape:", predictions.shape) """ Now, let's review each piece of this workflow in detail. """ """ ## The `compile()` method: specifying a loss, metrics, and an optimizer To train a model with `fit()`, you need to specify a loss function, an optimizer, and optionally, some metrics to monitor. You pass these to the model as arguments to the `compile()` method: """ model.compile( optimizer=keras.optimizers.RMSprop(learning_rate=1e-3), loss=keras.losses.SparseCategoricalCrossentropy(), metrics=[keras.metrics.SparseCategoricalAccuracy()], ) """ The `metrics` argument should be a list -- your model can have any number of metrics. If your model has multiple outputs, you can specify different losses and metrics for each output, and you can modulate the contribution of each output to the total loss of the model. You will find more details about this in the section **"Passing data to multi-input, multi-output models"**. Note that if you're satisfied with the default settings, in many cases the optimizer, loss, and metrics can be specified via string identifiers as a shortcut: """ model.compile( optimizer="rmsprop", loss="sparse_categorical_crossentropy", metrics=["sparse_categorical_accuracy"], ) """ For later reuse, let's put our model definition and compile step in functions; we will call them several times across different examples in this guide. """ def get_uncompiled_model(): inputs = keras.Input(shape=(784,), name="digits") x = layers.Dense(64, activation="relu", name="dense_1")(inputs) x = layers.Dense(64, activation="relu", name="dense_2")(x) outputs = layers.Dense(10, activation="softmax", name="predictions")(x) model = keras.Model(inputs=inputs, outputs=outputs) return model def get_compiled_model(): model = get_uncompiled_model() model.compile( optimizer="rmsprop", loss="sparse_categorical_crossentropy", metrics=["sparse_categorical_accuracy"], ) return model """ ### Many built-in optimizers, losses, and metrics are available In general, you won't have to create from scratch your own losses, metrics, or optimizers, because what you need is likely already part of the Keras API: Optimizers: - `SGD()` (with or without momentum) - `RMSprop()` - `Adam()` - etc. Losses: - `MeanSquaredError()` - `KLDivergence()` - `CosineSimilarity()` - etc. Metrics: - `AUC()` - `Precision()` - `Recall()` - etc. """ """ ### Custom losses There are two ways to provide custom losses with Keras. The first example creates a function that accepts inputs `y_true` and `y_pred`. 
The following example shows a loss function that computes the mean squared error
between the real data and the predictions:
"""


def custom_mean_squared_error(y_true, y_pred):
    return tf.math.reduce_mean(tf.square(y_true - y_pred))


model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.Adam(), loss=custom_mean_squared_error)

# We need to one-hot encode the labels to use MSE
y_train_one_hot = tf.one_hot(y_train, depth=10)
model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1)

"""
If you need a loss function that takes in parameters besides `y_true` and `y_pred`, you
can subclass the `tf.keras.losses.Loss` class and implement the following two methods:

- `__init__(self)`: accept parameters to pass during the call of your loss function
- `call(self, y_true, y_pred)`: use the targets (y_true) and the model predictions
(y_pred) to compute the model's loss

Let's say you want to use mean squared error, but with an added term that will
de-incentivize prediction values far from 0.5 (we assume that the categorical targets
are one-hot encoded and take values between 0 and 1). This creates an incentive for the
model not to be too confident, which may help reduce overfitting (we won't know if it
works until we try!).

Here's how you would do it:
"""


class CustomMSE(keras.losses.Loss):
    def __init__(self, regularization_factor=0.1, name="custom_mse"):
        super().__init__(name=name)
        self.regularization_factor = regularization_factor

    def call(self, y_true, y_pred):
        mse = tf.math.reduce_mean(tf.square(y_true - y_pred))
        reg = tf.math.reduce_mean(tf.square(0.5 - y_pred))
        return mse + reg * self.regularization_factor


model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.Adam(), loss=CustomMSE())

y_train_one_hot = tf.one_hot(y_train, depth=10)
model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1)

"""
### Custom metrics

If you need a metric that isn't part of the API, you can easily create custom metrics
by subclassing the `tf.keras.metrics.Metric` class. You will need to implement 4
methods:

- `__init__(self)`, in which you will create state variables for your metric.
- `update_state(self, y_true, y_pred, sample_weight=None)`, which uses the targets
y_true and the model predictions y_pred to update the state variables.
- `result(self)`, which uses the state variables to compute the final results.
- `reset_states(self)`, which reinitializes the state of the metric.

State update and results computation are kept separate (in `update_state()` and
`result()`, respectively) because in some cases, results computation might be very
expensive, and would only be done periodically.
Here's a simple example showing how to implement a `CategoricalTruePositives` metric, that counts how many samples were correctly classified as belonging to a given class: """ class CategoricalTruePositives(keras.metrics.Metric): def __init__(self, name="categorical_true_positives", **kwargs): super(CategoricalTruePositives, self).__init__(name=name, **kwargs) self.true_positives = self.add_weight(name="ctp", initializer="zeros") def update_state(self, y_true, y_pred, sample_weight=None): y_pred = tf.reshape(tf.argmax(y_pred, axis=1), shape=(-1, 1)) values = tf.cast(y_true, "int32") == tf.cast(y_pred, "int32") values = tf.cast(values, "float32") if sample_weight is not None: sample_weight = tf.cast(sample_weight, "float32") values = tf.multiply(values, sample_weight) self.true_positives.assign_add(tf.reduce_sum(values)) def result(self): return self.true_positives def reset_states(self): # The state of the metric will be reset at the start of each epoch. self.true_positives.assign(0.0) model = get_uncompiled_model() model.compile( optimizer=keras.optimizers.RMSprop(learning_rate=1e-3), loss=keras.losses.SparseCategoricalCrossentropy(), metrics=[CategoricalTruePositives()], ) model.fit(x_train, y_train, batch_size=64, epochs=3) """ ### Handling losses and metrics that don't fit the standard signature The overwhelming majority of losses and metrics can be computed from `y_true` and `y_pred`, where `y_pred` is an output of your model. But not all of them. For instance, a regularization loss may only require the activation of a layer (there are no targets in this case), and this activation may not be a model output. In such cases, you can call `self.add_loss(loss_value)` from inside the call method of a custom layer. Losses added in this way get added to the "main" loss during training (the one passed to `compile()`). Here's a simple example that adds activity regularization (note that activity regularization is built-in in all Keras layers -- this layer is just for the sake of providing a concrete example): """ class ActivityRegularizationLayer(layers.Layer): def call(self, inputs): self.add_loss(tf.reduce_sum(inputs) * 0.1) return inputs # Pass-through layer. inputs = keras.Input(shape=(784,), name="digits") x = layers.Dense(64, activation="relu", name="dense_1")(inputs) # Insert activity regularization as a layer x = ActivityRegularizationLayer()(x) x = layers.Dense(64, activation="relu", name="dense_2")(x) outputs = layers.Dense(10, name="predictions")(x) model = keras.Model(inputs=inputs, outputs=outputs) model.compile( optimizer=keras.optimizers.RMSprop(learning_rate=1e-3), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), ) # The displayed loss will be much higher than before # due to the regularization component. model.fit(x_train, y_train, batch_size=64, epochs=1) """ You can do the same for logging metric values, using `add_metric()`: """ class MetricLoggingLayer(layers.Layer): def call(self, inputs): # The `aggregation` argument defines # how to aggregate the per-batch values # over each epoch: # in this case we simply average them. self.add_metric( keras.backend.std(inputs), name="std_of_activation", aggregation="mean" ) return inputs # Pass-through layer. inputs = keras.Input(shape=(784,), name="digits") x = layers.Dense(64, activation="relu", name="dense_1")(inputs) # Insert std logging as a layer. 
x = MetricLoggingLayer()(x) x = layers.Dense(64, activation="relu", name="dense_2")(x) outputs = layers.Dense(10, name="predictions")(x) model = keras.Model(inputs=inputs, outputs=outputs) model.compile( optimizer=keras.optimizers.RMSprop(learning_rate=1e-3), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), ) model.fit(x_train, y_train, batch_size=64, epochs=1) """ In the [Functional API](/guides/functional_api/), you can also call `model.add_loss(loss_tensor)`, or `model.add_metric(metric_tensor, name, aggregation)`. Here's a simple example: """ inputs = keras.Input(shape=(784,), name="digits") x1 = layers.Dense(64, activation="relu", name="dense_1")(inputs) x2 = layers.Dense(64, activation="relu", name="dense_2")(x1) outputs = layers.Dense(10, name="predictions")(x2) model = keras.Model(inputs=inputs, outputs=outputs) model.add_loss(tf.reduce_sum(x1) * 0.1) model.add_metric(keras.backend.std(x1), name="std_of_activation", aggregation="mean") model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), ) model.fit(x_train, y_train, batch_size=64, epochs=1) """ Note that when you pass losses via `add_loss()`, it becomes possible to call `compile()` without a loss function, since the model already has a loss to minimize. Consider the following `LogisticEndpoint` layer: it takes as inputs targets & logits, and it tracks a crossentropy loss via `add_loss()`. It also tracks classification accuracy via `add_metric()`. """ class LogisticEndpoint(keras.layers.Layer): def __init__(self, name=None): super(LogisticEndpoint, self).__init__(name=name) self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True) self.accuracy_fn = keras.metrics.BinaryAccuracy() def call(self, targets, logits, sample_weights=None): # Compute the training-time loss value and add it # to the layer using `self.add_loss()`. loss = self.loss_fn(targets, logits, sample_weights) self.add_loss(loss) # Log accuracy as a metric and add it # to the layer using `self.add_metric()`. acc = self.accuracy_fn(targets, logits, sample_weights) self.add_metric(acc, name="accuracy") # Return the inference-time prediction tensor (for `.predict()`). return tf.nn.softmax(logits) """ You can use it in a model with two inputs (input data & targets), compiled without a `loss` argument, like this: """ import numpy as np inputs = keras.Input(shape=(3,), name="inputs") targets = keras.Input(shape=(10,), name="targets") logits = keras.layers.Dense(10)(inputs) predictions = LogisticEndpoint(name="predictions")(logits, targets) model = keras.Model(inputs=[inputs, targets], outputs=predictions) model.compile(optimizer="adam") # No loss argument! data = { "inputs": np.random.random((3, 3)), "targets": np.random.random((3, 10)), } model.fit(data) """ For more information about training multi-input models, see the section **Passing data to multi-input, multi-output models**. """ """ ### Automatically setting apart a validation holdout set In the first end-to-end example you saw, we used the `validation_data` argument to pass a tuple of NumPy arrays `(x_val, y_val)` to the model for evaluating a validation loss and validation metrics at the end of each epoch. Here's another option: the argument `validation_split` allows you to automatically reserve part of your training data for validation. The argument value represents the fraction of the data to be reserved for validation, so it should be set to a number higher than 0 and lower than 1. 
For instance, `validation_split=0.2` means "use 20% of the data for validation", and
`validation_split=0.6` means "use 60% of the data for validation".

The way the validation is computed is by taking the last x% samples of the arrays
received by the `fit()` call, before any shuffling.

Note that you can only use `validation_split` when training with NumPy data.
"""

model = get_compiled_model()
model.fit(x_train, y_train, batch_size=64, validation_split=0.2, epochs=1)

"""
## Training & evaluation from tf.data Datasets

In the past few paragraphs, you've seen how to handle losses, metrics, and optimizers,
and you've seen how to use the `validation_data` and `validation_split` arguments in
`fit()`, when your data is passed as NumPy arrays.

Let's now take a look at the case where your data comes in the form of a
`tf.data.Dataset` object.

The `tf.data` API is a set of utilities in TensorFlow 2.0 for loading and preprocessing
data in a way that's fast and scalable.

For a complete guide about creating `Datasets`, see the
[tf.data documentation](https://www.tensorflow.org/guide/data).

You can pass a `Dataset` instance directly to the methods `fit()`, `evaluate()`, and
`predict()`:
"""

model = get_compiled_model()

# First, let's create a training Dataset instance.
# For the sake of our example, we'll use the same MNIST data as before.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

# Now we get a test dataset.
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = test_dataset.batch(64)

# Since the dataset already takes care of batching,
# we don't pass a `batch_size` argument.
model.fit(train_dataset, epochs=3)

# You can also evaluate or predict on a dataset.
print("Evaluate")
result = model.evaluate(test_dataset)
dict(zip(model.metrics_names, result))

"""
Note that the Dataset is reset at the end of each epoch, so it can be reused for the
next epoch.

If you want to run training only on a specific number of batches from this Dataset, you
can pass the `steps_per_epoch` argument, which specifies how many training steps the
model should run using this Dataset before moving on to the next epoch.

If you do this, the dataset is not reset at the end of each epoch; instead, we just keep
drawing the next batches. The dataset will eventually run out of data (unless it is an
infinitely-looping dataset).
"""

model = get_compiled_model()

# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

# Only use 100 batches per epoch (that's 64 * 100 samples)
model.fit(train_dataset, epochs=3, steps_per_epoch=100)

"""
### Using a validation dataset

You can pass a `Dataset` instance as the `validation_data` argument in `fit()`:
"""

model = get_compiled_model()

# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)

model.fit(train_dataset, epochs=1, validation_data=val_dataset)

"""
At the end of each epoch, the model will iterate over the validation dataset and
compute the validation loss and validation metrics.
If you want to run validation only on a specific number of batches from this dataset,
you can pass the `validation_steps` argument, which specifies how many validation
steps the model should run with the validation dataset before interrupting validation
and moving on to the next epoch:
"""

model = get_compiled_model()

# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)

model.fit(
    train_dataset,
    epochs=1,
    # Only run validation using the first 10 batches of the dataset
    # using the `validation_steps` argument
    validation_data=val_dataset,
    validation_steps=10,
)

"""
Note that the validation dataset will be reset after each use (so that you will always
be evaluating on the same samples from epoch to epoch).

The argument `validation_split` (generating a holdout set from the training data) is
not supported when training from `Dataset` objects, since this feature requires the
ability to index the samples of the datasets, which is not possible in general with
the `Dataset` API.
"""

"""
## Other input formats supported

Besides NumPy arrays, eager tensors, and TensorFlow `Datasets`, it's possible to train
a Keras model using Pandas dataframes, or from Python generators that yield batches of
data & labels.

In particular, the `keras.utils.Sequence` class offers a simple interface to build
Python data generators that are multiprocessing-aware and can be shuffled.

In general, we recommend that you use:

- NumPy input data if your data is small and fits in memory
- `Dataset` objects if you have large datasets and you need to do distributed training
- `Sequence` objects if you have large datasets and you need to do a lot of custom
Python-side processing that cannot be done in TensorFlow (e.g. if you rely on external
libraries for data loading or preprocessing).

## Using a `keras.utils.Sequence` object as input

`keras.utils.Sequence` is a utility that you can subclass to obtain a Python generator
with two important properties:

- It works well with multiprocessing.
- It can be shuffled (e.g. when passing `shuffle=True` in `fit()`).

A `Sequence` must implement two methods:

- `__getitem__`
- `__len__`

The method `__getitem__` should return a complete batch.
If you want to modify your dataset between epochs, you may implement `on_epoch_end`.

Here's a quick example:

```python
from skimage.io import imread
from skimage.transform import resize
from tensorflow.keras.utils import Sequence
import numpy as np

# Here, `filenames` is a list of paths to the images
# and `labels` are the associated labels.

class CIFAR10Sequence(Sequence):
    def __init__(self, filenames, labels, batch_size):
        self.filenames, self.labels = filenames, labels
        self.batch_size = batch_size

    def __len__(self):
        return int(np.ceil(len(self.filenames) / float(self.batch_size)))

    def __getitem__(self, idx):
        batch_x = self.filenames[idx * self.batch_size:(idx + 1) * self.batch_size]
        batch_y = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size]
        return np.array([
            resize(imread(filename), (200, 200))
               for filename in batch_x]), np.array(batch_y)

sequence = CIFAR10Sequence(filenames, labels, batch_size)
model.fit(sequence, epochs=10)
```
"""

"""
## Using sample weighting and class weighting

With the default settings, the weight of a sample is decided by its frequency in the
dataset.
There are two methods to weight the data, independent of sample frequency:

* Class weights
* Sample weights
"""

"""
### Class weights

This is set by passing a dictionary to the `class_weight` argument to `Model.fit()`.
This dictionary maps class indices to the weight that should be used for samples
belonging to this class.

This can be used to balance classes without resampling, or to train a model that gives
more importance to a particular class.

For instance, if class "0" is half as represented as class "1" in your data, you could
use `Model.fit(..., class_weight={0: 1., 1: 0.5})`.
"""

"""
Here's a NumPy example where we use class weights or sample weights to give more
importance to the correct classification of class #5 (which is the digit "5" in the
MNIST dataset).
"""

import numpy as np

class_weight = {
    0: 1.0,
    1: 1.0,
    2: 1.0,
    3: 1.0,
    4: 1.0,
    # Set weight "2" for class "5",
    # making this class 2x more important
    5: 2.0,
    6: 1.0,
    7: 1.0,
    8: 1.0,
    9: 1.0,
}

print("Fit with class weight")
model = get_compiled_model()
model.fit(x_train, y_train, class_weight=class_weight, batch_size=64, epochs=1)

"""
### Sample weights

For fine-grained control, or if you are not building a classifier, you can use "sample
weights".

- When training from NumPy data: Pass the `sample_weight` argument to `Model.fit()`.
- When training from `tf.data` or any other sort of iterator: Yield
`(input_batch, label_batch, sample_weight_batch)` tuples.

A "sample weights" array is an array of numbers that specify how much weight each
sample in a batch should have in computing the total loss. It is commonly used in
imbalanced classification problems (the idea being to give more weight to rarely-seen
classes).

When the weights used are ones and zeros, the array can be used as a *mask* for the loss
function (entirely discarding the contribution of certain samples to the total loss).
"""

sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0

print("Fit with sample weight")
model = get_compiled_model()
model.fit(x_train, y_train, sample_weight=sample_weight, batch_size=64, epochs=1)

"""
Here's a matching `Dataset` example:
"""

sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0

# Create a Dataset that includes sample weights
# (3rd element in the return tuple).
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train, sample_weight))

# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

model = get_compiled_model()
model.fit(train_dataset, epochs=1)

"""
## Passing data to multi-input, multi-output models

In the previous examples, we were considering a model with a single input (a tensor of
shape `(784,)`) and a single output (a prediction tensor of shape `(10,)`). But what
about models that have multiple inputs or outputs?

Consider the following model, which has an image input of shape `(32, 32, 3)` (that's
`(height, width, channels)`) and a timeseries input of shape `(None, 10)` (that's
`(timesteps, features)`). Our model will have two outputs computed from the
combination of these inputs: a "score" (of shape `(1,)`) and a probability
distribution over five classes (of shape `(5,)`).
""" image_input = keras.Input(shape=(32, 32, 3), name="img_input") timeseries_input = keras.Input(shape=(None, 10), name="ts_input") x1 = layers.Conv2D(3, 3)(image_input) x1 = layers.GlobalMaxPooling2D()(x1) x2 = layers.Conv1D(3, 3)(timeseries_input) x2 = layers.GlobalMaxPooling1D()(x2) x = layers.concatenate([x1, x2]) score_output = layers.Dense(1, name="score_output")(x) class_output = layers.Dense(5, name="class_output")(x) model = keras.Model( inputs=[image_input, timeseries_input], outputs=[score_output, class_output] ) """ Let's plot this model, so you can clearly see what we're doing here (note that the shapes shown in the plot are batch shapes, rather than per-sample shapes). """ keras.utils.plot_model(model, "multi_input_and_output_model.png", show_shapes=True) """ At compilation time, we can specify different losses to different outputs, by passing the loss functions as a list: """ model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()], ) """ If we only passed a single loss function to the model, the same loss function would be applied to every output (which is not appropriate here). Likewise for metrics: """ model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()], metrics=[ [ keras.metrics.MeanAbsolutePercentageError(), keras.metrics.MeanAbsoluteError(), ], [keras.metrics.CategoricalAccuracy()], ], ) """ Since we gave names to our output layers, we could also specify per-output losses and metrics via a dict: """ model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss={ "score_output": keras.losses.MeanSquaredError(), "class_output": keras.losses.CategoricalCrossentropy(), }, metrics={ "score_output": [ keras.metrics.MeanAbsolutePercentageError(), keras.metrics.MeanAbsoluteError(), ], "class_output": [keras.metrics.CategoricalAccuracy()], }, ) """ We recommend the use of explicit names and dicts if you have more than 2 outputs. It's possible to give different weights to different output-specific losses (for instance, one might wish to privilege the "score" loss in our example, by giving to 2x the importance of the class loss), using the `loss_weights` argument: """ model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss={ "score_output": keras.losses.MeanSquaredError(), "class_output": keras.losses.CategoricalCrossentropy(), }, metrics={ "score_output": [ keras.metrics.MeanAbsolutePercentageError(), keras.metrics.MeanAbsoluteError(), ], "class_output": [keras.metrics.CategoricalAccuracy()], }, loss_weights={"score_output": 2.0, "class_output": 1.0}, ) """ You could also chose not to compute a loss for certain outputs, if these outputs meant for prediction but not for training: """ # List loss version model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss=[None, keras.losses.CategoricalCrossentropy()], ) # Or dict loss version model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss={"class_output": keras.losses.CategoricalCrossentropy()}, ) """ Passing data to a multi-input or multi-output model in fit works in a similar way as specifying a loss function in compile: you can pass **lists of NumPy arrays** (with 1:1 mapping to the outputs that received a loss function) or **dicts mapping output names to NumPy arrays**. 
""" model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()], ) # Generate dummy NumPy data img_data = np.random.random_sample(size=(100, 32, 32, 3)) ts_data = np.random.random_sample(size=(100, 20, 10)) score_targets = np.random.random_sample(size=(100, 1)) class_targets = np.random.random_sample(size=(100, 5)) # Fit on lists model.fit([img_data, ts_data], [score_targets, class_targets], batch_size=32, epochs=1) # Alternatively, fit on dicts model.fit( {"img_input": img_data, "ts_input": ts_data}, {"score_output": score_targets, "class_output": class_targets}, batch_size=32, epochs=1, ) """ Here's the `Dataset` use case: similarly as what we did for NumPy arrays, the `Dataset` should return a tuple of dicts. """ train_dataset = tf.data.Dataset.from_tensor_slices( ( {"img_input": img_data, "ts_input": ts_data}, {"score_output": score_targets, "class_output": class_targets}, ) ) train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64) model.fit(train_dataset, epochs=1) """ ## Using callbacks Callbacks in Keras are objects that are called at different points during training (at the start of an epoch, at the end of a batch, at the end of an epoch, etc.) and which can be used to implement behaviors such as: - Doing validation at different points during training (beyond the built-in per-epoch validation) - Checkpointing the model at regular intervals or when it exceeds a certain accuracy threshold - Changing the learning rate of the model when training seems to be plateauing - Doing fine-tuning of the top layers when training seems to be plateauing - Sending email or instant message notifications when training ends or where a certain performance threshold is exceeded - Etc. Callbacks can be passed as a list to your call to `fit()`: """ model = get_compiled_model() callbacks = [ keras.callbacks.EarlyStopping( # Stop training when `val_loss` is no longer improving monitor="val_loss", # "no longer improving" being defined as "no better than 1e-2 less" min_delta=1e-2, # "no longer improving" being further defined as "for at least 2 epochs" patience=2, verbose=1, ) ] model.fit( x_train, y_train, epochs=20, batch_size=64, callbacks=callbacks, validation_split=0.2, ) """ ### Many built-in callbacks are available - `ModelCheckpoint`: Periodically save the model. - `EarlyStopping`: Stop training when training is no longer improving the validation metrics. - `TensorBoard`: periodically write model logs that can be visualized in [TensorBoard](https://www.tensorflow.org/tensorboard) (more details in the section "Visualization"). - `CSVLogger`: streams loss and metrics data to a CSV file. - etc. See the [callbacks documentation](/api/callbacks/) for the complete list. ### Writing your own callback You can create a custom callback by extending the base class `keras.callbacks.Callback`. A callback has access to its associated model through the class property `self.model`. Make sure to read the [complete guide to writing custom callbacks](/guides/writing_your_own_callbacks/). Here's a simple example saving a list of per-batch loss values during training: """ class LossHistory(keras.callbacks.Callback): def on_train_begin(self, logs): self.per_batch_losses = [] def on_batch_end(self, batch, logs): self.per_batch_losses.append(logs.get("loss")) """ ## Checkpointing models When you're training model on relatively large datasets, it's crucial to save checkpoints of your model at frequent intervals. 
The easiest way to achieve this is with the `ModelCheckpoint` callback:
"""

model = get_compiled_model()

callbacks = [
    keras.callbacks.ModelCheckpoint(
        # Path where to save the model
        # The two parameters below mean that we will overwrite
        # the current checkpoint if and only if
        # the `val_loss` score has improved.
        # The saved model name will include the current epoch.
        filepath="mymodel_{epoch}",
        save_best_only=True,  # Only save a model if `val_loss` has improved.
        monitor="val_loss",
        verbose=1,
    )
]
model.fit(
    x_train, y_train, epochs=2, batch_size=64, callbacks=callbacks, validation_split=0.2
)

"""
The `ModelCheckpoint` callback can be used to implement fault-tolerance:
the ability to restart training from the last saved state of the model in case training
gets randomly interrupted. Here's a basic example:
"""

import os

# Prepare a directory to store all the checkpoints.
checkpoint_dir = "./ckpt"
if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)


def make_or_restore_model():
    # Either restore the latest model, or create a fresh one
    # if there is no checkpoint available.
    checkpoints = [checkpoint_dir + "/" + name for name in os.listdir(checkpoint_dir)]
    if checkpoints:
        latest_checkpoint = max(checkpoints, key=os.path.getctime)
        print("Restoring from", latest_checkpoint)
        return keras.models.load_model(latest_checkpoint)
    print("Creating a new model")
    return get_compiled_model()


model = make_or_restore_model()
callbacks = [
    # This callback saves a SavedModel every 100 batches.
    # We include the training loss in the saved model name.
    keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_dir + "/ckpt-loss={loss:.2f}", save_freq=100
    )
]
model.fit(x_train, y_train, epochs=1, callbacks=callbacks)

"""
You can also write your own callback for saving and restoring models.

For a complete guide on serialization and saving, see the
[guide to saving and serializing Models](/guides/serialization_and_saving/).
"""

"""
## Using learning rate schedules

A common pattern when training deep learning models is to gradually reduce the learning
rate as training progresses. This is generally known as "learning rate decay".

The learning rate decay schedule could be static (fixed in advance, as a function of
the current epoch or the current batch index), or dynamic (responding to the current
behavior of the model, in particular the validation loss).

### Passing a schedule to an optimizer

You can easily use a static learning rate decay schedule by passing a schedule object
as the `learning_rate` argument in your optimizer:
"""

initial_learning_rate = 0.1
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True
)

optimizer = keras.optimizers.RMSprop(learning_rate=lr_schedule)

"""
Several built-in schedules are available: `ExponentialDecay`, `PiecewiseConstantDecay`,
`PolynomialDecay`, and `InverseTimeDecay`.

### Using callbacks to implement a dynamic learning rate schedule

A dynamic learning rate schedule (for instance, decreasing the learning rate when the
validation loss is no longer improving) cannot be achieved with these schedule objects
since the optimizer does not have access to validation metrics.

However, callbacks do have access to all metrics, including validation metrics! You can
thus achieve this pattern by using a callback that modifies the current learning rate
on the optimizer. In fact, this is even built-in as the `ReduceLROnPlateau` callback.
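
As a minimal, hedged sketch (an addition to this guide, not part of the original
text), here is how `ReduceLROnPlateau` could be passed to `fit()`. The `factor`,
`patience`, and `min_lr` values below are arbitrary illustrative settings, not
recommendations:
"""

model = get_compiled_model()
callbacks = [
    # Halve the learning rate when `val_loss` has stopped improving for 2 epochs,
    # and never go below 1e-5.
    keras.callbacks.ReduceLROnPlateau(
        monitor="val_loss", factor=0.5, patience=2, min_lr=1e-5, verbose=1
    )
]
model.fit(
    x_train, y_train, epochs=3, batch_size=64, callbacks=callbacks, validation_split=0.2
)

"""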
""" """ ## Visualizing loss and metrics during training The best way to keep an eye on your model during training is to use [TensorBoard](https://www.tensorflow.org/tensorboard), a browser-based application that you can run locally that provides you with: - Live plots of the loss and metrics for training and evaluation - (optionally) Visualizations of the histograms of your layer activations - (optionally) 3D visualizations of the embedding spaces learned by your `Embedding` layers If you have installed TensorFlow with pip, you should be able to launch TensorBoard from the command line: ``` tensorboard --logdir=/full_path_to_your_logs ``` """ """ ### Using the TensorBoard callback The easiest way to use TensorBoard with a Keras model and the fit method is the `TensorBoard` callback. In the simplest case, just specify where you want the callback to write logs, and you're good to go: """ keras.callbacks.TensorBoard( log_dir="/full_path_to_your_logs", histogram_freq=0, # How often to log histogram visualizations embeddings_freq=0, # How often to log embedding visualizations update_freq="epoch", ) # How often to write logs (default: once per epoch) """ For more information, see the [documentation for the `TensorBoard` callback](/api/callbacks/tensorboard/). """
[ "tensorflow.keras.metrics.BinaryAccuracy", "tensorflow.keras.models.load_model", "tensorflow.keras.losses.CategoricalCrossentropy", "tensorflow.reduce_sum", "tensorflow.cast", "numpy.random.random_sample", "tensorflow.keras.metrics.CategoricalAccuracy", "tensorflow.keras.callbacks.TensorBoard", "tensorflow.keras.metrics.MeanAbsolutePercentageError", "tensorflow.keras.Input", "tensorflow.keras.optimizers.RMSprop", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.losses.BinaryCrossentropy", "tensorflow.square", "tensorflow.argmax", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.layers.GlobalMaxPooling1D", "tensorflow.keras.layers.Dense", "tensorflow.keras.losses.MeanSquaredError", "tensorflow.keras.utils.plot_model", "tensorflow.keras.Model", "tensorflow.one_hot", "tensorflow.nn.softmax", "numpy.random.random", "tensorflow.multiply", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.keras.layers.Conv1D", "tensorflow.keras.optimizers.schedules.ExponentialDecay", "tensorflow.keras.layers.concatenate", "tensorflow.keras.datasets.mnist.load_data", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.backend.std", "tensorflow.keras.metrics.MeanAbsoluteError", "tensorflow.keras.layers.GlobalMaxPooling2D", "tensorflow.keras.metrics.SparseCategoricalAccuracy" ]
guides/training_with_built_in_methods.py
[(54, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(784,)', 'name': '"""digits"""'}), False, 'from tensorflow import keras\n'), (59, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), False, 'from tensorflow import keras\n'), (71, 'tensorflow.keras.datasets.mnist.load_data', 'keras.datasets.mnist.load_data', ([], {}), False, 'from tensorflow import keras\n'), (246, 'tensorflow.one_hot', 'tf.one_hot', (['y_train'], {'depth': '(10)'}), True, 'import tensorflow as tf\n'), (281, 'tensorflow.one_hot', 'tf.one_hot', (['y_train'], {'depth': '(10)'}), True, 'import tensorflow as tf\n'), (358, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(784,)', 'name': '"""digits"""'}), False, 'from tensorflow import keras\n'), (367, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), False, 'from tensorflow import keras\n'), (394, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(784,)', 'name': '"""digits"""'}), False, 'from tensorflow import keras\n'), (403, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), False, 'from tensorflow import keras\n'), (418, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(784,)', 'name': '"""digits"""'}), False, 'from tensorflow import keras\n'), (422, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), False, 'from tensorflow import keras\n'), (472, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(3,)', 'name': '"""inputs"""'}), False, 'from tensorflow import keras\n'), (473, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(10,)', 'name': '"""targets"""'}), False, 'from tensorflow import keras\n'), (477, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[inputs, targets]', 'outputs': 'predictions'}), False, 'from tensorflow import keras\n'), (538, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_train, y_train)'], {}), True, 'import tensorflow as tf\n'), (543, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_test, y_test)'], {}), True, 'import tensorflow as tf\n'), (571, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_train, y_train)'], {}), True, 'import tensorflow as tf\n'), (586, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_train, y_train)'], {}), True, 'import tensorflow as tf\n'), (590, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_val, y_val)'], {}), True, 'import tensorflow as tf\n'), (608, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_train, y_train)'], {}), True, 'import tensorflow as tf\n'), (612, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_val, y_val)'], {}), True, 'import tensorflow as tf\n'), (789, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_train, y_train, sample_weight)'], {}), True, 'import tensorflow as tf\n'), (811, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(32, 32, 3)', 'name': '"""img_input"""'}), False, 'from tensorflow import keras\n'), (812, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(None, 10)', 'name': '"""ts_input"""'}), False, 'from tensorflow import keras\n'), (820, 'tensorflow.keras.layers.concatenate', 'layers.concatenate', (['[x1, x2]'], {}), False, 'from 
tensorflow.keras import layers\n'), (825, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[image_input, timeseries_input]', 'outputs': '[score_output, class_output]'}), False, 'from tensorflow import keras\n'), (834, 'tensorflow.keras.utils.plot_model', 'keras.utils.plot_model', (['model', '"""multi_input_and_output_model.png"""'], {'show_shapes': '(True)'}), False, 'from tensorflow import keras\n'), (939, 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': '(100, 32, 32, 3)'}), True, 'import numpy as np\n'), (940, 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': '(100, 20, 10)'}), True, 'import numpy as np\n'), (941, 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': '(100, 1)'}), True, 'import numpy as np\n'), (942, 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': '(100, 5)'}), True, 'import numpy as np\n'), (960, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (["({'img_input': img_data, 'ts_input': ts_data}, {'score_output':\n score_targets, 'class_output': class_targets})"], {}), True, 'import tensorflow as tf\n'), (1135, 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'keras.optimizers.schedules.ExponentialDecay', (['initial_learning_rate'], {'decay_steps': '(100000)', 'decay_rate': '(0.96)', 'staircase': '(True)'}), False, 'from tensorflow import keras\n'), (1139, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'learning_rate': 'lr_schedule'}), False, 'from tensorflow import keras\n'), (1186, 'tensorflow.keras.callbacks.TensorBoard', 'keras.callbacks.TensorBoard', ([], {'log_dir': '"""/full_path_to_your_logs"""', 'histogram_freq': '(0)', 'embeddings_freq': '(0)', 'update_freq': '"""epoch"""'}), False, 'from tensorflow import keras\n'), (55, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""dense_1"""'}), False, 'from tensorflow.keras import layers\n'), (56, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""dense_2"""'}), False, 'from tensorflow.keras import layers\n'), (57, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {'activation': '"""softmax"""', 'name': '"""predictions"""'}), False, 'from tensorflow.keras import layers\n'), (182, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(784,)', 'name': '"""digits"""'}), False, 'from tensorflow import keras\n'), (186, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), False, 'from tensorflow import keras\n'), (359, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""dense_1"""'}), False, 'from tensorflow.keras import layers\n'), (364, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""dense_2"""'}), False, 'from tensorflow.keras import layers\n'), (365, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {'name': '"""predictions"""'}), False, 'from tensorflow.keras import layers\n'), (395, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""dense_1"""'}), False, 'from tensorflow.keras import layers\n'), (400, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""dense_2"""'}), False, 'from tensorflow.keras import layers\n'), (401, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {'name': 
'"""predictions"""'}), False, 'from tensorflow.keras import layers\n'), (419, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""dense_1"""'}), False, 'from tensorflow.keras import layers\n'), (420, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""dense_2"""'}), False, 'from tensorflow.keras import layers\n'), (421, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {'name': '"""predictions"""'}), False, 'from tensorflow.keras import layers\n'), (426, 'tensorflow.keras.backend.std', 'keras.backend.std', (['x1'], {}), False, 'from tensorflow import keras\n'), (474, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {}), False, 'from tensorflow import keras\n'), (481, 'numpy.random.random', 'np.random.random', (['(3, 3)'], {}), True, 'import numpy as np\n'), (482, 'numpy.random.random', 'np.random.random', (['(3, 10)'], {}), True, 'import numpy as np\n'), (814, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(3)', '(3)'], {}), False, 'from tensorflow.keras import layers\n'), (815, 'tensorflow.keras.layers.GlobalMaxPooling2D', 'layers.GlobalMaxPooling2D', ([], {}), False, 'from tensorflow.keras import layers\n'), (817, 'tensorflow.keras.layers.Conv1D', 'layers.Conv1D', (['(3)', '(3)'], {}), False, 'from tensorflow.keras import layers\n'), (818, 'tensorflow.keras.layers.GlobalMaxPooling1D', 'layers.GlobalMaxPooling1D', ([], {}), False, 'from tensorflow.keras import layers\n'), (822, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'name': '"""score_output"""'}), False, 'from tensorflow.keras import layers\n'), (823, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(5)'], {'name': '"""class_output"""'}), False, 'from tensorflow.keras import layers\n'), (993, 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0.01)', 'patience': '(2)', 'verbose': '(1)'}), False, 'from tensorflow import keras\n'), (1059, 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': '"""mymodel_{epoch}"""', 'save_best_only': '(True)', 'monitor': '"""val_loss"""', 'verbose': '(1)'}), False, 'from tensorflow import keras\n'), (1085, 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), False, 'import os\n'), (1086, 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), False, 'import os\n'), (1105, 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': "(checkpoint_dir + '/ckpt-loss={loss:.2f}')", 'save_freq': '(100)'}), False, 'from tensorflow import keras\n'), (91, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {}), False, 'from tensorflow import keras\n'), (93, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {}), False, 'from tensorflow import keras\n'), (152, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'learning_rate': '(0.001)'}), False, 'from tensorflow import keras\n'), (153, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {}), False, 'from tensorflow import keras\n'), (183, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""dense_1"""'}), False, 'from tensorflow.keras import layers\n'), (184, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 
'name': '"""dense_2"""'}), False, 'from tensorflow.keras import layers\n'), (185, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {'activation': '"""softmax"""', 'name': '"""predictions"""'}), False, 'from tensorflow.keras import layers\n'), (239, 'tensorflow.square', 'tf.square', (['(y_true - y_pred)'], {}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), False, 'from tensorflow import keras\n'), (279, 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), False, 'from tensorflow import keras\n'), (314, 'tensorflow.cast', 'tf.cast', (['values', '"""float32"""'], {}), True, 'import tensorflow as tf\n'), (330, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'learning_rate': '(0.001)'}), False, 'from tensorflow import keras\n'), (331, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {}), False, 'from tensorflow import keras\n'), (369, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'learning_rate': '(0.001)'}), False, 'from tensorflow import keras\n'), (370, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), False, 'from tensorflow import keras\n'), (405, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'learning_rate': '(0.001)'}), False, 'from tensorflow import keras\n'), (406, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), False, 'from tensorflow import keras\n'), (424, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x1'], {}), True, 'import tensorflow as tf\n'), (429, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', (['(0.001)'], {}), False, 'from tensorflow import keras\n'), (430, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), False, 'from tensorflow import keras\n'), (447, 'tensorflow.keras.losses.BinaryCrossentropy', 'keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), False, 'from tensorflow import keras\n'), (448, 'tensorflow.keras.metrics.BinaryAccuracy', 'keras.metrics.BinaryAccuracy', ([], {}), False, 'from tensorflow import keras\n'), (462, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), True, 'import tensorflow as tf\n'), (842, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', (['(0.001)'], {}), False, 'from tensorflow import keras\n'), (854, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', (['(0.001)'], {}), False, 'from tensorflow import keras\n'), (871, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', (['(0.001)'], {}), False, 'from tensorflow import keras\n'), (894, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', (['(0.001)'], {}), False, 'from tensorflow import keras\n'), (916, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', (['(0.001)'], {}), False, 'from tensorflow import keras\n'), (922, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', (['(0.001)'], {}), False, 'from tensorflow import keras\n'), (934, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', (['(0.001)'], {}), False, 'from tensorflow import keras\n'), (1096, 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['latest_checkpoint'], {}), False, 'from 
tensorflow import keras\n'), (95, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'keras.metrics.SparseCategoricalAccuracy', ([], {}), False, 'from tensorflow import keras\n'), (154, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'keras.metrics.SparseCategoricalAccuracy', ([], {}), False, 'from tensorflow import keras\n'), (273, 'tensorflow.square', 'tf.square', (['(y_true - y_pred)'], {}), True, 'import tensorflow as tf\n'), (274, 'tensorflow.square', 'tf.square', (['(0.5 - y_pred)'], {}), True, 'import tensorflow as tf\n'), (312, 'tensorflow.argmax', 'tf.argmax', (['y_pred'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (313, 'tensorflow.cast', 'tf.cast', (['y_true', '"""int32"""'], {}), True, 'import tensorflow as tf\n'), (313, 'tensorflow.cast', 'tf.cast', (['y_pred', '"""int32"""'], {}), True, 'import tensorflow as tf\n'), (316, 'tensorflow.cast', 'tf.cast', (['sample_weight', '"""float32"""'], {}), True, 'import tensorflow as tf\n'), (317, 'tensorflow.multiply', 'tf.multiply', (['values', 'sample_weight'], {}), True, 'import tensorflow as tf\n'), (318, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['values'], {}), True, 'import tensorflow as tf\n'), (389, 'tensorflow.keras.backend.std', 'keras.backend.std', (['inputs'], {}), False, 'from tensorflow import keras\n'), (843, 'tensorflow.keras.losses.MeanSquaredError', 'keras.losses.MeanSquaredError', ([], {}), False, 'from tensorflow import keras\n'), (843, 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {}), False, 'from tensorflow import keras\n'), (855, 'tensorflow.keras.losses.MeanSquaredError', 'keras.losses.MeanSquaredError', ([], {}), False, 'from tensorflow import keras\n'), (855, 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {}), False, 'from tensorflow import keras\n'), (873, 'tensorflow.keras.losses.MeanSquaredError', 'keras.losses.MeanSquaredError', ([], {}), False, 'from tensorflow import keras\n'), (874, 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {}), False, 'from tensorflow import keras\n'), (896, 'tensorflow.keras.losses.MeanSquaredError', 'keras.losses.MeanSquaredError', ([], {}), False, 'from tensorflow import keras\n'), (897, 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {}), False, 'from tensorflow import keras\n'), (917, 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {}), False, 'from tensorflow import keras\n'), (923, 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {}), False, 'from tensorflow import keras\n'), (935, 'tensorflow.keras.losses.MeanSquaredError', 'keras.losses.MeanSquaredError', ([], {}), False, 'from tensorflow import keras\n'), (935, 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {}), False, 'from tensorflow import keras\n'), (1092, 'os.listdir', 'os.listdir', (['checkpoint_dir'], {}), False, 'import os\n'), (354, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['inputs'], {}), True, 'import tensorflow as tf\n'), (858, 'tensorflow.keras.metrics.MeanAbsolutePercentageError', 'keras.metrics.MeanAbsolutePercentageError', ([], {}), False, 'from tensorflow import keras\n'), (859, 'tensorflow.keras.metrics.MeanAbsoluteError', 'keras.metrics.MeanAbsoluteError', ([], {}), False, 'from tensorflow import keras\n'), (861, 
'tensorflow.keras.metrics.CategoricalAccuracy', 'keras.metrics.CategoricalAccuracy', ([], {}), False, 'from tensorflow import keras\n'), (878, 'tensorflow.keras.metrics.MeanAbsolutePercentageError', 'keras.metrics.MeanAbsolutePercentageError', ([], {}), False, 'from tensorflow import keras\n'), (879, 'tensorflow.keras.metrics.MeanAbsoluteError', 'keras.metrics.MeanAbsoluteError', ([], {}), False, 'from tensorflow import keras\n'), (881, 'tensorflow.keras.metrics.CategoricalAccuracy', 'keras.metrics.CategoricalAccuracy', ([], {}), False, 'from tensorflow import keras\n'), (901, 'tensorflow.keras.metrics.MeanAbsolutePercentageError', 'keras.metrics.MeanAbsolutePercentageError', ([], {}), False, 'from tensorflow import keras\n'), (902, 'tensorflow.keras.metrics.MeanAbsoluteError', 'keras.metrics.MeanAbsoluteError', ([], {}), False, 'from tensorflow import keras\n'), (904, 'tensorflow.keras.metrics.CategoricalAccuracy', 'keras.metrics.CategoricalAccuracy', ([], {}), False, 'from tensorflow import keras\n')]
RiceD2KLab/TCH_CardiacSignals_F20
ea6e84703086ddb7bfc5ba164aa67acdc9e78b7d
import numpy as np
import os
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout, RepeatVector, TimeDistributed


def create_model(X):
    # LSTM autoencoder: encode each windowed sequence into a 10-unit state,
    # repeat it across the time axis, and decode back to the input shape.
    model = Sequential()
    model.add(LSTM(10, input_shape=(X.shape[1], X.shape[2])))
    model.add(Dropout(rate=0.2))
    model.add(RepeatVector(X.shape[1]))
    model.add(LSTM(10, return_sequences=True))
    model.add(Dropout(rate=0.2))
    model.add(TimeDistributed(Dense(X.shape[2])))
    model.compile(optimizer='adam', loss='mse')
    model.summary()
    history = model.fit(X, X, epochs=100, batch_size=1, validation_split=0.1,
                        callbacks=[keras.callbacks.EarlyStopping(monitor='loss', patience=3, mode='min')],
                        shuffle=False)
    model.save('Working_Data/lstm_model')

    # plot the training and validation loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig("Working_Data/lstm_loss.png")
    plt.show()
    print("loss of the model is: ")
    print(history.history['loss'])


def create_sequences(data):
    # Slide a window of `time_steps` heartbeats over the data; each heartbeat is
    # a (100, 4) array (100 samples x 4 leads), flattened into the window.
    Xs, ys = [], []
    time_steps = 10
    for i in range(len(data) - time_steps):
        Xs.append(data[i:(i + time_steps)].reshape(100 * time_steps, 4))
        ys.append(data[i + time_steps].reshape(100, 4))
    return np.array(Xs), np.array(ys)


data = np.load(os.path.join("Working_Data/Normalized_Fixed_Dim_HBs_Idx" + str(1) + ".npy"))
data = data[0:1000, :, :]
X, y = create_sequences(data)
print(X.shape, y.shape)

# create_model(X)
model = keras.models.load_model('Working_Data/lstm_model')
# X is already windowed by create_sequences(), so predict directly on the first
# few sequences (re-windowing X here would produce an empty array).
model.predict(X[0:5])
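
# --- Added sketch (not in the original script): scoring reconstructions. ---
# Since this is an autoencoder, a natural next step is to compare the model's
# output against its input. The per-sequence MSE below is an illustrative
# assumption about how reconstruction error might be computed, not the
# project's established method.
reconstructions = model.predict(X[0:5])
mse_per_sequence = np.mean(np.square(X[0:5] - reconstructions), axis=(1, 2))
print("reconstruction MSE per sequence:", mse_per_sequence)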
[ "tensorflow.keras.models.load_model", "matplotlib.pyplot.legend", "tensorflow.keras.layers.Dropout", "matplotlib.pyplot.title", "tensorflow.keras.layers.Dense", "tensorflow.keras.callbacks.EarlyStopping", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "tensorflow.keras.layers.RepeatVector", "tensorflow.keras.layers.LSTM", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "tensorflow.keras.models.Sequential", "matplotlib.pyplot.ylabel" ]
src/archive/LSTM/LSTMAEts10.py
[(60, 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['"""Working_Data/lstm_model"""'], {}), False, 'from tensorflow import keras\n'), (12, 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras.models import Sequential\n'), (30, 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), True, 'import matplotlib.pyplot as plt\n'), (31, 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), True, 'import matplotlib.pyplot as plt\n'), (32, 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (33, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (34, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (35, 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), True, 'import matplotlib.pyplot as plt\n'), (36, 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Working_Data/lstm_loss.png"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (37, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (13, 'tensorflow.keras.layers.LSTM', 'LSTM', (['(10)'], {'input_shape': '(X.shape[1], X.shape[2])'}), False, 'from tensorflow.keras.layers import Dense, LSTM, Dropout, RepeatVector, TimeDistributed\n'), (14, 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.2)'}), False, 'from tensorflow.keras.layers import Dense, LSTM, Dropout, RepeatVector, TimeDistributed\n'), (15, 'tensorflow.keras.layers.RepeatVector', 'RepeatVector', (['X.shape[1]'], {}), False, 'from tensorflow.keras.layers import Dense, LSTM, Dropout, RepeatVector, TimeDistributed\n'), (16, 'tensorflow.keras.layers.LSTM', 'LSTM', (['(10)'], {'return_sequences': '(True)'}), False, 'from tensorflow.keras.layers import Dense, LSTM, Dropout, RepeatVector, TimeDistributed\n'), (17, 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.2)'}), False, 'from tensorflow.keras.layers import Dense, LSTM, Dropout, RepeatVector, TimeDistributed\n'), (50, 'numpy.array', 'np.array', (['Xs'], {}), True, 'import numpy as np\n'), (50, 'numpy.array', 'np.array', (['ys'], {}), True, 'import numpy as np\n'), (18, 'tensorflow.keras.layers.Dense', 'Dense', (['X.shape[2]'], {}), False, 'from tensorflow.keras.layers import Dense, LSTM, Dropout, RepeatVector, TimeDistributed\n'), (23, 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""loss"""', 'patience': '(3)', 'mode': '"""min"""'}), False, 'from tensorflow import keras\n')]
Dazzid/Deep_Learning_Techniques_Applied_to_Estimate_Music_Gestural_Patterns
4a61a3d85429a978cb520a9efacee537747f813d
# convlstm model
import numpy as np
import csv
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib as mpl

# load a single file as a numpy array
def load_file(filepath):
    data = []
    with open(filepath) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            data.append(row)
    return np.array(data)

# load a list of files and return as a 3d numpy array
def load_group(filenames, prefix=''):
    loaded = list()
    for name in filenames:
        data = load_file(prefix + name)
        loaded.append(data)
    # stack group so that features are the 3rd dimension
    loaded = np.dstack(loaded)
    return loaded

# load a dataset group, such as train or test
def load_dataset_group(group, prefix=''):
    filepath = prefix + group
    # load all 9 channel files (accelerometer, gyroscope and Euler angles) as a single array
    filenames = ['01_acc_x.csv', '02_acc_y.csv', '03_acc_z.csv',
                 '04_gyro_x.csv', '05_gyro_y.csv', '06_gyro_z.csv',
                 '07_euler_x.csv', '08_euler_y.csv', '09_euler_z.csv']
    # load input data
    X = load_group(filenames, filepath).astype(np.float64)
    # load class output (np.int is deprecated, so use the builtin int)
    y = load_file(prefix + group + '10_label.csv').astype(int)
    return X, y

# load the dataset, returns train and test X and y elements
def load_dataset(prefix=''):
    # load all train
    trainX, trainy = load_dataset_group('train/', prefix + 'data/Gestures/Groups/')
    # load all test
    testX, testy = load_dataset_group('test/', prefix + 'data/Gestures/Groups/')
    # zero-offset class values
    trainy = trainy - 1
    testy = testy - 1
    # one hot encode y
    trainy = tf.keras.utils.to_categorical(trainy)
    testy = tf.keras.utils.to_categorical(testy)
    return trainX, trainy, testX, testy

# fit and evaluate a model
def evaluate_model(trainX, trainy, testX, testy, batches):
    # define model
    batch_size = batches
    verbose, epochs = 0, 50
    n_features, n_outputs = trainX.shape[2], trainy.shape[1]
    # reshape into subsequences (samples, time steps, rows, cols, channels)
    n_steps, n_length = 3, 50
    trainX = trainX.reshape((trainX.shape[0], n_steps, 1, n_length, n_features))
    testX = testX.reshape((testX.shape[0], n_steps, 1, n_length, n_features))
    # define model
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.ConvLSTM2D(64, (1, 3), activation='relu', input_shape=(n_steps, 1, n_length, n_features)))
    model.add(tf.keras.layers.Dropout(0.5))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(100, activation='relu'))
    model.add(tf.keras.layers.Dense(n_outputs, activation='softmax'))
    tf.keras.utils.plot_model(model, show_shapes=False, show_layer_names=True, to_file='figures/Conv_LSTM_Model.png')
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # fit network
    history = model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose, validation_data=(testX, testy))
    # evaluate model
    _, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)
    return accuracy, history

# summarize scores
def summarize_results(scores, params):
    print(scores, params)
    # summarize mean and standard deviation
    for i in range(len(scores)):
        m, s = np.mean(scores[i]), np.std(scores[i])
        print('Param = %d: %.3f%% (+/-%.3f)' % (params[i], m, s))
    # boxplot of scores
    plt.boxplot(scores, labels=params)
    plt.savefig('figures/ConvLSTM2D.png')
    plt.show()

# run an experiment
def run_experiment(repeats=10):
    # load data
    trainX, trainy, testX, testy = load_dataset()
    final_scores = list()
    batches = [8, 16, 32, 64, 128, 256]
    for i in range(len(batches)):
        scores = list()
        # repeat experiment
        for r in range(repeats):
            score, history = evaluate_model(trainX, trainy, testX, testy,
batches[i]) score = score * 100.0 print('>#%d: %.3f' % (r+1, score)) scores.append(score) # summarize results final_scores.append(scores) summarize_results(final_scores, batches) return score, history def plot_learning_curves(loss, val_loss): plt.plot(np.arange(len(loss)) + 0.5, loss, "b.-", label="Training loss") plt.plot(np.arange(len(val_loss)) + 1, val_loss, "r.-", label="Validation loss") plt.gca().xaxis.set_major_locator(mpl.ticker.MaxNLocator(integer=True)) plt.axis([1, 50, 0, 0.5]) plt.legend(fontsize=14) plt.xlabel("Epochs") plt.ylabel("Loss") plt.grid(True) _, history = run_experiment(10) plot_learning_curves(history.history["loss"], history.history["val_loss"]) plt.show()
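A note on the reshape in evaluate_model: ConvLSTM2D consumes 5-D input of shape (samples, time, rows, cols, channels). A minimal sketch of the subsequence split, assuming the 150-step, 9-channel windows this script implies (the toy array is illustrative only):

import numpy as np

# toy batch: 4 windows of 150 time steps x 9 sensor channels, split into
# 3 subsequences of 50 steps each, exactly as evaluate_model reshapes it
X = np.random.rand(4, 150, 9).astype(np.float32)
n_steps, n_length = 3, 50
X = X.reshape((X.shape[0], n_steps, 1, n_length, X.shape[2]))
print(X.shape)  # (4, 3, 1, 50, 9) == (samples, time, rows, cols, channels)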
[ "matplotlib.pyplot.legend", "tensorflow.keras.Sequential", "numpy.mean", "tensorflow.keras.layers.ConvLSTM2D", "matplotlib.pyplot.gca", "numpy.std", "matplotlib.pyplot.axis", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Dense", "tensorflow.keras.utils.plot_model", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.boxplot", "numpy.dstack", "matplotlib.ticker.MaxNLocator", "matplotlib.pyplot.grid", "tensorflow.keras.layers.Dropout", "tensorflow.keras.utils.to_categorical" ]
L3_Conv_LSTM_Model.py
[(125, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (15, 'numpy.array', 'np.array', (['data'], {}), True, 'import numpy as np\n'), (24, 'numpy.dstack', 'np.dstack', (['loaded'], {}), True, 'import numpy as np\n'), (53, 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['trainy'], {}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['testy'], {}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['model'], {'show_shapes': '(False)', 'show_layer_names': '(True)', 'to_file': '"""figures/Conv_LSTM_Model.png"""'}), True, 'import tensorflow as tf\n'), (90, 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['scores'], {'labels': 'params'}), True, 'import matplotlib.pyplot as plt\n'), (91, 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/ConvLSTM2D.png"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (92, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (117, 'matplotlib.pyplot.axis', 'plt.axis', (['[1, 50, 0, 0.5]'], {}), True, 'import matplotlib.pyplot as plt\n'), (118, 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), True, 'import matplotlib.pyplot as plt\n'), (119, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (120, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (121, 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), True, 'import matplotlib.pyplot as plt\n'), (12, 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), False, 'import csv\n'), (69, 'tensorflow.keras.layers.ConvLSTM2D', 'tf.keras.layers.ConvLSTM2D', (['(64)', '(1, 3)'], {'activation': '"""relu"""', 'input_shape': '(n_steps, 1, n_length, n_features)'}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.5)'], {}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(100)'], {'activation': '"""relu"""'}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['n_outputs'], {'activation': '"""softmax"""'}), True, 'import tensorflow as tf\n'), (116, 'matplotlib.ticker.MaxNLocator', 'mpl.ticker.MaxNLocator', ([], {'integer': '(True)'}), True, 'import matplotlib as mpl\n'), (87, 'numpy.mean', 'np.mean', (['scores[i]'], {}), True, 'import numpy as np\n'), (87, 'numpy.std', 'np.std', (['scores[i]'], {}), True, 'import numpy as np\n'), (116, 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), True, 'import matplotlib.pyplot as plt\n')]
salesforce/CASPI
3e4cd23f4f3d1fa7132ba89805366472c9fe5983
from gensim.models.keyedvectors import KeyedVectors import json from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard, ModelCheckpoint, EarlyStopping from tensorflow.keras.layers import * from tensorflow.keras.metrics import * from tensorflow.keras.models import Sequential, Model from tensorflow.keras.optimizers import * from tensorflow.keras import backend as K import os from random import shuffle import re import time from tqdm import tqdm import traceback import numpy as np import pandas as pd from argparse import ArgumentParser import random class RewardLearning(): def __init__(self, fold, seed, action_space, metric): self.reward_report_template = 'reward_report_{}_{}_.*.csv' word_embed_file_path='./damd_multiwoz/data/embeddings/glove.6B.100d.w2v.txt' self.train_val_fraction=0.8 self.EMBED_DIM=100 self.HIDDEN_DIM=100 self.MAX_POP=10 self.MAX_TIME_STEP=30 self.MAX_GOAL_LEN=50 self.MAX_STATE_LEN=50 self.MAX_ACT_LEN=50 self.reduce_lr_patience = 10 self.es_patience = 25 self.train_reward_split=[0.8,0.9][1] self.batch_size = 50 self.num_epoch = 100 self.fold = fold self.metric = metric self.TRAIN_ON=action_space self.root_path = './damd_multiwoz' self.dataset=json.loads(open(os.path.join(self.root_path,'data/multi-woz-processed/data_for_damd_reward_{}.json'.format(self.fold)),'r').read()) self.glove_kv = KeyedVectors.load_word2vec_format(word_embed_file_path, binary=False, unicode_errors='ignore') self.reward_folder_path= os.path.join(self.root_path,'data/multi-woz-oppe/reward') self.data_for_damd = json.loads(open(os.path.join(self.root_path,'data/multi-woz-processed/data_for_damd.json'), 'r').read()) self.processed_reward_rollouts = None self.embed_cache = {} def metric_score(self, sucess,match,bleu): return sucess+match+2*bleu/100 def load_reward_rollouts(self): reward_record_file_prefix = self.reward_report_template.format(self.fold, self.metric) print('reward_record_file_prefix:',reward_record_file_prefix) rollouts_processed = {} for file in os.listdir(self.reward_folder_path): if re.search(reward_record_file_prefix,file): print('file:',file) reward_record_path = os.path.join(self.reward_folder_path,file) df = pd.read_csv(reward_record_path) for _,row in df.iterrows(): dial_id = row['dial_id'] rollout = json.loads(row['rollout']) turn_nums = [int(z) for z in rollout.keys()] turn_nums = sorted(turn_nums) if dial_id not in rollouts_processed: rollouts_processed[dial_id]={} rollouts_processed[dial_id]['gen']=[] dia_rollout={} rollouts_processed[dial_id]['gen'].append(dia_rollout) dia_rollout['score'] = self.metric_score(row['success'],row['match'],row['bleu']) dia_rollout['rollout']=[] for turn_num in turn_nums: true_act_prob = [1.] 
if 'aspn_prob' in rollout[str(turn_num)]: true_act_prob = np.exp(rollout[str(turn_num)]['aspn_prob']).tolist() dia_rollout['rollout'].append({ 'tn':turn_num, 'act':rollout[str(turn_num)]['aspn_gen'], 'true_act':rollout[str(turn_num)]['aspn'], 'resp':rollout[str(turn_num)]['resp_gen'], 'true_act_prob':true_act_prob }) if 'gt' not in rollouts_processed[dial_id]: rollouts_processed[dial_id]['gt']={} rollouts_processed[dial_id]['gt']['score']=4 rollouts_processed[dial_id]['gt']['rollout']=[] for turn_num in turn_nums: rollouts_processed[dial_id]['gt']['rollout'].append({ 'tn':turn_num, 'act':rollout[str(turn_num)]['aspn'], 'resp':rollout[str(turn_num)]['resp'], 'true_act':rollout[str(turn_num)]['aspn'], 'true_act_prob':[1] }) self.processed_reward_rollouts = rollouts_processed self.dial_ids = list(self.processed_reward_rollouts.keys()) self.load_gt_dia_logs(self.dial_ids) return rollouts_processed def load_gt_dia_logs(self, dial_ids): gt_dia_logs={} for dial_id in dial_ids: goal = self.goal_as_st(self.dataset[dial_id]['goal']) gt_dia_log={ 'goal':goal } gt_dia_logs[dial_id]=gt_dia_log for turn in self.dataset[dial_id]['log']: gt_dia_log[turn['turn_num']]={} gt_dia_log[turn['turn_num']]['state']='begin '+turn['cons_delex']+' end' self.gt_dia_logs = gt_dia_logs def pad_sentence(self, token_embeds,max_seq_len): token_embeds = token_embeds.copy() token_embeds = token_embeds[:max_seq_len].tolist() for i in range(max_seq_len-len(token_embeds)): token_embeds.append(np.zeros(self.EMBED_DIM)) token_embeds = np.array(token_embeds) return token_embeds def pad_time_step(self, sentence_embeds,max_seq_len): sentence_embeds = sentence_embeds[:self.MAX_TIME_STEP] time_padded_sentences = np.array(sentence_embeds) if self.MAX_TIME_STEP>len(sentence_embeds): pad = np.zeros((self.MAX_TIME_STEP-len(sentence_embeds),max_seq_len,self.EMBED_DIM)) time_padded_sentences = np.concatenate([sentence_embeds,pad]) return time_padded_sentences def get_embedding(self, token): token = token.lower() token = token.replace('reqt','request')\ .replace('arriveby','arrive_by')\ .replace('towninfo','town_info')\ .replace('pricerange','price_range')\ .replace('leaveat','leave_at')\ .replace('mutliple','multiple')\ .replace('dontcare','dont_care')\ .replace('-','')\ .replace('addres','address')\ .replace('addressss','address')\ .replace('addresss','address') token = token.strip() if token in self.embed_cache: return self.embed_cache[token] if token in self.glove_kv: embedding = self.glove_kv[token] else: if '_' in token: embeds = [] for sub_token in token.split('_'): embeds.append(self.get_embedding(sub_token)) embedding = np.mean(embeds,axis=0) else: #print('token not in embed:',token) embedding = self.glove_kv['unk'] self.embed_cache[token]=embedding return embedding def tokens_to_embeddings(self, tokens): embeddings = [] for token in tokens: embeddings.append(self.get_embedding(token)) return np.array(embeddings) def tokenize(self, sentence): sentence=sentence.lower() sentence = sentence.replace('[',' ').replace(']',' ').replace(':','').replace(' ',' ') return sentence.split() def goal_as_st(self, goal): return str(goal).replace("'",' ')\ .replace(',',' , ').replace('{',' ')\ .replace('}',' ').replace(' ',' ') def sample_roll_out(self, dial_id): start = time.time() gen_rollouts_info = self.processed_reward_rollouts[dial_id]['gen'] gt_rollout_info = self.processed_reward_rollouts[dial_id]['gt'] rollout_infos = np.random.choice(gen_rollouts_info+[gt_rollout_info], size=2, replace=False) #print(rollout_infos) dia_log= 
self.gt_dia_logs[dial_id] goal = dia_log['goal'] goal = self.tokenize(goal) goal = self.tokens_to_embeddings(goal) goal = self.pad_sentence(goal, self.MAX_GOAL_LEN) rollout_pairs = [] for rollout_info in rollout_infos: acts = [] states = [] for turn in rollout_info['rollout']: tn = turn['tn'] act = turn[self.TRAIN_ON]#turn['act'] if tn not in self.gt_dia_logs[dial_id]: break state = self.gt_dia_logs[dial_id][tn]['state'] # if random.uniform(0,1)>0.95: # print('act:',act) # print('state:',state) act = self.tokenize(act) state = self.tokenize(state) act = self.tokens_to_embeddings(act) state = self.tokens_to_embeddings(state) act = self.pad_sentence(act,self.MAX_ACT_LEN) state = self.pad_sentence(state,self.MAX_STATE_LEN) acts.append(act) states.append(state) acts=self.pad_time_step(acts,self.MAX_ACT_LEN) states=self.pad_time_step(states,self.MAX_STATE_LEN) score=rollout_info['score'] rollout_pairs.append([goal,states,acts,score]) prob = rollout_pairs[0][-1]/(rollout_pairs[0][-1]+rollout_pairs[1][-1]+1e-20) rollout_pairs[0][-1]=prob rollout_pairs[1][-1]=1-prob return rollout_pairs def get_data_gen(self, sample_roll_out): def data_gen(dial_ids,batch_size): try: s1s = [] a1s = [] g1s = [] s2s = [] a2s = [] g2s = [] probs = [] while True: shuffle(dial_ids) for dial_id in dial_ids: rollout_pair = sample_roll_out(dial_id) g1,s1,a1,p1=rollout_pair[0] g2,s2,a2,p2=rollout_pair[1] s1s.append(s1) a1s.append(a1) g1s.append(g1) s2s.append(s2) a2s.append(a2) g2s.append(g2) probs.append([p1,p2]) if len(s1s)>=batch_size: s1s = np.array(s1s) a1s = np.array(a1s) g1s = np.array(g1s) s2s = np.array(s2s) a2s = np.array(a2s) g2s = np.array(g2s) #print('as:',np.sum(a1s-a2s)) probs = np.array(probs) yield [s1s,a1s,g1s,s2s,a2s,g2s],probs s1s = [] a1s = [] g1s = [] s2s = [] a2s = [] g2s = [] probs = [] except Exception as e: print(traceback.format_exc()) raise e return data_gen def build_reward_model(self): s_bilstm = Bidirectional(LSTM(self.HIDDEN_DIM)) a_bilstms = [Conv1D(self.HIDDEN_DIM,1,activation='tanh'), Conv1D(self.HIDDEN_DIM,1,activation='tanh'), Lambda(lambda z:K.mean(z,axis=-2))] a_bilstms=[Bidirectional(LSTM(self.HIDDEN_DIM))] g_bilstm = Bidirectional(LSTM(self.HIDDEN_DIM)) reward_convs=[] reward_convs.append(Dense(self.HIDDEN_DIM,activation='tanh')) reward_convs.append(Dense(self.HIDDEN_DIM,activation='tanh')) reward_convs.append(Dense(self.HIDDEN_DIM,activation='tanh')) reward_convs.append(Dense(1,activation='sigmoid')) s = Input(shape=(self.MAX_STATE_LEN, self.EMBED_DIM)) a = Input(shape=(self.MAX_ACT_LEN, self.EMBED_DIM)) g = Input(shape=(self.MAX_GOAL_LEN, self.EMBED_DIM)) s_h = s_bilstm(s) a_h = a for layer in a_bilstms: a_h = layer(a_h) g_h = g_bilstm(g) #s_h = Lambda(lambda z:z*1e-20)(s_h) #g_h = Lambda(lambda z:z*1e-20)(g_h) reward = Concatenate(axis=-1)([s_h,a_h,g_h]) for reward_conv in reward_convs: reward = reward_conv(reward) reward = Lambda(lambda z:K.squeeze(z,axis=-1))(reward) model_reward = Model(inputs=[s,a,g],outputs=reward) model_reward.summary() return model_reward def _build_reward_flatten_model(self): x = Input(shape=(self.MAX_STATE_LEN + self.MAX_ACT_LEN + self.MAX_GOAL_LEN, self.EMBED_DIM)) s=Lambda(lambda z:z[:,:self.MAX_STATE_LEN])(x) a=Lambda(lambda z:z[:,self.MAX_STATE_LEN : self.MAX_STATE_LEN + self.MAX_ACT_LEN])(x) g=Lambda(lambda z:z[:,self.MAX_STATE_LEN + self.MAX_ACT_LEN:])(x) reward = self.model_reward([s,a,g]) model_reward_flatten = Model(x,reward) model_reward_flatten.summary() return model_reward_flatten def _build_cummulative_reward_model(self): model_reward_flatten 
= self._build_reward_flatten_model() s = Input(shape=(self.MAX_TIME_STEP, self.MAX_STATE_LEN, self.EMBED_DIM)) a = Input(shape=(self.MAX_TIME_STEP, self.MAX_ACT_LEN, self.EMBED_DIM)) g = Input(shape=(self.MAX_GOAL_LEN, self.EMBED_DIM)) g_padded = Lambda(lambda z:K.expand_dims(z,axis=1))(g) g_padded = Lambda(lambda z:K.repeat_elements(z, self.MAX_TIME_STEP,axis=1))(g_padded) comb_inp = Concatenate(axis=2)([s,a,g_padded]) rewards = TimeDistributed(model_reward_flatten)(comb_inp) returns = Lambda(lambda z:K.sum(z,axis=1,keepdims=True))(rewards) model_cummulative_reward = Model([s,a,g],returns) model_cummulative_reward.summary() return model_cummulative_reward def _build_preferential_model(self): model_cummulative_reward = self._build_cummulative_reward_model() s_1 = Input(shape=(self.MAX_TIME_STEP, self.MAX_STATE_LEN, self.EMBED_DIM)) a_1 = Input(shape=(self.MAX_TIME_STEP, self.MAX_ACT_LEN, self.EMBED_DIM)) g_1 = Input(shape=(self.MAX_GOAL_LEN, self.EMBED_DIM)) s_2 = Input(shape=(self.MAX_TIME_STEP, self.MAX_STATE_LEN, self.EMBED_DIM)) a_2 = Input(shape=(self.MAX_TIME_STEP, self.MAX_ACT_LEN, self.EMBED_DIM)) g_2 = Input(shape=(self.MAX_GOAL_LEN, self.EMBED_DIM)) chi_1 = model_cummulative_reward([s_1,a_1,g_1]) chi_2 = model_cummulative_reward([s_2,a_2,g_2]) chi = Concatenate()([chi_1,chi_2]) #Pref = Activation('softmax')(chi) Pref = Lambda(lambda z:z/K.sum(z,axis=-1,keepdims=True))(chi) model_preferential = Model([s_1,a_1,g_1,s_2,a_2,g_2],Pref) model_preferential.summary() return model_preferential def get_reward(self, input_seq): g = [] s = [] a = [] for goal,state, aspn, resp in input_seq: state_tokens = self.tokenize(state) state_token_embeds = self.tokens_to_embeddings(state_tokens) state_token_embeds = self.pad_sentence(state_token_embeds, self.MAX_STATE_LEN) s.append(state_token_embeds) if self.TRAIN_ON=='act': action_tokens = self.tokenize(aspn) elif self.TRAIN_ON=='resp': action_tokens = self.tokenize(resp) else: raise Exception('Invalid TRAIN_ON selection') action_token_embeds = self.tokens_to_embeddings(action_tokens) action_token_embeds = self.pad_sentence(action_token_embeds, self.MAX_ACT_LEN) a.append(action_token_embeds) goal_tokens = self.tokenize(goal) goal_token_embeds = self.tokens_to_embeddings(goal_tokens) goal_token_embeds = self.pad_sentence(goal_token_embeds, self.MAX_GOAL_LEN) g.append(goal_token_embeds) rewards = self.model_reward.predict([np.array(s),np.array(a),np.array(g)]) #print('aspn:',aspn,':',reward) return rewards def get_Gs(self, gamma=0.9): fn_Gs = {} num_fns = len(self.data_for_damd.keys()) for ex_num,fn in enumerate(tqdm(reversed(list(self.data_for_damd.keys())),total=num_fns)): #print('%:{0.2f}'.format(ex_num/num_fns),end='') next_state=None fn_Gs[fn] = {} goal = self.goal_as_st(self.data_for_damd[fn]['goal']) turn_num_inp_seq = {} for turn in self.data_for_damd[fn]['log']: turn_num = turn['turn_num'] resp = turn['resp'] state = 'begin '+turn['cons_delex']+' end'#turn['cons_delex'] aspn = turn['sys_act'] turn_num_inp_seq[turn_num]=[goal,state,aspn,resp] reverse_turn_nums = sorted(list(turn_num_inp_seq.keys()),reverse=True) inp_seq = [] for turn_num in reverse_turn_nums: inp_seq.append(turn_num_inp_seq[turn_num]) rewards = self.get_reward(inp_seq) G = 0 for turn_num,reward in zip(reverse_turn_nums,rewards): G = reward + gamma*G fn_Gs[fn][turn_num] = { 'G':G, 'gamma':gamma } return fn_Gs def compile_models(self): self.model_reward = self.build_reward_model() self.model_preferential = self._build_preferential_model() 
        self.model_preferential.compile(loss='categorical_crossentropy', optimizer='adam')

    def train_model(self):
        shuffle(self.dial_ids)
        train_dial_ids = self.dial_ids[:int(len(self.dial_ids) * self.train_val_fraction)]
        val_dial_ids = self.dial_ids[int(len(self.dial_ids) * self.train_val_fraction):]

        train_num_examples = len(train_dial_ids)
        valid_num_examples = len(val_dial_ids)
        print('train_num_examples:', train_num_examples)
        print('valid_num_examples:', valid_num_examples)

        train_num_examples_per_epoch = max(3, int((train_num_examples / self.batch_size) / 10))
        train_data_gen = self.get_data_gen(self.sample_roll_out)(train_dial_ids, self.batch_size)
        val_data_gen = self.get_data_gen(self.sample_roll_out)(val_dial_ids, self.batch_size)

        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                      patience=self.reduce_lr_patience, min_lr=0.000001, verbose=1)
        early_stopping = EarlyStopping(monitor='val_loss', patience=self.es_patience,
                                      verbose=1, restore_best_weights=True)

        # Model.fit accepts Python generators in TF 2.x; fit_generator is deprecated
        self.model_preferential.fit(train_data_gen,
                                    steps_per_epoch=train_num_examples_per_epoch,
                                    validation_data=val_data_gen,
                                    validation_steps=max(1, int(valid_num_examples / self.batch_size)),
                                    callbacks=[reduce_lr, early_stopping],
                                    epochs=self.num_epoch,
                                    )

    def save_returns(self, gamma=0.):
        fn_Gs = self.get_Gs(gamma=gamma)
        fn_G_file_name = 'fn_Gs_{}_{}_{}_{}.json'.format(self.fold, gamma, self.TRAIN_ON, self.metric)
        print(fn_G_file_name)
        fn_Gs_file_path = os.path.join(self.root_path, 'data', 'multi-woz-oppe', fn_G_file_name)
        print('fn_Gs_file_path:', fn_Gs_file_path)
        with open(fn_Gs_file_path, 'w') as f:
            # the G values come back from get_Gs as numpy floats, which json
            # cannot serialize directly; default=float converts them on write
            json.dump(fn_Gs, f, default=float)

if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument("-s", "--seed", dest="seed", default=11, type=int, help="seed")
    parser.add_argument("-K", "--folds", dest="folds", default=10, type=int, help="Number of folds")
    parser.add_argument("-a", "--action_space", dest="action_space", choices={"act", "resp"}, default='act',
                        help="action space. can either be act or resp")
    parser.add_argument("-m", "--metric", dest="metric", choices={"hard", "soft"}, default='soft',
                        help="metric used for pairwise reward candidate generation")
    parser.add_argument("-g", "--gamma", dest="gamma", default=0.0, type=float,
                        help="The discount factor used in reward learning")
    args = parser.parse_args()
    print('param:', args)

    rewardLearning = RewardLearning(args.folds, args.seed, args.action_space, args.metric)
    rewardLearning.load_reward_rollouts()
    rewardLearning.compile_models()
    rewardLearning.train_model()
    rewardLearning.save_returns(args.gamma)
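Two pieces of the pipeline above are easy to misread: the pairwise target built in sample_roll_out and the reverse-order return computed in get_Gs. A minimal numpy sketch of both; the function names here are illustrative and not part of the original module:

import numpy as np

def preference_targets(score_a, score_b, eps=1e-20):
    # same normalization as sample_roll_out: the rollout with the higher
    # metric score receives proportionally more probability mass
    p = score_a / (score_a + score_b + eps)
    return np.array([p, 1.0 - p])

def discounted_returns(rewards, gamma=0.9):
    # backward recursion G_t = r_t + gamma * G_{t+1}, matching get_Gs,
    # which walks the dialogue turns in reverse order
    G, out = 0.0, []
    for r in reversed(rewards):
        G = r + gamma * G
        out.append(G)
    return list(reversed(out))

print(preference_targets(4.0, 2.0))         # [0.666..., 0.333...]
print(discounted_returns([1.0, 0.0, 1.0]))  # [1.81, 0.9, 1.0]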
[ "tensorflow.keras.backend.repeat_elements", "pandas.read_csv", "tensorflow.keras.models.Model", "numpy.random.choice", "tensorflow.keras.callbacks.ReduceLROnPlateau", "tensorflow.keras.backend.sum", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.backend.squeeze", "numpy.concatenate", "numpy.mean", "tensorflow.keras.backend.expand_dims", "tensorflow.keras.backend.mean", "numpy.array", "numpy.zeros" ]
RewardLearning.py
[(512, 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), False, 'from argparse import ArgumentParser\n'), (49, 'gensim.models.keyedvectors.KeyedVectors.load_word2vec_format', 'KeyedVectors.load_word2vec_format', (['word_embed_file_path'], {'binary': '(False)', 'unicode_errors': '"""ignore"""'}), False, 'from gensim.models.keyedvectors import KeyedVectors\n'), (51, 'os.path.join', 'os.path.join', (['self.root_path', '"""data/multi-woz-oppe/reward"""'], {}), False, 'import os\n'), (66, 'os.listdir', 'os.listdir', (['self.reward_folder_path'], {}), False, 'import os\n'), (138, 'numpy.array', 'np.array', (['token_embeds'], {}), True, 'import numpy as np\n'), (143, 'numpy.array', 'np.array', (['sentence_embeds'], {}), True, 'import numpy as np\n'), (183, 'numpy.array', 'np.array', (['embeddings'], {}), True, 'import numpy as np\n'), (197, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (200, 'numpy.random.choice', 'np.random.choice', (['(gen_rollouts_info + [gt_rollout_info])'], {'size': '(2)', 'replace': '(False)'}), True, 'import numpy as np\n'), (340, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[s, a, g]', 'outputs': 'reward'}), False, 'from tensorflow.keras.models import Sequential, Model\n'), (351, 'tensorflow.keras.models.Model', 'Model', (['x', 'reward'], {}), False, 'from tensorflow.keras.models import Sequential, Model\n'), (373, 'tensorflow.keras.models.Model', 'Model', (['[s, a, g]', 'returns'], {}), False, 'from tensorflow.keras.models import Sequential, Model\n'), (396, 'tensorflow.keras.models.Model', 'Model', (['[s_1, a_1, g_1, s_2, a_2, g_2]', 'Pref'], {}), False, 'from tensorflow.keras.models import Sequential, Model\n'), (473, 'random.shuffle', 'shuffle', (['self.dial_ids'], {}), False, 'from random import shuffle\n'), (488, 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.1)', 'patience': 'self.reduce_lr_patience', 'min_lr': '(1e-06)', 'verbose': '(1)'}), False, 'from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard, ModelCheckpoint, EarlyStopping\n'), (489, 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': 'self.es_patience', 'verbose': '(1)', 'restore_best_weights': '(True)'}), False, 'from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard, ModelCheckpoint, EarlyStopping\n'), (505, 'os.path.join', 'os.path.join', (['self.root_path', '"""data"""', '"""multi-woz-oppe"""', 'fn_G_file_name'], {}), False, 'import os\n'), (67, 're.search', 're.search', (['reward_record_file_prefix', 'file'], {}), False, 'import re\n'), (146, 'numpy.concatenate', 'np.concatenate', (['[sentence_embeds, pad]'], {}), True, 'import numpy as np\n'), (508, 'json.dump', 'json.dump', (['fn_Gs', 'f'], {}), False, 'import json\n'), (69, 'os.path.join', 'os.path.join', (['self.reward_folder_path', 'file'], {}), False, 'import os\n'), (70, 'pandas.read_csv', 'pd.read_csv', (['reward_record_path'], {}), True, 'import pandas as pd\n'), (136, 'numpy.zeros', 'np.zeros', (['self.EMBED_DIM'], {}), True, 'import numpy as np\n'), (172, 'numpy.mean', 'np.mean', (['embeds'], {'axis': '(0)'}), True, 'import numpy as np\n'), (428, 'numpy.array', 'np.array', (['s'], {}), True, 'import numpy as np\n'), (428, 'numpy.array', 'np.array', (['a'], {}), True, 'import numpy as np\n'), (428, 'numpy.array', 'np.array', (['g'], {}), True, 'import numpy as np\n'), (73, 'json.loads', 'json.loads', (["row['rollout']"], {}), False, 'import json\n'), 
(262, 'random.shuffle', 'shuffle', (['dial_ids'], {}), False, 'from random import shuffle\n'), (311, 'tensorflow.keras.backend.mean', 'K.mean', (['z'], {'axis': '(-2)'}), True, 'from tensorflow.keras import backend as K\n'), (338, 'tensorflow.keras.backend.squeeze', 'K.squeeze', (['z'], {'axis': '(-1)'}), True, 'from tensorflow.keras import backend as K\n'), (363, 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['z'], {'axis': '(1)'}), True, 'from tensorflow.keras import backend as K\n'), (364, 'tensorflow.keras.backend.repeat_elements', 'K.repeat_elements', (['z', 'self.MAX_TIME_STEP'], {'axis': '(1)'}), True, 'from tensorflow.keras import backend as K\n'), (371, 'tensorflow.keras.backend.sum', 'K.sum', (['z'], {'axis': '(1)', 'keepdims': '(True)'}), True, 'from tensorflow.keras import backend as K\n'), (52, 'os.path.join', 'os.path.join', (['self.root_path', '"""data/multi-woz-processed/data_for_damd.json"""'], {}), False, 'import os\n'), (301, 'traceback.format_exc', 'traceback.format_exc', ([], {}), False, 'import traceback\n'), (394, 'tensorflow.keras.backend.sum', 'K.sum', (['z'], {'axis': '(-1)', 'keepdims': '(True)'}), True, 'from tensorflow.keras import backend as K\n'), (278, 'numpy.array', 'np.array', (['s1s'], {}), True, 'import numpy as np\n'), (279, 'numpy.array', 'np.array', (['a1s'], {}), True, 'import numpy as np\n'), (280, 'numpy.array', 'np.array', (['g1s'], {}), True, 'import numpy as np\n'), (282, 'numpy.array', 'np.array', (['s2s'], {}), True, 'import numpy as np\n'), (283, 'numpy.array', 'np.array', (['a2s'], {}), True, 'import numpy as np\n'), (284, 'numpy.array', 'np.array', (['g2s'], {}), True, 'import numpy as np\n'), (288, 'numpy.array', 'np.array', (['probs'], {}), True, 'import numpy as np\n')]
raimamathew/Brain-Tumor-Segmentation
748bc37b61a2e89637a2ddf1da9029c0c820f400
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import os
from skimage import measure
import re
import nibabel as nib
import tensorflow as tf
import time
from scipy.ndimage import zoom
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Input, Dropout, concatenate, Flatten, Dense, Reshape, BatchNormalization, Activation
from tensorflow.keras.layers import Lambda, Add
from tensorflow.keras.layers import Conv3D, Conv3DTranspose, UpSampling3D
from tensorflow.keras.layers import MaxPooling3D
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras import backend as K
import seaborn as sns
import subprocess


def H_brain(scan, tumour):
    """
    Returns healthy brain voxels
    Args:
        scan - full scan
        tumour - segmented tumour
    """
    return np.logical_and(scan, np.logical_not(tumour))


def get_obj(tumor_array, fname='out.obj'):
    """
    Writes the tumour surface to a .obj file
    Args:
        tumor_array - np array
        fname - output file name [OPTIONAL]
    """
    verts, faces, normals, values = measure.marching_cubes_lewiner(tumor_array, 0)
    faces = faces + 1  # .obj faces are 1-indexed
    thefile = open(fname, 'w')
    for item in verts:
        thefile.write("v {0} {1} {2}\n".format(item[0], item[1], item[2]))
    for item in normals:
        thefile.write("vn {0} {1} {2}\n".format(item[0], item[1], item[2]))
    for item in faces:
        thefile.write("f {0}//{0} {1}//{1} {2}//{2}\n".format(item[0], item[1], item[2]))
    thefile.close()


# index the BraTS HGG scans: one tuple of modality paths per patient
folders = subprocess.check_output("ls ./HGG/", shell=True)
folders = folders.decode().split("\n")
folders.pop()
scan_list = []
for folder in folders:
    sc = subprocess.check_output("ls ./HGG/" + str(folder), shell=True)
    sc = sc.decode().split("\n")
    sc.pop()
    sc = ["./HGG/" + str(folder) + "/" + i for i in sc]
    scan_list.append(tuple(sc))

flair_scans = []
for i in scan_list:
    for _ in i:
        if "flair" in _:
            scan = _
        if "seg" in _:
            seg = _
    flair_scans.append((scan, seg))

t1ce_scans = []
for i in scan_list:
    for _ in i:
        if "t1ce" in _:
            scan = _
        if "seg" in _:
            seg = _
    t1ce_scans.append((scan, seg))

t2_scans = []
for i in scan_list:
    for _ in i:
        if "t2" in _:
            scan = _
        if "seg" in _:
            seg = _
    t2_scans.append((scan, seg))


def get_scan(scan_path='HGG/Brats18_CBICA_AAB_1/Brats18_CBICA_AAB_1_seg.nii.gz'):
    """
    Returns the scan as an np array, depth-padded from 155 to 160 slices
    scan_path - path to .nii.gz file
    """
    x = nib.load(scan_path).get_fdata()[:, :, :]
    return np.expand_dims(np.append(x, np.zeros((240, 240, 5)), axis=-1), axis=-1)


def get_seg(scan_path='HGG/Brats18_CBICA_AAB_1/Brats18_CBICA_AAB_1_seg.nii.gz', contrast=1):
    """
    Returns an np array with the true segmentation for one label
    scan_path - path to .nii.gz file
    """
    x = nib.load(scan_path).get_fdata() == contrast
    return np.expand_dims(np.append(x, np.zeros((240, 240, 5)), axis=-1), axis=-1)


def show_scan(scan='HGG/Brats18_CBICA_AAB_1/Brats18_CBICA_AAB_1_seg.nii.gz'):
    """
    Plots one slice of the scan
    scan - path to .nii.gz file
    """
    # drop the trailing channel axis; imshow needs a 2-D array
    plt.imshow(get_scan(scan)[:, :, 76, 0])


def show_seg(scan='HGG/Brats18_CBICA_AAB_1/Brats18_CBICA_AAB_1_seg.nii.gz', contrast=1):
    """
    Plots one slice of the true segmentation
    scan - path to .nii.gz file
    """
    # get_seg already applies the label comparison, so no second == here
    plt.imshow(get_seg(scan, contrast)[:, :, 76, 0])


def generate_set(scans, contrast=1, batch_size=1):
    """
    Train/Test set generator
    scans - list of (scan_path, seg_path) pairs
    contrast - ground truth label
    """
    batch_x = []
    batch_y = []
    count = 0
    # loop forever, cycling over all scans, as Keras generators must;
    # the original for/while nesting yielded only the first scan repeatedly
    while True:
        for scan in scans:
            (x, y) = scan
            x = get_scan(x)
            y = get_seg(y, contrast)
            batch_x.append(x)
            batch_y.append(y)
            count += 1
            if count == batch_size:
                count = 0
                yield np.array(batch_x), np.array(batch_y)
                batch_x = []
                batch_y = []


def BatchActivate(x):
    x = Activation('relu')(x)
    return x

def conv_block(x, filters, size, strides=(1, 1, 1), padding='same', activation=True):
    x = Conv3D(filters, (size, size, size), strides=strides, padding=padding)(x)
    if activation == True:
        x = BatchActivate(x)
    return x

def pool_block(x, size):
    return MaxPooling3D((size, size, size))(x)

def upsam_block(x, size):
    return UpSampling3D((size, size, size))(x)

def res_block(blockInput, num_filters, size, batch_activate=False):
    x = BatchActivate(blockInput)
    x = conv_block(x, num_filters, size)
    x = conv_block(x, num_filters, size, activation=True)
    x = Add()([x, blockInput])  # Add is now imported above; it was missing originally
    if batch_activate:
        x = BatchActivate(x)
    return x

def build_model(inputs, start_filters=8, filter_size=2, pool_size=2):
    # 240 -> 120
    # 152 -> 76
    conv1 = conv_block(inputs, start_filters, filter_size)
    conv1 = conv_block(conv1, start_filters, filter_size)
    pool1 = pool_block(conv1, pool_size)

    # 120 -> 60
    # 76 -> 38
    conv2 = conv_block(pool1, start_filters * 2, filter_size)
    conv2 = conv_block(conv2, start_filters * 2, filter_size)
    pool2 = pool_block(conv2, pool_size)

    # 60 -> 30
    # 38 -> 19
    conv3 = conv_block(pool2, start_filters * 4, filter_size)
    conv3 = conv_block(conv3, start_filters * 4, filter_size)
    pool3 = pool_block(conv3, pool_size)

    conv4 = conv_block(pool3, start_filters * 8, filter_size)
    conv4 = conv_block(conv4, start_filters * 8, filter_size)

    conv5 = upsam_block(conv4, pool_size)
    conv5 = concatenate([conv3, conv5])
    conv5 = conv_block(conv5, start_filters * 4, filter_size)
    conv5 = conv_block(conv5, start_filters * 4, filter_size)

    conv6 = upsam_block(conv5, pool_size)
    conv6 = concatenate([conv2, conv6])
    conv6 = conv_block(conv6, start_filters * 2, filter_size)
    conv6 = conv_block(conv6, start_filters * 2, filter_size)

    conv7 = upsam_block(conv6, pool_size)
    conv7 = concatenate([conv1, conv7])
    conv7 = conv_block(conv7, start_filters, filter_size)
    conv7 = conv_block(conv7, start_filters, filter_size)

    # sigmoid head: the original relu output is unbounded, which breaks
    # binary_crossentropy; a sigmoid keeps voxel scores in (0, 1)
    output = Conv3D(1, (filter_size, filter_size, filter_size),
                    padding='same', activation='sigmoid')(conv7)
    return output

inputs = Input((240, 240, 160, 1))
outputs = build_model(inputs, 16)
model = Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()

#sets = generate_set(flair_scans, 2)
earlystopper = EarlyStopping(patience=5, verbose=1)
checkpointer = ModelCheckpoint('model-tgs-salt-1.h5', verbose=1, save_best_only=True)
# Model.fit accepts Python generators in TF 2.x; fit_generator is deprecated
results = model.fit(generate_set(flair_scans, 2),
                    steps_per_epoch=len(flair_scans),
                    epochs=30,
                    callbacks=[earlystopper, checkpointer])
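For completeness, a hedged sketch of how the trained network and get_obj could be combined at inference time; the 0.5 threshold and the output filename are illustrative assumptions, not part of the original script:

# segment one FLAIR scan and export the predicted tumour surface
scan = np.expand_dims(get_scan(flair_scans[0][0]), axis=0)  # (1, 240, 240, 160, 1)
pred = model.predict(scan)[0, ..., 0]                       # sigmoid scores in (0, 1)
mask = pred > 0.5                                           # illustrative threshold
# map to +/-1 so get_obj's level-0 isosurface falls between the two classes
get_obj(np.where(mask, 1.0, -1.0), fname='tumour_pred.obj')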
[ "tensorflow.keras.callbacks.ModelCheckpoint", "numpy.logical_not", "tensorflow.keras.layers.Activation", "tensorflow.keras.models.Model", "tensorflow.keras.layers.MaxPooling3D", "tensorflow.keras.layers.UpSampling3D", "tensorflow.keras.layers.Conv3D", "tensorflow.keras.layers.concatenate", "numpy.array", "tensorflow.keras.callbacks.EarlyStopping", "numpy.zeros", "tensorflow.keras.layers.Input" ]
train_script.py
[(57, 'subprocess.check_output', 'subprocess.check_output', (['"""ls ./HGG/"""'], {'shell': '(True)'}), False, 'import subprocess\n'), (248, 'tensorflow.keras.layers.Input', 'Input', (['(240, 240, 160, 1)'], {}), False, 'from tensorflow.keras.layers import Input, Dropout, concatenate, Flatten, Dense, Reshape, BatchNormalization, Activation\n'), (250, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[outputs]'}), False, 'from tensorflow.keras.models import Model, load_model\n'), (256, 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(5)', 'verbose': '(1)'}), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\n'), (257, 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""model-tgs-salt-1.h5"""'], {'verbose': '(1)', 'save_best_only': '(True)'}), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\n'), (42, 'skimage.measure.marching_cubes_lewiner', 'measure.marching_cubes_lewiner', (['tumor_array', '(0)'], {}), False, 'from skimage import measure\n'), (230, 'tensorflow.keras.layers.concatenate', 'concatenate', (['[conv3, conv5]'], {}), False, 'from tensorflow.keras.layers import concatenate\n'), (235, 'tensorflow.keras.layers.concatenate', 'concatenate', (['[conv2, conv6]'], {}), False, 'from tensorflow.keras.layers import concatenate\n'), (240, 'tensorflow.keras.layers.concatenate', 'concatenate', (['[conv1, conv7]'], {}), False, 'from tensorflow.keras.layers import concatenate\n'), (31, 'numpy.logical_not', 'np.logical_not', (['tumour'], {}), True, 'import numpy as np\n'), (182, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Input, Dropout, concatenate, Flatten, Dense, Reshape, BatchNormalization, Activation\n'), (186, 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['filters', '(size, size, size)'], {'strides': 'strides', 'padding': 'padding'}), False, 'from tensorflow.keras.layers import Conv3D, Conv3DTranspose, UpSampling3D\n'), (192, 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', (['(size, size, size)'], {}), False, 'from tensorflow.keras.layers import MaxPooling3D\n'), (195, 'tensorflow.keras.layers.UpSampling3D', 'UpSampling3D', (['(size, size, size)'], {}), False, 'from tensorflow.keras.layers import Conv3D, Conv3DTranspose, UpSampling3D\n'), (122, 'numpy.zeros', 'np.zeros', (['(240, 240, 5)'], {}), True, 'import numpy as np\n'), (129, 'numpy.zeros', 'np.zeros', (['(240, 240, 5)'], {}), True, 'import numpy as np\n'), (121, 'nibabel.load', 'nib.load', (['scan_path'], {}), True, 'import nibabel as nib\n'), (128, 'nibabel.load', 'nib.load', (['scan_path'], {}), True, 'import nibabel as nib\n'), (171, 'numpy.array', 'np.array', (['batch_x'], {}), True, 'import numpy as np\n'), (171, 'numpy.array', 'np.array', (['batch_y'], {}), True, 'import numpy as np\n')]
ibeauregard/tensorflow
20bc44d8fc2feee4c63dd90e49dbcdf34ed6564c
# Lint as: python2, python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for lite.py functionality related to TensorFlow 2.0.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from absl.testing import parameterized import numpy as np from six.moves import range from six.moves import zip import tensorflow as tf from tensorflow.lite.python import lite from tensorflow.lite.python import lite_v2_test_util from tensorflow.lite.python.convert import mlir_quantize from tensorflow.lite.python.interpreter import Interpreter from tensorflow.lite.toco import types_pb2 as _types_pb2 from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.lib.io import file_io from tensorflow.python.platform import test from tensorflow.python.saved_model import save_options from tensorflow.python.saved_model import saved_model from tensorflow.python.saved_model.loader_impl import parse_saved_model from tensorflow.python.saved_model.save import save from tensorflow.python.training.tracking import tracking class FromConcreteFunctionTest(lite_v2_test_util.ModelTest): @test_util.run_v2_only def testTypeInvalid(self): root = self._getSimpleVariableModel() with self.assertRaises(ValueError) as error: _ = lite.TFLiteConverterV2.from_concrete_functions([root.f]) self.assertIn('call get_concrete_function', str(error.exception)) @parameterized.named_parameters( ('EnableMlirConverter', True), # enable mlir ('DisableMlirConverter', False)) # disable mlir @test_util.run_v2_only def testFloat(self, enable_mlir_converter): root = self._getSimpleVariableModel() input_data = tf.constant(1., shape=[1]) concrete_func = root.f.get_concrete_function(input_data) # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) converter.experimental_new_converter = enable_mlir_converter tflite_model = converter.convert() # Check values from converted model. expected_value = root.f(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value.numpy(), actual_value) @parameterized.named_parameters( ('_INT8InputOutput', dtypes.int8), ('_UINT8InputOutput', dtypes.uint8), ('_INT16InputOutput', dtypes.int16)) @test_util.run_v2_only def testInvalidFloat(self, inference_input_output_type): root = self._getSimpleVariableModel() input_data = tf.constant(1., shape=[1]) concrete_func = root.f.get_concrete_function(input_data) # Convert model. 
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) with self.assertRaises(ValueError) as error: converter.inference_input_type = inference_input_output_type converter.inference_output_type = inference_input_output_type converter.convert() self.assertEqual( 'The inference_input_type and inference_output_type ' 'must be tf.float32.', str(error.exception)) @test_util.run_v2_only def testScalarInput(self): root = self._getSimpleVariableModel() input_data = tf.constant(1., shape=[]) concrete_func = root.f.get_concrete_function(input_data) # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) tflite_model = converter.convert() # Check values from converted model. expected_value = root.f(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value.numpy(), actual_value) @test_util.run_v2_only def testMultiFunctionModel(self): """Convert a single model in a multi-functional model.""" root = self._getMultiFunctionModel() input_data = tf.constant(1., shape=[1]) concrete_func = root.add.get_concrete_function(input_data) # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) tflite_model = converter.convert() # Check values from converted model. expected_value = root.add(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value.numpy(), actual_value) @test_util.run_v2_only def testConvertMultipleFunctions(self): """Convert multiple functions in a multi-functional model.""" root = self._getMultiFunctionModel() input_data = tf.constant(1., shape=[1]) add_func = root.add.get_concrete_function(input_data) sub_func = root.sub.get_concrete_function(input_data) # Try converting multiple functions. converter = lite.TFLiteConverterV2.from_concrete_functions( [add_func, sub_func]) with self.assertRaises(ValueError) as error: _ = converter.convert() self.assertIn('can only convert a single ConcreteFunction', str(error.exception)) def _getIntegerQuantizeModel(self): np.random.seed(0) root = tracking.AutoTrackable() @tf.function( input_signature=[tf.TensorSpec(shape=[1, 5, 5, 3], dtype=tf.float32)]) def func(inp): conv = tf.nn.conv2d( inp, tf.ones([3, 3, 3, 16]), strides=[1, 1, 1, 1], padding='SAME') output = tf.nn.relu(conv, name='output') return output def calibration_gen(): for _ in range(5): yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)] root.f = func to_save = root.f.get_concrete_function() return (to_save, calibration_gen) @parameterized.named_parameters( ('EnableMlirQuantizer', True), # enable mlir quantizer ('DisableMlirQuantizer', False)) # disable mlir quantizer def testPostTrainingCalibrateAndQuantize(self, mlir_quantizer): func, calibration_gen = self._getIntegerQuantizeModel() # Convert float model. float_converter = lite.TFLiteConverterV2.from_concrete_functions([func]) float_tflite_model = float_converter.convert() self.assertIsNotNone(float_tflite_model) # Convert quantized model. quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func]) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_converter.representative_dataset = calibration_gen quantized_converter._experimental_new_quantizer = mlir_quantizer quantized_tflite_model = quantized_converter.convert() self.assertIsNotNone(quantized_tflite_model) # The default input and output types should be float. 
interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertLen(input_details, 1) self.assertEqual(np.float32, input_details[0]['dtype']) output_details = interpreter.get_output_details() self.assertLen(output_details, 1) self.assertEqual(np.float32, output_details[0]['dtype']) # Ensure that the quantized weights tflite model is smaller. self.assertLess(len(quantized_tflite_model), len(float_tflite_model)) @parameterized.named_parameters( ('_INT8InputOutput', dtypes.int8), ('_UINT8InputOutput', dtypes.uint8), ('_INT16InputOutput', dtypes.int16)) @test_util.run_v2_only def testInvalidPostTrainingDynamicRangeQuantization( self, inference_input_output_type): func, _ = self._getIntegerQuantizeModel() # Convert float model. converter = lite.TFLiteConverterV2.from_concrete_functions([func]) tflite_model = converter.convert() self.assertTrue(tflite_model) # Convert quantized model. quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func]) quantized_converter.optimizations = [lite.Optimize.DEFAULT] with self.assertRaises(ValueError) as error: quantized_converter.inference_input_type = inference_input_output_type quantized_converter.inference_output_type = inference_input_output_type quantized_converter.convert() self.assertEqual( 'The inference_input_type and inference_output_type ' 'must be tf.float32.', str(error.exception)) @parameterized.named_parameters( ('_Default', False, False, dtypes.float32), ('_INT8InputOutput', False, False, dtypes.int8), ('_UINT8InputOutput', False, False, dtypes.uint8), ('_INT16Quantize', False, True, dtypes.float32), ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16), ('_IntOnly', True, False, dtypes.float32), ('_IntOnly_INT8InputOutput', True, False, dtypes.int8), ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8), ('_IntOnly_INT16Quantize', True, True, dtypes.float32), ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16)) def testIntegerQuantization(self, is_int_only, is_int16_quantize, inference_input_output_type): func, calibration_gen = self._getIntegerQuantizeModel() # Convert float model. converter = lite.TFLiteConverterV2.from_concrete_functions([func]) tflite_model = converter.convert() self.assertTrue(tflite_model) # Convert quantized model. 
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func]) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_converter.representative_dataset = calibration_gen if is_int_only: if is_int16_quantize: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.\ EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 ] else: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS_INT8 ] else: if is_int16_quantize: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.\ EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8, lite.OpsSet.TFLITE_BUILTINS ] quantized_converter.inference_input_type = inference_input_output_type quantized_converter.inference_output_type = inference_input_output_type quantized_tflite_model = quantized_converter.convert() self.assertIsNotNone(quantized_tflite_model) interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertLen(input_details, 1) self.assertEqual(inference_input_output_type.as_numpy_dtype, input_details[0]['dtype']) output_details = interpreter.get_output_details() self.assertLen(output_details, 1) self.assertEqual(inference_input_output_type.as_numpy_dtype, output_details[0]['dtype']) # Ensure that the quantized tflite model is smaller. self.assertLess(len(quantized_tflite_model), len(tflite_model)) @parameterized.named_parameters( ('_INT16Quantize_INT8InputOutput', True, dtypes.int8)) def testInvalidIntegerQuantization(self, is_int16_quantize, inference_input_output_type): func, calibration_gen = self._getIntegerQuantizeModel() # Convert quantized model. quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func]) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_converter.representative_dataset = calibration_gen if is_int16_quantize: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.\ EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8, lite.OpsSet.TFLITE_BUILTINS ] with self.assertRaises(ValueError) as error: quantized_converter.inference_input_type = dtypes.int8 quantized_converter.inference_output_type = dtypes.int8 quantized_converter.convert() self.assertEqual( "The inference_input_type and inference_output_type " "must be in ['tf.float32', 'tf.int16'].", str(error.exception)) def testCalibrateAndQuantizeBuiltinInt16(self): func, calibration_gen = self._getIntegerQuantizeModel() # Convert float model. float_converter = lite.TFLiteConverterV2.from_concrete_functions([func]) float_tflite_model = float_converter.convert() self.assertIsNotNone(float_tflite_model) converter = lite.TFLiteConverterV2.from_concrete_functions([func]) # TODO(b/156309549): We should add INT16 to the builtin types. converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS_INT8 ] converter.representative_dataset = calibration_gen converter._experimental_calibrate_only = True calibrated_tflite = converter.convert() quantized_tflite_model = mlir_quantize( calibrated_tflite, inference_type=_types_pb2.QUANTIZED_INT16) self.assertIsNotNone(quantized_tflite_model) # The default input and output types should be float. 
interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertLen(input_details, 1) self.assertEqual(np.float32, input_details[0]['dtype']) output_details = interpreter.get_output_details() self.assertLen(output_details, 1) self.assertEqual(np.float32, output_details[0]['dtype']) # Ensure that the quantized weights tflite model is smaller. self.assertLess(len(quantized_tflite_model), len(float_tflite_model)) def _getTrainingTimeQuantizedModel(self): class QLinear(tf.keras.layers.Layer): def __init__(self, units=3, **kwargs): super(QLinear, self).__init__(**kwargs) self.units = units def build(self, input_shape): self.w = self.add_weight( 'weight', shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) self.min_var = self.add_weight( 'min', initializer=tf.keras.initializers.Constant(-6.0), trainable=False) self.max_var = self.add_weight( 'max', initializer=tf.keras.initializers.Constant(6.0), trainable=False) def call(self, inputs): x = tf.quantization.fake_quant_with_min_max_vars( inputs, self.min_var, self.max_var) w_fq = tf.quantization.fake_quant_with_min_max_vars( self.w, self.min_var, self.max_var) x = tf.matmul(x, w_fq) x = tf.quantization.fake_quant_with_min_max_vars( x, self.min_var, self.max_var) return x return tf.keras.Sequential(QLinear(3, input_shape=(2,))) @parameterized.named_parameters( ('_DefaultFLOAT32InputOutput', dtypes.float32), ('_INT8InputOutput', dtypes.int8), ('_UINT8InputOutput', dtypes.uint8)) @test_util.run_v2_only def testTrainingTimeQuantization(self, inference_input_output_type): model = self._getTrainingTimeQuantizedModel() float_converter = lite.TFLiteConverterV2.from_keras_model(model) float_tflite_model = float_converter.convert() self.assertIsNotNone(float_tflite_model) quantized_converter = lite.TFLiteConverterV2.from_keras_model(model) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_converter.inference_input_type = inference_input_output_type quantized_converter.inference_output_type = inference_input_output_type quantized_tflite_model = quantized_converter.convert() self.assertIsNotNone(quantized_tflite_model) interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertLen(input_details, 1) self.assertEqual(inference_input_output_type.as_numpy_dtype, input_details[0]['dtype']) output_details = interpreter.get_output_details() self.assertLen(output_details, 1) self.assertEqual(inference_input_output_type.as_numpy_dtype, output_details[0]['dtype']) # Ensure that the quantized tflite model is smaller. 
self.assertLess(len(quantized_tflite_model), len(float_tflite_model)) @test_util.run_v2_only def testNewQuantizer(self): """Test the model quantized by the new converter.""" func, calibration_gen = self._getIntegerQuantizeModel() quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func]) quantized_converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS_INT8 ] quantized_converter.representative_dataset = calibration_gen # default quantizer quantized_converter._experimental_new_quantizer = False old_tflite = quantized_converter.convert() # new quantizer quantized_converter._experimental_new_quantizer = True new_tflite = quantized_converter.convert() for _ in range(5): input_data = tf.constant( np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)) old_value = self._evaluateTFLiteModel(old_tflite, [input_data]) new_value = self._evaluateTFLiteModel(new_tflite, [input_data]) self.assertAllClose(old_value, new_value, atol=1e-01) @parameterized.named_parameters( ('EnableMlirConverter', True), # enable mlir ('DisableMlirConverter', False)) # disable mlir @test_util.run_v2_only def testEmbeddings(self, enable_mlir_converter): """Test model with embeddings.""" input_data = tf.constant( np.array(np.random.random_sample((20)), dtype=np.int32)) class EmbeddingModel(tf.keras.Model): def __init__(self): super(EmbeddingModel, self).__init__() self.shared_weights = self.add_weight( 'weights', shape=(2000, 300), dtype=tf.float32, initializer=tf.random_normal_initializer( mean=0.0, stddev=300**(-0.5))) @tf.function(input_signature=[tf.TensorSpec(shape=(20), dtype=tf.int32)]) def func(self, x): return tf.gather(self.shared_weights, x) # Building the model. root = EmbeddingModel() concrete_func = root.func.get_concrete_function() # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) converter.experimental_new_converter = enable_mlir_converter tflite_model = converter.convert() # Check values from converted model. expected_value = root.func(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertAllClose(expected_value.numpy(), actual_value[0], atol=1e-05) @test_util.run_v2_only def testGraphDebugInfo(self): """Test a concrete function has debug info captured.""" root = tracking.AutoTrackable() root.v1 = tf.Variable(3.) root.f = tf.function(lambda x: root.v1 * x) input_data = tf.constant(1., shape=[1]) concrete_func = root.f.get_concrete_function(input_data) # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) converter.convert() self._assertValidDebugInfo(converter._debug_info) def _getIntegerQuantizationModelWithFlexOp(self): np.random.seed(0) root = tracking.AutoTrackable() @tf.function(input_signature=[ tf.TensorSpec(shape=[3, 3, 3, 3, 3], dtype=tf.float32) ]) def func(inp): tanh = tf.math.tanh(inp) # Flex delegate will merge the consecutive conv3d and erf ops into one # Delegate node. 
conv3d = tf.nn.conv3d( tanh, tf.ones([3, 3, 3, 3, 3]), strides=[1, 1, 1, 1, 1], padding='SAME') erf = tf.math.erf(conv3d) output = tf.math.tanh(erf) return output def calibration_gen(): for _ in range(5): yield [ np.random.uniform(-1, 1, size=(3, 3, 3, 3, 3)).astype(np.float32) ] root.f = func return (root.f.get_concrete_function(), calibration_gen) @parameterized.named_parameters( ('_Default', False, False, dtypes.float32), ('_INT8InputOutput', False, False, dtypes.int8), ('_UINT8InputOutput', False, False, dtypes.uint8), ('_INT16Quantize', False, True, dtypes.float32), ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16), ('_IntOnly', True, False, dtypes.float32), ('_IntOnly_INT8InputOutput', True, False, dtypes.int8), ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8), ('_IntOnly_INT16Quantize', True, True, dtypes.float32), ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16)) @test_util.run_v2_only def testIntegerQuantizationWithFlexOp(self, is_int_only, is_int16_quantize, inference_input_output_type): func, calibration_gen = self._getIntegerQuantizationModelWithFlexOp() quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions( [func]) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_converter.representative_dataset = calibration_gen if is_int_only: if is_int16_quantize: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.\ EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8, lite.OpsSet.SELECT_TF_OPS ] else: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.SELECT_TF_OPS ] else: if is_int16_quantize: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.\ EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8, lite.OpsSet.TFLITE_BUILTINS, lite.OpsSet.SELECT_TF_OPS ] else: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS, lite.OpsSet.SELECT_TF_OPS ] quantized_converter.inference_input_type = inference_input_output_type quantized_converter.inference_output_type = inference_input_output_type quantized_tflite_model = quantized_converter.convert() self.assertIsNotNone(quantized_tflite_model) interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertLen(input_details, 1) self.assertEqual(inference_input_output_type.as_numpy_dtype, input_details[0]['dtype']) output_details = interpreter.get_output_details() self.assertLen(output_details, 1) self.assertEqual(inference_input_output_type.as_numpy_dtype, output_details[0]['dtype']) def _getIntegerQuantizationModelWithUnsupportedOps(self): np.random.seed(0) root = tracking.AutoTrackable() @tf.function(input_signature=[ tf.TensorSpec(shape=[3], dtype=tf.float32), tf.TensorSpec(shape=[3], dtype=tf.float32) ]) def func(a, b): # ceil kernel does not support int8 nor int16 types neither. left = tf.math.ceil(a) right = tf.nn.tanh(b) add = tf.math.add(left, right) # ceil kernel does not support int8 nor int16 types neither. 
output = tf.math.ceil(add) return (output, right) def calibration_gen(): for _ in range(5): yield [ np.random.uniform(-1, 1, size=(3)).astype(np.float32), np.random.uniform(-1, 1, size=(3)).astype(np.float32) ] root.f = func return (root.f.get_concrete_function(), calibration_gen) @parameterized.named_parameters( ('_INT8InputOutput', False, False, dtypes.int8), ('_UINT8InputOutput', False, False, dtypes.uint8), ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16), ('_IntOnly_INT8InputOutput', True, False, dtypes.int8), ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8), ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16)) @test_util.run_v2_only def testIntegerQuantizationWithUnsupportedOps(self, is_int_only, is_int16_quantize, inference_input_output_type): func, calib_gen = self._getIntegerQuantizationModelWithUnsupportedOps() quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions( [func]) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_converter.representative_dataset = calib_gen if is_int_only: if is_int16_quantize: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.\ EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8, lite.OpsSet.TFLITE_BUILTINS ] else: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.TFLITE_BUILTINS ] else: if is_int16_quantize: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.\ EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8, lite.OpsSet.TFLITE_BUILTINS ] else: quantized_converter.target_spec.supported_ops = [ lite.OpsSet.TFLITE_BUILTINS ] quantized_converter.inference_input_type = inference_input_output_type quantized_converter.inference_output_type = inference_input_output_type quantized_tflite_model = quantized_converter.convert() self.assertIsNotNone(quantized_tflite_model) interpreter = Interpreter(model_content=quantized_tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertLen(input_details, 2) # Allow float32 for fallback. self.assertEqual(input_details[0]['dtype'], dtypes.float32) self.assertEqual(input_details[1]['dtype'], inference_input_output_type.as_numpy_dtype) output_details = interpreter.get_output_details() self.assertLen(output_details, 2) # Allow float32 for fallback. self.assertEqual(output_details[0]['dtype'], dtypes.float32) self.assertEqual(output_details[1]['dtype'], inference_input_output_type.as_numpy_dtype) class FromSavedModelTest(lite_v2_test_util.ModelTest): def _createV1SavedModel(self, shape): """Create a simple SavedModel.""" saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel') with tf.Graph().as_default(): with tf.compat.v1.Session() as sess: in_tensor_1 = tf.compat.v1.placeholder( shape=shape, dtype=tf.float32, name='inputB') in_tensor_2 = tf.compat.v1.placeholder( shape=shape, dtype=tf.float32, name='inputA') variable_node = tf.Variable(1.0, name='variable_node') out_tensor = in_tensor_1 + in_tensor_2 * variable_node inputs = {'x': in_tensor_1, 'y': in_tensor_2} outputs = {'z': out_tensor} sess.run(tf.compat.v1.variables_initializer([variable_node])) saved_model.simple_save(sess, saved_model_dir, inputs, outputs) return saved_model_dir @test_util.run_v2_only def testV1SimpleModel(self): """Test a SavedModel.""" with tf.Graph().as_default(): saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3]) # Convert model and ensure model is not None. 
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir) tflite_model = converter.convert() self.assertTrue(tflite_model) interpreter = Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertLen(input_details, 2) self.assertStartsWith(input_details[0]['name'], 'inputA') self.assertEqual(np.float32, input_details[0]['dtype']) self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape']) self.assertEqual((0., 0.), input_details[0]['quantization']) self.assertStartsWith(input_details[1]['name'], 'inputB') self.assertEqual(np.float32, input_details[1]['dtype']) self.assertAllEqual([1, 16, 16, 3], input_details[1]['shape']) self.assertEqual((0., 0.), input_details[1]['quantization']) output_details = interpreter.get_output_details() self.assertLen(output_details, 1) self.assertStartsWith(output_details[0]['name'], 'add') self.assertEqual(np.float32, output_details[0]['dtype']) self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape']) self.assertEqual((0., 0.), output_details[0]['quantization']) @test_util.run_v2_only def testTF1HubFormattedModel(self): """Test a TF1 hub formatted model.""" saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3]) # TF1 Hub models are based on V1 SavedModels and omit the saved model # schema version setting. saved_model_proto = parse_saved_model(saved_model_dir) saved_model_proto.saved_model_schema_version = 0 saved_model_pb_file_path = os.path.join(saved_model_dir, 'saved_model.pb') with file_io.FileIO(saved_model_pb_file_path, 'wb') as writer: writer.write(saved_model_proto.SerializeToString()) # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir) tflite_model = converter.convert() self.assertTrue(tflite_model) @test_util.run_v2_only def testConstModel(self): """Test a basic model with functions to make sure functions are inlined.""" input_data = tf.constant(1., shape=[1]) root = tracking.AutoTrackable() root.f = tf.function(lambda x: 2. * x) to_save = root.f.get_concrete_function(input_data) save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(root, save_dir, to_save) # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_saved_model(save_dir) tflite_model = converter.convert() # Check values from converted model. expected_value = root.f(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value.numpy(), actual_value) @test_util.run_v2_only def testVariableModel(self): """Test a basic model with Variables, saving/loading via SavedModel.""" root = self._getSimpleVariableModel() input_data = tf.constant(1., shape=[1]) to_save = root.f.get_concrete_function(input_data) save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(root, save_dir, to_save) # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_saved_model(save_dir) tflite_model = converter.convert() # Check values from converted model.
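    # The variable's value is frozen into the flatbuffer at conversion time,
    # so the TFLite output should match eager execution of root.f exactly.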
expected_value = root.f(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value.numpy(), actual_value) @test_util.run_v2_only def testSignatures(self): """Test values for `signature_keys` argument.""" root = self._getSimpleVariableModel() input_data = tf.constant(1., shape=[1]) to_save = root.f.get_concrete_function(input_data) save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(root, save_dir, to_save) # Convert model with invalid `signature_keys`. with self.assertRaises(ValueError) as error: _ = lite.TFLiteConverterV2.from_saved_model( save_dir, signature_keys=['INVALID']) self.assertIn("Invalid signature key 'INVALID'", str(error.exception)) # Convert model with empty `signature_keys`. converter = lite.TFLiteConverterV2.from_saved_model( save_dir, signature_keys=[]) tflite_model = converter.convert() # Check values from converted model. expected_value = root.f(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value.numpy(), actual_value) @test_util.run_v2_only def testMultipleFunctionModel(self): """Convert multiple functions in a multi-functional model.""" root = self._getMultiFunctionModel() input_data = tf.constant(1., shape=[1]) add_func = root.add.get_concrete_function(input_data) sub_func = root.sub.get_concrete_function(input_data) save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(root, save_dir, {'add': add_func, 'sub': sub_func}) # Try converting multiple functions. with self.assertRaises(ValueError) as error: _ = lite.TFLiteConverterV2.from_saved_model(save_dir) self.assertIn('Only support a single signature key.', str(error.exception)) @test_util.run_v2_only def testNoConcreteFunctionModel(self): root = self._getMultiFunctionModel() save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(root, save_dir) with self.assertRaises(ValueError) as error: _ = lite.TFLiteConverterV2.from_saved_model(save_dir) self.assertIn('Only support a single signature key.', str(error.exception)) @test_util.run_v2_only def testKerasSequentialModel(self): """Test a simple sequential tf.Keras model.""" input_data = tf.constant(1., shape=[1, 1]) x = np.array([[1.], [2.]]) y = np.array([[2.], [4.]]) model = tf.keras.models.Sequential([ tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(1), ]) model.compile(optimizer='sgd', loss='mean_squared_error') model.fit(x, y, epochs=1) save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(model, save_dir) # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_saved_model(save_dir) tflite_model = converter.convert() # Check values from converted model. expected_value = model.predict(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value, actual_value) @test_util.run_v2_only def testGraphDebugInfo(self): """Test a SavedModel has debug info captured.""" input_data = tf.constant(1., shape=[1]) root = tracking.AutoTrackable() root.f = tf.function(lambda x: 2. * x) to_save = root.f.get_concrete_function(input_data) options = save_options.SaveOptions(save_debug_info=True) save_dir = os.path.join(self.get_temp_dir(), 'saved_model') save(root, save_dir, to_save, options) # Convert model and ensure model is not None. 
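    # SaveOptions(save_debug_info=True) makes `save` emit GraphDebugInfo
    # alongside the SavedModel; the converter exposes it as
    # `converter._debug_info`, which _assertValidDebugInfo inspects below.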
converter = lite.TFLiteConverterV2.from_saved_model(save_dir) converter.convert() self._assertValidDebugInfo(converter._debug_info) @test_util.run_v2_only def testFallbackPath(self): """Test a SavedModel fallback path using old converter.""" saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3]) # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir) converter.experimental_new_converter = False tflite_model = converter.convert() self.assertTrue(tflite_model) @test_util.run_v2_only def testNonStatefulConvLSTM2D(self): """Test saved model with non stateful ConvLSTM2D keras layer.""" # Create keras model model = tf.keras.Sequential([ tf.keras.layers.ConvLSTM2D( 32, (3, 3), padding='same', return_sequences=True, stateful=False, batch_input_shape=(1, 1, 10, 10, 1)) ]) model.compile() # Export the keras model to saved model. saved_model_dir = os.path.join(self.get_temp_dir(), 'conv_lstm_2d') model.save(saved_model_dir, save_format='tf', include_optimizer=False) converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() self.assertTrue(tflite_model) class FromKerasModelTest(lite_v2_test_util.ModelTest): @test_util.run_v2_only def testSequentialModel(self): """Test a simple sequential tf.Keras model.""" input_data = tf.constant(1., shape=[1, 1]) # Create a simple Keras model. x = np.array([[1.], [2.]]) y = np.array([[2.], [4.]]) model = tf.keras.models.Sequential([ tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(units=1, input_shape=[1]) ]) model.compile(optimizer='sgd', loss='mean_squared_error') model.fit(x, y, epochs=1) # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_keras_model(model) tflite_model = converter.convert() # Check values from converted model. expected_value = model.predict(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value, actual_value) @test_util.run_v2_only def testSequentialMultiInputOutputModel(self): """Test a tf.Keras model with multiple inputs and outputs.""" left_input_data = tf.constant(1., shape=[1, 3]) right_input_data = tf.constant(1., shape=[1, 3]) # Create a simple Keras model. input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 3)) output_c_np = np.random.random((10, 3)) output_d_np = np.random.random((10, 2)) input_a = tf.keras.layers.Input(shape=(3,), name='input_a') input_b = tf.keras.layers.Input(shape=(3,), name='input_b') dense = tf.keras.layers.Dense(8, name='dense_1') interm_a = dense(input_a) interm_b = dense(input_b) merged = tf.keras.layers.concatenate([interm_a, interm_b], name='merge') output_c = tf.keras.layers.Dense( 3, activation='softmax', name='dense_2')( merged) output_d = tf.keras.layers.Dense( 2, activation='softmax', name='dense_3')( merged) model = tf.keras.models.Model( inputs=[input_a, input_b], outputs=[output_c, output_d]) model.compile(optimizer='sgd', loss='mean_squared_error') model.fit([input_a_np, input_b_np], [output_c_np, output_d_np], epochs=1) # Convert model and ensure model is not None. converter = lite.TFLiteConverterV2.from_keras_model(model) tflite_model = converter.convert() # Check values from converted model. 
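    # Feed the two inputs in the order of the Keras `inputs` list
    # ([input_a, input_b]); the converted model is expected to preserve
    # that ordering.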
input_data = [left_input_data, right_input_data] expected_value = model.predict(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, input_data) for tf_result, tflite_result in zip(expected_value, actual_value): self.assertAllClose(tf_result, tflite_result, atol=1e-05) @test_util.run_v2_only def testGraphDebugInfo(self): """Test a tf.Keras model has debug info captured.""" # Create a simple Keras model. x = [-1, 0, 1, 2, 3, 4] y = [-3, -1, 1, 3, 5, 7] model = tf.keras.models.Sequential( [tf.keras.layers.Dense(units=1, input_shape=[1])]) model.compile(optimizer='sgd', loss='mean_squared_error') model.fit(x, y, epochs=1) converter = lite.TFLiteConverterV2.from_keras_model(model) converter.convert() self._assertValidDebugInfo(converter._debug_info) @test_util.run_v2_only def testKerasFallbackPath(self): """Test keras model which failed when exporting to the saved model.""" input_data = tf.constant( np.array(np.random.random_sample((20)), dtype=np.float32)) class Model(tf.keras.Model): def __init__(self): super(Model, self).__init__() # A None name will cause a failure in exporting to a saved model. self.shared_weights = self.add_weight( name=None, shape=(20, 1), dtype=tf.float32, initializer=tf.random_normal_initializer( mean=0.0, stddev=300**(-0.5))) def call(self, x): return tf.add(self.shared_weights, x) # Building the model. model = Model() model.compile(optimizer='sgd', loss='mean_squared_error') model.fit(input_data, input_data, epochs=1) # Convert model. converter = lite.TFLiteConverterV2.from_keras_model(model) tflite_model = converter.convert() self.assertTrue(tflite_model) class ControlFlowTest(lite_v2_test_util.ModelTest): @test_util.run_v2_only def testCond(self): input_data = { 'x': tf.constant([1., 2.], shape=[1, 2]), 'b': tf.constant(True) } weights = tf.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float32) def true_fn(x): return tf.matmul(x, weights) def false_fn(x): return tf.add(x, weights) @tf.function(input_signature=[ tf.TensorSpec(shape=[1, 2], dtype=tf.float32), tf.TensorSpec(shape=(), dtype=tf.bool) ]) def model(x, b): return tf.cond( b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x)) concrete_func = model.get_concrete_function() # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) tflite_model = converter.convert() # Check values from converted model. expected_value = concrete_func(**input_data) actual_value = self._evaluateTFLiteModel( tflite_model, [input_data['x'], input_data['b']])[0] self.assertAllClose(expected_value, actual_value) @test_util.run_v2_only def testStaticRnn(self): input_data = tf.constant( np.array(np.random.random_sample((3, 10)), dtype=np.float32)) cell = tf.compat.v1.nn.rnn_cell.LSTMCell(10) @tf.function( input_signature=[tf.TensorSpec(shape=[3, 10], dtype=tf.float32)]) def model(x): seq = tf.split(x, 3, 0) return tf.compat.v1.nn.static_rnn( cell, seq, dtype=tf.float32, sequence_length=[1]) concrete_func = model.get_concrete_function() # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) tflite_model = converter.convert() # Check values from converted model. 
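    # static_rnn returns (outputs, final_state); index [0] keeps only the
    # per-step outputs for comparison against the TFLite results.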
expected_value = concrete_func(input_data)[0] actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) for expected, actual in zip(expected_value, actual_value): self.assertAllClose(expected, actual) @test_util.run_v2_only def testWhileLoop(self): input_data = tf.constant([1., 2., 3., 4.], shape=[2, 2]) weights = tf.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float32) def condition(x): return tf.reduce_sum(x) < 100 def body(x): return tf.add(x, weights) @tf.function( input_signature=[tf.TensorSpec(shape=[2, 2], dtype=tf.float32)]) def model(x): return tf.while_loop(condition, body, [x]) concrete_func = model.get_concrete_function() # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) tflite_model = converter.convert() # Check values from converted model. expected_value = concrete_func(input_data)[0] actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0] self.assertAllClose(expected_value, actual_value) @test_util.run_v2_only def testDynamicRnn(self): input_data = tf.constant( np.array(np.random.random_sample((3, 10, 10)), dtype=np.float32)) cell = tf.compat.v1.nn.rnn_cell.LSTMCell(10) @tf.function( input_signature=[tf.TensorSpec(shape=[3, 10, 10], dtype=tf.float32)]) def model(x): return tf.compat.v1.nn.dynamic_rnn(cell, x, dtype=tf.float32) concrete_func = model.get_concrete_function() # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) tflite_model = converter.convert() # Check values from converted model. expected_value = concrete_func(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) for expected, actual in zip(expected_value, actual_value): if not isinstance(expected, ops.EagerTensor): expected = expected.c self.assertAllClose(expected, actual) @parameterized.named_parameters(('LSTM', tf.keras.layers.LSTM), ('SimpleRNN', tf.keras.layers.SimpleRNN), ('GRU', tf.keras.layers.GRU)) @test_util.run_v2_only def testKerasRNN(self, rnn_layer): # This relies on TFLiteConverter to rewrite unknown batch size to 1. The # model will fail if resizing the input to non-1 batch size. input_data = tf.constant( np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32)) rnn_obj = rnn_layer(units=10, input_shape=(10, 10)) model = tf.keras.models.Sequential([ tf.keras.layers.Input(batch_size=1, shape=(10, 10), name='input'), rnn_obj, ]) # Convert model. converter = lite.TFLiteConverterV2.from_keras_model(model) tflite_model = converter.convert() actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0] # Check values from converted model. expected_value = model.predict(input_data) self.assertAllClose(expected_value, actual_value, atol=1e-05) @parameterized.named_parameters(('LSTM', tf.keras.layers.LSTM), ('SimpleRNN', tf.keras.layers.SimpleRNN), ('GRU', tf.keras.layers.GRU)) @test_util.run_v2_only def testKerasRNNMultiBatches(self, rnn_layer): input_data = tf.constant( np.array(np.random.random_sample((4, 10, 10)), dtype=np.float32)) # Specify a fixed batch size(4) for the test model. x = tf.keras.layers.Input(batch_shape=(4, 10, 10)) y = rnn_layer(units=10, input_shape=(10, 10))(x) model = tf.keras.Model(inputs=[x], outputs=[y]) # Convert model. converter = lite.TFLiteConverterV2.from_keras_model(model) tflite_model = converter.convert() actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0] # Check values from converted model. 
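    # With batch_shape=(4, 10, 10) fixed at model definition, the converted
    # model accepts the full 4-sample batch without any input resizing.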
expected_value = model.predict(input_data) self.assertAllClose(expected_value, actual_value, atol=1e-05) @test_util.run_v2_only def testKerasBidirectionalRNNReturnSequence(self): input_data = tf.constant( np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32)) model = tf.keras.models.Sequential() model.add(tf.keras.layers.Input(batch_size=1, shape=(10, 10), name='input')) model.add( tf.keras.layers.Bidirectional( tf.keras.layers.LSTM(units=10, return_sequences=True), input_shape=(10, 10))) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(5)) model.add(tf.keras.layers.Activation('softmax')) # Convert model. converter = lite.TFLiteConverterV2.from_keras_model(model) tflite_model = converter.convert() actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0] # Check values from converted model. expected_value = model.predict(input_data) self.assertAllClose(expected_value, actual_value, atol=1e-05) @test_util.run_v2_only def testKerasBidirectionalRNN(self): input_data = tf.constant( np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32)) model = tf.keras.models.Sequential() model.add(tf.keras.layers.Input(batch_size=1, shape=(10, 10), name='input')) model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=10))) model.add(tf.keras.layers.Dense(5)) model.add(tf.keras.layers.Activation('softmax')) # Convert model. converter = lite.TFLiteConverterV2.from_keras_model(model) tflite_model = converter.convert() actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0] # Check values from converted model. expected_value = model.predict(input_data) self.assertAllClose(expected_value, actual_value, atol=1e-05) class GrapplerTest(lite_v2_test_util.ModelTest): @test_util.run_v2_only def testConstantFolding(self): # Constant folding handles the tf.broadcast_to operation which was not # supported by the TFLite at the time this test was added. input_data = tf.constant([1., 2., 3., 4., 5., 6., 7., 8., 9.], shape=[3, 3]) @tf.function def func(x): y_const = tf.constant([1., 2., 3.]) y_broadcast = tf.broadcast_to(y_const, [3, 3]) return tf.matmul(x, y_broadcast) root = tracking.AutoTrackable() root.f = func concrete_func = root.f.get_concrete_function(input_data) # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) tflite_model = converter.convert() # Check values from converted model. expected_value = root.f(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0] self.assertAllClose(expected_value, actual_value) # Enable hybrid quantization, same result converter.optimizations = [lite.Optimize.DEFAULT] tflite_model = converter.convert() actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0] self.assertAllClose(expected_value, actual_value) class UnknownShapes(lite_v2_test_util.ModelTest): @test_util.run_v2_only def testMatMul(self): input_data = tf.constant( np.array(np.random.random_sample((10, 4)), dtype=np.float32)) @tf.function( input_signature=[tf.TensorSpec(shape=[None, 4], dtype=tf.float32)]) def model(in_tensor): shape = tf.shape(in_tensor) fill = tf.transpose(tf.fill(shape, 1.)) return tf.matmul(fill, in_tensor) concrete_func = model.get_concrete_function() converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) tflite_model = converter.convert() # Check values from converted model. 
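    # input_shapes pairs the signature shape [-1, 4] with the concrete
    # runtime shape [10, 4]; the helper presumably resizes the input tensor
    # to [10, 4] before invoking the interpreter.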
expected_value = concrete_func(input_data) actual_value = self._evaluateTFLiteModel( tflite_model, [input_data], input_shapes=[([-1, 4], [10, 4])])[0] self.assertAllClose(expected_value, actual_value, atol=1e-06) def _getIntegerQuantizeModelWithUnknownShapes(self): np.random.seed(0) @tf.function( input_signature=[tf.TensorSpec(shape=[None, 33], dtype=tf.float32)]) def model(input_tensor): """Define a model with tf.MatMul and unknown shapes.""" # We need the tensor to have more than 1024 elements for quantize_weights # to kick in. Thus, the [33, 33] shape. const_tensor = tf.constant( np.random.uniform(low=-10., high=10., size=[33, 33]), shape=[33, 33], dtype=tf.float32, name='inputB') shape = tf.shape(input_tensor) fill = tf.transpose(tf.fill(shape, 1.)) mult = tf.matmul(fill, input_tensor) return tf.matmul(mult, const_tensor) root = tracking.AutoTrackable() root.f = model concrete_func = root.f.get_concrete_function() def calibration_gen(): for batch in range(5, 20, 5): for _ in range(5): yield [np.random.uniform(-1, 1, size=(batch, 33)).astype(np.float32)] return concrete_func, calibration_gen @test_util.run_v2_only def testMatMulQuantize(self): concrete_func, _ = self._getIntegerQuantizeModelWithUnknownShapes() float_converter = lite.TFLiteConverterV2.from_concrete_functions( [concrete_func]) float_tflite_model = float_converter.convert() quantized_converter = lite.TFLiteConverterV2.from_concrete_functions( [concrete_func]) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_tflite_model = quantized_converter.convert() # The default input and output types should be float. quantized_interpreter = Interpreter(model_content=quantized_tflite_model) quantized_interpreter.allocate_tensors() input_details = quantized_interpreter.get_input_details() self.assertLen(input_details, 1) self.assertEqual(np.float32, input_details[0]['dtype']) self.assertAllEqual([-1, 33], input_details[0]['shape_signature']) # Ensure that the quantized weights tflite model is smaller. self.assertLess(len(quantized_tflite_model), len(float_tflite_model)) @test_util.run_v2_only def testMatMulCalibrateAndQuantize(self): concrete_func, calibration_gen = \ self._getIntegerQuantizeModelWithUnknownShapes() float_converter = lite.TFLiteConverterV2.from_concrete_functions( [concrete_func]) float_tflite_model = float_converter.convert() quantized_converter = lite.TFLiteConverterV2.from_concrete_functions( [concrete_func]) quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_converter.representative_dataset = calibration_gen quantized_tflite_model = quantized_converter.convert() # The default input and output types should be float. quantized_interpreter = Interpreter(model_content=quantized_tflite_model) quantized_interpreter.allocate_tensors() input_details = quantized_interpreter.get_input_details() self.assertLen(input_details, 1) self.assertEqual(np.float32, input_details[0]['dtype']) self.assertAllEqual([-1, 33], input_details[0]['shape_signature']) # Ensure that the quantized weights tflite model is smaller. 
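    # int8 weights take roughly a quarter of the float32 footprint, so a
    # strict size decrease is a cheap sanity check that quantization ran.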
self.assertLess(len(quantized_tflite_model), len(float_tflite_model)) def testBatchMatMul(self): input_data_1 = tf.constant( np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32)) input_data_2 = tf.constant( np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32)) @tf.function(input_signature=[ tf.TensorSpec(shape=[None, 256, 256], dtype=tf.float32), tf.TensorSpec(shape=[None, 256, 256], dtype=tf.float32) ]) def model(in_tensor_1, in_tensor_2): return tf.matmul(in_tensor_1, in_tensor_2) concrete_func = model.get_concrete_function() converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) tflite_model = converter.convert() # Check values from converted model. expected_value = concrete_func(input_data_1, input_data_2) actual_value = self._evaluateTFLiteModel( tflite_model, [input_data_1, input_data_2], input_shapes=[([-1, 256, 256], [1, 256, 256])])[0] self.assertAllClose(expected_value, actual_value, atol=4) def testSizeInvalid(self): @tf.function(input_signature=[ tf.TensorSpec(shape=[1, None, 16, 3], dtype=tf.float32) ]) def model(in_tensor): return in_tensor + in_tensor concrete_func = model.get_concrete_function() # Test invalid shape. None after 1st dimension. Run with TOCO in order to # invoke shape checking code. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) converter.experimental_new_converter = False with self.assertRaises(ValueError) as error: converter.convert() self.assertEqual( 'None is only supported in the 1st dimension. Tensor ' '\'in_tensor\' has invalid shape \'[1, None, 16, 3]\'.', str(error.exception)) if __name__ == '__main__': test.main()
[ "tensorflow.math.add", "tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model", "tensorflow.reduce_sum", "tensorflow.lite.python.interpreter.Interpreter", "numpy.random.random_sample", "tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model", "tensorflow.compat.v1.nn.rnn_cell.LSTMCell", "tensorflow.compat.v1.nn.static_rnn", "tensorflow.keras.layers.ConvLSTM2D", "tensorflow.python.saved_model.saved_model.simple_save", "tensorflow.math.tanh", "tensorflow.python.saved_model.save_options.SaveOptions", "tensorflow.Graph", "tensorflow.while_loop", "tensorflow.Variable", "tensorflow.lite.TFLiteConverter.from_concrete_functions", "tensorflow.python.saved_model.loader_impl.parse_saved_model", "tensorflow.gather", "tensorflow.python.platform.test.main", "tensorflow.math.ceil", "tensorflow.add", "tensorflow.quantization.fake_quant_with_min_max_vars", "tensorflow.lite.TFLiteConverter.from_saved_model", "tensorflow.random_normal_initializer", "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.Flatten", "tensorflow.compat.v1.nn.dynamic_rnn", "tensorflow.math.erf", "tensorflow.matmul", "tensorflow.fill", "tensorflow.python.saved_model.save.save", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.python.training.tracking.tracking.AutoTrackable", "tensorflow.shape", "tensorflow.keras.Model", "tensorflow.nn.tanh", "tensorflow.function", "tensorflow.split", "numpy.array", "tensorflow.nn.relu", "tensorflow.keras.initializers.Constant", "tensorflow.constant", "numpy.random.random", "numpy.random.seed", "tensorflow.keras.layers.Activation", "tensorflow.python.lib.io.file_io.FileIO", "tensorflow.broadcast_to", "tensorflow.lite.python.convert.mlir_quantize", "tensorflow.compat.v1.variables_initializer", "tensorflow.keras.layers.concatenate", "tensorflow.ones", "tensorflow.compat.v1.Session", "numpy.random.uniform", "tensorflow.compat.v1.placeholder", "tensorflow.keras.layers.LSTM", "tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions", "tensorflow.keras.layers.Dropout", "tensorflow.TensorSpec", "tensorflow.keras.layers.Input" ]
tensorflow/lite/python/lite_v2_test.py
[(56, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('EnableMlirConverter', True)", "('DisableMlirConverter', False)"], {}), False, 'from absl.testing import parameterized\n'), (75, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_INT8InputOutput', dtypes.int8)", "('_UINT8InputOutput', dtypes.uint8)", "('_INT16InputOutput', dtypes.int16)"], {}), False, 'from absl.testing import parameterized\n'), (163, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('EnableMlirQuantizer', True)", "('DisableMlirQuantizer', False)"], {}), False, 'from absl.testing import parameterized\n'), (195, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_INT8InputOutput', dtypes.int8)", "('_UINT8InputOutput', dtypes.uint8)", "('_INT16InputOutput', dtypes.int16)"], {}), False, 'from absl.testing import parameterized\n'), (220, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_Default', False, False, dtypes.float32)", "('_INT8InputOutput', False, False, dtypes.int8)", "('_UINT8InputOutput', False, False, dtypes.uint8)", "('_INT16Quantize', False, True, dtypes.float32)", "('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16)", "('_IntOnly', True, False, dtypes.float32)", "('_IntOnly_INT8InputOutput', True, False, dtypes.int8)", "('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8)", "('_IntOnly_INT16Quantize', True, True, dtypes.float32)", "('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16)"], {}), False, 'from absl.testing import parameterized\n'), (282, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_INT16Quantize_INT8InputOutput', True, dtypes.int8)"], {}), False, 'from absl.testing import parameterized\n'), (378, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_DefaultFLOAT32InputOutput', dtypes.float32)", "('_INT8InputOutput', dtypes.int8)", "('_UINT8InputOutput', dtypes.uint8)"], {}), False, 'from absl.testing import parameterized\n'), (437, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('EnableMlirConverter', True)", "('DisableMlirConverter', False)"], {}), False, 'from absl.testing import parameterized\n'), (519, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_Default', False, False, dtypes.float32)", "('_INT8InputOutput', False, False, dtypes.int8)", "('_UINT8InputOutput', False, False, dtypes.uint8)", "('_INT16Quantize', False, True, dtypes.float32)", "('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16)", "('_IntOnly', True, False, dtypes.float32)", "('_IntOnly_INT8InputOutput', True, False, dtypes.int8)", "('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8)", "('_IntOnly_INT16Quantize', True, True, dtypes.float32)", "('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16)"], {}), False, 'from absl.testing import parameterized\n'), (607, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('_INT8InputOutput', False, False, dtypes.int8)", "('_UINT8InputOutput', False, False, dtypes.uint8)", "('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16)", "('_IntOnly_INT8InputOutput', True, False, dtypes.int8)", "('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8)", "('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16)"], {}), False, 'from absl.testing import 
parameterized\n'), (1148, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('LSTM', tf.keras.layers.LSTM)", "('SimpleRNN', tf.keras.layers.SimpleRNN)", "('GRU', tf.keras.layers.GRU)"], {}), False, 'from absl.testing import parameterized\n'), (1172, 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('LSTM', tf.keras.layers.LSTM)", "('SimpleRNN', tf.keras.layers.SimpleRNN)", "('GRU', tf.keras.layers.GRU)"], {}), False, 'from absl.testing import parameterized\n'), (1422, 'tensorflow.python.platform.test.main', 'test.main', ([], {}), False, 'from tensorflow.python.platform import test\n'), (62, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (82, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (98, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[]'}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (114, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (130, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[add_func, sub_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (143, 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), True, 'import numpy as np\n'), (145, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (170, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (175, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (183, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (205, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (210, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (238, 
'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (243, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (268, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (289, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (310, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (314, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (322, 'tensorflow.lite.python.convert.mlir_quantize', 'mlir_quantize', (['calibrated_tflite'], {'inference_type': '_types_pb2.QUANTIZED_INT16'}), False, 'from tensorflow.lite.python.convert import mlir_quantize\n'), (328, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (386, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (390, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (397, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (416, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (430, 'six.moves.range', 'range', (['(5)'], {}), False, 'from six.moves import range\n'), (466, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (478, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (479, 'tensorflow.Variable', 'tf.Variable', (['(3.0)'], {}), True, 'import tensorflow as tf\n'), (480, 'tensorflow.function', 'tf.function', (['(lambda x: root.v1 * x)'], {}), True, 'import tensorflow as tf\n'), (481, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (485, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (490, 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), True, 'import numpy as np\n'), (492, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', 
([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (535, 'tensorflow.lite.TFLiteConverter.from_concrete_functions', 'tf.lite.TFLiteConverter.from_concrete_functions', (['[func]'], {}), True, 'import tensorflow as tf\n'), (568, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (580, 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), True, 'import numpy as np\n'), (582, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (620, 'tensorflow.lite.TFLiteConverter.from_concrete_functions', 'tf.lite.TFLiteConverter.from_concrete_functions', (['[func]'], {}), True, 'import tensorflow as tf\n'), (652, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (730, 'tensorflow.python.saved_model.loader_impl.parse_saved_model', 'parse_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.python.saved_model.loader_impl import parse_saved_model\n'), (733, 'os.path.join', 'os.path.join', (['saved_model_dir', '"""saved_model.pb"""'], {}), False, 'import os\n'), (738, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (745, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (746, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (747, 'tensorflow.function', 'tf.function', (['(lambda x: 2.0 * x)'], {}), True, 'import tensorflow as tf\n'), (751, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', 'to_save'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (754, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (766, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (770, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', 'to_save'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (773, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (785, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (789, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', 'to_save'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (798, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {'signature_keys': '[]'}), False, 'from tensorflow.lite.python import lite\n'), (811, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (816, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', "{'add': add_func, 'sub': sub_func}"], {}), False, 'from tensorflow.python.saved_model.save 
import save\n'), (828, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (837, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1, 1]'}), True, 'import tensorflow as tf\n'), (839, 'numpy.array', 'np.array', (['[[1.0], [2.0]]'], {}), True, 'import numpy as np\n'), (840, 'numpy.array', 'np.array', (['[[2.0], [4.0]]'], {}), True, 'import numpy as np\n'), (850, 'tensorflow.python.saved_model.save.save', 'save', (['model', 'save_dir'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (853, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (864, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1]'}), True, 'import tensorflow as tf\n'), (865, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (866, 'tensorflow.function', 'tf.function', (['(lambda x: 2.0 * x)'], {}), True, 'import tensorflow as tf\n'), (868, 'tensorflow.python.saved_model.save_options.SaveOptions', 'save_options.SaveOptions', ([], {'save_debug_info': '(True)'}), False, 'from tensorflow.python.saved_model import save_options\n'), (870, 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', 'to_save', 'options'], {}), False, 'from tensorflow.python.saved_model.save import save\n'), (873, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (883, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (907, 'tensorflow.lite.TFLiteConverter.from_saved_model', 'tf.lite.TFLiteConverter.from_saved_model', (['saved_model_dir'], {}), True, 'import tensorflow as tf\n'), (920, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1, 1]'}), True, 'import tensorflow as tf\n'), (923, 'numpy.array', 'np.array', (['[[1.0], [2.0]]'], {}), True, 'import numpy as np\n'), (924, 'numpy.array', 'np.array', (['[[2.0], [4.0]]'], {}), True, 'import numpy as np\n'), (934, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (945, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1, 3]'}), True, 'import tensorflow as tf\n'), (946, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1, 3]'}), True, 'import tensorflow as tf\n'), (949, 'numpy.random.random', 'np.random.random', (['(10, 3)'], {}), True, 'import numpy as np\n'), (950, 'numpy.random.random', 'np.random.random', (['(10, 3)'], {}), True, 'import numpy as np\n'), (951, 'numpy.random.random', 'np.random.random', (['(10, 3)'], {}), True, 'import numpy as np\n'), (952, 'numpy.random.random', 'np.random.random', (['(10, 2)'], {}), True, 'import numpy as np\n'), (954, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(3,)', 'name': '"""input_a"""'}), True, 'import tensorflow as tf\n'), (955, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(3,)', 'name': '"""input_b"""'}), True, 'import tensorflow as tf\n'), (957, 'tensorflow.keras.layers.Dense', 
'tf.keras.layers.Dense', (['(8)'], {'name': '"""dense_1"""'}), True, 'import tensorflow as tf\n'), (960, 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[interm_a, interm_b]'], {'name': '"""merge"""'}), True, 'import tensorflow as tf\n'), (969, 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': '[input_a, input_b]', 'outputs': '[output_c, output_d]'}), True, 'import tensorflow as tf\n'), (975, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (982, 'six.moves.zip', 'zip', (['expected_value', 'actual_value'], {}), False, 'from six.moves import zip\n'), (995, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (1026, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (1040, 'tensorflow.Variable', 'tf.Variable', (['[[0.1, 0.2], [0.3, 0.4]]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (1059, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (1073, 'tensorflow.compat.v1.nn.rnn_cell.LSTMCell', 'tf.compat.v1.nn.rnn_cell.LSTMCell', (['(10)'], {}), True, 'import tensorflow as tf\n'), (1085, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (1091, 'six.moves.zip', 'zip', (['expected_value', 'actual_value'], {}), False, 'from six.moves import zip\n'), (1096, 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0, 3.0, 4.0]'], {'shape': '[2, 2]'}), True, 'import tensorflow as tf\n'), (1098, 'tensorflow.Variable', 'tf.Variable', (['[[0.1, 0.2], [0.3, 0.4]]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (1114, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (1127, 'tensorflow.compat.v1.nn.rnn_cell.LSTMCell', 'tf.compat.v1.nn.rnn_cell.LSTMCell', (['(10)'], {}), True, 'import tensorflow as tf\n'), (1137, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (1143, 'six.moves.zip', 'zip', (['expected_value', 'actual_value'], {}), False, 'from six.moves import zip\n'), (1164, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (1180, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'batch_shape': '(4, 10, 10)'}), True, 'import tensorflow as tf\n'), (1182, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[x]', 'outputs': '[y]'}), True, 'import tensorflow as tf\n'), (1185, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (1197, 'tensorflow.keras.models.Sequential', 
'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (1208, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (1220, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (1227, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model', 'lite.TFLiteConverterV2.from_keras_model', (['model'], {}), False, 'from tensorflow.lite.python import lite\n'), (1242, 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]'], {'shape': '[3, 3]'}), True, 'import tensorflow as tf\n'), (1250, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (1255, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (1286, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (1296, 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), True, 'import numpy as np\n'), (1315, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (1329, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (1333, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (1339, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1353, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (1357, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (1364, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'quantized_tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (1389, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (1411, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[concrete_func]'], {}), False, 'from tensorflow.lite.python import lite\n'), (53, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions', 'lite.TFLiteConverterV2.from_concrete_functions', (['[root.f]'], {}), False, 'from tensorflow.lite.python import lite\n'), (152, 'tensorflow.nn.relu', 'tf.nn.relu', (['conv'], {'name': '"""output"""'}), True, 'import tensorflow as tf\n'), 
(156, 'six.moves.range', 'range', (['(5)'], {}), False, 'from six.moves import range\n'), (498, 'tensorflow.math.tanh', 'tf.math.tanh', (['inp'], {}), True, 'import tensorflow as tf\n'), (506, 'tensorflow.math.erf', 'tf.math.erf', (['conv3d'], {}), True, 'import tensorflow as tf\n'), (507, 'tensorflow.math.tanh', 'tf.math.tanh', (['erf'], {}), True, 'import tensorflow as tf\n'), (511, 'six.moves.range', 'range', (['(5)'], {}), False, 'from six.moves import range\n'), (590, 'tensorflow.math.ceil', 'tf.math.ceil', (['a'], {}), True, 'import tensorflow as tf\n'), (591, 'tensorflow.nn.tanh', 'tf.nn.tanh', (['b'], {}), True, 'import tensorflow as tf\n'), (592, 'tensorflow.math.add', 'tf.math.add', (['left', 'right'], {}), True, 'import tensorflow as tf\n'), (594, 'tensorflow.math.ceil', 'tf.math.ceil', (['add'], {}), True, 'import tensorflow as tf\n'), (598, 'six.moves.range', 'range', (['(5)'], {}), False, 'from six.moves import range\n'), (694, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['saved_model_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (698, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (734, 'tensorflow.python.lib.io.file_io.FileIO', 'file_io.FileIO', (['saved_model_pb_file_path', '"""wb"""'], {}), False, 'from tensorflow.python.lib.io import file_io\n'), (793, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {'signature_keys': "['INVALID']"}), False, 'from tensorflow.lite.python import lite\n'), (820, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (831, 'tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model', 'lite.TFLiteConverterV2.from_saved_model', (['save_dir'], {}), False, 'from tensorflow.lite.python import lite\n'), (962, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(3)'], {'activation': '"""softmax"""', 'name': '"""dense_2"""'}), True, 'import tensorflow as tf\n'), (965, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'activation': '"""softmax"""', 'name': '"""dense_3"""'}), True, 'import tensorflow as tf\n'), (1036, 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0]'], {'shape': '[1, 2]'}), True, 'import tensorflow as tf\n'), (1037, 'tensorflow.constant', 'tf.constant', (['(True)'], {}), True, 'import tensorflow as tf\n'), (1043, 'tensorflow.matmul', 'tf.matmul', (['x', 'weights'], {}), True, 'import tensorflow as tf\n'), (1046, 'tensorflow.add', 'tf.add', (['x', 'weights'], {}), True, 'import tensorflow as tf\n'), (1078, 'tensorflow.split', 'tf.split', (['x', '(3)', '(0)'], {}), True, 'import tensorflow as tf\n'), (1079, 'tensorflow.compat.v1.nn.static_rnn', 'tf.compat.v1.nn.static_rnn', (['cell', 'seq'], {'dtype': 'tf.float32', 'sequence_length': '[1]'}), True, 'import tensorflow as tf\n'), (1104, 'tensorflow.add', 'tf.add', (['x', 'weights'], {}), True, 'import tensorflow as tf\n'), (1109, 'tensorflow.while_loop', 'tf.while_loop', (['condition', 'body', '[x]'], {}), True, 'import tensorflow as tf\n'), (1132, 'tensorflow.compat.v1.nn.dynamic_rnn', 'tf.compat.v1.nn.dynamic_rnn', (['cell', 'x'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (1198, 'tensorflow.keras.layers.Input', 
'tf.keras.layers.Input', ([], {'batch_size': '(1)', 'shape': '(10, 10)', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (1203, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), True, 'import tensorflow as tf\n'), (1204, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(5)'], {}), True, 'import tensorflow as tf\n'), (1205, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""softmax"""'], {}), True, 'import tensorflow as tf\n'), (1221, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'batch_size': '(1)', 'shape': '(10, 10)', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (1223, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(5)'], {}), True, 'import tensorflow as tf\n'), (1224, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""softmax"""'], {}), True, 'import tensorflow as tf\n'), (1246, 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0, 3.0]'], {}), True, 'import tensorflow as tf\n'), (1247, 'tensorflow.broadcast_to', 'tf.broadcast_to', (['y_const', '[3, 3]'], {}), True, 'import tensorflow as tf\n'), (1248, 'tensorflow.matmul', 'tf.matmul', (['x', 'y_broadcast'], {}), True, 'import tensorflow as tf\n'), (1280, 'tensorflow.shape', 'tf.shape', (['in_tensor'], {}), True, 'import tensorflow as tf\n'), (1282, 'tensorflow.matmul', 'tf.matmul', (['fill', 'in_tensor'], {}), True, 'import tensorflow as tf\n'), (1310, 'tensorflow.shape', 'tf.shape', (['input_tensor'], {}), True, 'import tensorflow as tf\n'), (1312, 'tensorflow.matmul', 'tf.matmul', (['fill', 'input_tensor'], {}), True, 'import tensorflow as tf\n'), (1313, 'tensorflow.matmul', 'tf.matmul', (['mult', 'const_tensor'], {}), True, 'import tensorflow as tf\n'), (1320, 'six.moves.range', 'range', (['(5)', '(20)', '(5)'], {}), False, 'from six.moves import range\n'), (1385, 'tensorflow.matmul', 'tf.matmul', (['in_tensor_1', 'in_tensor_2'], {}), True, 'import tensorflow as tf\n'), (151, 'tensorflow.ones', 'tf.ones', (['[3, 3, 3, 16]'], {}), True, 'import tensorflow as tf\n'), (364, 'tensorflow.quantization.fake_quant_with_min_max_vars', 'tf.quantization.fake_quant_with_min_max_vars', (['inputs', 'self.min_var', 'self.max_var'], {}), True, 'import tensorflow as tf\n'), (367, 'tensorflow.quantization.fake_quant_with_min_max_vars', 'tf.quantization.fake_quant_with_min_max_vars', (['self.w', 'self.min_var', 'self.max_var'], {}), True, 'import tensorflow as tf\n'), (369, 'tensorflow.matmul', 'tf.matmul', (['x', 'w_fq'], {}), True, 'import tensorflow as tf\n'), (371, 'tensorflow.quantization.fake_quant_with_min_max_vars', 'tf.quantization.fake_quant_with_min_max_vars', (['x', 'self.min_var', 'self.max_var'], {}), True, 'import tensorflow as tf\n'), (444, 'numpy.random.random_sample', 'np.random.random_sample', (['(20)'], {}), True, 'import numpy as np\n'), (459, 'tensorflow.gather', 'tf.gather', (['self.shared_weights', 'x'], {}), True, 'import tensorflow as tf\n'), (503, 'tensorflow.ones', 'tf.ones', (['[3, 3, 3, 3, 3]'], {}), True, 'import tensorflow as tf\n'), (674, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'), (675, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': 'shape', 'dtype': 'tf.float32', 'name': '"""inputB"""'}), True, 'import tensorflow as tf\n'), (677, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': 'shape', 'dtype': 'tf.float32', 'name': '"""inputA"""'}), True, 'import tensorflow as 
tf\n'), (679, 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {'name': '"""variable_node"""'}), True, 'import tensorflow as tf\n'), (684, 'tensorflow.python.saved_model.saved_model.simple_save', 'saved_model.simple_save', (['sess', 'saved_model_dir', 'inputs', 'outputs'], {}), False, 'from tensorflow.python.saved_model import saved_model\n'), (843, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (844, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), True, 'import tensorflow as tf\n'), (894, 'tensorflow.keras.layers.ConvLSTM2D', 'tf.keras.layers.ConvLSTM2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'return_sequences': '(True)', 'stateful': '(False)', 'batch_input_shape': '(1, 1, 10, 10, 1)'}), True, 'import tensorflow as tf\n'), (927, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (928, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)', 'input_shape': '[1]'}), True, 'import tensorflow as tf\n'), (992, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)', 'input_shape': '[1]'}), True, 'import tensorflow as tf\n'), (1003, 'numpy.random.random_sample', 'np.random.random_sample', (['(20)'], {}), True, 'import numpy as np\n'), (1018, 'tensorflow.add', 'tf.add', (['self.shared_weights', 'x'], {}), True, 'import tensorflow as tf\n'), (1071, 'numpy.random.random_sample', 'np.random.random_sample', (['(3, 10)'], {}), True, 'import numpy as np\n'), (1101, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x'], {}), True, 'import tensorflow as tf\n'), (1125, 'numpy.random.random_sample', 'np.random.random_sample', (['(3, 10, 10)'], {}), True, 'import numpy as np\n'), (1156, 'numpy.random.random_sample', 'np.random.random_sample', (['(1, 10, 10)'], {}), True, 'import numpy as np\n'), (1159, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'batch_size': '(1)', 'shape': '(10, 10)', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (1178, 'numpy.random.random_sample', 'np.random.random_sample', (['(4, 10, 10)'], {}), True, 'import numpy as np\n'), (1196, 'numpy.random.random_sample', 'np.random.random_sample', (['(1, 10, 10)'], {}), True, 'import numpy as np\n'), (1201, 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', ([], {'units': '(10)', 'return_sequences': '(True)'}), True, 'import tensorflow as tf\n'), (1219, 'numpy.random.random_sample', 'np.random.random_sample', (['(1, 10, 10)'], {}), True, 'import numpy as np\n'), (1222, 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', ([], {'units': '(10)'}), True, 'import tensorflow as tf\n'), (1275, 'numpy.random.random_sample', 'np.random.random_sample', (['(10, 4)'], {}), True, 'import numpy as np\n'), (1281, 'tensorflow.fill', 'tf.fill', (['shape', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (1305, 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-10.0)', 'high': '(10.0)', 'size': '[33, 33]'}), True, 'import numpy as np\n'), (1311, 'tensorflow.fill', 'tf.fill', (['shape', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (1321, 'six.moves.range', 'range', (['(5)'], {}), False, 'from six.moves import range\n'), (1376, 'numpy.random.random_sample', 'np.random.random_sample', (['(1, 256, 256)'], {}), True, 'import numpy as np\n'), (1378, 'numpy.random.random_sample', 'np.random.random_sample', (['(1, 256, 256)'], {}), True, 'import numpy as np\n'), (148, 'tensorflow.TensorSpec', 
'tf.TensorSpec', ([], {'shape': '[1, 5, 5, 3]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (495, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[3, 3, 3, 3, 3]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (585, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[3]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (586, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[3]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (673, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (683, 'tensorflow.compat.v1.variables_initializer', 'tf.compat.v1.variables_initializer', (['[variable_node]'], {}), True, 'import tensorflow as tf\n'), (690, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (1049, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[1, 2]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (1050, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '()', 'dtype': 'tf.bool'}), True, 'import tensorflow as tf\n'), (1076, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[3, 10]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (1107, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[2, 2]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (1130, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[3, 10, 10]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (1278, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, 4]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (1299, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, 33]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (1381, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, 256, 256]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (1382, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, 256, 256]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (1402, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[1, None, 16, 3]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (356, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(-6.0)'], {}), True, 'import tensorflow as tf\n'), (360, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(6.0)'], {}), True, 'import tensorflow as tf\n'), (432, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(1, 5, 5, 3)'}), True, 'import numpy as np\n'), (454, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(300 ** -0.5)'}), True, 'import tensorflow as tf\n'), (457, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(20)', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (1014, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(300 ** -0.5)'}), True, 'import tensorflow as tf\n'), (157, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(1, 5, 5, 3)'}), True, 'import numpy as np\n'), (513, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(3, 3, 3, 3, 3)'}), True, 'import numpy as np\n'), (600, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(3)'}), True, 'import numpy as np\n'), (601, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(3)'}), True, 
'import numpy as np\n'), (1322, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(batch, 33)'}), True, 'import numpy as np\n')]
cryptexis/debias
a9e0106dcb8668b95e4654ccb3e7373a70ea37a3
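# Adversarial debiasing: a binary predictor is trained jointly with a
# discriminator that tries to recover a protected variable from the predictor's
# scaled output; FairClassifier below projects the predictor's gradient away
# from the discriminator's gradient so the two objectives do not cooperate.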
import tensorflow as tf
import numpy as np
from utils.data import convert_categorical
from models.base_model import BaseModel


class Discriminator:

    def __init__(self, discriminator_model, protected_variable):
        self.model = discriminator_model
        self.protected_variable = protected_variable


class FairClassifier(BaseModel):

    def __init__(self, predictor_model, discriminator_model: Discriminator, hyper_parameters=None):

        # assigning predictor and discriminator models
        self.predictor = predictor_model
        self.discriminator = discriminator_model

        # losses and optimizers
        self.loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
        self.cosine_loss = tf.keras.losses.CosineSimilarity()
        self.predictor_optimizer = tf.keras.optimizers.Adam(1e-3)
        self.discriminator_optimizer = tf.keras.optimizers.Adam(1e-3)

        self.metrics = [
            tf.keras.metrics.Mean(name='loss_mean'),
            tf.keras.metrics.TruePositives(name='tp'),
            tf.keras.metrics.FalsePositives(name='fp'),
            tf.keras.metrics.TrueNegatives(name='tn'),
            tf.keras.metrics.FalseNegatives(name='fn'),
            tf.keras.metrics.BinaryAccuracy(name='accuracy')
        ]

        self.hyper_parameters = hyper_parameters if hyper_parameters is not None else {}

    def __predictor_gradient(self, gradients_of_predictor_pred_loss, gradients_of_predictor_disc_loss):
        """
        Calculate the final form of the gradient of the predictor network
        :param gradients_of_predictor_pred_loss: gradient of parameters based on the loss from predictor network
        :param gradients_of_predictor_disc_loss: gradient of parameters based on the loss from discriminator network
        :return:
        """
        gradients_of_predictor = []
        num_gradients = len(gradients_of_predictor_disc_loss)

        for i in range(num_gradients):
            # weighted gradient coming from the discriminator
            alpha = self.hyper_parameters.get("alpha", 1.0)
            disc_term = alpha*gradients_of_predictor_disc_loss[i]
            # projection of the gradient onto the discriminator gradient
            cosine_term = self.cosine_loss(gradients_of_predictor_pred_loss[i], gradients_of_predictor_disc_loss[i])
            proj_term = (cosine_term*tf.norm(gradients_of_predictor_pred_loss[i])*gradients_of_predictor_disc_loss[i])/\
                        tf.norm(gradients_of_predictor_disc_loss[i])

            # final form of the gradient
            gradients_of_predictor.append(gradients_of_predictor_pred_loss[i] - proj_term - disc_term)

        return gradients_of_predictor

    @tf.function
    def _train_step(self, input_features, labels):

        with tf.GradientTape() as predictor_tape, tf.GradientTape(persistent=True) as disc_tape:
            # predicting the label
            predictor_output = self.predictor(input_features, training=True)
            predictor_loss = self.loss(labels, predictor_output)

            # creating input for the discriminator: s = (1 + |c|) * predictor_output
            labels = tf.cast(labels, dtype=tf.float32)
            s = (1.0 + np.abs(self.hyper_parameters.get('c', 1.0)))*predictor_output
            discriminator_input = tf.squeeze(tf.stack([s, s*labels, s*(1.0 - labels)], axis=1))

            # predicting the protected_variable
            discriminator_output = self.discriminator.model(discriminator_input, training=True)

            # converting protected variable into target column
            protected_feature = tf.keras.layers.DenseFeatures(convert_categorical(self.discriminator.protected_variable,
                                                                                   self.hyper_parameters['category_maps']
                                                                                   ))
            protected_output = tf.gather(protected_feature(input_features), 0, axis=1)

            # calculating the loss of the discriminator
            disc_loss = self.loss(protected_output, discriminator_output)

        # calculate and apply the gradient of parameters of the discriminator network
        gradients_of_discriminator = disc_tape.gradient(disc_loss, self.discriminator.model.trainable_variables)
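        # The adversary is stepped first on its own loss; the predictor update below
        # then follows __predictor_gradient, i.e. g_pred - proj_{g_disc}(g_pred) - alpha * g_disc,
        # removing the component of the predictor step that would help the
        # discriminator recover the protected variable.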
self.discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, self.discriminator.model.trainable_variables)) # calculate gradients of parameters of predictor network based on # loss in the discriminator network gradients_of_predictor_disc_loss = disc_tape.gradient(disc_loss, self.predictor.trainable_variables) # loss in the predictor network gradients_of_predictor_pred_loss = predictor_tape.gradient(predictor_loss, self.predictor.trainable_variables) gradients_of_predictor = self.__predictor_gradient(gradients_of_predictor_pred_loss, gradients_of_predictor_disc_loss) # apply gradient updates self.predictor_optimizer.apply_gradients(zip(gradients_of_predictor, self.predictor.trainable_variables)) return tf.cast(tf.greater(predictor_output, 0.0), dtype=tf.int32), predictor_loss
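
# Minimal usage sketch (illustrative only: the two architectures and the
# 'gender' protected-variable name are assumptions, while the 'alpha', 'c' and
# 'category_maps' keys mirror what _train_step reads from hyper_parameters):
#
#   predictor = tf.keras.Sequential([tf.keras.layers.Dense(32, activation='relu'),
#                                    tf.keras.layers.Dense(1)])
#   adversary = tf.keras.Sequential([tf.keras.layers.Dense(16, activation='relu'),
#                                    tf.keras.layers.Dense(1)])
#   clf = FairClassifier(predictor,
#                        Discriminator(adversary, 'gender'),
#                        hyper_parameters={'alpha': 1.0, 'c': 1.0,
#                                          'category_maps': category_maps})
#   hard_preds, loss = clf._train_step(input_features, labels)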
[ "tensorflow.keras.metrics.BinaryAccuracy", "tensorflow.norm", "tensorflow.keras.metrics.TruePositives", "tensorflow.greater", "tensorflow.keras.losses.CosineSimilarity", "tensorflow.stack", "tensorflow.keras.metrics.TrueNegatives", "tensorflow.cast", "tensorflow.keras.losses.BinaryCrossentropy", "tensorflow.GradientTape", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.metrics.FalsePositives", "tensorflow.keras.metrics.Mean", "tensorflow.keras.metrics.FalseNegatives" ]
models/adversarial_model.py
[(24, 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), True, 'import tensorflow as tf\n'), (25, 'tensorflow.keras.losses.CosineSimilarity', 'tf.keras.losses.CosineSimilarity', ([], {}), True, 'import tensorflow as tf\n'), (26, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.001)'], {}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.001)'], {}), True, 'import tensorflow as tf\n'), (30, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""loss_mean"""'}), True, 'import tensorflow as tf\n'), (31, 'tensorflow.keras.metrics.TruePositives', 'tf.keras.metrics.TruePositives', ([], {'name': '"""tp"""'}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.keras.metrics.FalsePositives', 'tf.keras.metrics.FalsePositives', ([], {'name': '"""fp"""'}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.keras.metrics.TrueNegatives', 'tf.keras.metrics.TrueNegatives', ([], {'name': '"""tn"""'}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.keras.metrics.FalseNegatives', 'tf.keras.metrics.FalseNegatives', ([], {'name': '"""fn"""'}), True, 'import tensorflow as tf\n'), (35, 'tensorflow.keras.metrics.BinaryAccuracy', 'tf.keras.metrics.BinaryAccuracy', ([], {'name': '"""accuracy"""'}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.cast', 'tf.cast', (['labels'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.norm', 'tf.norm', (['gradients_of_predictor_disc_loss[i]'], {}), True, 'import tensorflow as tf\n'), (76, 'tensorflow.stack', 'tf.stack', (['[s, s * labels, s * (1.0 - labels)]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (81, 'utils.data.convert_categorical', 'convert_categorical', (['self.discriminator.protected_variable', "self.hyper_parameters['category_maps']"], {}), False, 'from utils.data import convert_categorical\n'), (107, 'tensorflow.greater', 'tf.greater', (['predictor_output', '(0.0)'], {}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.norm', 'tf.norm', (['gradients_of_predictor_pred_loss[i]'], {}), True, 'import tensorflow as tf\n')]
avagait/gaitcopy
2fee760156b289ef12f19fb366fb62cf535c305e
# Copies signatures # (c) MJMJ/2021 import tensorflow as tf import tensorflow.keras.backend as K import tensorflow_addons as tfa import deepdish as dd import os import numpy as np from nets.triplet_loss_all import TripletBatchAllLoss # =============================================================== # Version 1 of L1-smooth HUBER_DELTA = 0.5 def mj_smoothL1(y_true, y_pred): x = K.abs(y_true - y_pred) x = K.switch(x < HUBER_DELTA, 0.5 * x ** 2, HUBER_DELTA * (x - 0.5 * HUBER_DELTA)) return K.sum(x) def mj_smoothL1ex(h_delta): def mj_smoothL1_(y_true, y_pred): x = K.abs(y_true - y_pred) x = K.switch(x < h_delta, 0.5 * x ** 2, h_delta * (x - 0.5 * h_delta)) # return K.mean(x, axis=-1) return K.sum(x) return mj_smoothL1_ class GaitCopyModel(): def __init__(self, experdir): self.model = None self.model_encode = None self.hist = None self.experdir = experdir def load(self, netpath, encode_layer=None, compile=True, gaitset=False): try: if gaitset: self.model = tf.keras.models.load_model(netpath, compile=compile, custom_objects={"MatMul": MatMul(), 'tf': tf, "TripletBatchAllLoss": TripletBatchAllLoss()}) encode_layer = "flatten" else: self.model = tf.keras.models.load_model(netpath, compile=compile, custom_objects={'mj_smoothL1': mj_smoothL1}) print("++++++++++++++++++++++++++++++++++++++++++++") print("Model loaded from: " + netpath) except: # Load config file and build model. bdir = os.path.dirname(netpath) fconfig = os.path.join(bdir, "model-config.hdf5") netconfig = dd.io.load(fconfig) self.model = self.build_by_config(netconfig) # Load weights file. bname = os.path.basename(netpath) fparts = os.path.splitext(bname) filewes = os.path.join(bdir, fparts[0] + "_weights.hdf5") self.model.load_weights(filewes, by_name=True) print("++++++++++++++++++++++++++++++++++++++++++++") print("Model loaded from config + weight files: " + fconfig + ' ' + filewes) if encode_layer is None: self.model_encode = tf.keras.Model(self.model.input, self.model.layers[-1].input) else: out_layer = self.model.get_layer(encode_layer).output self.model_encode = tf.keras.Model(self.model.input, out_layer) def build_or_load(self, input_shape, number_convolutional_layers, filters_size, filters_numbers, strides, ndense_units=2048, weight_decay=0.0001, dropout=0.4, optimizer=tf.keras.optimizers.SGD(0.01, 0.9), nclasses=0, initnet="", freeze_convs=False, use3D=False, freeze_all=False, model_version='iwann', lstm=-1, lstm_number=512, dropout_lstm=0.0, L2_norm_lstm=None, loss_mode='both', margin=0.25, loss_weights=[1.0, 0.1], with_l2=False, kinit='glorot_uniform', drop_code=0, mobpars=None, hdelta=0.5): if initnet == "": build = self.build(input_shape, number_convolutional_layers, filters_size, filters_numbers, strides, ndense_units, weight_decay, dropout, optimizer, nclasses, use3D=use3D, model_version=model_version, lstm=lstm, lstm_number=lstm_number, dropout_lstm=dropout_lstm, L2_norm_lstm=L2_norm_lstm, loss_mode=loss_mode, margin=margin, loss_weights=loss_weights, with_l2=with_l2, kinit=kinit, drop_code=drop_code, mobpars=mobpars) else: self.load(initnet) # Check if freeze some weights if freeze_convs or freeze_all: seq1 = self.model.get_layer("convBranch") for layer in seq1.layers: if freeze_all or type(layer) == tf.keras.layers.Conv2D or type(layer) == tf.keras.layers.Conv3D: layer.trainable = False for layer in self.model.layers: if freeze_all or type(layer) == tf.keras.layers.Dense: layer.trainable = False # Check if exists FC for classification: replace_fc = False for layer in self.model.layers: if layer.name == "probs": # 
Check number of classes.
                    if layer.units != nclasses:
                        print("Replacing FC layer for the new classes...")
                        replace_fc = True

            if replace_fc:
                main_branch = self.model.layers[-1].input
                main_branch = tf.keras.layers.Dense(nclasses, activation='softmax', kernel_initializer='he_uniform',
                                                    name="probs")(main_branch)

                self.model = tf.keras.Model(inputs=self.model.input, outputs=main_branch)

                self.model.compile(optimizer=optimizer,
                                   loss='sparse_categorical_crossentropy',
                                   metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

            print("Alright")

    def build_by_config(self, netconfig):
        filters_size = netconfig["filters_size"]
        filters_numbers = netconfig["filters_numbers"]
        strides = netconfig["strides"]
        input_shape = netconfig["input_shape"]
        ndense_units = netconfig["ndense_units"]
        weight_decay = netconfig["weight_decay"]
        dropout = netconfig["dropout"]
        if "nclasses" in netconfig.keys():
            nclasses = netconfig["nclasses"]
        else:
            nclasses = 150
        optimizer = netconfig["optimizer"]
        if "use3D" in netconfig.keys():
            use3D = netconfig["use3D"]
        else:
            use3D = False
        if "drop_code" in netconfig.keys():
            drop_code = netconfig["drop_code"]
        else:
            drop_code = 0

        model_version = netconfig["model_version"]
        lstm = netconfig["lstm"]
        lstm_number = netconfig["lstm_number"]
        dropout_lstm = netconfig["dropout_lstm"]
        L2_norm_lstm = netconfig["L2_norm_lstm"]
        if "l2" in netconfig.keys():
            with_l2 = netconfig["l2"]
        else:
            with_l2 = False

        # build() stores the network in self.model and returns None, so call it and
        # return self.model explicitly; otherwise load() would end up with None.
        self.build(input_shape, len(filters_numbers), filters_size, filters_numbers, strides, ndense_units,
                   weight_decay, dropout, nclasses=nclasses, optimizer=optimizer, use3D=use3D,
                   model_version=model_version, lstm=lstm, lstm_number=lstm_number, dropout_lstm=dropout_lstm,
                   L2_norm_lstm=L2_norm_lstm, with_l2=with_l2, drop_code=drop_code)
        return self.model

    def build(self, input_shape, number_convolutional_layers, filters_size, filters_numbers, strides, ndense_units=512,
              weight_decay=0.0005, dropout=0.4, optimizer=tf.keras.optimizers.SGD(0.01, 0.9), nclasses=0, use3D=False,
              model_version='iwann', lstm=-1, lstm_number=512, dropout_lstm=0.0, L2_norm_lstm=None, loss_mode='MSE',
              margin=0.25, loss_weights=[1.0, 0.1], with_l2=False, kinit='glorot_uniform', drop_code=0, mobpars=None,
              hdelta=0.5, cross=False):
        """
        :param input_shape: tuple ((50,60,60), (25,60,60))
        :param number_convolutional_layers:
        :param filters_size:
        :param filters_numbers:
        :param ndense_units:
        :param weight_decay:
        :param dropout:
        :param optimizer:
        :param margin:
        :return:
        """

        if number_convolutional_layers < 1:
            print("ERROR: Number of convolutional layers must be greater than 0")

        outputs = []
        losses = []
        metrics = []

        if lstm == 0:
            input = tf.keras.layers.Input(shape=(None, input_shape[0], input_shape[1], input_shape[2]), name='input')
            lstm_layer = tf.keras.layers.ConvLSTM2D(16, 3, padding='same', data_format='channels_first')(input)
            input_shape = (16, input_shape[1], input_shape[2])
        elif lstm == 1 or lstm == 2:
            input = tf.keras.layers.Input(shape=(None, input_shape[0], input_shape[1], input_shape[2]), name='input')
            input_shape = (None, input_shape[0], input_shape[1], input_shape[2])
        elif lstm == 5:
            input = tf.keras.layers.Input(shape=(None, input_shape[0], input_shape[1], input_shape[2]), name='input')
            lstm_layer = tf.keras.layers.ConvLSTM2D(50, 3, padding='same', data_format='channels_first')(input)
            input_shape = (50, input_shape[1], input_shape[2])
        elif lstm == 6:
            input = tf.keras.layers.Input(shape=(None, input_shape[0], input_shape[1], input_shape[2]), name='input')
            lstm_layer = tf.keras.layers.ConvLSTM2D(16, 7, padding='same',
data_format='channels_first')(input) input_shape = (16, input_shape[1], input_shape[2]) elif lstm == 7: input = tf.keras.layers.Input(shape=(None, input_shape[0], input_shape[1], input_shape[2]), name='input') lstm_layer = tf.keras.layers.ConvLSTM2D(50, 7, padding='same', data_format='channels_first')(input) input_shape = (50, input_shape[1], input_shape[2]) elif lstm >= 8: input = tf.keras.layers.Input(shape=(None, input_shape[0], input_shape[1], input_shape[2]), name='input') lstm_layer = tf.keras.layers.ConvLSTM2D(50, 3, padding='same', data_format='channels_first')(input) input_shape = [(50, input_shape[1], input_shape[2]), (None, input_shape[0], input_shape[1], input_shape[2])] else: input = tf.keras.layers.Input(shape=input_shape, name='input') if use3D: if model_version == 'bmvc' or model_version=='smallA' or model_version=='smallB' : convBranch = self.build_3Dbranch_Manuel("convBranch", input_shape, ndense_units, dropout=dropout, kernel_initializer=kinit, filters_size=filters_size, filters_numbers=filters_numbers) elif model_version == "gaitset" or model_version == "gaitset_cross": convBranch = self.build_gaitset_branch(input_layer=input, input_shape=input_shape) else: convBranch = self.build_3Dbranch("convBranch", input_shape, number_convolutional_layers, filters_size, strides, filters_numbers, weight_decay, ndense_units, dropout) else: if model_version == 'bmvc': if lstm >= 8: lstm_branch1 = 5 lstm_branch2 = 2 convBranch1 = self.build_branch_Manuel("convBranch", input_shape[0], number_convolutional_layers, filters_size, strides, filters_numbers, weight_decay, ndense_units, dropout, lstm_branch1, lstm_number, dropout_lstm, L2_norm_lstm, False) convBranch2 = self.build_branch_Manuel("convBranch2", input_shape[1], number_convolutional_layers, filters_size, strides, filters_numbers, weight_decay, ndense_units, dropout, lstm_branch2, lstm_number, dropout_lstm, L2_norm_lstm, False) convBranch = [convBranch1, convBranch2] else: convBranch = self.build_branch_Manuel("convBranch", input_shape, number_convolutional_layers, filters_size, strides, filters_numbers, weight_decay, ndense_units, dropout, lstm, lstm_number, dropout_lstm, L2_norm_lstm) elif model_version == 'bmvcfc': convBranch = self.build_branch_fc("convBranch", input_shape, number_convolutional_layers, filters_size, strides, filters_numbers, weight_decay, ndense_units, dropout) convBranch.summary() else: if lstm >= 8: lstm_branch1 = 5 lstm_branch2 = 2 convBranch1 = self.build_branch("convBranch", input_shape[0], number_convolutional_layers, filters_size, strides, filters_numbers, weight_decay, ndense_units, dropout, lstm_branch1, lstm_number, dropout_lstm, L2_norm_lstm, False) convBranch2 = self.build_branch("convBranch2", input_shape[1], number_convolutional_layers, filters_size, strides, filters_numbers, weight_decay, ndense_units, dropout, lstm_branch2, lstm_number, dropout_lstm, L2_norm_lstm, False) convBranch = [convBranch1, convBranch2] else: convBranch = self.build_branch("convBranch", input_shape, number_convolutional_layers, filters_size, strides, filters_numbers, weight_decay, ndense_units, dropout, lstm, lstm_number, dropout_lstm, L2_norm_lstm, final_pool=False) if lstm == 0 or lstm == 5 or lstm == 6 or lstm == 7: output = convBranch(lstm_layer) elif lstm >= 8 and lstm <= 15: output1 = convBranch[0](lstm_layer) output2 = convBranch[1](input) if lstm == 10 or lstm == 15: # Add max layer output1 = tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1), name="embedL2_1")(output1) output2 = 
tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1), name="embedL2_2")(output2) # Add concat layer main_branch = tf.keras.layers.Concatenate(axis=1)([output1, output2]) if lstm == 11 or lstm == 12 or lstm == 15: main_branch = tf.keras.layers.Dropout(dropout, name="drop_concat")(main_branch) if lstm != 9 and lstm != 12 and lstm != 15: # Add dense layer + dropout main_branch = tf.keras.layers.Dense(ndense_units * 2, name="dense")(main_branch) if dropout > 0: main_branch = tf.keras.layers.Dropout(dropout, name="drop")(main_branch) output = tf.keras.layers.Dense(ndense_units, name="code")(main_branch) else: if model_version != "gaitset" and model_version != "gaitset_cross": output = convBranch(input) else: output = convBranch if drop_code > 0: output = tf.keras.layers.Dropout(drop_code, name="drop_code")(output) outputs.append(output) if model_version == "gaitset_cross": outputs.append(output) output = tf.keras.layers.Dense(nclasses)(output) outputs.append(output) # main_branch = tf.keras.layers.Dense(1024, activation='linear', kernel_initializer='he_uniform', # name="signature")(output) if with_l2: l2norm_ = tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1), name="signature")(output) outputs.append(l2norm_) if loss_mode == "MSE": losses.append('mean_squared_error') elif loss_mode == "sL1": losses.append(mj_smoothL1) elif loss_mode == "sL1e" or loss_mode == "sL1x": losses.append(mj_smoothL1ex(hdelta)) elif loss_mode == "Huber": losses.append(tf.keras.losses.Huber(delta=hdelta)) elif loss_mode == "sL1tri": losses = [mj_smoothL1, tfa.losses.TripletSemiHardLoss(margin=margin)] elif loss_mode == "sL1triH": losses = [mj_smoothL1, tfa.losses.TripletHardLoss(margin=margin)] elif loss_mode == "sL1triB": losses = [mj_smoothL1, TripletBatchAllLoss(margin=margin)] else: print("ERROR: invalid loss mode - "+loss_mode) metrics.append(tf.keras.metrics.MeanAbsoluteError()) metrics.append(tf.keras.metrics.MeanSquaredError()) if model_version == "gaitset_cross": losses.append('sparse_categorical_crossentropy') metrics.append('accuracy') loss_weights.append(0.2) self.model = tf.keras.Model(inputs=input, outputs=outputs, name="copynet") #self.model.compile(optimizer=optimizer, loss=losses, loss_weights=loss_weights, metrics=metrics) # Save useful info for recovering the model with different Python versions modelpars = {'filters_size': filters_size, 'filters_numbers': filters_numbers, 'input_shape': input_shape, 'ndense_units': ndense_units, 'weight_decay': weight_decay, 'dropout': dropout, 'optimizer': optimizer, 'custom': 'TripletSemiHardLoss', 'nclasses': nclasses, 'use3D': use3D, 'model_version': model_version, 'loss_mode': loss_mode, 'loss_weights': loss_weights, 'margin': margin, 'l2': with_l2} #dd.io.save(os.path.join(self.experdir, "model-config.hdf5"), modelpars) def build_branch(self, name, input_shape=(50, 60, 60), number_convolutional_layers=4, filters_size=None, strides=None, filters_numbers=None, weight_decay=0.0005, ndense_units=4096, dropout=0.4, lstm=-1, lstm_number=512, dropout_lstm=0.0, L2_norm_lstm=None, add_dense=True, final_pool=True): if filters_numbers is None: filters_numbers = [96, 192, 512, 4096] L2_norm = tf.keras.regularizers.l2(weight_decay) if L2_norm_lstm is not None: L2_norm_lstm = tf.keras.regularizers.l2(L2_norm_lstm) convBranch = tf.keras.Sequential(name=name) if lstm == 2: convBranch.add(tf.keras.layers.TimeDistributed( tf.keras.layers.Conv2D(filters_numbers[0], kernel_size=(filters_size[0]), strides=(strides[0]), kernel_regularizer=L2_norm, 
activation='relu', input_shape=input_shape, data_format='channels_first'))) convBranch.add(tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))) for i in range(1, number_convolutional_layers): convBranch.add(tf.keras.layers.TimeDistributed( tf.keras.layers.Conv2D(filters_numbers[i], kernel_size=(filters_size[i]), strides=(strides[i]), kernel_regularizer=L2_norm, activation='relu', data_format='channels_first'))) convBranch.add(tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))) convBranch.add(tf.keras.layers.TimeDistributed(tf.keras.layers.Flatten(name="flatten"))) convBranch.add(tf.keras.layers.LSTM(lstm_number, dropout=dropout_lstm, kernel_regularizer=L2_norm_lstm)) if add_dense: # Add dense layer + dropout convBranch.add(tf.keras.layers.Dense(ndense_units, name="dense")) if dropout > 0: convBranch.add(tf.keras.layers.Dropout(dropout, name="drop")) else: if lstm == 1: convBranch.add(tf.keras.layers.ConvLSTM2D(filters_numbers[0], kernel_size=(filters_size[0]), strides=(strides[0]), kernel_regularizer=L2_norm, input_shape=input_shape, data_format='channels_first')) else: convBranch.add(tf.keras.layers.Conv2D(filters_numbers[0], kernel_size=(filters_size[0]), strides=(strides[0]), kernel_regularizer=L2_norm, activation='relu', input_shape=input_shape, data_format='channels_first')) convBranch.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first')) for i in range(1, number_convolutional_layers): convBranch.add(tf.keras.layers.Conv2D(filters_numbers[i], kernel_size=(filters_size[i]), strides=(strides[i]), kernel_regularizer=L2_norm, activation='relu', data_format='channels_first')) # Needed for IWANN model: if (i < (number_convolutional_layers - 1)) or (i == (number_convolutional_layers - 1) and final_pool): convBranch.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first')) convBranch.add(tf.keras.layers.Flatten(name="flatten")) if add_dense: # Add dense layer + dropout convBranch.add(tf.keras.layers.Dense(ndense_units, name="dense")) if dropout > 0: convBranch.add(tf.keras.layers.Dropout(dropout, name="drop")) return convBranch def build_branch_Manuel(self, name, input_shape=(50, 60, 60), number_convolutional_layers=4, filters_size=None, strides=None, filters_numbers=None, weight_decay=0.0005, ndense_units=1024, dropout=0.4, lstm=-1, lstm_number=512, dropout_lstm=0.0, L2_norm_lstm=None, add_dense=True): if filters_numbers is None: filters_numbers = [96, 192, 512, 512] L2_norm = tf.keras.regularizers.l2(weight_decay) if L2_norm_lstm is not None: L2_norm_lstm = tf.keras.regularizers.l2(L2_norm_lstm) convBranch = tf.keras.Sequential(name=name) if lstm == 2: convBranch.add(tf.keras.layers.TimeDistributed( tf.keras.layers.Conv2D(filters_numbers[0], kernel_size=(filters_size[0]), strides=(strides[0]), kernel_regularizer=L2_norm, activation='relu', input_shape=input_shape, data_format='channels_first'))) convBranch.add(tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))) for i in range(1, number_convolutional_layers): convBranch.add(tf.keras.layers.TimeDistributed( tf.keras.layers.Conv2D(filters_numbers[i], kernel_size=(filters_size[i]), strides=(strides[i]), kernel_regularizer=L2_norm, activation='relu', data_format='channels_first'))) #if i != number_convolutional_layers - 1: convBranch.add(tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), 
data_format='channels_first'))) convBranch.add(tf.keras.layers.TimeDistributed(tf.keras.layers.Flatten(name="flatten"))) convBranch.add(tf.keras.layers.LSTM(lstm_number, dropout=dropout_lstm, kernel_regularizer=L2_norm_lstm)) if add_dense: # Add dense layer + dropout convBranch.add(tf.keras.layers.Dense(ndense_units * 2, name="dense")) if dropout > 0: convBranch.add(tf.keras.layers.Dropout(dropout, name="drop")) convBranch.add(tf.keras.layers.Dense(ndense_units, name="code")) else: if lstm == 1: convBranch.add(tf.keras.layers.ConvLSTM2D(filters_numbers[0], kernel_size=(filters_size[0]), strides=(strides[0]), kernel_regularizer=L2_norm, input_shape=input_shape, data_format='channels_first')) else: convBranch.add(tf.keras.layers.Conv2D(filters_numbers[0], kernel_size=(filters_size[0]), strides=(strides[0]), kernel_regularizer=L2_norm, activation='relu', input_shape=input_shape, data_format='channels_first')) convBranch.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first')) for i in range(1, number_convolutional_layers): convBranch.add(tf.keras.layers.Conv2D(filters_numbers[i], kernel_size=(filters_size[i]), strides=(strides[i]), kernel_regularizer=L2_norm, activation='relu', data_format='channels_first')) #if i != number_convolutional_layers - 1: convBranch.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first')) convBranch.add(tf.keras.layers.Flatten(name="flatten")) if add_dense: # Add dense layer + dropout convBranch.add(tf.keras.layers.Dense(ndense_units * 2, name="dense")) if dropout > 0: convBranch.add(tf.keras.layers.Dropout(dropout, name="drop")) convBranch.add(tf.keras.layers.Dense(ndense_units, name="code")) return convBranch # Fully-convolutional branch def build_branch_fc(self, name, input_shape=(50, 60, 60), number_convolutional_layers=4, filters_size=None, strides=None, filters_numbers=None, weight_decay=0.0005, ndense_units=1024, dropout=0.4): if filters_numbers is None: filters_numbers = [96, 192, 512, 512] L2_norm = tf.keras.regularizers.l2(weight_decay) convBranch = tf.keras.Sequential(name=name) convBranch.add(tf.keras.layers.Conv2D(filters_numbers[0], kernel_size=(filters_size[0]), strides=(strides[0]), kernel_regularizer=L2_norm, activation='relu', input_shape=input_shape, data_format='channels_first')) convBranch.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first')) for i in range(1, number_convolutional_layers): convBranch.add(tf.keras.layers.Conv2D(filters_numbers[i], kernel_size=(filters_size[i]), strides=(strides[i]), kernel_regularizer=L2_norm, activation='relu', data_format='channels_first')) #if i != number_convolutional_layers - 1: convBranch.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first')) # convBranch.add(tf.keras.layers.Flatten(name="flatten")) # Add dense layer + dropout # convBranch.add(tf.keras.layers.Dense(ndense_units * 2, name="dense")) convBranch.add(tf.keras.layers.Conv2D(ndense_units * 2, kernel_size=1, strides=1, name="fc1", data_format='channels_first')) if dropout > 0: convBranch.add(tf.keras.layers.Dropout(dropout, name="drop")) convBranch.add(tf.keras.layers.Conv2D(ndense_units, kernel_size=1, strides=1, name="code0", data_format='channels_first')) # convBranch.add(tf.keras.layers.Dense(ndense_units, name="code")) convBranch.add(tf.keras.layers.Flatten(name="code")) return convBranch def build_3Dbranch(self, name, input_shape=(50, 60, 60), number_convolutional_layers=4, filters_size=None, strides=None, filters_numbers=None, 
weight_decay=0.0005, ndense_units=4096, dropout=0.4):
        if filters_numbers is None:
            filters_numbers = [96, 192, 512, 4096]

        L2_norm = tf.keras.regularizers.l2(weight_decay)

        convBranch = tf.keras.Sequential(name=name)

        convBranch.add(tf.keras.layers.Conv3D(filters_numbers[0], kernel_size=(3, filters_size[0], filters_size[0]),
                                              strides=(1, strides[0], strides[0]), kernel_regularizer=L2_norm,
                                              activation='relu', input_shape=input_shape,
                                              data_format='channels_last'))

        convBranch.add(tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), data_format='channels_last'))

        for i in range(1, number_convolutional_layers):
            convBranch.add(tf.keras.layers.Conv3D(filters_numbers[i], kernel_size=(3, filters_size[i], filters_size[i]),
                                                  strides=(1, strides[i], strides[i]), kernel_regularizer=L2_norm,
                                                  activation='relu', data_format='channels_last'))

            if i != number_convolutional_layers - 1:
                convBranch.add(
                    tf.keras.layers.Conv3D(filters_numbers[i], kernel_size=(3, filters_size[i], filters_size[i]),
                                           strides=(1, strides[i], strides[i]), kernel_regularizer=L2_norm,
                                           activation='relu', data_format='channels_last'))
                convBranch.add(tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), data_format='channels_last'))
            else:
                convBranch.add(tf.keras.layers.Conv3D(filters_numbers[i], kernel_size=(filters_size[i]),
                                                      strides=(1, strides[i], strides[i]), kernel_regularizer=L2_norm,
                                                      activation='relu', data_format='channels_last'))

        convBranch.add(tf.keras.layers.Flatten(name="flatten"))

        # Add dense layer + dropout
        convBranch.add(tf.keras.layers.Dense(ndense_units, name="dense"))
        if dropout > 0:
            convBranch.add(tf.keras.layers.Dropout(dropout, name="drop"))

        return convBranch

    def build_3Dbranch_Manuel(self, name, input_shape=(25, 60, 60, 1), ndense_units=512, dropout=0.4,
                              kernel_initializer='glorot_uniform',
                              filters_size=[(3, 5, 5), (3, 3, 3), (3, 3, 3), (3, 3, 3), (3, 2, 2), (2, 1, 1)],
                              filters_numbers=[64, 128, 256, 512, 512, 512]):

        convBranch = tf.keras.Sequential(name=name)

        convBranch.add(tf.keras.layers.Conv3D(filters_numbers[0], filters_size[0], strides=(1, 2, 2), padding='valid',
                                              activation='relu', input_shape=input_shape, data_format='channels_last',
                                              kernel_initializer=kernel_initializer))
        convBranch.add(tf.keras.layers.Conv3D(filters_numbers[1], filters_size[1], strides=(1, 2, 2), padding='valid',
                                              activation='relu', input_shape=input_shape, data_format='channels_last',
                                              kernel_initializer=kernel_initializer))
        convBranch.add(tf.keras.layers.Conv3D(filters_numbers[2], filters_size[2], strides=(2, 2, 2), padding='valid',
                                              activation='relu', input_shape=input_shape, data_format='channels_last',
                                              kernel_initializer=kernel_initializer))
        convBranch.add(tf.keras.layers.Conv3D(filters_numbers[3], filters_size[3], strides=(2, 2, 2), padding='valid',
                                              activation='relu', input_shape=input_shape, data_format='channels_last',
                                              kernel_initializer=kernel_initializer))
        convBranch.add(tf.keras.layers.Conv3D(filters_numbers[4], filters_size[4], strides=(1, 1, 1), padding='valid',
                                              activation='relu', input_shape=input_shape, data_format='channels_last',
                                              kernel_initializer=kernel_initializer))
        convBranch.add(tf.keras.layers.Conv3D(filters_numbers[5], filters_size[5], strides=(1, 1, 1), padding='valid',
                                              activation='relu', input_shape=input_shape, data_format='channels_last',
                                              kernel_initializer=kernel_initializer))

        if dropout > 0:
            convBranch.add(tf.keras.layers.Dropout(dropout, name="drop"))

        # Dense without activation function
        convBranch.add(tf.keras.layers.Conv3D(ndense_units, (1, 1, 1), strides=(1, 1, 1), activation=None,
kernel_regularizer=tf.keras.regularizers.l2(1e-3), kernel_initializer='he_uniform', name="grayCode")) convBranch.add(tf.keras.layers.Flatten(name="code")) return convBranch def fit(self, epochs, callbacks, training_generator, validation_generator, current_step=0, validation_steps=None, encode_layer=None, steps_per_epoch=None): self.hist = self.model.fit(training_generator, validation_data=validation_generator, epochs=epochs, callbacks=callbacks, validation_steps=validation_steps, initial_epoch=current_step, verbose=1, steps_per_epoch=steps_per_epoch) #, workers=4, max_queue_size=10, use_multiprocessing=True) if encode_layer is None: self.model_encode = tf.keras.Model(self.model.input, self.model.layers[-1].input) else: out_layer = self.model.get_layer(encode_layer).output self.model_encode = tf.keras.Model(self.model.input, out_layer) return len(self.hist.epoch) def predict(self, data, batch_size=128): pred = self.model.predict(data, batch_size=batch_size) return pred def encode(self, data, reshape=False): # features = self.model_encode(data) if reshape: n_data = np.zeros(shape=(data.shape[0], 25, 60, 60, 2)) for i in range(25): n_data[:, i, :, :, 0] = data[:, i*2, :, :] n_data[:, i, :, :, 1] = data[:, i*2+1, :, :] data = n_data features = self.model.predict(data) if isinstance(features, list): features = features[0] # L2 normalize embeddings codes_norm_tf = tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1))(features) # Get the numpy matrix codes_norm = codes_norm_tf.numpy() return codes_norm def save(self, epoch=None): if epoch is not None: self.model.save(os.path.join(self.experdir, "model-state-{:04d}.hdf5".format(epoch))) else: self.model.save(os.path.join(self.experdir, "model-final.hdf5")) def build_gaitset_branch(self, input_layer, input_shape=(25, 60, 60, 1)): branch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.ZeroPadding2D(padding=4))(input_layer) branch_a = tf.keras.layers.TimeDistributed( tf.keras.layers.Conv2D(32, kernel_size=5, activation=None, padding='valid', use_bias=False, data_format='channels_last'))(branch_a) branch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.LeakyReLU())(branch_a) branch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.ZeroPadding2D(padding=1))(branch_a) branch_a = tf.keras.layers.TimeDistributed( tf.keras.layers.Conv2D(32, kernel_size=3, activation=None, padding='valid', use_bias=False, input_shape=input_shape, data_format='channels_last'))(branch_a) branch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.LeakyReLU())(branch_a) branch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_last'))(branch_a) branch_b = tf.keras.layers.Lambda(lambda x: tf.math.reduce_max(x, axis=1))(branch_a) branch_b = tf.keras.layers.ZeroPadding2D(padding=1)(branch_b) branch_b = tf.keras.layers.Conv2D(64, kernel_size=3, activation=None, padding='valid', use_bias=False, data_format='channels_last')(branch_b) branch_b = tf.keras.layers.LeakyReLU()(branch_b) branch_b = tf.keras.layers.ZeroPadding2D(padding=1)(branch_b) branch_b = tf.keras.layers.Conv2D(64, kernel_size=3, activation=None, padding='valid', use_bias=False, data_format='channels_last')(branch_b) branch_b = tf.keras.layers.LeakyReLU()(branch_b) branch_b = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_last')(branch_b) branch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.ZeroPadding2D(padding=1))(branch_a) branch_a = tf.keras.layers.TimeDistributed( tf.keras.layers.Conv2D(64, kernel_size=3, 
activation=None, padding='valid', use_bias=False, input_shape=input_shape, data_format='channels_last'))(branch_a) branch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.LeakyReLU())(branch_a) branch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.ZeroPadding2D(padding=1))(branch_a) branch_a = tf.keras.layers.TimeDistributed( tf.keras.layers.Conv2D(64, kernel_size=3, activation=None, padding='valid', use_bias=False, input_shape=input_shape, data_format='channels_last'))(branch_a) branch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.LeakyReLU())(branch_a) branch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_last'))(branch_a) branch_b_ = tf.keras.layers.Lambda(lambda x: tf.math.reduce_max(x, axis=1))(branch_a) branch_b = tf.keras.layers.Add()([branch_b, branch_b_]) branch_b = tf.keras.layers.ZeroPadding2D(padding=1)(branch_b) branch_b = tf.keras.layers.Conv2D(128, kernel_size=3, activation=None, padding='valid', use_bias=False, data_format='channels_last')(branch_b) branch_b = tf.keras.layers.LeakyReLU()(branch_b) branch_b = tf.keras.layers.ZeroPadding2D(padding=1)(branch_b) branch_b = tf.keras.layers.Conv2D(128, kernel_size=3, activation=None, padding='valid', use_bias=False, data_format='channels_last')(branch_b) branch_b = tf.keras.layers.LeakyReLU()(branch_b) branch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.ZeroPadding2D(padding=1))(branch_a) branch_a = tf.keras.layers.TimeDistributed( tf.keras.layers.Conv2D(128, kernel_size=3, activation=None, padding='valid', use_bias=False, input_shape=input_shape, data_format='channels_last'))(branch_a) branch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.LeakyReLU())(branch_a) branch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.ZeroPadding2D(padding=1))(branch_a) branch_a = tf.keras.layers.TimeDistributed( tf.keras.layers.Conv2D(128, kernel_size=3, activation=None, padding='valid', use_bias=False, input_shape=input_shape, data_format='channels_last'))(branch_a) branch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.LeakyReLU())(branch_a) branch_a = tf.keras.layers.Lambda(lambda x: tf.math.reduce_max(x, axis=1))(branch_a) branch_b = tf.keras.layers.Add()([branch_b, branch_a]) # HPP feature = list() bin_num = [1, 2, 4, 8, 16] # bin_num = [1, 16] n, h, w, c = branch_b.shape print(branch_b.shape) for num_bin in bin_num: branch_a_ = tf.keras.layers.Reshape((num_bin, -1, c))(branch_a) branch_a_ = tf.keras.layers.Lambda(lambda x: tf.math.reduce_mean(x, axis=2) + tf.math.reduce_max(x, axis=2))( branch_a_) feature.append(branch_a_) branch_b_ = tf.keras.layers.Reshape((num_bin, -1, c))(branch_b) branch_b_ = tf.keras.layers.Lambda(lambda x: tf.math.reduce_mean(x, axis=2) + tf.math.reduce_max(x, axis=2))( branch_b_) feature.append(branch_b_) model = tf.keras.layers.Concatenate(axis=1)(feature) model = tf.keras.layers.Lambda(lambda x: tf.transpose(x, [1, 0, 2]))(model) model = MatMul()(model) model = tf.keras.layers.Lambda(lambda x: tf.transpose(x, [1, 0, 2]))(model) model = tf.keras.layers.Flatten()(model) return model class MatMul(tf.keras.layers.Layer): def __init__(self, bin_num=31, hidden_dim=128, **kwargs): super(MatMul, self).__init__(**kwargs) self.bin_num = bin_num self.hidden_dim = hidden_dim # Create a trainable weight variable for this layer. 
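        # One (128 x hidden_dim) projection per horizontal-pyramid bin: with the
        # defaults this is a (62, 128, 128) tensor, matching the 2 * 31 bins
        # stacked by build_gaitset_branch.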
        w_init = tf.keras.initializers.GlorotUniform()
        self.kernel = tf.Variable(name="MatMul_kernel" + str(np.random.randint(100, size=1)),
                                  initial_value=w_init(shape=(bin_num * 2, 128, hidden_dim), dtype="float32"),
                                  trainable=True)

    def call(self, x):
        # Batched matrix multiply over the leading (bin) dimension.
        # Shape x: (bin_num * 2, BATCH_SIZE, 128)
        # Shape kernel: (bin_num * 2, 128, hidden_dim)
        # Shape output: (bin_num * 2, BATCH_SIZE, hidden_dim)
        return tf.matmul(x, self.kernel)

    def get_config(self):
        config = super().get_config().copy()
        config.update({
            'bin_num': self.bin_num,
            'hidden_dim': self.hidden_dim,
        })
        return config
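
# Quick shape check for MatMul (illustrative sketch, not part of the original
# file): with the defaults the layer maps the stacked per-bin features
# (62, batch, 128) to (62, batch, 128) via a batched matmul over the bin axis.
#
#   feats = tf.random.normal((62, 4, 128))           # 62 bins, batch of 4
#   out = MatMul(bin_num=31, hidden_dim=128)(feats)  # -> shape (62, 4, 128)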
[ "tensorflow.keras.models.load_model", "tensorflow.keras.Sequential", "tensorflow.keras.layers.ConvLSTM2D", "tensorflow.keras.layers.ZeroPadding2D", "tensorflow.keras.initializers.GlorotUniform", "tensorflow.keras.optimizers.SGD", "tensorflow.keras.layers.Concatenate", "numpy.random.randint", "tensorflow.keras.layers.LeakyReLU", "tensorflow.keras.layers.MaxPooling3D", "tensorflow.keras.metrics.MeanSquaredError", "tensorflow.keras.regularizers.l2", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.Conv3D", "tensorflow.keras.layers.Add", "numpy.zeros", "tensorflow.keras.layers.Flatten", "tensorflow.matmul", "tensorflow.math.reduce_max", "tensorflow.keras.layers.Dense", "tensorflow.math.l2_normalize", "tensorflow.keras.backend.sum", "tensorflow.keras.Model", "tensorflow.keras.backend.abs", "tensorflow.keras.losses.Huber", "tensorflow.keras.layers.Reshape", "tensorflow.keras.backend.switch", "tensorflow.transpose", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.math.reduce_mean", "tensorflow.keras.layers.LSTM", "tensorflow.keras.metrics.MeanAbsoluteError", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Input" ]
nets/mj_gaitcopy_model.py
[(18, 'tensorflow.keras.backend.abs', 'K.abs', (['(y_true - y_pred)'], {}), True, 'import tensorflow.keras.backend as K\n'), (19, 'tensorflow.keras.backend.switch', 'K.switch', (['(x < HUBER_DELTA)', '(0.5 * x ** 2)', '(HUBER_DELTA * (x - 0.5 * HUBER_DELTA))'], {}), True, 'import tensorflow.keras.backend as K\n'), (20, 'tensorflow.keras.backend.sum', 'K.sum', (['x'], {}), True, 'import tensorflow.keras.backend as K\n'), (25, 'tensorflow.keras.backend.abs', 'K.abs', (['(y_true - y_pred)'], {}), True, 'import tensorflow.keras.backend as K\n'), (26, 'tensorflow.keras.backend.switch', 'K.switch', (['(x < h_delta)', '(0.5 * x ** 2)', '(h_delta * (x - 0.5 * h_delta))'], {}), True, 'import tensorflow.keras.backend as K\n'), (28, 'tensorflow.keras.backend.sum', 'K.sum', (['x'], {}), True, 'import tensorflow.keras.backend as K\n'), (73, 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', (['(0.01)', '(0.9)'], {}), True, 'import tensorflow as tf\n'), (159, 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', (['(0.01)', '(0.9)'], {}), True, 'import tensorflow as tf\n'), (327, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'input', 'outputs': 'outputs', 'name': '"""copynet"""'}), True, 'import tensorflow as tf\n'), (353, 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['weight_decay'], {}), True, 'import tensorflow as tf\n'), (357, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {'name': 'name'}), True, 'import tensorflow as tf\n'), (418, 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['weight_decay'], {}), True, 'import tensorflow as tf\n'), (422, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {'name': 'name'}), True, 'import tensorflow as tf\n'), (486, 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['weight_decay'], {}), True, 'import tensorflow as tf\n'), (488, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {'name': 'name'}), True, 'import tensorflow as tf\n'), (523, 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['weight_decay'], {}), True, 'import tensorflow as tf\n'), (525, 'pdb.set_trace', 'pdb.set_trace', ([], {}), False, 'import pdb\n'), (527, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {'name': 'name'}), True, 'import tensorflow as tf\n'), (564, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {'name': 'name'}), True, 'import tensorflow as tf\n'), (733, 'tensorflow.keras.initializers.GlorotUniform', 'tf.keras.initializers.GlorotUniform', ([], {}), True, 'import tensorflow as tf\n'), (743, 'tensorflow.matmul', 'tf.matmul', (['x', 'self.kernel'], {}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.keras.Model', 'tf.keras.Model', (['self.model.input', 'self.model.layers[-1].input'], {}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.keras.Model', 'tf.keras.Model', (['self.model.input', 'out_layer'], {}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(None, input_shape[0], input_shape[1], input_shape[2])', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (319, 'tensorflow.keras.metrics.MeanAbsoluteError', 'tf.keras.metrics.MeanAbsoluteError', ([], {}), True, 'import tensorflow as tf\n'), (320, 'tensorflow.keras.metrics.MeanSquaredError', 'tf.keras.metrics.MeanSquaredError', ([], {}), True, 'import tensorflow as tf\n'), (355, 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['L2_norm_lstm'], {}), True, 'import tensorflow as 
tf\n'), (420, 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['L2_norm_lstm'], {}), True, 'import tensorflow as tf\n'), (490, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filters_numbers[0]'], {'kernel_size': 'filters_size[0]', 'strides': 'strides[0]', 'kernel_regularizer': 'L2_norm', 'activation': '"""relu"""', 'input_shape': 'input_shape', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (494, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (508, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(ndense_units * 2)'], {'kernel_size': '(1)', 'strides': '(1)', 'name': '"""fc1"""', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (512, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['ndense_units'], {'kernel_size': '(1)', 'strides': '(1)', 'name': '"""code0"""', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (515, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'name': '"""code"""'}), True, 'import tensorflow as tf\n'), (529, 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', (['filters_numbers[0]'], {'kernel_size': '(3, filters_size[0], filters_size[0])', 'strides': '(1, strides[0], strides[0])', 'kernel_regularizer': 'L2_norm', 'activation': '"""relu"""', 'input_shape': 'input_shape', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (533, 'tensorflow.keras.layers.MaxPooling3D', 'tf.keras.layers.MaxPooling3D', ([], {'pool_size': '(2, 2, 2)', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (551, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'name': '"""flatten"""'}), True, 'import tensorflow as tf\n'), (554, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['ndense_units'], {'name': '"""dense"""'}), True, 'import tensorflow as tf\n'), (566, 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', (['filters_numbers[0]', 'filters_size[0]'], {'strides': '(1, 2, 2)', 'padding': '"""valid"""', 'activation': '"""relu"""', 'input_shape': 'input_shape', 'data_format': '"""channels_last"""', 'kernel_initializer': 'kernel_initializer'}), True, 'import tensorflow as tf\n'), (569, 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', (['filters_numbers[1]', 'filters_size[1]'], {'strides': '(1, 2, 2)', 'padding': '"""valid"""', 'activation': '"""relu"""', 'input_shape': 'input_shape', 'data_format': '"""channels_last"""', 'kernel_initializer': 'kernel_initializer'}), True, 'import tensorflow as tf\n'), (572, 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', (['filters_numbers[2]', 'filters_size[2]'], {'strides': '(2, 2, 2)', 'padding': '"""valid"""', 'activation': '"""relu"""', 'input_shape': 'input_shape', 'data_format': '"""channels_last"""', 'kernel_initializer': 'kernel_initializer'}), True, 'import tensorflow as tf\n'), (575, 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', (['filters_numbers[3]', 'filters_size[3]'], {'strides': '(2, 2, 2)', 'padding': '"""valid"""', 'activation': '"""relu"""', 'input_shape': 'input_shape', 'data_format': '"""channels_last"""', 'kernel_initializer': 'kernel_initializer'}), True, 'import tensorflow as tf\n'), (578, 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', (['filters_numbers[4]', 'filters_size[4]'], {'strides': '(1, 1, 1)', 
'padding': '"""valid"""', 'activation': '"""relu"""', 'input_shape': 'input_shape', 'data_format': '"""channels_last"""', 'kernel_initializer': 'kernel_initializer'}), True, 'import tensorflow as tf\n'), (581, 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', (['filters_numbers[5]', 'filters_size[5]'], {'strides': '(1, 1, 1)', 'padding': '"""valid"""', 'activation': '"""relu"""', 'input_shape': 'input_shape', 'data_format': '"""channels_last"""', 'kernel_initializer': 'kernel_initializer'}), True, 'import tensorflow as tf\n'), (592, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'name': '"""code"""'}), True, 'import tensorflow as tf\n'), (602, 'tensorflow.keras.Model', 'tf.keras.Model', (['self.model.input', 'self.model.layers[-1].input'], {}), True, 'import tensorflow as tf\n'), (605, 'tensorflow.keras.Model', 'tf.keras.Model', (['self.model.input', 'out_layer'], {}), True, 'import tensorflow as tf\n'), (615, 'numpy.zeros', 'np.zeros', ([], {'shape': '(data.shape[0], 25, 60, 60, 2)'}), True, 'import numpy as np\n'), (652, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(1)'}), True, 'import tensorflow as tf\n'), (653, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)'], {'kernel_size': '(3)', 'activation': 'None', 'padding': '"""valid"""', 'use_bias': '(False)', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (655, 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), True, 'import tensorflow as tf\n'), (656, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(1)'}), True, 'import tensorflow as tf\n'), (657, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)'], {'kernel_size': '(3)', 'activation': 'None', 'padding': '"""valid"""', 'use_bias': '(False)', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (659, 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), True, 'import tensorflow as tf\n'), (660, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (676, 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), True, 'import tensorflow as tf\n'), (677, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(1)'}), True, 'import tensorflow as tf\n'), (678, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)'], {'kernel_size': '(3)', 'activation': 'None', 'padding': '"""valid"""', 'use_bias': '(False)', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (680, 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), True, 'import tensorflow as tf\n'), (681, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(1)'}), True, 'import tensorflow as tf\n'), (682, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)'], {'kernel_size': '(3)', 'activation': 'None', 'padding': '"""valid"""', 'use_bias': '(False)', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (684, 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), True, 'import tensorflow as tf\n'), (699, 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), True, 'import tensorflow as tf\n'), (717, 'tensorflow.keras.layers.Concatenate', 
'tf.keras.layers.Concatenate', ([], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (721, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['netpath'], {'compile': 'compile', 'custom_objects': "{'mj_smoothL1': mj_smoothL1}"}), True, 'import tensorflow as tf\n'), (52, 'os.path.dirname', 'os.path.dirname', (['netpath'], {}), False, 'import os\n'), (53, 'os.path.join', 'os.path.join', (['bdir', '"""model-config.hdf5"""'], {}), False, 'import os\n'), (54, 'deepdish.io.load', 'dd.io.load', (['fconfig'], {}), True, 'import deepdish as dd\n'), (58, 'os.path.basename', 'os.path.basename', (['netpath'], {}), False, 'import os\n'), (59, 'os.path.splitext', 'os.path.splitext', (['bname'], {}), False, 'import os\n'), (60, 'os.path.join', 'os.path.join', (['bdir', "(fparts[0] + '_weights.hdf5')"], {}), False, 'import os\n'), (113, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'self.model.input', 'outputs': 'main_branch'}), True, 'import tensorflow as tf\n'), (185, 'tensorflow.keras.layers.ConvLSTM2D', 'tf.keras.layers.ConvLSTM2D', (['(16)', '(3)'], {'padding': '"""same"""', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (188, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(None, input_shape[0], input_shape[1], input_shape[2])', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (285, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['drop_code'], {'name': '"""drop_code"""'}), True, 'import tensorflow as tf\n'), (291, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['nclasses'], {}), True, 'import tensorflow as tf\n'), (375, 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['lstm_number'], {'dropout': 'dropout_lstm', 'kernel_regularizer': 'L2_norm_lstm'}), True, 'import tensorflow as tf\n'), (392, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (403, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'name': '"""flatten"""'}), True, 'import tensorflow as tf\n'), (442, 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['lstm_number'], {'dropout': 'dropout_lstm', 'kernel_regularizer': 'L2_norm_lstm'}), True, 'import tensorflow as tf\n'), (460, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (470, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'name': '"""flatten"""'}), True, 'import tensorflow as tf\n'), (497, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filters_numbers[i]'], {'kernel_size': 'filters_size[i]', 'strides': 'strides[i]', 'kernel_regularizer': 'L2_norm', 'activation': '"""relu"""', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (502, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (511, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout'], {'name': '"""drop"""'}), True, 'import tensorflow as tf\n'), (536, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filters_numbers[i]'], {'kernel_size': '(3, filters_size[i], filters_size[i])', 
'strides': '(1, strides[i], strides[i])', 'kernel_regularizer': 'L2_norm', 'activation': '"""relu"""', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (556, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout'], {'name': '"""drop"""'}), True, 'import tensorflow as tf\n'), (585, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout'], {'name': '"""drop"""'}), True, 'import tensorflow as tf\n'), (635, 'os.path.join', 'os.path.join', (['self.experdir', '"""model-final.hdf5"""'], {}), False, 'import os\n'), (638, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(4)'}), True, 'import tensorflow as tf\n'), (640, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)'], {'kernel_size': '(5)', 'activation': 'None', 'padding': '"""valid"""', 'use_bias': '(False)', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (642, 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), True, 'import tensorflow as tf\n'), (644, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(1)'}), True, 'import tensorflow as tf\n'), (646, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)'], {'kernel_size': '(3)', 'activation': 'None', 'padding': '"""valid"""', 'use_bias': '(False)', 'input_shape': 'input_shape', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (648, 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), True, 'import tensorflow as tf\n'), (649, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (662, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(1)'}), True, 'import tensorflow as tf\n'), (664, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)'], {'kernel_size': '(3)', 'activation': 'None', 'padding': '"""valid"""', 'use_bias': '(False)', 'input_shape': 'input_shape', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (666, 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), True, 'import tensorflow as tf\n'), (668, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(1)'}), True, 'import tensorflow as tf\n'), (670, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)'], {'kernel_size': '(3)', 'activation': 'None', 'padding': '"""valid"""', 'use_bias': '(False)', 'input_shape': 'input_shape', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (672, 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), True, 'import tensorflow as tf\n'), (673, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (686, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(1)'}), True, 'import tensorflow as tf\n'), (688, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)'], {'kernel_size': '(3)', 'activation': 'None', 'padding': '"""valid"""', 'use_bias': '(False)', 'input_shape': 'input_shape', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (690, 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', 
([], {}), True, 'import tensorflow as tf\n'), (692, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(1)'}), True, 'import tensorflow as tf\n'), (694, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)'], {'kernel_size': '(3)', 'activation': 'None', 'padding': '"""valid"""', 'use_bias': '(False)', 'input_shape': 'input_shape', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (696, 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), True, 'import tensorflow as tf\n'), (708, 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', (['(num_bin, -1, c)'], {}), True, 'import tensorflow as tf\n'), (712, 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', (['(num_bin, -1, c)'], {}), True, 'import tensorflow as tf\n'), (111, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['nclasses'], {'activation': '"""softmax"""', 'kernel_initializer': '"""he_uniform"""', 'name': '"""probs"""'}), True, 'import tensorflow as tf\n'), (191, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(None, input_shape[0], input_shape[1], input_shape[2])', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (266, 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (276, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['ndense_units'], {'name': '"""code"""'}), True, 'import tensorflow as tf\n'), (360, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filters_numbers[0]'], {'kernel_size': 'filters_size[0]', 'strides': 'strides[0]', 'kernel_regularizer': 'L2_norm', 'activation': '"""relu"""', 'input_shape': 'input_shape', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (364, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (374, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'name': '"""flatten"""'}), True, 'import tensorflow as tf\n'), (379, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['ndense_units'], {'name': '"""dense"""'}), True, 'import tensorflow as tf\n'), (384, 'tensorflow.keras.layers.ConvLSTM2D', 'tf.keras.layers.ConvLSTM2D', (['filters_numbers[0]'], {'kernel_size': 'filters_size[0]', 'strides': 'strides[0]', 'kernel_regularizer': 'L2_norm', 'input_shape': 'input_shape', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (388, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filters_numbers[0]'], {'kernel_size': 'filters_size[0]', 'strides': 'strides[0]', 'kernel_regularizer': 'L2_norm', 'activation': '"""relu"""', 'input_shape': 'input_shape', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (395, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filters_numbers[i]'], {'kernel_size': 'filters_size[i]', 'strides': 'strides[i]', 'kernel_regularizer': 'L2_norm', 'activation': '"""relu"""', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (407, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['ndense_units'], {'name': '"""dense"""'}), True, 'import tensorflow as tf\n'), (426, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filters_numbers[0]'], {'kernel_size': 'filters_size[0]', 'strides': 'strides[0]', 'kernel_regularizer': 
'L2_norm', 'activation': '"""relu"""', 'input_shape': 'input_shape', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (430, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (441, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'name': '"""flatten"""'}), True, 'import tensorflow as tf\n'), (446, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(ndense_units * 2)'], {'name': '"""dense"""'}), True, 'import tensorflow as tf\n'), (449, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['ndense_units'], {'name': '"""code"""'}), True, 'import tensorflow as tf\n'), (452, 'tensorflow.keras.layers.ConvLSTM2D', 'tf.keras.layers.ConvLSTM2D', (['filters_numbers[0]'], {'kernel_size': 'filters_size[0]', 'strides': 'strides[0]', 'kernel_regularizer': 'L2_norm', 'input_shape': 'input_shape', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (456, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filters_numbers[0]'], {'kernel_size': 'filters_size[0]', 'strides': 'strides[0]', 'kernel_regularizer': 'L2_norm', 'activation': '"""relu"""', 'input_shape': 'input_shape', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (463, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filters_numbers[i]'], {'kernel_size': 'filters_size[i]', 'strides': 'strides[i]', 'kernel_regularizer': 'L2_norm', 'activation': '"""relu"""', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (468, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (474, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(ndense_units * 2)'], {'name': '"""dense"""'}), True, 'import tensorflow as tf\n'), (477, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['ndense_units'], {'name': '"""code"""'}), True, 'import tensorflow as tf\n'), (541, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filters_numbers[i]'], {'kernel_size': '(3, filters_size[i], filters_size[i])', 'strides': '(1, strides[i], strides[i])', 'kernel_regularizer': 'L2_norm', 'activation': '"""relu"""', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (545, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2, 2)', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (547, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filters_numbers[i]'], {'kernel_size': 'filters_size[i]', 'strides': '(1, strides[i], strides[i])', 'kernel_regularizer': 'L2_norm', 'activation': '"""relu"""', 'data_format': '"""channels_last"""'}), True, 'import tensorflow as tf\n'), (589, 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.001)'], {}), True, 'import tensorflow as tf\n'), (625, 'tensorflow.math.l2_normalize', 'tf.math.l2_normalize', (['x'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (651, 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['x'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (675, 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['x'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (697, 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['x'], {'axis': '(1)'}), True, 'import 
tensorflow as tf\n'), (718, 'tensorflow.transpose', 'tf.transpose', (['x', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (720, 'tensorflow.transpose', 'tf.transpose', (['x', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (192, 'tensorflow.keras.layers.ConvLSTM2D', 'tf.keras.layers.ConvLSTM2D', (['(50)', '(3)'], {'padding': '"""same"""', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (195, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(None, input_shape[0], input_shape[1], input_shape[2])', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (269, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout'], {'name': '"""drop_concat"""'}), True, 'import tensorflow as tf\n'), (273, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(ndense_units * 2)'], {'name': '"""dense"""'}), True, 'import tensorflow as tf\n'), (298, 'tensorflow.math.l2_normalize', 'tf.math.l2_normalize', (['x'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (368, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filters_numbers[i]'], {'kernel_size': 'filters_size[i]', 'strides': 'strides[i]', 'kernel_regularizer': 'L2_norm', 'activation': '"""relu"""', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (372, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (381, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout'], {'name': '"""drop"""'}), True, 'import tensorflow as tf\n'), (401, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (409, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout'], {'name': '"""drop"""'}), True, 'import tensorflow as tf\n'), (434, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filters_numbers[i]'], {'kernel_size': 'filters_size[i]', 'strides': 'strides[i]', 'kernel_regularizer': 'L2_norm', 'activation': '"""relu"""', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (439, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (448, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout'], {'name': '"""drop"""'}), True, 'import tensorflow as tf\n'), (476, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout'], {'name': '"""drop"""'}), True, 'import tensorflow as tf\n'), (734, 'numpy.random.randint', 'np.random.randint', (['(100)'], {'size': '(1)'}), True, 'import numpy as np\n'), (44, 'nets.triplet_loss_all.TripletBatchAllLoss', 'TripletBatchAllLoss', ([], {}), False, 'from nets.triplet_loss_all import TripletBatchAllLoss\n'), (115, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.keras.layers.ConvLSTM2D', 'tf.keras.layers.ConvLSTM2D', (['(16)', '(7)'], {'padding': '"""same"""', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(None, input_shape[0], input_shape[1], input_shape[2])', 'name': '"""input"""'}), True, 'import tensorflow 
as tf\n'), (275, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout'], {'name': '"""drop"""'}), True, 'import tensorflow as tf\n'), (308, 'tensorflow.keras.losses.Huber', 'tf.keras.losses.Huber', ([], {'delta': 'hdelta'}), True, 'import tensorflow as tf\n'), (709, 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['x'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (709, 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['x'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (713, 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['x'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (713, 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['x'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (200, 'tensorflow.keras.layers.ConvLSTM2D', 'tf.keras.layers.ConvLSTM2D', (['(50)', '(7)'], {'padding': '"""same"""', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (203, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(None, input_shape[0], input_shape[1], input_shape[2])', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': 'input_shape', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (262, 'tensorflow.math.l2_normalize', 'tf.math.l2_normalize', (['x'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (263, 'tensorflow.math.l2_normalize', 'tf.math.l2_normalize', (['x'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (310, 'tensorflow_addons.losses.TripletSemiHardLoss', 'tfa.losses.TripletSemiHardLoss', ([], {'margin': 'margin'}), True, 'import tensorflow_addons as tfa\n'), (204, 'tensorflow.keras.layers.ConvLSTM2D', 'tf.keras.layers.ConvLSTM2D', (['(50)', '(3)'], {'padding': '"""same"""', 'data_format': '"""channels_first"""'}), True, 'import tensorflow as tf\n'), (312, 'tensorflow_addons.losses.TripletHardLoss', 'tfa.losses.TripletHardLoss', ([], {'margin': 'margin'}), True, 'import tensorflow_addons as tfa\n'), (314, 'nets.triplet_loss_all.TripletBatchAllLoss', 'TripletBatchAllLoss', ([], {'margin': 'margin'}), False, 'from nets.triplet_loss_all import TripletBatchAllLoss\n')]
loveprolife/srcnn-tensorflow2
163c90b33af22b460173376d27a1714025056de7
from utils import (
    read_data,
    input_setup,
    imsave,
    merge,
    get_last_weights
)
import numpy as np
import datetime
import tensorflow as tf
import time
import pprint
import os
import argparse
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
import matplotlib.pyplot as plt

os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
    except RuntimeError as e:
        print(e)

parser = argparse.ArgumentParser(description='SRCNN Training')
parser.add_argument("--epoch", default=150, type=int, help="Number of epochs [150]")
parser.add_argument("--batch_size", default=16, type=int, help="The size of batch images [16]")
parser.add_argument("--image_size", default=33, type=int, help="The size of image to use [33]")
parser.add_argument("--label_size", default=21, type=int, help="The size of label to produce [21]")
parser.add_argument("--learning_rate", default=1e-4, type=float,
                    help="The learning rate of gradient descent algorithm [1e-4]")
parser.add_argument("--c_dim", default=1, type=int, help="Dimension of image color. [1]")
parser.add_argument("--scale", default=3, type=int,
                    help="The size of scale factor for preprocessing input image [3]")
parser.add_argument("--stride", default=14, type=int, help="The size of stride to apply input image [14]")
parser.add_argument("--checkpoint_dir", default="checkpoint/", type=str,
                    help="Name of checkpoint directory [checkpoint]")
parser.add_argument("--sample_dir", default="sample", type=str, help="Name of sample directory [sample]")
parser.add_argument("-w", "--load_weights", default=None, type=str,
                    help="whether to load weights from a checkpoint, set None to initialize, "
                         "set 'last' to load last checkpoint")
parser.add_argument("--save_path", default='checkpoint/models/', type=str)
parser.add_argument("--is_train", default=True, type=bool, help="True for training, False for testing [True]")
# parser.add_argument("--is_train", default=False, type=bool, help="True for training, False for testing [True]")
args, unknown = parser.parse_known_args()

pp = pprint.PrettyPrinter()


def plot_graphs(history, string):
    plt.plot(history.history[string])
    plt.plot(history.history['val_' + string])
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([string, 'val_' + string])
    plt.show()


def createmodel(args):
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Conv2D(64, (9, 9), kernel_initializer='normal', strides=1, padding='VALID',
                                     activation='relu',
                                     input_shape=[args.image_size, args.image_size, args.c_dim],
                                     name='conv1'))
    model.add(tf.keras.layers.Conv2D(32, (1, 1), kernel_initializer='normal', strides=1, padding='VALID',
                                     activation='relu', name='conv2'))
    model.add(tf.keras.layers.Conv2D(1, (5, 5), kernel_initializer='normal', strides=1, padding='VALID',
                                     activation='relu', name='conv3'))
    model.compile(optimizer=tf.keras.optimizers.Adam(lr=args.learning_rate), loss=tf.losses.MSE)
    return model


pp.pprint(args)
os.makedirs(args.checkpoint_dir, exist_ok=True)
os.makedirs(args.save_path, exist_ok=True)
os.makedirs(args.sample_dir, exist_ok=True)

if args.is_train:
    input_setup(args)
    data_dir = 'checkpoint/train.h5'
    train_data, train_label = read_data(data_dir)
    srcnn = createmodel(args)
    # load last weights
    if args.load_weights is not None:
        if args.load_weights.endswith('.h5'):
            weights_path = args.load_weights
        else:
            weights_path = get_last_weights(args.save_path)
        try:
            last_step = int(os.path.basename(weights_path).split('_')[-1].split('.')[0])
        except Exception:
            last_step = 0
        try:
            ret = srcnn.load_weights(weights_path)
        except RuntimeError as e:
            print(f'[Warning] Ignoring {e}')
            print('[Warning] Don\'t panic if you see this, this might be because you load a pretrained'
                  ' weights with different number of classes. The rest of the weights should be loaded'
                  ' already.')
        print(f'[Info] loaded weights: {os.path.basename(weights_path)}, '
              f'resuming checkpoint from step: {last_step}')
    else:
        last_step = 0
    current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    log_dir = 'logs/' + current_time
    summary_writer = tf.summary.create_file_writer(log_dir)
    model_path = 'SRCNN.h5'
    saved_model = tf.keras.callbacks.ModelCheckpoint(args.save_path + 'ep_{epoch:03d}.h5', monitor='loss',
                                                     save_weights_only=True, save_best_only=True,
                                                     period=5)
    tensorboard = tf.keras.callbacks.TensorBoard(log_dir='log')
    start_time = time.time()
    history = srcnn.fit(train_data, train_label, batch_size=args.batch_size, validation_split=0.2,
                        epochs=args.epoch, initial_epoch=last_step,
                        callbacks=[saved_model, tensorboard], verbose=2)
    print('training time: ' + str(time.time() - start_time))
    # plot_graphs(history, "val_loss")
    plot_graphs(history, "loss")
else:
    nx, ny = input_setup(args)
    data_dir = 'checkpoint/test.h5'
    weights_path = 'checkpoint/ep150-loss0.005.h5'
    test_data, test_label = read_data(data_dir)
    print(test_data.shape)
    srcnn = createmodel(args)
    srcnn.load_weights(weights_path)
    result = srcnn.predict(test_data)
    print(result.shape)
    # result = srcnn.evaluate(test_data, test_label)
    result = merge(result, [nx, ny])
    print(result.shape)
    image_path = os.path.join(os.getcwd(), args.sample_dir)
    image_path = os.path.join(image_path, "test_image.png")
    imsave(result, image_path)
[ "matplotlib.pyplot.legend", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.config.experimental.list_logical_devices", "tensorflow.config.experimental.set_memory_growth", "tensorflow.config.experimental.list_physical_devices", "tensorflow.summary.create_file_writer", "tensorflow.keras.Sequential", "tensorflow.keras.layers.Conv2D", "matplotlib.pyplot.plot", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.callbacks.TensorBoard", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ]
train.py
[(25, 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), True, 'import tensorflow as tf\n'), (35, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""SRCNN Training"""'}), False, 'import argparse\n'), (53, 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {}), False, 'import pprint\n'), (76, 'os.makedirs', 'os.makedirs', (['args.checkpoint_dir'], {'exist_ok': '(True)'}), False, 'import os\n'), (77, 'os.makedirs', 'os.makedirs', (['args.save_path'], {'exist_ok': '(True)'}), False, 'import os\n'), (78, 'os.makedirs', 'os.makedirs', (['args.sample_dir'], {'exist_ok': '(True)'}), False, 'import os\n'), (55, 'matplotlib.pyplot.plot', 'plt.plot', (['history.history[string]'], {}), True, 'import matplotlib.pyplot as plt\n'), (56, 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_' + string]"], {}), True, 'import matplotlib.pyplot as plt\n'), (57, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (58, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['string'], {}), True, 'import matplotlib.pyplot as plt\n'), (59, 'matplotlib.pyplot.legend', 'plt.legend', (["[string, 'val_' + string]"], {}), True, 'import matplotlib.pyplot as plt\n'), (60, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (63, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (80, 'utils.input_setup', 'input_setup', (['args'], {}), False, 'from utils import read_data, input_setup, imsave, merge, get_last_weights\n'), (82, 'utils.read_data', 'read_data', (['data_dir'], {}), False, 'from utils import read_data, input_setup, imsave, merge, get_last_weights\n'), (107, 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['log_dir'], {}), True, 'import tensorflow as tf\n'), (109, 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (["(args.save_path + 'ep_{epoch:03d}.h5')"], {'monitor': '"""loss"""', 'save_weights_only': '(True)', 'save_best_only': '(True)', 'period': '(5)'}), True, 'import tensorflow as tf\n'), (111, 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': '"""log"""'}), True, 'import tensorflow as tf\n'), (112, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (119, 'utils.input_setup', 'input_setup', (['args'], {}), False, 'from utils import read_data, input_setup, imsave, merge, get_last_weights\n'), (122, 'utils.read_data', 'read_data', (['data_dir'], {}), False, 'from utils import read_data, input_setup, imsave, merge, get_last_weights\n'), (130, 'utils.merge', 'merge', (['result', '[nx, ny]'], {}), False, 'from utils import read_data, input_setup, imsave, merge, get_last_weights\n'), (133, 'os.path.join', 'os.path.join', (['image_path', '"""test_image.png"""'], {}), False, 'import os\n'), (134, 'utils.imsave', 'imsave', (['result', 'image_path'], {}), False, 'from utils import read_data, input_setup, imsave, merge, get_last_weights\n'), (30, 'tensorflow.config.experimental.list_logical_devices', 'tf.config.experimental.list_logical_devices', (['"""GPU"""'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(9, 9)'], {'kernel_initializer': '"""normal"""', 'strides': '(1)', 'padding': '"""VALID"""', 'activation': '"""relu"""', 'input_shape': '[args.image_size, args.image_size, args.c_dim]', 
'name': '"""conv1"""'}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(1, 1)'], {'kernel_initializer': '"""normal"""', 'strides': '(1)', 'padding': '"""VALID"""', 'activation': '"""relu"""', 'name': '"""conv2"""'}), True, 'import tensorflow as tf\n'), (69, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(1)', '(5, 5)'], {'kernel_initializer': '"""normal"""', 'strides': '(1)', 'padding': '"""VALID"""', 'activation': '"""relu"""', 'name': '"""conv3"""'}), True, 'import tensorflow as tf\n'), (132, 'os.getcwd', 'os.getcwd', ([], {}), False, 'import os\n'), (29, 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': 'args.learning_rate'}), True, 'import tensorflow as tf\n'), (89, 'utils.get_last_weights', 'get_last_weights', (['args.save_path'], {}), False, 'from utils import read_data, input_setup, imsave, merge, get_last_weights\n'), (105, 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), False, 'import datetime\n'), (102, 'os.path.basename', 'os.path.basename', (['weights_path'], {}), False, 'import os\n'), (115, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (91, 'os.path.basename', 'os.path.basename', (['weights_path'], {}), False, 'import os\n')]
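As a side note on the row above: a minimal sketch (not part of the dataset) of why the default --image_size 33 pairs with --label_size 21 in train.py — the three 'VALID' convolutions in createmodel trim (9-1) + (1-1) + (5-1) = 12 pixels per spatial dimension. The SimpleNamespace is a stand-in for the parsed CLI args:

import tensorflow as tf
from types import SimpleNamespace

args = SimpleNamespace(image_size=33, c_dim=1)  # stand-in for parse_known_args()
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(64, (9, 9), padding='VALID', activation='relu',
                           input_shape=[args.image_size, args.image_size, args.c_dim]),
    tf.keras.layers.Conv2D(32, (1, 1), padding='VALID', activation='relu'),
    tf.keras.layers.Conv2D(1, (5, 5), padding='VALID', activation='relu'),
])
print(model.output_shape)  # (None, 21, 21, 1): matches the 21x21 labels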
baranshad/models
aaf008855e9764f32d974e86f8e1f9cfddfafd9a
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for object_detection.core.freezable_batch_norm."""
import numpy as np
import tensorflow as tf

from object_detection.core import freezable_batch_norm


class FreezableBatchNormTest(tf.test.TestCase):
  """Tests for FreezableBatchNorm operations."""

  def _build_model(self, training=None):
    model = tf.keras.models.Sequential()
    norm = freezable_batch_norm.FreezableBatchNorm(training=training,
                                                   input_shape=(10,),
                                                   momentum=0.8)
    model.add(norm)
    return model, norm

  def _train_freezable_batch_norm(self, training_mean, training_var):
    model, _ = self._build_model()
    model.compile(loss='mse', optimizer='sgd')

    # centered on training_mean, variance training_var
    train_data = np.random.normal(
        loc=training_mean,
        scale=training_var,
        size=(1000, 10))
    model.fit(train_data, train_data, epochs=4, verbose=0)
    return model.weights

  def _test_batchnorm_layer(
      self, norm, should_be_training, test_data,
      testing_mean, testing_var, training_arg, training_mean, training_var):
    out_tensor = norm(tf.convert_to_tensor(test_data, dtype=tf.float32),
                      training=training_arg)
    out = tf.keras.backend.eval(out_tensor)
    out -= tf.keras.backend.eval(norm.beta)
    out /= tf.keras.backend.eval(norm.gamma)

    if not should_be_training:
      out *= training_var
      out += (training_mean - testing_mean)
      out /= testing_var

    np.testing.assert_allclose(out.mean(), 0.0, atol=1.5e-1)
    np.testing.assert_allclose(out.std(), 1.0, atol=1.5e-1)

  def test_batchnorm_freezing_training_none(self):
    with self.test_session():
      training_mean = 5.0
      training_var = 10.0

      testing_mean = -10.0
      testing_var = 5.0

      # Initially train the batch norm, and save the weights
      trained_weights = self._train_freezable_batch_norm(training_mean,
                                                         training_var)

      # Load the batch norm weights, freezing training to True.
      # Apply the batch norm layer to testing data and ensure it is normalized
      # according to the batch statistics.
      model, norm = self._build_model(training=True)
      for trained_weight, blank_weight in zip(trained_weights, model.weights):
        weight_copy = blank_weight.assign(tf.keras.backend.eval(trained_weight))
        tf.keras.backend.eval(weight_copy)

      # centered on testing_mean, variance testing_var
      test_data = np.random.normal(
          loc=testing_mean,
          scale=testing_var,
          size=(1000, 10))

      # Test with training=True passed to the call method:
      training_arg = True
      should_be_training = True
      self._test_batchnorm_layer(norm, should_be_training, test_data,
                                 testing_mean, testing_var, training_arg,
                                 training_mean, training_var)

      # Test with training=False passed to the call method:
      training_arg = False
      should_be_training = False
      self._test_batchnorm_layer(norm, should_be_training, test_data,
                                 testing_mean, testing_var, training_arg,
                                 training_mean, training_var)

      # Test the layer in various Keras learning phase scopes:
      training_arg = None
      should_be_training = False
      self._test_batchnorm_layer(norm, should_be_training, test_data,
                                 testing_mean, testing_var, training_arg,
                                 training_mean, training_var)

      tf.keras.backend.set_learning_phase(True)
      should_be_training = True
      self._test_batchnorm_layer(norm, should_be_training, test_data,
                                 testing_mean, testing_var, training_arg,
                                 training_mean, training_var)

      tf.keras.backend.set_learning_phase(False)
      should_be_training = False
      self._test_batchnorm_layer(norm, should_be_training, test_data,
                                 testing_mean, testing_var, training_arg,
                                 training_mean, training_var)

  def test_batchnorm_freezing_training_false(self):
    with self.test_session():
      training_mean = 5.0
      training_var = 10.0

      testing_mean = -10.0
      testing_var = 5.0

      # Initially train the batch norm, and save the weights
      trained_weights = self._train_freezable_batch_norm(training_mean,
                                                         training_var)

      # Load the batch norm back up, freezing training to False.
      # Apply the batch norm layer to testing data and ensure it is normalized
      # according to the training data's statistics.
      model, norm = self._build_model(training=False)
      for trained_weight, blank_weight in zip(trained_weights, model.weights):
        weight_copy = blank_weight.assign(tf.keras.backend.eval(trained_weight))
        tf.keras.backend.eval(weight_copy)

      # centered on testing_mean, variance testing_var
      test_data = np.random.normal(
          loc=testing_mean,
          scale=testing_var,
          size=(1000, 10))

      # Make sure that the layer is never training
      # Test with training=True passed to the call method:
      training_arg = True
      should_be_training = False
      self._test_batchnorm_layer(norm, should_be_training, test_data,
                                 testing_mean, testing_var, training_arg,
                                 training_mean, training_var)

      # Test with training=False passed to the call method:
      training_arg = False
      should_be_training = False
      self._test_batchnorm_layer(norm, should_be_training, test_data,
                                 testing_mean, testing_var, training_arg,
                                 training_mean, training_var)

      # Test the layer in various Keras learning phase scopes:
      training_arg = None
      should_be_training = False
      self._test_batchnorm_layer(norm, should_be_training, test_data,
                                 testing_mean, testing_var, training_arg,
                                 training_mean, training_var)

      tf.keras.backend.set_learning_phase(True)
      should_be_training = False
      self._test_batchnorm_layer(norm, should_be_training, test_data,
                                 testing_mean, testing_var, training_arg,
                                 training_mean, training_var)

      tf.keras.backend.set_learning_phase(False)
      should_be_training = False
      self._test_batchnorm_layer(norm, should_be_training, test_data,
                                 testing_mean, testing_var, training_arg,
                                 training_mean, training_var)


if __name__ == '__main__':
  tf.test.main()
[ "tensorflow.convert_to_tensor", "tensorflow.test.main", "tensorflow.keras.backend.eval", "numpy.random.normal", "tensorflow.keras.backend.set_learning_phase", "tensorflow.keras.models.Sequential" ]
research/object_detection/core/freezable_batch_norm_test.py
[(184, 'tensorflow.test.main', 'tf.test.main', ([], {}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (28, 'object_detection.core.freezable_batch_norm.FreezableBatchNorm', 'freezable_batch_norm.FreezableBatchNorm', ([], {'training': 'training', 'input_shape': '(10,)', 'momentum': '(0.8)'}), False, 'from object_detection.core import freezable_batch_norm\n'), (39, 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'training_mean', 'scale': 'training_var', 'size': '(1000, 10)'}), True, 'import numpy as np\n'), (51, 'tensorflow.keras.backend.eval', 'tf.keras.backend.eval', (['out_tensor'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.keras.backend.eval', 'tf.keras.backend.eval', (['norm.beta'], {}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.keras.backend.eval', 'tf.keras.backend.eval', (['norm.gamma'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['test_data'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (84, 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'testing_mean', 'scale': 'testing_var', 'size': '(1000, 10)'}), True, 'import numpy as np\n'), (110, 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(True)'], {}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(False)'], {}), True, 'import tensorflow as tf\n'), (143, 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'testing_mean', 'scale': 'testing_var', 'size': '(1000, 10)'}), True, 'import numpy as np\n'), (170, 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(True)'], {}), True, 'import tensorflow as tf\n'), (176, 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(False)'], {}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.keras.backend.eval', 'tf.keras.backend.eval', (['weight_copy'], {}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.keras.backend.eval', 'tf.keras.backend.eval', (['weight_copy'], {}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.keras.backend.eval', 'tf.keras.backend.eval', (['trained_weight'], {}), True, 'import tensorflow as tf\n'), (139, 'tensorflow.keras.backend.eval', 'tf.keras.backend.eval', (['trained_weight'], {}), True, 'import tensorflow as tf\n')]
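The tests above pin down a specific contract: a FreezableBatchNorm constructed with training=False is never in training mode, while training=True or training=None defers to the call-time flag and the Keras learning phase. A conceptual re-implementation of that contract, hedged — the real class lives in object_detection/core/freezable_batch_norm.py; this standalone version is only illustrative:

import tensorflow as tf

class FreezableBatchNormSketch(tf.keras.layers.BatchNormalization):
    """Batch norm that can be frozen into inference mode at construction."""

    def __init__(self, training=None, **kwargs):
        super(FreezableBatchNormSketch, self).__init__(**kwargs)
        self._training = training

    def call(self, inputs, training=None):
        # Only training=False freezes the layer; True/None defer to the
        # call-time argument (and the learning phase), matching what the
        # two tests above assert.
        if self._training is False:
            training = self._training
        return super(FreezableBatchNormSketch, self).call(inputs, training=training)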
JWThacker/Udacity_Intro_Machine_Learning_Tensorflow_Nanodegree
1d2b0db9d92d5491173e1d1052acc005721c0ad1
import warnings
warnings.filterwarnings('ignore')

import json
import sys
import argparse as ap

import numpy as np
import tensorflow as tf
tf.get_logger().setLevel('WARNING')
tf.autograph.set_verbosity(2)
import tensorflow_hub as hub
import logging

from utils import process_image, predict

'''
   Predict the flower species given an image of a flower.

   params:
       /path/to/image - a path to an image to make a prediction from.
       saved_model - a Keras model saved as a .h5
       --top_k - the top number of classes that image could be.
       --category_names - path to a .json labeling classes to species names.
'''
def main():
    # Add and then parse all command line arguments.
    parser = ap.ArgumentParser(usage=('python3 predict.py /path/to/image saved_model '
                                      '--top_k K --category_names map.json'),
                               description=('Predict the species'
                                            ' of a flower image.'))
    parser.add_argument('image_path', type=str, help='Path to an image of a flower')
    parser.add_argument('saved_model', type=str, help='A tf.Keras model saved as an .h5')
    parser.add_argument('--top_k', type=int, default=1,
                        help=('Number of different'
                              ' species probabilities'
                              ' will be displayed for'))
    parser.add_argument('--category_names', type=str, default=None,
                        help=('path to a .json file'
                              ' containing the mapped'
                              ' names of the predicted'
                              ' species of flowers'))
    args = parser.parse_args()

    # Load saved Keras model
    reloaded_model = tf.keras.models.load_model(args.saved_model,
                                                custom_objects={'KerasLayer': hub.KerasLayer})

    # predict the species with the corresponding probabilities
    try:
        probs, classes = predict(args.image_path, reloaded_model, args.top_k)
    except FileNotFoundError:
        print('\n\n')
        print('Image not found; enter a valid path to an image')
        print('\n\n')
        sys.exit()
    else:
        # If --category_names was not empty, map class labels to species names
        if args.category_names:
            species_names = []
            try:
                with open(args.category_names, 'r') as f:
                    class_names = json.load(f)
            except FileNotFoundError:
                print('\n\n')
                print(f'{args.category_names} not found; enter valid path.')
                print('\n\n')
                sys.exit()
            else:
                for i, classs in enumerate(classes):
                    species_names.append(class_names[classs])
                results = {name: prob for name, prob in zip(species_names, probs)}
                print('\n\n')
                print('Flower Species Name: Probability of species')
                for name in species_names:
                    print(name.title(), ': ', results[name])
                print('\n\n')
        # Otherwise print the class labels and corresponding probabilities
        else:
            print('\n\n')
            results = {classs: prob for classs, prob in zip(classes, probs)}
            print('Class Label: Probability of class')
            for classs in classes:
                print(classs, ': ', results[classs])
            print('\n\n')


if __name__ == '__main__':
    main()
[ "tensorflow.autograph.set_verbosity", "tensorflow.keras.models.load_model", "tensorflow.get_logger" ]
project_2/workspace/predict.py
[(2, 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), False, 'import warnings\n'), (11, 'tensorflow.autograph.set_verbosity', 'tf.autograph.set_verbosity', (['(2)'], {}), True, 'import tensorflow as tf\n'), (28, 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {'usage': '"""python3 predict.py /path/to/image saved_model --top_k K --category_names map.json"""', 'description': '"""Predict the species of a flower image."""'}), True, 'import argparse as ap\n'), (50, 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['args.saved_model'], {'custom_objects': "{'KerasLayer': hub.KerasLayer}"}), True, 'import tensorflow as tf\n'), (10, 'tensorflow.get_logger', 'tf.get_logger', ([], {}), True, 'import tensorflow as tf\n'), (54, 'utils.predict', 'predict', (['args.image_path', 'reloaded_model', 'args.top_k'], {}), False, 'from utils import process_image, predict\n'), (59, 'sys.exit', 'sys.exit', ([], {}), False, 'import sys\n'), (66, 'json.load', 'json.load', (['f'], {}), False, 'import json\n'), (71, 'sys.exit', 'sys.exit', ([], {}), False, 'import sys\n')]
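For context, a hedged illustration (file name and values hypothetical) of the label-to-name mapping predict.py performs when --category_names is passed; probs and classes mimic the (top-k probabilities, string class labels) pair that utils.predict returns in the script above:

import json

probs = [0.71, 0.18, 0.05]   # placeholder top-k probabilities
classes = ['1', '10', '76']  # placeholder string labels, used as JSON keys

with open('label_map.json') as f:  # hypothetical mapping file
    class_names = json.load(f)     # e.g. {"1": "pink primrose", ...}

for label, prob in zip(classes, probs):
    print(class_names[label].title(), ': ', prob)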
cclauss/TensorFlowTTS
cac7e27e9d2bf2144f6fef409675d16dd48bc158
# -*- coding: utf-8 -*- # Copyright 2020 Minh Nguyen (@dathudeptrai) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Based Trainer.""" import abc import logging import os import tensorflow as tf from tqdm import tqdm class BasedTrainer(metaclass=abc.ABCMeta): """Customized trainer module for all models.""" def __init__(self, steps, epochs, config): self.steps = steps self.epochs = epochs self.config = config self.finish_train = False self.writer = tf.summary.create_file_writer(config["outdir"]) self.train_data_loader = None self.eval_data_loader = None self.train_metrics = None self.eval_metrics = None self.list_metrics_name = None def init_train_eval_metrics(self, list_metrics_name): """Init train and eval metrics to save it to tensorboard.""" self.train_metrics = {} self.eval_metrics = {} for name in list_metrics_name: self.train_metrics.update( {name: tf.keras.metrics.Mean(name="train_" + name, dtype=tf.float32)} ) self.eval_metrics.update( {name: tf.keras.metrics.Mean(name="eval_" + name, dtype=tf.float32)} ) def reset_states_train(self): """Reset train metrics after save it to tensorboard.""" for metric in self.train_metrics.keys(): self.train_metrics[metric].reset_states() def reset_states_eval(self): """Reset eval metrics after save it to tensorboard.""" for metric in self.eval_metrics.keys(): self.eval_metrics[metric].reset_states() def update_train_metrics(self, dict_metrics_losses): for name, value in dict_metrics_losses.items(): self.train_metrics[name].update_state(value) def update_eval_metrics(self, dict_metrics_losses): for name, value in dict_metrics_losses.items(): self.eval_metrics[name].update_state(value) def set_train_data_loader(self, train_dataset): """Set train data loader (MUST).""" self.train_data_loader = train_dataset def get_train_data_loader(self): """Get train data loader.""" return self.train_data_loader def set_eval_data_loader(self, eval_dataset): """Set eval data loader (MUST).""" self.eval_data_loader = eval_dataset def get_eval_data_loader(self): """Get eval data loader.""" return self.eval_data_loader @abc.abstractmethod def compile(self): pass @abc.abstractmethod def create_checkpoint_manager(self, saved_path=None, max_to_keep=10): """Create checkpoint management.""" pass def run(self): """Run training.""" self.tqdm = tqdm( initial=self.steps, total=self.config["train_max_steps"], desc="[train]" ) while True: self._train_epoch() if self.finish_train: break self.tqdm.close() logging.info("Finish training.") @abc.abstractmethod def save_checkpoint(self): """Save checkpoint.""" pass @abc.abstractmethod def load_checkpoint(self, pretrained_path): """Load checkpoint.""" pass def _train_epoch(self): """Train model one epoch.""" for train_steps_per_epoch, batch in enumerate(self.train_data_loader, 1): # one step training self._train_step(batch) # check interval self._check_log_interval() self._check_eval_interval() self._check_save_interval() # check wheter training is finished if self.finish_train: return # update self.epochs += 1 self.train_steps_per_epoch = 
train_steps_per_epoch logging.info( f"(Steps: {self.steps}) Finished {self.epochs} epoch training " f"({self.train_steps_per_epoch} steps per epoch)." ) @abc.abstractmethod def _eval_epoch(self): """One epoch evaluation.""" pass @abc.abstractmethod def _train_step(self, batch): """One step training.""" pass @abc.abstractmethod def _check_log_interval(self): """Save log interval.""" pass @abc.abstractmethod def fit(self): pass def _check_eval_interval(self): """Evaluation interval step.""" if self.steps % self.config["eval_interval_steps"] == 0: self._eval_epoch() def _check_save_interval(self): """Save interval checkpoint.""" if self.steps % self.config["save_interval_steps"] == 0: self.save_checkpoint() logging.info(f"Successfully saved checkpoint @ {self.steps} steps.") def generate_and_save_intermediate_result(self, batch): """Generate and save intermediate result.""" pass def _write_to_tensorboard(self, list_metrics, stage="train"): """Write variables to tensorboard.""" with self.writer.as_default(): for key, value in list_metrics.items(): tf.summary.scalar(stage + "/" + key, value.result(), step=self.steps) self.writer.flush() class GanBasedTrainer(BasedTrainer): """Customized trainer module for GAN TTS training (MelGAN, GAN-TTS, ParallelWaveGAN).""" def __init__( self, steps, epochs, config, strategy, is_generator_mixed_precision=False, is_discriminator_mixed_precision=False, ): """Initialize trainer. Args: steps (int): Initial global steps. epochs (int): Initial global epochs. config (dict): Config dict loaded from yaml format configuration file. """ super().__init__(steps, epochs, config) self._is_generator_mixed_precision = is_generator_mixed_precision self._is_discriminator_mixed_precision = is_discriminator_mixed_precision self._strategy = strategy self._already_apply_input_signature = False def init_train_eval_metrics(self, list_metrics_name): with self._strategy.scope(): super().init_train_eval_metrics(list_metrics_name) def get_n_gpus(self): return self._strategy.num_replicas_in_sync def _get_train_element_signature(self): return self.train_data_loader.element_spec def _get_eval_element_signature(self): return self.eval_data_loader.element_spec def set_gen_model(self, generator_model): """Set generator class model (MUST).""" self._generator = generator_model def get_gen_model(self): """Get generator model.""" return self._generator def set_dis_model(self, discriminator_model): """Set discriminator class model (MUST).""" self._discriminator = discriminator_model def get_dis_model(self): """Get discriminator model.""" return self._discriminator def set_gen_optimizer(self, generator_optimizer): """Set generator optimizer (MUST).""" self._gen_optimizer = generator_optimizer if self._is_generator_mixed_precision: self._gen_optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer( self._gen_optimizer, "dynamic" ) def get_gen_optimizer(self): """Get generator optimizer.""" return self._gen_optimizer def set_dis_optimizer(self, discriminator_optimizer): """Set discriminator optimizer (MUST).""" self._dis_optimizer = discriminator_optimizer if self._is_discriminator_mixed_precision: self._dis_optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer( self._dis_optimizer, "dynamic" ) def get_dis_optimizer(self): """Get discriminator optimizer.""" return self._dis_optimizer def compile(self, gen_model, dis_model, gen_optimizer, dis_optimizer): self.set_gen_model(gen_model) self.set_dis_model(dis_model) self.set_gen_optimizer(gen_optimizer) 
self.set_dis_optimizer(dis_optimizer) def _train_step(self, batch): if self._already_apply_input_signature is False: train_element_signature = self._get_train_element_signature() eval_element_signature = self._get_eval_element_signature() self.one_step_forward = tf.function( self._one_step_forward, input_signature=[train_element_signature] ) self.one_step_evaluate = tf.function( self._one_step_evaluate, input_signature=[eval_element_signature] ) self.one_step_predict = tf.function( self._one_step_predict, input_signature=[eval_element_signature] ) self._already_apply_input_signature = True # run one_step_forward self.one_step_forward(batch) # update counts self.steps += 1 self.tqdm.update(1) self._check_train_finish() def _one_step_forward(self, batch): per_replica_losses = self._strategy.run( self._one_step_forward_per_replica, args=(batch,) ) return self._strategy.reduce( tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None ) @abc.abstractmethod def compute_per_example_generator_losses(self, batch, outputs): """Compute per example generator losses and return dict_metrics_losses Note that all element of the loss MUST has a shape [batch_size] and the keys of dict_metrics_losses MUST be in self.list_metrics_name. Args: batch: dictionary batch input return from dataloader outputs: outputs of the model Returns: per_example_losses: per example losses for each GPU, shape [B] dict_metrics_losses: dictionary loss. """ per_example_losses = 0.0 dict_metrics_losses = {} return per_example_losses, dict_metrics_losses @abc.abstractmethod def compute_per_example_discriminator_losses(self, batch, gen_outputs): """Compute per example discriminator losses and return dict_metrics_losses Note that all element of the loss MUST has a shape [batch_size] and the keys of dict_metrics_losses MUST be in self.list_metrics_name. Args: batch: dictionary batch input return from dataloader outputs: outputs of the model Returns: per_example_losses: per example losses for each GPU, shape [B] dict_metrics_losses: dictionary loss. """ per_example_losses = 0.0 dict_metrics_losses = {} return per_example_losses, dict_metrics_losses def _one_step_forward_per_replica(self, batch): per_replica_gen_losses = 0.0 per_replica_dis_losses = 0.0 # one step generator. with tf.GradientTape() as g_tape: outputs = self._generator(**batch, training=True) ( per_example_losses, dict_metrics_losses, ) = self.compute_per_example_generator_losses(batch, outputs) per_replica_gen_losses = tf.nn.compute_average_loss( per_example_losses, global_batch_size=self.config["batch_size"] * self.get_n_gpus(), ) if self._is_generator_mixed_precision: scaled_per_replica_gen_losses = self._gen_optimizer.get_scaled_loss( per_replica_gen_losses ) if self._is_generator_mixed_precision: scaled_gradients = g_tape.gradient( scaled_per_replica_gen_losses, self._generator.trainable_variables ) gradients = self._gen_optimizer.get_unscaled_gradients(scaled_gradients) else: gradients = g_tape.gradient( per_replica_gen_losses, self._generator.trainable_variables ) self._gen_optimizer.apply_gradients( zip(gradients, self._generator.trainable_variables) ) # accumulate loss into metrics self.update_train_metrics(dict_metrics_losses) # one step discriminator # recompute y_hat after 1 step generator for discriminator training. 
if self.steps >= self.config["discriminator_train_start_steps"]: with tf.GradientTape() as d_tape: ( per_example_losses, dict_metrics_losses, ) = self.compute_per_example_discriminator_losses( batch, self._generator(**batch) ) per_replica_dis_losses = tf.nn.compute_average_loss( per_example_losses, global_batch_size=self.config["batch_size"] * self.get_n_gpus(), ) if self._is_discriminator_mixed_precision: scaled_per_replica_dis_losses = self._dis_optimizer.get_scaled_loss( per_replica_dis_losses ) if self._is_discriminator_mixed_precision: scaled_gradients = d_tape.gradient( scaled_per_replica_dis_losses, self._discriminator.trainable_variables, ) gradients = self._dis_optimizer.get_unscaled_gradients(scaled_gradients) else: gradients = d_tape.gradient( per_replica_dis_losses, self._discriminator.trainable_variables ) self._dis_optimizer.apply_gradients( zip(gradients, self._discriminator.trainable_variables) ) # accumulate loss into metrics self.update_train_metrics(dict_metrics_losses) return per_replica_gen_losses + per_replica_dis_losses def _eval_epoch(self): """Evaluate model one epoch.""" logging.info(f"(Steps: {self.steps}) Start evaluation.") # calculate loss for each batch for eval_steps_per_epoch, batch in enumerate( tqdm(self.eval_data_loader, desc="[eval]"), 1 ): # eval one step self.one_step_evaluate(batch) if eval_steps_per_epoch <= self.config["num_save_intermediate_results"]: # save intermedia self.generate_and_save_intermediate_result(batch) logging.info( f"(Steps: {self.steps}) Finished evaluation " f"({eval_steps_per_epoch} steps per epoch)." ) # average loss for key in self.eval_metrics.keys(): logging.info( f"(Steps: {self.steps}) eval_{key} = {self.eval_metrics[key].result():.4f}." ) # record self._write_to_tensorboard(self.eval_metrics, stage="eval") # reset self.reset_states_eval() def _one_step_evalute_per_replica(self, batch): ################################################ # one step generator. 
outputs = self._generator(**batch, training=False) _, dict_metrics_losses = self.compute_per_example_generator_losses( batch, outputs ) # accumulate loss into metrics self.update_eval_metrics(dict_metrics_losses) ################################################ # one step discriminator if self.steps >= self.config["discriminator_train_start_steps"]: _, dict_metrics_losses = self.compute_per_example_discriminator_losses( batch, outputs ) # accumulate loss into metrics self.update_eval_metrics(dict_metrics_losses) ################################################ def _one_step_evaluate(self, batch): self._strategy.run(self._one_step_evalute_per_replica, args=(batch,)) def _one_step_predict_per_replica(self, batch): outputs = self._generator(**batch, training=False) return outputs def _one_step_predict(self, batch): outputs = self._strategy.run(self._one_step_predict_per_replica, args=(batch,)) return outputs @abc.abstractmethod def generate_and_save_intermediate_result(self, batch): return def create_checkpoint_manager(self, saved_path=None, max_to_keep=10): """Create checkpoint management.""" if saved_path is None: saved_path = self.config["outdir"] + "/checkpoints/" os.makedirs(saved_path, exist_ok=True) self.saved_path = saved_path self.ckpt = tf.train.Checkpoint( steps=tf.Variable(1), epochs=tf.Variable(1), gen_optimizer=self.get_gen_optimizer(), dis_optimizer=self.get_dis_optimizer(), ) self.ckp_manager = tf.train.CheckpointManager( self.ckpt, saved_path, max_to_keep=max_to_keep ) def save_checkpoint(self): """Save checkpoint.""" self.ckpt.steps.assign(self.steps) self.ckpt.epochs.assign(self.epochs) self.ckp_manager.save(checkpoint_number=self.steps) self._generator.save_weights( self.saved_path + "generator-{}.h5".format(self.steps) ) self._discriminator.save_weights( self.saved_path + "discriminator-{}.h5".format(self.steps) ) def load_checkpoint(self, pretrained_path): """Load checkpoint.""" self.ckpt.restore(pretrained_path) self.steps = self.ckpt.steps.numpy() self.epochs = self.ckpt.epochs.numpy() self._gen_optimizer = self.ckpt.gen_optimizer # re-assign iterations (global steps) for gen_optimizer. self._gen_optimizer.iterations.assign(tf.cast(self.steps, tf.int64)) # re-assign iterations (global steps) for dis_optimizer. try: discriminator_train_start_steps = self.config[ "discriminator_train_start_steps" ] discriminator_train_start_steps = tf.math.maximum( 0, discriminator_train_start_steps - self.steps ) except Exception: discriminator_train_start_steps = self.steps self._dis_optimizer = self.ckpt.dis_optimizer self._dis_optimizer.iterations.assign( tf.cast(discriminator_train_start_steps, tf.int64) ) # load weights. self._generator.load_weights( self.saved_path + "generator-{}.h5".format(self.steps) ) self._discriminator.load_weights( self.saved_path + "discriminator-{}.h5".format(self.steps) ) def _check_train_finish(self): """Check training finished.""" if self.steps >= self.config["train_max_steps"]: self.finish_train = True if ( self.steps != 0 and self.steps == self.config["discriminator_train_start_steps"] ): self.finish_train = True logging.info( f"Finished training only generator at {self.steps}steps, pls resume and continue training." ) def _check_log_interval(self): """Log to tensorboard.""" if self.steps % self.config["log_interval_steps"] == 0: for metric_name in self.list_metrics_name: logging.info( f"(Step: {self.steps}) train_{metric_name} = {self.train_metrics[metric_name].result():.4f}." 
                )
            self._write_to_tensorboard(self.train_metrics, stage="train")

            # reset
            self.reset_states_train()

    def fit(self, train_data_loader, valid_data_loader, saved_path, resume=None):
        self.set_train_data_loader(train_data_loader)
        self.set_eval_data_loader(valid_data_loader)
        self.train_data_loader = self._strategy.experimental_distribute_dataset(
            self.train_data_loader
        )
        self.eval_data_loader = self._strategy.experimental_distribute_dataset(
            self.eval_data_loader
        )
        with self._strategy.scope():
            self.create_checkpoint_manager(saved_path=saved_path, max_to_keep=10000)
            if resume is not None and len(resume) > 1:
                self.load_checkpoint(resume)
                logging.info(f"Successfully resumed from {resume}.")
            self.run()


class Seq2SeqBasedTrainer(BasedTrainer, metaclass=abc.ABCMeta):
    """Customized trainer module for Seq2Seq TTS training (Tacotron, FastSpeech)."""

    def __init__(
        self,
        steps,
        epochs,
        config,
        strategy,
        is_mixed_precision=False,
    ):
        """Initialize trainer.

        Args:
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            config (dict): Config dict loaded from yaml format configuration file.
            strategy (tf.distribute): Strategy for distributed training.
            is_mixed_precision (bool): Use mixed_precision training or not.

        """
        super().__init__(steps, epochs, config)
        self._is_mixed_precision = is_mixed_precision
        self._strategy = strategy

        # check if we already apply input_signature for train_step.
        self._already_apply_input_signature = False

    def init_train_eval_metrics(self, list_metrics_name):
        with self._strategy.scope():
            super().init_train_eval_metrics(list_metrics_name)

    def set_model(self, model):
        """Set generator class model (MUST)."""
        self._model = model

    def get_model(self):
        """Get generator model."""
        return self._model

    def set_optimizer(self, optimizer):
        """Set optimizer (MUST)."""
        self._optimizer = optimizer
        if self._is_mixed_precision:
            self._optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
                self._optimizer, "dynamic"
            )

    def get_optimizer(self):
        """Get optimizer."""
        return self._optimizer

    def get_n_gpus(self):
        return self._strategy.num_replicas_in_sync

    def compile(self, model, optimizer):
        self.set_model(model)
        self.set_optimizer(optimizer)

    def _get_train_element_signature(self):
        return self.train_data_loader.element_spec

    def _get_eval_element_signature(self):
        return self.eval_data_loader.element_spec

    def _train_step(self, batch):
        if self._already_apply_input_signature is False:
            train_element_signature = self._get_train_element_signature()
            eval_element_signature = self._get_eval_element_signature()
            self.one_step_forward = tf.function(
                self._one_step_forward, input_signature=[train_element_signature]
            )
            self.one_step_evaluate = tf.function(
                self._one_step_evaluate, input_signature=[eval_element_signature]
            )
            self.one_step_predict = tf.function(
                self._one_step_predict, input_signature=[eval_element_signature]
            )
            self._already_apply_input_signature = True

        # run one_step_forward
        self.one_step_forward(batch)

        # update counts
        self.steps += 1
        self.tqdm.update(1)
        self._check_train_finish()

    def _one_step_forward(self, batch):
        per_replica_losses = self._strategy.run(
            self._one_step_forward_per_replica, args=(batch,)
        )
        return self._strategy.reduce(
            tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None
        )

    def _one_step_forward_per_replica(self, batch):
        with tf.GradientTape() as tape:
            outputs = self._model(**batch, training=True)
            per_example_losses, dict_metrics_losses = self.compute_per_example_losses(
                batch, outputs
            )
            per_replica_losses = tf.nn.compute_average_loss(
                per_example_losses,
                global_batch_size=self.config["batch_size"] * self.get_n_gpus(),
            )

            if self._is_mixed_precision:
                scaled_per_replica_losses = self._optimizer.get_scaled_loss(
                    per_replica_losses
                )

        if self._is_mixed_precision:
            scaled_gradients = tape.gradient(
                scaled_per_replica_losses, self._model.trainable_variables
            )
            gradients = self._optimizer.get_unscaled_gradients(scaled_gradients)
        else:
            gradients = tape.gradient(
                per_replica_losses, self._model.trainable_variables
            )

        self._optimizer.apply_gradients(
            zip(gradients, self._model.trainable_variables), 1.0
        )

        # accumulate loss into metrics
        self.update_train_metrics(dict_metrics_losses)

        return per_replica_losses

    @abc.abstractmethod
    def compute_per_example_losses(self, batch, outputs):
        """Compute per example losses and return dict_metrics_losses
        Note that every element of the loss MUST have shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.

        Args:
            batch: dictionary batch input returned from the dataloader
            outputs: outputs of the model

        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.

        """
        per_example_losses = 0.0
        dict_metrics_losses = {}
        return per_example_losses, dict_metrics_losses

    def _eval_epoch(self):
        """Evaluate model one epoch."""
        logging.info(f"(Steps: {self.steps}) Start evaluation.")

        # calculate loss for each batch
        for eval_steps_per_epoch, batch in enumerate(
            tqdm(self.eval_data_loader, desc="[eval]"), 1
        ):
            # eval one step
            self.one_step_evaluate(batch)

            if eval_steps_per_epoch <= self.config["num_save_intermediate_results"]:
                # save intermediate results
                self.generate_and_save_intermediate_result(batch)

        logging.info(
            f"(Steps: {self.steps}) Finished evaluation "
            f"({eval_steps_per_epoch} steps per epoch)."
        )

        # average loss
        for key in self.eval_metrics.keys():
            logging.info(
                f"(Steps: {self.steps}) eval_{key} = {self.eval_metrics[key].result():.4f}."
            )

        # record
        self._write_to_tensorboard(self.eval_metrics, stage="eval")

        # reset
        self.reset_states_eval()

    def _one_step_evaluate_per_replica(self, batch):
        outputs = self._model(**batch, training=False)
        _, dict_metrics_losses = self.compute_per_example_losses(batch, outputs)
        self.update_eval_metrics(dict_metrics_losses)

    def _one_step_evaluate(self, batch):
        self._strategy.run(self._one_step_evaluate_per_replica, args=(batch,))

    def _one_step_predict_per_replica(self, batch):
        outputs = self._model(**batch, training=False)
        return outputs

    def _one_step_predict(self, batch):
        outputs = self._strategy.run(self._one_step_predict_per_replica, args=(batch,))
        return outputs

    @abc.abstractmethod
    def generate_and_save_intermediate_result(self, batch):
        return

    def create_checkpoint_manager(self, saved_path=None, max_to_keep=10):
        """Create checkpoint management."""
        if saved_path is None:
            saved_path = self.config["outdir"] + "/checkpoints/"
        os.makedirs(saved_path, exist_ok=True)

        self.saved_path = saved_path
        self.ckpt = tf.train.Checkpoint(
            steps=tf.Variable(1), epochs=tf.Variable(1), optimizer=self.get_optimizer()
        )
        self.ckp_manager = tf.train.CheckpointManager(
            self.ckpt, saved_path, max_to_keep=max_to_keep
        )

    def save_checkpoint(self):
        """Save checkpoint."""
        self.ckpt.steps.assign(self.steps)
        self.ckpt.epochs.assign(self.epochs)
        self.ckp_manager.save(checkpoint_number=self.steps)
        self._model.save_weights(self.saved_path + "model-{}.h5".format(self.steps))

    def load_checkpoint(self, pretrained_path):
        """Load checkpoint."""
        self.ckpt.restore(pretrained_path)
        self.steps = self.ckpt.steps.numpy()
        self.epochs = self.ckpt.epochs.numpy()
        self._optimizer = self.ckpt.optimizer
        # re-assign iterations (global steps) for optimizer.
        self._optimizer.iterations.assign(tf.cast(self.steps, tf.int64))

        # load weights.
        self._model.load_weights(self.saved_path + "model-{}.h5".format(self.steps))

    def _check_train_finish(self):
        """Check training finished."""
        if self.steps >= self.config["train_max_steps"]:
            self.finish_train = True

    def _check_log_interval(self):
        """Log to tensorboard."""
        if self.steps % self.config["log_interval_steps"] == 0:
            for metric_name in self.list_metrics_name:
                logging.info(
                    f"(Step: {self.steps}) train_{metric_name} = {self.train_metrics[metric_name].result():.4f}."
                )
            self._write_to_tensorboard(self.train_metrics, stage="train")

            # reset
            self.reset_states_train()

    def fit(self, train_data_loader, valid_data_loader, saved_path, resume=None):
        self.set_train_data_loader(train_data_loader)
        self.set_eval_data_loader(valid_data_loader)
        self.train_data_loader = self._strategy.experimental_distribute_dataset(
            self.train_data_loader
        )
        self.eval_data_loader = self._strategy.experimental_distribute_dataset(
            self.eval_data_loader
        )
        with self._strategy.scope():
            self.create_checkpoint_manager(saved_path=saved_path, max_to_keep=10000)
            if resume is not None and len(resume) > 1:
                self.load_checkpoint(resume)
                logging.info(f"Successfully resumed from {resume}.")
            self.run()
[ "tensorflow.train.CheckpointManager", "tensorflow.keras.mixed_precision.experimental.LossScaleOptimizer", "tensorflow.Variable", "tensorflow.summary.create_file_writer", "tensorflow.cast", "tensorflow.math.maximum", "tensorflow.function", "tensorflow.keras.metrics.Mean", "tensorflow.GradientTape" ]
tensorflow_tts/trainers/base_trainer.py
[(33, 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (["config['outdir']"], {}), True, 'import tensorflow as tf\n'), (97, 'tqdm.tqdm', 'tqdm', ([], {'initial': 'self.steps', 'total': "self.config['train_max_steps']", 'desc': '"""[train]"""'}), False, 'from tqdm import tqdm\n'), (107, 'logging.info', 'logging.info', (['"""Finish training."""'], {}), False, 'import logging\n'), (137, 'logging.info', 'logging.info', (['f"""(Steps: {self.steps}) Finished {self.epochs} epoch training ({self.train_steps_per_epoch} steps per epoch)."""'], {}), False, 'import logging\n'), (418, 'logging.info', 'logging.info', (['f"""(Steps: {self.steps}) Start evaluation."""'], {}), False, 'import logging\n'), (431, 'logging.info', 'logging.info', (['f"""(Steps: {self.steps}) Finished evaluation ({eval_steps_per_epoch} steps per epoch)."""'], {}), False, 'import logging\n'), (491, 'os.makedirs', 'os.makedirs', (['saved_path'], {'exist_ok': '(True)'}), False, 'import os\n'), (500, 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['self.ckpt', 'saved_path'], {'max_to_keep': 'max_to_keep'}), True, 'import tensorflow as tf\n'), (736, 'logging.info', 'logging.info', (['f"""(Steps: {self.steps}) Start evaluation."""'], {}), False, 'import logging\n'), (749, 'logging.info', 'logging.info', (['f"""(Steps: {self.steps}) Finished evaluation ({eval_steps_per_epoch} steps per epoch)."""'], {}), False, 'import logging\n'), (792, 'os.makedirs', 'os.makedirs', (['saved_path'], {'exist_ok': '(True)'}), False, 'import os\n'), (798, 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['self.ckpt', 'saved_path'], {'max_to_keep': 'max_to_keep'}), True, 'import tensorflow as tf\n'), (170, 'logging.info', 'logging.info', (['f"""Successfully saved checkpoint @ {self.steps} steps."""'], {}), False, 'import logging\n'), (243, 'tensorflow.keras.mixed_precision.experimental.LossScaleOptimizer', 'tf.keras.mixed_precision.experimental.LossScaleOptimizer', (['self._gen_optimizer', '"""dynamic"""'], {}), True, 'import tensorflow as tf\n'), (255, 'tensorflow.keras.mixed_precision.experimental.LossScaleOptimizer', 'tf.keras.mixed_precision.experimental.LossScaleOptimizer', (['self._dis_optimizer', '"""dynamic"""'], {}), True, 'import tensorflow as tf\n'), (273, 'tensorflow.function', 'tf.function', (['self._one_step_forward'], {'input_signature': '[train_element_signature]'}), True, 'import tensorflow as tf\n'), (276, 'tensorflow.function', 'tf.function', (['self._one_step_evaluate'], {'input_signature': '[eval_element_signature]'}), True, 'import tensorflow as tf\n'), (279, 'tensorflow.function', 'tf.function', (['self._one_step_predict'], {'input_signature': '[eval_element_signature]'}), True, 'import tensorflow as tf\n'), (341, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (422, 'tqdm.tqdm', 'tqdm', (['self.eval_data_loader'], {'desc': '"""[eval]"""'}), False, 'from tqdm import tqdm\n'), (523, 'tensorflow.cast', 'tf.cast', (['self.steps', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (529, 'tensorflow.math.maximum', 'tf.math.maximum', (['(0)', '(discriminator_train_start_steps - self.steps)'], {}), True, 'import tensorflow as tf\n'), (536, 'tensorflow.cast', 'tf.cast', (['discriminator_train_start_steps', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (557, 'logging.info', 'logging.info', (['f"""Finished training only generator at {self.steps}steps, pls resume and continue training."""'], {}), False, 'import logging\n'), 
(629, 'tensorflow.keras.mixed_precision.experimental.LossScaleOptimizer', 'tf.keras.mixed_precision.experimental.LossScaleOptimizer', (['self._optimizer', '"""dynamic"""'], {}), True, 'import tensorflow as tf\n'), (654, 'tensorflow.function', 'tf.function', (['self._one_step_forward'], {'input_signature': '[train_element_signature]'}), True, 'import tensorflow as tf\n'), (657, 'tensorflow.function', 'tf.function', (['self._one_step_evaluate'], {'input_signature': '[eval_element_signature]'}), True, 'import tensorflow as tf\n'), (660, 'tensorflow.function', 'tf.function', (['self._one_step_predict'], {'input_signature': '[eval_element_signature]'}), True, 'import tensorflow as tf\n'), (682, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (740, 'tqdm.tqdm', 'tqdm', (['self.eval_data_loader'], {'desc': '"""[eval]"""'}), False, 'from tqdm import tqdm\n'), (816, 'tensorflow.cast', 'tf.cast', (['self.steps', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (378, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (495, 'tensorflow.Variable', 'tf.Variable', (['(1)'], {}), True, 'import tensorflow as tf\n'), (496, 'tensorflow.Variable', 'tf.Variable', (['(1)'], {}), True, 'import tensorflow as tf\n'), (586, 'logging.info', 'logging.info', (['f"""Successfully resumed from {resume}."""'], {}), False, 'import logging\n'), (796, 'tensorflow.Variable', 'tf.Variable', (['(1)'], {}), True, 'import tensorflow as tf\n'), (796, 'tensorflow.Variable', 'tf.Variable', (['(1)'], {}), True, 'import tensorflow as tf\n'), (851, 'logging.info', 'logging.info', (['f"""Successfully resumed from {resume}."""'], {}), False, 'import logging\n'), (46, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': "('train_' + name)", 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': "('eval_' + name)", 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n')]
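The trainers above scale each replica's loss by the global batch size (per-replica batch size times the number of replicas) so that gradients summed across replicas match single-device training. Below is a minimal standalone sketch of that pattern under a MirroredStrategy; the model, data shapes, and function names here are illustrative and not part of the repository above.

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
per_replica_batch = 8
global_batch_size = per_replica_batch * strategy.num_replicas_in_sync

with strategy.scope():
    # a toy regression model standing in for the generator
    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    optimizer = tf.keras.optimizers.Adam()

def step_fn(x, y):
    with tf.GradientTape() as tape:
        per_example_losses = tf.keras.losses.mse(y, model(x, training=True))
        # average over the *global* batch, not the per-replica batch
        loss = tf.nn.compute_average_loss(
            per_example_losses, global_batch_size=global_batch_size
        )
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss

@tf.function
def distributed_step(x, y):
    per_replica = strategy.run(step_fn, args=(x, y))
    # summing per-replica averages recovers the global mean loss
    return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica, axis=None)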
quincy-125/DigiPath_CLAM_TF
8b7ab50caaca13f666268b0f4e071d123e190978
import tensorflow as tf
import numpy as np


class S_Bag(tf.keras.Model):
    def __init__(self, dim_compress_features=512, n_class=2):
        super(S_Bag, self).__init__()
        self.dim_compress_features = dim_compress_features
        self.n_class = n_class

        self.s_bag_model = tf.keras.models.Sequential()
        self.s_bag_layer = tf.keras.layers.Dense(
            units=1, activation='linear', input_shape=(self.n_class, self.dim_compress_features),
            name='Bag_Classifier_Layer'
        )
        self.s_bag_model.add(self.s_bag_layer)

    def bag_classifier(self):
        return self.s_bag_model

    def h_slide(self, A, h):
        # compute the slide-level representation aggregated per the attention score distribution for the mth class
        SAR = list()
        for i in range(len(A)):
            sar = tf.linalg.matmul(tf.transpose(A[i]), h[i])  # shape (2, 512)
            SAR.append(sar)
        slide_agg_rep = tf.math.add_n(SAR)  # return h_[slide,m], shape (2, 512)

        return slide_agg_rep

    def call(self, bag_label, A, h):
        slide_agg_rep = self.h_slide(A, h)
        bag_classifier = self.bag_classifier()
        slide_score_unnorm = bag_classifier(slide_agg_rep)
        slide_score_unnorm = tf.reshape(slide_score_unnorm, (1, self.n_class))
        Y_hat = tf.math.top_k(slide_score_unnorm, 1)[1][-1]
        Y_prob = tf.math.softmax(
            tf.reshape(slide_score_unnorm, (1, self.n_class)))  # shape (1, 2); predictions for each of the classes
        predict_slide_label = np.argmax(Y_prob.numpy())

        Y_true = tf.one_hot([bag_label], 2)

        return slide_score_unnorm, Y_hat, Y_prob, predict_slide_label, Y_true


class M_Bag(tf.keras.Model):
    def __init__(self, dim_compress_features=512, n_class=2):
        super(M_Bag, self).__init__()
        self.dim_compress_features = dim_compress_features
        self.n_class = n_class

        # build an independent classifier branch per class; appending one shared
        # Sequential for every class would make all branches use the same weights
        self.m_bag_models = list()
        for i in range(self.n_class):
            m_bag_model = tf.keras.models.Sequential()
            m_bag_layer = tf.keras.layers.Dense(units=1, activation='linear',
                                                input_shape=(1, self.dim_compress_features),
                                                name='Bag_Classifier_Layer_' + str(i))
            m_bag_model.add(m_bag_layer)
            self.m_bag_models.append(m_bag_model)

    def bag_classifier(self):
        return self.m_bag_models

    def h_slide(self, A, h):
        # compute the slide-level representation aggregated per the attention score distribution for the mth class
        SAR = list()
        for i in range(len(A)):
            sar = tf.linalg.matmul(tf.transpose(A[i]), h[i])  # shape (2, 512)
            SAR.append(sar)

        SAR_Branch = list()
        for i in range(self.n_class):
            sar_branch = list()
            for j in range(len(SAR)):
                sar_c = tf.reshape(SAR[j][i], (1, self.dim_compress_features))
                sar_branch.append(sar_c)
            SAR_Branch.append(sar_branch)

        slide_agg_rep = list()
        for k in range(self.n_class):
            slide_agg_rep.append(tf.math.add_n(SAR_Branch[k]))

        return slide_agg_rep

    def call(self, bag_label, A, h):
        slide_agg_rep = self.h_slide(A, h)

        # return s_[slide,m] (slide-level prediction scores)
        ssus = list()
        for i in range(self.n_class):
            bag_classifier = self.bag_classifier()[i]
            ssu = bag_classifier(slide_agg_rep[i])
            ssus.append(ssu[0][0])
        slide_score_unnorm = tf.convert_to_tensor(ssus)
        slide_score_unnorm = tf.reshape(slide_score_unnorm, (1, self.n_class))

        Y_hat = tf.math.top_k(slide_score_unnorm, 1)[1][-1]
        Y_prob = tf.math.softmax(slide_score_unnorm)
        predict_slide_label = np.argmax(Y_prob.numpy())

        Y_true = tf.one_hot([bag_label], 2)

        return slide_score_unnorm, Y_hat, Y_prob, predict_slide_label, Y_true
[ "tensorflow.convert_to_tensor", "tensorflow.transpose", "tensorflow.keras.layers.Dense", "tensorflow.reshape", "tensorflow.math.add_n", "tensorflow.one_hot", "tensorflow.math.softmax", "tensorflow.math.top_k", "tensorflow.keras.models.Sequential" ]
MODEL/model_bag_classifier.py
[(11, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (12, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)', 'activation': '"""linear"""', 'input_shape': '(self.n_class, self.dim_compress_features)', 'name': '"""Bag_Classifier_Layer"""'}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.math.add_n', 'tf.math.add_n', (['SAR'], {}), True, 'import tensorflow as tf\n'), (35, 'tensorflow.reshape', 'tf.reshape', (['slide_score_unnorm', '(1, self.n_class)'], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.one_hot', 'tf.one_hot', (['[bag_label]', '(2)'], {}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)', 'activation': '"""linear"""', 'input_shape': '(1, self.dim_compress_features)', 'name': '"""Bag_Classifier_Layer"""'}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['ssus'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.reshape', 'tf.reshape', (['slide_score_unnorm', '(1, self.n_class)'], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.math.softmax', 'tf.math.softmax', (['slide_score_unnorm'], {}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.one_hot', 'tf.one_hot', (['[bag_label]', '(2)'], {}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.reshape', 'tf.reshape', (['slide_score_unnorm', '(1, self.n_class)'], {}), True, 'import tensorflow as tf\n'), (25, 'tensorflow.transpose', 'tf.transpose', (['A[i]'], {}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.math.top_k', 'tf.math.top_k', (['slide_score_unnorm', '(1)'], {}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.transpose', 'tf.transpose', (['A[i]'], {}), True, 'import tensorflow as tf\n'), (75, 'tensorflow.reshape', 'tf.reshape', (['SAR[j][i]', '(1, self.dim_compress_features)'], {}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.math.add_n', 'tf.math.add_n', (['SAR_Branch[k]'], {}), True, 'import tensorflow as tf\n'), (98, 'tensorflow.math.top_k', 'tf.math.top_k', (['slide_score_unnorm', '(1)'], {}), True, 'import tensorflow as tf\n')]
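A quick smoke test of the S_Bag head above, assuming the class definition is in scope. The patch count, attention scores, and features here are random dummy tensors chosen only to match the shapes noted in the code comments (each attention row is (1, n_class), each compressed feature is (1, 512)).

import tensorflow as tf

n_patches, n_class, dim = 6, 2, 512
bag = S_Bag(dim_compress_features=dim, n_class=n_class)

A = [tf.random.normal((1, n_class)) for _ in range(n_patches)]  # per-patch attention scores
h = [tf.random.normal((1, dim)) for _ in range(n_patches)]      # per-patch compressed features

slide_score_unnorm, Y_hat, Y_prob, pred, Y_true = bag(1, A, h)
print(pred, Y_prob.numpy())  # predicted slide label and class probabilities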
quincy-125/DigiPath_CLAM_TF
8b7ab50caaca13f666268b0f4e071d123e190978
import tensorflow as tf class NG_Att_Net(tf.keras.Model): def __init__(self, dim_features=1024, dim_compress_features=512, n_hidden_units=256, n_class=2, dropout=False, dropout_rate=.25): super(NG_Att_Net, self).__init__() self.dim_features = dim_features self.dim_compress_features = dim_compress_features self.n_hidden_units = n_hidden_units self.n_class = n_class self.dropout = dropout self.dropout_rate = dropout_rate self.compression_model = tf.keras.models.Sequential() self.model = tf.keras.models.Sequential() self.fc_compress_layer = tf.keras.layers.Dense(units=dim_compress_features, activation='relu', input_shape=(dim_features,), kernel_initializer='glorot_normal', bias_initializer='zeros', name='Fully_Connected_Layer') self.compression_model.add(self.fc_compress_layer) self.att_layer1 = tf.keras.layers.Dense(units=n_hidden_units, activation='linear', input_shape=(dim_compress_features,), kernel_initializer='glorot_normal', bias_initializer='zeros', name='Attention_layer1') self.att_layer2 = tf.keras.layers.Dense(units=n_hidden_units, activation='tanh', input_shape=(dim_compress_features,), kernel_initializer='glorot_normal', bias_initializer='zeros', name='Attention_Layer2') self.att_layer3 = tf.keras.layers.Dense(units=n_class, activation='linear', input_shape=(n_hidden_units,), kernel_initializer='glorot_normal', bias_initializer='zeros', name='Attention_Layer3') self.model.add(self.att_layer1) self.model.add(self.att_layer2) if dropout: self.model.add(tf.keras.layers.Dropout(dropout_rate, name='Dropout_Layer')) self.model.add(self.att_layer3) def att_model(self): attention_model = [self.compression_model, self.model] return attention_model def call(self, img_features): h = list() A = list() for i in img_features: c_imf = self.att_model()[0](i) h.append(c_imf) for j in h: a = self.att_model()[1](j) A.append(a) return h, A class G_Att_Net(tf.keras.Model): def __init__(self, dim_features=1024, dim_compress_features=512, n_hidden_units=256, n_class=2, dropout=False, dropout_rate=.25): super(G_Att_Net, self).__init__() self.dim_features = dim_features self.dim_compress_features = dim_compress_features self.n_hidden_units = n_hidden_units self.n_class = n_class self.dropout = dropout self.dropout_rate = dropout_rate self.compression_model = tf.keras.models.Sequential() self.model_v = tf.keras.models.Sequential() self.model_u = tf.keras.models.Sequential() self.model = tf.keras.models.Sequential() self.fc_compress_layer = tf.keras.layers.Dense(units=dim_compress_features, activation='relu', input_shape=(dim_features,), kernel_initializer='glorot_normal', bias_initializer='zeros', name='Fully_Connected_Layer') self.compression_model.add(self.fc_compress_layer) self.att_v_layer1 = tf.keras.layers.Dense(units=n_hidden_units, activation='linear', input_shape=(dim_compress_features,), kernel_initializer='glorot_normal', bias_initializer='zeros', name='Attention_V_Layer1') self.att_v_layer2 = tf.keras.layers.Dense(units=n_hidden_units, activation='tanh', input_shape=(dim_compress_features,), kernel_initializer='glorot_normal', bias_initializer='zeros', name='Attention_V_Layer2') self.att_u_layer1 = tf.keras.layers.Dense(units=n_hidden_units, activation='linear', input_shape=(dim_compress_features,), kernel_initializer='glorot_normal', bias_initializer='zeros', name='Attention_U_Layer1') self.att_u_layer2 = tf.keras.layers.Dense(units=n_hidden_units, activation='sigmoid', input_shape=(dim_compress_features,), kernel_initializer='glorot_normal', bias_initializer='zeros', 
name='Attention_U_Layer2') self.att_layer_f = tf.keras.layers.Dense(units=n_class, activation='linear', input_shape=(n_hidden_units,), kernel_initializer='glorot_normal', bias_initializer='zeros', name='Attention_Gated_Final_Layer') self.model_v.add(self.att_v_layer1) self.model_v.add(self.att_v_layer2) self.model_u.add(self.att_u_layer1) self.model_u.add(self.att_u_layer2) if dropout: self.model_v.add(tf.keras.layers.Dropout(dropout_rate, name='Dropout_V_Layer')) self.model_u.add(tf.keras.layers.Dropout(dropout_rate, name='Dropout_U_Layer')) self.model.add(self.att_layer_f) def att_model(self): attention_model = [self.compression_model, self.model_v, self.model_u, self.model] return attention_model def call(self, img_features): h = list() A = list() for i in img_features: c_imf = self.att_model()[0](i) h.append(c_imf) for j in h: att_v_output = self.att_model()[1](j) att_u_output = self.att_model()[2](j) att_input = tf.math.multiply(att_v_output, att_u_output) a = self.att_model()[3](att_input) A.append(a) return h, A
[ "tensorflow.math.multiply", "tensorflow.keras.layers.Dense", "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.Dropout" ]
MODEL/model_attention.py
[(15, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (16, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (18, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'dim_compress_features', 'activation': '"""relu"""', 'input_shape': '(dim_features,)', 'kernel_initializer': '"""glorot_normal"""', 'bias_initializer': '"""zeros"""', 'name': '"""Fully_Connected_Layer"""'}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'n_hidden_units', 'activation': '"""linear"""', 'input_shape': '(dim_compress_features,)', 'kernel_initializer': '"""glorot_normal"""', 'bias_initializer': '"""zeros"""', 'name': '"""Attention_layer1"""'}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'n_hidden_units', 'activation': '"""tanh"""', 'input_shape': '(dim_compress_features,)', 'kernel_initializer': '"""glorot_normal"""', 'bias_initializer': '"""zeros"""', 'name': '"""Attention_Layer2"""'}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'n_class', 'activation': '"""linear"""', 'input_shape': '(n_hidden_units,)', 'kernel_initializer': '"""glorot_normal"""', 'bias_initializer': '"""zeros"""', 'name': '"""Attention_Layer3"""'}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'dim_compress_features', 'activation': '"""relu"""', 'input_shape': '(dim_features,)', 'kernel_initializer': '"""glorot_normal"""', 'bias_initializer': '"""zeros"""', 'name': '"""Fully_Connected_Layer"""'}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'n_hidden_units', 'activation': '"""linear"""', 'input_shape': '(dim_compress_features,)', 'kernel_initializer': '"""glorot_normal"""', 'bias_initializer': '"""zeros"""', 'name': '"""Attention_V_Layer1"""'}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'n_hidden_units', 'activation': '"""tanh"""', 'input_shape': '(dim_compress_features,)', 'kernel_initializer': '"""glorot_normal"""', 'bias_initializer': '"""zeros"""', 'name': '"""Attention_V_Layer2"""'}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'n_hidden_units', 'activation': '"""linear"""', 'input_shape': '(dim_compress_features,)', 'kernel_initializer': '"""glorot_normal"""', 'bias_initializer': '"""zeros"""', 'name': '"""Attention_U_Layer1"""'}), True, 'import tensorflow as tf\n'), (120, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'n_hidden_units', 'activation': '"""sigmoid"""', 'input_shape': '(dim_compress_features,)', 'kernel_initializer': '"""glorot_normal"""', 'bias_initializer': '"""zeros"""', 'name': 
'"""Attention_U_Layer2"""'}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'n_class', 'activation': '"""linear"""', 'input_shape': '(n_hidden_units,)', 'kernel_initializer': '"""glorot_normal"""', 'bias_initializer': '"""zeros"""', 'name': '"""Attention_Gated_Final_Layer"""'}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.math.multiply', 'tf.math.multiply', (['att_v_output', 'att_u_output'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout_rate'], {'name': '"""Dropout_Layer"""'}), True, 'import tensorflow as tf\n'), (141, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout_rate'], {'name': '"""Dropout_V_Layer"""'}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout_rate'], {'name': '"""Dropout_U_Layer"""'}), True, 'import tensorflow as tf\n')]
jingyi7777/CasRx_guide_efficiency
c9e900e4c4a73215f09852bd621b30e8dcb039e8
import tensorflow as tf from kerastuner import HyperParameters from tensorflow import keras from models.layers import recurrent_dense, strided_down, encoder_down_block def guide_all_cnn_hyp_ninef_classi_model(num_strided_down=4,kernel=5,cnn_units=128, dense_units=128, recurrent_layers=8, noise=True): seq = keras.Input(shape=(30, 4)) other = keras.Input(shape=9) x = seq for _ in range(num_strided_down): x = strided_down(x, cnn_units, 1, kernel) if noise: x = keras.layers.GaussianNoise(.01)(x) x = keras.layers.Flatten()(x) x = keras.layers.Concatenate()([x, other]) x = keras.layers.Dense(dense_units, activation=tf.nn.leaky_relu)(x) for _ in range(recurrent_layers): x = recurrent_dense(x, dense_units) outputs = keras.layers.Dense(1)(x) # TODO: make a second output that is confidence, and have some allowance of reduced penalty # for low confidence wrong guesses, but overall penalty for low confidence return keras.Model(inputs=[seq,other], outputs=outputs) def guide_all_cnn_hyp_ninef_classi_model_hp(hp: HyperParameters): kernel= hp.Choice('kernel',[3,4,5]) cnn_units = hp.Choice('cnn_units', [8,16,32,64]) dense_units = hp.Choice('dense_units', [8,16,32,64]) num_strided_down = hp.Int('num_strided_down', 3,5) recurrent_layers = hp.Choice('num_recurrent_layers', [0,1,2,3]) noise = True # hp.Boolean('use_noise') model = guide_all_cnn_hyp_ninef_classi_model(num_strided_down=num_strided_down, kernel=kernel, cnn_units=cnn_units, dense_units=dense_units, recurrent_layers=recurrent_layers, noise=noise) #metrics = [keras.metrics.MeanAbsoluteError(), keras.metrics.MeanSquaredError()] metrics = ['accuracy'] model.compile(keras.optimizers.Adam(), tf.losses.binary_crossentropy, metrics=metrics) #model.compile(keras.optimizers.Adam(), tf.keras.losses.MeanSquaredError(), metrics=metrics) return model
[ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.Input", "tensorflow.keras.layers.Dense", "tensorflow.keras.Model", "tensorflow.keras.layers.GaussianNoise", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.Flatten" ]
models/Deep-learning/models/guide_all_cnn_hyp_ninef_classi_model.py
[(9, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(30, 4)'}), False, 'from tensorflow import keras\n'), (10, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(9)'}), False, 'from tensorflow import keras\n'), (29, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[seq, other]', 'outputs': 'outputs'}), False, 'from tensorflow import keras\n'), (14, 'models.layers.strided_down', 'strided_down', (['x', 'cnn_units', '(1)', 'kernel'], {}), False, 'from models.layers import recurrent_dense, strided_down, encoder_down_block\n'), (18, 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), False, 'from tensorflow import keras\n'), (20, 'tensorflow.keras.layers.Concatenate', 'keras.layers.Concatenate', ([], {}), False, 'from tensorflow import keras\n'), (22, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['dense_units'], {'activation': 'tf.nn.leaky_relu'}), False, 'from tensorflow import keras\n'), (24, 'models.layers.recurrent_dense', 'recurrent_dense', (['x', 'dense_units'], {}), False, 'from models.layers import recurrent_dense, strided_down, encoder_down_block\n'), (26, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {}), False, 'from tensorflow import keras\n'), (47, 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), False, 'from tensorflow import keras\n'), (16, 'tensorflow.keras.layers.GaussianNoise', 'keras.layers.GaussianNoise', (['(0.01)'], {}), False, 'from tensorflow import keras\n')]
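The model above fuses a one-hot encoded 30-nt guide sequence with 9 scalar features through a flatten-and-concatenate step. Since the repo's strided_down and recurrent_dense helpers are not part of this record, here is a self-contained sketch of the same two-input fusion pattern with plain strided Conv1D layers standing in for those helpers; all layer sizes are illustrative.

import tensorflow as tf
from tensorflow import keras

seq = keras.Input(shape=(30, 4))   # one-hot guide sequence
other = keras.Input(shape=(9,))    # scalar guide features

x = keras.layers.Conv1D(32, 5, strides=2, padding='same', activation='relu')(seq)
x = keras.layers.Conv1D(32, 5, strides=2, padding='same', activation='relu')(x)
x = keras.layers.Flatten()(x)
x = keras.layers.Concatenate()([x, other])
x = keras.layers.Dense(64, activation='relu')(x)
out = keras.layers.Dense(1)(x)     # logit output; paired with from_logits=True below

model = keras.Model([seq, other], out)
model.compile(keras.optimizers.Adam(),
              keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])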
zoeleeee/mnist_challenge
8a98f7dde35ee1d7a1fb77e85ca931000fb71631
#CUDA_VISIBLE_DEVICES=0 python keras_rnd_multi_eval.py 0.9 window 16 1 100 0 4 configs/ from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import math import os import sys import time import tensorflow as tf from tensorflow import keras from tensorflow.examples.tutorials.mnist import input_data from utils import * import numpy as np conf = sys.argv[-1] nb_models = int(sys.argv[-2]) t = int(sys.argv[-3]) nb_imgs = int(sys.argv[-4]) st_imgs = int(sys.argv[-5]) input_bytes = eval(sys.argv[-6]) _type = sys.argv[-7] _t = eval(sys.argv[-8]) #dataset = sys.argv[-2] # Global constants with open(conf) as config_file: config = json.load(config_file) num_eval_examples = config['num_eval_examples'] eval_batch_size = config['eval_batch_size'] eval_on_cpu = config['eval_on_cpu'] nb_labels = config['num_labels'] st_lab = config['start_label'] rep = np.load('2_label_permutation.npy')[st_lab:st_lab+nb_labels*nb_models].T rep[rep==0] = -1 nb_channel = int(config['permutation'].split('_')[1].split('.')[1]) nb_label = config['num_labels'] #if dataset == 'origin.npy': # imgs, labels, input_shape = load_data(config['permutation'], config['num_labels']) labels = np.load('data/mnist_labels.npy') imgs = np.load('data/mnist_data.npy').transpose((0,2,3,1)) permut = np.load(config['permutation']) # labels = np.array([rep[i] for i in labels]).astype(np.float32) x_train, y_train = imgs[:60000], labels[:60000] x_test, y_test = imgs[-nb_imgs-st_imgs:-st_imgs], labels[-nb_imgs-st_imgs:-st_imgs] if len(x_test.shape) == 3: x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1) print(x_test.shape, len(x_test)) def custom_loss(): def loss(y_true, y_pred): if config['loss_func'] == 'bce': _loss = keras.losses.BinaryCrossentropy() return _loss(y_true, tf.nn.sigmoid(y_pred)) elif config['loss_func'] == 'xent': _loss = keras.losses.SparseCategoricalCrossentropy() return _loss(y_true, tf.nn.softmax(y_pred)) return loss models = [] if _type == 'window': model_var = '_window' + str(input_bytes) elif _type == 'slide4': model_var = '_slide'+str(input_bytes) for i in range(nb_models): with open(conf) as config_file: config = json.load(config_file) model_dir = config['model_dir'] models.append(keras.models.load_model(model_dir+model_var+'.h5', custom_objects={ 'custom_loss': custom_loss(), 'loss':custom_loss() }, compile=False)) conf = conf[:conf.find(conf.split('_')[-1])]+str(nb_labels*(i+1))+'.json' tot_advs_acc = np.zeros(len(y_test)) tot_amt = 0 change_advs_acc = [] rnd_imgs = np.zeros(imgs[-nb_imgs-st_imgs:-st_imgs].shape) print(rnd_imgs.shape, x_test.shape) while True: if np.mean(tot_advs_acc) == 1.: print(tot_amt, 'totally attacked succeed!') np.save('preds/rnd_'+model_dir.split('/')[-1]+'.npy', change_advs_acc) break elif tot_amt == 1e5: np.save('preds/rnd_'+model_dir.split('/')[-1]+'.npy', change_advs_acc) print(tot_amt, 'total adversarial acc:', tot_advs_acc) break else: tot_amt += 1 # noise = x_test noise = np.clip(np.random.randint(-1*int(config['epsilon']*255), int(config['epsilon']*255), x_test.shape)+x_test, 0, 255).astype(np.int) if _type == 'window': x_input = [window_perm_sliding_img_AES(nb_channel, noise, st_lab+i*nb_label, input_bytes) for i in range(nb_models)] if _type == 'slide4': x_input = [four_pixel_perm_sliding_img_AES(nb_channel, noise, st_lab+i*nb_label, input_bytes) for i in range(nb_models)] # samples = np.array([[[permut[d[0]] for d in c] for c in b] for b in noise]) # x_input = [samples[i].astype(np.float32) / 
255. for i in range(len(models))] scores = [] for i in range(nb_models): scores.append(models[i].predict(x_input[i], batch_size=eval_batch_size)) scores = np.hstack(scores) nat_labels = np.zeros(scores.shape) nat_labels[scores>=_t] = 1. if _t == .5: nat_labels[scores<1-_t] = -1 else: nat_labels[scores <= 1-_t] = -1 preds, preds_dist, preds_score = [], [], [] print(scores.shape) for i in range(len(nat_labels)): tmp = np.repeat([nat_labels[i]], rep.shape[0], axis=0) dists = np.sum(np.absolute(tmp-rep), axis=-1) min_dist = np.min(dists) pred_labels = np.arange(len(dists))[dists==min_dist] pred_scores = [np.sum([scores[i][k] if rep[j][k]==1 else 1-scores[i][k] for k in np.arange(len(scores[i]))]) for j in pred_labels] pred_label = pred_labels[np.argmax(pred_scores)] preds.append(pred_label) preds_dist.append(dists[pred_label]) preds_score.append(np.max(pred_scores)) error_idxs = np.arange(len(preds))[preds != y_test] preds = np.array(preds) preds_dist = np.array(preds_dist) tot_advs_acc[error_idxs[preds_dist[preds!=y_test]<= t]] = 1. print(rnd_imgs.shape, noise.shape) rnd_imgs[error_idxs[preds_dist[preds!=y_test]<= t]] = noise[error_idxs[preds_dist[preds!=y_test]<= t]] change_advs_acc.append(np.mean(tot_advs_acc)) if tot_amt % 1000 == 0: np.save('advs/rnd_'+model_dir.split('/')[-1]+model_var+'.npy', rnd_imgs) print('{} error rate per time: {:.2f}%; right rate: {:.2f}%; total adversarial acc:{}%'.format(tot_amt, np.sum(preds_dist[preds!=y_test] <= t)/len(preds)*100, np.sum(preds_dist[preds==y_test] <= t)/len(preds)*100, np.mean(tot_advs_acc)*100))
[ "numpy.hstack", "tensorflow.nn.softmax", "tensorflow.nn.sigmoid", "numpy.absolute", "numpy.min", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.losses.BinaryCrossentropy", "numpy.max", "numpy.argmax", "numpy.mean", "numpy.load", "numpy.repeat", "numpy.array", "numpy.zeros", "numpy.sum" ]
keras_rnd_multi_eval.py
[(43, 'numpy.load', 'np.load', (['"""data/mnist_labels.npy"""'], {}), True, 'import numpy as np\n'), (45, 'numpy.load', 'np.load', (["config['permutation']"], {}), True, 'import numpy as np\n'), (79, 'numpy.zeros', 'np.zeros', (['imgs[-nb_imgs - st_imgs:-st_imgs].shape'], {}), True, 'import numpy as np\n'), (31, 'json.load', 'json.load', (['config_file'], {}), False, 'import json\n'), (37, 'numpy.load', 'np.load', (['"""2_label_permutation.npy"""'], {}), True, 'import numpy as np\n'), (44, 'numpy.load', 'np.load', (['"""data/mnist_data.npy"""'], {}), True, 'import numpy as np\n'), (71, 'json.load', 'json.load', (['config_file'], {}), False, 'import json\n'), (82, 'numpy.mean', 'np.mean', (['tot_advs_acc'], {}), True, 'import numpy as np\n'), (57, 'tensorflow.keras.losses.BinaryCrossentropy', 'keras.losses.BinaryCrossentropy', ([], {}), False, 'from tensorflow import keras\n'), (103, 'numpy.hstack', 'np.hstack', (['scores'], {}), True, 'import numpy as np\n'), (104, 'numpy.zeros', 'np.zeros', (['scores.shape'], {}), True, 'import numpy as np\n'), (125, 'numpy.array', 'np.array', (['preds'], {}), True, 'import numpy as np\n'), (126, 'numpy.array', 'np.array', (['preds_dist'], {}), True, 'import numpy as np\n'), (58, 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['y_pred'], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {}), False, 'from tensorflow import keras\n'), (114, 'numpy.repeat', 'np.repeat', (['[nat_labels[i]]', 'rep.shape[0]'], {'axis': '(0)'}), True, 'import numpy as np\n'), (116, 'numpy.min', 'np.min', (['dists'], {}), True, 'import numpy as np\n'), (130, 'numpy.mean', 'np.mean', (['tot_advs_acc'], {}), True, 'import numpy as np\n'), (61, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['y_pred'], {}), True, 'import tensorflow as tf\n'), (115, 'numpy.absolute', 'np.absolute', (['(tmp - rep)'], {}), True, 'import numpy as np\n'), (122, 'numpy.max', 'np.max', (['pred_scores'], {}), True, 'import numpy as np\n'), (119, 'numpy.argmax', 'np.argmax', (['pred_scores'], {}), True, 'import numpy as np\n'), (133, 'numpy.mean', 'np.mean', (['tot_advs_acc'], {}), True, 'import numpy as np\n'), (133, 'numpy.sum', 'np.sum', (['(preds_dist[preds != y_test] <= t)'], {}), True, 'import numpy as np\n'), (133, 'numpy.sum', 'np.sum', (['(preds_dist[preds == y_test] <= t)'], {}), True, 'import numpy as np\n')]
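The evaluation script above decodes a prediction by thresholding the per-head sigmoid scores into {-1, 0, +1} and picking the class whose ±1 code word is nearest in L1 distance (undecided heads, left at 0, cost 1 against every code bit). A small numpy sketch of that decoding step with a made-up 4-bit code for 3 classes; the script's additional tie-breaking by summed score is omitted here.

import numpy as np

rep = np.array([[ 1, -1,  1,  1],    # code word for class 0
                [-1,  1,  1, -1],    # class 1
                [ 1,  1, -1, -1]])   # class 2

scores = np.array([0.9, 0.2, 0.7, 0.8])  # sigmoid outputs of the binary heads
t = 0.9                                  # confidence threshold

labels = np.zeros_like(scores)
labels[scores >= t] = 1.0
labels[scores <= 1 - t] = -1.0           # heads between the thresholds stay 0

dists = np.sum(np.abs(rep - labels), axis=-1)
pred = int(np.argmin(dists))
print(pred, dists)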
giulatona/keras-io
c441a1f7dd7310c773125242e16769aef8ff65f6
""" Title: Classification with Neural Decision Forests Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/) Date created: 2021/01/15 Last modified: 2021/01/15 Description: How to train differentiable decision trees for end-to-end learning in deep neural networks. """ """ ## Introduction This example provides an implementation of the [Deep Neural Decision Forest](https://ieeexplore.ieee.org/document/7410529) model introduced by P. Kontschieder et al. for structured data classification. It demonstrates how to build a stochastic and differentiable decision tree model, train it end-to-end, and unify decision trees with deep representation learning. ## The dataset This example uses the [United States Census Income Dataset](https://archive.ics.uci.edu/ml/datasets/census+income) provided by the [UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php). The task is binary classification to predict whether a person is likely to be making over USD 50,000 a year. The dataset includes 48,842 instances with 14 input features (such as age, work class, education, occupation, and so on): 5 numerical features and 9 categorical features. """ """ ## Setup """ import tensorflow as tf import numpy as np import pandas as pd from tensorflow import keras from tensorflow.keras import layers import math """ ## Prepare the data """ CSV_HEADER = [ "age", "workclass", "fnlwgt", "education", "education_num", "marital_status", "occupation", "relationship", "race", "gender", "capital_gain", "capital_loss", "hours_per_week", "native_country", "income_bracket", ] train_data_url = ( "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data" ) train_data = pd.read_csv(train_data_url, header=None, names=CSV_HEADER) test_data_url = ( "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test" ) test_data = pd.read_csv(test_data_url, header=None, names=CSV_HEADER) print(f"Train dataset shape: {train_data.shape}") print(f"Test dataset shape: {test_data.shape}") """ Remove the first record (because it is not a valid data example) and a trailing 'dot' in the class labels. """ test_data = test_data[1:] test_data.income_bracket = test_data.income_bracket.apply( lambda value: value.replace(".", "") ) """ We store the training and test data splits locally as CSV files. """ train_data_file = "train_data.csv" test_data_file = "test_data.csv" train_data.to_csv(train_data_file, index=False, header=False) test_data.to_csv(test_data_file, index=False, header=False) """ ## Define dataset metadata Here, we define the metadata of the dataset that will be useful for reading and parsing and encoding input features. """ # A list of the numerical feature names. NUMERIC_FEATURE_NAMES = [ "age", "education_num", "capital_gain", "capital_loss", "hours_per_week", ] # A dictionary of the categorical features and their vocabulary. CATEGORICAL_FEATURES_WITH_VOCABULARY = { "workclass": sorted(list(train_data["workclass"].unique())), "education": sorted(list(train_data["education"].unique())), "marital_status": sorted(list(train_data["marital_status"].unique())), "occupation": sorted(list(train_data["occupation"].unique())), "relationship": sorted(list(train_data["relationship"].unique())), "race": sorted(list(train_data["race"].unique())), "gender": sorted(list(train_data["gender"].unique())), "native_country": sorted(list(train_data["native_country"].unique())), } # A list of the columns to ignore from the dataset. 
IGNORE_COLUMN_NAMES = ["fnlwgt"]
# A list of the categorical feature names.
CATEGORICAL_FEATURE_NAMES = list(CATEGORICAL_FEATURES_WITH_VOCABULARY.keys())
# A list of all the input features.
FEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES
# A list of column default values for each feature.
COLUMN_DEFAULTS = [
    [0.0] if feature_name in NUMERIC_FEATURE_NAMES + IGNORE_COLUMN_NAMES else ["NA"]
    for feature_name in CSV_HEADER
]
# The name of the target feature.
TARGET_FEATURE_NAME = "income_bracket"
# A list of the labels of the target features.
TARGET_LABELS = [" <=50K", " >50K"]

"""
## Create `tf.data.Dataset` objects for training and validation

We create an input function to read and parse the file, and convert features and
labels into a [`tf.data.Dataset`](https://www.tensorflow.org/guide/datasets) for
training and validation. We also preprocess the input by mapping the target label
to an index.
"""

from tensorflow.keras.layers.experimental.preprocessing import StringLookup

target_label_lookup = StringLookup(
    vocabulary=TARGET_LABELS, mask_token=None, num_oov_indices=0
)


def get_dataset_from_csv(csv_file_path, shuffle=False, batch_size=128):
    dataset = tf.data.experimental.make_csv_dataset(
        csv_file_path,
        batch_size=batch_size,
        column_names=CSV_HEADER,
        column_defaults=COLUMN_DEFAULTS,
        label_name=TARGET_FEATURE_NAME,
        num_epochs=1,
        header=False,
        na_value="?",
        shuffle=shuffle,
    ).map(lambda features, target: (features, target_label_lookup(target)))
    return dataset.cache()


"""
## Create model inputs
"""


def create_model_inputs():
    inputs = {}
    for feature_name in FEATURE_NAMES:
        if feature_name in NUMERIC_FEATURE_NAMES:
            inputs[feature_name] = layers.Input(
                name=feature_name, shape=(), dtype=tf.float32
            )
        else:
            inputs[feature_name] = layers.Input(
                name=feature_name, shape=(), dtype=tf.string
            )
    return inputs


"""
## Encode input features
"""

from tensorflow.keras.layers.experimental.preprocessing import CategoryEncoding
from tensorflow.keras.layers.experimental.preprocessing import StringLookup


def encode_inputs(inputs, use_embedding=False):
    encoded_features = []
    for feature_name in inputs:
        if feature_name in CATEGORICAL_FEATURE_NAMES:
            vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
            # Create a lookup to convert string values to integer indices.
            # Since we are not using a mask token, nor expecting any out of vocabulary
            # (oov) token, we set mask_token to None and num_oov_indices to 0.
            index = StringLookup(
                vocabulary=vocabulary, mask_token=None, num_oov_indices=0
            )
            # Convert the string input values into integer indices.
            value_index = index(inputs[feature_name])
            if use_embedding:
                embedding_dims = int(math.sqrt(len(vocabulary)))
                # Create an embedding layer with the specified dimensions.
                embedding_encoder = layers.Embedding(
                    input_dim=len(vocabulary), output_dim=embedding_dims
                )
                # Convert the index values to embedding representations.
                encoded_feature = embedding_encoder(value_index)
            else:
                # Create a one-hot encoder.
                onehot_encoder = CategoryEncoding(output_mode="binary")
                onehot_encoder.adapt(index(vocabulary))
                # Convert the index values to a one-hot representation.
                encoded_feature = onehot_encoder(value_index)
        else:
            # Use the numerical features as-is.
            encoded_feature = inputs[feature_name]
            if inputs[feature_name].shape[-1] is None:
                encoded_feature = tf.expand_dims(encoded_feature, -1)

        encoded_features.append(encoded_feature)

    encoded_features = layers.concatenate(encoded_features)
    return encoded_features


"""
## Deep Neural Decision Tree

A neural decision tree model has two sets of weights to learn. The first set is `pi`,
which represents the probability distribution of the classes in the tree leaves. The
second set is the weights of the routing layer `decision_fn`, which represents the
probability of going to each leaf. The forward pass of the model works as follows:

1. The model expects input `features` as a single vector encoding all the features of
an instance in the batch. This vector can be generated from a Convolutional Neural
Network (CNN) applied to images or dense transformations applied to structured data
features.
2. The model first applies a `used_features_mask` to randomly select a subset of input
features to use.
3. Then, the model computes the probabilities (`mu`) for the input instances to reach
the tree leaves by iteratively performing a *stochastic* routing throughout the tree
levels.
4. Finally, the probabilities of reaching the leaves are combined with the class
probabilities at the leaves to produce the final `outputs`.
"""


class NeuralDecisionTree(keras.Model):
    def __init__(self, depth, num_features, used_features_rate, num_classes):
        super(NeuralDecisionTree, self).__init__()
        self.depth = depth
        self.num_leaves = 2 ** depth
        self.num_classes = num_classes

        # Create a mask for the randomly selected features.
        num_used_features = int(num_features * used_features_rate)
        one_hot = np.eye(num_features)
        sampled_feature_indices = np.random.choice(
            np.arange(num_features), num_used_features, replace=False
        )
        self.used_features_mask = one_hot[sampled_feature_indices]

        # Initialize the weights of the classes in leaves.
        self.pi = tf.Variable(
            initial_value=tf.random_normal_initializer()(
                shape=[self.num_leaves, self.num_classes]
            ),
            dtype="float32",
            trainable=True,
        )

        # Initialize the stochastic routing layer.
        self.decision_fn = layers.Dense(
            units=self.num_leaves, activation="sigmoid", name="decision"
        )

    def call(self, features):
        batch_size = tf.shape(features)[0]

        # Apply the feature mask to the input features.
        features = tf.matmul(
            features, self.used_features_mask, transpose_b=True
        )  # [batch_size, num_used_features]
        # Compute the routing probabilities.
        decisions = tf.expand_dims(
            self.decision_fn(features), axis=2
        )  # [batch_size, num_leaves, 1]
        # Concatenate the routing probabilities with their complements.
        decisions = layers.concatenate(
            [decisions, 1 - decisions], axis=2
        )  # [batch_size, num_leaves, 2]

        mu = tf.ones([batch_size, 1, 1])

        begin_idx = 1
        end_idx = 2
        # Traverse the tree in breadth-first order.
        for level in range(self.depth):
            mu = tf.reshape(mu, [batch_size, -1, 1])  # [batch_size, 2 ** level, 1]
            mu = tf.tile(mu, (1, 1, 2))  # [batch_size, 2 ** level, 2]
            level_decisions = decisions[
                :, begin_idx:end_idx, :
            ]  # [batch_size, 2 ** level, 2]
            mu = mu * level_decisions  # [batch_size, 2**level, 2]

            begin_idx = end_idx
            end_idx = begin_idx + 2 ** (level + 1)

        mu = tf.reshape(mu, [batch_size, self.num_leaves])  # [batch_size, num_leaves]
        probabilities = keras.activations.softmax(self.pi)  # [num_leaves, num_classes]
        outputs = tf.matmul(mu, probabilities)  # [batch_size, num_classes]
        return outputs


"""
## Deep Neural Decision Forest

The neural decision forest model consists of a set of neural decision trees that are
trained simultaneously.
The output of the forest model is the average outputs of its trees. """ class NeuralDecisionForest(keras.Model): def __init__(self, num_trees, depth, num_features, used_features_rate, num_classes): super(NeuralDecisionForest, self).__init__() self.ensemble = [] # Initialize the ensemble by adding NeuralDecisionTree instances. # Each tree will have its own randomly selected input features to use. for _ in range(num_trees): self.ensemble.append( NeuralDecisionTree(depth, num_features, used_features_rate, num_classes) ) def call(self, inputs): # Initialize the outputs: a [batch_size, num_classes] matrix of zeros. batch_size = tf.shape(inputs)[0] outputs = tf.zeros([batch_size, num_classes]) # Aggregate the outputs of trees in the ensemble. for tree in self.ensemble: outputs += tree(inputs) # Divide the outputs by the ensemble size to get the average. outputs /= len(self.ensemble) return outputs """ Finally, let's set up the code that will train and evaluate the model. """ learning_rate = 0.01 batch_size = 265 num_epochs = 10 hidden_units = [64, 64] def run_experiment(model): model.compile( optimizer=keras.optimizers.Adam(learning_rate=learning_rate), loss=keras.losses.SparseCategoricalCrossentropy(), metrics=[keras.metrics.SparseCategoricalAccuracy()], ) print("Start training the model...") train_dataset = get_dataset_from_csv( train_data_file, shuffle=True, batch_size=batch_size ) model.fit(train_dataset, epochs=num_epochs) print("Model training finished") print("Evaluating the model on the test data...") test_dataset = get_dataset_from_csv(test_data_file, batch_size=batch_size) _, accuracy = model.evaluate(test_dataset) print(f"Test accuracy: {round(accuracy * 100, 2)}%") """ ## Experiment 1: train a decision tree model In this experiment, we train a single neural decision tree model where we use all input features. """ num_trees = 10 depth = 10 used_features_rate = 1.0 num_classes = len(TARGET_LABELS) def create_tree_model(): inputs = create_model_inputs() features = encode_inputs(inputs, use_embedding=True) features = layers.BatchNormalization()(features) num_features = features.shape[1] tree = NeuralDecisionTree(depth, num_features, used_features_rate, num_classes) outputs = tree(features) model = keras.Model(inputs=inputs, outputs=outputs) return model tree_model = create_tree_model() run_experiment(tree_model) """ ## Experiment 2: train a forest model In this experiment, we train a neural decision forest with `num_trees` trees where each tree uses randomly selected 50% of the input features. You can control the number of features to be used in each tree by setting the `used_features_rate` variable. In addition, we set the depth to 5 instead of 10 compared to the previous experiment. """ num_trees = 25 depth = 5 used_features_rate = 0.5 def create_forest_model(): inputs = create_model_inputs() features = encode_inputs(inputs, use_embedding=True) features = layers.BatchNormalization()(features) num_features = features.shape[1] forest_model = NeuralDecisionForest( num_trees, depth, num_features, used_features_rate, num_classes ) outputs = forest_model(features) model = keras.Model(inputs=inputs, outputs=outputs) return model forest_model = create_forest_model() run_experiment(forest_model)
[ "tensorflow.zeros", "tensorflow.keras.activations.softmax", "pandas.read_csv", "numpy.arange", "numpy.eye", "tensorflow.keras.layers.experimental.preprocessing.CategoryEncoding", "tensorflow.random_normal_initializer", "tensorflow.tile", "tensorflow.matmul", "tensorflow.keras.layers.experimental.preprocessing.StringLookup", "tensorflow.shape", "tensorflow.keras.layers.Dense", "tensorflow.keras.Model", "tensorflow.data.experimental.make_csv_dataset", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.reshape", "tensorflow.keras.layers.concatenate", "tensorflow.ones", "tensorflow.expand_dims", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.keras.layers.Input" ]
examples/structured_data/deep_neural_decision_forests.py
[(67, 'pandas.read_csv', 'pd.read_csv', (['train_data_url'], {'header': 'None', 'names': 'CSV_HEADER'}), True, 'import pandas as pd\n'), (72, 'pandas.read_csv', 'pd.read_csv', (['test_data_url'], {'header': 'None', 'names': 'CSV_HEADER'}), True, 'import pandas as pd\n'), (150, 'tensorflow.keras.layers.experimental.preprocessing.StringLookup', 'StringLookup', ([], {'vocabulary': 'TARGET_LABELS', 'mask_token': 'None', 'num_oov_indices': '(0)'}), False, 'from tensorflow.keras.layers.experimental.preprocessing import StringLookup\n'), (232, 'tensorflow.keras.layers.concatenate', 'layers.concatenate', (['encoded_features'], {}), False, 'from tensorflow.keras import layers\n'), (408, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), False, 'from tensorflow import keras\n'), (441, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), False, 'from tensorflow import keras\n'), (264, 'numpy.eye', 'np.eye', (['num_features'], {}), True, 'import numpy as np\n'), (280, 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.num_leaves', 'activation': '"""sigmoid"""', 'name': '"""decision"""'}), False, 'from tensorflow.keras import layers\n'), (288, 'tensorflow.matmul', 'tf.matmul', (['features', 'self.used_features_mask'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (296, 'tensorflow.keras.layers.concatenate', 'layers.concatenate', (['[decisions, 1 - decisions]'], {'axis': '(2)'}), False, 'from tensorflow.keras import layers\n'), (300, 'tensorflow.ones', 'tf.ones', (['[batch_size, 1, 1]'], {}), True, 'import tensorflow as tf\n'), (315, 'tensorflow.reshape', 'tf.reshape', (['mu', '[batch_size, self.num_leaves]'], {}), True, 'import tensorflow as tf\n'), (316, 'tensorflow.keras.activations.softmax', 'keras.activations.softmax', (['self.pi'], {}), False, 'from tensorflow import keras\n'), (317, 'tensorflow.matmul', 'tf.matmul', (['mu', 'probabilities'], {}), True, 'import tensorflow as tf\n'), (343, 'tensorflow.zeros', 'tf.zeros', (['[batch_size, num_classes]'], {}), True, 'import tensorflow as tf\n'), (402, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), False, 'from tensorflow.keras import layers\n'), (433, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), False, 'from tensorflow.keras import layers\n'), (156, 'tensorflow.data.experimental.make_csv_dataset', 'tf.data.experimental.make_csv_dataset', (['csv_file_path'], {'batch_size': 'batch_size', 'column_names': 'CSV_HEADER', 'column_defaults': 'COLUMN_DEFAULTS', 'label_name': 'TARGET_FEATURE_NAME', 'num_epochs': '(1)', 'header': '(False)', 'na_value': '"""?"""', 'shuffle': 'shuffle'}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'name': 'feature_name', 'shape': '()', 'dtype': 'tf.float32'}), False, 'from tensorflow.keras import layers\n'), (183, 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'name': 'feature_name', 'shape': '()', 'dtype': 'tf.string'}), False, 'from tensorflow.keras import layers\n'), (205, 'tensorflow.keras.layers.experimental.preprocessing.StringLookup', 'StringLookup', ([], {'vocabulary': 'vocabulary', 'mask_token': 'None', 'num_oov_indices': '(0)'}), False, 'from tensorflow.keras.layers.experimental.preprocessing import StringLookup\n'), (266, 'numpy.arange', 'np.arange', (['num_features'], {}), True, 'import numpy as np\n'), (285, 'tensorflow.shape', 'tf.shape', (['features'], {}), True, 
'import tensorflow as tf\n'), (306, 'tensorflow.reshape', 'tf.reshape', (['mu', '[batch_size, -1, 1]'], {}), True, 'import tensorflow as tf\n'), (307, 'tensorflow.tile', 'tf.tile', (['mu', '(1, 1, 2)'], {}), True, 'import tensorflow as tf\n'), (342, 'tensorflow.shape', 'tf.shape', (['inputs'], {}), True, 'import tensorflow as tf\n'), (366, 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), False, 'from tensorflow import keras\n'), (367, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {}), False, 'from tensorflow import keras\n'), (220, 'tensorflow.keras.layers.experimental.preprocessing.CategoryEncoding', 'CategoryEncoding', ([], {'output_mode': '"""binary"""'}), False, 'from tensorflow.keras.layers.experimental.preprocessing import CategoryEncoding\n'), (228, 'tensorflow.expand_dims', 'tf.expand_dims', (['encoded_feature', '(-1)'], {}), True, 'import tensorflow as tf\n'), (368, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'keras.metrics.SparseCategoricalAccuracy', ([], {}), False, 'from tensorflow import keras\n'), (272, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {}), True, 'import tensorflow as tf\n')]
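The api_extract rows above pair each call site with (line number, fully-qualified API name, the call as written locally, a (positional args, keyword args) pair rendered as source strings, a flag for whether the name came from a plain `import x [as y]` versus `from x import y`, and the originating import statement). A minimal sketch of how such tuples could be recovered with Python's `ast` module — this is an illustration only, not the dataset's actual extraction pipeline, and `extract_api_calls` is a hypothetical helper name:

import ast

def extract_api_calls(source: str):
    """Yield (lineno, qualified_api, local_call, (args, kwargs), module_style, import_stmt)."""
    tree = ast.parse(source)
    bindings = {}  # local name -> (qualified prefix, module-style import?, import statement)
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for a in node.names:
                local = a.asname or a.name.split(".")[0]
                stmt = "import " + a.name + ((" as " + a.asname) if a.asname else "")
                bindings[local] = (a.name, True, stmt + "\n")
        elif isinstance(node, ast.ImportFrom) and node.module:
            rendered = ", ".join(
                (a.name + " as " + a.asname) if a.asname else a.name for a in node.names)
            for a in node.names:
                bindings[a.asname or a.name] = (
                    node.module + "." + a.name, False,
                    "from " + node.module + " import " + rendered + "\n")

    def dotted(func):
        # Flatten e.g. pd.read_csv or tf.keras.layers.Dense into name parts.
        if isinstance(func, ast.Name):
            return [func.id]
        if isinstance(func, ast.Attribute):
            base = dotted(func.value)
            return base + [func.attr] if base else None
        return None

    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            parts = dotted(node.func)
            if not parts or parts[0] not in bindings:
                continue
            prefix, module_style, stmt = bindings[parts[0]]
            args = [ast.unparse(a) for a in node.args]          # Python 3.9+
            kwargs = {k.arg: ast.unparse(k.value) for k in node.keywords if k.arg}
            yield (node.lineno, ".".join([prefix] + parts[1:]),
                   ".".join(parts), (args, kwargs), module_style, stmt)

Run over the model.py record below, for example, this would map K.random_normal(shape=(batch, dim)) to 'tensorflow.keras.backend.random_normal' with module_style True, matching the shape of the stored annotations.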
vrushabhchauhan/MRI_COPY
f386b24660adbf3486df7040d526e6c4d29dabf7
# Keras implementation of the paper:
# 3D MRI Brain Tumor Segmentation Using Autoencoder Regularization
# by Myronenko A. (https://arxiv.org/pdf/1810.11654.pdf)
# Author of this code: Suyog Jadhav (https://github.com/IAmSUyogJadhav)

import tensorflow.keras.backend as K
from tensorflow.keras.losses import mse
from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense
from tensorflow.keras.layers import Input, Reshape, Flatten, Dropout, SpatialDropout3D
from tensorflow.keras.optimizers import adam
from tensorflow.keras.models import Model
try:
    from group_norm import GroupNormalization
except ImportError:
    import urllib.request
    print('Downloading group_norm.py in the current directory...')
    url = 'https://raw.githubusercontent.com/titu1994/Keras-Group-Normalization/master/group_norm.py'
    urllib.request.urlretrieve(url, "group_norm.py")
    from group_norm import GroupNormalization


def green_block(inp, filters, data_format='channels_first', name=None):
    """
    green_block(inp, filters, name=None)
    ------------------------------------
    Implementation of the special residual block used in the paper. The block
    consists of two (GroupNorm --> ReLu --> 3x3x3 non-strided Convolution)
    units, with a residual connection from the input `inp` to the output. Used
    internally in the model. Can be used independently as well.

    Parameters
    ----------
    `inp`: A keras.layers.Layer instance, required
        The keras layer just preceding the green block.
    `filters`: integer, required
        No. of filters to use in the 3D convolutional block. The output
        layer of this green block will have this many channels.
    `data_format`: string, optional
        The format of the input data. Must be either 'channels_first' or
        'channels_last'. Defaults to `channels_first`, as used in the paper.
    `name`: string, optional
        The name to be given to this green block. Defaults to None, in which
        case, keras uses generated names for the involved layers. If a string
        is provided, the names of individual layers are generated by attaching
        a relevant prefix from [GroupNorm_, Res_, Conv3D_, Relu_, ], followed
        by _1 or _2.

    Returns
    -------
    `out`: A keras.layers.Layer instance
        The output of the green block. Has no. of channels equal to `filters`.
        The size of the rest of the dimensions remains the same as in `inp`.
    """
    inp_res = Conv3D(
        filters=filters,
        kernel_size=(1, 1, 1),
        strides=1,
        data_format=data_format,
        name=f'Res_{name}' if name else None)(inp)

    # axis=1 for channels_first data format
    # No. of groups = 8, as given in the paper
    x = GroupNormalization(
        groups=8,
        axis=1 if data_format == 'channels_first' else 0,
        name=f'GroupNorm_1_{name}' if name else None)(inp)
    x = Activation('relu', name=f'Relu_1_{name}' if name else None)(x)
    x = Conv3D(
        filters=filters,
        kernel_size=(3, 3, 3),
        strides=1,
        padding='same',
        data_format=data_format,
        name=f'Conv3D_1_{name}' if name else None)(x)

    x = GroupNormalization(
        groups=8,
        axis=1 if data_format == 'channels_first' else 0,
        name=f'GroupNorm_2_{name}' if name else None)(x)
    x = Activation('relu', name=f'Relu_2_{name}' if name else None)(x)
    x = Conv3D(
        filters=filters,
        kernel_size=(3, 3, 3),
        strides=1,
        padding='same',
        data_format=data_format,
        name=f'Conv3D_2_{name}' if name else None)(x)

    out = Add(name=f'Out_{name}' if name else None)([x, inp_res])
    return out


# From keras-team/keras/blob/master/examples/variational_autoencoder.py
def sampling(args):
    """Reparameterization trick by sampling from an isotropic unit Gaussian.

    # Arguments
        args (tensor): mean and log of variance of Q(z|X)

    # Returns
        z (tensor): sampled latent vector
    """
    z_mean, z_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean = 0 and std = 1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_var) * epsilon


def dice_coefficient(y_true, y_pred):
    intersection = K.sum(K.abs(y_true * y_pred), axis=[-3,-2,-1])
    dn = K.sum(K.square(y_true) + K.square(y_pred), axis=[-3,-2,-1]) + 1e-8
    return K.mean(2 * intersection / dn, axis=[0,1])


def loss_gt(e=1e-8):
    """
    loss_gt(e=1e-8)
    ------------------------------------------------------
    Since keras does not allow custom loss functions to have arguments
    other than the true and predicted labels, this function acts as a wrapper
    that allows us to implement the custom loss used in the paper. This function
    only calculates - L<dice> term of the following equation. (i.e. GT Decoder part loss)

    L = - L<dice> + weight_L2 ∗ L<L2> + weight_KL ∗ L<KL>

    Parameters
    ----------
    `e`: Float, optional
        A small epsilon term added to the denominator to avoid dividing by
        zero and possible gradient explosion.

    Returns
    -------
    loss_gt_(y_true, y_pred): A custom keras loss function
        This function takes as input the predicted and ground truth labels, uses them
        to calculate the dice loss.
    """
    def loss_gt_(y_true, y_pred):
        intersection = K.sum(K.abs(y_true * y_pred), axis=[-3,-2,-1])
        dn = K.sum(K.square(y_true) + K.square(y_pred), axis=[-3,-2,-1]) + e

        return - K.mean(2 * intersection / dn, axis=[0,1])

    return loss_gt_


def loss_VAE(input_shape, z_mean, z_var, weight_L2=0.1, weight_KL=0.1):
    """
    loss_VAE(input_shape, z_mean, z_var, weight_L2=0.1, weight_KL=0.1)
    ------------------------------------------------------
    Since keras does not allow custom loss functions to have arguments
    other than the true and predicted labels, this function acts as a wrapper
    that allows us to implement the custom loss used in the paper. This function
    calculates the following equation, except for -L<dice> term. (i.e. VAE decoder part loss)

    L = - L<dice> + weight_L2 ∗ L<L2> + weight_KL ∗ L<KL>

    Parameters
    ----------
    `input_shape`: A 4-tuple, required
        The shape of an image as the tuple (c, H, W, D), where c is the no. of
        channels; H, W and D are the height, width and depth of the input image,
        respectively.
    `z_mean`: A keras.layers.Layer instance, required
        The vector representing values of mean for the learned distribution
        in the VAE part. Used internally.
    `z_var`: A keras.layers.Layer instance, required
        The vector representing values of variance for the learned distribution
        in the VAE part. Used internally.
    `weight_L2`: A real number, optional
        The weight to be given to the L2 loss term in the loss function. Adjust to get best
        results for your task. Defaults to 0.1.
    `weight_KL`: A real number, optional
        The weight to be given to the KL loss term in the loss function. Adjust to get best
        results for your task. Defaults to 0.1.

    Returns
    -------
    loss_VAE_(y_true, y_pred): A custom keras loss function
        This function takes as input the predicted and ground truth labels, uses them
        to calculate the L2 and KL loss.
    """
    def loss_VAE_(y_true, y_pred):
        c, H, W, D = input_shape
        n = c * H * W * D

        loss_L2 = K.mean(K.square(y_true - y_pred), axis=(1, 2, 3, 4)) # original axis value is (1,2,3,4).

        loss_KL = (1 / n) * K.sum(
            K.exp(z_var) + K.square(z_mean) - 1. - z_var,
            axis=-1
        )

        return weight_L2 * loss_L2 + weight_KL * loss_KL

    return loss_VAE_


def build_model(input_shape=(4, 160, 192, 128), output_channels=3, weight_L2=0.1, weight_KL=0.1, dice_e=1e-8):
    """
    build_model(input_shape=(4, 160, 192, 128), output_channels=3, weight_L2=0.1, weight_KL=0.1)
    -------------------------------------------
    Creates the model used in the BRATS2018 winning solution
    by Myronenko A. (https://arxiv.org/pdf/1810.11654.pdf)

    Parameters
    ----------
    `input_shape`: A 4-tuple, optional.
        Shape of the input image. Must be a 4D image of shape (c, H, W, D),
        where each of H, W and D is divisible by 2^4, and c is divisible by 4.
        Defaults to the crop size used in the paper, i.e., (4, 160, 192, 128).
    `output_channels`: An integer, optional.
        The no. of channels in the output. Defaults to 3 (BraTS 2018 format).
    `weight_L2`: A real number, optional
        The weight to be given to the L2 loss term in the loss function. Adjust to get best
        results for your task. Defaults to 0.1.
    `weight_KL`: A real number, optional
        The weight to be given to the KL loss term in the loss function. Adjust to get best
        results for your task. Defaults to 0.1.
    `dice_e`: Float, optional
        A small epsilon term added to the denominator of dice loss to avoid dividing
        by zero and possible gradient explosion. This argument will be passed to loss_gt function.

    Returns
    -------
    `model`: A keras.models.Model instance
        The created model.
    """
    c, H, W, D = input_shape
    assert len(input_shape) == 4, "Input shape must be a 4-tuple"
    assert (c % 4) == 0, "The no. of channels must be divisible by 4"
    assert (H % 16) == 0 and (W % 16) == 0 and (D % 16) == 0, \
        "All the input dimensions must be divisible by 16"

    # -------------------------------------------------------------------------
    # Encoder
    # -------------------------------------------------------------------------

    ## Input Layer
    inp = Input(input_shape)

    ## The Initial Block
    x = Conv3D(
        filters=32,
        kernel_size=(3, 3, 3),
        strides=1,
        padding='same',
        data_format='channels_first',
        name='Input_x1')(inp)

    ## Dropout (0.2)
    x = SpatialDropout3D(0.2, data_format='channels_first')(x)

    ## Green Block x1 (output filters = 32)
    x1 = green_block(x, 32, name='x1')
    x = Conv3D(
        filters=32,
        kernel_size=(3, 3, 3),
        strides=2,
        padding='same',
        data_format='channels_first',
        name='Enc_DownSample_32')(x1)

    ## Green Block x2 (output filters = 64)
    x = green_block(x, 64, name='Enc_64_1')
    x2 = green_block(x, 64, name='x2')
    x = Conv3D(
        filters=64,
        kernel_size=(3, 3, 3),
        strides=2,
        padding='same',
        data_format='channels_first',
        name='Enc_DownSample_64')(x2)

    ## Green Blocks x2 (output filters = 128)
    x = green_block(x, 128, name='Enc_128_1')
    x3 = green_block(x, 128, name='x3')
    x = Conv3D(
        filters=128,
        kernel_size=(3, 3, 3),
        strides=2,
        padding='same',
        data_format='channels_first',
        name='Enc_DownSample_128')(x3)

    ## Green Blocks x4 (output filters = 256)
    x = green_block(x, 256, name='Enc_256_1')
    x = green_block(x, 256, name='Enc_256_2')
    x = green_block(x, 256, name='Enc_256_3')
    x4 = green_block(x, 256, name='x4')

    # -------------------------------------------------------------------------
    # Decoder
    # -------------------------------------------------------------------------

    ## GT (Ground Truth) Part
    # -------------------------------------------------------------------------

    ### Green Block x1 (output filters=128)
    x = Conv3D(
        filters=128,
        kernel_size=(1, 1, 1),
        strides=1,
        data_format='channels_first',
        name='Dec_GT_ReduceDepth_128')(x4)
    x = UpSampling3D(
        size=2,
        data_format='channels_first',
        name='Dec_GT_UpSample_128')(x)
    x =
Add(name='Input_Dec_GT_128')([x, x3]) x = green_block(x, 128, name='Dec_GT_128') ### Green Block x1 (output filters=64) x = Conv3D( filters=64, kernel_size=(1, 1, 1), strides=1, data_format='channels_first', name='Dec_GT_ReduceDepth_64')(x) x = UpSampling3D( size=2, data_format='channels_first', name='Dec_GT_UpSample_64')(x) x = Add(name='Input_Dec_GT_64')([x, x2]) x = green_block(x, 64, name='Dec_GT_64') ### Green Block x1 (output filters=32) x = Conv3D( filters=32, kernel_size=(1, 1, 1), strides=1, data_format='channels_first', name='Dec_GT_ReduceDepth_32')(x) x = UpSampling3D( size=2, data_format='channels_first', name='Dec_GT_UpSample_32')(x) x = Add(name='Input_Dec_GT_32')([x, x1]) x = green_block(x, 32, name='Dec_GT_32') ### Blue Block x1 (output filters=32) x = Conv3D( filters=32, kernel_size=(3, 3, 3), strides=1, padding='same', data_format='channels_first', name='Input_Dec_GT_Output')(x) ### Output Block out_GT = Conv3D( filters=output_channels, # No. of tumor classes is 3 kernel_size=(1, 1, 1), strides=1, data_format='channels_first', activation='sigmoid', name='Dec_GT_Output')(x) ## VAE (Variational Auto Encoder) Part # ------------------------------------------------------------------------- ### VD Block (Reducing dimensionality of the data) x = GroupNormalization(groups=8, axis=1, name='Dec_VAE_VD_GN')(x4) x = Activation('relu', name='Dec_VAE_VD_relu')(x) x = Conv3D( filters=16, kernel_size=(3, 3, 3), strides=2, padding='same', data_format='channels_first', name='Dec_VAE_VD_Conv3D')(x) # Not mentioned in the paper, but the author used a Flattening layer here. x = Flatten(name='Dec_VAE_VD_Flatten')(x) x = Dense(256, name='Dec_VAE_VD_Dense')(x) ### VDraw Block (Sampling) z_mean = Dense(128, name='Dec_VAE_VDraw_Mean')(x) z_var = Dense(128, name='Dec_VAE_VDraw_Var')(x) x = Lambda(sampling, name='Dec_VAE_VDraw_Sampling')([z_mean, z_var]) ### VU Block (Upsizing back to a depth of 256) x = Dense((c//4) * (H//16) * (W//16) * (D//16))(x) x = Activation('relu')(x) x = Reshape(((c//4), (H//16), (W//16), (D//16)))(x) x = Conv3D( filters=256, kernel_size=(1, 1, 1), strides=1, data_format='channels_first', name='Dec_VAE_ReduceDepth_256')(x) x = UpSampling3D( size=2, data_format='channels_first', name='Dec_VAE_UpSample_256')(x) ### Green Block x1 (output filters=128) x = Conv3D( filters=128, kernel_size=(1, 1, 1), strides=1, data_format='channels_first', name='Dec_VAE_ReduceDepth_128')(x) x = UpSampling3D( size=2, data_format='channels_first', name='Dec_VAE_UpSample_128')(x) x = green_block(x, 128, name='Dec_VAE_128') ### Green Block x1 (output filters=64) x = Conv3D( filters=64, kernel_size=(1, 1, 1), strides=1, data_format='channels_first', name='Dec_VAE_ReduceDepth_64')(x) x = UpSampling3D( size=2, data_format='channels_first', name='Dec_VAE_UpSample_64')(x) x = green_block(x, 64, name='Dec_VAE_64') ### Green Block x1 (output filters=32) x = Conv3D( filters=32, kernel_size=(1, 1, 1), strides=1, data_format='channels_first', name='Dec_VAE_ReduceDepth_32')(x) x = UpSampling3D( size=2, data_format='channels_first', name='Dec_VAE_UpSample_32')(x) x = green_block(x, 32, name='Dec_VAE_32') ### Blue Block x1 (output filters=32) x = Conv3D( filters=32, kernel_size=(3, 3, 3), strides=1, padding='same', data_format='channels_first', name='Input_Dec_VAE_Output')(x) ### Output Block out_VAE = Conv3D( filters=4, kernel_size=(1, 1, 1), strides=1, data_format='channels_first', name='Dec_VAE_Output')(x) # Build and Compile the model out = out_GT model = Model(inp, outputs=[out, out_VAE]) # Create the 
model model.compile( adam(lr=1e-4), [loss_gt(dice_e), loss_VAE(input_shape, z_mean, z_var, weight_L2=weight_L2, weight_KL=weight_KL)], metrics=[dice_coefficient] ) return model
[ "tensorflow.keras.layers.Lambda", "tensorflow.keras.backend.int_shape", "tensorflow.keras.layers.Conv3D", "tensorflow.keras.backend.exp", "tensorflow.keras.backend.square", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.Flatten", "tensorflow.keras.optimizers.adam", "tensorflow.keras.models.Model", "tensorflow.keras.layers.UpSampling3D", "tensorflow.keras.layers.Dense", "tensorflow.keras.backend.abs", "tensorflow.keras.backend.random_normal", "tensorflow.keras.layers.SpatialDropout3D", "tensorflow.keras.layers.Reshape", "tensorflow.keras.layers.Activation", "tensorflow.keras.backend.shape", "tensorflow.keras.backend.mean", "tensorflow.keras.layers.Input" ]
model.py
[(105, 'tensorflow.keras.backend.random_normal', 'K.random_normal', ([], {'shape': '(batch, dim)'}), True, 'import tensorflow.keras.backend as K\n'), (112, 'tensorflow.keras.backend.mean', 'K.mean', (['(2 * intersection / dn)'], {'axis': '[0, 1]'}), True, 'import tensorflow.keras.backend as K\n'), (242, 'tensorflow.keras.layers.Input', 'Input', (['input_shape'], {}), False, 'from tensorflow.keras.layers import Input, Reshape, Flatten, Dropout, SpatialDropout3D\n'), (457, 'tensorflow.keras.models.Model', 'Model', (['inp'], {'outputs': '[out, out_VAE]'}), False, 'from tensorflow.keras.models import Model\n'), (54, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': 'filters', 'kernel_size': '(1, 1, 1)', 'strides': '(1)', 'data_format': 'data_format', 'name': "(f'Res_{name}' if name else None)"}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (63, 'group_norm.GroupNormalization', 'GroupNormalization', ([], {'groups': '(8)', 'axis': "(1 if data_format == 'channels_first' else 0)", 'name': "(f'GroupNorm_1_{name}' if name else None)"}), False, 'from group_norm import GroupNormalization\n'), (67, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "(f'Relu_1_{name}' if name else None)"}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (68, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': 'filters', 'kernel_size': '(3, 3, 3)', 'strides': '(1)', 'padding': '"""same"""', 'data_format': 'data_format', 'name': "(f'Conv3D_1_{name}' if name else None)"}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (76, 'group_norm.GroupNormalization', 'GroupNormalization', ([], {'groups': '(8)', 'axis': "(1 if data_format == 'channels_first' else 0)", 'name': "(f'GroupNorm_2_{name}' if name else None)"}), False, 'from group_norm import GroupNormalization\n'), (80, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "(f'Relu_2_{name}' if name else None)"}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (81, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': 'filters', 'kernel_size': '(3, 3, 3)', 'strides': '(1)', 'padding': '"""same"""', 'data_format': 'data_format', 'name': "(f'Conv3D_2_{name}' if name else None)"}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (89, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': "(f'Out_{name}' if name else None)"}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (102, 'tensorflow.keras.backend.shape', 'K.shape', (['z_mean'], {}), True, 'import tensorflow.keras.backend as K\n'), (103, 'tensorflow.keras.backend.int_shape', 'K.int_shape', (['z_mean'], {}), True, 'import tensorflow.keras.backend as K\n'), (110, 'tensorflow.keras.backend.abs', 'K.abs', (['(y_true * y_pred)'], {}), True, 'import tensorflow.keras.backend as K\n'), (245, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(32)', 'kernel_size': '(3, 3, 3)', 'strides': '(1)', 'padding': '"""same"""', 'data_format': '"""channels_first"""', 'name': '"""Input_x1"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (254, 'tensorflow.keras.layers.SpatialDropout3D', 'SpatialDropout3D', (['(0.2)'], {'data_format': '"""channels_first"""'}), False, 'from 
tensorflow.keras.layers import Input, Reshape, Flatten, Dropout, SpatialDropout3D\n'), (258, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(32)', 'kernel_size': '(3, 3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'data_format': '"""channels_first"""', 'name': '"""Enc_DownSample_32"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (269, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(64)', 'kernel_size': '(3, 3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'data_format': '"""channels_first"""', 'name': '"""Enc_DownSample_64"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (280, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(128)', 'kernel_size': '(3, 3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'data_format': '"""channels_first"""', 'name': '"""Enc_DownSample_128"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (302, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(128)', 'kernel_size': '(1, 1, 1)', 'strides': '(1)', 'data_format': '"""channels_first"""', 'name': '"""Dec_GT_ReduceDepth_128"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (308, 'tensorflow.keras.layers.UpSampling3D', 'UpSampling3D', ([], {'size': '(2)', 'data_format': '"""channels_first"""', 'name': '"""Dec_GT_UpSample_128"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (312, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': '"""Input_Dec_GT_128"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (316, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(64)', 'kernel_size': '(1, 1, 1)', 'strides': '(1)', 'data_format': '"""channels_first"""', 'name': '"""Dec_GT_ReduceDepth_64"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (322, 'tensorflow.keras.layers.UpSampling3D', 'UpSampling3D', ([], {'size': '(2)', 'data_format': '"""channels_first"""', 'name': '"""Dec_GT_UpSample_64"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (326, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': '"""Input_Dec_GT_64"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (330, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(32)', 'kernel_size': '(1, 1, 1)', 'strides': '(1)', 'data_format': '"""channels_first"""', 'name': '"""Dec_GT_ReduceDepth_32"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (336, 'tensorflow.keras.layers.UpSampling3D', 'UpSampling3D', ([], {'size': '(2)', 'data_format': '"""channels_first"""', 'name': '"""Dec_GT_UpSample_32"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (340, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': '"""Input_Dec_GT_32"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (344, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(32)', 'kernel_size': '(3, 3, 3)', 'strides': '(1)', 'padding': '"""same"""', 'data_format': '"""channels_first"""', 'name': '"""Input_Dec_GT_Output"""'}), False, 
'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (353, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': 'output_channels', 'kernel_size': '(1, 1, 1)', 'strides': '(1)', 'data_format': '"""channels_first"""', 'activation': '"""sigmoid"""', 'name': '"""Dec_GT_Output"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (365, 'group_norm.GroupNormalization', 'GroupNormalization', ([], {'groups': '(8)', 'axis': '(1)', 'name': '"""Dec_VAE_VD_GN"""'}), False, 'from group_norm import GroupNormalization\n'), (366, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""Dec_VAE_VD_relu"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (367, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(16)', 'kernel_size': '(3, 3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'data_format': '"""channels_first"""', 'name': '"""Dec_VAE_VD_Conv3D"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (376, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {'name': '"""Dec_VAE_VD_Flatten"""'}), False, 'from tensorflow.keras.layers import Input, Reshape, Flatten, Dropout, SpatialDropout3D\n'), (377, 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'name': '"""Dec_VAE_VD_Dense"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (380, 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'name': '"""Dec_VAE_VDraw_Mean"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (381, 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'name': '"""Dec_VAE_VDraw_Var"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (382, 'tensorflow.keras.layers.Lambda', 'Lambda', (['sampling'], {'name': '"""Dec_VAE_VDraw_Sampling"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (385, 'tensorflow.keras.layers.Dense', 'Dense', (['(c // 4 * (H // 16) * (W // 16) * (D // 16))'], {}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (386, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (387, 'tensorflow.keras.layers.Reshape', 'Reshape', (['(c // 4, H // 16, W // 16, D // 16)'], {}), False, 'from tensorflow.keras.layers import Input, Reshape, Flatten, Dropout, SpatialDropout3D\n'), (388, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(256)', 'kernel_size': '(1, 1, 1)', 'strides': '(1)', 'data_format': '"""channels_first"""', 'name': '"""Dec_VAE_ReduceDepth_256"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (394, 'tensorflow.keras.layers.UpSampling3D', 'UpSampling3D', ([], {'size': '(2)', 'data_format': '"""channels_first"""', 'name': '"""Dec_VAE_UpSample_256"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (400, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(128)', 'kernel_size': '(1, 1, 1)', 'strides': '(1)', 'data_format': '"""channels_first"""', 'name': '"""Dec_VAE_ReduceDepth_128"""'}), False, 'from 
tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (406, 'tensorflow.keras.layers.UpSampling3D', 'UpSampling3D', ([], {'size': '(2)', 'data_format': '"""channels_first"""', 'name': '"""Dec_VAE_UpSample_128"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (413, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(64)', 'kernel_size': '(1, 1, 1)', 'strides': '(1)', 'data_format': '"""channels_first"""', 'name': '"""Dec_VAE_ReduceDepth_64"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (419, 'tensorflow.keras.layers.UpSampling3D', 'UpSampling3D', ([], {'size': '(2)', 'data_format': '"""channels_first"""', 'name': '"""Dec_VAE_UpSample_64"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (426, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(32)', 'kernel_size': '(1, 1, 1)', 'strides': '(1)', 'data_format': '"""channels_first"""', 'name': '"""Dec_VAE_ReduceDepth_32"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (432, 'tensorflow.keras.layers.UpSampling3D', 'UpSampling3D', ([], {'size': '(2)', 'data_format': '"""channels_first"""', 'name': '"""Dec_VAE_UpSample_32"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (439, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(32)', 'kernel_size': '(3, 3, 3)', 'strides': '(1)', 'padding': '"""same"""', 'data_format': '"""channels_first"""', 'name': '"""Input_Dec_VAE_Output"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (448, 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(4)', 'kernel_size': '(1, 1, 1)', 'strides': '(1)', 'data_format': '"""channels_first"""', 'name': '"""Dec_VAE_Output"""'}), False, 'from tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\n'), (459, 'tensorflow.keras.optimizers.adam', 'adam', ([], {'lr': '(0.0001)'}), False, 'from tensorflow.keras.optimizers import adam\n'), (106, 'tensorflow.keras.backend.exp', 'K.exp', (['(0.5 * z_var)'], {}), True, 'import tensorflow.keras.backend as K\n'), (140, 'tensorflow.keras.backend.abs', 'K.abs', (['(y_true * y_pred)'], {}), True, 'import tensorflow.keras.backend as K\n'), (143, 'tensorflow.keras.backend.mean', 'K.mean', (['(2 * intersection / dn)'], {'axis': '[0, 1]'}), True, 'import tensorflow.keras.backend as K\n'), (188, 'tensorflow.keras.backend.square', 'K.square', (['(y_true - y_pred)'], {}), True, 'import tensorflow.keras.backend as K\n'), (111, 'tensorflow.keras.backend.square', 'K.square', (['y_true'], {}), True, 'import tensorflow.keras.backend as K\n'), (111, 'tensorflow.keras.backend.square', 'K.square', (['y_pred'], {}), True, 'import tensorflow.keras.backend as K\n'), (141, 'tensorflow.keras.backend.square', 'K.square', (['y_true'], {}), True, 'import tensorflow.keras.backend as K\n'), (141, 'tensorflow.keras.backend.square', 'K.square', (['y_pred'], {}), True, 'import tensorflow.keras.backend as K\n'), (191, 'tensorflow.keras.backend.exp', 'K.exp', (['z_var'], {}), True, 'import tensorflow.keras.backend as K\n'), (191, 'tensorflow.keras.backend.square', 'K.square', (['z_mean'], {}), True, 'import tensorflow.keras.backend as K\n')]
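As a quick numeric sanity check on the dice_coefficient defined in the model.py record above — a standalone sketch, not part of the repo: the soft dice is 2·Σ(y_t·y_p) / (Σy_t² + Σy_p² + ε), so identical binary masks score about 1, and a half-confident prediction of the correct mask scores 2·0.5 / (1 + 0.25) = 0.8.

import numpy as np
import tensorflow.keras.backend as K

def dice(y_t, y_p, e=1e-8):
    # Same formula as dice_coefficient above: reduce over the three spatial
    # axes, then average over batch and channel.
    inter = K.sum(K.abs(y_t * y_p), axis=[-3, -2, -1])
    dn = K.sum(K.square(y_t) + K.square(y_p), axis=[-3, -2, -1]) + e
    return K.mean(2 * inter / dn, axis=[0, 1])

y_true = np.zeros((1, 1, 4, 4, 4), dtype="float32")  # (batch, c, H, W, D)
y_true[:, :, :2] = 1.0                               # half the voxels are foreground

print(K.eval(dice(y_true, y_true)))        # ~1.0: identical masks
print(K.eval(dice(y_true, 0.5 * y_true)))  # 0.8 = 2*0.5 / (1 + 0.25)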
bhneo/SparsePooling
6575774ad95cd782bbd228fb08c588b475035fc6
import os import sys sys.path.append(os.getcwd()) import tensorflow as tf tf.get_logger().setLevel('ERROR') from common.inputs import data_input from common import layers, utils, train, res_blocks, attacks import config WEIGHT_DECAY = 1e-4 BATCH_NORM_EPSILON = 1e-3 BATCH_NORM_DECAY = 0.99 kernel_regularizer = tf.keras.regularizers.l2(WEIGHT_DECAY) kernel_initializer = tf.keras.initializers.he_normal() BASE_NAME = 'ex1' def build_model_name(params): model_name = BASE_NAME model_name += '_b{}'.format(params.model.resblock) if params.dataset.flip: model_name += '_flip' if params.dataset.crop: model_name += '_crop' return model_name def get_loss_opt(): loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) optimizer = tf.keras.optimizers.SGD(0.1) return loss, optimizer def build_model(shape, num_out, params): inputs = tf.keras.Input(shape=shape) model_name = build_model_name(params) probs, tensor_log = build(inputs, num_out, params.model.resblock) model = tf.keras.Model(inputs=inputs, outputs=probs, name=model_name) log_model = tf.keras.Model(inputs=inputs, outputs=tensor_log.get_outputs(), name=model_name + '_log') tensor_log.set_model(log_model) loss, optimizer = get_loss_opt() model.compile(optimizer=optimizer, loss=loss, metrics=[]) model.summary() lr_scheduler = tf.keras.callbacks.LearningRateScheduler(schedule=lr_schedule, verbose=1) lr_scheduler.set_model(model) callbacks = [lr_scheduler] model.callbacks = callbacks return model, tensor_log def build(inputs, num_out, resblock): log = utils.TensorLog() resblock = utils.parse_resblock(resblock) backbone = res_blocks.build_resnet_backbone(inputs=inputs, repetitions=resblock, layer_num=0, start_filters=16, arch='cifar', use_bias=False, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, bn_axis=-1, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, version='v2') log.add_hist('backbone', backbone) pool = tf.keras.layers.GlobalAveragePooling2D()(backbone) output = tf.keras.layers.Dense(num_out)(pool) return output, log def get_norm_fn(dataset): channel = 1 if dataset == 'cifar10' or dataset == 'cifar100' or dataset == 'svhn_cropped': channel = 3 def norm(image): if channel == 3: image = tf.image.per_image_standardization(image) return image return norm def build_parse(dataset, flip=False, crop=False, is_train=False, with_norm=True): if dataset not in ['cifar10', 'cifar100', 'mnist', 'kmnist', 'emnist', 'fashion_mnist', 'svhn_cropped']: raise Exception('{} not support!'.format(dataset)) if dataset == 'cifar10' or dataset == 'cifar100' or dataset == 'svhn_cropped': height, width, channel = 32, 32, 3 if dataset == 'mnist' or dataset == 'kmnist' or dataset == 'fashion_mnist' or dataset == 'emnist': height, width, channel = 28, 28, 1 def parse(image, label): image = tf.cast(image, tf.float32) image = tf.divide(image, 255.) 
if with_norm: image = get_norm_fn(dataset)(image) if is_train: if flip: image = tf.image.random_flip_left_right(image) if crop: image = tf.image.resize_with_crop_or_pad(image, height+8, width+8) image = tf.image.random_crop(image, [height, width, channel]) return image, label return parse def lr_schedule(epoch, lr): if epoch in [60, 80]: lr /= 10 return lr def main(): args, params = config.parse_args() if params.task == 'train': train_set, test_set, info = data_input.build_dataset(params.dataset.name, parser_train=build_parse(params.dataset.name, flip=params.dataset.flip, crop=params.dataset.crop, is_train=True), parser_test=build_parse(params.dataset.name, is_train=False), batch_size=params.training.batch_size) model, tensor_log = build_model(shape=info.features['image'].shape, num_out=info.features['label'].num_classes, params=params) trainer = train.Trainer(model, params, info, tensor_log) if args.train: trainer.fit(train_set, test_set) else: trainer.evaluate(test_set) elif params.task == 'attack': do_adv(os.getcwd()) def load_ckpt(model, model_dir): loss, optimizer = get_loss_opt() model.compile(optimizer=optimizer, loss=loss, metrics=[]) ckpt = tf.train.Checkpoint(optimizer=model.optimizer, net=model) manager = tf.train.CheckpointManager(ckpt, model_dir, max_to_keep=3) ckpt.restore(manager.latest_checkpoint) if manager.latest_checkpoint: print("Restored from {}".format(manager.latest_checkpoint)) def get_input_set(dataset): if dataset == 'fashion_mnist' or dataset == 'kmnist': data_shape = (28, 28, 1) num_out = 10 flip = False crop = True elif dataset == 'cifar10': data_shape = (32, 32, 3) num_out = 10 flip = True crop = True elif dataset == 'svhn_cropped': data_shape = (32, 32, 3) num_out = 10 flip = False crop = True return data_shape, num_out, flip, crop def get_model_dir(dataset, log='log', resblocks='333'): data_shape, num_out, flip, crop = get_input_set(dataset) model_dir = '{}/{}/{}_b{}'.format(log, dataset, BASE_NAME, resblocks) if flip: model_dir += '_flip' if crop: model_dir += '_crop' if not os.path.exists(model_dir): raise Exception('model not exist:{}'.format(model_dir)) return model_dir, data_shape, num_out, flip, crop def load_model(data_shape, model_dir, num_out, resblocks='333', input_norm=None): inputs = tf.keras.Input(data_shape) probs, log = build(inputs=inputs if input_norm is None else layers.InputNorm(input_norm)(inputs), num_out=num_out, resblock=resblocks) model = tf.keras.Model(inputs=inputs, outputs=probs, name='x') load_ckpt(model, model_dir) return model def evaluate_attack(epsilons, root='', log='log', dataset='kmnist', metric='acc', all_target=False, method='FGSM', steps=10, black_box=False, resblocks='333'): model_dir, data_shape, num_out, flip, crop = get_model_dir(dataset, root+log, resblocks=resblocks) model = load_model(data_shape, model_dir, num_out, resblocks=resblocks, input_norm=get_norm_fn(dataset)) if black_box: print('load black box source model') model_dir, data_shape, num_out, flip, crop = get_model_dir(dataset, root + log, resblocks=resblocks) model_src = load_model(data_shape, model_dir, num_out, resblocks=resblocks, input_norm=get_norm_fn(dataset)) else: model_src = model loss, _ = get_loss_opt() _, test_set, info = data_input.build_dataset(dataset, path=root + 'data', parser_train=build_parse(dataset, flip=False, crop=False, is_train=True), parser_test=build_parse(dataset, is_train=False, with_norm=False), batch_size=512) acc_adv = tf.keras.metrics.SparseCategoricalAccuracy(name='acc_adv') if metric == 'acc': results = 
attacks.evaluate_model_after_attacks(epsilons, acc_adv, test_set, model, loss, method=method, steps=steps, x_min=0, x_max=1, model_src=model_src) elif metric == 'success': if all_target: categories = [i for i in range(10)] results = attacks.evaluate_attacks_success_rate_all_target(epsilons, test_set, model, loss, categories, method=method, steps=steps, x_min=0, x_max=1, cost=True, model_src=model_src) else: results = attacks.evaluate_attacks_success_rate(epsilons, test_set, model, loss, method=method, steps=steps, x_min=0, x_max=1, model_src=model_src) return results def do_adv(root): import time all_target = False methods = ['PGD', 'BIM', 'FGSM'] datasets = ['fashion_mnist', 'svhn_cropped', 'cifar10'] black_box = False for dataset in datasets: print('dataset:', dataset) if dataset == 'cifar10': if all_target: epsilons = [0.05] else: epsilons = [0.01, 0.03, 0.06, 0.1] else: if all_target: epsilons = [0.1] else: epsilons = [0.1, 0.2, 0.3] for method in methods: print('method:', method) t1 = time.time() evaluate_attack(epsilons, root=root, log='log', dataset=dataset, metric='success', all_target=all_target, method=method, steps=10, black_box=black_box) t2 = time.time() print('time:', t2-t1) if __name__ == "__main__": utils.init_devices(True) main()
[ "tensorflow.cast", "tensorflow.keras.callbacks.LearningRateScheduler", "tensorflow.image.random_crop", "tensorflow.keras.initializers.he_normal", "tensorflow.keras.optimizers.SGD", "tensorflow.keras.Input", "tensorflow.image.random_flip_left_right", "tensorflow.keras.regularizers.l2", "tensorflow.divide", "tensorflow.train.CheckpointManager", "tensorflow.image.resize_with_crop_or_pad", "tensorflow.train.Checkpoint", "tensorflow.keras.layers.Dense", "tensorflow.keras.Model", "tensorflow.image.per_image_standardization", "tensorflow.keras.layers.GlobalAveragePooling2D", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.get_logger", "tensorflow.keras.metrics.SparseCategoricalAccuracy" ]
models/res/ex1.py
[(18, 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['WEIGHT_DECAY'], {}), True, 'import tensorflow as tf\n'), (19, 'tensorflow.keras.initializers.he_normal', 'tf.keras.initializers.he_normal', ([], {}), True, 'import tensorflow as tf\n'), (4, 'os.getcwd', 'os.getcwd', ([], {}), False, 'import os\n'), (35, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', (['(0.1)'], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'shape'}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'probs', 'name': 'model_name'}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.keras.callbacks.LearningRateScheduler', 'tf.keras.callbacks.LearningRateScheduler', ([], {'schedule': 'lr_schedule', 'verbose': '(1)'}), True, 'import tensorflow as tf\n'), (61, 'common.utils.TensorLog', 'utils.TensorLog', ([], {}), False, 'from common import layers, utils, train, res_blocks, attacks\n'), (62, 'common.utils.parse_resblock', 'utils.parse_resblock', (['resblock'], {}), False, 'from common import layers, utils, train, res_blocks, attacks\n'), (63, 'common.res_blocks.build_resnet_backbone', 'res_blocks.build_resnet_backbone', ([], {'inputs': 'inputs', 'repetitions': 'resblock', 'layer_num': '(0)', 'start_filters': '(16)', 'arch': '"""cifar"""', 'use_bias': '(False)', 'kernel_initializer': 'kernel_initializer', 'kernel_regularizer': 'kernel_regularizer', 'bn_axis': '(-1)', 'momentum': 'BATCH_NORM_DECAY', 'epsilon': 'BATCH_NORM_EPSILON', 'version': '"""v2"""'}), False, 'from common import layers, utils, train, res_blocks, attacks\n'), (118, 'config.parse_args', 'config.parse_args', ([], {}), False, 'import config\n'), (146, 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'optimizer': 'model.optimizer', 'net': 'model'}), True, 'import tensorflow as tf\n'), (147, 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['ckpt', 'model_dir'], {'max_to_keep': '(3)'}), True, 'import tensorflow as tf\n'), (188, 'tensorflow.keras.Input', 'tf.keras.Input', (['data_shape'], {}), True, 'import tensorflow as tf\n'), (192, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'probs', 'name': '"""x"""'}), True, 'import tensorflow as tf\n'), (223, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""acc_adv"""'}), True, 'import tensorflow as tf\n'), (270, 'common.utils.init_devices', 'utils.init_devices', (['(True)'], {}), False, 'from common import layers, utils, train, res_blocks, attacks\n'), (7, 'tensorflow.get_logger', 'tf.get_logger', ([], {}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'tf.keras.layers.GlobalAveragePooling2D', ([], {}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_out'], {}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (98, 'tensorflow.divide', 'tf.divide', (['image', '(255.0)'], {}), True, 'import tensorflow as tf\n'), (132, 'common.train.Trainer', 'train.Trainer', (['model', 'params', 'info', 'tensor_log'], {}), False, 'from common import layers, utils, train, 
res_blocks, attacks\n'), (181, 'os.path.exists', 'os.path.exists', (['model_dir'], {}), False, 'import os\n'), (225, 'common.attacks.evaluate_model_after_attacks', 'attacks.evaluate_model_after_attacks', (['epsilons', 'acc_adv', 'test_set', 'model', 'loss'], {'method': 'method', 'steps': 'steps', 'x_min': '(0)', 'x_max': '(1)', 'model_src': 'model_src'}), False, 'from common import layers, utils, train, res_blocks, attacks\n'), (83, 'tensorflow.image.per_image_standardization', 'tf.image.per_image_standardization', (['image'], {}), True, 'import tensorflow as tf\n'), (255, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (265, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (103, 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.image.resize_with_crop_or_pad', 'tf.image.resize_with_crop_or_pad', (['image', '(height + 8)', '(width + 8)'], {}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.image.random_crop', 'tf.image.random_crop', (['image', '[height, width, channel]'], {}), True, 'import tensorflow as tf\n'), (138, 'os.getcwd', 'os.getcwd', ([], {}), False, 'import os\n'), (229, 'common.attacks.evaluate_attacks_success_rate_all_target', 'attacks.evaluate_attacks_success_rate_all_target', (['epsilons', 'test_set', 'model', 'loss', 'categories'], {'method': 'method', 'steps': 'steps', 'x_min': '(0)', 'x_max': '(1)', 'cost': '(True)', 'model_src': 'model_src'}), False, 'from common import layers, utils, train, res_blocks, attacks\n'), (231, 'common.attacks.evaluate_attacks_success_rate', 'attacks.evaluate_attacks_success_rate', (['epsilons', 'test_set', 'model', 'loss'], {'method': 'method', 'steps': 'steps', 'x_min': '(0)', 'x_max': '(1)', 'model_src': 'model_src'}), False, 'from common import layers, utils, train, res_blocks, attacks\n'), (189, 'common.layers.InputNorm', 'layers.InputNorm', (['input_norm'], {}), False, 'from common import layers, utils, train, res_blocks, attacks\n')]
zuyezheng/RedditSentiment
c786284323828c1a3e353ee27e1be13421feb0c2
import time import tensorflow as tf from transformers.TransformerSchedule import TransformerSchedule TRAIN_STEP_SIGNATURE = [ tf.TensorSpec(shape=(None, None), dtype=tf.int64), tf.TensorSpec(shape=(None, None), dtype=tf.int64), ] class TransformerWrapper: def __init__( self, transformer, # path to store or load checkpoints checkpoint_path, # if we should try to restore from checkpoint restore ): self.transformer = transformer self.optimizer = tf.keras.optimizers.Adam( TransformerSchedule(self.transformer.d_model), beta_1=0.9, beta_2=0.98, epsilon=1e-9 ) checkpoint = tf.train.Checkpoint(transformer=self.transformer, optimizer=self.optimizer) self.checkpoint_manager = tf.train.CheckpointManager(checkpoint, checkpoint_path, max_to_keep=5) if restore and self.checkpoint_manager.latest_checkpoint: checkpoint.restore(self.checkpoint_manager.latest_checkpoint) print('Restored from latest checkpoint.') def train(self, epochs, dataset): loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none') train_loss = tf.keras.metrics.Mean(name='train_loss') train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy') @tf.function(input_signature=TRAIN_STEP_SIGNATURE) def train_step(inputs, targets): # inputs for the decoder, excluding the last since we need something to predict target_inputs = targets[:, :-1] # inputs offset by 1 since we're trying to predict the next character target_reals = targets[:, 1:] enc_padding_mask, combined_mask, dec_padding_mask = TransformerWrapper.create_masks(inputs, target_inputs) with tf.GradientTape() as tape: predictions, _ = self.transformer( inputs, target_inputs, True, enc_padding_mask, combined_mask, dec_padding_mask ) loss = TransformerWrapper.loss_function(target_reals, predictions, loss_object) gradients = tape.gradient(loss, self.transformer.trainable_variables) self.optimizer.apply_gradients(zip(gradients, self.transformer.trainable_variables)) train_loss(loss) train_accuracy(target_reals, predictions) for epoch in range(epochs): start = time.time() for (batch_num, (i, t)) in enumerate(dataset): train_step(i, t) if batch_num % 50 == 0: print('Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'.format( epoch + 1, batch_num, train_loss.result(), train_accuracy.result() )) if (epoch + 1) % 5 == 0: ckpt_save_path = self.checkpoint_manager.save() print('Saving checkpoint for epoch {} at {}'.format(epoch + 1, ckpt_save_path)) print('Epoch {} Loss {:.4f} Accuracy {:.4f}'.format( epoch + 1, train_loss.result(), train_accuracy.result() )) print('Time taken for 1 epoch: {} secs\n'.format(time.time() - start)) @staticmethod def loss_function(real, pred, loss_object): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss = loss_object(real, pred) mask = tf.cast(mask, dtype=loss.dtype) loss *= mask return tf.reduce_sum(loss) / tf.reduce_sum(mask) @staticmethod def create_masks(inputs, targets): def create_padding_mask(sequence): sequence = tf.cast(tf.math.equal(sequence, 0), tf.float32) # add extra dimensions to add the padding to the attention logits # (batch_size, 1, 1, seq_len) return sequence[:, tf.newaxis, tf.newaxis, :] def create_look_ahead_mask(size): # (seq_len, seq_len) return 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0) # Encoder padding mask encoder_padding_mask = create_padding_mask(inputs) # Pad and mask the encoder outputs used in the 2nd attention block in the decoder. 
decoder_padding_mask = create_padding_mask(inputs) # Pad and mask future tokens in the input received by the decoder, used in the 1st attention block in the # decoder. look_ahead_mask = create_look_ahead_mask(tf.shape(targets)[1]) dec_target_padding_mask = create_padding_mask(targets) combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask) return encoder_padding_mask, combined_mask, decoder_padding_mask
[ "tensorflow.train.CheckpointManager", "tensorflow.shape", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.train.Checkpoint", "tensorflow.maximum", "tensorflow.cast", "tensorflow.reduce_sum", "tensorflow.ones", "tensorflow.math.equal", "tensorflow.function", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.TensorSpec", "tensorflow.keras.metrics.Mean", "tensorflow.GradientTape" ]
src/transformers/TransformerWrapper.py
[(8, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, None)', 'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (9, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, None)', 'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'transformer': 'self.transformer', 'optimizer': 'self.optimizer'}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['checkpoint', 'checkpoint_path'], {'max_to_keep': '(5)'}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': '"""none"""'}), True, 'import tensorflow as tf\n'), (37, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""train_loss"""'}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""train_accuracy"""'}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.function', 'tf.function', ([], {'input_signature': 'TRAIN_STEP_SIGNATURE'}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.cast', 'tf.cast', (['mask'], {'dtype': 'loss.dtype'}), True, 'import tensorflow as tf\n'), (114, 'tensorflow.maximum', 'tf.maximum', (['dec_target_padding_mask', 'look_ahead_mask'], {}), True, 'import tensorflow as tf\n'), (25, 'transformers.TransformerSchedule.TransformerSchedule', 'TransformerSchedule', (['self.transformer.d_model'], {}), False, 'from transformers.TransformerSchedule import TransformerSchedule\n'), (62, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (83, 'tensorflow.math.equal', 'tf.math.equal', (['real', '(0)'], {}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (94, 'tensorflow.math.equal', 'tf.math.equal', (['sequence', '(0)'], {}), True, 'import tensorflow as tf\n'), (112, 'tensorflow.shape', 'tf.shape', (['targets'], {}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.ones', 'tf.ones', (['(size, size)'], {}), True, 'import tensorflow as tf\n'), (79, 'time.time', 'time.time', ([], {}), False, 'import time\n')]
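To make the mask shapes in TransformerWrapper.create_masks above concrete — a toy sketch with made-up token values, not repo code: for a 3-token target whose last token is padding, the look-ahead and padding masks combine by broadcasting into one (batch, 1, seq_len, seq_len) tensor where 1.0 marks positions attention must ignore.

import tensorflow as tf

targets = tf.constant([[5, 7, 0]], dtype=tf.int64)  # one sequence; 0 is the pad id

look_ahead = 1 - tf.linalg.band_part(tf.ones((3, 3)), -1, 0)   # strictly upper triangle
padding = tf.cast(tf.math.equal(targets, 0), tf.float32)[:, tf.newaxis, tf.newaxis, :]
combined = tf.maximum(padding, look_ahead)                     # broadcasts to (1, 1, 3, 3)

print(combined[0, 0].numpy())
# [[0. 1. 1.]
#  [0. 0. 1.]
#  [0. 0. 1.]]
# Row i blocks every future position j > i, plus the padded position 2 everywhere.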
pps-lab/fl-analysis
798fc0292d0611ec8900ebdb090b9e282d0df457
# Copyright (c) 2018 Uber Technologies, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import numpy as np import tensorflow as tf from tensorflow.keras.layers import Layer, InputSpec import tensorflow.keras.backend as K from tensorflow.python.keras.utils import conv_utils from src.subspace.keras_ext.rproj_layers_util import _convert_string_dtype # from keras.backend.tensorflow_backend import _convert_string_dtype from tensorflow.keras import regularizers, constraints, initializers, activations ########### # # Low Rank Basis Layers # # These layers are modified versions of standard Keras layers that # accept an OffsetCreator*Proj to create offsets from a weight basis # in a Dense/Sparse/Fastfood agnostic manner. # ########### class LowRankBasisLayer(Layer): '''Smarter version of Layer...''' def __init__(self, offset_creator_class, weight_basis, *args, **kwargs): super(LowRankBasisLayer, self).__init__(*args, **kwargs) # offset_creator is an object that creates theta offsets self.offset_creator = offset_creator_class() self.weight_basis = weight_basis # These may or may not be used by subclasses #self._basis_matrices = [] #self._basis_matrix_normalizers = [] # TODO check for use of basis_matrices @property def basis_matrices(self): print('USED HERE basis_matrices') return self._basis_matrices # TODO check for use of basis_matrix_normalizers @property def basis_matrix_normalizers(self): print('USED HERE basis_matrix_normalizers') return self._basis_matrix_normalizers def add_weight(self, name, shape, dtype=None, initializer=None, regularizer=None, trainable=True, constraint=None): '''Version of add_weight that creates a weight theta by instantiating theta_0 and then adding to it an offset from the member offset_creator. 
''' initializer = initializers.get(initializer) if dtype is None: dtype = K.floatx() # Create Theta_0 value_0 = initializer(shape) theta_0 = tf.Variable(value_0, trainable=False, dtype=_convert_string_dtype(dtype), name='%s_theta0' % name) if isinstance(value_0, np.ndarray): theta_0._keras_shape = value_0.shape elif hasattr(value_0, 'get_shape'): theta_0._keras_shape = tuple(map(int, value_0.get_shape())) theta_0._uses_learning_phase = False # Call offset creator exec = self.offset_creator.create_theta_offset(self.weight_basis, theta_0.get_shape(), dtype=dtype, name=name) non_trainable_weights = exec.ww # if regularizer is not None: # self.add_loss(regularizer(theta)) # if constraint is not None: # self.constraints[theta] = constraint #self._base_thetas.append(theta_0) #self._basis_matrices.append(ww) #self._non_trainable_weights.extend([theta_0, ww]) self._non_trainable_weights.extend([theta_0] + [non_trainable_weights]) return theta_0, exec def add_non_trainable_weight(self, name, shape, dtype=None, initializer=None, regularizer=None, constraint=None): '''Adds a weight variable to the layer. # Arguments name: String, the name for the weight variable. shape: The shape tuple of the weight. dtype: The dtype of the weight. initializer: An Initializer instance (callable). regularizer: An optional Regularizer instance. trainable: A boolean, whether the weight should be trained via backprop or not (assuming that the layer itself is also trainable). constraint: An optional Constraint instance. # Returns The created weight variable. ''' initializer = initializers.get(initializer) if dtype is None: dtype = K.floatx() weight = K.variable(initializer(shape), dtype=dtype, name=name) if regularizer is not None: self.add_loss(regularizer(weight)) if constraint is not None: self.constraints[weight] = constraint self._non_trainable_weights.append(weight) return weight class RProjDense(LowRankBasisLayer): '''RProj version of Dense.''' def __init__(self, offset_creator_class, weight_basis, units, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): if 'input_shape' not in kwargs and 'input_dim' in kwargs: kwargs['input_shape'] = (kwargs.pop('input_dim'),) super(RProjDense, self).__init__(offset_creator_class, weight_basis, **kwargs) self.units = units self.activation = activations.get(activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.activity_regularizer = regularizers.get(activity_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) self.input_spec = InputSpec(min_ndim=2) self.supports_masking = True def build(self, input_shape): assert len(input_shape) >= 2 input_dim = input_shape[-1] self.kernel = self.add_weight(shape=(input_dim, self.units), initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) if self.use_bias: self.bias = self.add_weight(shape=(self.units,), initializer=self.bias_initializer, name='bias', regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None self.input_spec = InputSpec(min_ndim=2, axes={-1: 
input_dim})
        self.built = True

    def call(self, inputs, **kwargs):
        # kernel and bias are (theta_0, offset) pairs produced by
        # LowRankBasisLayer.add_weight; materialize the effective weights.
        kt0, eproj = self.kernel
        k = tf.add(kt0, eproj())

        # Normal dense functionality
        output = K.dot(inputs, k)
        if self.use_bias:
            # Only unpack the bias pair when a bias was actually created;
            # build() sets self.bias = None when use_bias is False.
            bt0, eprojb = self.bias
            b = tf.add(bt0, eprojb())
            output = K.bias_add(output, b)
        if self.activation is not None:
            output = self.activation(output)
        return output

    def compute_output_shape(self, input_shape):
        assert input_shape and len(input_shape) >= 2
        assert input_shape[-1]
        output_shape = list(input_shape)
        output_shape[-1] = self.units
        return tuple(output_shape)


class _RProjConv(LowRankBasisLayer):
    '''Abstract nD convolution layer (private, used as implementation base).

    Only the intrinsic parameters (RProj) are trainable.'''

    def __init__(self, offset_creator_class, weight_basis,
                 rank,
                 filters,
                 kernel_size,
                 strides=1,
                 padding='valid',
                 data_format=None,
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(_RProjConv, self).__init__(offset_creator_class, weight_basis, **kwargs)
        self.rank = rank
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=self.rank + 2)

    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Set input spec.
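        # NOTE: self.kernel and self.bias above are (theta_0, offset) pairs
        # returned by LowRankBasisLayer.add_weight; call() materializes the
        # effective weights as tf.add(theta_0, offset()).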
self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True

    def call(self, inputs):
        # Materialize the effective kernel from its (theta_0, offset) pair
        # before handing it to the backend convolution ops.
        kt0, eproj = self.kernel
        k = tf.add(kt0, eproj())

        if self.rank == 1:
            outputs = K.conv1d(
                inputs,
                k,
                strides=self.strides[0],
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            outputs = K.conv2d(
                inputs,
                k,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        if self.rank == 3:
            outputs = K.conv3d(
                inputs,
                k,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)

        if self.use_bias:
            # build() sets self.bias = None when use_bias is False, so the
            # (theta_0, offset) pair is only unpacked here.
            bt0, eprojb = self.bias
            b = tf.add(bt0, eprojb())
            outputs = K.bias_add(
                outputs,
                b,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def compute_output_shape(self, input_shape):
        if self.data_format == 'channels_last':
            space = input_shape[1:-1]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            return (input_shape[0],) + tuple(new_space) + (self.filters,)
        if self.data_format == 'channels_first':
            space = input_shape[2:]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            return (input_shape[0], self.filters) + tuple(new_space)


class RProjConv2D(_RProjConv):
    '''Low Rank Basis Conv2D.

    `filters` is the number of output filters, so the channel dimension of
    the output is `filters`.

    TODO: Documentation / unit tests
    '''

    def __init__(self, offset_creator_class, weight_basis,
                 filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=(1, 1),
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(RProjConv2D, self).__init__(
            offset_creator_class=offset_creator_class,
            weight_basis=weight_basis,
            rank=2,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs)
        self.input_spec = InputSpec(ndim=4)

    def build(self, input_shape):
        assert self.data_format != 'channels_first', 'only b01c supported'
        channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[-1]

        self.units = self.filters

        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        # Set input spec.
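        # build() asserted channels_last (b01c) above, so channel_axis is
        # always -1 when the input spec is pinned here.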
self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True

    def call(self, inputs):
        assert self.rank == 2, 'only conv2d supported for now...'

        # Materialize the effective kernel from its (theta_0, offset) pair.
        kt0, eproj = self.kernel
        k = tf.add(kt0, eproj())

        if self.rank == 2:
            outputs = K.conv2d(
                inputs,
                k,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)

        if self.use_bias:
            # build() sets self.bias = None when use_bias is False, so the
            # (theta_0, offset) pair is only unpacked here.
            bt0, eprojb = self.bias
            b = tf.add(bt0, eprojb())
            outputs = K.bias_add(
                outputs,
                b,
                data_format=self.data_format)

        # if self.activation is not None:
        #     assert False, 'activation functions not supported'
        #     return self.activation(outputs)
        return outputs

    def compute_output_shape(self, input_shape):
        if self.data_format == 'channels_last':
            space = input_shape[1:-1]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            # self.filters * 2 to accommodate LU representation
            return (input_shape[0],) + tuple(new_space) + (self.filters,)


class RProjBatchNormalization(LowRankBasisLayer):
    '''RProj version of BatchNormalization.'''

    def __init__(self, offset_creator_class, weight_basis,
                 axis=-1,
                 momentum=0.99,
                 epsilon=1e-3,
                 center=True,
                 scale=True,
                 beta_initializer='zeros',
                 gamma_initializer='ones',
                 moving_mean_initializer='zeros',
                 moving_variance_initializer='ones',
                 beta_regularizer=None,
                 gamma_regularizer=None,
                 beta_constraint=None,
                 gamma_constraint=None,
                 **kwargs):
        super(RProjBatchNormalization, self).__init__(offset_creator_class, weight_basis, **kwargs)
        self.supports_masking = True
        self.axis = axis
        self.momentum = momentum
        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.moving_mean_initializer = initializers.get(moving_mean_initializer)
        self.moving_variance_initializer = initializers.get(moving_variance_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)

    def build(self, input_shape):
        dim = input_shape[self.axis]
        if dim is None:
            raise ValueError('Axis ' + str(self.axis) + ' of '
                             'input tensor should have a defined dimension '
                             'but the layer received an input with shape ' +
                             str(input_shape) + '.')
        self.input_spec = InputSpec(ndim=len(input_shape),
                                    axes={self.axis: dim})
        shape = (dim,)

        if self.scale:
            self.gamma = self.add_weight(shape=shape,
                                         name='gamma',
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint)
        else:
            self.gamma = None
        if self.center:
            self.beta = self.add_weight(shape=shape,
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
        else:
            self.beta = None
        self.moving_mean = self.add_non_trainable_weight(
            shape=shape,
            name='moving_mean',
            initializer=self.moving_mean_initializer)
        self.moving_variance = self.add_non_trainable_weight(
            shape=shape,
            name='moving_variance',
            initializer=self.moving_variance_initializer)
        self.built = True

    def call(self, inputs, training=None):
        input_shape = K.int_shape(inputs)
        # Prepare broadcasting shape.
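        # reduction_axes lists every axis except self.axis (the statistics
        # are computed over those axes); broadcast_shape re-expands the
        # per-channel parameters so they broadcast against the input when the
        # normalized axis is not the last one.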
ndim = len(input_shape)
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]

        # gamma and beta are (theta_0, offset) pairs from add_weight;
        # materialize the effective parameters before using them.
        if self.scale:
            gt0, eprojg = self.gamma
            gamma = tf.add(gt0, eprojg())
        else:
            gamma = None
        if self.center:
            bt0, eprojb = self.beta
            beta = tf.add(bt0, eprojb())
        else:
            beta = None

        # Determines whether broadcasting is needed.
        needs_broadcasting = (sorted(reduction_axes) != list(range(ndim))[:-1])

        def normalize_inference():
            if needs_broadcasting:
                # In this case we must explicitly broadcast all parameters.
                broadcast_moving_mean = K.reshape(self.moving_mean, broadcast_shape)
                broadcast_moving_variance = K.reshape(self.moving_variance, broadcast_shape)
                if self.center:
                    broadcast_beta = K.reshape(beta, broadcast_shape)
                else:
                    broadcast_beta = None
                if self.scale:
                    broadcast_gamma = K.reshape(gamma, broadcast_shape)
                else:
                    broadcast_gamma = None
                return K.batch_normalization(
                    inputs,
                    broadcast_moving_mean,
                    broadcast_moving_variance,
                    broadcast_beta,
                    broadcast_gamma,
                    epsilon=self.epsilon)
            else:
                return K.batch_normalization(
                    inputs,
                    self.moving_mean,
                    self.moving_variance,
                    beta,
                    gamma,
                    epsilon=self.epsilon)

        # If the learning phase is *static* and set to inference:
        if training in {0, False}:
            return normalize_inference()

        # If the learning phase is either dynamic or set to training:
        normed_training, mean, variance = K.normalize_batch_in_training(
            inputs, gamma, beta, reduction_axes,
            epsilon=self.epsilon)

        self.add_update([K.moving_average_update(self.moving_mean,
                                                 mean,
                                                 self.momentum),
                         K.moving_average_update(self.moving_variance,
                                                 variance,
                                                 self.momentum)],
                        inputs)

        # Pick the normalized form corresponding to the training phase.
        return K.in_train_phase(normed_training,
                                normalize_inference,
                                training=training)


class RProjLocallyConnected2D(LowRankBasisLayer):
    """Locally-connected layer for 2D inputs.

    The `LocallyConnected2D` layer works similarly to the `Conv2D` layer,
    except that weights are unshared: a different set of filters is applied
    at each patch of the input.
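    Because weights are unshared, the kernel is stored with shape
    (output_row * output_col,
     kernel_size[0] * kernel_size[1] * input_filter,
     filters),
    i.e. one full filter bank per output location, and only 'valid'
    padding is supported.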
""" # @interfaces.legacy_conv2d_support def __init__(self, offset_creator_class, weight_basis, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(RProjLocallyConnected2D, self).__init__(offset_creator_class, weight_basis, **kwargs) self.filters = filters self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size') self.strides = conv_utils.normalize_tuple(strides, 2, 'strides') self.padding = conv_utils.normalize_padding(padding) if self.padding != 'valid': raise ValueError('Invalid border mode for LocallyConnected2D ' '(only "valid" is supported): ' + padding) self.data_format = conv_utils.normalize_data_format(data_format) self.activation = activations.get(activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.activity_regularizer = regularizers.get(activity_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) self.input_spec = InputSpec(ndim=4) def build(self, input_shape): if self.data_format == 'channels_last': input_row, input_col = input_shape[1:-1] input_filter = input_shape[3] else: input_row, input_col = input_shape[2:] input_filter = input_shape[1] if input_row is None or input_col is None: raise ValueError('The spatial dimensions of the inputs to ' ' a LocallyConnected2D layer ' 'should be fully-defined, but layer received ' 'the inputs shape ' + str(input_shape)) output_row = conv_utils.conv_output_length(input_row, self.kernel_size[0], self.padding, self.strides[0]) output_col = conv_utils.conv_output_length(input_col, self.kernel_size[1], self.padding, self.strides[1]) self.output_row = output_row self.output_col = output_col self.kernel_shape = (output_row * output_col, self.kernel_size[0] * self.kernel_size[1] * input_filter, self.filters) self.kernel = self.add_weight(shape=self.kernel_shape, initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) if self.use_bias: self.bias = self.add_weight(shape=(output_row, output_col, self.filters), initializer=self.bias_initializer, name='bias', regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None if self.data_format == 'channels_first': self.input_spec = InputSpec(ndim=4, axes={1: input_filter}) else: self.input_spec = InputSpec(ndim=4, axes={-1: input_filter}) self.built = True def compute_output_shape(self, input_shape): if self.data_format == 'channels_first': rows = input_shape[2] cols = input_shape[3] elif self.data_format == 'channels_last': rows = input_shape[1] cols = input_shape[2] rows = conv_utils.conv_output_length(rows, self.kernel_size[0], self.padding, self.strides[0]) cols = conv_utils.conv_output_length(cols, self.kernel_size[1], self.padding, self.strides[1]) if self.data_format == 'channels_first': return (input_shape[0], self.filters, rows, cols) elif self.data_format == 'channels_last': return (input_shape[0], rows, cols, self.filters) def call(self, inputs): _, _, filters = self.kernel_shape output = K.local_conv2d(inputs, self.kernel, 
self.kernel_size, self.strides, (self.output_row, self.output_col), self.data_format) if self.use_bias: if self.data_format == 'channels_first' or self.data_format == 'channels_last': output = K.bias_add(output, self.bias, data_format=self.data_format) output = self.activation(output) return output def get_config(self): config = { 'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint) } base_config = super(LocallyConnected2D, self).get_config() return dict(list(base_config.items()) + list(config.items()))
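# ---------------------------------------------------------------------------
# Minimal usage sketch. The offset creator below is a hypothetical stand-in
# for the real OffsetCreator*Proj classes in rproj_layers_util: the only
# contract LowRankBasisLayer.add_weight relies on is a
# create_theta_offset(weight_basis, shape, dtype, name) method returning a
# callable object with a `ww` attribute. Here the offset is simply a
# trainable tensor shaped like theta_0 (a "full-rank basis"), which is enough
# to exercise the layers end to end; it assumes the same legacy tf.keras
# internals (e.g. Layer._non_trainable_weights) that the module itself uses.
# ---------------------------------------------------------------------------
if __name__ == '__main__':

    class _FullRankOffset:
        '''Callable offset object: a trainable tensor shaped like theta_0.'''

        def __init__(self, shape, dtype, name):
            self.ww = tf.Variable(tf.zeros(shape, dtype=dtype),
                                  trainable=True, name='%s_offset' % name)

        def __call__(self):
            return self.ww

    class _FullRankOffsetCreator:
        def create_theta_offset(self, weight_basis, shape, dtype=None, name=None):
            return _FullRankOffset(shape, dtype or K.floatx(), name or 'w')

    dense = RProjDense(_FullRankOffsetCreator, None, units=8, activation='relu')
    y = dense(tf.zeros((4, 16)))  # weights materialize as theta_0 + offset()
    print(y.shape)  # -> (4, 8) in eager mode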
[ "tensorflow.keras.backend.floatx", "tensorflow.keras.constraints.serialize", "tensorflow.keras.regularizers.serialize", "tensorflow.keras.backend.moving_average_update", "tensorflow.keras.backend.batch_normalization", "tensorflow.python.keras.utils.conv_utils.conv_output_length", "tensorflow.keras.backend.normalize_batch_in_training", "tensorflow.keras.backend.local_conv2d", "tensorflow.keras.backend.in_train_phase", "tensorflow.keras.backend.int_shape", "tensorflow.keras.backend.reshape", "tensorflow.keras.layers.InputSpec", "tensorflow.keras.initializers.get", "tensorflow.keras.backend.bias_add", "tensorflow.keras.backend.conv3d", "tensorflow.keras.backend.dot", "tensorflow.keras.initializers.serialize", "tensorflow.python.keras.utils.conv_utils.normalize_tuple", "tensorflow.keras.backend.conv2d", "tensorflow.keras.constraints.get", "tensorflow.keras.activations.serialize", "tensorflow.keras.regularizers.get", "tensorflow.keras.backend.conv1d", "tensorflow.python.keras.utils.conv_utils.normalize_data_format", "tensorflow.keras.activations.get", "tensorflow.python.keras.utils.conv_utils.normalize_padding" ]
src/subspace/keras_ext/rproj_layers.py
[(78, 'tensorflow.keras.initializers.get', 'initializers.get', (['initializer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (130, 'tensorflow.keras.initializers.get', 'initializers.get', (['initializer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (161, 'tensorflow.keras.activations.get', 'activations.get', (['activation'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (163, 'tensorflow.keras.initializers.get', 'initializers.get', (['kernel_initializer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (164, 'tensorflow.keras.initializers.get', 'initializers.get', (['bias_initializer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (165, 'tensorflow.keras.regularizers.get', 'regularizers.get', (['kernel_regularizer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (166, 'tensorflow.keras.regularizers.get', 'regularizers.get', (['bias_regularizer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (167, 'tensorflow.keras.regularizers.get', 'regularizers.get', (['activity_regularizer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (168, 'tensorflow.keras.constraints.get', 'constraints.get', (['kernel_constraint'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (169, 'tensorflow.keras.constraints.get', 'constraints.get', (['bias_constraint'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (170, 'tensorflow.keras.layers.InputSpec', 'InputSpec', ([], {'min_ndim': '(2)'}), False, 'from tensorflow.keras.layers import Layer, InputSpec\n'), (190, 'tensorflow.keras.layers.InputSpec', 'InputSpec', ([], {'min_ndim': '(2)', 'axes': '{(-1): input_dim}'}), False, 'from tensorflow.keras.layers import Layer, InputSpec\n'), (202, 'tensorflow.keras.backend.dot', 'K.dot', (['inputs', 'k'], {}), True, 'import tensorflow.keras.backend as K\n'), (244, 'tensorflow.python.keras.utils.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['kernel_size', 'rank', '"""kernel_size"""'], {}), False, 'from tensorflow.python.keras.utils import conv_utils\n'), (245, 'tensorflow.python.keras.utils.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['strides', 'rank', '"""strides"""'], {}), False, 'from tensorflow.python.keras.utils import conv_utils\n'), (246, 'tensorflow.python.keras.utils.conv_utils.normalize_padding', 'conv_utils.normalize_padding', (['padding'], {}), False, 'from tensorflow.python.keras.utils import conv_utils\n'), (247, 'tensorflow.python.keras.utils.conv_utils.normalize_data_format', 'conv_utils.normalize_data_format', (['data_format'], {}), False, 'from tensorflow.python.keras.utils import conv_utils\n'), (248, 'tensorflow.python.keras.utils.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['dilation_rate', 'rank', '"""dilation_rate"""'], {}), False, 'from tensorflow.python.keras.utils import conv_utils\n'), (249, 'tensorflow.keras.activations.get', 'activations.get', (['activation'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (251, 'tensorflow.keras.initializers.get', 'initializers.get', 
(['kernel_initializer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (252, 'tensorflow.keras.initializers.get', 'initializers.get', (['bias_initializer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (253, 'tensorflow.keras.regularizers.get', 'regularizers.get', (['kernel_regularizer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (254, 'tensorflow.keras.regularizers.get', 'regularizers.get', (['bias_regularizer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (255, 'tensorflow.keras.regularizers.get', 'regularizers.get', (['activity_regularizer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (256, 'tensorflow.keras.constraints.get', 'constraints.get', (['kernel_constraint'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (257, 'tensorflow.keras.constraints.get', 'constraints.get', (['bias_constraint'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (258, 'tensorflow.keras.layers.InputSpec', 'InputSpec', ([], {'ndim': '(self.rank + 2)'}), False, 'from tensorflow.keras.layers import Layer, InputSpec\n'), (285, 'tensorflow.keras.layers.InputSpec', 'InputSpec', ([], {'ndim': '(self.rank + 2)', 'axes': '{channel_axis: input_dim}'}), False, 'from tensorflow.keras.layers import Layer, InputSpec\n'), (395, 'tensorflow.keras.layers.InputSpec', 'InputSpec', ([], {'ndim': '(4)'}), False, 'from tensorflow.keras.layers import Layer, InputSpec\n'), (424, 'tensorflow.keras.layers.InputSpec', 'InputSpec', ([], {'ndim': '(self.rank + 2)', 'axes': '{channel_axis: input_dim}'}), False, 'from tensorflow.keras.layers import Layer, InputSpec\n'), (499, 'tensorflow.keras.initializers.get', 'initializers.get', (['beta_initializer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (500, 'tensorflow.keras.initializers.get', 'initializers.get', (['gamma_initializer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (501, 'tensorflow.keras.initializers.get', 'initializers.get', (['moving_mean_initializer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (502, 'tensorflow.keras.initializers.get', 'initializers.get', (['moving_variance_initializer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (503, 'tensorflow.keras.regularizers.get', 'regularizers.get', (['beta_regularizer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (504, 'tensorflow.keras.regularizers.get', 'regularizers.get', (['gamma_regularizer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (505, 'tensorflow.keras.constraints.get', 'constraints.get', (['beta_constraint'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (506, 'tensorflow.keras.constraints.get', 'constraints.get', (['gamma_constraint'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (546, 'tensorflow.keras.backend.int_shape', 'K.int_shape', (['inputs'], {}), True, 'import tensorflow.keras.backend as 
K\n'), (594, 'tensorflow.keras.backend.normalize_batch_in_training', 'K.normalize_batch_in_training', (['inputs', 'self.gamma', 'self.beta', 'reduction_axes'], {'epsilon': 'self.epsilon'}), True, 'import tensorflow.keras.backend as K\n'), (607, 'tensorflow.keras.backend.in_train_phase', 'K.in_train_phase', (['normed_training', 'normalize_inference'], {'training': 'training'}), True, 'import tensorflow.keras.backend as K\n'), (640, 'tensorflow.python.keras.utils.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['kernel_size', '(2)', '"""kernel_size"""'], {}), False, 'from tensorflow.python.keras.utils import conv_utils\n'), (641, 'tensorflow.python.keras.utils.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['strides', '(2)', '"""strides"""'], {}), False, 'from tensorflow.python.keras.utils import conv_utils\n'), (642, 'tensorflow.python.keras.utils.conv_utils.normalize_padding', 'conv_utils.normalize_padding', (['padding'], {}), False, 'from tensorflow.python.keras.utils import conv_utils\n'), (646, 'tensorflow.python.keras.utils.conv_utils.normalize_data_format', 'conv_utils.normalize_data_format', (['data_format'], {}), False, 'from tensorflow.python.keras.utils import conv_utils\n'), (647, 'tensorflow.keras.activations.get', 'activations.get', (['activation'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (649, 'tensorflow.keras.initializers.get', 'initializers.get', (['kernel_initializer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (650, 'tensorflow.keras.initializers.get', 'initializers.get', (['bias_initializer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (651, 'tensorflow.keras.regularizers.get', 'regularizers.get', (['kernel_regularizer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (652, 'tensorflow.keras.regularizers.get', 'regularizers.get', (['bias_regularizer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (653, 'tensorflow.keras.regularizers.get', 'regularizers.get', (['activity_regularizer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (654, 'tensorflow.keras.constraints.get', 'constraints.get', (['kernel_constraint'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (655, 'tensorflow.keras.constraints.get', 'constraints.get', (['bias_constraint'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (656, 'tensorflow.keras.layers.InputSpec', 'InputSpec', ([], {'ndim': '(4)'}), False, 'from tensorflow.keras.layers import Layer, InputSpec\n'), (670, 'tensorflow.python.keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['input_row', 'self.kernel_size[0]', 'self.padding', 'self.strides[0]'], {}), False, 'from tensorflow.python.keras.utils import conv_utils\n'), (672, 'tensorflow.python.keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['input_col', 'self.kernel_size[1]', 'self.padding', 'self.strides[1]'], {}), False, 'from tensorflow.python.keras.utils import conv_utils\n'), (706, 'tensorflow.python.keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['rows', 'self.kernel_size[0]', 'self.padding', 'self.strides[0]'], {}), False, 'from 
tensorflow.python.keras.utils import conv_utils\n'), (708, 'tensorflow.python.keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['cols', 'self.kernel_size[1]', 'self.padding', 'self.strides[1]'], {}), False, 'from tensorflow.python.keras.utils import conv_utils\n'), (719, 'tensorflow.keras.backend.local_conv2d', 'K.local_conv2d', (['inputs', 'self.kernel', 'self.kernel_size', 'self.strides', '(self.output_row, self.output_col)', 'self.data_format'], {}), True, 'import tensorflow.keras.backend as K\n'), (80, 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), True, 'import tensorflow.keras.backend as K\n'), (132, 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), True, 'import tensorflow.keras.backend as K\n'), (204, 'tensorflow.keras.backend.bias_add', 'K.bias_add', (['output', 'b'], {}), True, 'import tensorflow.keras.backend as K\n'), (291, 'tensorflow.keras.backend.conv1d', 'K.conv1d', (['inputs', 'self.kernel'], {'strides': 'self.strides[0]', 'padding': 'self.padding', 'data_format': 'self.data_format', 'dilation_rate': 'self.dilation_rate[0]'}), True, 'import tensorflow.keras.backend as K\n'), (299, 'tensorflow.keras.backend.conv2d', 'K.conv2d', (['inputs', 'self.kernel'], {'strides': 'self.strides', 'padding': 'self.padding', 'data_format': 'self.data_format', 'dilation_rate': 'self.dilation_rate'}), True, 'import tensorflow.keras.backend as K\n'), (307, 'tensorflow.keras.backend.conv3d', 'K.conv3d', (['inputs', 'self.kernel'], {'strides': 'self.strides', 'padding': 'self.padding', 'data_format': 'self.data_format', 'dilation_rate': 'self.dilation_rate'}), True, 'import tensorflow.keras.backend as K\n'), (316, 'tensorflow.keras.backend.bias_add', 'K.bias_add', (['outputs', 'self.bias'], {'data_format': 'self.data_format'}), True, 'import tensorflow.keras.backend as K\n'), (438, 'tensorflow.keras.backend.conv2d', 'K.conv2d', (['inputs', 'k'], {'strides': 'self.strides', 'padding': 'self.padding', 'data_format': 'self.data_format', 'dilation_rate': 'self.dilation_rate'}), True, 'import tensorflow.keras.backend as K\n'), (447, 'tensorflow.keras.backend.bias_add', 'K.bias_add', (['outputs', 'b'], {'data_format': 'self.data_format'}), True, 'import tensorflow.keras.backend as K\n'), (693, 'tensorflow.keras.layers.InputSpec', 'InputSpec', ([], {'ndim': '(4)', 'axes': '{(1): input_filter}'}), False, 'from tensorflow.keras.layers import Layer, InputSpec\n'), (695, 'tensorflow.keras.layers.InputSpec', 'InputSpec', ([], {'ndim': '(4)', 'axes': '{(-1): input_filter}'}), False, 'from tensorflow.keras.layers import Layer, InputSpec\n'), (740, 'tensorflow.keras.activations.serialize', 'activations.serialize', (['self.activation'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (742, 'tensorflow.keras.initializers.serialize', 'initializers.serialize', (['self.kernel_initializer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (743, 'tensorflow.keras.initializers.serialize', 'initializers.serialize', (['self.bias_initializer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (744, 'tensorflow.keras.regularizers.serialize', 'regularizers.serialize', (['self.kernel_regularizer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (745, 'tensorflow.keras.regularizers.serialize', 'regularizers.serialize', (['self.bias_regularizer'], {}), False, 'from 
tensorflow.keras import regularizers, constraints, initializers, activations\n'), (746, 'tensorflow.keras.regularizers.serialize', 'regularizers.serialize', (['self.activity_regularizer'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (747, 'tensorflow.keras.constraints.serialize', 'constraints.serialize', (['self.kernel_constraint'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (748, 'tensorflow.keras.constraints.serialize', 'constraints.serialize', (['self.bias_constraint'], {}), False, 'from tensorflow.keras import regularizers, constraints, initializers, activations\n'), (84, 'src.subspace.keras_ext.rproj_layers_util._convert_string_dtype', '_convert_string_dtype', (['dtype'], {}), False, 'from src.subspace.keras_ext.rproj_layers_util import _convert_string_dtype\n'), (330, 'tensorflow.python.keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['space[i]', 'self.kernel_size[i]'], {'padding': 'self.padding', 'stride': 'self.strides[i]', 'dilation': 'self.dilation_rate[i]'}), False, 'from tensorflow.python.keras.utils import conv_utils\n'), (342, 'tensorflow.python.keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['space[i]', 'self.kernel_size[i]'], {'padding': 'self.padding', 'stride': 'self.strides[i]', 'dilation': 'self.dilation_rate[i]'}), False, 'from tensorflow.python.keras.utils import conv_utils\n'), (463, 'tensorflow.python.keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['space[i]', 'self.kernel_size[i]'], {'padding': 'self.padding', 'stride': 'self.strides[i]', 'dilation': 'self.dilation_rate[i]'}), False, 'from tensorflow.python.keras.utils import conv_utils\n'), (560, 'tensorflow.keras.backend.reshape', 'K.reshape', (['self.moving_mean', 'broadcast_shape'], {}), True, 'import tensorflow.keras.backend as K\n'), (562, 'tensorflow.keras.backend.reshape', 'K.reshape', (['self.moving_variance', 'broadcast_shape'], {}), True, 'import tensorflow.keras.backend as K\n'), (573, 'tensorflow.keras.backend.batch_normalization', 'K.batch_normalization', (['inputs', 'broadcast_moving_mean', 'broadcast_moving_variance', 'broadcast_beta', 'broadcast_gamma'], {'epsilon': 'self.epsilon'}), True, 'import tensorflow.keras.backend as K\n'), (581, 'tensorflow.keras.backend.batch_normalization', 'K.batch_normalization', (['inputs', 'self.moving_mean', 'self.moving_variance', 'self.beta', 'self.gamma'], {'epsilon': 'self.epsilon'}), True, 'import tensorflow.keras.backend as K\n'), (598, 'tensorflow.keras.backend.moving_average_update', 'K.moving_average_update', (['self.moving_mean', 'mean', 'self.momentum'], {}), True, 'import tensorflow.keras.backend as K\n'), (601, 'tensorflow.keras.backend.moving_average_update', 'K.moving_average_update', (['self.moving_variance', 'variance', 'self.momentum'], {}), True, 'import tensorflow.keras.backend as K\n'), (728, 'tensorflow.keras.backend.bias_add', 'K.bias_add', (['output', 'self.bias'], {'data_format': 'self.data_format'}), True, 'import tensorflow.keras.backend as K\n'), (565, 'tensorflow.keras.backend.reshape', 'K.reshape', (['self.beta', 'broadcast_shape'], {}), True, 'import tensorflow.keras.backend as K\n'), (569, 'tensorflow.keras.backend.reshape', 'K.reshape', (['self.gamma', 'broadcast_shape'], {}), True, 'import tensorflow.keras.backend as K\n')]