# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from keras_nlp.layers.modeling.cached_multi_head_attention import (
    CachedMultiHeadAttention,
)
from keras_nlp.layers.modeling.f_net_encoder import FNetEncoder
from keras_nlp.layers.modeling.masked_lm_head import MaskedLMHead
from keras_nlp.layers.modeling.position_embedding import PositionEmbedding
from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding
from keras_nlp.layers.modeling.rotary_embedding import RotaryEmbedding
from keras_nlp.layers.modeling.sine_position_encoding import (
    SinePositionEncoding,
)
from keras_nlp.layers.modeling.token_and_position_embedding import (
    TokenAndPositionEmbedding,
)
from keras_nlp.layers.modeling.transformer_decoder import TransformerDecoder
from keras_nlp.layers.modeling.transformer_encoder import TransformerEncoder
from keras_nlp.layers.preprocessing.masked_lm_mask_generator import (
    MaskedLMMaskGenerator,
)
from keras_nlp.layers.preprocessing.multi_segment_packer import (
    MultiSegmentPacker,
)
from keras_nlp.layers.preprocessing.random_deletion import RandomDeletion
from keras_nlp.layers.preprocessing.random_swap import RandomSwap
from keras_nlp.layers.preprocessing.start_end_packer import StartEndPacker
keras-nlp/keras_nlp/layers/__init__.py/0
{ "file_path": "keras-nlp/keras_nlp/layers/__init__.py", "repo_id": "keras-nlp", "token_count": 582 }
136
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.backend import ops @keras_nlp_export("keras_nlp.layers.SinePositionEncoding") class SinePositionEncoding(keras.layers.Layer): """Sinusoidal positional encoding layer. This layer calculates the position encoding as a mix of sine and cosine functions with geometrically increasing wavelengths. Defined and formulized in [Attention is All You Need](https://arxiv.org/abs/1706.03762). Takes as input an embedded token tensor. The input must have shape [batch_size, sequence_length, feature_size]. This layer will return a positional encoding the same size as the embedded token tensor, which can be added directly to the embedded token tensor. Args: max_wavelength: The maximum angular wavelength of the sine/cosine curves, as described in Attention is All You Need. Defaults to `10000`. Call arguments: inputs: The tensor inputs to compute an embedding for, with shape `(batch_size, sequence_length, hidden_dim)`. start_index: An integer or integer tensor. The starting position to compute the encoding from. This is useful during cached decoding, where each position is predicted separately in a loop. Examples: ```python # create a simple embedding layer with sinusoidal positional encoding seq_len = 100 vocab_size = 1000 embedding_dim = 32 inputs = keras.Input((seq_len,), dtype="float32") embedding = keras.layers.Embedding( input_dim=vocab_size, output_dim=embedding_dim )(inputs) positional_encoding = keras_nlp.layers.SinePositionEncoding()(embedding) outputs = embedding + positional_encoding ``` References: - [Vaswani et al., 2017](https://arxiv.org/abs/1706.03762) """ def __init__( self, max_wavelength=10000, **kwargs, ): super().__init__(**kwargs) self.max_wavelength = max_wavelength self.built = True def call(self, inputs, start_index=0): shape = ops.shape(inputs) seq_length = shape[-2] hidden_size = shape[-1] positions = ops.arange(seq_length) positions = ops.cast(positions + start_index, self.compute_dtype) min_freq = ops.cast(1 / self.max_wavelength, dtype=self.compute_dtype) timescales = ops.power( min_freq, ops.cast(2 * (ops.arange(hidden_size) // 2), self.compute_dtype) / ops.cast(hidden_size, self.compute_dtype), ) angles = ops.expand_dims(positions, 1) * ops.expand_dims(timescales, 0) # even indices are sine, odd are cosine cos_mask = ops.cast(ops.arange(hidden_size) % 2, self.compute_dtype) sin_mask = 1 - cos_mask # embedding shape is [seq_length, hidden_size] positional_encodings = ( ops.sin(angles) * sin_mask + ops.cos(angles) * cos_mask ) return ops.broadcast_to(positional_encodings, shape) def get_config(self): config = super().get_config() config.update( { "max_wavelength": self.max_wavelength, } ) return config def compute_output_shape(self, input_shape): return input_shape
keras-nlp/keras_nlp/layers/modeling/sine_position_encoding.py/0
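A quick way to sanity-check the layer above is to reproduce its math in plain NumPy. The following standalone sketch (the function name and unbatched output shape are my own simplifications) mirrors the `call` method: consecutive feature pairs share one timescale, even feature indices carry sine, odd indices carry cosine.

```python
import numpy as np

def sine_position_encoding(seq_length, hidden_size, max_wavelength=10000):
    """Sinusoidal encodings, shape (seq_length, hidden_size)."""
    positions = np.arange(seq_length)[:, None]       # (seq, 1)
    feature_index = np.arange(hidden_size)[None, :]  # (1, hidden)
    # Feature pairs (2i, 2i + 1) share one timescale, hence the `// 2`.
    min_freq = 1.0 / max_wavelength
    timescales = min_freq ** (2 * (feature_index // 2) / hidden_size)
    angles = positions * timescales
    # Even feature indices get sine, odd indices get cosine.
    return np.where(feature_index % 2 == 0, np.sin(angles), np.cos(angles))

print(sine_position_encoding(4, 6)[0])  # position 0 -> [0, 1, 0, 1, 0, 1]
```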
{ "file_path": "keras-nlp/keras_nlp/layers/modeling/sine_position_encoding.py", "repo_id": "keras-nlp", "token_count": 1533 }
137
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import tensorflow as tf from keras_nlp.api_export import keras_nlp_export from keras_nlp.layers.preprocessing.preprocessing_layer import ( PreprocessingLayer, ) from keras_nlp.utils.tensor_utils import convert_to_ragged_batch from keras_nlp.utils.tensor_utils import is_int_dtype from keras_nlp.utils.tensor_utils import is_string_dtype @keras_nlp_export("keras_nlp.layers.RandomDeletion") class RandomDeletion(PreprocessingLayer): """Augments input by randomly deleting tokens. This layer comes in handy when you need to generate new data using deletion augmentation as described in the paper [EDA: Easy Data Augmentation Techniques for Boosting Performance on Text Classification Tasks] (https://arxiv.org/pdf/1901.11196.pdf). The layer expects the inputs to be pre-split into token level inputs. This allows control over the level of augmentation, you can split by character for character level swaps, or by word for word level swaps. Input data should be passed as tensors, `tf.RaggedTensor`s, or lists. For batched input, inputs should be a list of lists or a rank two tensor. For unbatched inputs, each element should be a list or a rank one tensor. Args: rate: The probability of a token being chosen for deletion. max_deletions: The maximum number of tokens to delete. skip_list: A list of token values that should not be considered candidates for deletion. skip_fn: A function that takes as input a scalar tensor token and returns as output a scalar tensor True/False value. A value of True indicates that the token should not be considered a candidate for deletion. This function must be tracable--it should consist of tensorflow operations. skip_py_fn: A function that takes as input a python token value and returns as output `True` or `False`. A value of True indicates that should not be considered a candidate for deletion. Unlike the `skip_fn` argument, this argument need not be tracable--it can be any python function. seed: A seed for the random number generator. Examples: Word level usage. >>> keras.utils.set_random_seed(1337) >>> inputs=tf.strings.split(["Hey I like", "Keras and Tensorflow"]) >>> augmenter=keras_nlp.layers.RandomDeletion(rate=0.4, seed=42) >>> augmented=augmenter(inputs) >>> tf.strings.reduce_join(augmented, separator=" ", axis=-1) <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'I like', b'and'], dtype=object)> Character level usage. >>> keras.utils.set_random_seed(1337) >>> inputs=tf.strings.unicode_split(["Hey Dude", "Speed Up"], "UTF-8") >>> augmenter=keras_nlp.layers.RandomDeletion(rate=0.4, seed=42) >>> augmented=augmenter(inputs) >>> tf.strings.reduce_join(augmented, axis=-1) <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'H Dude', b'pedUp'], dtype=object)> Usage with skip_list. >>> keras.utils.set_random_seed(1337) >>> inputs=tf.strings.split(["Hey I like", "Keras and Tensorflow"]) >>> augmenter=keras_nlp.layers.RandomDeletion(rate=0.4, ... 
skip_list=["Keras", "Tensorflow"], seed=42) >>> augmented=augmenter(inputs) >>> tf.strings.reduce_join(augmented, separator=" ", axis=-1) <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'I like', b'Keras Tensorflow'], dtype=object)> Usage with skip_fn. >>> def skip_fn(word): ... return tf.strings.regex_full_match(word, r"\\pP") >>> keras.utils.set_random_seed(1337) >>> inputs=tf.strings.split(["Hey I like", "Keras and Tensorflow"]) >>> augmenter=keras_nlp.layers.RandomDeletion(rate=0.4, ... skip_fn=skip_fn, seed=42) >>> augmented=augmenter(inputs) >>> tf.strings.reduce_join(augmented, separator=" ", axis=-1) <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'I like', b'and'], dtype=object)> Usage with skip_py_fn. >>> def skip_py_fn(word): ... return len(word) < 4 >>> keras.utils.set_random_seed(1337) >>> inputs=tf.strings.split(["Hey I like", "Keras and Tensorflow"]) >>> augmenter=RandomDeletion(rate=0.4, ... skip_py_fn=skip_py_fn, seed=42) >>> augmented=augmenter(inputs) >>> tf.strings.reduce_join(augmented, separator=" ", axis=-1) <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'Hey I', b'and Tensorflow'], dtype=object)> """ def __init__( self, rate, max_deletions=None, skip_list=None, skip_fn=None, skip_py_fn=None, seed=None, name=None, dtype="int32", **kwargs, ): if not is_int_dtype(dtype) and not is_string_dtype(dtype): raise ValueError( "Output dtype must be an integer type or a string. " f"Received: dtype={dtype}" ) super().__init__(dtype=dtype, name=name, **kwargs) self.rate = rate self.max_deletions = max_deletions self.seed = random.randint(1, 1e9) if seed is None else seed self._generator = tf.random.Generator.from_seed(self.seed) self.skip_list = skip_list self.skip_fn = skip_fn self.skip_py_fn = skip_py_fn if self.max_deletions is not None and self.max_deletions < 0: raise ValueError( "max_deletions must be non-negative." f"Received max_deletions={max_deletions}." ) if self.rate > 1 or self.rate < 0: raise ValueError( "Rate must be between 0 and 1 (both inclusive)." f"Received: rate={rate}" ) if [self.skip_list, self.skip_fn, self.skip_py_fn].count(None) < 2: raise ValueError( "Exactly one of `skip_list`, `skip_fn`, `skip_py_fn` must be " "provided." ) if self.skip_list: self.StaticHashTable = tf.lookup.StaticHashTable( tf.lookup.KeyValueTensorInitializer( tf.convert_to_tensor(self.skip_list), tf.convert_to_tensor([True] * len(self.skip_list)), ), default_value=False, ) def call(self, inputs): inputs, unbatched, _ = convert_to_ragged_batch(inputs) skip_masks = None if self.skip_list: skip_masks = self.StaticHashTable.lookup(inputs.flat_values) elif self.skip_fn: skip_masks = tf.map_fn( self.skip_fn, inputs.flat_values, fn_output_signature="bool" ) elif self.skip_py_fn: def string_fn(token): return self.skip_py_fn(token.numpy().decode("utf-8")) def int_fn(token): return self.skip_py_fn(token.numpy()) py_fn = string_fn if inputs.dtype == tf.string else int_fn skip_masks = tf.map_fn( lambda x: tf.py_function(py_fn, [x], "bool"), inputs.flat_values, fn_output_signature="bool", ) positions_flat = tf.range(tf.size(inputs.flat_values)) positions = inputs.with_flat_values(positions_flat) if skip_masks is not None: skip_masks = tf.logical_not(skip_masks) skip_masks.set_shape([None]) positions = tf.ragged.boolean_mask( positions, inputs.with_flat_values(skip_masks) ) # Figure out how many we are going to select. 
token_counts = tf.cast(positions.row_lengths(), "float32") num_to_select = tf.random.stateless_binomial( shape=tf.shape(token_counts), seed=self._generator.make_seeds()[:, 0], counts=token_counts, probs=self.rate, ) if self.max_deletions is not None: num_to_select = tf.math.minimum(num_to_select, self.max_deletions) num_to_select = tf.cast(num_to_select, "int64") # Shuffle and trim to items that are going to be selected. def _shuffle_and_trim(x): positions, top_n = x shuffled = tf.random.shuffle(positions, seed=self.seed) return shuffled[:top_n] selected_for_mask = tf.map_fn( _shuffle_and_trim, (positions, num_to_select), fn_output_signature=tf.RaggedTensorSpec( ragged_rank=positions.ragged_rank - 1, dtype=positions.dtype ), ) selected_for_mask.flat_values.set_shape([None]) # Construct the mask which is a boolean RT # Scatter 0's to positions that have been selector for deletion. update_values = tf.zeros_like(selected_for_mask.flat_values, "int32") update_indices = selected_for_mask.flat_values update_indices = tf.expand_dims(update_indices, -1) update_indices = tf.cast(update_indices, "int32") mask_flat = tf.ones_like(inputs.flat_values, dtype="int32") mask_flat = tf.tensor_scatter_nd_update( mask_flat, update_indices, update_values ) mask = tf.cast(inputs.with_flat_values(mask_flat), "bool") inputs = tf.ragged.boolean_mask(inputs, mask) if unbatched: inputs = tf.squeeze(inputs, axis=0) return inputs def get_config(self): config = super().get_config() config.update( { "rate": self.rate, "max_deletions": self.max_deletions, "seed": self.seed, "skip_list": self.skip_list, "skip_fn": self.skip_fn, "skip_py_fn": self.skip_py_fn, } ) return config def compute_output_shape(self, inputs_shape): inputs_shape = list(inputs_shape) inputs_shape[-1] = None return tuple(inputs_shape)
keras-nlp/keras_nlp/layers/preprocessing/random_deletion.py/0
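Conceptually, the layer draws a binomial count of tokens to drop and then deletes that many at random positions. A rough word-level sketch of the same idea in plain Python (my own simplification; the `skip_list`/`skip_fn` filtering and ragged batching are omitted):

```python
import random

def random_word_deletion(words, rate, max_deletions=None, seed=None):
    """Drop roughly `rate * len(words)` tokens, at most `max_deletions`."""
    rng = random.Random(seed)
    # Binomial draw: each token is independently selected with prob `rate`.
    num_to_delete = sum(rng.random() < rate for _ in words)
    if max_deletions is not None:
        num_to_delete = min(num_to_delete, max_deletions)
    drop = set(rng.sample(range(len(words)), num_to_delete))
    return [word for i, word in enumerate(words) if i not in drop]

print(" ".join(random_word_deletion("the quick brown fox".split(), 0.4, seed=1)))
```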
{ "file_path": "keras-nlp/keras_nlp/layers/preprocessing/random_deletion.py", "repo_id": "keras-nlp", "token_count": 4721 }
138
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_nlp.api_export import keras_nlp_export from keras_nlp.metrics.rouge_base import RougeBase @keras_nlp_export("keras_nlp.metrics.RougeN") class RougeN(RougeBase): """ROUGE-N metric. This class implements the ROUGE-N variant of the ROUGE metric. The ROUGE-N metric is traditionally used for evaluating summarisation systems. Succinctly put, ROUGE-N is a score based on the number of matching n-grams between the reference text and the hypothesis text. Note on input shapes: For `y_true` and `y_pred`, this class supports scalar values and batch inputs of shapes `()`, `(batch_size,)` and `(batch_size, 1)`. Args: order: The order of n-grams which are to be matched. It should lie in range [1, 9]. Defaults to `2`. use_stemmer: bool. Whether Porter Stemmer should be used to strip word suffixes to improve matching. Defaults to `False`. dtype: string or tf.dtypes.Dtype. Precision of metric computation. If not specified, it defaults to `"float32"`. name: string. Name of the metric instance. **kwargs: Other keyword arguments. References: - [Lin et al., 2004](https://aclanthology.org/W04-1013/) Examples: 1. Python string. >>> rouge_n = keras_nlp.metrics.RougeN(order=2) >>> y_true = "the tiny little cat was found under the big funny bed" >>> y_pred = "the cat was under the bed" >>> rouge_n(y_true, y_pred)["f1_score"] <tf.Tensor: shape=(), dtype=float32, numpy=0.26666668> 2. List inputs. >>> rouge_n = keras_nlp.metrics.RougeN(order=2) >>> y_true = [ ... "the tiny little cat was found under the big funny bed", ... "i really love contributing to KerasNLP", ... ] >>> y_pred = [ ... "the cat was under the bed", ... "i love contributing to KerasNLP", ... ] >>> rouge_n(y_true, y_pred)["f1_score"] <tf.Tensor: shape=(), dtype=float32, numpy=0.4666667> 3. 2D inputs. >>> rouge_n = keras_nlp.metrics.RougeN(order=2) >>> y_true =[ ... ["the tiny little cat was found under the big funny bed"], ... ["i really love contributing to KerasNLP"], ... ] >>> y_pred =[ ... ["the cat was under the bed"], ... ["i love contributing to KerasNLP"], ... ] >>> rouge_n(y_true, y_pred)["f1_score"] <tf.Tensor: shape=(), dtype=float32, numpy=0.4666667> 4. Trigrams. >>> rouge_n = keras_nlp.metrics.RougeN(order=3) >>> y_true = [ ... "the tiny little cat was found under the big funny bed", ... "i really love contributing to KerasNLP", ... ] >>> y_pred = [ ... "the cat was under the bed", ... "i love contributing to KerasNLP", ... ] >>> rouge_n(y_true, y_pred)["f1_score"] <tf.Tensor: shape=(), dtype=float32, numpy=0.2857143> """ def __init__( self, order=2, use_stemmer=False, name="rouge-n", **kwargs, ): if order not in range(1, 10): raise ValueError( "Invalid `order` value. Should lie in the range [1, 9]." 
f"Received order={order}" ) super().__init__( variant=f"rouge{order}", use_stemmer=use_stemmer, name=name, **kwargs, ) self.order = order def get_config(self): config = super().get_config() del config["variant"] config.update( { "order": self.order, } ) return config
keras-nlp/keras_nlp/metrics/rouge_n.py/0
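The numbers in the docstring examples above are ordinary clipped n-gram overlap F1 scores. A from-scratch sketch of the arithmetic (no stemming; the metric class itself delegates the real computation to a backend package) that reproduces the first example:

```python
from collections import Counter

def rouge_n_f1(reference, hypothesis, order=2):
    """Clipped n-gram overlap F1, the quantity ROUGE-N reports."""
    def ngrams(text):
        tokens = text.split()
        return Counter(
            tuple(tokens[i : i + order])
            for i in range(len(tokens) - order + 1)
        )
    ref, hyp = ngrams(reference), ngrams(hypothesis)
    overlap = sum((ref & hyp).values())  # clipped matches
    if overlap == 0:
        return 0.0
    precision = overlap / sum(hyp.values())
    recall = overlap / sum(ref.values())
    return 2 * precision * recall / (precision + recall)

print(rouge_n_f1(
    "the tiny little cat was found under the big funny bed",
    "the cat was under the bed",
))  # ~0.2667, matching the first docstring example
```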
{ "file_path": "keras-nlp/keras_nlp/metrics/rouge_n.py", "repo_id": "keras-nlp", "token_count": 1766 }
139
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from keras_nlp.models.albert.albert_tokenizer import AlbertTokenizer from keras_nlp.tests.test_case import TestCase class AlbertTokenizerTest(TestCase): def setUp(self): self.init_kwargs = { # Generated using create_albert_test_proto.py "proto": os.path.join( self.get_test_data_dir(), "albert_test_vocab.spm" ) } self.input_data = ["the quick brown fox", "the earth is round"] def test_tokenizer_basics(self): self.run_preprocessing_layer_test( cls=AlbertTokenizer, init_kwargs=self.init_kwargs, input_data=self.input_data, expected_output=[[5, 10, 6, 8], [5, 7, 9, 11]], ) def test_errors_missing_special_tokens(self): with self.assertRaises(ValueError): AlbertTokenizer( # Generated using create_no_special_token_proto.py proto=os.path.join( self.get_test_data_dir(), "no_special_token_vocab.spm" ) ) @pytest.mark.large def test_smallest_preset(self): self.run_preset_test( cls=AlbertTokenizer, preset="albert_base_en_uncased", input_data=["The quick brown fox."], expected_output=[[13, 1, 438, 2231, 886, 2385, 9]], ) @pytest.mark.extra_large def test_all_presets(self): for preset in AlbertTokenizer.presets: self.run_preset_test( cls=AlbertTokenizer, preset=preset, input_data=self.input_data, )
keras-nlp/keras_nlp/models/albert/albert_tokenizer_test.py/0
{ "file_path": "keras-nlp/keras_nlp/models/albert/albert_tokenizer_test.py", "repo_id": "keras-nlp", "token_count": 1001 }
140
# Copyright 2024 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from keras_nlp.models.gemma.gemma_causal_lm_preprocessor import ( GemmaCausalLMPreprocessor, ) from keras_nlp.models.gemma.gemma_tokenizer import GemmaTokenizer from keras_nlp.tests.test_case import TestCase @pytest.mark.keras_3_only class GemmaCausalLMPreprocessorTest(TestCase): def setUp(self): self.tokenizer = GemmaTokenizer( proto=os.path.join( self.get_test_data_dir(), "gemma_test_vocab.spm" ), ) self.init_kwargs = { "tokenizer": self.tokenizer, "sequence_length": 8, } self.input_data = ["the quick brown fox"] def test_preprocessor_basics(self): self.run_preprocessing_layer_test( cls=GemmaCausalLMPreprocessor, init_kwargs=self.init_kwargs, input_data=self.input_data, expected_output=( { "token_ids": [[1, 4, 9, 5, 7, 2, 0, 0]], "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0]], }, [[4, 9, 5, 7, 2, 0, 0, 0]], # Labels shifted. [[1, 1, 1, 1, 1, 0, 0, 0]], # Zero out unlabeled examples. ), ) def test_no_start_end_token(self): input_data = ["the quick brown fox"] * 4 preprocessor = GemmaCausalLMPreprocessor( **self.init_kwargs, add_start_token=False, add_end_token=False, ) x, y, sw = preprocessor(input_data) self.assertAllEqual(x["token_ids"], [[4, 9, 5, 7, 0, 0, 0, 0]] * 4) self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 0, 0, 0, 0]] * 4) self.assertAllEqual(y, [[9, 5, 7, 0, 0, 0, 0, 0]] * 4) self.assertAllEqual(sw, [[1, 1, 1, 0, 0, 0, 0, 0]] * 4) def test_generate_preprocess(self): input_data = "the quick brown fox" preprocessor = GemmaCausalLMPreprocessor(**self.init_kwargs) x = preprocessor.generate_preprocess(input_data) self.assertAllEqual(x["token_ids"], [1, 4, 9, 5, 7, 0, 0, 0]) self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 0, 0, 0]) def test_generate_postprocess(self): input_data = { "token_ids": [1, 4, 9, 5, 7, 2, 0, 0], "padding_mask": [1, 1, 1, 1, 1, 1, 0, 0], } preprocessor = GemmaCausalLMPreprocessor(**self.init_kwargs) x = preprocessor.generate_postprocess(input_data) self.assertAllEqual(x, "the quick brown fox") @pytest.mark.extra_large def test_all_presets(self): for preset in GemmaCausalLMPreprocessor.presets: self.run_preset_test( cls=GemmaCausalLMPreprocessor, preset=preset, input_data=self.input_data, )
keras-nlp/keras_nlp/models/gemma/gemma_causal_lm_preprocessor_test.py/0
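The `expected_output` above encodes the standard causal LM target construction: labels are the inputs shifted one step left, and sample weights zero out positions that have no real label. A minimal sketch of just that shift (the real preprocessor also tokenizes and packs first):

```python
def shift_for_causal_lm(token_ids, padding_mask):
    """Build (x, y, sample_weight) for next-token prediction."""
    # Labels are the inputs shifted one position left; the final slot
    # gets a pad. Sample weights zero out positions past the last label.
    y = token_ids[1:] + [0]
    sample_weight = padding_mask[1:] + [0]
    x = {"token_ids": token_ids, "padding_mask": padding_mask}
    return x, y, sample_weight

x, y, sw = shift_for_causal_lm(
    [1, 4, 9, 5, 7, 2, 0, 0], [1, 1, 1, 1, 1, 1, 0, 0]
)
print(y)   # [4, 9, 5, 7, 2, 0, 0, 0] -- matches the expected output above
print(sw)  # [1, 1, 1, 1, 1, 0, 0, 0]
```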
{ "file_path": "keras-nlp/keras_nlp/models/gemma/gemma_causal_lm_preprocessor_test.py", "repo_id": "keras-nlp", "token_count": 1604 }
141
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from keras_nlp.models.gpt2.gpt2_causal_lm_preprocessor import ( GPT2CausalLMPreprocessor, ) from keras_nlp.models.gpt2.gpt2_tokenizer import GPT2Tokenizer from keras_nlp.tests.test_case import TestCase class GPT2CausalLMPreprocessorTest(TestCase): def setUp(self): self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"] self.vocab += ["<|endoftext|>"] self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] self.merges += ["Ġai r", "Ġa i", "pla ne"] self.tokenizer = GPT2Tokenizer( vocabulary=self.vocab, merges=self.merges, ) self.init_kwargs = { "tokenizer": self.tokenizer, "sequence_length": 8, } self.input_data = ["airplane at airport"] def test_preprocessor_basics(self): self.run_preprocessor_test( cls=GPT2CausalLMPreprocessor, init_kwargs=self.init_kwargs, input_data=self.input_data, expected_output=( { "token_ids": [[6, 1, 3, 4, 2, 5, 6, 0]], "padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]], }, [[1, 3, 4, 2, 5, 6, 0, 0]], # Pass through labels. [[1, 1, 1, 1, 1, 1, 0, 0]], # Pass through sample_weights. ), ) def test_no_start_end_token(self): input_data = ["airplane at airport"] * 4 preprocessor = GPT2CausalLMPreprocessor( **self.init_kwargs, add_start_token=False, add_end_token=False, ) x, y, sw = preprocessor(input_data) self.assertAllEqual(x["token_ids"], [[1, 3, 4, 2, 5, 0, 0, 0]] * 4) self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 0, 0, 0]] * 4) self.assertAllEqual(y, [[3, 4, 2, 5, 0, 0, 0, 0]] * 4) self.assertAllEqual(sw, [[1, 1, 1, 1, 0, 0, 0, 0]] * 4) def test_generate_preprocess(self): input_data = "airplane at airport" preprocessor = GPT2CausalLMPreprocessor(**self.init_kwargs) x = preprocessor.generate_preprocess(input_data) self.assertAllEqual(x["token_ids"], [6, 1, 3, 4, 2, 5, 0, 0]) self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0]) def test_generate_postprocess(self): input_data = { "token_ids": [6, 1, 3, 4, 2, 5, 0, 0], "padding_mask": [1, 1, 1, 1, 1, 1, 0, 0], } preprocessor = GPT2CausalLMPreprocessor(**self.init_kwargs) x = preprocessor.generate_postprocess(input_data) self.assertAllEqual(x, "airplane at airport") @pytest.mark.extra_large def test_all_presets(self): for preset in GPT2CausalLMPreprocessor.presets: self.run_preset_test( cls=GPT2CausalLMPreprocessor, preset=preset, input_data=self.input_data, )
keras-nlp/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor_test.py/0
{ "file_path": "keras-nlp/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor_test.py", "repo_id": "keras-nlp", "token_count": 1770 }
142
# Copyright 2022 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_nlp.api_export import keras_nlp_export from keras_nlp.layers.preprocessing.start_end_packer import StartEndPacker from keras_nlp.models.gpt_neo_x.gpt_neo_x_tokenizer import GPTNeoXTokenizer from keras_nlp.models.preprocessor import Preprocessor from keras_nlp.utils.keras_utils import ( convert_inputs_to_list_of_tensor_segments, ) from keras_nlp.utils.keras_utils import pack_x_y_sample_weight from keras_nlp.utils.python_utils import classproperty @keras_nlp_export("keras_nlp.models.GPTNeoXPreprocessor") class GPTNeoXPreprocessor(Preprocessor): """GPTNeoX preprocessing layer which tokenizes and packs inputs. This preprocessing layer will do 2 things: - Tokenize the inputs using the `tokenizer`. - Construct a dictionary with keys `"token_ids"`, `"padding_mask"`, that can be passed directly to a `keras_nlp.models.GPTNeoXBackbone`. This layer can be used directly with `tf.data.Dataset.map` to preprocess string data in the `(x, y, sample_weight)` format used by `keras.Model.fit`. The call method of this layer accepts three arguments, `x`, `y`, and `sample_weight`. `x` can be a python string or tensor representing a single segment, a list of python strings representing a batch of single segments, or a list of tensors representing multiple segments to be packed together. `y` and `sample_weight` are both optional, can have any format, and will be passed through unaltered. `GPTNeoXPreprocessor` forces the input to have only one segment, as GPTNeoX is mainly used for generation tasks. For tasks having multi-segment inputs like "glue/mnli", please use a model designed for classification purposes such as BERT or RoBERTa. Args: tokenizer: A `keras_nlp.models.GPTNeoXTokenizer` instance. sequence_length: The length of the packed inputs. add_start_token: If `True`, the preprocessor will prepend the tokenizer start token to each input sequence. add_end_token: If `True`, the preprocessor will append the tokenizer end token to each input sequence. Call arguments: x: A string, `tf.Tensor` or list of python strings. y: Any label data. Will be passed through unaltered. sample_weight: Any label weight data. Will be passed through unaltered. sequence_length: Pass to override the configured `sequence_length` of the layer. """ def __init__( self, tokenizer, sequence_length=1024, add_start_token=True, add_end_token=True, **kwargs, ): super().__init__(**kwargs) self.tokenizer = tokenizer self.packer = None self.sequence_length = sequence_length self.add_start_token = add_start_token self.add_end_token = add_end_token def build(self, input_shape): # Defer packer creation to `build()` so that we can be sure tokenizer # assets have loaded when restoring a saved model. 
self.packer = StartEndPacker( start_value=self.tokenizer.start_token_id, end_value=self.tokenizer.end_token_id, pad_value=self.tokenizer.pad_token_id, sequence_length=self.sequence_length, return_padding_mask=True, ) self.built = True def call( self, x, y=None, sample_weight=None, sequence_length=None, ): x = convert_inputs_to_list_of_tensor_segments(x) if len(x) != 1: raise ValueError( "GPTNeoX requires each input feature to contain only " f"one segment, but received {len(x)}. If you are using GPTNeoX " "for a multi-segment classification task, please refer to " "classification models like BERT or RoBERTa." ) sequence_length = sequence_length or self.sequence_length token_ids, padding_mask = self.packer( self.tokenizer(x[0]), sequence_length=sequence_length, add_start_value=self.add_start_token, add_end_value=self.add_end_token, ) x = { "token_ids": token_ids, "padding_mask": padding_mask, } return pack_x_y_sample_weight(x, y, sample_weight) def get_config(self): config = super().get_config() config.update( { "sequence_length": self.sequence_length, "add_start_token": self.add_start_token, "add_end_token": self.add_end_token, } ) return config @property def sequence_length(self): """The padded length of model input sequences.""" return self._sequence_length @sequence_length.setter def sequence_length(self, value): self._sequence_length = value if self.packer is not None: self.packer.sequence_length = value @classproperty def tokenizer_cls(cls): return GPTNeoXTokenizer
keras-nlp/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor.py/0
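The `StartEndPacker` created in `build()` above does the actual packing. For a single unbatched sequence the behavior reduces to a few lines; this sketch (my own, ignoring ragged batching and configurable truncation) shows where the `"token_ids"` and `"padding_mask"` entries come from:

```python
def start_end_pack(token_ids, sequence_length, start_id, end_id, pad_id=0):
    """Add start/end tokens, truncate, pad, and build a padding mask."""
    ids = ([start_id] + list(token_ids) + [end_id])[:sequence_length]
    padding_mask = [1] * len(ids) + [0] * (sequence_length - len(ids))
    ids = ids + [pad_id] * (sequence_length - len(ids))
    return {"token_ids": ids, "padding_mask": padding_mask}

packed = start_end_pack([11, 12, 13], sequence_length=6, start_id=1, end_id=2)
print(packed["token_ids"])     # [1, 11, 12, 13, 2, 0]
print(packed["padding_mask"])  # [1, 1, 1, 1, 1, 0]
```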
{ "file_path": "keras-nlp/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor.py", "repo_id": "keras-nlp", "token_count": 2258 }
143
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from keras_nlp.models.opt.opt_causal_lm_preprocessor import ( OPTCausalLMPreprocessor, ) from keras_nlp.models.opt.opt_tokenizer import OPTTokenizer from keras_nlp.tests.test_case import TestCase class OPTCausalLMPreprocessorTest(TestCase): def setUp(self): self.vocab = ["<pad>", "</s>", "air", "Ġair", "plane", "Ġat", "port"] self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] self.merges += ["Ġai r", "Ġa i", "pla ne"] self.tokenizer = OPTTokenizer( vocabulary=self.vocab, merges=self.merges, ) self.init_kwargs = { "tokenizer": self.tokenizer, "sequence_length": 8, } self.input_data = ["airplane at airport"] def test_preprocessor_basics(self): self.run_preprocessor_test( cls=OPTCausalLMPreprocessor, init_kwargs=self.init_kwargs, input_data=self.input_data, expected_output=( { "token_ids": [[1, 2, 4, 5, 3, 6, 1, 0]], "padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]], }, [[2, 4, 5, 3, 6, 1, 0, 0]], # Pass through labels. [[1, 1, 1, 1, 1, 1, 0, 0]], # Pass through sample_weights. ), ) def test_no_start_end_token(self): input_data = ["airplane at airport"] * 4 preprocessor = OPTCausalLMPreprocessor( **self.init_kwargs, add_start_token=False, add_end_token=False, ) x, y, sw = preprocessor(input_data) self.assertAllEqual(x["token_ids"], [[2, 4, 5, 3, 6, 0, 0, 0]] * 4) self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 0, 0, 0]] * 4) self.assertAllEqual(y, [[4, 5, 3, 6, 0, 0, 0, 0]] * 4) self.assertAllEqual(sw, [[1, 1, 1, 1, 0, 0, 0, 0]] * 4) def test_generate_preprocess(self): input_data = "airplane at airport" preprocessor = OPTCausalLMPreprocessor(**self.init_kwargs) x = preprocessor.generate_preprocess(input_data) self.assertAllEqual(x["token_ids"], [1, 2, 4, 5, 3, 6, 0, 0]) self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0]) def test_generate_postprocess(self): input_data = { "token_ids": [1, 2, 4, 5, 3, 6, 0, 0], "padding_mask": [1, 1, 1, 1, 1, 1, 0, 0], } preprocessor = OPTCausalLMPreprocessor(**self.init_kwargs) x = preprocessor.generate_postprocess(input_data) self.assertAllEqual(x, "airplane at airport") @pytest.mark.extra_large def test_all_presets(self): for preset in OPTCausalLMPreprocessor.presets: self.run_preset_test( cls=OPTCausalLMPreprocessor, preset=preset, input_data=self.input_data, )
keras-nlp/keras_nlp/models/opt/opt_causal_lm_preprocessor_test.py/0
{ "file_path": "keras-nlp/keras_nlp/models/opt/opt_causal_lm_preprocessor_test.py", "repo_id": "keras-nlp", "token_count": 1742 }
144
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_nlp.backend import keras from keras_nlp.models.preprocessor import Preprocessor from keras_nlp.models.task import Task from keras_nlp.tests.test_case import TestCase from keras_nlp.tokenizers.tokenizer import Tokenizer class SimpleTokenizer(Tokenizer): def vocabulary_size(self): return 10 class SimplePreprocessor(Preprocessor): def __init__(self, **kwargs): super().__init__(**kwargs) self.tokenizer = SimpleTokenizer() class SimpleTask(Task): def __init__(self, preprocessor=None, activation=None, **kwargs): self.preprocessor = preprocessor self.activation = keras.activations.get(activation) inputs = keras.Input(shape=(5,)) outputs = keras.layers.Dense(5)(inputs) super().__init__(inputs, outputs, **kwargs) class TestTask(TestCase): def test_summary_with_preprocessor(self): preprocessor = SimplePreprocessor() model = SimpleTask(preprocessor) summary = [] model.summary(print_fn=lambda x, line_break: summary.append(x)) self.assertRegex("\n".join(summary), "Preprocessor:") def test_summary_without_preprocessor(self): model = SimpleTask() summary = [] model.summary(print_fn=lambda x, line_break: summary.append(x)) self.assertNotRegex("\n".join(summary), "Preprocessor:") def test_mismatched_loss(self): # Logit output. model = SimpleTask(activation=None) model.compile( loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True) ) # Non-standard losses should not throw. model.compile(loss="mean_squared_error") with self.assertRaises(ValueError): model.compile(loss="sparse_categorical_crossentropy") with self.assertRaises(ValueError): model.compile( loss=keras.losses.SparseCategoricalCrossentropy( from_logits=False ) ) # Probability output. model = SimpleTask(activation="softmax") model.compile( loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False) ) model.compile(loss="sparse_categorical_crossentropy") # Non-standard losses should not throw. model.compile(loss="mean_squared_error") with self.assertRaises(ValueError): model.compile( loss=keras.losses.SparseCategoricalCrossentropy( from_logits=True ) ) # Non-standard activations should not throw. model = SimpleTask(activation="tanh") model.compile( loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True) ) model.compile( loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False) )
keras-nlp/keras_nlp/models/task_test.py/0
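The `test_mismatched_loss` cases above guard against a real failure mode: handing probabilities to a loss configured with `from_logits=True` (or vice versa) silently computes a different quantity rather than erroring. A NumPy illustration with invented values:

```python
import numpy as np

logits = np.array([2.0, 1.0, 0.1])
probs = np.exp(logits) / np.exp(logits).sum()  # softmax activation
label = 0

# Correct pairing: crossentropy directly on probabilities.
loss_ok = -np.log(probs[label])
# Mismatched pairing: treating probabilities as logits re-applies
# softmax and silently reports the wrong loss.
resoftmax = np.exp(probs) / np.exp(probs).sum()
loss_bad = -np.log(resoftmax[label])
print(loss_ok, loss_bad)  # ~0.417 vs ~0.802 -- not the same quantity
```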
{ "file_path": "keras-nlp/keras_nlp/models/task_test.py", "repo_id": "keras-nlp", "token_count": 1416 }
145
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_nlp.backend import keras from keras_nlp.backend import ops class ContentAndQueryEmbedding(keras.layers.Layer): """ Content and Query Embedding. This class creates Content and Query Embeddings for XLNet model which is later used in XLNet Encoder. Args: vocabulary_size: int, number of tokens in the vocabulary. hidden_dim: int, the size hidden states. dropout: float, defaults to 0. the dropout value, shared by `keras.layers.TwoStreamRelativeAttention` and feedforward network. kernel_initializer_range: int, defaults to 0.02. The kernel initializer range for the dense and relative attention layers. name: string, defaults to None. The name of the layer. **kwargs: other keyword arguments. References: - [XLNet: Generalized Autoregressive Pretraining for Language Understanding] (https://arxiv.org/abs/1906.08237) """ def __init__( self, vocabulary_size, hidden_dim, dropout, name=None, **kwargs ): super().__init__(name=name, **kwargs) self.vocabulary_size = vocabulary_size self.hidden_dim = hidden_dim self.dropout = dropout def positional_embedding(self, pos_seq, inv_freq, bsz=None): sinusoid_inp = ops.einsum("i,d->id", pos_seq, inv_freq) pos_emb = ops.concatenate( [ops.sin(sinusoid_inp), ops.cos(sinusoid_inp)], axis=-1 ) pos_emb = ops.expand_dims(pos_emb, 1) pos_emb = ( ops.ones( [ ops.shape(pos_emb)[0], ops.shape(pos_emb)[1] * bsz, ops.shape(pos_emb)[2], ], dtype=self.compute_dtype, ) * pos_emb ) return pos_emb def relative_positional_encoding(self, qlen, klen, bsz=None, clamp_len=-1): """create relative positional encoding.""" freq_seq = ops.arange(0, self.hidden_dim, 2.0, dtype="float32") freq_seq = ops.cast(freq_seq, self.compute_dtype) inv_freq = 1 / (10000 ** (freq_seq / self.hidden_dim)) beg, end = klen, -qlen fwd_pos_seq = ops.arange(beg, end, -1.0, dtype="float32") fwd_pos_seq = ops.cast(fwd_pos_seq, self.compute_dtype) if clamp_len > 0: fwd_pos_seq = ops.clip( fwd_pos_seq, x_min=-clamp_len, x_max=clamp_len ) pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz) return pos_emb def build(self, input_shape): self.word_embed = keras.layers.Embedding( input_dim=self.vocabulary_size, output_dim=self.hidden_dim, dtype=self.dtype_policy, name="word_embedding", ) self.word_embed.build(input_shape) self.dropout_layer = keras.layers.Dropout( self.dropout, dtype=self.dtype_policy, ) super().build(input_shape) def call( self, token_id_input, mlen=None, ): mlen = 0 if mlen is None else mlen bsz, qlen = ops.shape(token_id_input)[0], ops.shape(token_id_input)[1] klen = mlen + qlen # Word embeddings and prepare h & g hidden states word_emb = self.word_embed(token_id_input) word_emb = self.dropout_layer(word_emb) # Positional encoding pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz) pos_emb = self.dropout_layer(pos_emb) pos_emb = ops.reshape( pos_emb, [ ops.shape(pos_emb)[1], ops.shape(pos_emb)[0], ops.shape(pos_emb)[2], ], ) return word_emb, pos_emb def compute_output_shape(self, 
token_id_input_shape): return [ token_id_input_shape + (self.hidden_dim,), (token_id_input_shape[0], 1, self.hidden_dim), ]
keras-nlp/keras_nlp/models/xlnet/xlnet_content_and_query_embedding.py/0
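`relative_positional_encoding` above builds a sinusoid table over relative offsets running from `klen` down to `-qlen + 1`, rather than over absolute positions. The same table in NumPy, with the batch tiling and `clamp_len` handling dropped:

```python
import numpy as np

def relative_positional_encoding(qlen, klen, hidden_dim):
    """Sinusoid table over relative offsets, shape (qlen + klen, hidden_dim)."""
    freq_seq = np.arange(0, hidden_dim, 2.0)
    inv_freq = 1.0 / (10000 ** (freq_seq / hidden_dim))
    pos_seq = np.arange(klen, -qlen, -1.0)  # relative offsets, descending
    sinusoid = np.einsum("i,d->id", pos_seq, inv_freq)
    return np.concatenate([np.sin(sinusoid), np.cos(sinusoid)], axis=-1)

print(relative_positional_encoding(4, 4, 8).shape)  # (8, 8)
```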
{ "file_path": "keras-nlp/keras_nlp/models/xlnet/xlnet_content_and_query_embedding.py", "repo_id": "keras-nlp", "token_count": 2124 }
146
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import ops from keras_nlp.backend import random from keras_nlp.samplers.sampler import Sampler @keras_nlp_export("keras_nlp.samplers.TopPSampler") class TopPSampler(Sampler): """Top-P Sampler class. This sampler implements top-p search algorithm. Top-p search selects tokens from the smallest subset of output probabilities that sum to greater than `p`. Put in another way, top-p will first order token predictions by likelihood, and ignore all tokens after the cumulative probability of selected tokens exceeds `p`, then select a token from the remaining tokens. Args: p: float, the `p` value of top-p. k: int. If set, this argument defines a heuristic "top-k" cutoff applied before the "top-p" sampling. All logits not in the top `k` will be discarded, and the remaining logits will be sorted to find a cutoff point for `p`. Setting this arg can significantly speed sampling up by reducing the number of tokens to sort. Defaults to `None`. seed: int. The random seed. Defaults to `None`. Call arguments: {{call_args}} Examples: ```python causal_lm = keras_nlp.models.GPT2CausalLM.from_preset("gpt2_base_en") # Pass by name to compile. causal_lm.compile(sampler="top_p") causal_lm.generate(["Keras is a"]) # Pass by object to compile. sampler = keras_nlp.samplers.TopPSampler(p=0.1, k=1_000) causal_lm.compile(sampler=sampler) causal_lm.generate(["Keras is a"]) ``` """ def __init__( self, p=0.1, k=None, seed=None, **kwargs, ): super().__init__(**kwargs) self.p = p self.k = k self.seed = seed self.seed_generator = random.SeedGenerator(seed) def get_next_token(self, probabilities): cutoff = ops.shape(probabilities)[1] if self.k is not None: # If `k` is set, only sample from top `k` tokens. cutoff = self.k sorted_preds, sorted_indices = ops.top_k( probabilities, k=cutoff, sorted=True ) # Calculate cumulative probability distribution. cumulative_probabilities = ops.cumsum(sorted_preds, axis=-1) # Create a mask for the tokens to keep. keep_mask = cumulative_probabilities <= self.p # Shift to include the last token that exceed p. shifted_keep_mask = ops.concatenate( [ops.ones_like(keep_mask[:, :1]), keep_mask[:, :-1]], axis=-1 ) # Filter out unmasked tokens and sample from filtered distribution. probabilities = ops.where( shifted_keep_mask, sorted_preds, ops.zeros(ops.shape(sorted_preds), dtype=sorted_preds.dtype), ) sorted_next_token = random.categorical( ops.log(probabilities), 1, seed=self.seed_generator, dtype="int32", ) output = ops.take_along_axis(sorted_indices, sorted_next_token, axis=-1) return ops.squeeze(output, axis=-1) def get_config(self): config = super().get_config() config.update( { "p": self.p, "k": self.k, "seed": self.seed, } ) return config
keras-nlp/keras_nlp/samplers/top_p_sampler.py/0
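Stripped of batching and the optional top-k pre-cut, `get_next_token` above is standard nucleus sampling. A NumPy sketch in which `keep` mirrors `shifted_keep_mask`:

```python
import numpy as np

def top_p_sample(probs, p, rng):
    """Nucleus sampling for one distribution: keep the smallest set of
    tokens whose cumulative probability covers `p`, renormalize, draw."""
    order = np.argsort(probs)[::-1]  # most likely token first
    sorted_probs = probs[order]
    cumulative = np.cumsum(sorted_probs)
    # Keep while cumulative <= p, shifted right by one so the token that
    # first pushes the total past `p` is also kept.
    keep = np.roll(cumulative <= p, 1)
    keep[0] = True
    filtered = np.where(keep, sorted_probs, 0.0)
    filtered /= filtered.sum()
    return order[rng.choice(len(probs), p=filtered)]

rng = np.random.default_rng(42)
probs = np.array([0.5, 0.3, 0.15, 0.05])
print(top_p_sample(probs, p=0.75, rng=rng))  # only ever returns 0 or 1
```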
{ "file_path": "keras-nlp/keras_nlp/samplers/top_p_sampler.py", "repo_id": "keras-nlp", "token_count": 1677 }
147
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tensorflow as tf from keras_nlp.tests.test_case import TestCase from keras_nlp.tokenizers.word_piece_tokenizer import WordPieceTokenizer class WordPieceTokenizerTest(TestCase): def test_tokenize(self): input_data = ["the quick brown fox."] vocab_data = ["[UNK]", "the", "qu", "##ick", "br", "##own", "fox", "."] tokenizer = WordPieceTokenizer(vocabulary=vocab_data) call_output = tokenizer(input_data) tokenize_output = tokenizer.tokenize(input_data) self.assertAllEqual(call_output, [[1, 2, 3, 4, 5, 6, 7]]) self.assertAllEqual(tokenize_output, [[1, 2, 3, 4, 5, 6, 7]]) def test_dense_output(self): input_data = ["the quick brown fox."] vocab_data = ["[UNK]", "the", "qu", "##ick", "br", "##own", "fox", "."] tokenizer = WordPieceTokenizer( vocabulary=vocab_data, sequence_length=10 ) call_output = tokenizer(input_data) self.assertAllEqual(call_output, [[1, 2, 3, 4, 5, 6, 7, 0, 0, 0]]) def test_string_tokenize(self): input_data = ["the quick brown fox"] vocab_data = ["[UNK]", "the", "qu", "##ick", "br", "##own", "fox"] tokenizer = WordPieceTokenizer(vocabulary=vocab_data, dtype="string") call_output = tokenizer(input_data) self.assertAllEqual( call_output, [["the", "qu", "##ick", "br", "##own", "fox"]], ) def test_detokenize(self): vocab_data = ["[UNK]", "the", "qu", "##ick", "br", "##own", "fox"] tokenizer = WordPieceTokenizer(vocabulary=vocab_data) outputs = tokenizer.detokenize([1, 2, 3, 4, 5, 6]) self.assertAllEqual(outputs, "the quick brown fox") outputs = tokenizer.detokenize([[1, 2, 3, 4, 5, 6], [1, 6]]) self.assertAllEqual(outputs, ["the quick brown fox", "the fox"]) def test_accessors(self): vocab_data = ["[UNK]", "the", "qu", "##ick", "br", "##own", "fox"] tokenizer = WordPieceTokenizer(vocabulary=vocab_data) self.assertEqual(tokenizer.vocabulary_size(), 7) self.assertEqual( tokenizer.get_vocabulary(), ["[UNK]", "the", "qu", "##ick", "br", "##own", "fox"], ) self.assertEqual(tokenizer.id_to_token(0), "[UNK]") self.assertEqual(tokenizer.id_to_token(6), "fox") self.assertEqual(tokenizer.token_to_id("[UNK]"), 0) self.assertEqual(tokenizer.token_to_id("fox"), 6) def test_error_id_out_of_vocabulary(self): vocab_data = ["[UNK]", "the", "qu", "##ick", "br", "##own", "fox", "."] tokenizer = WordPieceTokenizer(vocabulary=vocab_data) with self.assertRaises(ValueError): tokenizer.id_to_token(tokenizer.vocabulary_size()) with self.assertRaises(ValueError): tokenizer.id_to_token(-1) def test_special_tokens(self): input_data = ["quick brown whale"] vocab_data = ["@UNK@", "qu", "@@ick", "br", "@@own", "fox"] tokenizer = WordPieceTokenizer( vocabulary=vocab_data, oov_token="@UNK@", suffix_indicator="@@", dtype="string", ) call_output = tokenizer(input_data) self.assertAllEqual( call_output, [["qu", "@@ick", "br", "@@own", "@UNK@"]], ) def test_cjk_tokens(self): input_data = ["ah半推zz"] vocab_data = ["[UNK]", "推", "敐", "乐", "半", "偷", "匕", "ah", "zz"] tokenizer = WordPieceTokenizer(vocabulary=vocab_data, dtype="string") 
call_output = tokenizer(input_data) self.assertAllEqual( call_output, [["ah", "半", "推", "zz"]], ) def test_lowercase(self): input_data = ["the QUicK brOWN FOX"] vocab_data = ["[UNK]", "the", "qu", "##ick", "br", "##own", "fox"] tokenizer = WordPieceTokenizer(vocabulary=vocab_data, lowercase=True) call_output = tokenizer(input_data) self.assertAllEqual(call_output, [[1, 2, 3, 4, 5, 6]]) def test_skip_lowercase(self): input_data = ["the QUicK brOWN FOX"] vocab_data = ["[UNK]", "the", "QU", "##icK", "br", "##OWN", "fox"] tokenizer = WordPieceTokenizer(vocabulary=vocab_data, lowercase=False) call_output = tokenizer(input_data) self.assertAllEqual(call_output, [[1, 2, 3, 4, 5, 0]]) def test_strip_accents(self): input_data = ["á é í ó ú"] vocab_data = ["[UNK]", "a", "e", "i", "o", "u"] tokenizer = WordPieceTokenizer( vocabulary=vocab_data, strip_accents=True ) call_output = tokenizer(input_data) self.assertAllEqual(call_output, [[1, 2, 3, 4, 5]]) def test_skip_strip_accents(self): input_data = ["á é í ó ú"] vocab_data = ["[UNK]", "á", "é", "í", "ó", "ú"] tokenizer = WordPieceTokenizer( vocabulary=vocab_data, strip_accents=False ) call_output = tokenizer(input_data) self.assertAllEqual(call_output, [[1, 2, 3, 4, 5]]) def test_no_splitting(self): input_data = ["t o k e n", "m i s s i n g", "t o k e n"] vocab_data = ["[UNK]", "t o k e n"] tokenizer = WordPieceTokenizer(vocabulary=vocab_data, split=False) call_output = tokenizer(input_data) self.assertAllEqual(call_output, [1, 0, 1]) def test_word_piece_only(self): input_data = ["the", "quíck", "Brówn", "Fóx"] vocab_data = ["[UNK]", "the", "qu", "##íck", "Br", "##ówn", "Fóx"] tokenizer = WordPieceTokenizer( vocabulary=vocab_data, lowercase=False, strip_accents=False, split=False, ) call_output = tokenizer(input_data) self.assertAllEqual(call_output, [1, 2, 3, 4, 5, 6]) def test_batching_ragged_tensors(self): tokenizer = WordPieceTokenizer( vocabulary=["[UNK]", "a", "b", "c", "d", "e", "f"] ) dataset = tf.data.Dataset.from_tensor_slices(["a b c", "d e", "a f e"]) dataset = dataset.map(tokenizer) dataset = dataset.apply( tf.data.experimental.dense_to_ragged_batch(batch_size=1) ) element = dataset.take(1).get_single_element().numpy() self.assertAllEqual(element, [[1, 2, 3]]) def test_from_file(self): vocab_path = os.path.join(self.get_temp_dir(), "vocab.txt") input_data = ["the quick brown fox."] vocab_data = ["[UNK]", "the", "qu", "##ick", "br", "##own", "fox", "."] with tf.io.gfile.GFile(vocab_path, "w") as file: for piece in vocab_data: file.write(piece + "\n") tokenizer = WordPieceTokenizer(vocabulary=vocab_path) call_output = tokenizer(input_data) self.assertAllEqual(call_output, [[1, 2, 3, 4, 5, 6, 7]]) def test_config(self): input_data = ["quick brOWN whale"] vocab_data = ["@UNK@", "qu", "@@ick", "br", "@@OWN", "fox"] original_tokenizer = WordPieceTokenizer( vocabulary=vocab_data, lowercase=False, oov_token="@UNK@", suffix_indicator="@@", dtype="string", ) cloned_tokenizer = WordPieceTokenizer.from_config( original_tokenizer.get_config() ) cloned_tokenizer.set_vocabulary(original_tokenizer.get_vocabulary()) self.assertAllEqual( original_tokenizer(input_data), cloned_tokenizer(input_data), ) def test_no_oov_token_in_vocabulary(self): vocab_data = ["qu", "@@ick", "br", "@@OWN", "fox"] with self.assertRaises(ValueError): WordPieceTokenizer( vocabulary=vocab_data, ) vocab_data = ["@UNK@", "qu", "@@ick", "br", "@@OWN", "fox"] with self.assertRaises(ValueError): WordPieceTokenizer( vocabulary=vocab_data, ) vocab_data = ["UNK", "qu", "@@ick", "br", 
"@@OWN", "fox"] with self.assertRaises(ValueError): WordPieceTokenizer( vocabulary=vocab_data, ) with self.assertRaises(ValueError): WordPieceTokenizer(vocabulary=vocab_data, oov_token=None)
keras-nlp/keras_nlp/tokenizers/word_piece_tokenizer_test.py/0
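For reference, the matching rule these tests exercise is greedy longest-prefix-first. A compact sketch for a single pre-split word (my own simplification: no lowercasing, accent stripping, or special-token handling):

```python
def wordpiece_tokenize(word, vocab, oov_token="[UNK]", suffix="##"):
    """Greedy longest-match-first WordPiece on one word."""
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end]
            if start > 0:
                piece = suffix + piece  # continuation pieces get "##"
            if piece in vocab:
                tokens.append(piece)
                break
            end -= 1
        else:
            return [oov_token]  # no piece matched: the whole word is OOV
        start = end
    return tokens

vocab = {"[UNK]", "qu", "##ick", "br", "##own", "fox"}
print(wordpiece_tokenize("quick", vocab))  # ['qu', '##ick']
print(wordpiece_tokenize("whale", vocab))  # ['[UNK]']
```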
{ "file_path": "keras-nlp/keras_nlp/tokenizers/word_piece_tokenizer_test.py", "repo_id": "keras-nlp", "token_count": 4220 }
148
[tool.black]
line-length = 80

[tool.isort]
profile = "black"
force_single_line = "True"
known_first_party = ["keras_nlp", "tests"]
default_section = "THIRDPARTY"
line_length = 80
keras-nlp/pyproject.toml/0
{ "file_path": "keras-nlp/pyproject.toml", "repo_id": "keras-nlp", "token_count": 71 }
149
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import gc import json import os import pathlib import traceback import keras import numpy as np import requests from absl import app from absl import flags from keras import ops from transformers import AutoTokenizer from transformers import MistralForCausalLM import keras_nlp from keras_nlp.models import MistralBackbone from keras_nlp.models import MistralCausalLMPreprocessor from keras_nlp.models import MistralTokenizer PRESET_MAP = { "mistral_7b_en": "mistralai/Mistral-7B-v0.1", "mistral_instruct_7b_en": "mistralai/Mistral-7B-Instruct-v0.1", } FLAGS = flags.FLAGS flags.DEFINE_string( "preset", None, f'Must be one of {",".join(PRESET_MAP.keys())}' ) def convert_checkpoints(keras_nlp_model, hf_model): config = hf_model.config keras_nlp_model.token_embedding.embeddings.assign( hf_model.model.embed_tokens.weight.detach().cpu().numpy() ) for i in range(keras_nlp_model.num_layers): keras_nlp_model.transformer_layers[ i ]._self_attention_layer._key_dense.set_weights( [ hf_model.model.layers[i] .self_attn.k_proj.weight.T.reshape( config.hidden_size, config.num_key_value_heads, config.hidden_size // config.num_attention_heads, ) .detach() .cpu() .numpy() ] ) keras_nlp_model.transformer_layers[ i ]._self_attention_layer._query_dense.set_weights( [ hf_model.model.layers[i] .self_attn.q_proj.weight.T.reshape( config.hidden_size, config.num_attention_heads, config.hidden_size // config.num_attention_heads, ) .detach() .cpu() .numpy() ] ) keras_nlp_model.transformer_layers[ i ]._self_attention_layer._value_dense.set_weights( [ hf_model.model.layers[i] .self_attn.v_proj.weight.T.reshape( config.hidden_size, config.num_key_value_heads, config.hidden_size // config.num_attention_heads, ) .detach() .cpu() .numpy() ] ) keras_nlp_model.transformer_layers[ i ]._self_attention_layer._output_dense.set_weights( [ hf_model.model.layers[i] .self_attn.o_proj.weight.T.reshape( config.num_attention_heads, config.hidden_size // config.num_attention_heads, config.hidden_size, ) .detach() .cpu() .numpy() ] ) keras_nlp_model.transformer_layers[ i ]._self_attention_layernorm.set_weights( [ hf_model.model.layers[i] .input_layernorm.weight.detach() .cpu() .numpy() ] ) keras_nlp_model.transformer_layers[ i ]._feedforward_intermediate_dense.set_weights( [ hf_model.model.layers[i] .mlp.up_proj.weight.T.detach() .cpu() .numpy() ] ) keras_nlp_model.transformer_layers[ i ]._feedforward_output_dense.set_weights( [ hf_model.model.layers[i] .mlp.down_proj.weight.T.detach() .cpu() .numpy() ] ) keras_nlp_model.transformer_layers[ i ]._feedforward_gate_dense.set_weights( [ hf_model.model.layers[i] .mlp.gate_proj.weight.T.detach() .cpu() .numpy() ] ) keras_nlp_model.transformer_layers[ i ]._feedforward_layernorm.set_weights( [ hf_model.model.layers[i] .post_attention_layernorm.weight.detach() .cpu() .numpy() ] ) keras_nlp_model.layer_norm.set_weights( [hf_model.model.norm.weight.detach().cpu().numpy()] ) 
    keras_nlp_model.token_embedding.reverse_embeddings.assign(
        hf_model.lm_head.weight.T.detach().cpu().numpy()
    )


def test_model(
    keras_nlp_model, keras_nlp_tokenizer, hf_model, hf_model_tokenizer
):
    # First, test that the parameter counts match
    keras_nlp_params = keras_nlp_model.count_params()
    hf_params = hf_model.num_parameters()
    assert keras_nlp_params == hf_params

    # Test the outputs of both the models
    hf_outputs = hf_model(
        **hf_model_tokenizer(["What is Keras?"], return_tensors="pt")
    )
    hf_output_logits = hf_outputs.logits.detach().cpu().numpy()

    keras_nlp_preprocessor = MistralCausalLMPreprocessor(keras_nlp_tokenizer)
    keras_nlp_output = keras_nlp_model(
        keras_nlp_preprocessor(["What is Keras?"], sequence_length=6)[0]
    )
    keras_nlp_logits = keras_nlp_model.token_embedding(
        keras_nlp_output, reverse=True
    )
    keras_nlp_logits = ops.convert_to_numpy(keras_nlp_logits)

    # High tolerance since bfloat16 is used as the default dtype for Mistral
    try:
        np.testing.assert_allclose(
            keras_nlp_logits, hf_output_logits, atol=1e-4
        )
    except AssertionError as err:
        print("\n")
        print(traceback.format_exc())
        print(err.args[0])
        print("\n")


def test_tokenizer(keras_nlp_tokenizer, hf_tokenizer):
    hf_output = hf_tokenizer(["What is Keras?"], return_tensors="pt")
    hf_output = hf_output["input_ids"].detach().cpu().numpy()
    keras_nlp_preprocessor = MistralCausalLMPreprocessor(keras_nlp_tokenizer)
    keras_nlp_output = keras_nlp_preprocessor(
        ["What is Keras?"], sequence_length=6
    )
    keras_nlp_output = ops.convert_to_numpy(keras_nlp_output[0]["token_ids"])

    np.testing.assert_equal(keras_nlp_output, hf_output)


def main(_):
    # === Get the preset name ===
    if FLAGS.preset not in PRESET_MAP.keys():
        raise ValueError(
            f"Invalid preset {FLAGS.preset}. Must be one "
            f"of {','.join(PRESET_MAP.keys())}"
        )
    preset = FLAGS.preset
    hf_preset = PRESET_MAP[preset]

    # === Create the save directories ===
    model_dir = pathlib.Path(__file__).parent / f"{preset}"
    tokenizer_dir = model_dir / "assets" / "tokenizer"
    if not model_dir.exists():
        os.makedirs(model_dir)
    if not tokenizer_dir.exists():
        os.makedirs(tokenizer_dir)

    # === Load the Huggingface model ===
    hf_model = MistralForCausalLM.from_pretrained(hf_preset)
    hf_tokenizer = AutoTokenizer.from_pretrained(hf_preset)
    hf_model.eval()
    print("\n-> Huggingface model and tokenizer loaded")

    # === Load the KerasNLP model ===
    keras_nlp_config = dict(
        vocabulary_size=hf_model.config.vocab_size,
        hidden_dim=hf_model.config.hidden_size,
        num_layers=hf_model.config.num_hidden_layers,
        num_query_heads=hf_model.config.num_attention_heads,
        num_key_value_heads=hf_model.config.num_key_value_heads,
        intermediate_dim=hf_model.config.intermediate_size,
        sliding_window=hf_model.config.sliding_window,
        layer_norm_epsilon=hf_model.config.rms_norm_eps,
        rope_max_wavelength=hf_model.config.rope_theta,
        dtype="float32",
    )
    keras_nlp_model = MistralBackbone(**keras_nlp_config)

    # === Download the tokenizer from Huggingface model card ===
    spm_path = (
        f"https://huggingface.co./{hf_preset}/resolve/main/tokenizer.model"
    )
    response = requests.get(spm_path)
    if not response.ok:
        raise ValueError(f"Couldn't fetch {preset}'s tokenizer.")
    tokenizer_path = tokenizer_dir / "vocabulary.spm"
    with open(tokenizer_path, "wb") as tokenizer_file:
        tokenizer_file.write(response.content)
    keras_nlp_tokenizer = MistralTokenizer(str(tokenizer_path.absolute()))
    print("\n-> Keras 3 model and tokenizer loaded.")

    # === Port the weights ===
    convert_checkpoints(keras_nlp_model, hf_model)
    print("\n-> Weight transfer done.")

    # === Check that the model and tokenizer outputs match ===
    test_tokenizer(keras_nlp_tokenizer, hf_tokenizer)
    test_model(keras_nlp_model, keras_nlp_tokenizer, hf_model, hf_tokenizer)
    print("\n-> Tests passed!")

    # === Save the model weights in float32 format ===
    keras_nlp_model.save_weights(
        str((model_dir / "model.weights.h5").absolute())
    )
    print("\n-> Saved the model weights in float32")

    del keras_nlp_model, hf_model
    gc.collect()

    keras_nlp_config["dtype"] = "float16"

    # === Save the weights again in float16 ===
    keras_nlp_model = MistralBackbone(**keras_nlp_config)
    keras_nlp_model.load_weights(
        str((model_dir / "model.weights.h5").absolute())
    )
    keras_nlp_model.save_weights(
        str((model_dir / "model.weights.h5").absolute())
    )
    print("-> Saved the model weights in float16")

    # === Save the model config ===
    keras_nlp_config["dtype"] = "bfloat16"
    model_config = {
        "module": "keras_nlp.src.models.mistral.mistral_backbone",
        "class_name": "MistralBackbone",
        "config": {**keras_nlp_config},
        "registered_name": "keras_nlp>MistralBackbone",
        "assets": [],
        "weights": "model.weights.h5",
    }
    model_config_json = json.dumps(model_config)
    with open(model_dir / "config.json", "w") as model_config_file:
        model_config_file.write(model_config_json)
    print("\n-> Saved model config")

    # === Save the tokenizer config ===
    tokenizer_config = {
        "module": "keras_nlp.src.models.mistral.mistral_tokenizer",
        "class_name": "MistralTokenizer",
        "config": {
            "name": "mistral_tokenizer",
            "trainable": True,
            "dtype": "int32",
            "proto": None,
            "sequence_length": None,
        },
        "registered_name": "keras_nlp>MistralTokenizer",
        "assets": ["assets/tokenizer/vocabulary.spm"],
        "weights": None,
    }
    tokenizer_config_json = json.dumps(tokenizer_config)
    with open(model_dir / "tokenizer.json", "w") as tokenizer_config_file:
        tokenizer_config_file.write(tokenizer_config_json)
    print("\n-> Saved tokenizer config")

    # === Save metadata ===
    metadata_config = {
        "keras_version": keras.__version__,
        "keras_nlp_version": keras_nlp.__version__,
        "parameter_count": keras_nlp_model.count_params(),
        "date_saved": datetime.datetime.utcnow().strftime("%Y-%m-%d@%H:%M:%S"),
    }
    metadata_config_json = json.dumps(metadata_config)
    with open(model_dir / "metadata.json", "w") as metadata_config_file:
        metadata_config_file.write(metadata_config_json)
    print("\n-> Saved metadata")


if __name__ == "__main__":
    flags.mark_flag_as_required("preset")
    app.run(main)
keras-nlp/tools/checkpoint_conversion/convert_mistral_checkpoints.py/0
{ "file_path": "keras-nlp/tools/checkpoint_conversion/convert_mistral_checkpoints.py", "repo_id": "keras-nlp", "token_count": 5801 }
150
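The `lm_head.weight.T` transpose above is the crux of the port: PyTorch `nn.Linear` stores its weight as `(out_features, in_features)`, while Keras dense kernels are laid out `(in_features, out_features)`. A minimal NumPy sketch of that layout flip (shapes here are made up for illustration, not Mistral's real dimensions):

```python
import numpy as np

# Hypothetical shapes: hidden_dim=4, vocab_size=8.
hidden_dim, vocab_size = 4, 8

# A PyTorch-style LM head weight: (out_features, in_features).
torch_style_lm_head = np.random.rand(vocab_size, hidden_dim).astype("float32")

# Keras expects (in_features, out_features), hence the transpose in
# convert_checkpoints above.
keras_style_kernel = torch_style_lm_head.T
assert keras_style_kernel.shape == (hidden_dim, vocab_size)

# Logits computed either way agree.
x = np.random.rand(2, hidden_dim).astype("float32")
np.testing.assert_allclose(x @ keras_style_kernel, x @ torch_style_lm_head.T)
```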
"""Enables dynamic setting of underlying Keras module. """ _KERAS_BACKEND = None _KERAS_UTILS = None def set_keras_submodules(backend, utils): # Deprecated, will be removed in the future. global _KERAS_BACKEND global _KERAS_UTILS _KERAS_BACKEND = backend _KERAS_UTILS = utils def get_keras_submodule(name): # Deprecated, will be removed in the future. if name not in {'backend', 'utils'}: raise ImportError( 'Can only retrieve "backend" and "utils". ' 'Requested: %s' % name) if _KERAS_BACKEND is None: raise ImportError('You need to first `import keras` ' 'in order to use `keras_preprocessing`. ' 'For instance, you can do:\n\n' '```\n' 'import keras\n' 'from keras_preprocessing import image\n' '```\n\n' 'Or, preferably, this equivalent formulation:\n\n' '```\n' 'from keras import preprocessing\n' '```\n') if name == 'backend': return _KERAS_BACKEND elif name == 'utils': return _KERAS_UTILS __version__ = '1.1.2'
keras-preprocessing/keras_preprocessing/__init__.py/0
{ "file_path": "keras-preprocessing/keras_preprocessing/__init__.py", "repo_id": "keras-preprocessing", "token_count": 670 }
151
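A short sketch of the deprecated wiring this module implements, assuming a standalone `keras` install; as the module's own error message suggests, new code should simply use `keras.preprocessing` instead:

```python
import keras
import keras_preprocessing

# Deprecated: hand keras_preprocessing the backend/utils modules it
# should delegate to.
keras_preprocessing.set_keras_submodules(
    backend=keras.backend, utils=keras.utils
)

# Retrieval is restricted to exactly these two names.
backend = keras_preprocessing.get_keras_submodule("backend")
utils = keras_preprocessing.get_keras_submodule("utils")
```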
import numpy as np import pytest from PIL import Image from keras_preprocessing.image import image_data_generator, utils @pytest.fixture(scope='module') def all_test_images(): img_w = img_h = 20 rgb_images = [] rgba_images = [] gray_images = [] for n in range(8): bias = np.random.rand(img_w, img_h, 1) * 64 variance = np.random.rand(img_w, img_h, 1) * (255 - 64) imarray = np.random.rand(img_w, img_h, 3) * variance + bias im = Image.fromarray(imarray.astype('uint8')).convert('RGB') rgb_images.append(im) imarray = np.random.rand(img_w, img_h, 4) * variance + bias im = Image.fromarray(imarray.astype('uint8')).convert('RGBA') rgba_images.append(im) imarray = np.random.rand(img_w, img_h, 1) * variance + bias im = Image.fromarray( imarray.astype('uint8').squeeze()).convert('L') gray_images.append(im) return [rgb_images, rgba_images, gray_images] def test_image_data_generator(all_test_images): for test_images in all_test_images: img_list = [] for im in test_images: img_list.append(utils.img_to_array(im)[None, ...]) image_data_generator.ImageDataGenerator( featurewise_center=True, samplewise_center=True, featurewise_std_normalization=True, samplewise_std_normalization=True, zca_whitening=True, rotation_range=90., width_shift_range=0.1, height_shift_range=0.1, shear_range=0.5, zoom_range=0.2, channel_shift_range=0., brightness_range=(1, 5), fill_mode='nearest', cval=0.5, horizontal_flip=True, vertical_flip=True, interpolation_order=1 ) def test_image_data_generator_with_validation_split(all_test_images): for test_images in all_test_images: img_list = [] for im in test_images: img_list.append(utils.img_to_array(im)[None, ...]) images = np.vstack(img_list) labels = np.concatenate([ np.zeros((int(len(images) / 2),)), np.ones((int(len(images) / 2),))]) generator = image_data_generator.ImageDataGenerator(validation_split=0.5) # training and validation sets would have different # number of classes, because labels are sorted with pytest.raises(ValueError, match='Training and validation subsets ' 'have different number of classes after ' 'the split.*'): generator.flow(images, labels, shuffle=False, batch_size=10, subset='validation') # test non categorical labels with validation split generator.flow(images, labels, shuffle=False, batch_size=10, ignore_class_split=True, subset='validation') labels = np.concatenate([ np.zeros((int(len(images) / 4),)), np.ones((int(len(images) / 4),)), np.zeros((int(len(images) / 4),)), np.ones((int(len(images) / 4),)) ]) seq = generator.flow(images, labels, shuffle=False, batch_size=10, subset='validation') x, y = seq[0] assert 2 == len(np.unique(y)) seq = generator.flow(images, labels, shuffle=False, batch_size=10, subset='training') x2, y2 = seq[0] assert 2 == len(np.unique(y2)) with pytest.raises(ValueError): generator.flow(images, np.arange(images.shape[0]), shuffle=False, batch_size=3, subset='foo') def test_image_data_generator_with_split_value_error(): with pytest.raises(ValueError): image_data_generator.ImageDataGenerator(validation_split=5) def test_image_data_generator_invalid_data(): generator = image_data_generator.ImageDataGenerator( featurewise_center=True, samplewise_center=True, featurewise_std_normalization=True, samplewise_std_normalization=True, zca_whitening=True, data_format='channels_last') # Test fit with invalid data with pytest.raises(ValueError): x = np.random.random((3, 10, 10)) generator.fit(x) # Test flow with invalid data with pytest.raises(ValueError): x = np.random.random((32, 10, 10)) generator.flow(np.arange(x.shape[0])) def 
test_image_data_generator_fit(): generator = image_data_generator.ImageDataGenerator( featurewise_center=True, samplewise_center=True, featurewise_std_normalization=True, samplewise_std_normalization=True, zca_whitening=True, rotation_range=90., width_shift_range=0.1, height_shift_range=0.1, shear_range=0.5, zoom_range=(0.2, 0.2), channel_shift_range=0., brightness_range=(1, 5), fill_mode='nearest', cval=0.5, horizontal_flip=True, vertical_flip=True, interpolation_order=1, data_format='channels_last' ) x = np.random.random((32, 10, 10, 3)) generator.fit(x, augment=True) # Test grayscale x = np.random.random((32, 10, 10, 1)) generator.fit(x) # Test RGB x = np.random.random((32, 10, 10, 3)) generator.fit(x) # Test more samples than dims x = np.random.random((32, 4, 4, 1)) generator.fit(x) generator = image_data_generator.ImageDataGenerator( featurewise_center=True, samplewise_center=True, featurewise_std_normalization=True, samplewise_std_normalization=True, zca_whitening=True, rotation_range=90., width_shift_range=0.1, height_shift_range=0.1, shear_range=0.5, zoom_range=(0.2, 0.2), channel_shift_range=0., brightness_range=(1, 5), fill_mode='nearest', cval=0.5, horizontal_flip=True, vertical_flip=True, interpolation_order=1, data_format='channels_first' ) x = np.random.random((32, 10, 10, 3)) generator.fit(x, augment=True) # Test grayscale x = np.random.random((32, 1, 10, 10)) generator.fit(x) # Test RGB x = np.random.random((32, 3, 10, 10)) generator.fit(x) # Test more samples than dims x = np.random.random((32, 1, 4, 4)) generator.fit(x) def test_image_data_generator_flow(all_test_images, tmpdir): for test_images in all_test_images: img_list = [] for im in test_images: img_list.append(utils.img_to_array(im)[None, ...]) images = np.vstack(img_list) dsize = images.shape[0] generator = image_data_generator.ImageDataGenerator( featurewise_center=True, samplewise_center=True, featurewise_std_normalization=True, samplewise_std_normalization=True, zca_whitening=True, rotation_range=90., width_shift_range=0.1, height_shift_range=0.1, shear_range=0.5, zoom_range=0.2, channel_shift_range=0., brightness_range=(1, 5), fill_mode='nearest', cval=0.5, horizontal_flip=True, vertical_flip=True, interpolation_order=1 ) generator.flow( images, np.arange(images.shape[0]), shuffle=False, save_to_dir=str(tmpdir), batch_size=3 ) generator.flow( images, np.arange(images.shape[0]), shuffle=False, sample_weight=np.arange(images.shape[0]) + 1, save_to_dir=str(tmpdir), batch_size=3 ) # Test with `shuffle=True` generator.flow( images, np.arange(images.shape[0]), shuffle=True, save_to_dir=str(tmpdir), batch_size=3, seed=42 ) # Test without y generator.flow( images, None, shuffle=True, save_to_dir=str(tmpdir), batch_size=3 ) # Test with a single miscellaneous input data array x_misc1 = np.random.random(dsize) generator.flow( (images, x_misc1), np.arange(dsize), shuffle=False, batch_size=2 ) # Test with two miscellaneous inputs x_misc2 = np.random.random((dsize, 3, 3)) generator.flow( (images, [x_misc1, x_misc2]), np.arange(dsize), shuffle=False, batch_size=2 ) # Test cases with `y = None` generator.flow(images, None, batch_size=3) generator.flow((images, x_misc1), None, batch_size=3, shuffle=False) generator.flow( (images, [x_misc1, x_misc2]), None, batch_size=3, shuffle=False ) generator = image_data_generator.ImageDataGenerator(validation_split=0.2) generator.flow(images, batch_size=3) # Test some failure cases: x_misc_err = np.random.random((dsize + 1, 3, 3)) with pytest.raises(ValueError) as e_info:
generator.flow((images, x_misc_err), np.arange(dsize), batch_size=3) assert str(e_info.value).find('All of the arrays in') != -1 with pytest.raises(ValueError) as e_info: generator.flow((images, x_misc1), np.arange(dsize + 1), batch_size=3) assert str(e_info.value).find('`x` (images tensor) and `y` (labels) ') != -1 # Test `flow` behavior as Sequence generator.flow( images, np.arange(images.shape[0]), shuffle=False, save_to_dir=str(tmpdir), batch_size=3 ) # Test with `shuffle=True` generator.flow( images, np.arange(images.shape[0]), shuffle=True, save_to_dir=str(tmpdir), batch_size=3, seed=123 ) # test order_interpolation labels = np.array([[2, 2, 0, 2, 2], [1, 3, 2, 3, 1], [2, 1, 0, 1, 2], [3, 1, 0, 2, 0], [3, 1, 3, 2, 1]]) label_generator = image_data_generator.ImageDataGenerator( rotation_range=90., interpolation_order=0 ) label_generator.flow( x=labels[np.newaxis, ..., np.newaxis], seed=123 ) def test_valid_args(): with pytest.raises(ValueError): image_data_generator.ImageDataGenerator(brightness_range=0.1) def test_batch_standardize(all_test_images): # ImageDataGenerator.standardize should work on batches for test_images in all_test_images: img_list = [] for im in test_images: img_list.append(utils.img_to_array(im)[None, ...]) images = np.vstack(img_list) generator = image_data_generator.ImageDataGenerator( featurewise_center=True, samplewise_center=True, featurewise_std_normalization=True, samplewise_std_normalization=True, zca_whitening=True, rotation_range=90., width_shift_range=0.1, height_shift_range=0.1, shear_range=0.5, zoom_range=0.2, channel_shift_range=0., brightness_range=(1, 5), fill_mode='nearest', cval=0.5, horizontal_flip=True, vertical_flip=True) generator.fit(images, augment=True) transformed = np.copy(images) for i, im in enumerate(transformed): transformed[i] = generator.random_transform(im) transformed = generator.standardize(transformed) def test_deterministic_transform(): x = np.ones((32, 32, 3)) generator = image_data_generator.ImageDataGenerator( rotation_range=90, fill_mode='constant') x = np.random.random((32, 32, 3)) assert np.allclose(generator.apply_transform(x, {'flip_vertical': True}), x[::-1, :, :]) assert np.allclose(generator.apply_transform(x, {'flip_horizontal': True}), x[:, ::-1, :]) x = np.ones((3, 3, 3)) x_rotated = np.array([[[0., 0., 0.], [1., 1., 1.], [0., 0., 0.]], [[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]], [[0., 0., 0.], [1., 1., 1.], [0., 0., 0.]]]) assert np.allclose(generator.apply_transform(x, {'theta': 45}), x_rotated) def test_random_transforms(): x = np.random.random((2, 28, 28)) # Test get_random_transform with predefined seed seed = 1 generator = image_data_generator.ImageDataGenerator( rotation_range=90., width_shift_range=0.1, height_shift_range=0.1, shear_range=0.5, zoom_range=0.2, channel_shift_range=0.1, brightness_range=(1, 5), horizontal_flip=True, vertical_flip=True) transform_dict = generator.get_random_transform(x.shape, seed) transform_dict2 = generator.get_random_transform(x.shape, seed * 2) assert transform_dict['theta'] != 0 assert transform_dict['theta'] != transform_dict2['theta'] assert transform_dict['tx'] != 0 assert transform_dict['tx'] != transform_dict2['tx'] assert transform_dict['ty'] != 0 assert transform_dict['ty'] != transform_dict2['ty'] assert transform_dict['shear'] != 0 assert transform_dict['shear'] != transform_dict2['shear'] assert transform_dict['zx'] != 0 assert transform_dict['zx'] != transform_dict2['zx'] assert transform_dict['zy'] != 0 assert transform_dict['zy'] != transform_dict2['zy'] 
assert transform_dict['channel_shift_intensity'] != 0 assert (transform_dict['channel_shift_intensity'] != transform_dict2['channel_shift_intensity']) assert transform_dict['brightness'] != 0 assert transform_dict['brightness'] != transform_dict2['brightness'] # Test get_random_transform without any randomness generator = image_data_generator.ImageDataGenerator() transform_dict = generator.get_random_transform(x.shape, seed) assert transform_dict['theta'] == 0 assert transform_dict['tx'] == 0 assert transform_dict['ty'] == 0 assert transform_dict['shear'] == 0 assert transform_dict['zx'] == 1 assert transform_dict['zy'] == 1 assert transform_dict['channel_shift_intensity'] is None assert transform_dict['brightness'] is None def test_fit_rescale(all_test_images): rescale = 1. / 255 for test_images in all_test_images: img_list = [] for im in test_images: img_list.append(utils.img_to_array(im)[None, ...]) images = np.vstack(img_list) # featurewise_center test generator = image_data_generator.ImageDataGenerator( rescale=rescale, featurewise_center=True, dtype='float64') generator.fit(images) batch = generator.flow(images, batch_size=8).next() assert abs(np.mean(batch)) < 1e-6 # featurewise_std_normalization test generator = image_data_generator.ImageDataGenerator( rescale=rescale, featurewise_center=True, featurewise_std_normalization=True, dtype='float64') generator.fit(images) batch = generator.flow(images, batch_size=8).next() assert abs(np.mean(batch)) < 1e-6 assert abs(1 - np.std(batch)) < 1e-5 # zca_whitening test generator = image_data_generator.ImageDataGenerator( rescale=rescale, featurewise_center=True, zca_whitening=True, dtype='float64') generator.fit(images) batch = generator.flow(images, batch_size=8).next() batch = np.reshape(batch, (batch.shape[0], batch.shape[1] * batch.shape[2] * batch.shape[3])) # Y * Y_T = n * I, where Y = W * X identity = np.dot(batch, batch.T) / batch.shape[0] assert ((np.abs(identity) - np.identity(identity.shape[0])) < 1e-6).all() if __name__ == '__main__': pytest.main([__file__])
keras-preprocessing/tests/image/image_data_generator_test.py/0
{ "file_path": "keras-preprocessing/tests/image/image_data_generator_test.py", "repo_id": "keras-preprocessing", "token_count": 8328 }
152
# Security Policy If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. You may submit the report in the following ways: - send a [private vulnerability report](https://github.com/keras-team/keras-tuner/security/advisories/new) Please provide the following information in your report: - A description of the vulnerability and its impact - How to reproduce the issue This project is maintained by volunteers on a reasonable-effort basis. As such, please give us 90 days to work on a fix before public exposure.
keras-tuner/SECURITY.md/0
{ "file_path": "keras-tuner/SECURITY.md", "repo_id": "keras-tuner", "token_count": 178 }
153
# Copyright 2019 The KerasTuner Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for HyperImageAugment Models.""" import numpy as np import pytest from keras_tuner.applications import augment as aug_module from keras_tuner.backend import config from keras_tuner.backend import keras from keras_tuner.engine import hyperparameters as hp_module def test_transforms_search_space(): hm = aug_module.HyperImageAugment(input_shape=(32, 32, 3)) # Default choice assert hm.transforms == [ ("rotate", (0, 0.5)), ("translate_x", (0, 0.4)), ("translate_y", (0, 0.4)), ("contrast", (0, 0.3)), ] hm = aug_module.HyperImageAugment( input_shape=(32, 32, 3), rotate=0.3, translate_x=[0.1, 0.5], contrast=None, ) assert hm.transforms == [ ("rotate", (0, 0.3)), ("translate_x", (0.1, 0.5)), ("translate_y", (0, 0.4)), ] def test_input_requirement(): hp = hp_module.HyperParameters() with pytest.raises(ValueError, match=r".*must specify.*"): hm = aug_module.HyperImageAugment() hm = aug_module.HyperImageAugment(input_shape=(None, None, 3)) model = hm.build(hp) assert model.built hm = aug_module.HyperImageAugment( input_tensor=keras.Input(shape=(32, 32, 3)) ) model = hm.build(hp) assert model.built def test_model_construction_factor_zero(): hp = hp_module.HyperParameters() hm = aug_module.HyperImageAugment(input_shape=(None, None, 3)) model = hm.build(hp) # augment_layers search default space [0, 4], with default zero. assert len(model.layers) == 1 hp = hp_module.HyperParameters() hm = aug_module.HyperImageAugment( input_shape=(None, None, 3), augment_layers=0 ) model = hm.build(hp) # factors default all zero, the model should only have input layer assert len(model.layers) == 1 def test_model_construction_fixed_aug(): hp = hp_module.HyperParameters() hm = aug_module.HyperImageAugment( input_shape=(None, None, 3), rotate=[0.2, 0.5], augment_layers=0 ) model = hm.build(hp) assert model.layers assert model.name == "image_augment" # Output shape includes batch dimension. assert model.output_shape == (None, None, None, 3) out = model.predict(np.ones((1, 32, 32, 3))) assert out.shape == (1, 32, 32, 3) # Augment does not distort image when inferencing. assert (out != 1).sum() == 0 def test_model_construction_rand_aug(): hp = hp_module.HyperParameters() hm = aug_module.HyperImageAugment( input_shape=(None, None, 3), rotate=[0.2, 0.5] ) model = hm.build(hp) assert model.layers assert model.name == "image_rand_augment" # Output shape includes batch dimension. assert model.output_shape == (None, None, None, 3) out = model.predict(np.ones((1, 32, 32, 3))) assert out.shape == (1, 32, 32, 3) # Augment does not distort image when inferencing. assert (out != 1).sum() == 0 def test_hyperparameter_selection_and_hp_defaults_fixed_aug(): hp = hp_module.HyperParameters() hm = aug_module.HyperImageAugment( input_shape=(32, 32, 3), translate_x=[0.2, 0.4], contrast=None, augment_layers=0, ) hm.build(hp) # default value of default search space are always minimum. 
assert hp.get("factor_rotate") == 0 assert hp.get("factor_translate_x") == 0.2 assert hp.get("factor_translate_y") == 0 assert "factor_contrast" not in hp.values @pytest.mark.skipif( not config.multi_backend(), reason="The test fails with tf.keras.", ) def test_hyperparameter_existence_and_hp_defaults_rand_aug(): hp = hp_module.HyperParameters() hm = aug_module.HyperImageAugment( input_shape=(32, 32, 3), augment_layers=[2, 5], contrast=False ) hm.build(hp) assert hp.get("augment_layers") == 2 def test_augment_layers_not_int(): with pytest.raises(ValueError, match="must be int"): hp = hp_module.HyperParameters() hm = aug_module.HyperImageAugment( input_shape=(32, 32, 3), augment_layers=1.5 ) hm.build(hp) @pytest.mark.skipif( not config.multi_backend(), reason="The test fails with tf.keras.", ) def test_contrast_0_5_to_1(): hp = hp_module.HyperParameters() hm = aug_module.HyperImageAugment( input_shape=(32, 32, 3), augment_layers=[1, 2], contrast=[0.5, 1] ) hm.build(hp) def test_3_range_params_raise_error(): with pytest.raises(ValueError, match="exceed 2"): hp = hp_module.HyperParameters() hm = aug_module.HyperImageAugment( input_shape=(32, 32, 3), augment_layers=[1, 2], contrast=[0.5, 1, 2] ) hm.build(hp) def test_contrast_range_receive_string_raise_error(): with pytest.raises(ValueError, match="must be int or float"): hp = hp_module.HyperParameters() hm = aug_module.HyperImageAugment( input_shape=(32, 32, 3), augment_layers=[1, 2], contrast=[0.5, "a"] ) hm.build(hp) def test_hyperparameter_override_fixed_aug(): hp = hp_module.HyperParameters() hp.Fixed("factor_rotate", 0.9) hp.Choice("factor_translate_x", [0.8]) hm = aug_module.HyperImageAugment(input_shape=(32, 32, 3), augment_layers=0) hm.build(hp) assert hp.get("factor_rotate") == 0.9 assert hp.get("factor_translate_x") == 0.8 assert hp.get("factor_translate_y") == 0.0 assert hp.get("factor_contrast") == 0.0 @pytest.mark.skipif( not config.multi_backend(), reason="The test fails with tf.keras.", ) def test_hyperparameter_override_rand_aug(): hp = hp_module.HyperParameters() hp.Fixed("randaug_mag", 1.0) hp.Choice("randaug_count", [4]) hm = aug_module.HyperImageAugment( input_shape=(32, 32, 3), augment_layers=[2, 4] ) hm.build(hp) assert hp.get("randaug_mag") == 1.0 assert hp.get("randaug_count") == 4
keras-tuner/keras_tuner/applications/augment_test.py/0
{ "file_path": "keras-tuner/keras_tuner/applications/augment_test.py", "repo_id": "keras-tuner", "token_count": 2658 }
154
# Copyright 2019 The KerasTuner Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time from concurrent import futures import grpc from keras_tuner import protos from keras_tuner.engine import hyperparameters as hp_module from keras_tuner.engine import trial as trial_module from keras_tuner.engine.oracle import synchronized class OracleServicer(protos.get_service_grpc().OracleServicer): def __init__(self, oracle): self.oracle = oracle self.stop_triggered = False def GetSpace(self, request, context): hps = self.oracle.get_space() return protos.get_service().GetSpaceResponse( hyperparameters=hps.to_proto() ) def UpdateSpace(self, request, context): hps = hp_module.HyperParameters.from_proto(request.hyperparameters) self.oracle.update_space(hps) return protos.get_service().UpdateSpaceResponse() def CreateTrial(self, request, context): trial = self.oracle.create_trial(request.tuner_id) if trial.status == trial_module.TrialStatus.STOPPED: self.stop_triggered = True return protos.get_service().CreateTrialResponse(trial=trial.to_proto()) def UpdateTrial(self, request, context): trial = self.oracle.update_trial( request.trial_id, request.metrics, step=request.step ) return protos.get_service().UpdateTrialResponse(trial=trial.to_proto()) def EndTrial(self, request, context): trial = trial_module.Trial.from_proto(request.trial) self.oracle.end_trial(trial) return protos.get_service().EndTrialResponse() def GetTrial(self, request, context): trial = self.oracle.get_trial(request.trial_id) return protos.get_service().GetTrialResponse(trial=trial.to_proto()) def GetBestTrials(self, request, context): trials = self.oracle.get_best_trials(request.num_trials) return protos.get_service().GetBestTrialsResponse( trials=[trial.to_proto() for trial in trials] ) @synchronized def exit_chief(oracle): return len(oracle.ongoing_trials) == 0 and len(oracle.tuner_ids) == 0 def start_server(oracle): """Starts the `OracleServicer` used to manage distributed requests.""" ip_addr = os.environ["KERASTUNER_ORACLE_IP"] port = os.environ["KERASTUNER_ORACLE_PORT"] server = grpc.server(futures.ThreadPoolExecutor(max_workers=1)) oracle_servicer = OracleServicer(oracle) protos.get_service_grpc().add_OracleServicer_to_server( oracle_servicer, server ) server.add_insecure_port(f"{ip_addr}:{port}") server.start() while True: # The server does not block otherwise. time.sleep(1) if oracle_servicer.stop_triggered: while not exit_chief(oracle): time.sleep(20) # pragma: no cover break
keras-tuner/keras_tuner/distribute/oracle_chief.py/0
{ "file_path": "keras-tuner/keras_tuner/distribute/oracle_chief.py", "repo_id": "keras-tuner", "token_count": 1297 }
155
# Copyright 2019 The KerasTuner Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import six from keras_tuner import protos from keras_tuner.api_export import keras_tuner_export from keras_tuner.engine import conditions as conditions_mod from keras_tuner.engine.hyperparameters import hp_utils from keras_tuner.engine.hyperparameters import hyperparameter @keras_tuner_export("keras_tuner.engine.hyperparameters.Choice") class Choice(hyperparameter.HyperParameter): """Choice of one value among a predefined set of possible values. Args: name: A string. the name of parameter. Must be unique for each `HyperParameter` instance in the search space. values: A list of possible values. Values must be int, float, str, or bool. All values must be of the same type. ordered: Optional boolean, whether the values passed should be considered to have an ordering. Defaults to `True` for float/int values. Must be `False` for any other values. default: Optional default value to return for the parameter. If unspecified, the default value will be: - None if None is one of the choices in `values` - The first entry in `values` otherwise. """ def __init__(self, name, values, ordered=None, default=None, **kwargs): super().__init__(name=name, default=default, **kwargs) if not values: raise ValueError("`values` must be provided for `Choice`.") # Type checking. types = {type(v) for v in values} if len(types) > 1: raise TypeError( "A `Choice` can contain only one type of value, " f"found values: {str(values)} with types {types}." ) # Standardize on str, int, float, bool. if isinstance(values[0], six.string_types): values = [str(v) for v in values] if default is not None: default = str(default) elif isinstance(values[0], six.integer_types): values = [int(v) for v in values] if default is not None: default = int(default) elif not isinstance(values[0], (bool, float)): raise TypeError( "A `Choice` can contain only `int`, `float`, `str`, or " "`bool`, found values: " + str(values) + "with " "types: " + str(type(values[0])) ) self._values = values if default is not None and default not in values: raise ValueError( "The default value should be one of the choices. " f"You passed: values={values}, default={default}" ) self._default = default # Get or infer ordered. 
self.ordered = ordered is_numeric = isinstance(values[0], (six.integer_types, float)) if self.ordered and not is_numeric: raise ValueError("`ordered` must be `False` for non-numeric types.") if self.ordered is None: self.ordered = is_numeric def __repr__(self): return ( f"Choice(name: '{self.name}', " + f"values: {self._values}, " + f"ordered: {self.ordered}, default: {self.default})" ) @property def values(self): return self._values @property def default(self): return self._values[0] if self._default is None else self._default def prob_to_value(self, prob): return self._values[hp_utils.prob_to_index(prob, len(self._values))] def value_to_prob(self, value): return hp_utils.index_to_prob( self._values.index(value), len(self._values) ) def get_config(self): config = super().get_config() config["values"] = self._values config["ordered"] = self.ordered return config @classmethod def from_proto(cls, proto): values = [getattr(val, val.WhichOneof("kind")) for val in proto.values] default = getattr(proto.default, proto.default.WhichOneof("kind"), None) conditions = [ conditions_mod.Condition.from_proto(c) for c in proto.conditions ] return cls( name=proto.name, values=values, ordered=proto.ordered, default=default, conditions=conditions, ) def to_proto(self): if isinstance(self.values[0], six.string_types): values = [ protos.get_proto().Value(string_value=v) for v in self.values ] default = protos.get_proto().Value(string_value=self.default) elif isinstance(self.values[0], six.integer_types): values = [ protos.get_proto().Value(int_value=v) for v in self.values ] default = protos.get_proto().Value(int_value=self.default) else: values = [ protos.get_proto().Value(float_value=v) for v in self.values ] default = protos.get_proto().Value(float_value=self.default) return protos.get_proto().Choice( name=self.name, ordered=self.ordered, values=values, default=default, conditions=[c.to_proto() for c in self.conditions], )
keras-tuner/keras_tuner/engine/hyperparameters/hp_types/choice_hp.py/0
{ "file_path": "keras-tuner/keras_tuner/engine/hyperparameters/hp_types/choice_hp.py", "repo_id": "keras-tuner", "token_count": 2500 }
156
# Copyright 2019 The KerasTuner Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import numpy as np import pytest from keras_tuner.backend.keras import losses from keras_tuner.backend.keras import metrics from keras_tuner.engine import metrics_tracking def test_register_from_metrics(): # As well as direction inference. tracker = metrics_tracking.MetricsTracker( metrics=[metrics.CategoricalAccuracy(), metrics.MeanSquaredError()] ) assert set(tracker.metrics.keys()) == { "categorical_accuracy", "mean_squared_error", } assert tracker.metrics["categorical_accuracy"].direction == "max" assert tracker.metrics["mean_squared_error"].direction == "min" def test_register(): tracker = metrics_tracking.MetricsTracker() tracker.register("new_metric", direction="max") assert set(tracker.metrics.keys()) == {"new_metric"} assert tracker.metrics["new_metric"].direction == "max" with pytest.raises(ValueError, match="`direction` should be one of"): tracker.register("another_metric", direction="wrong") with pytest.raises(ValueError, match="already exists"): tracker.register("new_metric", direction="max") def test_exists(): tracker = metrics_tracking.MetricsTracker() tracker.register("new_metric", direction="max") assert tracker.exists("new_metric") assert not tracker.exists("another_metric") def test_update(): tracker = metrics_tracking.MetricsTracker() tracker.update("new_metric", 0.5) # automatic registration assert set(tracker.metrics.keys()) == {"new_metric"} assert tracker.metrics["new_metric"].direction == "min" # default direction assert tracker.get_history("new_metric") == [ metrics_tracking.MetricObservation(0.5, step=0) ] def test_metric_observation_repr(): assert ( repr(metrics_tracking.MetricObservation(0.5, step=0)) == "MetricObservation(value=[0.5], step=0)" ) def test_get_history(): tracker = metrics_tracking.MetricsTracker() tracker.update("new_metric", 0.5, step=0) tracker.update("new_metric", 1.5, step=1) tracker.update("new_metric", 2.0, step=2) assert tracker.get_history("new_metric") == [ metrics_tracking.MetricObservation(0.5, 0), metrics_tracking.MetricObservation(1.5, 1), metrics_tracking.MetricObservation(2.0, 2), ] with pytest.raises(ValueError, match="Unknown metric"): tracker.get_history("another_metric") def test_set_history(): tracker = metrics_tracking.MetricsTracker() tracker.set_history( "new_metric", [ metrics_tracking.MetricObservation(0.5, 0), metrics_tracking.MetricObservation(1.5, 1), metrics_tracking.MetricObservation(2.0, 2), ], ) values = [obs.value for obs in tracker.get_history("new_metric")] steps = [obs.step for obs in tracker.get_history("new_metric")] assert values == [[0.5], [1.5], [2.0]] assert steps == [0, 1, 2] def test_get_best_step_value_none(): tracker = metrics_tracking.MetricsTracker() tracker.register("val_loss", "min") assert tracker.get_best_step("val_loss") is None def test_get_best_value(): tracker = metrics_tracking.MetricsTracker() tracker.register("metric_min", "min") tracker.register("metric_max", "max") assert 
tracker.get_best_value("metric_min") is None tracker.set_history( "metric_min", [ metrics_tracking.MetricObservation(1.0, 0), metrics_tracking.MetricObservation(2.0, 1), metrics_tracking.MetricObservation(3.0, 2), ], ) tracker.set_history( "metric_max", [ metrics_tracking.MetricObservation(1.0, 0), metrics_tracking.MetricObservation(2.0, 1), metrics_tracking.MetricObservation(3.0, 2), ], ) assert tracker.get_best_value("metric_min") == 1.0 assert tracker.get_best_value("metric_max") == 3.0 def test_get_statistics(): tracker = metrics_tracking.MetricsTracker() history = [ metrics_tracking.MetricObservation(random.random(), i) for i in range(10) ] tracker.set_history("new_metric", history) stats = tracker.get_statistics("new_metric") assert set(stats.keys()) == {"min", "max", "mean", "median", "var", "std"} history = [obs.value for obs in history] assert stats["min"] == np.min(history) assert stats["max"] == np.max(history) assert stats["mean"] == np.mean(history) assert stats["median"] == np.median(history) assert stats["var"] == np.var(history) assert stats["std"] == np.std(history) def test_get_last_value(): tracker = metrics_tracking.MetricsTracker() tracker.register("new_metric", "min") assert tracker.get_last_value("new_metric") is None tracker.set_history( "new_metric", [ metrics_tracking.MetricObservation(1.0, 0), metrics_tracking.MetricObservation(2.0, 1), metrics_tracking.MetricObservation(3.0, 2), ], ) assert tracker.get_last_value("new_metric") == 3.0 def test_serialization(): tracker = metrics_tracking.MetricsTracker() tracker.register("metric_min", "min") tracker.register("metric_max", "max") tracker.set_history( "metric_min", [ metrics_tracking.MetricObservation(1.0, 0), metrics_tracking.MetricObservation(2.0, 1), metrics_tracking.MetricObservation(3.0, 2), ], ) tracker.set_history( "metric_max", [ metrics_tracking.MetricObservation(1.0, 0), metrics_tracking.MetricObservation(2.0, 1), metrics_tracking.MetricObservation(3.0, 2), ], ) new_tracker = metrics_tracking.MetricsTracker.from_config( tracker.get_config() ) assert new_tracker.metrics.keys() == tracker.metrics.keys() def test_metricobservation_proto(): obs = metrics_tracking.MetricObservation(-10, 5) proto = obs.to_proto() assert proto.value == [-10] assert proto.step == 5 new_obs = metrics_tracking.MetricObservation.from_proto(proto) assert new_obs == obs def test_metrichistory_proto(): tracker = metrics_tracking.MetricHistory("max") tracker.update(5, step=3) tracker.update(10, step=4) proto = tracker.to_proto() assert proto.maximize assert proto.observations[0].value == [5] assert proto.observations[0].step == 3 assert proto.observations[1].value == [10] assert proto.observations[1].step == 4 new_tracker = metrics_tracking.MetricHistory.from_proto(proto) assert new_tracker.direction == "max" assert new_tracker.get_history() == [ metrics_tracking.MetricObservation(5, 3), metrics_tracking.MetricObservation(10, 4), ] def test_metricstracker_proto(): tracker = metrics_tracking.MetricsTracker() tracker.register("score", direction="max") tracker.update("score", value=10, step=1) tracker.update("score", value=20, step=1) tracker.update("score", value=30, step=2) proto = tracker.to_proto() obs = proto.metrics["score"].observations assert obs[0].value == [10, 20] assert obs[0].step == 1 assert obs[1].value == [30] assert obs[1].step == 2 assert proto.metrics["score"].maximize new_tracker = metrics_tracking.MetricsTracker.from_proto(proto) assert new_tracker.metrics["score"].direction == "max" assert 
new_tracker.metrics["score"].get_history() == [ metrics_tracking.MetricObservation([10, 20], 1), metrics_tracking.MetricObservation(30, 2), ] def test_metric_direction_inference(): # Test min metrics. assert metrics_tracking.infer_metric_direction("MAE") == "min" assert ( metrics_tracking.infer_metric_direction(metrics.binary_crossentropy) == "min" ) assert ( metrics_tracking.infer_metric_direction(metrics.FalsePositives()) == "min" ) # All losses in keras.losses are considered as 'min'. assert metrics_tracking.infer_metric_direction("squared_hinge") == "min" assert metrics_tracking.infer_metric_direction(losses.hinge) == "min" assert ( metrics_tracking.infer_metric_direction( losses.CategoricalCrossentropy() ) == "min" ) # Test max metrics. assert metrics_tracking.infer_metric_direction("binary_accuracy") == "max" assert ( metrics_tracking.infer_metric_direction(metrics.categorical_accuracy) == "max" ) assert metrics_tracking.infer_metric_direction(metrics.Precision()) == "max" # Test unknown metrics. assert metrics_tracking.infer_metric_direction("my_metric") is None def my_metric_fn(x, y): return x assert metrics_tracking.infer_metric_direction(my_metric_fn) is None class MyMetric(metrics.Metric): def update_state(self, x, y): return 1 def result(self): return 1 assert metrics_tracking.infer_metric_direction(MyMetric()) is None # Test special cases. assert metrics_tracking.infer_metric_direction("loss") == "min" assert metrics_tracking.infer_metric_direction("acc") == "max" assert metrics_tracking.infer_metric_direction("val_acc") == "max" assert metrics_tracking.infer_metric_direction("crossentropy") == "min" assert metrics_tracking.infer_metric_direction("ce") == "min" assert metrics_tracking.infer_metric_direction("weighted_acc") == "max" assert metrics_tracking.infer_metric_direction("val_weighted_ce") == "min" assert ( metrics_tracking.infer_metric_direction("weighted_binary_accuracy") == "max" )
keras-tuner/keras_tuner/engine/metrics_tracking_test.py/0
{ "file_path": "keras-tuner/keras_tuner/engine/metrics_tracking_test.py", "repo_id": "keras-tuner", "token_count": 4057 }
157
# Copyright 2019 The KerasTuner Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mock running KerasTuner in a distributed tuning setting.""" import os import sys import threading from unittest import mock import portpicker import six class ExceptionStoringThread(threading.Thread): def run(self): self.raised_exception = None try: super().run() except BaseException: self.raised_exception = sys.exc_info() class MockEnvVars(dict): """Allows setting different environment variables in threads.""" def __init__(self): self.thread_local = threading.local() self.initial_env_vars = os.environ.copy() def _setup_thread(self): if getattr(self.thread_local, "environ", None) is None: self.thread_local.environ = self.initial_env_vars.copy() def get(self, name, default=None): self._setup_thread() return self.thread_local.environ.get(name, default) def __setitem__(self, name, value): self._setup_thread() self.thread_local.environ[name] = value def __getitem__(self, name): self._setup_thread() return self.thread_local.environ[name] def __contains__(self, name): self._setup_thread() return name in self.thread_local.environ def mock_distribute(fn, num_workers=2, wait_for_chief=False): """Runs `fn` in multiple processes env vars for chief and clients. This function does not directly use any KerasTuner components, but only set up the corresponding environment variables for each of the threads. The environment variables are used by KerasTuner to check if the current thread is the chief or a client. All the exceptions, in the chief and all the clients, are collected raised in the main thread afterwards. Arguments: fn: Callable. The function to be called. num_workers: Int. The number of clients. wait_for_chief: Boolean. Default to False. Whether to wait for the chief thread to finish before ending the test. """ port = str(portpicker.pick_unused_port()) with mock.patch.object(os, "environ", MockEnvVars()): def chief_fn(): # The IP address of the chief Oracle. Run in distributed mode when # present. Cloud oracle does not run in this mode because the Cloud # API coordinates workers itself. os.environ["KERASTUNER_ORACLE_IP"] = "127.0.0.1" # The port of the chief Oracle. os.environ["KERASTUNER_ORACLE_PORT"] = port # The ID of this process. 'chief' will run the OracleServicer # server. os.environ["KERASTUNER_TUNER_ID"] = "chief" fn() chief_thread = ExceptionStoringThread(target=chief_fn) chief_thread.daemon = True chief_thread.start() worker_threads = [] for i in range(num_workers): def worker_fn(): os.environ["KERASTUNER_ORACLE_IP"] = "127.0.0.1" os.environ["KERASTUNER_ORACLE_PORT"] = port # Workers that are part of the same multi-worker # DistributionStrategy should have the same TUNER_ID. 
os.environ["KERASTUNER_TUNER_ID"] = f"worker{i}" fn() worker_thread = ExceptionStoringThread(target=worker_fn) worker_thread.start() worker_threads.append(worker_thread) # Wait for chief and clients to finish for worker_thread in worker_threads: worker_thread.join() if wait_for_chief: chief_thread.join() # Re-raise exceptions from chief and clients. if chief_thread.raised_exception: six.reraise(*chief_thread.raised_exception) for worker_thread in worker_threads: if worker_thread.raised_exception is not None: six.reraise(*worker_thread.raised_exception)
keras-tuner/keras_tuner/test_utils/mock_distribute.py/0
{ "file_path": "keras-tuner/keras_tuner/test_utils/mock_distribute.py", "repo_id": "keras-tuner", "token_count": 1764 }
158
import argparse import glob import os import shutil import namex package = "keras_tuner" build_directory = "build" dist_directory = "dist" to_copy = [ "setup.py", "setup.cfg", "README.md", ] def ignore_files(_, filenames): return [f for f in filenames if "test" in f] def build(): if os.path.exists(build_directory): raise ValueError(f"Directory already exists: {build_directory}") whl_path = None try: # Copy sources (`keras_tuner/` directory and setup files) to build # directory os.mkdir(build_directory) shutil.copytree( package, os.path.join(build_directory, package), ignore=ignore_files ) for fname in to_copy: shutil.copy(fname, os.path.join(f"{build_directory}", fname)) os.chdir(build_directory) # Restructure the codebase so that source files live in # `keras_tuner/src` namex.convert_codebase(package, code_directory="src") # Generate API __init__.py files in `keras_tuner/` namex.generate_api_files(package, code_directory="src", verbose=True) # Make sure to export the __version__ string from keras_tuner.src import __version__ # noqa: E402 with open(os.path.join(package, "__init__.py")) as f: init_contents = f.read() with open(os.path.join(package, "__init__.py"), "w") as f: f.write(init_contents + "\n\n" + f'__version__ = "{__version__}"\n') # Build the package os.system("python3 -m build") # Save the dist files generated by the build process os.chdir("..") if not os.path.exists(dist_directory): os.mkdir(dist_directory) for filename in glob.glob( os.path.join(build_directory, dist_directory, "*.*") ): shutil.copy(filename, dist_directory) # Find the .whl file path for fname in os.listdir(dist_directory): if __version__ in fname and fname.endswith(".whl"): whl_path = os.path.abspath(os.path.join(dist_directory, fname)) print(f"Build successful. Wheel file available at {whl_path}") finally: # Clean up: remove the build directory (no longer needed) shutil.rmtree(build_directory) return whl_path def install_whl(whl_fpath): print("Installing wheel file.") os.system(f"pip3 install {whl_fpath} --force-reinstall --no-dependencies") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--install", action="store_true", help="Whether to install the generated wheel file.", ) args = parser.parse_args() whl_path = build() if whl_path and args.install: install_whl(whl_path)
keras-tuner/pip_build.py/0
{ "file_path": "keras-tuner/pip_build.py", "repo_id": "keras-tuner", "token_count": 1213 }
159
"""Tests for tf.distribute related functionality under tf implementation.""" import numpy as np import pytest import tensorflow as tf from tensorflow.python.eager import context from keras import backend from keras import layers from keras import models from keras import testing from keras.backend.tensorflow import trainer as tf_trainer @pytest.mark.skipif( backend.backend() != "tensorflow", reason="The distribute test can only run with TF backend.", ) class DistributeTest(testing.TestCase): def setUp(self): super().setUp() # Need at least 2 devices for distribution related tests. cpus = tf.config.list_physical_devices("CPU") context._reset_context() tf.config.set_logical_device_configuration( cpus[0], [ tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration(), ], ) def test_variable_creation(self): strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"]) with strategy.scope(): dense = layers.Dense(2) dense.build([4, 2]) self.assertIsInstance(dense.kernel, backend.Variable) self.assertIsInstance( dense.kernel.value, tf.distribute.DistributedValues ) self.assertIn("MirroredVariable", dense.kernel.value.__class__.__name__) self.assertIsInstance(dense.kernel, backend.Variable) self.assertIsInstance(dense.bias.value, tf.distribute.DistributedValues) self.assertIn("MirroredVariable", dense.bias.value.__class__.__name__) def test_strategy_run(self): strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"]) with strategy.scope(): inputs = layers.Input(shape=[4]) dense = layers.Dense(2) output = dense(inputs) model = models.Functional(inputs, output) self.assertIsInstance(dense.kernel, backend.Variable) self.assertIsInstance( dense.kernel.value, tf.distribute.DistributedValues ) def input_fn(ctx): if ctx.replica_id_in_sync_group == 1: return tf.ones([8, 4]) else: return tf.zeros([8, 4]) distributed_inputs = ( strategy.experimental_distribute_values_from_function(input_fn) ) @tf.function def run_fn(data): return model(data) result = strategy.run(run_fn, args=(distributed_inputs,)) self.assertIsInstance( result, tf.types.experimental.distributed.PerReplica ) self.assertLen(result.values, 2) self.assertEqual(result.values[0].shape, [8, 2]) self.assertEqual(result.values[1].shape, [8, 2]) self.assertNotAllClose(result.values[0], result.values[1]) self.assertAllClose(result.values[0], tf.zeros([8, 2])) def test_epoch_iterator(self): x = np.random.random((100, 16)) y = np.random.random((100, 4)) sample_weight = np.random.random((100,)) batch_size = 16 shuffle = True strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"]) epoch_iterator = tf_trainer.TFEpochIterator( x=x, y=y, sample_weight=sample_weight, batch_size=batch_size, shuffle=shuffle, distribute_strategy=strategy, ) steps_seen = [] for step, data_iterator in epoch_iterator.enumerate_epoch(): steps_seen.append(step) batch = next(data_iterator) self.assertEqual(len(batch), 3) x, y, sample_weight = batch self.assertTrue( isinstance(x, tf.types.experimental.distributed.PerReplica) ) # Make sure the local batch size is 8 if step < 6: self.assertEqual(x.values[0].shape, [8, 16]) self.assertEqual(y.values[0].shape, [8, 4]) self.assertEqual(sample_weight.values[0].shape, [8]) else: # Last partial batch self.assertEqual(x.values[0].shape, [2, 16]) self.assertEqual(y.values[0].shape, [2, 4]) self.assertEqual(sample_weight.values[0].shape, [2]) self.assertEqual(steps_seen, [0, 1, 2, 3, 4, 5, 6])
keras/keras/backend/tensorflow/distribute_test.py/0
{ "file_path": "keras/keras/backend/tensorflow/distribute_test.py", "repo_id": "keras", "token_count": 2037 }
160
import tensorflow as tf from keras.utils import tracking class KerasAutoTrackable(tf.__internal__.tracking.AutoTrackable): """Manages dependencies on other objects with Keras tracking. Similar to TF AutoTrackable, but disabling tracking is based on tracking within Keras. This serves as an interface between Keras tracking and TF tracking. """ def __setattr__(self, name, value): """Support self.foo = trackable syntax.""" try: if getattr(self, name) is value: # Short circuit for `self.$x = self.$x`. return except AttributeError: pass if getattr(self, "_self_setattr_tracking", True): value = sticky_attribute_assignment( trackable=self, value=value, name=name ) super().__setattr__(name, value) def sticky_attribute_assignment(trackable, name, value): """Adds dependencies, called from __setattr__. Args: trackable: The object to add dependencies to (generally the one having an attribute assigned). name: The attribute name being assigned. value: The value being assigned. Not necessarily a trackable object. Returns: The value which should be stored in the attribute. """ if isinstance( value, (tracking.TrackedList, tracking.TrackedDict, tracking.TrackedSet) ) and hasattr(trackable, "_tracked"): trackable._tracked.append(name) if not tracking.is_tracking_enabled(): return value if isinstance(value, tf.__internal__.tracking.Trackable): trackable._track_trackable( # pylint: disable=protected-access value, name=name, # Allow the user to switch the Trackable which is tracked by this # name, since assigning a new variable to an attribute has # historically been fine (e.g. Adam did this). overwrite=True, ) return value
keras/keras/backend/tensorflow/trackable.py/0
{ "file_path": "keras/keras/backend/tensorflow/trackable.py", "repo_id": "keras", "token_count": 768 }
161
import numpy as np import pytest from keras import models from keras import testing from keras.callbacks.callback import Callback class CallbackTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_model_state_is_current_on_epoch_end(self): class TestModel(models.Model): def __init__(self): super().__init__() self.iterations = self.add_variable( shape=(), initializer="zeros", trainable=False ) def call(self, inputs): self.iterations.assign(self.iterations + 1) return inputs class CBK(Callback): def on_batch_end(self, batch, logs): assert np.int32(self.model.iterations) == batch + 1 model = TestModel() model.compile(optimizer="sgd", loss="mse") x = np.random.random((8, 1)) y = np.random.random((8, 1)) model.fit(x, y, callbacks=[CBK()], batch_size=2)
keras/keras/callbacks/callback_test.py/0
{ "file_path": "keras/keras/callbacks/callback_test.py", "repo_id": "keras", "token_count": 468 }
162
import warnings from unittest import mock import numpy as np from keras import backend from keras import callbacks from keras import layers from keras import testing from keras.models import Sequential from keras.utils import numerical_utils try: import requests except ImportError: requests = None class TerminateOnNaNTest(testing.TestCase): def test_RemoteMonitor(self): if requests is None: self.skipTest("`requests` required to run this test") monitor = callbacks.RemoteMonitor() # This will raise a warning since the default address in unreachable: warning_msg = "Could not reach RemoteMonitor root server" with warnings.catch_warnings(record=True) as warning_logs: warnings.simplefilter("always") monitor.on_epoch_end(0, logs={"loss": 0.0}) self.assertIn(warning_msg, str(warning_logs[-1].message)) def test_RemoteMonitor_np_array(self): if requests is None: self.skipTest("`requests` required to run this test") with mock.patch("requests.post") as requests_post: monitor = callbacks.RemoteMonitor(send_as_json=True) a = np.arange(1) # a 1 by 1 array logs = {"loss": 0.0, "val": a} monitor.on_epoch_end(0, logs=logs) send = {"loss": 0.0, "epoch": 0, "val": 0} requests_post.assert_called_once_with( monitor.root + monitor.path, json=send, headers=monitor.headers ) def test_RemoteMonitor_np_float32(self): if requests is None: self.skipTest("`requests` required to run this test") with mock.patch("requests.post") as requests_post: monitor = callbacks.RemoteMonitor(send_as_json=True) a = np.float32(1.0) # a float32 generic type logs = {"loss": 0.0, "val": a} monitor.on_epoch_end(0, logs=logs) send = {"loss": 0.0, "epoch": 0, "val": 1.0} requests_post.assert_called_once_with( monitor.root + monitor.path, json=send, headers=monitor.headers ) def test_RemoteMonitorWithJsonPayload(self): if requests is None: self.skipTest("`requests` required to run this test") if backend.backend() == "numpy": self.skipTest("Trainer not implemented from NumPy backend.") TRAIN_SAMPLES = 10 TEST_SAMPLES = 10 INPUT_DIM = 3 NUM_CLASSES = 2 BATCH_SIZE = 4 np.random.seed(1337) x_train = np.random.random((TRAIN_SAMPLES, INPUT_DIM)) y_train = np.random.choice(np.arange(NUM_CLASSES), size=TRAIN_SAMPLES) x_test = np.random.random((TEST_SAMPLES, INPUT_DIM)) y_test = np.random.choice(np.arange(NUM_CLASSES), size=TEST_SAMPLES) y_test = numerical_utils.to_categorical(y_test) y_train = numerical_utils.to_categorical(y_train) model = Sequential([layers.Dense(NUM_CLASSES)]) model.compile(loss="mean_squared_error", optimizer="sgd") with mock.patch("requests.post") as requests_post: monitor = callbacks.RemoteMonitor(send_as_json=True) hist = model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=[monitor], epochs=1, ) send = { "epoch": 0, "loss": hist.history["loss"][0], "val_loss": hist.history["val_loss"][0], } requests_post.assert_called_once_with( monitor.root + monitor.path, json=send, headers=monitor.headers )
keras/keras/callbacks/remote_monitor_test.py/0
{ "file_path": "keras/keras/callbacks/remote_monitor_test.py", "repo_id": "keras", "token_count": 1740 }
163
import re import numpy as np from keras import activations from keras import constraints from keras import initializers from keras import ops from keras import regularizers from keras.api_export import keras_export from keras.layers.layer import Layer @keras_export("keras.layers.EinsumDense") class EinsumDense(Layer): """A layer that uses `einsum` as the backing computation. This layer can perform einsum calculations of arbitrary dimensionality. Args: equation: An equation describing the einsum to perform. This equation must be a valid einsum string of the form `ab,bc->ac`, `...ab,bc->...ac`, or `ab...,bc->ac...` where 'ab', 'bc', and 'ac' can be any valid einsum axis expression sequence. output_shape: The expected shape of the output tensor (excluding the batch dimension and any dimensions represented by ellipses). You can specify `None` for any dimension that is unknown or can be inferred from the input shape. activation: Activation function to use. If you don't specify anything, no activation is applied (that is, a "linear" activation: `a(x) = x`). bias_axes: A string containing the output dimension(s) to apply a bias to. Each character in the `bias_axes` string should correspond to a character in the output portion of the `equation` string. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. kernel_constraint: Constraint function applied to the `kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. lora_rank: Optional integer. If set, the layer's forward pass will implement LoRA (Low-Rank Adaptation) with the provided rank. LoRA sets the layer's kernel to non-trainable and replaces it with a delta over the original kernel, obtained via multiplying two lower-rank trainable matrices (the factorization happens on the last dimension). This can be useful to reduce the computation cost of fine-tuning large dense layers. You can also enable LoRA on an existing `EinsumDense` layer by calling `layer.enable_lora(rank)`. **kwargs: Base layer keyword arguments, such as `name` and `dtype`. Examples: **Biased dense layer with einsums** This example shows how to instantiate a standard Keras dense layer using einsum operations. This example is equivalent to `keras.layers.Dense(64, use_bias=True)`. >>> layer = keras.layers.EinsumDense("ab,bc->ac", ... output_shape=64, ... bias_axes="c") >>> input_tensor = keras.Input(shape=[32]) >>> output_tensor = layer(input_tensor) >>> output_tensor.shape (None, 64) **Applying a dense layer to a sequence** This example shows how to instantiate a layer that applies the same dense operation to every element in a sequence. Here, the `output_shape` has two values (since there are two non-batch dimensions in the output); the first dimension in the `output_shape` is `None`, because the sequence dimension `b` has an unknown shape. >>> layer = keras.layers.EinsumDense("abc,cd->abd", ... output_shape=(None, 64), ... bias_axes="d") >>> input_tensor = keras.Input(shape=[32, 128]) >>> output_tensor = layer(input_tensor) >>> output_tensor.shape (None, 32, 64) **Applying a dense layer to a sequence using ellipses** This example shows how to instantiate a layer that applies the same dense operation to every element in a sequence, but uses the ellipsis notation instead of specifying the batch and sequence dimensions. 
Because we are using ellipsis notation and have specified only one axis, the `output_shape` arg is a single value. When instantiated in this way, the layer can handle any number of sequence dimensions - including the case where no sequence dimension exists. >>> layer = keras.layers.EinsumDense("...x,xy->...y", ... output_shape=64, ... bias_axes="y") >>> input_tensor = keras.Input(shape=[32, 128]) >>> output_tensor = layer(input_tensor) >>> output_tensor.shape (None, 32, 64) """ def __init__( self, equation, output_shape, activation=None, bias_axes=None, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, kernel_constraint=None, bias_constraint=None, lora_rank=None, **kwargs, ): super().__init__(**kwargs) self.equation = equation if isinstance(output_shape, int): self.partial_output_shape = (output_shape,) else: self.partial_output_shape = tuple(output_shape) self.bias_axes = bias_axes self.activation = activations.get(activation) self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) self.lora_rank = lora_rank self.lora_enabled = False def build(self, input_shape): shape_data = _analyze_einsum_string( self.equation, self.bias_axes, input_shape, self.partial_output_shape, ) kernel_shape, bias_shape, full_output_shape = shape_data self.full_output_shape = tuple(full_output_shape) self._kernel = self.add_weight( name="kernel", shape=tuple(kernel_shape), initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, dtype=self.dtype, trainable=True, ) if bias_shape is not None: self.bias = self.add_weight( name="bias", shape=tuple(bias_shape), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, dtype=self.dtype, trainable=True, ) else: self.bias = None super().build(input_shape) if self.lora_rank: self.enable_lora(self.lora_rank) @property def kernel(self): if not self.built: raise AttributeError( "You must build the layer before accessing `kernel`." 
) if self.lora_enabled: return self._kernel + ops.matmul( self.lora_kernel_a, self.lora_kernel_b ) return self._kernel def compute_output_shape(self, _): return self.full_output_shape def get_config(self): base_config = super().get_config() config = { "output_shape": self.partial_output_shape, "equation": self.equation, "activation": activations.serialize(self.activation), "bias_axes": self.bias_axes, "kernel_initializer": initializers.serialize( self.kernel_initializer ), "bias_initializer": initializers.serialize(self.bias_initializer), "kernel_regularizer": regularizers.serialize( self.kernel_regularizer ), "bias_regularizer": regularizers.serialize(self.bias_regularizer), "activity_regularizer": regularizers.serialize( self.activity_regularizer ), "kernel_constraint": constraints.serialize(self.kernel_constraint), "bias_constraint": constraints.serialize(self.bias_constraint), } if self.lora_rank: config["lora_rank"] = self.lora_rank return {**base_config, **config} def call(self, inputs): x = ops.einsum(self.equation, inputs, self.kernel) if self.bias is not None: x += self.bias if self.activation is not None: x = self.activation(x) return x def enable_lora( self, rank, a_initializer="he_uniform", b_initializer="zeros" ): if self.kernel_constraint: raise ValueError( "Lora is incompatible with kernel constraints. " "In order to enable lora on this layer, remove the " "`kernel_constraint` argument." ) if not self.built: raise ValueError( "Cannot enable lora on a layer that isn't yet built." ) if self.lora_enabled: raise ValueError( "lora is already enabled. " "This can only be done once per layer." ) self._tracker.unlock() self.lora_kernel_a = self.add_weight( name="lora_kernel_a", shape=(self.kernel.shape[:-1] + (rank,)), initializer=initializers.get(a_initializer), regularizer=self.kernel_regularizer, ) self.lora_kernel_b = self.add_weight( name="lora_kernel_b", shape=(rank, self.kernel.shape[-1]), initializer=initializers.get(b_initializer), regularizer=self.kernel_regularizer, ) self.kernel.trainable = False self._tracker.lock() self.lora_enabled = True def save_own_variables(self, store): if not self.lora_enabled: return super().save_own_variables(store) kernel_value = self.kernel store["0"] = kernel_value if self.bias is not None: store["1"] = self.bias def load_own_variables(self, store): if not self.lora_enabled: return super().load_own_variables(store) self._kernel.assign(store["0"]) if self.bias is not None: self.bias.assign(store["1"]) self.lora_kernel_a.assign(np.zeros(self.lora_kernel_a.shape)) self.lora_kernel_b.assign(np.zeros(self.lora_kernel_b.shape)) def _analyze_einsum_string(equation, bias_axes, input_shape, output_shape): """Analyzes an einsum string to determine the required weight shape.""" dot_replaced_string = re.sub(r"\.\.\.", "0", equation) # This is the case where no ellipses are present in the string. split_string = re.match( "([a-zA-Z]+),([a-zA-Z]+)->([a-zA-Z]+)", dot_replaced_string ) if split_string: return _analyze_split_string( split_string, bias_axes, input_shape, output_shape ) # This is the case where ellipses are present on the left. split_string = re.match( "0([a-zA-Z]+),([a-zA-Z]+)->0([a-zA-Z]+)", dot_replaced_string ) if split_string: return _analyze_split_string( split_string, bias_axes, input_shape, output_shape, left_elided=True ) # This is the case where ellipses are present on the right. 
    split_string = re.match(
        "([a-zA-Z]{2,})0,([a-zA-Z]+)->([a-zA-Z]+)0", dot_replaced_string
    )
    if split_string:
        return _analyze_split_string(
            split_string, bias_axes, input_shape, output_shape
        )

    raise ValueError(
        f"Invalid einsum equation '{equation}'. Equations must be in the form "
        "[X],[Y]->[Z], ...[X],[Y]->...[Z], or [X]...,[Y]->[Z]...."
    )


def _analyze_split_string(
    split_string, bias_axes, input_shape, output_shape, left_elided=False
):
    """Analyze a pre-split einsum string to find the weight shape."""
    input_spec = split_string.group(1)
    weight_spec = split_string.group(2)
    output_spec = split_string.group(3)
    elided = len(input_shape) - len(input_spec)

    if isinstance(output_shape, int):
        output_shape = [output_shape]
    else:
        output_shape = list(output_shape)

    output_shape.insert(0, input_shape[0])

    if elided > 0 and left_elided:
        for i in range(1, elided):
            # We already inserted the 0th input dimension at dim 0, so we need
            # to start at location 1 here.
            output_shape.insert(1, input_shape[i])
    elif elided > 0 and not left_elided:
        for i in range(len(input_shape) - elided, len(input_shape)):
            output_shape.append(input_shape[i])

    if left_elided:
        # If we have beginning dimensions elided, we need to use negative
        # indexing to determine where in the input dimension our values are.
        input_dim_map = {
            dim: (i + elided) - len(input_shape)
            for i, dim in enumerate(input_spec)
        }
        # Because we've constructed the full output shape already, we don't
        # need to do negative indexing.
        output_dim_map = {
            dim: (i + elided) for i, dim in enumerate(output_spec)
        }
    else:
        input_dim_map = {dim: i for i, dim in enumerate(input_spec)}
        output_dim_map = {dim: i for i, dim in enumerate(output_spec)}

    for dim in input_spec:
        input_shape_at_dim = input_shape[input_dim_map[dim]]
        if dim in output_dim_map:
            output_shape_at_dim = output_shape[output_dim_map[dim]]
            if (
                output_shape_at_dim is not None
                and output_shape_at_dim != input_shape_at_dim
            ):
                raise ValueError(
                    "Input shape and output shape do not match at shared "
                    f"dimension '{dim}'. Input shape is {input_shape_at_dim}, "
                    "and output shape "
                    f"is {output_shape[output_dim_map[dim]]}."
                )

    for dim in output_spec:
        if dim not in input_spec and dim not in weight_spec:
            raise ValueError(
                f"Dimension '{dim}' was specified in the output "
                f"'{output_spec}' but has no corresponding dim in the input "
                f"spec '{input_spec}' or weight spec '{weight_spec}'"
            )

    weight_shape = []
    for dim in weight_spec:
        if dim in input_dim_map:
            weight_shape.append(input_shape[input_dim_map[dim]])
        elif dim in output_dim_map:
            weight_shape.append(output_shape[output_dim_map[dim]])
        else:
            raise ValueError(
                f"Weight dimension '{dim}' did not have a match in either "
                f"the input spec '{input_spec}' or the output "
                f"spec '{output_spec}'. For this layer, the weight must "
                "be fully specified."
            )

    if bias_axes is not None:
        num_left_elided = elided if left_elided else 0
        idx_map = {
            char: output_shape[i + num_left_elided]
            for i, char in enumerate(output_spec)
        }

        for char in bias_axes:
            if char not in output_spec:
                raise ValueError(
                    f"Bias dimension '{char}' was requested, but is not part "
                    f"of the output spec '{output_spec}'"
                )

        first_bias_location = min(
            [output_spec.find(char) for char in bias_axes]
        )
        bias_output_spec = output_spec[first_bias_location:]

        bias_shape = [
            idx_map[char] if char in bias_axes else 1
            for char in bias_output_spec
        ]

        if not left_elided:
            for _ in range(elided):
                bias_shape.append(1)
    else:
        bias_shape = None

    return weight_shape, bias_shape, output_shape
keras/keras/layers/core/einsum_dense.py/0
{ "file_path": "keras/keras/layers/core/einsum_dense.py", "repo_id": "keras", "token_count": 7413 }
164
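The `EinsumDense` implementation above wires LoRA into the `kernel` property: after `enable_lora(rank)`, the effective kernel is `_kernel + lora_kernel_a @ lora_kernel_b`, with the base kernel frozen. A minimal usage sketch (the equation and shapes here are illustrative, not taken from the file):

import numpy as np
from keras import layers

# Per-timestep projection: "abc,cd->abd" maps 16 features to 8 for every step.
layer = layers.EinsumDense("abc,cd->abd", output_shape=(None, 8), bias_axes="d")
x = np.random.random((2, 5, 16)).astype("float32")
y = layer(x)  # builds the layer: kernel shape (16, 8), bias shape (8,)
assert y.shape == (2, 5, 8)

layer.enable_lora(2)  # freezes the base kernel; trains two rank-2 factors instead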
import os import numpy as np import pytest from absl.testing import parameterized from tensorflow import data as tf_data from keras import backend from keras import layers from keras import models from keras import testing from keras.saving import saving_api class DiscretizationTest(testing.TestCase, parameterized.TestCase): def test_discretization_basics(self): self.run_layer_test( layers.Discretization, init_kwargs={ "bin_boundaries": [0.0, 0.5, 1.0], }, input_shape=(2, 3), expected_output_shape=(2, 3), expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, expected_num_losses=0, supports_masking=False, run_training_check=False, ) def test_adapt_flow(self): layer = layers.Discretization(num_bins=4) layer.adapt( np.random.random((32, 3)), ) output = layer(np.array([[0.0, 0.1, 0.3]])) self.assertTrue(output.dtype, "int32") @parameterized.parameters( [ ("int", [[-1.0, 0.0, 0.1, 0.8, 1.2]], [[0, 1, 1, 2, 3]]), ("one_hot", [0.1, 0.8], [[0, 1, 0, 0], [0, 0, 1, 0]]), ("multi_hot", [[0.1, 0.8]], [[0, 1, 1, 0]]), ( "one_hot", [[[0.15, 0.75], [0.85, 0.45]]], [ [ [[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 0.0]], ] ], ), ( "multi_hot", [[[0.15, 0.75], [0.85, 0.45]]], [[[0.0, 1.0, 1.0, 0.0], [0.0, 1.0, 1.0, 0.0]]], ), ("count", [[0.1, 0.8, 0.9]], [[0, 1, 2, 0]]), ] ) def test_correctness(self, output_mode, input_array, expected_output): input_array = np.array(input_array) expected_output = np.array(expected_output) layer = layers.Discretization( bin_boundaries=[0.0, 0.5, 1.0], output_mode=output_mode ) output = layer(input_array) self.assertTrue(backend.is_tensor(output)) self.assertAllClose(output, expected_output) def test_tf_data_compatibility(self): # With fixed bins layer = layers.Discretization( bin_boundaries=[0.0, 0.35, 0.5, 1.0], dtype="float32" ) x = np.array([[-1.0, 0.0, 0.1, 0.2, 0.4, 0.5, 1.0, 1.2, 0.98]]) self.assertAllClose(layer(x), np.array([[0, 1, 1, 1, 2, 3, 4, 4, 3]])) ds = tf_data.Dataset.from_tensor_slices(x).batch(1).map(layer) for output in ds.take(1): output = output.numpy() self.assertAllClose(output, np.array([[0, 1, 1, 1, 2, 3, 4, 4, 3]])) # With adapt flow layer = layers.Discretization(num_bins=4) layer.adapt( np.random.random((32, 3)), ) x = np.array([[0.0, 0.1, 0.3]]) ds = tf_data.Dataset.from_tensor_slices(x).batch(1).map(layer) for output in ds.take(1): output.numpy() def test_saving(self): # With fixed bins layer = layers.Discretization(bin_boundaries=[0.0, 0.35, 0.5, 1.0]) model = models.Sequential( [ layers.Input((2,)), layer, ] ) fpath = os.path.join(self.get_temp_dir(), "model.keras") model.save(fpath) model = saving_api.load_model(fpath) x = np.array([[-1.0, 0.0, 0.1, 0.2, 0.4, 0.5, 1.0, 1.2, 0.98]]) self.assertAllClose(layer(x), np.array([[0, 1, 1, 1, 2, 3, 4, 4, 3]])) # With adapt flow layer = layers.Discretization(num_bins=4) layer.adapt( np.random.random((32, 3)), ) ref_input = np.random.random((1, 2)) ref_output = layer(ref_input) model = models.Sequential( [ layers.Input((2,)), layer, ] ) fpath = os.path.join(self.get_temp_dir(), "model.keras") model.save(fpath) model = saving_api.load_model(fpath) self.assertAllClose(layer(ref_input), ref_output) @parameterized.parameters( [ ( "one_hot", [[-1.0, 0.2, 0.7, 1.2]], [ [ [1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0], ] ], ), ( "multi_hot", [[[-1.0], [0.2], [0.7], [1.2]]], [ [ [1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0], ] ], ), ( "count", 
[[-1.0], [0.2], [0.7], [1.2]], [ [1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0], ], ), ] ) @pytest.mark.skipif( backend.backend() != "tensorflow", reason="Sparse tensor only works in TensorFlow", ) def test_sparse_output(self, output_mode, input_array, expected_output): from keras.utils.module_utils import tensorflow as tf x = np.array(input_array) layer = layers.Discretization( bin_boundaries=[0.0, 0.5, 1.0], sparse=True, output_mode=output_mode ) output = layer(x) self.assertTrue(isinstance(output, tf.SparseTensor)) self.assertAllClose(output, np.array(expected_output))
keras/keras/layers/preprocessing/discretization_test.py/0
{ "file_path": "keras/keras/layers/preprocessing/discretization_test.py", "repo_id": "keras", "token_count": 3613 }
165
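For reference, the bucketing convention the tests above rely on: `bin_boundaries=[0.0, 0.5, 1.0]` defines four bins, with values below the first boundary mapped to bin 0. A small sketch reproducing the "int" case from the parameterized test:

import numpy as np
from keras import layers

layer = layers.Discretization(bin_boundaries=[0.0, 0.5, 1.0], output_mode="int")
# Bins: (-inf, 0.0) -> 0, [0.0, 0.5) -> 1, [0.5, 1.0) -> 2, [1.0, inf) -> 3
print(layer(np.array([[-1.0, 0.0, 0.1, 0.8, 1.2]])))  # [[0 1 1 2 3]]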
import numpy as np import pytest from keras import backend from keras import layers from keras.testing import test_case class SpatialDropoutTest(test_case.TestCase): @pytest.mark.requires_trainable_backend def test_spatial_dropout_1d(self): self.run_layer_test( layers.SpatialDropout1D, init_kwargs={"rate": 0.5}, call_kwargs={"training": True}, input_shape=(2, 3, 4), ) self.run_layer_test( layers.SpatialDropout1D, init_kwargs={"rate": 0.5}, call_kwargs={"training": False}, input_shape=(2, 3, 4), ) @pytest.mark.requires_trainable_backend def test_spatial_dropout_2d(self): self.run_layer_test( layers.SpatialDropout2D, init_kwargs={"rate": 0.5}, call_kwargs={"training": True}, input_shape=(2, 3, 4, 5), ) self.run_layer_test( layers.SpatialDropout2D, init_kwargs={"rate": 0.5, "data_format": "channels_first"}, call_kwargs={"training": True}, input_shape=(2, 3, 4, 5), ) @pytest.mark.requires_trainable_backend def test_spatial_dropout_3d(self): self.run_layer_test( layers.SpatialDropout3D, init_kwargs={"rate": 0.5}, call_kwargs={"training": True}, input_shape=(2, 3, 4, 4, 5), ) self.run_layer_test( layers.SpatialDropout3D, init_kwargs={"rate": 0.5, "data_format": "channels_first"}, call_kwargs={"training": True}, input_shape=(2, 3, 4, 4, 5), ) def test_spatial_dropout_1D_dynamic(self): inputs = layers.Input((3, 2)) layer = layers.SpatialDropout1D(0.5) layer(inputs, training=True) def test_spatial_dropout_1D_correctness(self): inputs = np.ones((10, 3, 10)) layer = layers.SpatialDropout1D(0.5) outputs = layer(inputs, training=True) self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :]) def test_spatial_dropout_2D_dynamic(self): inputs = layers.Input((3, 2, 4)) layer = layers.SpatialDropout2D(0.5) layer(inputs, training=True) def test_spatial_dropout_2D_correctness(self): if backend.config.image_data_format() == "channels_last": inputs = np.ones((10, 3, 3, 10)) else: inputs = np.ones((10, 10, 3, 3)) layer = layers.SpatialDropout2D(0.5) outputs = layer(inputs, training=True) if backend.config.image_data_format() == "channels_last": self.assertAllClose(outputs[:, 0, 0, :], outputs[:, 1, 1, :]) else: self.assertAllClose(outputs[:, :, 0, 0], outputs[:, :, 1, 1]) def test_spatial_dropout_3D_dynamic(self): inputs = layers.Input((3, 2, 4, 2)) layer = layers.SpatialDropout3D(0.5) layer(inputs, training=True) def test_spatial_dropout_3D_correctness(self): if backend.config.image_data_format() == "channels_last": inputs = np.ones((10, 3, 3, 3, 10)) else: inputs = np.ones((10, 10, 3, 3, 3)) layer = layers.SpatialDropout3D(0.5) outputs = layer(inputs, training=True) if backend.config.image_data_format() == "channels_last": self.assertAllClose(outputs[:, 0, 0, 0, :], outputs[:, 1, 1, 1, :]) else: self.assertAllClose(outputs[:, :, 0, 0, 0], outputs[:, :, 1, 1, 1])
keras/keras/layers/regularization/spatial_dropout_test.py/0
{ "file_path": "keras/keras/layers/regularization/spatial_dropout_test.py", "repo_id": "keras", "token_count": 1761 }
166
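The correctness tests above check that spatial dropout zeroes entire feature maps rather than individual activations. A short demonstration of that property (input shape chosen arbitrarily):

import numpy as np
from keras import layers, ops

layer = layers.SpatialDropout1D(rate=0.5)
x = np.ones((4, 10, 8), dtype="float32")
y = ops.convert_to_numpy(layer(x, training=True))
# The dropout mask is shared across the time axis, so every timestep
# has the same channels zeroed out.
print(np.allclose(y[:, 0, :], y[:, 1, :]))  # True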
from keras import ops from keras.api_export import keras_export from keras.layers.input_spec import InputSpec from keras.layers.layer import Layer @keras_export("keras.layers.UpSampling1D") class UpSampling1D(Layer): """Upsampling layer for 1D inputs. Repeats each temporal step `size` times along the time axis. Examples: >>> input_shape = (2, 2, 3) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> x [[[ 0 1 2] [ 3 4 5]] [[ 6 7 8] [ 9 10 11]]] >>> y = keras.layers.UpSampling1D(size=2)(x) >>> y [[[ 0. 1. 2.] [ 0. 1. 2.] [ 3. 4. 5.] [ 3. 4. 5.]] [[ 6. 7. 8.] [ 6. 7. 8.] [ 9. 10. 11.] [ 9. 10. 11.]]] Args: size: Integer. Upsampling factor. Input shape: 3D tensor with shape: `(batch_size, steps, features)`. Output shape: 3D tensor with shape: `(batch_size, upsampled_steps, features)`. """ def __init__(self, size=2, **kwargs): super().__init__(**kwargs) self.size = int(size) self.input_spec = InputSpec(ndim=3) def compute_output_shape(self, input_shape): size = ( self.size * input_shape[1] if input_shape[1] is not None else None ) return [input_shape[0], size, input_shape[2]] def call(self, inputs): return ops.repeat(x=inputs, repeats=self.size, axis=1) def get_config(self): config = {"size": self.size} base_config = super().get_config() return {**base_config, **config}
keras/keras/layers/reshaping/up_sampling1d.py/0
{ "file_path": "keras/keras/layers/reshaping/up_sampling1d.py", "repo_id": "keras", "token_count": 731 }
167
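`UpSampling1D.call` is a thin wrapper over `ops.repeat` along the time axis, so the layer output can be checked against the op directly. A quick equivalence sketch with assumed shapes:

import numpy as np
from keras import layers, ops

x = np.arange(12).reshape((2, 3, 2)).astype("float32")
layer_out = ops.convert_to_numpy(layers.UpSampling1D(size=2)(x))
manual_out = ops.convert_to_numpy(ops.repeat(x, repeats=2, axis=1))
print(np.allclose(layer_out, manual_out), layer_out.shape)  # True (2, 6, 2)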
import numpy as np import pytest from keras import initializers from keras import layers from keras import testing class SimpleRNNTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_basics(self): self.run_layer_test( layers.SimpleRNN, init_kwargs={"units": 3, "dropout": 0.5, "recurrent_dropout": 0.5}, input_shape=(3, 2, 4), call_kwargs={"training": True}, expected_output_shape=(3, 3), expected_num_trainable_weights=3, expected_num_non_trainable_weights=0, expected_num_non_trainable_variables=1, supports_masking=True, ) self.run_layer_test( layers.SimpleRNN, init_kwargs={ "units": 3, "return_sequences": True, "bias_regularizer": "l1", "kernel_regularizer": "l2", "recurrent_regularizer": "l2", }, input_shape=(3, 2, 4), expected_output_shape=(3, 2, 3), expected_num_losses=3, expected_num_trainable_weights=3, expected_num_non_trainable_weights=0, supports_masking=True, ) def test_correctness(self): sequence = np.arange(24).reshape((2, 3, 4)).astype("float32") layer = layers.SimpleRNN( 4, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), ) output = layer(sequence) self.assertAllClose( np.array( [ [0.405432, 0.405432, 0.405432, 0.405432], [0.73605347, 0.73605347, 0.73605347, 0.73605347], ] ), output, ) layer = layers.SimpleRNN( 4, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), unroll=True, ) output = layer(sequence) self.assertAllClose( np.array( [ [0.405432, 0.405432, 0.405432, 0.405432], [0.73605347, 0.73605347, 0.73605347, 0.73605347], ] ), output, ) layer = layers.SimpleRNN( 4, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), go_backwards=True, ) output = layer(sequence) self.assertAllClose( np.array( [ [0.11144729, 0.11144729, 0.11144729, 0.11144729], [0.5528889, 0.5528889, 0.5528889, 0.5528889], ] ), output, ) layer = layers.SimpleRNN( 4, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), go_backwards=True, unroll=True, ) output = layer(sequence) self.assertAllClose( np.array( [ [0.11144729, 0.11144729, 0.11144729, 0.11144729], [0.5528889, 0.5528889, 0.5528889, 0.5528889], ] ), output, ) def test_statefulness(self): sequence = np.arange(24).reshape((2, 3, 4)).astype("float32") layer = layers.SimpleRNN( 4, stateful=True, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), ) layer(sequence) output = layer(sequence) self.assertAllClose( np.array( [ [0.40559256, 0.40559256, 0.40559256, 0.40559256], [0.7361247, 0.7361247, 0.7361247, 0.7361247], ] ), output, ) layer.reset_state() layer(sequence) output = layer(sequence) self.assertAllClose( np.array( [ [0.40559256, 0.40559256, 0.40559256, 0.40559256], [0.7361247, 0.7361247, 0.7361247, 0.7361247], ] ), output, ) def test_pass_initial_state(self): sequence = np.arange(24).reshape((2, 4, 3)).astype("float32") initial_state = np.arange(8).reshape((2, 4)).astype("float32") layer = layers.SimpleRNN( 4, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), ) output = layer(sequence, initial_state=initial_state) self.assertAllClose( np.array( [ [0.33621645, 0.33621645, 
0.33621645, 0.33621645], [0.6262637, 0.6262637, 0.6262637, 0.6262637], ] ), output, ) layer = layers.SimpleRNN( 4, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), go_backwards=True, ) output = layer(sequence, initial_state=initial_state) self.assertAllClose( np.array( [ [0.07344437, 0.07344437, 0.07344437, 0.07344437], [0.43043602, 0.43043602, 0.43043602, 0.43043602], ] ), output, ) def test_masking(self): sequence = np.arange(24).reshape((2, 4, 3)).astype("float32") mask = np.array([[True, True, False, True], [True, False, False, True]]) layer = layers.SimpleRNN( 4, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), unroll=True, ) output = layer(sequence, mask=mask) self.assertAllClose( np.array( [ [0.32951632, 0.32951632, 0.32951632, 0.32951632], [0.61799484, 0.61799484, 0.61799484, 0.61799484], ] ), output, ) layer = layers.SimpleRNN( 2, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), return_sequences=True, ) output = layer(sequence, mask=mask) self.assertAllClose( np.array( [ [0.0599281, 0.0599281], [0.15122814, 0.15122814], [0.15122814, 0.15122814], [0.32394567, 0.32394567], ], ), output[0], ) self.assertAllClose( np.array( [ [0.3969304, 0.3969304], [0.3969304, 0.3969304], [0.3969304, 0.3969304], [0.608085, 0.608085], ], ), output[1], ) layer = layers.SimpleRNN( 2, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), return_sequences=True, zero_output_for_mask=True, ) output = layer(sequence, mask=mask) self.assertAllClose( np.array( [ [0.0599281, 0.0599281], [0.15122814, 0.15122814], [0.0, 0.0], [0.32394567, 0.32394567], ], ), output[0], ) self.assertAllClose( np.array( [ [0.3969304, 0.3969304], [0.0, 0.0], [0.0, 0.0], [0.608085, 0.608085], ], ), output[1], ) layer = layers.SimpleRNN( 4, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), go_backwards=True, ) output = layer(sequence, mask=mask) self.assertAllClose( np.array( [ [0.07376196, 0.07376196, 0.07376196, 0.07376196], [0.43645123, 0.43645123, 0.43645123, 0.43645123], ] ), output, )
keras/keras/layers/rnn/simple_rnn_test.py/0
{ "file_path": "keras/keras/layers/rnn/simple_rnn_test.py", "repo_id": "keras", "token_count": 5507 }
168
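The statefulness test above hinges on `reset_state()` zeroing the carried hidden state, which makes a repeated call reproduce the first one. A compact sketch of that behavior (random inputs, assumed shapes):

import numpy as np
from keras import layers, ops

rnn = layers.SimpleRNN(4, stateful=True)
seq = np.random.random((2, 3, 5)).astype("float32")
out1 = ops.convert_to_numpy(rnn(seq))  # the final state is kept after the call
out2 = ops.convert_to_numpy(rnn(seq))  # differs: starts from out1's state
rnn.reset_state()
out3 = ops.convert_to_numpy(rnn(seq))
print(np.allclose(out1, out3))  # True: the reset restored the zero state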
import json import os import warnings import numpy as np from absl import logging from keras import backend from keras import optimizers from keras.backend.common import global_state from keras.legacy.saving import json_utils from keras.legacy.saving import saving_options from keras.legacy.saving import saving_utils from keras.saving import object_registration from keras.utils import io_utils try: import h5py except ImportError: h5py = None HDF5_OBJECT_HEADER_LIMIT = 64512 def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True): if h5py is None: raise ImportError( "`save_model()` using h5 format requires h5py. Could not " "import h5py." ) if not isinstance(filepath, h5py.File): # If file exists and should not be overwritten. if not overwrite and os.path.isfile(filepath): proceed = io_utils.ask_to_proceed_with_overwrite(filepath) if not proceed: return dirpath = os.path.dirname(filepath) if dirpath and not os.path.exists(dirpath): os.makedirs(dirpath, exist_ok=True) f = h5py.File(filepath, mode="w") opened_new_file = True else: f = filepath opened_new_file = False try: with saving_options.keras_option_scope(use_legacy_config=True): model_metadata = saving_utils.model_metadata( model, include_optimizer ) for k, v in model_metadata.items(): if isinstance(v, (dict, list, tuple)): f.attrs[k] = json.dumps( v, default=json_utils.get_json_type ).encode("utf8") else: f.attrs[k] = v model_weights_group = f.create_group("model_weights") save_weights_to_hdf5_group(model_weights_group, model) # TODO(b/128683857): Add integration tests between tf.keras and # external Keras, to avoid breaking TF.js users. if include_optimizer and hasattr(model, "optimizer"): save_optimizer_weights_to_hdf5_group(f, model.optimizer) f.flush() finally: if opened_new_file: f.close() def load_model_from_hdf5(filepath, custom_objects=None, compile=True): """Loads a model saved via `save_model_to_hdf5`. Args: filepath: One of the following: - String, path to the saved model - `h5py.File` object from which to load the model custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. compile: Boolean, whether to compile the model after loading. Returns: A Keras model instance. If an optimizer was found as part of the saved model, the model is already compiled. Otherwise, the model is uncompiled and a warning will be displayed. When `compile` is set to False, the compilation is omitted without any warning. Raises: ImportError: if h5py is not available. ValueError: In case of an invalid savefile. """ if h5py is None: raise ImportError( "`load_model()` using h5 format requires h5py. Could not " "import h5py." ) if not custom_objects: custom_objects = {} gco = object_registration.GLOBAL_CUSTOM_OBJECTS tlco = global_state.get_global_attribute("custom_objects_scope_dict", {}) custom_objects = {**custom_objects, **gco, **tlco} opened_new_file = not isinstance(filepath, h5py.File) if opened_new_file: f = h5py.File(filepath, mode="r") else: f = filepath model = None try: # instantiate model model_config = f.attrs.get("model_config") if model_config is None: raise ValueError( f"No model config found in the file at {filepath}." 
        )
        if hasattr(model_config, "decode"):
            model_config = model_config.decode("utf-8")
        model_config = json_utils.decode(model_config)

        with saving_options.keras_option_scope(use_legacy_config=True):
            model = saving_utils.model_from_config(
                model_config, custom_objects=custom_objects
            )

            # set weights
            load_weights_from_hdf5_group(f["model_weights"], model)

        if compile:
            # instantiate optimizer
            training_config = f.attrs.get("training_config")
            if hasattr(training_config, "decode"):
                training_config = training_config.decode("utf-8")
            if training_config is None:
                logging.warning(
                    "No training configuration found in the save file, so "
                    "the model was *not* compiled. Compile it manually."
                )
                return model
            training_config = json_utils.decode(training_config)

            # Compile model.
            model.compile(
                **saving_utils.compile_args_from_training_config(
                    training_config, custom_objects
                )
            )
            saving_utils.try_build_compiled_arguments(model)

            # Set optimizer weights.
            if "optimizer_weights" in f:
                try:
                    if isinstance(model.optimizer, optimizers.Optimizer):
                        model.optimizer.build(model._trainable_variables)
                    else:
                        model.optimizer._create_all_weights(
                            model._trainable_variables
                        )
                except (NotImplementedError, AttributeError):
                    logging.warning(
                        "Error when creating the weights of the optimizer, "
                        "making it impossible to restore the saved optimizer "
                        "state. As a result, your model is starting with "
                        "a freshly initialized optimizer."
                    )

                optimizer_weight_values = (
                    load_optimizer_weights_from_hdf5_group(f)
                )
                try:
                    model.optimizer.set_weights(optimizer_weight_values)
                except ValueError:
                    logging.warning(
                        "Error in loading the saved optimizer "
                        "state. As a result, your model is "
                        "starting with a freshly initialized "
                        "optimizer."
                    )
    finally:
        if opened_new_file:
            f.close()
    return model


def save_weights_to_hdf5_group(f, model):
    """Saves the weights of a list of layers to a HDF5 group.

    Args:
        f: HDF5 group.
        model: Model instance.
    """
    from keras import __version__ as keras_version

    save_attributes_to_hdf5_group(
        f, "layer_names", [layer.name.encode("utf8") for layer in model.layers]
    )
    f.attrs["backend"] = backend.backend().encode("utf8")
    f.attrs["keras_version"] = str(keras_version).encode("utf8")

    # Sort model layers by layer name to ensure that group names are strictly
    # growing to avoid prefix issues.
    for layer in sorted(model.layers, key=lambda x: x.name):
        g = f.create_group(layer.name)
        weights = _legacy_weights(layer)
        save_subset_weights_to_hdf5_group(g, weights)
    weights = list(
        v
        for v in model._trainable_variables + model._non_trainable_variables
        if v in model.weights
    )
    g = f.create_group("top_level_model_weights")
    save_subset_weights_to_hdf5_group(g, weights)


def save_subset_weights_to_hdf5_group(f, weights):
    """Save top-level weights of a model to a HDF5 group.

    Args:
        f: HDF5 group.
        weights: List of weight variables.
    """
    weight_values = [backend.convert_to_numpy(w) for w in weights]
    weight_names = [w.name.encode("utf8") for w in weights]
    save_attributes_to_hdf5_group(f, "weight_names", weight_names)
    for name, val in zip(weight_names, weight_values):
        param_dset = f.create_dataset(name, val.shape, dtype=val.dtype)
        if not val.shape:
            # scalar
            param_dset[()] = val
        else:
            param_dset[:] = val


def save_optimizer_weights_to_hdf5_group(hdf5_group, optimizer):
    """Saves optimizer weights of an optimizer to a HDF5 group.

    Args:
        hdf5_group: HDF5 group.
        optimizer: optimizer instance.
""" if isinstance(optimizer, optimizers.Optimizer): symbolic_weights = optimizer.variables else: symbolic_weights = getattr(optimizer, "weights") if symbolic_weights: weights_group = hdf5_group.create_group("optimizer_weights") weight_names = [str(w.name).encode("utf8") for w in symbolic_weights] save_attributes_to_hdf5_group( weights_group, "weight_names", weight_names ) weight_values = [backend.convert_to_numpy(w) for w in symbolic_weights] for name, val in zip(weight_names, weight_values): param_dset = weights_group.create_dataset( name, val.shape, dtype=val.dtype ) if not val.shape: # scalar param_dset[()] = val else: param_dset[:] = val def save_attributes_to_hdf5_group(group, name, data): """Saves attributes (data) of the specified name into the HDF5 group. This method deals with an inherent problem of HDF5 file which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to save. data: Attributes data to store. Raises: RuntimeError: If any single attribute is too large to be saved. """ # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( "The following attributes cannot be saved to HDF5 file because " f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} " f"bytes: {bad_attributes}" ) data_npy = np.asarray(data) num_chunks = 1 chunked_data = np.array_split(data_npy, num_chunks) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data): num_chunks += 1 chunked_data = np.array_split(data_npy, num_chunks) if num_chunks > 1: for chunk_id, chunk_data in enumerate(chunked_data): group.attrs["%s%d" % (name, chunk_id)] = chunk_data else: group.attrs[name] = data def load_weights_from_hdf5_group(f, model): """Implements topological (order-based) weight loading. Args: f: A pointer to a HDF5 group. model: Model instance. Raises: ValueError: in case of mismatch between provided layers and weights file. """ if "keras_version" in f.attrs: original_keras_version = f.attrs["keras_version"] if hasattr(original_keras_version, "decode"): original_keras_version = original_keras_version.decode("utf8") else: original_keras_version = "1" if "backend" in f.attrs: original_backend = f.attrs["backend"] if hasattr(original_backend, "decode"): original_backend = original_backend.decode("utf8") else: original_backend = None filtered_layers = [] for layer in model.layers: weights = _legacy_weights(layer) if weights: filtered_layers.append(layer) layer_names = load_attributes_from_hdf5_group(f, "layer_names") filtered_layer_names = [] for name in layer_names: g = f[name] weight_names = load_attributes_from_hdf5_group(g, "weight_names") if weight_names: filtered_layer_names.append(name) layer_names = filtered_layer_names if len(layer_names) != len(filtered_layers): raise ValueError( "Layer count mismatch when loading weights from file. " f"Model expected {len(filtered_layers)} layers, found " f"{len(layer_names)} saved layers." 
) for k, name in enumerate(layer_names): g = f[name] layer = filtered_layers[k] symbolic_weights = _legacy_weights(layer) weight_values = load_subset_weights_from_hdf5_group(g) if len(weight_values) != len(symbolic_weights): raise ValueError( f"Weight count mismatch for layer #{k} (named {layer.name} in " f"the current model, {name} in the save file). " f"Layer expects {len(symbolic_weights)} weight(s). Received " f"{len(weight_values)} saved weight(s)" ) for ref_v, val in zip(symbolic_weights, weight_values): ref_v.assign(val) if "top_level_model_weights" in f: symbolic_weights = list( # model.weights v for v in model._trainable_variables + model._non_trainable_variables if v in model.weights ) weight_values = load_subset_weights_from_hdf5_group( f["top_level_model_weights"] ) if len(weight_values) != len(symbolic_weights): raise ValueError( "Weight count mismatch for top-level weights when loading " "weights from file. " f"Model expects {len(symbolic_weights)} top-level weight(s). " f"Received {len(weight_values)} saved top-level weight(s)" ) for ref_v, val in zip(symbolic_weights, weight_values): ref_v.assign(val) def load_weights_from_hdf5_group_by_name(f, model, skip_mismatch=False): """Implements name-based weight loading (instead of topological loading). Layers that have no matching name are skipped. Args: f: A pointer to a HDF5 group. model: Model instance. skip_mismatch: Boolean, whether to skip loading of layers where there is a mismatch in the number of weights, or a mismatch in the shape of the weights. Raises: ValueError: in case of mismatch between provided layers and weights file and skip_match=False. """ if "keras_version" in f.attrs: original_keras_version = f.attrs["keras_version"] if hasattr(original_keras_version, "decode"): original_keras_version = original_keras_version.decode("utf8") else: original_keras_version = "1" if "backend" in f.attrs: original_backend = f.attrs["backend"] if hasattr(original_backend, "decode"): original_backend = original_backend.decode("utf8") else: original_backend = None # New file format. layer_names = load_attributes_from_hdf5_group(f, "layer_names") # Reverse index of layer name to list of layers with name. index = {} for layer in model.layers: if layer.name: index.setdefault(layer.name, []).append(layer) for k, name in enumerate(layer_names): g = f[name] weight_values = load_subset_weights_from_hdf5_group(g) for layer in index.get(name, []): symbolic_weights = _legacy_weights(layer) if len(weight_values) != len(symbolic_weights): if skip_mismatch: warnings.warn( f"Skipping loading of weights for layer #{k} (named " f"{layer.name}) due to mismatch in number of weights. " f"Layer expects {len(symbolic_weights)} weight(s). " f"Received {len(weight_values)} saved weight(s)", stacklevel=2, ) continue raise ValueError( f"Weight count mismatch for layer #{k} " f"(named {layer.name}). " f"Layer expects {len(symbolic_weights)} weight(s). " f"Received {len(weight_values)} saved weight(s)" ) # Set values. for i in range(len(weight_values)): expected_shape = symbolic_weights[i].shape received_shape = weight_values[i].shape if expected_shape != received_shape: if skip_mismatch: warnings.warn( f"Skipping loading weights for layer #{k} (named " f"{layer.name}) due to mismatch in shape for " f"weight {symbolic_weights[i].name}. " f"Weight expects shape {expected_shape}. 
" "Received saved weight " f"with shape {received_shape}", stacklevel=2, ) continue raise ValueError( f"Shape mismatch in layer #{k} (named {layer.name}) " f"for weight {symbolic_weights[i].name}. " f"Weight expects shape {expected_shape}. " "Received saved weight " f"with shape {received_shape}" ) else: symbolic_weights[i].assign(weight_values[i]) if "top_level_model_weights" in f: symbolic_weights = model.trainable_weights + model.non_trainable_weights weight_values = load_subset_weights_from_hdf5_group( f["top_level_model_weights"] ) if len(weight_values) != len(symbolic_weights): if skip_mismatch: warnings.warn( "Skipping loading top-level weights for model due to " "mismatch in number of weights. " f"Model expects {len(symbolic_weights)} " "top-level weight(s). " f"Received {len(weight_values)} saved top-level weight(s)", stacklevel=2, ) else: raise ValueError( "Weight count mismatch for top-level weights of model. " f"Model expects {len(symbolic_weights)} " "top-level weight(s). " f"Received {len(weight_values)} saved top-level weight(s)" ) else: for i in range(len(weight_values)): expected_shape = symbolic_weights[i].shape received_shape = weight_values[i].shape if expected_shape != received_shape: if skip_mismatch: warnings.warn( "Skipping loading top-level weight for model due " "to mismatch in shape for " f"weight {symbolic_weights[i].name}. " f"Weight expects shape {expected_shape}. " "Received saved weight " f"with shape {received_shape}", stacklevel=2, ) else: raise ValueError( "Shape mismatch in model for top-level weight " f"{symbolic_weights[i].name}. " f"Weight expects shape {expected_shape}. " "Received saved weight " f"with shape {received_shape}" ) else: symbolic_weights[i].assign(weight_values[i]) def load_subset_weights_from_hdf5_group(f): """Load layer weights of a model from hdf5. Args: f: A pointer to a HDF5 group. Returns: List of NumPy arrays of the weight values. Raises: ValueError: in case of mismatch between provided model and weights file. """ weight_names = load_attributes_from_hdf5_group(f, "weight_names") return [np.asarray(f[weight_name]) for weight_name in weight_names] def load_optimizer_weights_from_hdf5_group(hdf5_group): """Load optimizer weights from a HDF5 group. Args: hdf5_group: A pointer to a HDF5 group. Returns: data: List of optimizer weight names. """ weights_group = hdf5_group["optimizer_weights"] optimizer_weight_names = load_attributes_from_hdf5_group( weights_group, "weight_names" ) return [ weights_group[weight_name] for weight_name in optimizer_weight_names ] def load_attributes_from_hdf5_group(group, name): """Loads attributes of the specified name from the HDF5 group. This method deals with an inherent problem of HDF5 file which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to load. Returns: data: Attributes data. """ if name in group.attrs: data = [ n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name] ] else: data = [] chunk_id = 0 while f"{name}{chunk_id}" in group.attrs: data.extend( [ n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[f"{name}{chunk_id}"] ] ) chunk_id += 1 return data def _legacy_weights(layer): """Legacy weight order converter. For legacy reason, the layer.weights was in the order of [self.trainable_weights + self.non_trainable_weights], and this order was used for preserving the weights in h5 format. 
    The new order of layer.weights is the same as layer.get_weights(), which
    is more intuitive for users. To keep supporting existing saved h5 files,
    this method should be used to save/load weights.

    Args:
        layer: a `Model` or `Layer` instance.

    Returns:
        A list of variables with the legacy weight order.
    """
    return layer.trainable_weights + layer.non_trainable_weights
keras/keras/legacy/saving/legacy_h5_format.py/0
{ "file_path": "keras/keras/legacy/saving/legacy_h5_format.py", "repo_id": "keras", "token_count": 10858 }
169
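`save_attributes_to_hdf5_group` above works around HDF5's ~64 KB object-header limit by splitting a large attribute array into numbered chunks (`name0`, `name1`, ...), which `load_attributes_from_hdf5_group` later reassembles. A standalone sketch of just the splitting loop, run outside of any HDF5 file:

import numpy as np

HDF5_OBJECT_HEADER_LIMIT = 64512

def split_attribute(data):
    # Keep splitting until every chunk fits under the header limit.
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked = np.array_split(data_npy, num_chunks)
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked):
        num_chunks += 1
        chunked = np.array_split(data_npy, num_chunks)
    return chunked

names = [("weight_%06d" % i).encode("utf8") for i in range(30000)]
chunks = split_attribute(names)
print(len(chunks), max(c.nbytes for c in chunks) <= HDF5_OBJECT_HEADER_LIMIT)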
import numpy as np from absl.testing import parameterized from keras import testing from keras.metrics import f_score_metrics class FBetaScoreTest(parameterized.TestCase, testing.TestCase): def _run_test( self, y_true, y_pred, sample_weights, average, beta, threshold, reference_result, ): fbeta = f_score_metrics.FBetaScore( average, beta, threshold, dtype="float32" ) fbeta.update_state(y_true, y_pred, sample_weights) result = fbeta.result() self.assertAllClose(result, reference_result, atol=1e-6) def test_config(self): fbeta_obj = f_score_metrics.FBetaScore( beta=0.5, threshold=0.3, average=None, dtype="float32" ) self.assertEqual(fbeta_obj.beta, 0.5) self.assertEqual(fbeta_obj.average, None) self.assertEqual(fbeta_obj.threshold, 0.3) self.assertEqual(fbeta_obj.dtype, "float32") # Check save and restore config fbeta_obj2 = f_score_metrics.FBetaScore.from_config( fbeta_obj.get_config() ) self.assertEqual(fbeta_obj2.beta, 0.5) self.assertEqual(fbeta_obj2.average, None) self.assertEqual(fbeta_obj2.threshold, 0.3) self.assertEqual(fbeta_obj2.dtype, "float32") @parameterized.parameters( ("micro", 0.5), ("micro", 1.0), ("micro", 2.0), ("macro", 0.5), ("macro", 1.0), ("macro", 2.0), ("weighted", 0.5), ("weighted", 1.0), ("weighted", 2.0), ) def test_fbeta_perfect_score(self, average, beta): y_true = [[1, 1, 1], [1, 0, 0], [1, 1, 0]] y_pred = [[0.7, 0.7, 0.7], [1, 0, 0], [0.9, 0.8, 0]] self._run_test( y_true, y_pred, None, average=average, beta=beta, threshold=0.66, reference_result=1.0, ) @parameterized.parameters( ("micro", 0.5), ("micro", 1.0), ("micro", 2.0), ("macro", 0.5), ("macro", 1.0), ("macro", 2.0), ("weighted", 0.5), ("weighted", 1.0), ("weighted", 2.0), ) def test_fbeta_worst_score(self, average, beta): y_true = [[0, 0, 0], [0, 1, 0], [0, 0, 1]] y_pred = [[0.7, 0.7, 0.7], [1, 0, 0], [0.9, 0.8, 0]] self._run_test( y_true, y_pred, None, average=average, beta=beta, threshold=0.66, reference_result=0.0, ) @parameterized.parameters( # average, beta, result (None, 0.5, [0.71428573, 0.5, 0.833334]), (None, 1.0, [0.8, 0.5, 0.6666667]), (None, 2.0, [0.9090904, 0.5, 0.555556]), ("micro", 0.5, 0.6666667), ("micro", 1.0, 0.6666667), ("micro", 2.0, 0.6666667), ("macro", 0.5, 0.6825397), ("macro", 1.0, 0.6555555), ("macro", 2.0, 0.6548822), ("weighted", 0.5, 0.6825397), ("weighted", 1.0, 0.6555555), ("weighted", 2.0, 0.6548822), ) def test_fbeta_random_score(self, average, beta, result): y_pred = [[0.7, 0.7, 0.7], [1, 0, 0], [0.9, 0.8, 0]] y_true = [[0, 0, 1], [1, 1, 0], [1, 1, 1]] self._run_test( y_true, y_pred, None, average=average, beta=beta, threshold=0.66, reference_result=result, ) @parameterized.parameters( # average, beta, result (None, 0.5, [0.9090904, 0.555556, 1.0]), (None, 1.0, [0.8, 0.6666667, 1.0]), (None, 2.0, [0.71428573, 0.833334, 1.0]), ("micro", 0.5, 0.833334), ("micro", 1.0, 0.833334), ("micro", 2.0, 0.833334), ("macro", 0.5, 0.821549), ("macro", 1.0, 0.822222), ("macro", 2.0, 0.849206), ("weighted", 0.5, 0.880471), ("weighted", 1.0, 0.844445), ("weighted", 2.0, 0.829365), ) def test_fbeta_random_score_none(self, average, beta, result): y_true = [ [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 0, 0], [0, 0, 1], ] y_pred = [ [0.9, 0.1, 0], [0.2, 0.6, 0.2], [0, 0, 1], [0.4, 0.3, 0.3], [0, 0.9, 0.1], [0, 0, 1], ] self._run_test( y_true, y_pred, None, average=average, beta=beta, threshold=None, reference_result=result, ) @parameterized.parameters( # average, beta, sample_weights, result (None, 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.909091, 0.555556, 1.0]), (None, 0.5, [1.0, 0.0, 
1.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0]), (None, 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], [0.9375, 0.714286, 1.0]), (None, 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.8, 0.666667, 1.0]), (None, 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0]), (None, 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], [0.857143, 0.8, 1.0]), (None, 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.714286, 0.833333, 1.0]), (None, 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0]), (None, 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], [0.789474, 0.909091, 1.0]), ("micro", 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.833333), ("micro", 0.5, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0), ("micro", 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.9), ("micro", 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.833333), ("micro", 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0), ("micro", 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.9), ("micro", 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.833333), ("micro", 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0), ("micro", 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.9), ("macro", 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.821549), ("macro", 0.5, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 0.666667), ("macro", 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.883929), ("macro", 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.822222), ("macro", 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 0.666667), ("macro", 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.885714), ("macro", 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.849206), ("macro", 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 0.666667), ("macro", 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.899522), ("weighted", 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.880471), ("weighted", 0.5, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0), ("weighted", 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.917857), ("weighted", 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.844444), ("weighted", 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0), ("weighted", 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.902857), ("weighted", 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.829365), ("weighted", 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0), ("weighted", 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.897608), ) def test_fbeta_weighted_random_score_none( self, average, beta, sample_weights, result ): y_true = [ [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 0, 0], [0, 0, 1], ] y_pred = [ [0.9, 0.1, 0], [0.2, 0.6, 0.2], [0, 0, 1], [0.4, 0.3, 0.3], [0, 0.9, 0.1], [0, 0, 1], ] self._run_test( y_true, y_pred, sample_weights, average=average, beta=beta, threshold=None, reference_result=result, ) class F1ScoreTest(testing.TestCase): def test_config(self): f1_obj = f_score_metrics.F1Score(dtype="float32") config = f1_obj.get_config() self.assertNotIn("beta", config) # Check save and restore config f1_obj = f_score_metrics.F1Score.from_config(config) self.assertEqual(f1_obj.average, None) self.assertEqual(f1_obj.dtype, "float32") def test_correctness(self): f1 = f_score_metrics.F1Score() fbeta = f_score_metrics.FBetaScore(beta=1.0) y_true = np.array( [ [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 0, 0], [0, 0, 1], ] ) y_pred = np.array( [ [0.9, 0.1, 0], [0.2, 0.6, 0.2], [0, 0, 1], [0.4, 0.3, 0.3], [0, 0.9, 0.1], [0, 0, 1], ] ) fbeta.update_state(y_true, y_pred) f1.update_state(y_true, y_pred) self.assertAllClose(fbeta.result(), f1.result(), atol=1e-6)
keras/keras/metrics/f_score_metrics_test.py/0
{ "file_path": "keras/keras/metrics/f_score_metrics_test.py", "repo_id": "keras", "token_count": 5700 }
170
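As a cross-check on the tests above, the micro-averaged score can be recomputed from the textbook definition F_beta = (1 + beta^2) * P * R / (beta^2 * P + R) applied to global counts. A small sketch (with `threshold=None` the predictions are argmax one-hot, so this toy batch is classified perfectly):

import numpy as np
from keras.metrics import f_score_metrics

y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]], "float32")
y_pred = np.array(
    [[0.9, 0.1, 0.0], [0.2, 0.6, 0.2], [0.0, 0.0, 1.0], [0.4, 0.3, 0.3]],
    "float32",
)
metric = f_score_metrics.FBetaScore(average="micro", beta=2.0, threshold=None)
metric.update_state(y_true, y_pred)

beta, precision, recall = 2.0, 1.0, 1.0  # all four argmax predictions are correct
expected = (1 + beta**2) * precision * recall / (beta**2 * precision + recall)
print(float(metric.result()), expected)  # 1.0 1.0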
import numpy as np import pytest import tree from absl.testing import parameterized from keras import layers from keras import models from keras import testing from keras.models.cloning import clone_model def get_mlp_functional_model(shared_layers=False): inputs = layers.Input(shape=(3,)) x = layers.Dense(2)(inputs) if shared_layers: layer = layers.Dense(2, name="shared") x = layer(x) x = layer(x) outputs = layers.Dense(2)(x) model = models.Model(inputs, outputs) return model def get_cnn_functional_model(shared_layers=False): inputs = layers.Input(shape=(7, 3)) x = layers.Conv1D(2, 2, padding="same")(inputs) if shared_layers: layer = layers.Conv1D(2, 2, padding="same", name="shared") x = layer(x) x = layer(x) outputs = layers.Conv1D(2, 2, padding="same")(x) model = models.Model(inputs, outputs) return model def get_sequential_model(explicit_input=True): model = models.Sequential() if explicit_input: model.add(layers.Input(shape=(3,))) model.add(layers.Dense(2)) model.add(layers.Dense(2)) return model def get_subclassed_model(): class ExampleModel(models.Model): def __init__(self, **kwargs): super().__init__(**kwargs) self.d1 = layers.Dense(2) self.d2 = layers.Dense(2) def call(self, x): return self.d2(self.d1(x)) return ExampleModel() @pytest.mark.requires_trainable_backend class CloneModelTest(testing.TestCase, parameterized.TestCase): @parameterized.named_parameters( ("mlp_functional", get_mlp_functional_model), ("cnn_functional", get_cnn_functional_model, True), ("sequential", get_sequential_model), ( "deferred_sequential", lambda: get_sequential_model(explicit_input=False), ), ("subclassed", get_subclassed_model), ) def test_cloning_correctness(self, model_fn, is_conv=False): ref_input = np.random.random((2, 7, 3) if is_conv else (2, 3)) model = model_fn() new_model = clone_model(model) ref_output = model(ref_input) new_model(ref_input) # Maybe needed to build the model new_model.set_weights(model.get_weights()) output = new_model(ref_input) self.assertAllClose(ref_output, output) @parameterized.named_parameters( ("mlp_functional", get_mlp_functional_model), ("cnn_functional", get_cnn_functional_model), ("sequential", get_sequential_model), ) def test_custom_clone_function(self, model_fn): def clone_function(layer): config = layer.get_config() config["name"] = config["name"] + "_custom" return layer.__class__.from_config(config) model = model_fn() new_model = clone_model(model, clone_function=clone_function) for l1, l2 in zip(model.layers, new_model.layers): if not isinstance(l1, layers.InputLayer): self.assertEqual(l2.name, l1.name + "_custom") def test_shared_layers_cloning(self): model = get_mlp_functional_model(shared_layers=True) new_model = clone_model(model) self.assertLen(new_model.layers, 4) def test_structured_io_cloning(self): x = layers.Input((3,)) y = layers.Input((3,)) z1 = x + y z2 = layers.Dense(5)(z1) inputs = dict(x=x, y=y) outputs = dict(z1=z1, z2=z2) model0 = models.Model(inputs, outputs) model = clone_model(model0) tree.assert_same_structure(model.input, inputs) tree.assert_same_structure(model.output, outputs) model = clone_model(model0, input_tensors=inputs) tree.assert_same_structure(model.input, inputs) tree.assert_same_structure(model.output, outputs) with self.assertRaisesRegex( ValueError, "`input_tensors` must have the same structure as model.input", ): model = clone_model(model0, input_tensors=(x, y))
keras/keras/models/cloning_test.py/0
{ "file_path": "keras/keras/models/cloning_test.py", "repo_id": "keras", "token_count": 1812 }
171
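The `clone_function` hook exercised above receives every layer of the source model and must return a fresh layer; the test renames layers, but the same hook can change any config field. A hypothetical sketch that clones a model with all layers frozen (names and shapes are illustrative only):

from keras import layers, models
from keras.models.cloning import clone_model

def freeze_clone(layer):
    # Rebuild the layer from its config, then mark the copy non-trainable.
    new_layer = layer.__class__.from_config(layer.get_config())
    new_layer.trainable = False
    return new_layer

model = models.Sequential(
    [layers.Input(shape=(3,)), layers.Dense(2), layers.Dense(2)]
)
clone = clone_model(model, clone_function=freeze_clone)
print(all(not layer.trainable for layer in clone.layers))  # True; original unchanged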
from keras import backend
from keras.api_export import keras_export
from keras.backend import KerasTensor
from keras.backend import any_symbolic_tensors
from keras.ops.operation import Operation
from keras.ops.operation_utils import reduce_shape


class Cholesky(Operation):
    def __init__(self):
        super().__init__()

    def call(self, x):
        return _cholesky(x)

    def compute_output_spec(self, x):
        _assert_2d(x)
        _assert_square(x)
        return KerasTensor(x.shape, x.dtype)


@keras_export(["keras.ops.cholesky", "keras.ops.linalg.cholesky"])
def cholesky(x):
    """Computes the Cholesky decomposition of a positive semi-definite matrix.

    Args:
        x: Input tensor of shape `(..., M, M)`.

    Returns:
        A tensor of shape `(..., M, M)` representing the lower triangular
        Cholesky factor of `x`.
    """
    if any_symbolic_tensors((x,)):
        return Cholesky().symbolic_call(x)
    return _cholesky(x)


def _cholesky(x):
    x = backend.convert_to_tensor(x)
    _assert_2d(x)
    _assert_square(x)
    try:
        return backend.linalg.cholesky(x)
    except Exception as e:
        raise ValueError(f"Cholesky decomposition failed: {e}")


class Det(Operation):
    def __init__(self):
        super().__init__()

    def call(self, x):
        return _det(x)

    def compute_output_spec(self, x):
        _assert_2d(x)
        _assert_square(x)
        return KerasTensor(x.shape[:-2], x.dtype)


@keras_export(["keras.ops.det", "keras.ops.linalg.det"])
def det(x):
    """Computes the determinant of a square tensor.

    Args:
        x: Input tensor of shape `(..., M, M)`.

    Returns:
        A tensor of shape `(...,)` representing the determinant of `x`.
    """
    if any_symbolic_tensors((x,)):
        return Det().symbolic_call(x)
    return _det(x)


def _det(x):
    x = backend.convert_to_tensor(x)
    _assert_2d(x)
    _assert_square(x)
    return backend.linalg.det(x)


class Eig(Operation):
    def __init__(self):
        super().__init__()

    def call(self, x):
        return _eig(x)

    def compute_output_spec(self, x):
        _assert_2d(x)
        _assert_square(x)
        return (
            KerasTensor(x.shape[:-1], x.dtype),
            KerasTensor(x.shape, x.dtype),
        )


@keras_export(["keras.ops.eig", "keras.ops.linalg.eig"])
def eig(x):
    """Computes the eigenvalues and eigenvectors of a square matrix.

    Args:
        x: Input tensor of shape `(..., M, M)`.

    Returns:
        A tuple of two tensors: a tensor of shape `(..., M)` containing
        eigenvalues and a tensor of shape `(..., M, M)` containing
        eigenvectors.
    """
    if any_symbolic_tensors((x,)):
        return Eig().symbolic_call(x)
    return _eig(x)


def _eig(x):
    x = backend.convert_to_tensor(x)
    _assert_2d(x)
    _assert_square(x)
    return backend.linalg.eig(x)


class Inv(Operation):
    def __init__(self):
        super().__init__()

    def call(self, x):
        return _inv(x)

    def compute_output_spec(self, x):
        _assert_2d(x)
        _assert_square(x)
        return KerasTensor(x.shape, x.dtype)


@keras_export(["keras.ops.inv", "keras.ops.linalg.inv"])
def inv(x):
    """Computes the inverse of a square tensor.

    Args:
        x: Input tensor of shape `(..., M, M)`.

    Returns:
        A tensor of shape `(..., M, M)` representing the inverse of `x`.
    """
    if any_symbolic_tensors((x,)):
        return Inv().symbolic_call(x)
    return _inv(x)


def _inv(x):
    x = backend.convert_to_tensor(x)
    _assert_2d(x)
    _assert_square(x)
    return backend.linalg.inv(x)


class LuFactor(Operation):
    def __init__(self):
        super().__init__()

    def call(self, x):
        return _lu_factor(x)

    def compute_output_spec(self, x):
        _assert_2d(x)
        batch_shape = x.shape[:-2]
        m, n = x.shape[-2:]
        k = min(m, n)
        return (
            KerasTensor(batch_shape + (m, n), x.dtype),
            KerasTensor(batch_shape + (k,), x.dtype),
        )


@keras_export(["keras.ops.lu_factor", "keras.ops.linalg.lu_factor"])
def lu_factor(x):
    """Computes the lower-upper decomposition of a square matrix.

    Args:
        x: A tensor of shape `(..., M, M)`.

    Returns:
        A tuple of two tensors: a tensor of shape `(..., M, M)` containing the
        lower and upper triangular matrices and a tensor of shape `(..., M)`
        containing the pivots.
    """
    if any_symbolic_tensors((x,)):
        return LuFactor().symbolic_call(x)
    return _lu_factor(x)


def _lu_factor(x):
    x = backend.convert_to_tensor(x)
    _assert_2d(x)
    if backend.backend() == "tensorflow":
        try:
            _assert_square(x)
        except ValueError as e:
            raise ValueError(
                f"LU decomposition failed: {e}. LU decomposition is only "
                "supported for square matrices in TensorFlow."
            )
    return backend.linalg.lu_factor(x)


class Norm(Operation):
    def __init__(self, ord=None, axis=None, keepdims=False):
        super().__init__()
        if isinstance(ord, str):
            if ord not in ("fro", "nuc"):
                raise ValueError(
                    "Invalid `ord` argument. "
                    "Expected one of {'fro', 'nuc'} when using string. "
                    f"Received: ord={ord}"
                )
        if isinstance(axis, int):
            axis = [axis]
        self.ord = ord
        self.axis = axis
        self.keepdims = keepdims

    def compute_output_spec(self, x):
        output_dtype = backend.standardize_dtype(x.dtype)
        if "int" in output_dtype or output_dtype == "bool":
            output_dtype = backend.floatx()
        if self.axis is None:
            axis = tuple(range(len(x.shape)))
        else:
            axis = self.axis
        num_axes = len(axis)
        if num_axes == 1 and isinstance(self.ord, str):
            raise ValueError(
                "Invalid `ord` argument for vector norm. "
                f"Received: ord={self.ord}"
            )
        elif num_axes == 2 and self.ord not in (
            None,
            "fro",
            "nuc",
            float("inf"),
            float("-inf"),
            1,
            -1,
            2,
            -2,
        ):
            raise ValueError(
                "Invalid `ord` argument for matrix norm. "
                f"Received: ord={self.ord}"
            )
        return KerasTensor(
            reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
            dtype=output_dtype,
        )

    def call(self, x):
        x = backend.convert_to_tensor(x)
        return backend.linalg.norm(
            x, ord=self.ord, axis=self.axis, keepdims=self.keepdims
        )


@keras_export(["keras.ops.norm", "keras.ops.linalg.norm"])
def norm(x, ord=None, axis=None, keepdims=False):
    """Matrix or vector norm.

    This function is able to return one of eight different matrix norms, or
    one of an infinite number of vector norms (described below), depending on
    the value of the `ord` parameter.

    Args:
        x: Input tensor.
        ord: Order of the norm (see table under Notes). The default is `None`.
        axis: If `axis` is an integer, it specifies the axis of `x` along
            which to compute the vector norms. If `axis` is a 2-tuple, it
            specifies the axes that hold 2-D matrices, and the matrix norms of
            these matrices are computed.
        keepdims: If this is set to `True`, the axes which are reduced are
            left in the result as dimensions with size one.

    Note: For values of `ord < 1`, the result is, strictly speaking, not a
    mathematical 'norm', but it may still be useful for various numerical
    purposes. The following norms can be calculated:
    - For matrices:
        - `ord=None`: Frobenius norm
        - `ord="fro"`: Frobenius norm
        - `ord="nuc"`: nuclear norm
        - `ord=np.inf`: `max(sum(abs(x), axis=1))`
        - `ord=-np.inf`: `min(sum(abs(x), axis=1))`
        - `ord=0`: not supported
        - `ord=1`: `max(sum(abs(x), axis=0))`
        - `ord=-1`: `min(sum(abs(x), axis=0))`
        - `ord=2`: 2-norm (largest sing. value)
        - `ord=-2`: smallest singular value
        - other: not supported
    - For vectors:
        - `ord=None`: 2-norm
        - `ord="fro"`: not supported
        - `ord="nuc"`: not supported
        - `ord=np.inf`: `max(abs(x))`
        - `ord=-np.inf`: `min(abs(x))`
        - `ord=0`: `sum(x != 0)`
        - `ord=1`: as below
        - `ord=-1`: as below
        - `ord=2`: as below
        - `ord=-2`: as below
        - other: `sum(abs(x)**ord)**(1./ord)`

    Returns:
        Norm of the matrix or vector(s).

    Example:

    >>> x = keras.ops.reshape(keras.ops.arange(9, dtype="float32") - 4, (3, 3))
    >>> keras.ops.linalg.norm(x)
    7.7459664
    """
    if any_symbolic_tensors((x,)):
        return Norm(ord=ord, axis=axis, keepdims=keepdims).symbolic_call(x)
    x = backend.convert_to_tensor(x)
    return backend.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)


class Qr(Operation):
    def __init__(self, mode="reduced"):
        super().__init__()
        if mode not in {"reduced", "complete"}:
            raise ValueError(
                "`mode` argument value not supported. "
                "Expected one of {'reduced', 'complete'}. "
                f"Received: mode={mode}"
            )
        self.mode = mode

    def compute_output_spec(self, x):
        if len(x.shape) < 2:
            raise ValueError(
                "Input should have rank >= 2. Received: "
                f"input.shape = {x.shape}"
            )
        m = x.shape[-2]
        n = x.shape[-1]
        if m is None or n is None:
            raise ValueError(
                "Input should have its last 2 dimensions "
                "fully-defined. Received: "
                f"input.shape = {x.shape}"
            )
        k = min(m, n)
        base = tuple(x.shape[:-2])
        if self.mode == "reduced":
            return (
                KerasTensor(shape=base + (m, k), dtype=x.dtype),
                KerasTensor(shape=base + (k, n), dtype=x.dtype),
            )
        # 'complete' mode.
        return (
            KerasTensor(shape=base + (m, m), dtype=x.dtype),
            KerasTensor(shape=base + (m, n), dtype=x.dtype),
        )

    def call(self, x):
        x = backend.convert_to_tensor(x)
        return backend.linalg.qr(x, mode=self.mode)


@keras_export(["keras.ops.qr", "keras.ops.linalg.qr"])
def qr(x, mode="reduced"):
    """Computes the QR decomposition of a tensor.

    Args:
        x: Input tensor of shape `(..., M, N)`.
        mode: A string specifying the mode of the QR decomposition.
            - 'reduced': Returns the reduced QR decomposition. (default)
            - 'complete': Returns the complete QR decomposition.

    Returns:
        A tuple containing two tensors. The first tensor of shape `(..., M, K)`
        is the orthogonal matrix `q` and the second tensor of shape
        `(..., K, N)` is the upper triangular matrix `r`, where `K = min(M, N)`.

    Example:

    >>> x = keras.ops.convert_to_tensor([[1., 2.], [3., 4.], [5., 6.]])
    >>> q, r = qr(x)
    >>> print(q)
    array([[-0.16903079  0.897085]
           [-0.5070925   0.2760267 ]
           [-0.8451542  -0.34503305]], shape=(3, 2), dtype=float32)
    """
    if any_symbolic_tensors((x,)):
        return Qr(mode=mode).symbolic_call(x)
    x = backend.convert_to_tensor(x)
    return backend.linalg.qr(x, mode=mode)


class Solve(Operation):
    def __init__(self):
        super().__init__()

    def call(self, a, b):
        return _solve(a, b)

    def compute_output_spec(self, a, b):
        _assert_2d(a)
        _assert_square(a)
        _assert_1d(b)
        _assert_a_b_compat(a, b)
        return KerasTensor(b.shape, b.dtype)


@keras_export(["keras.ops.solve", "keras.ops.linalg.solve"])
def solve(a, b):
    """Solves a linear system of equations given by `a x = b`.

    Args:
        a: A tensor of shape `(..., M, M)` representing the coefficients
            matrix.
        b: A tensor of shape `(..., M)` or `(..., M, N)` representing the
            right-hand side or "dependent variable" matrix.

    Returns:
        A tensor of shape `(..., M)` or `(..., M, N)` representing the
        solution of the linear system. Returned shape is identical to `b`.
    """
    if any_symbolic_tensors((a, b)):
        return Solve().symbolic_call(a, b)
    return _solve(a, b)


def _solve(a, b):
    a = backend.convert_to_tensor(a)
    b = backend.convert_to_tensor(b)
    _assert_2d(a)
    _assert_square(a)
    _assert_1d(b)
    _assert_a_b_compat(a, b)
    return backend.linalg.solve(a, b)


class SolveTriangular(Operation):
    def __init__(self, lower=False):
        super().__init__()
        self.lower = lower

    def call(self, a, b):
        return _solve_triangular(a, b, self.lower)

    def compute_output_spec(self, a, b):
        _assert_2d(a)
        _assert_square(a)
        _assert_1d(b)
        _assert_a_b_compat(a, b)
        return KerasTensor(b.shape, b.dtype)


@keras_export(
    ["keras.ops.solve_triangular", "keras.ops.linalg.solve_triangular"]
)
def solve_triangular(a, b, lower=False):
    """Solves a linear system of equations given by `a x = b`.

    Args:
        a: A tensor of shape `(..., M, M)` representing the triangular
            coefficients matrix.
        b: A tensor of shape `(..., M)` or `(..., M, N)` representing the
            right-hand side or "dependent variable" matrix.
        lower: A boolean. Whether `a` is a lower triangular matrix (`True`)
            or an upper triangular matrix (`False`). Defaults to `False`.

    Returns:
        A tensor of shape `(..., M)` or `(..., M, N)` representing the
        solution of the linear system. Returned shape is identical to `b`.
    """
    if any_symbolic_tensors((a, b)):
        return SolveTriangular(lower).symbolic_call(a, b)
    return _solve_triangular(a, b, lower)


def _solve_triangular(a, b, lower=False):
    a = backend.convert_to_tensor(a)
    b = backend.convert_to_tensor(b)
    _assert_2d(a)
    _assert_square(a)
    _assert_1d(b)
    _assert_a_b_compat(a, b)
    return backend.linalg.solve_triangular(a, b, lower)


class SVD(Operation):
    def __init__(self, full_matrices=True, compute_uv=True):
        super().__init__()
        self.full_matrices = full_matrices
        self.compute_uv = compute_uv

    def call(self, x):
        return _svd(x, self.full_matrices, self.compute_uv)

    def compute_output_spec(self, x):
        _assert_2d(x)
        rows, columns = x.shape[-2:]
        batches = x.shape[:-2]
        s_shape = batches + (min(rows, columns),)
        if self.full_matrices:
            u_shape = batches + (rows, rows)
            v_shape = batches + (columns, columns)
        else:
            u_shape = batches + (rows, min(rows, columns))
            v_shape = batches + (min(rows, columns), columns)

        if self.compute_uv:
            return (
                KerasTensor(u_shape, x.dtype),
                KerasTensor(s_shape, x.dtype),
                KerasTensor(v_shape, x.dtype),
            )
        return KerasTensor(s_shape, x.dtype)


@keras_export(["keras.ops.svd", "keras.ops.linalg.svd"])
def svd(x, full_matrices=True, compute_uv=True):
    """Computes the singular value decomposition of a matrix.

    Args:
        x: Input tensor of shape `(..., M, N)`.
        full_matrices: Whether to compute full-sized `u` and `v` matrices
            (`True`) or the reduced ones (`False`). Defaults to `True`.
        compute_uv: If `False`, only the singular values are returned.
            Defaults to `True`.

    Returns:
        A tuple of three tensors: a tensor of shape `(..., M, M)` containing
        the left singular vectors, a tensor of shape `(..., min(M, N))`
        containing the singular values and a tensor of shape `(..., N, N)`
        containing the right singular vectors.
    """
    if any_symbolic_tensors((x,)):
        return SVD(full_matrices, compute_uv).symbolic_call(x)
    return _svd(x, full_matrices, compute_uv)


def _svd(x, full_matrices=True, compute_uv=True):
    x = backend.convert_to_tensor(x)
    _assert_2d(x)
    return backend.linalg.svd(x, full_matrices, compute_uv)


def _assert_1d(*arrays):
    for a in arrays:
        if a.ndim < 1:
            raise ValueError(
                "Expected input to have rank >= 1. "
                f"Received scalar input {a}."
            )


def _assert_2d(*arrays):
    for a in arrays:
        if a.ndim < 2:
            raise ValueError(
                "Expected input to have rank >= 2. "
                f"Received input with shape {a.shape}."
            )


def _assert_square(*arrays):
    for a in arrays:
        m, n = a.shape[-2:]
        if m != n:
            raise ValueError(
                "Expected a square matrix. "
                f"Received non-square input with shape {a.shape}"
            )


def _assert_a_b_compat(a, b):
    if a.ndim == b.ndim:
        if a.shape[-2] != b.shape[-2]:
            raise ValueError(
                "Incompatible shapes between `a` and `b`. 
" "Expected `a.shape[-2] == b.shape[-2]`. " f"Received: a.shape={a.shape}, b.shape={b.shape}" ) elif a.ndim == b.ndim - 1: if a.shape[-1] != b.shape[-1]: raise ValueError( "Incompatible shapes between `a` and `b`. " "Expected `a.shape[-1] == b.shape[-1]`. " f"Received: a.shape={a.shape}, b.shape={b.shape}" )
keras/keras/ops/linalg.py/0
{ "file_path": "keras/keras/ops/linalg.py", "repo_id": "keras", "token_count": 8541 }
172
from keras.api_export import keras_export from keras.optimizers.adadelta import Adadelta from keras.optimizers.adafactor import Adafactor from keras.optimizers.adagrad import Adagrad from keras.optimizers.adam import Adam from keras.optimizers.adamax import Adamax from keras.optimizers.adamw import AdamW from keras.optimizers.ftrl import Ftrl from keras.optimizers.lion import Lion from keras.optimizers.loss_scale_optimizer import LossScaleOptimizer from keras.optimizers.nadam import Nadam from keras.optimizers.optimizer import Optimizer from keras.optimizers.rmsprop import RMSprop from keras.optimizers.sgd import SGD from keras.saving import serialization_lib ALL_OBJECTS = { Optimizer, Adam, SGD, RMSprop, Adadelta, AdamW, Adagrad, Adamax, Adafactor, Nadam, Ftrl, Lion, LossScaleOptimizer, } ALL_OBJECTS_DICT = {cls.__name__.lower(): cls for cls in ALL_OBJECTS} @keras_export("keras.optimizers.serialize") def serialize(optimizer): """Returns the optimizer configuration as a Python dict. Args: optimizer: An `Optimizer` instance to serialize. Returns: Python dict which contains the configuration of the optimizer. """ return serialization_lib.serialize_keras_object(optimizer) @keras_export("keras.optimizers.deserialize") def deserialize(config, custom_objects=None): """Returns a Keras optimizer object via its configuration. Args: config: Optimizer configuration dictionary. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras Optimizer instance. """ # Make deserialization case-insensitive for built-in optimizers. if config["class_name"].lower() in ALL_OBJECTS_DICT: config["class_name"] = config["class_name"].lower() return serialization_lib.deserialize_keras_object( config, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects, ) @keras_export("keras.optimizers.get") def get(identifier): """Retrieves a Keras Optimizer instance. Args: identifier: Optimizer identifier, one of: - String: name of an optimizer - Dictionary: configuration dictionary. - Keras Optimizer instance (it will be returned unchanged). Returns: A Keras Optimizer instance. """ if identifier is None: return None elif isinstance(identifier, dict): obj = deserialize(identifier) elif isinstance(identifier, str): config = {"class_name": identifier, "config": {}} obj = deserialize(config) else: obj = identifier if isinstance(obj, Optimizer): return obj raise ValueError(f"Could not interpret optimizer identifier: {identifier}") # We will add this temporarily so that tensorflow packages that depend on # estimators will continue to import (there are a large number). Note that # Keras 3 will not work with the estimators API. @keras_export( [ "keras.optimizers.legacy.Adagrad", "keras.optimizers.legacy.Adam", "keras.optimizers.legacy.Ftrl", "keras.optimizers.legacy.RMSprop", "keras.optimizers.legacy.SGD", "keras.optimizers.legacy.Optimizer", ] ) class LegacyOptimizerWarning: def __init__(self, *args, **kwargs): raise ImportError( "`keras.optimizers.legacy` is not supported in Keras 3. When using " "`tf.keras`, to continue using a `tf.keras.optimizers.legacy` " "optimizer, you can install the `tf_keras` package (Keras 2) and " "set the environment variable `TF_USE_LEGACY_KERAS=True` to " "configure TensorFlow to use `tf_keras` when accessing `tf.keras`." )
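

# --- Editor's note: illustrative sketch, not part of the module. It shows
# the round trip between `get()`, `serialize()` and `deserialize()`,
# including the case-insensitive string lookup implemented above.
if __name__ == "__main__":
    opt = get("adam")  # string identifier resolves via ALL_OBJECTS_DICT
    config = serialize(opt)  # -> {"class_name": ..., "config": {...}, ...}
    clone = deserialize(config)  # rebuilds an equivalent optimizer
    assert isinstance(clone, Adam)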
keras/keras/optimizers/__init__.py/0
{ "file_path": "keras/keras/optimizers/__init__.py", "repo_id": "keras", "token_count": 1497 }
173
from keras import ops
from keras.api_export import keras_export
from keras.optimizers import optimizer


@keras_export(["keras.optimizers.Lion"])
class Lion(optimizer.Optimizer):
    """Optimizer that implements the Lion algorithm.

    The Lion optimizer is a stochastic-gradient-descent method that uses the
    sign operator to control the magnitude of the update, unlike other
    adaptive optimizers such as Adam that rely on second-order moments. This
    makes Lion more memory-efficient as it only keeps track of the momentum.
    According to the authors (see reference), its performance gain over Adam
    grows with the batch size. Because the update of Lion is produced through
    the sign operation, resulting in a larger norm, a suitable learning rate
    for Lion is typically 3-10x smaller than that for AdamW. The weight decay
    for Lion should be in turn 3-10x larger than that for AdamW to maintain a
    similar strength (lr * wd).

    Args:
        learning_rate: A float, a
            `keras.optimizers.schedules.LearningRateSchedule` instance, or
            a callable that takes no arguments and returns the actual value to
            use. The learning rate. Defaults to `0.001`.
        beta_1: A float value or a constant float tensor, or a callable
            that takes no arguments and returns the actual value to use. The
            rate to combine the current gradient and the 1st moment estimate.
            Defaults to `0.9`.
        beta_2: A float value or a constant float tensor, or a callable
            that takes no arguments and returns the actual value to use. The
            exponential decay rate for the 1st moment estimate. Defaults to
            `0.99`.
        {{base_optimizer_keyword_args}}

    References:

    - [Chen et al., 2023](http://arxiv.org/abs/2302.06675)
    - [Authors' implementation](
        http://github.com/google/automl/tree/master/lion)

    """

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.99,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        name="lion",
        **kwargs,
    ):
        super().__init__(
            learning_rate=learning_rate,
            name=name,
            weight_decay=weight_decay,
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            **kwargs,
        )
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        if beta_1 <= 0 or beta_1 > 1:
            raise ValueError(
                "Argument `beta_1` must be in the (0, 1] range. Otherwise, "
                f"the optimizer degenerates to SignSGD. Received: "
                f"beta_1={beta_1}."
            )

    def build(self, var_list):
        """Initialize optimizer variables.

        Lion optimizer has one variable `momentums`.

        Args:
            var_list: list of model variables to build Lion variables on.
""" if self.built: return super().build(var_list) self._momentums = [] for var in var_list: self._momentums.append( self.add_variable_from_reference( reference_variable=var, name="momentum" ) ) def update_step(self, gradient, variable, learning_rate): """Update step given gradient and the associated model variable.""" lr = ops.cast(learning_rate, variable.dtype) gradient = ops.cast(gradient, variable.dtype) beta_1 = ops.cast(self.beta_1, variable.dtype) beta_2 = ops.cast(self.beta_2, variable.dtype) m = self._momentums[self._get_variable_index(variable)] self.assign_sub( variable, ops.multiply( lr, ops.sign( ops.add( ops.multiply(m, beta_1), ops.multiply(gradient, (1.0 - beta_1)), ) ), ), ) self.assign( m, ops.add( ops.multiply(m, beta_2), ops.multiply(gradient, (1.0 - beta_2)) ), ) def get_config(self): config = super().get_config() config.update( { "beta_1": self.beta_1, "beta_2": self.beta_2, } ) return config Lion.__doc__ = Lion.__doc__.replace( "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args )
keras/keras/optimizers/lion.py/0
{ "file_path": "keras/keras/optimizers/lion.py", "repo_id": "keras", "token_count": 2224 }
174
"""Object config serialization and deserialization logic.""" import importlib import inspect import types import warnings import numpy as np from keras import api_export from keras import backend from keras.api_export import keras_export from keras.backend.common import global_state from keras.saving import object_registration from keras.utils import python_utils from keras.utils.module_utils import tensorflow as tf PLAIN_TYPES = (str, int, float, bool) # List of Keras modules with built-in string representations for Keras defaults BUILTIN_MODULES = ( "activations", "constraints", "initializers", "losses", "metrics", "optimizers", "regularizers", ) class SerializableDict: def __init__(self, **config): self.config = config def serialize(self): return serialize_keras_object(self.config) class SafeModeScope: """Scope to propagate safe mode flag to nested deserialization calls.""" def __init__(self, safe_mode=True): self.safe_mode = safe_mode def __enter__(self): self.original_value = in_safe_mode() global_state.set_global_attribute("safe_mode_saving", self.safe_mode) def __exit__(self, *args, **kwargs): global_state.set_global_attribute( "safe_mode_saving", self.original_value ) @keras_export("keras.config.enable_unsafe_deserialization") def enable_unsafe_deserialization(): """Disables safe mode globally, allowing deserialization of lambdas.""" global_state.set_global_attribute("safe_mode_saving", False) def in_safe_mode(): return global_state.get_global_attribute("safe_mode_saving") class ObjectSharingScope: """Scope to enable detection and reuse of previously seen objects.""" def __enter__(self): global_state.set_global_attribute("shared_objects/id_to_obj_map", {}) global_state.set_global_attribute("shared_objects/id_to_config_map", {}) def __exit__(self, *args, **kwargs): global_state.set_global_attribute("shared_objects/id_to_obj_map", None) global_state.set_global_attribute( "shared_objects/id_to_config_map", None ) def get_shared_object(obj_id): """Retrieve an object previously seen during deserialization.""" id_to_obj_map = global_state.get_global_attribute( "shared_objects/id_to_obj_map" ) if id_to_obj_map is not None: return id_to_obj_map.get(obj_id, None) def record_object_after_serialization(obj, config): """Call after serializing an object, to keep track of its config.""" if config["module"] == "__main__": config["module"] = None # Ensures module is None when no module found id_to_config_map = global_state.get_global_attribute( "shared_objects/id_to_config_map" ) if id_to_config_map is None: return # Not in a sharing scope obj_id = int(id(obj)) if obj_id not in id_to_config_map: id_to_config_map[obj_id] = config else: config["shared_object_id"] = obj_id prev_config = id_to_config_map[obj_id] prev_config["shared_object_id"] = obj_id def record_object_after_deserialization(obj, obj_id): """Call after deserializing an object, to keep track of it in the future.""" id_to_obj_map = global_state.get_global_attribute( "shared_objects/id_to_obj_map" ) if id_to_obj_map is None: return # Not in a sharing scope id_to_obj_map[obj_id] = obj @keras_export( [ "keras.saving.serialize_keras_object", "keras.utils.serialize_keras_object", ] ) def serialize_keras_object(obj): """Retrieve the config dict by serializing the Keras object. `serialize_keras_object()` serializes a Keras object to a python dictionary that represents the object, and is a reciprocal function of `deserialize_keras_object()`. See `deserialize_keras_object()` for more information about the config format. 
Args: obj: the Keras object to serialize. Returns: A python dict that represents the object. The python dict can be deserialized via `deserialize_keras_object()`. """ if obj is None: return obj if isinstance(obj, PLAIN_TYPES): return obj if isinstance(obj, (list, tuple)): config_arr = [serialize_keras_object(x) for x in obj] return tuple(config_arr) if isinstance(obj, tuple) else config_arr if isinstance(obj, dict): return serialize_dict(obj) # Special cases: if isinstance(obj, bytes): return { "class_name": "__bytes__", "config": {"value": obj.decode("utf-8")}, } if isinstance(obj, slice): return { "class_name": "__slice__", "config": { "start": serialize_keras_object(obj.start), "stop": serialize_keras_object(obj.stop), "step": serialize_keras_object(obj.step), }, } if isinstance(obj, backend.KerasTensor): history = getattr(obj, "_keras_history", None) if history: history = list(history) history[0] = history[0].name return { "class_name": "__keras_tensor__", "config": { "shape": obj.shape, "dtype": obj.dtype, "keras_history": history, }, } if tf.available and isinstance(obj, tf.TensorShape): return obj.as_list() if obj._dims is not None else None if backend.is_tensor(obj): return { "class_name": "__tensor__", "config": { "value": backend.convert_to_numpy(obj).tolist(), "dtype": backend.standardize_dtype(obj.dtype), }, } if type(obj).__module__ == np.__name__: if isinstance(obj, np.ndarray) and obj.ndim > 0: return { "class_name": "__numpy__", "config": { "value": obj.tolist(), "dtype": backend.standardize_dtype(obj.dtype), }, } else: # Treat numpy floats / etc as plain types. return obj.item() if tf.available and isinstance(obj, tf.DType): return obj.name if isinstance(obj, types.FunctionType) and obj.__name__ == "<lambda>": warnings.warn( "The object being serialized includes a `lambda`. This is unsafe. " "In order to reload the object, you will have to pass " "`safe_mode=False` to the loading function. " "Please avoid using `lambda` in the " "future, and use named Python functions instead. 
" f"This is the `lambda` being serialized: {inspect.getsource(obj)}", stacklevel=2, ) return { "class_name": "__lambda__", "config": { "value": python_utils.func_dump(obj), }, } if tf.available and isinstance(obj, tf.TypeSpec): ts_config = obj._serialize() # TensorShape and tf.DType conversion ts_config = list( map( lambda x: ( x.as_list() if isinstance(x, tf.TensorShape) else (x.name if isinstance(x, tf.DType) else x) ), ts_config, ) ) return { "class_name": "__typespec__", "spec_name": obj.__class__.__name__, "module": obj.__class__.__module__, "config": ts_config, "registered_name": None, } inner_config = _get_class_or_fn_config(obj) config_with_public_class = serialize_with_public_class( obj.__class__, inner_config ) if config_with_public_class is not None: get_build_and_compile_config(obj, config_with_public_class) record_object_after_serialization(obj, config_with_public_class) return config_with_public_class # Any custom object or otherwise non-exported object if isinstance(obj, types.FunctionType): module = obj.__module__ else: module = obj.__class__.__module__ class_name = obj.__class__.__name__ if module == "builtins": registered_name = None else: if isinstance(obj, types.FunctionType): registered_name = object_registration.get_registered_name(obj) else: registered_name = object_registration.get_registered_name( obj.__class__ ) config = { "module": module, "class_name": class_name, "config": inner_config, "registered_name": registered_name, } get_build_and_compile_config(obj, config) record_object_after_serialization(obj, config) return config def get_build_and_compile_config(obj, config): if hasattr(obj, "get_build_config"): build_config = obj.get_build_config() if build_config is not None: config["build_config"] = serialize_dict(build_config) if hasattr(obj, "get_compile_config"): compile_config = obj.get_compile_config() if compile_config is not None: config["compile_config"] = serialize_dict(compile_config) return def serialize_with_public_class(cls, inner_config=None): """Serializes classes from public Keras API or object registration. Called to check and retrieve the config of any class that has a public Keras API or has been registered as serializable via `keras.saving.register_keras_serializable()`. """ # This gets the `keras.*` exported name, such as # "keras.optimizers.Adam". keras_api_name = api_export.get_name_from_symbol(cls) # Case of custom or unknown class object if keras_api_name is None: registered_name = object_registration.get_registered_name(cls) if registered_name is None: return None # Return custom object config with corresponding registration name return { "module": cls.__module__, "class_name": cls.__name__, "config": inner_config, "registered_name": registered_name, } # Split the canonical Keras API name into a Keras module and class name. parts = keras_api_name.split(".") return { "module": ".".join(parts[:-1]), "class_name": parts[-1], "config": inner_config, "registered_name": None, } def serialize_with_public_fn(fn, config, fn_module_name=None): """Serializes functions from public Keras API or object registration. Called to check and retrieve the config of any function that has a public Keras API or has been registered as serializable via `keras.saving.register_keras_serializable()`. If function's module name is already known, returns corresponding config. 
""" if fn_module_name: return { "module": fn_module_name, "class_name": "function", "config": config, "registered_name": config, } keras_api_name = api_export.get_name_from_symbol(fn) if keras_api_name: parts = keras_api_name.split(".") return { "module": ".".join(parts[:-1]), "class_name": "function", "config": config, "registered_name": config, } else: registered_name = object_registration.get_registered_name(fn) if not registered_name and not fn.__module__ == "builtins": return None return { "module": fn.__module__, "class_name": "function", "config": config, "registered_name": registered_name, } def _get_class_or_fn_config(obj): """Return the object's config depending on its type.""" # Functions / lambdas: if isinstance(obj, types.FunctionType): return obj.__name__ # All classes: if hasattr(obj, "get_config"): config = obj.get_config() if not isinstance(config, dict): raise TypeError( f"The `get_config()` method of {obj} should return " f"a dict. It returned: {config}" ) return serialize_dict(config) elif hasattr(obj, "__name__"): return object_registration.get_registered_name(obj) else: raise TypeError( f"Cannot serialize object {obj} of type {type(obj)}. " "To be serializable, " "a class must implement the `get_config()` method." ) def serialize_dict(obj): return {key: serialize_keras_object(value) for key, value in obj.items()} @keras_export( [ "keras.saving.deserialize_keras_object", "keras.utils.deserialize_keras_object", ] ) def deserialize_keras_object( config, custom_objects=None, safe_mode=True, **kwargs ): """Retrieve the object by deserializing the config dict. The config dict is a Python dictionary that consists of a set of key-value pairs, and represents a Keras object, such as an `Optimizer`, `Layer`, `Metrics`, etc. The saving and loading library uses the following keys to record information of a Keras object: - `class_name`: String. This is the name of the class, as exactly defined in the source code, such as "LossesContainer". - `config`: Dict. Library-defined or user-defined key-value pairs that store the configuration of the object, as obtained by `object.get_config()`. - `module`: String. The path of the python module. Built-in Keras classes expect to have prefix `keras`. - `registered_name`: String. The key the class is registered under via `keras.saving.register_keras_serializable(package, name)` API. The key has the format of '{package}>{name}', where `package` and `name` are the arguments passed to `register_keras_serializable()`. If `name` is not provided, it uses the class name. If `registered_name` successfully resolves to a class (that was registered), the `class_name` and `config` values in the dict will not be used. `registered_name` is only used for non-built-in classes. For example, the following dictionary represents the built-in Adam optimizer with the relevant config: ```python dict_structure = { "class_name": "Adam", "config": { "amsgrad": false, "beta_1": 0.8999999761581421, "beta_2": 0.9990000128746033, "decay": 0.0, "epsilon": 1e-07, "learning_rate": 0.0010000000474974513, "name": "Adam" }, "module": "keras.optimizers", "registered_name": None } # Returns an `Adam` instance identical to the original one. deserialize_keras_object(dict_structure) ``` If the class does not have an exported Keras namespace, the library tracks it by its `module` and `class_name`. For example: ```python dict_structure = { "class_name": "MetricsList", "config": { ... 
}, "module": "keras.trainers.compile_utils", "registered_name": "MetricsList" } # Returns a `MetricsList` instance identical to the original one. deserialize_keras_object(dict_structure) ``` And the following dictionary represents a user-customized `MeanSquaredError` loss: ```python @keras.saving.register_keras_serializable(package='my_package') class ModifiedMeanSquaredError(keras.losses.MeanSquaredError): ... dict_structure = { "class_name": "ModifiedMeanSquaredError", "config": { "fn": "mean_squared_error", "name": "mean_squared_error", "reduction": "auto" }, "registered_name": "my_package>ModifiedMeanSquaredError" } # Returns the `ModifiedMeanSquaredError` object deserialize_keras_object(dict_structure) ``` Args: config: Python dict describing the object. custom_objects: Python dict containing a mapping between custom object names the corresponding classes or functions. safe_mode: Boolean, whether to disallow unsafe `lambda` deserialization. When `safe_mode=False`, loading an object has the potential to trigger arbitrary code execution. This argument is only applicable to the Keras v3 model format. Defaults to `True`. Returns: The object described by the `config` dictionary. """ safe_scope_arg = in_safe_mode() # Enforces SafeModeScope safe_mode = safe_scope_arg if safe_scope_arg is not None else safe_mode module_objects = kwargs.pop("module_objects", None) custom_objects = custom_objects or {} tlco = global_state.get_global_attribute("custom_objects_scope_dict", {}) gco = object_registration.GLOBAL_CUSTOM_OBJECTS custom_objects = {**custom_objects, **tlco, **gco} if config is None: return None if ( isinstance(config, str) and custom_objects and custom_objects.get(config) is not None ): # This is to deserialize plain functions which are serialized as # string names by legacy saving formats. return custom_objects[config] if isinstance(config, (list, tuple)): return [ deserialize_keras_object( x, custom_objects=custom_objects, safe_mode=safe_mode ) for x in config ] if module_objects is not None: inner_config, fn_module_name, has_custom_object = None, None, False if isinstance(config, dict): if "config" in config: inner_config = config["config"] if "class_name" not in config: raise ValueError( f"Unknown `config` as a `dict`, config={config}" ) # Check case where config is function or class and in custom objects if custom_objects and ( config["class_name"] in custom_objects or config.get("registered_name") in custom_objects or ( isinstance(inner_config, str) and inner_config in custom_objects ) ): has_custom_object = True # Case where config is function but not in custom objects elif config["class_name"] == "function": fn_module_name = config["module"] if fn_module_name == "builtins": config = config["config"] else: config = config["registered_name"] # Case where config is class but not in custom objects else: if config.get("module", "_") is None: raise TypeError( "Cannot deserialize object of type " f"`{config['class_name']}`. If " f"`{config['class_name']}` is a custom class, please " "register it using the " "`@keras.saving.register_keras_serializable()` " "decorator." 
) config = config["class_name"] if not has_custom_object: # Return if not found in either module objects or custom objects if config not in module_objects: # Object has already been deserialized return config if isinstance(module_objects[config], types.FunctionType): return deserialize_keras_object( serialize_with_public_fn( module_objects[config], config, fn_module_name ), custom_objects=custom_objects, ) return deserialize_keras_object( serialize_with_public_class( module_objects[config], inner_config=inner_config ), custom_objects=custom_objects, ) if isinstance(config, PLAIN_TYPES): return config if not isinstance(config, dict): raise TypeError(f"Could not parse config: {config}") if "class_name" not in config or "config" not in config: return { key: deserialize_keras_object( value, custom_objects=custom_objects, safe_mode=safe_mode ) for key, value in config.items() } class_name = config["class_name"] inner_config = config["config"] or {} custom_objects = custom_objects or {} # Special cases: if class_name == "__keras_tensor__": obj = backend.KerasTensor( inner_config["shape"], dtype=inner_config["dtype"] ) obj._pre_serialization_keras_history = inner_config["keras_history"] return obj if class_name == "__tensor__": return backend.convert_to_tensor( inner_config["value"], dtype=inner_config["dtype"] ) if class_name == "__numpy__": return np.array(inner_config["value"], dtype=inner_config["dtype"]) if config["class_name"] == "__bytes__": return inner_config["value"].encode("utf-8") if config["class_name"] == "__slice__": return slice( deserialize_keras_object( inner_config["start"], custom_objects=custom_objects, safe_mode=safe_mode, ), deserialize_keras_object( inner_config["stop"], custom_objects=custom_objects, safe_mode=safe_mode, ), deserialize_keras_object( inner_config["step"], custom_objects=custom_objects, safe_mode=safe_mode, ), ) if config["class_name"] == "__lambda__": if safe_mode: raise ValueError( "Requested the deserialization of a `lambda` object. " "This carries a potential risk of arbitrary code execution " "and thus it is disallowed by default. If you trust the " "source of the saved model, you can pass `safe_mode=False` to " "the loading function in order to allow `lambda` loading, " "or call `keras.config.enable_unsafe_deserialization()`." ) return python_utils.func_load(inner_config["value"]) if tf is not None and config["class_name"] == "__typespec__": obj = _retrieve_class_or_fn( config["spec_name"], config["registered_name"], config["module"], obj_type="class", full_config=config, custom_objects=custom_objects, ) # Conversion to TensorShape and DType inner_config = map( lambda x: ( tf.TensorShape(x) if isinstance(x, list) else (getattr(tf, x) if hasattr(tf.dtypes, str(x)) else x) ), inner_config, ) return obj._deserialize(tuple(inner_config)) # Below: classes and functions. module = config.get("module", None) registered_name = config.get("registered_name", class_name) if class_name == "function": fn_name = inner_config return _retrieve_class_or_fn( fn_name, registered_name, module, obj_type="function", full_config=config, custom_objects=custom_objects, ) # Below, handling of all classes. # First, is it a shared object? 
if "shared_object_id" in config: obj = get_shared_object(config["shared_object_id"]) if obj is not None: return obj cls = _retrieve_class_or_fn( class_name, registered_name, module, obj_type="class", full_config=config, custom_objects=custom_objects, ) if isinstance(cls, types.FunctionType): return cls if not hasattr(cls, "from_config"): raise TypeError( f"Unable to reconstruct an instance of '{class_name}' because " f"the class is missing a `from_config()` method. " f"Full object config: {config}" ) # Instantiate the class from its config inside a custom object scope # so that we can catch any custom objects that the config refers to. custom_obj_scope = object_registration.CustomObjectScope(custom_objects) safe_mode_scope = SafeModeScope(safe_mode) with custom_obj_scope, safe_mode_scope: try: instance = cls.from_config(inner_config) except TypeError as e: raise TypeError( f"{cls} could not be deserialized properly. Please" " ensure that components that are Python object" " instances (layers, models, etc.) returned by" " `get_config()` are explicitly deserialized in the" " model's `from_config()` method." f"\n\nconfig={config}.\n\nException encountered: {e}" ) build_config = config.get("build_config", None) if build_config and not instance.built: instance.build_from_config(build_config) instance.built = True compile_config = config.get("compile_config", None) if compile_config: instance.compile_from_config(compile_config) instance.compiled = True if "shared_object_id" in config: record_object_after_deserialization( instance, config["shared_object_id"] ) return instance def _retrieve_class_or_fn( name, registered_name, module, obj_type, full_config, custom_objects=None ): # If there is a custom object registered via # `register_keras_serializable()`, that takes precedence. if obj_type == "function": custom_obj = object_registration.get_registered_object( name, custom_objects=custom_objects ) else: custom_obj = object_registration.get_registered_object( registered_name, custom_objects=custom_objects ) if custom_obj is not None: return custom_obj if module: # If it's a Keras built-in object, # we cannot always use direct import, because the exported # module name might not match the package structure # (e.g. experimental symbols). if module == "keras" or module.startswith("keras."): api_name = module + "." + name obj = api_export.get_symbol_from_name(api_name) if obj is not None: return obj # Configs of Keras built-in functions do not contain identifying # information other than their name (e.g. 'acc' or 'tanh'). This special # case searches the Keras modules that contain built-ins to retrieve # the corresponding function from the identifying string. if obj_type == "function" and module == "builtins": for mod in BUILTIN_MODULES: obj = api_export.get_symbol_from_name( "keras." + mod + "." + name ) if obj is not None: return obj # Retrieval of registered custom function in a package filtered_dict = { k: v for k, v in custom_objects.items() if k.endswith(full_config["config"]) } if filtered_dict: return next(iter(filtered_dict.values())) # Otherwise, attempt to retrieve the class object given the `module` # and `class_name`. Import the module, find the class. try: mod = importlib.import_module(module) except ModuleNotFoundError: raise TypeError( f"Could not deserialize {obj_type} '{name}' because " f"its parent module {module} cannot be imported. 
" f"Full object config: {full_config}" ) obj = vars(mod).get(name, None) # Special case for keras.metrics.metrics if obj is None and registered_name is not None: obj = vars(mod).get(registered_name, None) if obj is not None: return obj raise TypeError( f"Could not locate {obj_type} '{name}'. " "Make sure custom classes are decorated with " "`@keras.saving.register_keras_serializable()`. " f"Full object config: {full_config}" )
keras/keras/saving/serialization_lib.py/0
{ "file_path": "keras/keras/saving/serialization_lib.py", "repo_id": "keras", "token_count": 12690 }
175
import multiprocessing.dummy
import queue
import random
import threading
import time
import warnings
import weakref
from contextlib import closing

import numpy as np
import tree

from keras import backend
from keras.api_export import keras_export
from keras.trainers.data_adapters import data_adapter_utils
from keras.trainers.data_adapters.data_adapter import DataAdapter


@keras_export(["keras.utils.PyDataset", "keras.utils.Sequence"])
class PyDataset:
    """Base class for defining a parallel dataset using Python code.

    Every `PyDataset` must implement the `__getitem__()` and the `__len__()`
    methods. If you want to modify your dataset between epochs, you may
    additionally implement `on_epoch_end()`. The `__getitem__()` method
    should return a complete batch (not a single sample), and the `__len__`
    method should return the number of batches in the dataset (rather than
    the number of samples).

    Args:
        workers: Number of workers to use in multithreading or
            multiprocessing.
        use_multiprocessing: Whether to use Python multiprocessing for
            parallelism. Setting this to `True` means that your
            dataset will be replicated in multiple forked processes.
            This is necessary to gain compute-level (rather than I/O level)
            benefits from parallelism. However it can only be set to
            `True` if your dataset can be safely pickled.
        max_queue_size: Maximum number of batches to keep in the queue
            when iterating over the dataset in a multithreaded or
            multiprocessed setting.
            Reduce this value to reduce the CPU memory consumption of
            your dataset. Defaults to 10.

    Notes:

    - `PyDataset` is a safer way to do multiprocessing.
        This structure guarantees that the model will only train
        once on each sample per epoch, which is not the case
        with Python generators.
    - The arguments `workers`, `use_multiprocessing`, and `max_queue_size`
        exist to configure how `fit()` uses parallelism to iterate
        over the dataset. They are not being used by the `PyDataset` class
        directly. When you are manually iterating over a `PyDataset`,
        no parallelism is applied.

    Example:

    ```python
    from skimage.io import imread
    from skimage.transform import resize
    import numpy as np
    import math

    # Here, `x_set` is a list of paths to the images
    # and `y_set` are the associated classes.

    class CIFAR10PyDataset(keras.utils.PyDataset):

        def __init__(self, x_set, y_set, batch_size, **kwargs):
            super().__init__(**kwargs)
            self.x, self.y = x_set, y_set
            self.batch_size = batch_size

        def __len__(self):
            # Return number of batches.
            return math.ceil(len(self.x) / self.batch_size)

        def __getitem__(self, idx):
            # Return x, y for batch idx.
            low = idx * self.batch_size
            # Cap upper bound at array length; the last batch may be smaller
            # if the total number of items is not a multiple of batch size.
            high = min(low + self.batch_size, len(self.x))
            batch_x = self.x[low:high]
            batch_y = self.y[low:high]

            return np.array([
                resize(imread(file_name), (200, 200))
                   for file_name in batch_x]), np.array(batch_y)
    ```
    """

    def __init__(self, workers=1, use_multiprocessing=False, max_queue_size=10):
        self._workers = workers
        self._use_multiprocessing = use_multiprocessing
        self._max_queue_size = max_queue_size

    def _warn_if_super_not_called(self):
        warn = False
        if not hasattr(self, "_workers"):
            self._workers = 1
            warn = True
        if not hasattr(self, "_use_multiprocessing"):
            self._use_multiprocessing = False
            warn = True
        if not hasattr(self, "_max_queue_size"):
            self._max_queue_size = 10
            warn = True
        if warn:
            warnings.warn(
                "Your `PyDataset` class should call "
                "`super().__init__(**kwargs)` in its constructor. "
                "`**kwargs` can include `workers`, "
                "`use_multiprocessing`, `max_queue_size`. Do not pass "
                "these arguments to `fit()`, as they will be ignored.",
                stacklevel=2,
            )

    @property
    def workers(self):
        self._warn_if_super_not_called()
        return self._workers

    @workers.setter
    def workers(self, value):
        self._workers = value

    @property
    def use_multiprocessing(self):
        self._warn_if_super_not_called()
        return self._use_multiprocessing

    @use_multiprocessing.setter
    def use_multiprocessing(self, value):
        self._use_multiprocessing = value

    @property
    def max_queue_size(self):
        self._warn_if_super_not_called()
        return self._max_queue_size

    @max_queue_size.setter
    def max_queue_size(self, value):
        self._max_queue_size = value

    def __getitem__(self, index):
        """Gets batch at position `index`.

        Args:
            index: position of the batch in the PyDataset.

        Returns:
            A batch
        """
        raise NotImplementedError

    def __len__(self):
        """Number of batches in the PyDataset.

        Returns:
            The number of batches in the PyDataset.
        """
        raise NotImplementedError

    def on_epoch_end(self):
        """Method called at the end of every epoch."""
        pass

    def __iter__(self):
        """Create a generator that iterates over the PyDataset."""
        for i in range(len(self)):
            yield self[i]


class PyDatasetAdapter(DataAdapter):
    """Adapter for `keras.utils.PyDataset` instances."""

    def __init__(
        self,
        x,
        class_weight=None,
        shuffle=False,
    ):
        self.py_dataset = x
        self.class_weight = class_weight
        self.enqueuer = None
        self.shuffle = shuffle
        self._output_signature = None

    def _set_tf_output_signature(self):
        from keras.utils.module_utils import tensorflow as tf

        def get_tensor_spec(x):
            shape = x.shape
            if len(shape) < 1:
                raise ValueError(
                    "The arrays returned by PyDataset.__getitem__() "
                    "must be at least rank 1. Received: "
                    f"{x} of rank {len(x.shape)}"
                )
            shape = list(shape)
            shape[0] = None  # The batch size is not guaranteed to be static.
            dtype = backend.standardize_dtype(x.dtype)
            return tf.TensorSpec(shape=shape, dtype=dtype)

        # Grab the first example
        batch = self.py_dataset[0]
        # Run checks on it and format it
        batch = self._standardize_batch(batch)
        self._output_signature = tree.map_structure(get_tensor_spec, batch)

    def _standardize_batch(self, batch):
        if isinstance(batch, dict):
            return batch
        if isinstance(batch, np.ndarray):
            batch = (batch,)
        if isinstance(batch, list):
            batch = tuple(batch)
        if not isinstance(batch, tuple) or len(batch) not in {1, 2, 3}:
            raise ValueError(
                "PyDataset.__getitem__() must return a tuple or a dict. "
                "If a tuple, it must be ordered either "
                "(input,) or (inputs, targets) or "
                "(inputs, targets, sample_weights). "
                f"Received: {str(batch)[:100]}... of type {type(batch)}"
            )
        if self.class_weight is not None:
            if len(batch) == 3:
                raise ValueError(
                    "You cannot specify `class_weight` "
                    "and `sample_weight` at the same time."
                )
            if len(batch) == 2:
                sw = data_adapter_utils.class_weight_to_sample_weights(
                    batch[1], self.class_weight
                )
                batch = batch + (sw,)
        return batch

    def _make_multiprocessed_generator_fn(self):
        workers = self.py_dataset.workers
        use_multiprocessing = self.py_dataset.use_multiprocessing
        if workers > 1 or (workers > 0 and use_multiprocessing):

            def generator_fn():
                self.enqueuer = OrderedEnqueuer(
                    self.py_dataset,
                    use_multiprocessing=use_multiprocessing,
                    shuffle=self.shuffle,
                )
                self.enqueuer.start(
                    workers=workers,
                    max_queue_size=self.py_dataset.max_queue_size,
                )
                return self.enqueuer.get()

        else:

            def generator_fn():
                order = range(len(self.py_dataset))
                if self.shuffle:
                    # Match the shuffle convention in OrderedEnqueuer.
order = list(order) random.shuffle(order) for i in order: yield self.py_dataset[i] return generator_fn def _get_iterator(self): gen_fn = self._make_multiprocessed_generator_fn() for i, batch in enumerate(gen_fn()): batch = self._standardize_batch(batch) yield batch if i >= len(self.py_dataset) - 1 and self.enqueuer: self.enqueuer.stop() def get_numpy_iterator(self): return data_adapter_utils.get_numpy_iterator(self._get_iterator()) def get_jax_iterator(self): return data_adapter_utils.get_jax_iterator(self._get_iterator()) def get_tf_dataset(self): from keras.utils.module_utils import tensorflow as tf if self._output_signature is None: self._set_tf_output_signature() ds = tf.data.Dataset.from_generator( self._get_iterator, output_signature=self._output_signature, ) if self.shuffle: ds = ds.shuffle(8) ds = ds.prefetch(tf.data.AUTOTUNE) return ds def get_torch_dataloader(self): return data_adapter_utils.get_torch_dataloader(self._get_iterator()) def on_epoch_end(self): if self.enqueuer: self.enqueuer.stop() self.py_dataset.on_epoch_end() @property def num_batches(self): return len(self.py_dataset) @property def batch_size(self): return None # Global variables to be shared across processes _SHARED_SEQUENCES = {} # We use a Value to provide unique id to different processes. _SEQUENCE_COUNTER = None # Because multiprocessing pools are inherently unsafe, starting from a clean # state can be essential to avoiding deadlocks. In order to accomplish this, we # need to be able to check on the status of Pools that we create. _DATA_POOLS = weakref.WeakSet() _WORKER_ID_QUEUE = None # Only created if needed. _FORCE_THREADPOOL = False def get_pool_class(use_multiprocessing): global _FORCE_THREADPOOL if not use_multiprocessing or _FORCE_THREADPOOL: return multiprocessing.dummy.Pool # ThreadPool return multiprocessing.Pool def get_worker_id_queue(): """Lazily create the queue to track worker ids.""" global _WORKER_ID_QUEUE if _WORKER_ID_QUEUE is None: _WORKER_ID_QUEUE = multiprocessing.Queue() return _WORKER_ID_QUEUE def init_pool(seqs): global _SHARED_SEQUENCES _SHARED_SEQUENCES = seqs def get_index(uid, i): """Get the value from the PyDataset `uid` at index `i`. To allow multiple PyDatasets to be used at the same time, we use `uid` to get a specific one. A single PyDataset would cause the validation to overwrite the training PyDataset. Args: uid: int, PyDataset identifier i: index Returns: The value at index `i`. """ return _SHARED_SEQUENCES[uid][i] class PyDatasetEnqueuer: """Base class to enqueue inputs. The task of an Enqueuer is to use parallelism to speed up preprocessing. This is done with processes or threads. Example: ```python enqueuer = PyDatasetEnqueuer(...) enqueuer.start() datas = enqueuer.get() for data in datas: # Use the inputs; training, evaluating, predicting. # ... stop sometime. enqueuer.stop() ``` The `enqueuer.get()` should be an infinite stream of data. """ def __init__(self, py_dataset, use_multiprocessing=False): self.py_dataset = py_dataset self.use_multiprocessing = use_multiprocessing global _SEQUENCE_COUNTER if _SEQUENCE_COUNTER is None: try: _SEQUENCE_COUNTER = multiprocessing.Value("i", 0) except OSError: # In this case the OS does not allow us to use # multiprocessing. We resort to an int # for enqueuer indexing. _SEQUENCE_COUNTER = 0 if isinstance(_SEQUENCE_COUNTER, int): self.uid = _SEQUENCE_COUNTER _SEQUENCE_COUNTER += 1 else: # Doing Multiprocessing.Value += x is not process-safe. 
            with _SEQUENCE_COUNTER.get_lock():
                self.uid = _SEQUENCE_COUNTER.value
                _SEQUENCE_COUNTER.value += 1

        self.workers = 0
        self.executor_fn = None
        self.queue = None
        self.run_thread = None
        self.stop_signal = None

    def is_running(self):
        return self.stop_signal is not None and not self.stop_signal.is_set()

    def start(self, workers=1, max_queue_size=10):
        """Starts the handler's workers.

        Args:
            workers: Number of workers.
            max_queue_size: queue size
                (when full, workers could block on `put()`)
        """
        if self.use_multiprocessing:
            self.executor_fn = self._get_executor_init(workers)
        else:
            # We do not need the init since it's threads.
            self.executor_fn = lambda _: get_pool_class(False)(workers)
        self.workers = workers
        self.queue = queue.Queue(max_queue_size)
        self.stop_signal = threading.Event()
        self.run_thread = threading.Thread(target=self._run)
        self.run_thread.daemon = True
        self.run_thread.start()

    def _send_py_dataset(self):
        """Sends current Iterable to all workers."""
        # For new processes that may spawn
        _SHARED_SEQUENCES[self.uid] = self.py_dataset

    def stop(self, timeout=None):
        """Stops running threads and waits for them to exit, if necessary.

        Should be called by the same thread which called `start()`.

        Args:
            timeout: maximum time to wait on `thread.join()`
        """
        if not self.is_running():
            return
        self.stop_signal.set()
        with self.queue.mutex:
            self.queue.queue.clear()
            self.queue.unfinished_tasks = 0
            self.queue.not_full.notify()
        self.run_thread.join(timeout)
        _SHARED_SEQUENCES[self.uid] = None

    def __del__(self):
        if self.is_running():
            self.stop()

    def _run(self):
        """Submits requests to the executor and queues the `Future` objects."""
        raise NotImplementedError

    def _get_executor_init(self, workers):
        """Gets the Pool initializer for multiprocessing.

        Args:
            workers: Number of workers.

        Returns:
            Function, a function to initialize the pool
        """
        raise NotImplementedError

    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        Returns:
            Generator yielding tuples `(inputs, targets)`
                or `(inputs, targets, sample_weights)`.
        """
        raise NotImplementedError


class OrderedEnqueuer(PyDatasetEnqueuer):
    """Builds an Enqueuer from a PyDataset.

    Args:
        py_dataset: A `keras.utils.PyDataset` object.
        use_multiprocessing: use multiprocessing if True, otherwise threading
        shuffle: whether to shuffle the data at the beginning of each epoch
    """

    def __init__(self, py_dataset, use_multiprocessing=False, shuffle=False):
        super().__init__(py_dataset, use_multiprocessing)
        self.shuffle = shuffle

    def _get_executor_init(self, workers):
        """Gets the Pool initializer for multiprocessing.

        Args:
            workers: Number of workers.

        Returns:
            Function, a function to initialize the pool
        """

        def pool_fn(seqs):
            pool = get_pool_class(True)(
                workers,
                initializer=init_pool_generator,
                initargs=(seqs, None, get_worker_id_queue()),
            )
            _DATA_POOLS.add(pool)
            return pool

        return pool_fn

    def _wait_queue(self):
        """Wait for the queue to be empty."""
        while True:
            time.sleep(0.1)
            if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
                return

    def _run(self):
        """Submits requests to the executor and queues the `Future` objects."""
        indices = list(range(len(self.py_dataset)))
        if self.shuffle:
            random.shuffle(indices)
        self._send_py_dataset()  # Share the initial py_dataset
        while True:
            with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
                for i in indices:
                    if self.stop_signal.is_set():
                        return
                    self.queue.put(
                        executor.apply_async(get_index, (self.uid, i)),
                        block=True,
                    )

                # Done with the current epoch, waiting for the final batches
                self._wait_queue()

                if self.stop_signal.is_set():
                    # We're done
                    return

            # Call the internal on epoch end.
            self.py_dataset.on_epoch_end()
            self._send_py_dataset()  # Update the pool

    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        Yields:
            The next element in the queue, i.e. a tuple
            `(inputs, targets)` or
            `(inputs, targets, sample_weights)`.
        """
        while self.is_running():
            try:
                inputs = self.queue.get(block=True, timeout=5).get()
                if self.is_running():
                    self.queue.task_done()
                if inputs is not None:
                    yield inputs
            except queue.Empty:
                pass
            except Exception as e:
                self.stop()
                raise e


def init_pool_generator(gens, random_seed=None, id_queue=None):
    """Initializer function for pool workers.

    Args:
        gens: State which should be made available to worker processes.
        random_seed: An optional value with which to seed child processes.
        id_queue: A multiprocessing Queue of worker ids.
            This is used to indicate that a worker process was created by
            Keras.
    """
    global _SHARED_SEQUENCES
    _SHARED_SEQUENCES = gens

    worker_proc = multiprocessing.current_process()

    # name isn't used for anything, but setting a more descriptive name is
    # helpful when diagnosing orphaned processes.
    worker_proc.name = f"Keras_worker_{worker_proc.name}"

    if random_seed is not None:
        np.random.seed(random_seed + worker_proc.ident)

    if id_queue is not None:
        # If a worker dies during init, the pool will just create a
        # replacement.
        id_queue.put(worker_proc.ident, block=True, timeout=0.1)
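

# --- Editor's note: illustrative sketch, not part of the library. It shows
# a minimal `PyDataset` over an in-memory array; iterating it manually runs
# sequentially (the `workers`/`use_multiprocessing` settings only matter
# when `fit()` consumes the dataset through the adapter above).
if __name__ == "__main__":
    import math

    class RangePyDataset(PyDataset):
        def __init__(self, n, batch_size, **kwargs):
            super().__init__(**kwargs)
            self.x = np.arange(n, dtype="float32")
            self.batch_size = batch_size

        def __len__(self):
            return math.ceil(len(self.x) / self.batch_size)

        def __getitem__(self, idx):
            low = idx * self.batch_size
            high = min(low + self.batch_size, len(self.x))
            return (self.x[low:high],)

    for (batch,) in RangePyDataset(10, batch_size=4):
        print(batch.shape)  # (4,), (4,), then (2,)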
keras/keras/trainers/data_adapters/py_dataset_adapter.py/0
{ "file_path": "keras/keras/trainers/data_adapters/py_dataset_adapter.py", "repo_id": "keras", "token_count": 9208 }
176
import os import sys from io import StringIO from keras.testing import test_case from keras.utils.code_stats import count_loc class TestCountLoc(test_case.TestCase): def setUp(self): self.test_dir = "test_directory" os.makedirs(self.test_dir, exist_ok=True) def tearDown(self): for root, dirs, files in os.walk(self.test_dir, topdown=False): for name in files: os.remove(os.path.join(root, name)) for name in dirs: os.rmdir(os.path.join(root, name)) def create_file(self, filename, content): with open( os.path.join(self.test_dir, filename), "w", encoding="utf-8" ) as f: f.write(content) def test_count_loc_valid_python(self): self.create_file( "sample.py", "# This is a test file\n\nprint('Hello')\n" ) loc = count_loc(self.test_dir) self.assertEqual(loc, 1) def test_exclude_test_files(self): self.create_file("sample_test.py", "print('Hello')\n") loc = count_loc(self.test_dir, exclude=("_test",)) self.assertEqual(loc, 0) def test_other_extensions(self): self.create_file("sample.txt", "Hello\n") loc = count_loc(self.test_dir, extensions=(".py",)) self.assertEqual(loc, 0) def test_comment_lines(self): self.create_file( "sample.py", "# Comment\nprint('Hello')\n# Another comment\n" ) loc = count_loc(self.test_dir) self.assertEqual(loc, 1) def test_empty_file(self): self.create_file("empty.py", "") loc = count_loc(self.test_dir) self.assertEqual(loc, 0) def test_whitespace_only(self): self.create_file("whitespace.py", " \n\t\n") loc = count_loc(self.test_dir) self.assertEqual(loc, 0) def test_inline_comments_after_code(self): content = 'print("Hello") # This is an inline comment' self.create_file("inline_comment_sample.py", content) loc = count_loc(self.test_dir) self.assertEqual(loc, 1) # The comment shouldn't affect the count def test_directory_structure(self): content1 = 'print("Hello from file1")' content2 = 'print("Hello from file2")' os.mkdir(os.path.join(self.test_dir, "subdir")) self.create_file("sample1.py", content1) self.create_file(os.path.join("subdir", "sample2.py"), content2) loc = count_loc(self.test_dir) self.assertEqual(loc, 2) # Both files should be counted def test_normal_directory_name(self): content = 'print("Hello from a regular directory")' os.makedirs(os.path.join(self.test_dir, "some_test_dir")) self.create_file(os.path.join("some_test_dir", "sample.py"), content) loc = count_loc(self.test_dir) self.assertEqual(loc, 1) # Should count normally def test_exclude_directory_name(self): content = 'print("Hello from an excluded directory")' os.makedirs(os.path.join(self.test_dir, "dir_test")) self.create_file(os.path.join("dir_test", "sample.py"), content) loc = count_loc(self.test_dir) self.assertEqual(loc, 0) # Shouldn't count the file in dir_test due to the exclusion pattern def test_verbose_output(self): content = 'print("Hello")' self.create_file("sample.py", content) original_stdout = sys.stdout sys.stdout = StringIO() count_loc(self.test_dir, verbose=1) output = sys.stdout.getvalue() sys.stdout = original_stdout self.assertIn("Count LoCs in", output) def test_multiline_string_same_line(self): content = '''"""This is a multiline string ending on the same line""" print("Outside string")''' self.create_file("same_line_multiline.py", content) loc = count_loc(self.test_dir) self.assertEqual(loc, 1) # Only the print statement should count def test_multiline_string_ends_on_same_line(self): content = '"""a multiline string end on same line"""\nprint("Outstr")' self.create_file("same_line_multiline.py", content) loc = count_loc(self.test_dir) self.assertEqual(loc, 1) # Only the print 
statement should count def test_multiline_string_ends_in_middle_of_line(self): content = '''print("Start") """This is a multiline string ending in the middle of a line""" """This is another multiline string.""" print("End")''' self.create_file("multiline_in_middle.py", content) loc = count_loc(self.test_dir) self.assertEqual(loc, 2) # Both print statements should count def test_line_starting_with_triple_quotes_not_ending(self): content = '"""\nThis is a multiline string\n' self.create_file("test_file_2.py", content) path = os.path.join(self.test_dir, "test_file_2.py") self.assertEqual(count_loc(path), 0) # Because it's part of a multiline string def test_line_starting_and_ending_with_triple_quotes(self): content = '"""This is a one-liner docstring."""\n' self.create_file("test_file_3.py", content) path = os.path.join(self.test_dir, "test_file_3.py") self.assertEqual(count_loc(path), 0) # This is still considered a comment/docstring def test_string_open_true_line_starting_with_triple_quotes(self): content = '"""\nEnd of the multiline string."""\n' self.create_file("test_file_4.py", content) path = os.path.join(self.test_dir, "test_file_4.py") self.assertEqual(count_loc(path), 0) # Entire content is a multiline string/comment
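
    # --- Editor's note: illustrative extra case, not part of the original
    # suite. It sketches how `extensions` and `exclude` compose, reusing
    # only call signatures already exercised by the tests above; the assumed
    # behavior is that both filters apply together.
    def test_exclude_and_extensions_combined(self):
        self.create_file("keep.py", 'print("kept")\n')
        self.create_file("skip_test.py", 'print("skipped")\n')
        self.create_file("notes.txt", "ignored\n")
        loc = count_loc(
            self.test_dir, extensions=(".py",), exclude=("_test",)
        )
        self.assertEqual(loc, 1)  # Only keep.py is counted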
keras/keras/utils/code_stats_test.py/0
{ "file_path": "keras/keras/utils/code_stats_test.py", "repo_id": "keras", "token_count": 2502 }
177
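Read together, the tests above pin down `count_loc`'s contract: blank lines, comments, and multiline strings/docstrings are excluded, and names matching the exclude patterns (such as `_test`) are skipped. A minimal usage sketch follows; the `exclude` and `extensions` defaults shown are assumptions inferred from the tests, not a documented signature.

```python
# Hypothetical invocation of count_loc, inferred from the tests above.
from keras.utils.code_stats import count_loc

loc = count_loc(
    "keras",               # directory to scan recursively
    exclude=("_test",),    # assumed default: skip test files/directories
    extensions=(".py",),   # assumed default: only count Python sources
    verbose=1,             # prints "Count LoCs in ..." progress output
)
print(f"Lines of code: {loc}")
```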
from keras.testing import test_case from keras.utils import naming class NamingUtilsTest(test_case.TestCase): def test_uniquify_unique_name(self): name = "the_unique_name" unique_name = naming.uniquify(name) self.assertEqual(unique_name, name) def test_auto_name(self): self.assertEqual(naming.auto_name("unique_name"), "unique_name") self.assertEqual(naming.auto_name("unique_name"), "unique_name_1") self.assertEqual(naming.auto_name("unique_name"), "unique_name_2") def test_get_uid(self): self.assertEqual(naming.get_uid("very_unique_name"), 1) self.assertEqual(naming.get_uid("very_unique_name"), 2) self.assertEqual(naming.get_uid("very_unique_name"), 3) def test_uniquify_non_unique_name(self): name = "non_unique_name" naming.uniquify(name) unique_name = naming.uniquify(name) self.assertEqual(unique_name, name + "_1") def test_to_snake_case_snake_case_name(self): name = "snake_case_name" snake_case_name = naming.to_snake_case(name) self.assertEqual(snake_case_name, name) def test_get_uid_existing_prefix(self): prefix = "existing_prefix" naming.get_uid(prefix) uid = naming.get_uid(prefix) self.assertEqual(uid, 2) def test_reset_uids(self): naming.get_uid("unique_name") naming.reset_uids() uid = naming.get_uid("unique_name") self.assertEqual(uid, 1) def test_get_object_name_no_name_attribute(self): class ObjectWithoutName: __name__ = "ObjectWithoutName" obj = ObjectWithoutName() object_name = naming.get_object_name(obj) self.assertEqual(object_name, "object_without_name") def test_get_object_name_no_name_or_class_attribute(self): class ObjectWithoutNameOrClass: pass obj = ObjectWithoutNameOrClass() object_name = naming.get_object_name(obj) self.assertEqual(object_name, "object_without_name_or_class") def test_uniquify_already_uniquified_name(self): name = "unique_name" unique_name = naming.uniquify(name) new_unique_name = naming.uniquify(unique_name) # first time `name` is uniquified so returns same name self.assertEqual(name, unique_name) # second time `name` is uniquified should be different # from the first output self.assertNotEqual(new_unique_name, unique_name) def test_to_snake_case_capital_after_any_character(self): name = "myVariableNameHere" snake_case_name = naming.to_snake_case(name) self.assertEqual(snake_case_name, "my_variable_name_here") def test_to_snake_case_lower_before_upper(self): name = "convertTHIS" snake_case_name = naming.to_snake_case(name) self.assertEqual(snake_case_name, "convert_this") def test_to_snake_case_already_snake_cased(self): name = "already_snake_cased" snake_case_name = naming.to_snake_case(name) self.assertEqual(snake_case_name, name) def test_to_snake_case_no_changes(self): name = "lowercase" snake_case_name = naming.to_snake_case(name) self.assertEqual(snake_case_name, name) def test_to_snake_case_single_uppercase_word(self): name = "UPPERCASE" snake_case_name = naming.to_snake_case(name) self.assertEqual(snake_case_name, "uppercase") def test_get_object_name_for_keras_objects(self): class MockKerasObject: name = "mock_object" obj = MockKerasObject() result = naming.get_object_name(obj) self.assertEqual( result, "mock_object", f"Expected 'mock_object' but got {result}" ) # Test for function objects that have a `__name__` attribute. def test_get_object_name_for_functions(self): def mock_function(): pass result = naming.get_object_name(mock_function) # Assumes to_snake_case works correctly. expected_name = naming.to_snake_case(mock_function.__name__) self.assertEqual( result, expected_name, f"Expected '{expected_name}' but got {result}", )
keras/keras/utils/naming_test.py/0
{ "file_path": "keras/keras/utils/naming_test.py", "repo_id": "keras", "token_count": 1909 }
178
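The expectations encoded by these tests can be summarized in a few lines. A sketch of the observable behavior, assuming a fresh counter state (i.e. that `reset_uids` clears the per-prefix counters the other helpers consult):

```python
# Behavior of the naming helpers, as asserted by the tests above.
from keras.utils import naming

naming.reset_uids()
assert naming.get_uid("conv") == 1              # per-prefix counter
assert naming.get_uid("conv") == 2              # increments on each call
assert naming.uniquify("block") == "block"      # first use: unchanged
assert naming.uniquify("block") == "block_1"    # later uses get a suffix
assert naming.to_snake_case("myConvLayer") == "my_conv_layer"
```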
from keras.utils.module_utils import tensorflow as tf


def expand_dims(inputs, axis):
    """Expand dims on sparse, ragged, or dense tensors."""
    if isinstance(inputs, tf.SparseTensor):
        return tf.sparse.expand_dims(inputs, axis)
    else:
        return tf.expand_dims(inputs, axis)


def sparse_bincount(inputs, depth, binary_output, dtype, count_weights=None):
    """Apply binary or count encoding to an input and return a sparse tensor."""
    result = tf.sparse.bincount(
        inputs,
        weights=count_weights,
        minlength=depth,
        maxlength=depth,
        axis=-1,
        binary_output=binary_output,
    )
    result = tf.cast(result, dtype)
    if inputs.shape.rank == 1:
        output_shape = (depth,)
    else:
        batch_size = tf.shape(result)[0]
        output_shape = (batch_size, depth)
    result = tf.SparseTensor(
        indices=result.indices, values=result.values, dense_shape=output_shape
    )
    return result


def dense_bincount(inputs, depth, binary_output, dtype, count_weights=None):
    """Apply binary or count encoding to an input."""
    result = tf.math.bincount(
        inputs,
        weights=count_weights,
        minlength=depth,
        maxlength=depth,
        dtype=dtype,
        axis=-1,
        binary_output=binary_output,
    )
    if inputs.shape.rank == 1:
        result.set_shape(tf.TensorShape((depth,)))
    else:
        batch_size = inputs.shape.as_list()[0]
        result.set_shape(tf.TensorShape((batch_size, depth)))
    return result


def encode_categorical_inputs(
    inputs,
    output_mode,
    depth,
    dtype="float32",
    sparse=False,
    count_weights=None,
    idf_weights=None,
):
    """Encodes categorical inputs according to output_mode."""
    if output_mode == "int":
        return tf.identity(tf.cast(inputs, dtype))

    original_shape = inputs.shape
    # In all cases, we should uprank scalar input to a single sample.
    if inputs.shape.rank == 0:
        inputs = expand_dims(inputs, -1)
    # One hot will uprank only if the final output dimension is not already 1.
    if output_mode == "one_hot":
        if inputs.shape[-1] != 1:
            inputs = expand_dims(inputs, -1)

    # TODO(b/190445202): remove output rank restriction.
    if inputs.shape.rank > 2:
        raise ValueError(
            "When output_mode is not `'int'`, maximum supported output rank "
            f"is 2. Received output_mode {output_mode} and input shape "
            f"{original_shape}, "
            f"which would result in output rank {inputs.shape.rank}."
        )

    binary_output = output_mode in ("multi_hot", "one_hot")
    if sparse:
        bincounts = sparse_bincount(
            inputs, depth, binary_output, dtype, count_weights
        )
    else:
        bincounts = dense_bincount(
            inputs, depth, binary_output, dtype, count_weights
        )
    if output_mode != "tf_idf":
        return bincounts

    if idf_weights is None:
        raise ValueError(
            "When output mode is `'tf_idf'`, idf_weights must be provided. "
            f"Received: output_mode={output_mode} and idf_weights={idf_weights}"
        )

    if sparse:
        value_weights = tf.gather(idf_weights, bincounts.indices[:, -1])
        return tf.SparseTensor(
            bincounts.indices,
            value_weights * bincounts.values,
            bincounts.dense_shape,
        )
    else:
        return tf.multiply(tf.cast(bincounts, idf_weights.dtype), idf_weights)


def get_tensor_spec(t, dynamic_batch=False, name=None):
    """Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`."""
    if isinstance(t, tf.TypeSpec):
        spec = t
    elif isinstance(t, tf.__internal__.CompositeTensor):
        # Check for ExtensionTypes
        spec = t._type_spec
    elif hasattr(t, "shape") and hasattr(t, "dtype"):
        spec = tf.TensorSpec(shape=t.shape, dtype=t.dtype, name=name)
    else:
        return None  # Allow non-Tensors to pass through.
if not dynamic_batch: return spec shape = spec.shape if shape.rank is None or shape.rank == 0: return spec shape_list = shape.as_list() shape_list[0] = None shape = tf.TensorShape(shape_list) spec._shape = shape return spec def ensure_tensor(inputs, dtype=None): """Ensures the input is a Tensor, SparseTensor or RaggedTensor.""" if not isinstance(inputs, (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)): inputs = tf.convert_to_tensor(inputs, dtype) if dtype is not None and inputs.dtype != dtype: inputs = tf.cast(inputs, dtype) return inputs
keras/keras/utils/tf_utils.py/0
{ "file_path": "keras/keras/utils/tf_utils.py", "repo_id": "keras", "token_count": 2013 }
179
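For a concrete sense of `encode_categorical_inputs`, the sketch below exercises the dense path on a rank-1 integer input; the expected outputs follow directly from `dense_bincount` with and without `binary_output`:

```python
import tensorflow as tf
from keras.utils.tf_utils import encode_categorical_inputs

ids = tf.constant([1, 3, 3])  # rank-1 integer input, vocabulary size 5

# "multi_hot" sets binary_output=True: a 0/1 indicator per vocabulary slot.
multi_hot = encode_categorical_inputs(ids, output_mode="multi_hot", depth=5)
# -> [0., 1., 0., 1., 0.]

# Any other non-"int" mode (e.g. "count") keeps the raw bincount values.
counts = encode_categorical_inputs(ids, output_mode="count", depth=5)
# -> [0., 1., 0., 2., 0.]
```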
[flake8]
ignore =
    # Conflicts with black
    E203
    # defaults flake8 ignores
    E121,E123,E126,E226,E24,E704,W503,W504
    # Function name should be lowercase
    N802
    # lowercase ... imported as non lowercase
    # Useful to ignore for "import keras.backend as K"
    N812
    # do not use bare 'except'
    E722
    # too many "#"
    E266

exclude =
    *_pb2.py,
    *_pb2_grpc.py,

extend-exclude =
    # excluding examples/ and guides/ since they are formatted as
    # follow-along guides
    examples,
    guides,

# imported but unused in __init__.py, that's ok.
per-file-ignores =
    # import not used
    **/__init__.py:F401
    **/random.py:F401

max-line-length = 80
keras/setup.cfg/0
{ "file_path": "keras/setup.cfg", "repo_id": "keras", "token_count": 289 }
180
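One concrete effect of the `N812` ignore in the config above: the conventional backend alias stays lint-clean without per-line suppressions.

```python
# Allowed by the N812 ignore in setup.cfg (lowercase module imported as a
# non-lowercase name); flake8 would otherwise flag this line.
import keras.backend as K
```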
{ "python.linting.flake8Enabled": true, "python.linting.pylintEnabled": false, "python.linting.enabled": true, "editor.rulers": [ 80 ], "editor.formatOnSave": true, "python.formatting.provider": "black", "python.formatting.blackArgs": [ "--line-length", "80" ], "python.sortImports.args": [ "--profile", "black", "--sl" ], "[python]": { "editor.codeActionsOnSave": { "source.organizeImports": true } }, "python.analysis.diagnosticSeverityOverrides": { "reportMissingImports": "none" } }
tf-keras/.vscode/settings.json/0
{ "file_path": "tf-keras/.vscode/settings.json", "repo_id": "tf-keras", "token_count": 260 }
181
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in activation functions."""

import sys
import types

import tensorflow.compat.v2 as tf

import tf_keras.layers.activation as activation_layers
from tf_keras import backend
from tf_keras.saving import object_registration
from tf_keras.saving import serialization_lib
from tf_keras.saving.legacy import serialization as legacy_serialization
from tf_keras.saving.legacy.saved_model import utils as saved_model_utils
from tf_keras.utils import generic_utils

# isort: off
from tensorflow.python.util.tf_export import keras_export

# b/123041942
# In TF 2.x, if `tf.nn.softmax` is used as an activation function in Keras
# layers, it gets serialized as 'softmax_v2' instead of 'softmax' as the
# internal method name is returned in serialization. This results in errors in
# model exporting and loading as TF-Keras can't find any activation function
# with the name of `softmax_v2`.
# This dict maps the activation function name from its v2 version to its
# canonical name.
_TF_ACTIVATIONS_V2 = {
    "softmax_v2": "softmax",
}


@keras_export("keras.activations.softmax")
@tf.__internal__.dispatch.add_dispatch_support
def softmax(x, axis=-1):
    """Softmax converts a vector of values to a probability distribution.

    The elements of the output vector are in range (0, 1) and sum to 1.

    Each vector is handled independently. The `axis` argument sets which axis
    of the input the function is applied along.

    Softmax is often used as the activation for the last layer of a
    classification network because the result could be interpreted as a
    probability distribution.

    The softmax of each vector x is computed as
    `exp(x) / tf.reduce_sum(exp(x))`.

    The input values are the log-odds of the resulting probability.

    Args:
        x: Input tensor.
        axis: Integer, axis along which the softmax normalization is applied.

    Returns:
        Tensor, output of softmax transformation (all values are non-negative
        and sum to 1).

    Examples:

    **Example 1: standalone usage**

    >>> inputs = tf.random.normal(shape=(32, 10))
    >>> outputs = tf.keras.activations.softmax(inputs)
    >>> tf.reduce_sum(outputs[0, :])  # Each sample in the batch now sums to 1
    <tf.Tensor: shape=(), dtype=float32, numpy=1.0000001>

    **Example 2: usage in a `Dense` layer**

    >>> layer = tf.keras.layers.Dense(32,
    ...                               activation=tf.keras.activations.softmax)
    """
    return backend.softmax(x, axis)


@keras_export("keras.activations.elu")
@tf.__internal__.dispatch.add_dispatch_support
def elu(x, alpha=1.0):
    """Exponential Linear Unit.

    The exponential linear unit (ELU) with `alpha > 0` is:
    `x` if `x > 0` and `alpha * (exp(x) - 1)` if `x < 0`
    The ELU hyperparameter `alpha` controls the value to which an
    ELU saturates for negative net inputs. ELUs diminish the
    vanishing gradient effect.

    ELUs have negative values, which push the mean of the activations
    closer to zero.
Mean activations that are closer to zero enable faster learning as they bring the gradient closer to the natural gradient. ELUs saturate to a negative value when the argument gets smaller. Saturation means a small derivative which decreases the variation and the information that is propagated to the next layer. Example Usage: >>> import tensorflow as tf >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='elu', ... input_shape=(28, 28, 1))) >>> model.add(tf.keras.layers.MaxPooling2D((2, 2))) >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu')) >>> model.add(tf.keras.layers.MaxPooling2D((2, 2))) >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu')) <tensorflow.python.keras.engine.sequential.Sequential object ...> Args: x: Input tensor. alpha: A scalar, slope of negative section. `alpha` controls the value to which an ELU saturates for negative net inputs. Returns: The exponential linear unit (ELU) activation function: `x` if `x > 0` and `alpha * (exp(x) - 1)` if `x < 0`. Reference: - [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) (Clevert et al, 2016)](https://arxiv.org/abs/1511.07289) """ return backend.elu(x, alpha) @keras_export("keras.activations.selu") @tf.__internal__.dispatch.add_dispatch_support def selu(x): """Scaled Exponential Linear Unit (SELU). The Scaled Exponential Linear Unit (SELU) activation function is defined as: - `if x > 0: return scale * x` - `if x < 0: return scale * alpha * (exp(x) - 1)` where `alpha` and `scale` are pre-defined constants (`alpha=1.67326324` and `scale=1.05070098`). Basically, the SELU activation function multiplies `scale` (> 1) with the output of the `tf.keras.activations.elu` function to ensure a slope larger than one for positive inputs. The values of `alpha` and `scale` are chosen so that the mean and variance of the inputs are preserved between two consecutive layers as long as the weights are initialized correctly (see `tf.keras.initializers.LecunNormal` initializer) and the number of input units is "large enough" (see reference paper for more information). Example Usage: >>> num_classes = 10 # 10-class problem >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Dense(64, kernel_initializer='lecun_normal', ... activation='selu')) >>> model.add(tf.keras.layers.Dense(32, kernel_initializer='lecun_normal', ... activation='selu')) >>> model.add(tf.keras.layers.Dense(16, kernel_initializer='lecun_normal', ... activation='selu')) >>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax')) Args: x: A tensor or variable to compute the activation function for. Returns: The scaled exponential unit activation: `scale * elu(x, alpha)`. Notes: - To be used together with the `tf.keras.initializers.LecunNormal` initializer. - To be used together with the dropout variant `tf.keras.layers.AlphaDropout` (not regular dropout). References: - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515) """ return tf.nn.selu(x) @keras_export("keras.activations.softplus") @tf.__internal__.dispatch.add_dispatch_support def softplus(x): """Softplus activation function, `softplus(x) = log(exp(x) + 1)`. Example Usage: >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32) >>> b = tf.keras.activations.softplus(a) >>> b.numpy() array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00, 2.0000000e+01], dtype=float32) Args: x: Input tensor. Returns: The softplus activation: `log(exp(x) + 1)`. 
""" return tf.math.softplus(x) @keras_export("keras.activations.softsign") @tf.__internal__.dispatch.add_dispatch_support def softsign(x): """Softsign activation function, `softsign(x) = x / (abs(x) + 1)`. Example Usage: >>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32) >>> b = tf.keras.activations.softsign(a) >>> b.numpy() array([-0.5, 0. , 0.5], dtype=float32) Args: x: Input tensor. Returns: The softsign activation: `x / (abs(x) + 1)`. """ return tf.math.softsign(x) @keras_export("keras.activations.swish") @tf.__internal__.dispatch.add_dispatch_support def swish(x): """Swish activation function, `swish(x) = x * sigmoid(x)`. Swish activation function which returns `x*sigmoid(x)`. It is a smooth, non-monotonic function that consistently matches or outperforms ReLU on deep networks, it is unbounded above and bounded below. Example Usage: >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32) >>> b = tf.keras.activations.swish(a) >>> b.numpy() array([-4.1223075e-08, -2.6894143e-01, 0.0000000e+00, 7.3105860e-01, 2.0000000e+01], dtype=float32) Args: x: Input tensor. Returns: The swish activation applied to `x` (see reference paper for details). Reference: - [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941) """ return tf.nn.silu(x) @keras_export("keras.activations.relu") @tf.__internal__.dispatch.add_dispatch_support def relu(x, alpha=0.0, max_value=None, threshold=0.0): """Applies the rectified linear unit activation function. With default values, this returns the standard ReLU activation: `max(x, 0)`, the element-wise maximum of 0 and the input tensor. Modifying default parameters allows you to use non-zero thresholds, change the max value of the activation, and to use a non-zero multiple of the input for values below the threshold. Example: >>> foo = tf.constant([-10, -5, 0.0, 5, 10], dtype = tf.float32) >>> tf.keras.activations.relu(foo).numpy() array([ 0., 0., 0., 5., 10.], dtype=float32) >>> tf.keras.activations.relu(foo, alpha=0.5).numpy() array([-5. , -2.5, 0. , 5. , 10. ], dtype=float32) >>> tf.keras.activations.relu(foo, max_value=5.).numpy() array([0., 0., 0., 5., 5.], dtype=float32) >>> tf.keras.activations.relu(foo, threshold=5.).numpy() array([-0., -0., 0., 0., 10.], dtype=float32) Args: x: Input `tensor` or `variable`. alpha: A `float` that governs the slope for values lower than the threshold. max_value: A `float` that sets the saturation threshold (the largest value the function will return). threshold: A `float` giving the threshold value of the activation function below which values will be damped or set to zero. Returns: A `Tensor` representing the input tensor, transformed by the relu activation function. Tensor will be of the same shape and dtype of input `x`. """ return backend.relu( x, alpha=alpha, max_value=max_value, threshold=threshold ) @keras_export("keras.activations.gelu", v1=[]) @tf.__internal__.dispatch.add_dispatch_support def gelu(x, approximate=False): """Applies the Gaussian error linear unit (GELU) activation function. Gaussian error linear unit (GELU) computes `x * P(X <= x)`, where `P(X) ~ N(0, 1)`. The (GELU) nonlinearity weights inputs by their value, rather than gates inputs by their sign as in ReLU. Example: >>> x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32) >>> y = tf.keras.activations.gelu(x) >>> y.numpy() array([-0.00404951, -0.15865529, 0. , 0.8413447 , 2.9959507 ], dtype=float32) >>> y = tf.keras.activations.gelu(x, approximate=True) >>> y.numpy() array([-0.00363752, -0.15880796, 0. 
, 0.841192 , 2.9963627 ], dtype=float32) Args: x: Input tensor. approximate: A `bool`, whether to enable approximation. Returns: The gaussian error linear activation: `0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))` if `approximate` is `True` or `x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`, where `P(X) ~ N(0, 1)`, if `approximate` is `False`. Reference: - [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415) """ # noqa: E501 return tf.nn.gelu(x, approximate) @keras_export("keras.activations.tanh") @tf.__internal__.dispatch.add_dispatch_support def tanh(x): """Hyperbolic tangent activation function. Example: >>> a = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype = tf.float32) >>> b = tf.keras.activations.tanh(a) >>> b.numpy() array([-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547], dtype=float32) Args: x: Input tensor. Returns: Tensor of same shape and dtype of input `x`, with tanh activation: `tanh(x) = sinh(x)/cosh(x) = ((exp(x) - exp(-x))/(exp(x) + exp(-x)))`. """ return tf.tanh(x) @keras_export("keras.activations.sigmoid") @tf.__internal__.dispatch.add_dispatch_support def sigmoid(x): """Sigmoid activation function, `sigmoid(x) = 1 / (1 + exp(-x))`. Applies the sigmoid activation function. For small values (<-5), `sigmoid` returns a value close to zero, and for large values (>5) the result of the function gets close to 1. Sigmoid is equivalent to a 2-element Softmax, where the second element is assumed to be zero. The sigmoid function always returns a value between 0 and 1. Example: >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32) >>> b = tf.keras.activations.sigmoid(a) >>> b.numpy() array([2.0611537e-09, 2.6894143e-01, 5.0000000e-01, 7.3105860e-01, 1.0000000e+00], dtype=float32) Args: x: Input tensor. Returns: Tensor with the sigmoid activation: `1 / (1 + exp(-x))`. """ return backend.sigmoid(x) @keras_export("keras.activations.exponential") @tf.__internal__.dispatch.add_dispatch_support def exponential(x): """Exponential activation function. Example: >>> a = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype = tf.float32) >>> b = tf.keras.activations.exponential(a) >>> b.numpy() array([0.04978707, 0.36787945, 1., 2.7182817 , 20.085537], dtype=float32) Args: x: Input tensor. Returns: Tensor with exponential activation: `exp(x)`. """ return tf.exp(x) @keras_export("keras.activations.hard_sigmoid") @tf.__internal__.dispatch.add_dispatch_support def hard_sigmoid(x): """Hard sigmoid activation function. A faster approximation of the sigmoid activation. Piecewise linear approximation of the sigmoid function. Ref: 'https://en.wikipedia.org/wiki/Hard_sigmoid' Example: >>> a = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype = tf.float32) >>> b = tf.keras.activations.hard_sigmoid(a) >>> b.numpy() array([0. , 0.3, 0.5, 0.7, 1. ], dtype=float32) Args: x: Input tensor. Returns: The hard sigmoid activation, defined as: - `if x < -2.5: return 0` - `if x > 2.5: return 1` - `if -2.5 <= x <= 2.5: return 0.2 * x + 0.5` """ return backend.hard_sigmoid(x) @keras_export("keras.activations.linear") @tf.__internal__.dispatch.add_dispatch_support def linear(x): """Linear activation function (pass-through). Example: >>> a = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype = tf.float32) >>> b = tf.keras.activations.linear(a) >>> b.numpy() array([-3., -1., 0., 1., 3.], dtype=float32) Args: x: Input tensor. Returns: The input, unmodified. 
""" return x @keras_export("keras.activations.mish") @tf.__internal__.dispatch.add_dispatch_support def mish(x): """Mish activation function. It is defined as: ```python def mish(x): return x * tanh(softplus(x)) ``` where `softplus` is defined as: ```python def softplus(x): return log(exp(x) + 1) ``` Example: >>> a = tf.constant([-3.0, -1.0, 0.0, 1.0], dtype = tf.float32) >>> b = tf.keras.activations.mish(a) >>> b.numpy() array([-0.14564745, -0.30340144, 0., 0.86509836], dtype=float32) Args: x: Input tensor. Returns: The mish activation. Reference: - [Mish: A Self Regularized Non-Monotonic Activation Function](https://arxiv.org/abs/1908.08681) """ return x * tf.math.tanh(tf.math.softplus(x)) @keras_export("keras.activations.serialize") @tf.__internal__.dispatch.add_dispatch_support def serialize(activation, use_legacy_format=False): """Returns the string identifier of an activation function. Args: activation : Function object. use_legacy_format: Boolean, whether to use the legacy format for serialization. Defaults to False. Returns: String denoting the name attribute of the input function Example: >>> tf.keras.activations.serialize(tf.keras.activations.tanh) 'tanh' >>> tf.keras.activations.serialize(tf.keras.activations.sigmoid) 'sigmoid' >>> tf.keras.activations.serialize('abcd') Traceback (most recent call last): ... ValueError: Unknown activation function 'abcd' cannot be serialized. Raises: ValueError: The input function is not a valid one. """ if ( hasattr(activation, "__name__") and activation.__name__ in _TF_ACTIVATIONS_V2 ): return _TF_ACTIVATIONS_V2[activation.__name__] if use_legacy_format: return legacy_serialization.serialize_keras_object(activation) fn_config = serialization_lib.serialize_keras_object(activation) if ( not tf.__internal__.tf2.enabled() or saved_model_utils.in_tf_saved_model_scope() ): return fn_config if "config" not in fn_config: raise ValueError( f"Unknown activation function '{activation}' cannot be " "serialized due to invalid function name. Make sure to use " "an activation name that matches the references defined in " "activations.py or use " "`@keras.saving.register_keras_serializable()` " "to register any custom activations. " f"config={fn_config}" ) if not isinstance(activation, types.FunctionType): # Case for additional custom activations represented by objects return fn_config if ( isinstance(fn_config["config"], str) and fn_config["config"] not in globals() ): # Case for custom activation functions from external activations modules fn_config["config"] = object_registration.get_registered_name( activation ) return fn_config return fn_config["config"] # Case for keras.activations builtins (simply return name) # Add additional globals so that deserialize() can find these common activation # functions leaky_relu = tf.nn.leaky_relu log_softmax = tf.nn.log_softmax relu6 = tf.nn.relu6 silu = tf.nn.silu @keras_export("keras.activations.deserialize") @tf.__internal__.dispatch.add_dispatch_support def deserialize(name, custom_objects=None, use_legacy_format=False): """Returns activation function given a string identifier. Args: name: The name of the activation function. custom_objects: Optional `{function_name: function_obj}` dictionary listing user-provided activation functions. use_legacy_format: Boolean, whether to use the legacy format for deserialization. Defaults to False. Returns: Corresponding activation function. 
Example: >>> tf.keras.activations.deserialize('linear') <function linear at 0x1239596a8> >>> tf.keras.activations.deserialize('sigmoid') <function sigmoid at 0x123959510> >>> tf.keras.activations.deserialize('abcd') Traceback (most recent call last): ... ValueError: Unknown activation function 'abcd' cannot be deserialized. Raises: ValueError: `Unknown activation function` if the input string does not denote any defined Tensorflow activation function. """ activation_functions = {} current_module = sys.modules[__name__] # we put 'current_module' after 'activation_layers' to prefer the local one # if there is a collision generic_utils.populate_dict_with_module_objects( activation_functions, (activation_layers, current_module), obj_filter=callable, ) if use_legacy_format: return legacy_serialization.deserialize_keras_object( name, module_objects=activation_functions, custom_objects=custom_objects, printable_module_name="activation function", ) returned_fn = serialization_lib.deserialize_keras_object( name, module_objects=activation_functions, custom_objects=custom_objects, printable_module_name="activation function", ) if isinstance(returned_fn, str): raise ValueError( f"Unknown activation function '{name}' cannot be deserialized." ) return returned_fn @keras_export("keras.activations.get") @tf.__internal__.dispatch.add_dispatch_support def get(identifier): """Returns function. Args: identifier: Function or string Returns: Function corresponding to the input string or input function. Example: >>> tf.keras.activations.get('softmax') <function softmax at 0x1222a3d90> >>> tf.keras.activations.get(tf.keras.activations.softmax) <function softmax at 0x1222a3d90> >>> tf.keras.activations.get(None) <function linear at 0x1239596a8> >>> tf.keras.activations.get(abs) <built-in function abs> >>> tf.keras.activations.get('abcd') Traceback (most recent call last): ... ValueError: Unknown activation function:abcd Raises: ValueError: Input is an unknown function or string, i.e., the input does not denote any defined function. """ if identifier is None: return linear if isinstance(identifier, (str, dict)): use_legacy_format = ( "module" not in identifier if isinstance(identifier, dict) else False ) return deserialize(identifier, use_legacy_format=use_legacy_format) elif callable(identifier): return identifier raise TypeError( f"Could not interpret activation function identifier: {identifier}" )
tf-keras/tf_keras/activations.py/0
{ "file_path": "tf-keras/tf_keras/activations.py", "repo_id": "tf-keras", "token_count": 9022 }
182
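Two behaviors from the file above are worth a concrete illustration: `get()` accepts either a string or a callable, and `serialize()` routes `tf.nn.softmax` through the `_TF_ACTIVATIONS_V2` alias table so it round-trips as plain `"softmax"` rather than `"softmax_v2"`. A short sketch:

```python
import tensorflow as tf
from tf_keras import activations

# get() resolves string identifiers to the activation function.
fn = activations.get("swish")
y = fn(tf.constant([-1.0, 0.0, 1.0]))  # x * sigmoid(x), elementwise

# The v2 alias table maps "softmax_v2" back to its canonical name, so the
# raw TF op serializes cleanly.
assert activations.serialize(tf.nn.softmax) == "softmax"
```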
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """EfficientNet models for TF-Keras. Reference: - [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks]( https://arxiv.org/abs/1905.11946) (ICML 2019) """ import copy import math import tensorflow.compat.v2 as tf from tf_keras import backend from tf_keras.applications import imagenet_utils from tf_keras.engine import training from tf_keras.layers import VersionAwareLayers from tf_keras.utils import data_utils from tf_keras.utils import layer_utils # isort: off from tensorflow.python.util.tf_export import keras_export BASE_WEIGHTS_PATH = "https://storage.googleapis.com/keras-applications/" WEIGHTS_HASHES = { "b0": ( "902e53a9f72be733fc0bcb005b3ebbac", "50bc09e76180e00e4465e1a485ddc09d", ), "b1": ( "1d254153d4ab51201f1646940f018540", "74c4e6b3e1f6a1eea24c589628592432", ), "b2": ( "b15cce36ff4dcbd00b6dd88e7857a6ad", "111f8e2ac8aa800a7a99e3239f7bfb39", ), "b3": ( "ffd1fdc53d0ce67064dc6a9c7960ede0", "af6d107764bb5b1abb91932881670226", ), "b4": ( "18c95ad55216b8f92d7e70b3a046e2fc", "ebc24e6d6c33eaebbd558eafbeedf1ba", ), "b5": ( "ace28f2a6363774853a83a0b21b9421a", "38879255a25d3c92d5e44e04ae6cec6f", ), "b6": ( "165f6e37dce68623721b423839de8be5", "9ecce42647a20130c1f39a5d4cb75743", ), "b7": ( "8c03f828fec3ef71311cd463b6759d99", "cbcfe4450ddf6f3ad90b1b398090fe4a", ), } DEFAULT_BLOCKS_ARGS = [ { "kernel_size": 3, "repeats": 1, "filters_in": 32, "filters_out": 16, "expand_ratio": 1, "id_skip": True, "strides": 1, "se_ratio": 0.25, }, { "kernel_size": 3, "repeats": 2, "filters_in": 16, "filters_out": 24, "expand_ratio": 6, "id_skip": True, "strides": 2, "se_ratio": 0.25, }, { "kernel_size": 5, "repeats": 2, "filters_in": 24, "filters_out": 40, "expand_ratio": 6, "id_skip": True, "strides": 2, "se_ratio": 0.25, }, { "kernel_size": 3, "repeats": 3, "filters_in": 40, "filters_out": 80, "expand_ratio": 6, "id_skip": True, "strides": 2, "se_ratio": 0.25, }, { "kernel_size": 5, "repeats": 3, "filters_in": 80, "filters_out": 112, "expand_ratio": 6, "id_skip": True, "strides": 1, "se_ratio": 0.25, }, { "kernel_size": 5, "repeats": 4, "filters_in": 112, "filters_out": 192, "expand_ratio": 6, "id_skip": True, "strides": 2, "se_ratio": 0.25, }, { "kernel_size": 3, "repeats": 1, "filters_in": 192, "filters_out": 320, "expand_ratio": 6, "id_skip": True, "strides": 1, "se_ratio": 0.25, }, ] CONV_KERNEL_INITIALIZER = { "class_name": "VarianceScaling", "config": { "scale": 2.0, "mode": "fan_out", "distribution": "truncated_normal", }, } DENSE_KERNEL_INITIALIZER = { "class_name": "VarianceScaling", "config": { "scale": 1.0 / 3.0, "mode": "fan_out", "distribution": "uniform", }, } layers = VersionAwareLayers() BASE_DOCSTRING = """Instantiates the {name} architecture. 
Reference:
    - [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
        https://arxiv.org/abs/1905.11946) (ICML 2019)

    This function returns a TF-Keras image classification model,
    optionally loaded with weights pre-trained on ImageNet.

    For image classification use cases, see
    [this page for detailed examples](
    https://keras.io/api/applications/#usage-examples-for-image-classification-models).

    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
    https://keras.io/guides/transfer_learning/).

    Note: each TF-Keras Application expects a specific kind of input
    preprocessing. For EfficientNet, input preprocessing is included as part
    of the model (as a `Rescaling` layer), and thus
    `tf.keras.applications.efficientnet.preprocess_input` is actually a
    pass-through function. EfficientNet models expect their inputs to be
    float tensors of pixels with values in the [0-255] range.

    Args:
        include_top: Whether to include the fully-connected
            layer at the top of the network. Defaults to `True`.
        weights: One of `None` (random initialization),
            'imagenet' (pre-training on ImageNet),
            or the path to the weights file to be loaded.
            Defaults to 'imagenet'.
        input_tensor: Optional TF-Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: Optional shape tuple, only to be specified
            if `include_top` is False.
            It should have exactly 3 input channels.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`. Defaults to `None`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: Optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified. Defaults to `1000`
            (the number of ImageNet classes).
        classifier_activation: A `str` or callable. The activation function
            to use on the "top" layer. Ignored unless `include_top=True`.
            Set `classifier_activation=None` to return the logits of the
            "top" layer. Defaults to 'softmax'.
            When loading pretrained weights, `classifier_activation` can
            only be `None` or `"softmax"`.

    Returns:
        A `keras.Model` instance.
"""

IMAGENET_STDDEV_RGB = [0.229, 0.224, 0.225]


def EfficientNet(
    width_coefficient,
    depth_coefficient,
    default_size,
    dropout_rate=0.2,
    drop_connect_rate=0.2,
    depth_divisor=8,
    activation="swish",
    blocks_args="default",
    model_name="efficientnet",
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
):
    """Instantiates the EfficientNet architecture.

    Args:
        width_coefficient: float, scaling coefficient for network width.
        depth_coefficient: float, scaling coefficient for network depth.
        default_size: integer, default input image size.
        dropout_rate: float, dropout rate before final classifier layer.
        drop_connect_rate: float, dropout rate at skip connections.
        depth_divisor: integer, a unit of network width.
        activation: activation function.
        blocks_args: list of dicts, parameters to construct block modules.
        model_name: string, model name.
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
            'imagenet' (pre-training on ImageNet),
            or the path to the weights file to be loaded.
        input_tensor: optional TF-Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False.
            It should have exactly 3 input channels.
        pooling: optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
        classifier_activation: A `str` or callable. The activation function
            to use on the "top" layer. Ignored unless `include_top=True`.
            Set `classifier_activation=None` to return the logits of the
            "top" layer.

    Returns:
        A `keras.Model` instance.

    Raises:
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        ValueError: if `classifier_activation` is not `softmax` or `None`
            when using a pretrained top layer.
    """
    if blocks_args == "default":
        blocks_args = DEFAULT_BLOCKS_ARGS

    if not (weights in {"imagenet", None} or tf.io.gfile.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), `imagenet` "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded."
        )

    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            " as true, `classes` should be 1000"
        )

    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=default_size,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    bn_axis = 3 if backend.image_data_format() == "channels_last" else 1

    def round_filters(filters, divisor=depth_divisor):
        """Round number of filters based on width multiplier."""
        filters *= width_coefficient
        new_filters = max(
            divisor, int(filters + divisor / 2) // divisor * divisor
        )
        # Make sure that round down does not go down by more than 10%.
        if new_filters < 0.9 * filters:
            new_filters += divisor
        return int(new_filters)

    def round_repeats(repeats):
        """Round number of repeats based on depth multiplier."""
        return int(math.ceil(depth_coefficient * repeats))

    # Build stem
    x = img_input
    x = layers.Rescaling(1.0 / 255.0)(x)
    x = layers.Normalization(axis=bn_axis)(x)
    if weights == "imagenet":
        # Note that the normalization layer uses the squared value of STDDEV
        # as the variance for the layer: result = (input - mean) / sqrt(var)
        # However, the original implementation uses (input - mean) / var to
        # normalize the input, so we need to divide by another sqrt(var) to
        # match the original implementation.
# See https://github.com/tensorflow/tensorflow/issues/49930 for more # details if backend.image_data_format() == "channels_first": shape_for_multiply = [1, 3, 1, 1] else: shape_for_multiply = [1, 1, 1, 3] x = tf.math.multiply( x, tf.reshape( [1.0 / math.sqrt(stddev) for stddev in IMAGENET_STDDEV_RGB], shape_for_multiply, ), ) x = layers.ZeroPadding2D( padding=imagenet_utils.correct_pad(x, 3), name="stem_conv_pad" )(x) x = layers.Conv2D( round_filters(32), 3, strides=2, padding="valid", use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, name="stem_conv", )(x) x = layers.BatchNormalization(axis=bn_axis, name="stem_bn")(x) x = layers.Activation(activation, name="stem_activation")(x) # Build blocks blocks_args = copy.deepcopy(blocks_args) b = 0 blocks = float(sum(round_repeats(args["repeats"]) for args in blocks_args)) for i, args in enumerate(blocks_args): assert args["repeats"] > 0 # Update block input and output filters based on depth multiplier. args["filters_in"] = round_filters(args["filters_in"]) args["filters_out"] = round_filters(args["filters_out"]) for j in range(round_repeats(args.pop("repeats"))): # The first block needs to take care of stride and filter size # increase. if j > 0: args["strides"] = 1 args["filters_in"] = args["filters_out"] x = block( x, activation, drop_connect_rate * b / blocks, name=f"block{i + 1}{chr(j + 97)}_", **args, ) b += 1 # Build top x = layers.Conv2D( round_filters(1280), 1, padding="same", use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, name="top_conv", )(x) x = layers.BatchNormalization(axis=bn_axis, name="top_bn")(x) x = layers.Activation(activation, name="top_activation")(x) if include_top: x = layers.GlobalAveragePooling2D(name="avg_pool")(x) if dropout_rate > 0: x = layers.Dropout(dropout_rate, name="top_dropout")(x) imagenet_utils.validate_activation(classifier_activation, weights) x = layers.Dense( classes, activation=classifier_activation, kernel_initializer=DENSE_KERNEL_INITIALIZER, name="predictions", )(x) else: if pooling == "avg": x = layers.GlobalAveragePooling2D(name="avg_pool")(x) elif pooling == "max": x = layers.GlobalMaxPooling2D(name="max_pool")(x) # Ensure that the model takes into account # any potential predecessors of `input_tensor`. if input_tensor is not None: inputs = layer_utils.get_source_inputs(input_tensor) else: inputs = img_input # Create model. model = training.Model(inputs, x, name=model_name) # Load weights. if weights == "imagenet": if include_top: file_suffix = ".h5" file_hash = WEIGHTS_HASHES[model_name[-2:]][0] else: file_suffix = "_notop.h5" file_hash = WEIGHTS_HASHES[model_name[-2:]][1] file_name = model_name + file_suffix weights_path = data_utils.get_file( file_name, BASE_WEIGHTS_PATH + file_name, cache_subdir="models", file_hash=file_hash, ) model.load_weights(weights_path) elif weights is not None: model.load_weights(weights) return model def block( inputs, activation="swish", drop_rate=0.0, name="", filters_in=32, filters_out=16, kernel_size=3, strides=1, expand_ratio=1, se_ratio=0.0, id_skip=True, ): """An inverted residual block. Args: inputs: input tensor. activation: activation function. drop_rate: float between 0 and 1, fraction of the input units to drop. name: string, block label. filters_in: integer, the number of input filters. filters_out: integer, the number of output filters. kernel_size: integer, the dimension of the convolution window. strides: integer, the stride of the convolution. expand_ratio: integer, scaling coefficient for the input filters. 
se_ratio: float between 0 and 1, fraction to squeeze the input filters. id_skip: boolean. Returns: output tensor for the block. """ bn_axis = 3 if backend.image_data_format() == "channels_last" else 1 # Expansion phase filters = filters_in * expand_ratio if expand_ratio != 1: x = layers.Conv2D( filters, 1, padding="same", use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + "expand_conv", )(inputs) x = layers.BatchNormalization(axis=bn_axis, name=name + "expand_bn")(x) x = layers.Activation(activation, name=name + "expand_activation")(x) else: x = inputs # Depthwise Convolution if strides == 2: x = layers.ZeroPadding2D( padding=imagenet_utils.correct_pad(x, kernel_size), name=name + "dwconv_pad", )(x) conv_pad = "valid" else: conv_pad = "same" x = layers.DepthwiseConv2D( kernel_size, strides=strides, padding=conv_pad, use_bias=False, depthwise_initializer=CONV_KERNEL_INITIALIZER, name=name + "dwconv", )(x) x = layers.BatchNormalization(axis=bn_axis, name=name + "bn")(x) x = layers.Activation(activation, name=name + "activation")(x) # Squeeze and Excitation phase if 0 < se_ratio <= 1: filters_se = max(1, int(filters_in * se_ratio)) se = layers.GlobalAveragePooling2D(name=name + "se_squeeze")(x) if bn_axis == 1: se_shape = (filters, 1, 1) else: se_shape = (1, 1, filters) se = layers.Reshape(se_shape, name=name + "se_reshape")(se) se = layers.Conv2D( filters_se, 1, padding="same", activation=activation, kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + "se_reduce", )(se) se = layers.Conv2D( filters, 1, padding="same", activation="sigmoid", kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + "se_expand", )(se) x = layers.multiply([x, se], name=name + "se_excite") # Output phase x = layers.Conv2D( filters_out, 1, padding="same", use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + "project_conv", )(x) x = layers.BatchNormalization(axis=bn_axis, name=name + "project_bn")(x) if id_skip and strides == 1 and filters_in == filters_out: if drop_rate > 0: x = layers.Dropout( drop_rate, noise_shape=(None, 1, 1, 1), name=name + "drop" )(x) x = layers.add([x, inputs], name=name + "add") return x @keras_export( "keras.applications.efficientnet.EfficientNetB0", "keras.applications.EfficientNetB0", ) def EfficientNetB0( include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", **kwargs, ): return EfficientNet( 1.0, 1.0, 224, 0.2, model_name="efficientnetb0", include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, **kwargs, ) @keras_export( "keras.applications.efficientnet.EfficientNetB1", "keras.applications.EfficientNetB1", ) def EfficientNetB1( include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", **kwargs, ): return EfficientNet( 1.0, 1.1, 240, 0.2, model_name="efficientnetb1", include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, **kwargs, ) @keras_export( "keras.applications.efficientnet.EfficientNetB2", "keras.applications.EfficientNetB2", ) def EfficientNetB2( include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", **kwargs, ): return EfficientNet( 1.1, 1.2, 260, 0.3, 
model_name="efficientnetb2", include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, **kwargs, ) @keras_export( "keras.applications.efficientnet.EfficientNetB3", "keras.applications.EfficientNetB3", ) def EfficientNetB3( include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", **kwargs, ): return EfficientNet( 1.2, 1.4, 300, 0.3, model_name="efficientnetb3", include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, **kwargs, ) @keras_export( "keras.applications.efficientnet.EfficientNetB4", "keras.applications.EfficientNetB4", ) def EfficientNetB4( include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", **kwargs, ): return EfficientNet( 1.4, 1.8, 380, 0.4, model_name="efficientnetb4", include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, **kwargs, ) @keras_export( "keras.applications.efficientnet.EfficientNetB5", "keras.applications.EfficientNetB5", ) def EfficientNetB5( include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", **kwargs, ): return EfficientNet( 1.6, 2.2, 456, 0.4, model_name="efficientnetb5", include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, **kwargs, ) @keras_export( "keras.applications.efficientnet.EfficientNetB6", "keras.applications.EfficientNetB6", ) def EfficientNetB6( include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", **kwargs, ): return EfficientNet( 1.8, 2.6, 528, 0.5, model_name="efficientnetb6", include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, **kwargs, ) @keras_export( "keras.applications.efficientnet.EfficientNetB7", "keras.applications.EfficientNetB7", ) def EfficientNetB7( include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", **kwargs, ): return EfficientNet( 2.0, 3.1, 600, 0.5, model_name="efficientnetb7", include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, **kwargs, ) EfficientNetB0.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB0") EfficientNetB1.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB1") EfficientNetB2.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB2") EfficientNetB3.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB3") EfficientNetB4.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB4") EfficientNetB5.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB5") EfficientNetB6.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB6") EfficientNetB7.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB7") @keras_export("keras.applications.efficientnet.preprocess_input") def preprocess_input(x, data_format=None): """A placeholder method for 
backward compatibility.

    The preprocessing logic has been included in the efficientnet model
    implementation. Users are no longer required to call this method to
    normalize the input data. This method does nothing and is kept only as a
    placeholder to align the API surface between the old and new versions of
    the model.

    Args:
        x: A floating point `numpy.array` or a `tf.Tensor`.
        data_format: Optional data format of the image tensor/array. `None`
            means the global setting `tf.keras.backend.image_data_format()`
            is used (unless you changed it, it uses "channels_last").
            Defaults to `None`.

    Returns:
        Unchanged `numpy.array` or `tf.Tensor`.
    """
    return x


@keras_export("keras.applications.efficientnet.decode_predictions")
def decode_predictions(preds, top=5):
    return imagenet_utils.decode_predictions(preds, top=top)


decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
tf-keras/tf_keras/applications/efficientnet.py/0
{ "file_path": "tf-keras/tf_keras/applications/efficientnet.py", "repo_id": "tf-keras", "token_count": 12097 }
183
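Because the stem already contains the `Rescaling` and `Normalization` layers described above, raw `[0, 255]` pixel tensors can be fed directly and `preprocess_input` is a pass-through. A minimal transfer-learning sketch (random weights here to keep it self-contained; `weights="imagenet"` would download the pretrained files):

```python
import tensorflow as tf

# Headless EfficientNet-B0 as a feature extractor with a new classifier head.
base = tf.keras.applications.EfficientNetB0(
    include_top=False, weights=None, pooling="avg", input_shape=(224, 224, 3)
)
outputs = tf.keras.layers.Dense(10, activation="softmax")(base.output)
model = tf.keras.Model(base.input, outputs)
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
```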
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmarks on Bidirectional LSTM on IMDB.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v2 as tf import tf_keras as keras from tf_keras.benchmarks import benchmark_util class BidirectionalLSTMBenchmark(tf.test.Benchmark): """Benchmarks for Bidirectional LSTM using `tf.test.Benchmark`.""" def __init__(self): super().__init__() self.max_feature = 20000 self.max_len = 200 (self.imdb_x, self.imdb_y), _ = keras.datasets.imdb.load_data( num_words=self.max_feature ) self.imdb_x = keras.preprocessing.sequence.pad_sequences( self.imdb_x, maxlen=self.max_len ) def _build_model(self): """Model from https://keras.io/examples/nlp/bidirectional_lstm_imdb/.""" inputs = keras.Input(shape=(None,), dtype="int32") x = keras.layers.Embedding(self.max_feature, 128)(inputs) x = keras.layers.Bidirectional( keras.layers.LSTM(64, return_sequences=True) )(x) x = keras.layers.Bidirectional(keras.layers.LSTM(64))(x) outputs = keras.layers.Dense(1, activation="sigmoid")(x) model = keras.Model(inputs, outputs) return model # In each benchmark test, the required arguments for the # method `measure_performance` include: # x: Input data, it could be Numpy or loaded from tfds. # y: Target data. If `x` is a dataset or generator instance, # `y` should not be specified. # loss: Loss function for model. # optimizer: Optimizer for model. # Check more details in `measure_performance()` method of # benchmark_util. 
def benchmark_bidirect_lstm_imdb_bs_128(self): """Measure performance with batch_size=128.""" batch_size = 128 metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.imdb_x, y=self.imdb_y, batch_size=batch_size, optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"], ) metadata = benchmark_util.get_keras_examples_metadata( "bidirectional_lstm", batch_size ) extras.update(metadata) self.report_benchmark( wall_time=wall_time, metrics=metrics, extras=extras ) def benchmark_bidirect_lstm_imdb_bs_256(self): """Measure performance with batch_size=256.""" batch_size = 256 metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.imdb_x, y=self.imdb_y, batch_size=batch_size, optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"], ) metadata = benchmark_util.get_keras_examples_metadata( "bidirectional_lstm", batch_size ) extras.update(metadata) self.report_benchmark( wall_time=wall_time, metrics=metrics, extras=extras ) def benchmark_bidirect_lstm_imdb_bs_512(self): """Measure performance with batch_size=512.""" batch_size = 512 metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.imdb_x, y=self.imdb_y, batch_size=batch_size, optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"], ) metadata = benchmark_util.get_keras_examples_metadata( "bidirectional_lstm", batch_size ) extras.update(metadata) self.report_benchmark( wall_time=wall_time, metrics=metrics, extras=extras ) def benchmark_bidirect_lstm_imdb_bs_512_gpu_2(self): """Measure performance with batch_size=512, gpu=2 and distribution_strategy=`mirrored`. """ batch_size = 512 metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.imdb_x, y=self.imdb_y, batch_size=batch_size, num_gpus=2, distribution_strategy="mirrored", optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"], ) metadata = benchmark_util.get_keras_examples_metadata( "bidirectional_lstm", batch_size ) extras.update(metadata) self.report_benchmark( wall_time=wall_time, metrics=metrics, extras=extras ) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/benchmarks/keras_examples_benchmarks/bidirectional_lstm_benchmark_test.py/0
{ "file_path": "tf-keras/tf_keras/benchmarks/keras_examples_benchmarks/bidirectional_lstm_benchmark_test.py", "repo_id": "tf-keras", "token_count": 2382 }
184
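The single-GPU cases above differ only in `batch_size`, and the multi-GPU case adds two strategy arguments; if the file were ever refactored, the shared shape could be captured in one helper. A sketch only, not part of the benchmark suite:

```python
# Hypothetical helper factoring out the repeated measure/report pattern
# from the benchmark methods above.
def _run_imdb_case(benchmark, batch_size, **strategy_kwargs):
    metrics, wall_time, extras = benchmark_util.measure_performance(
        benchmark._build_model,
        x=benchmark.imdb_x,
        y=benchmark.imdb_y,
        batch_size=batch_size,
        optimizer="adam",
        loss="binary_crossentropy",
        metrics=["accuracy"],
        **strategy_kwargs,  # e.g. num_gpus=2, distribution_strategy="mirrored"
    )
    extras.update(
        benchmark_util.get_keras_examples_metadata(
            "bidirectional_lstm", batch_size
        )
    )
    benchmark.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras
    )
```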
# Description: # Implementation of TF-Keras benchmarks. # Placeholder: load unaliased py_library load("@org_keras//tf_keras:tf_keras.bzl", "cuda_py_test") package( # copybara:uncomment default_applicable_licenses = ["//tf_keras:license"], default_visibility = ["//visibility:public"], licenses = ["notice"], ) # To run CPU benchmarks: # bazel run -c opt benchmarks_test -- --benchmarks=. # To run GPU benchmarks: # bazel run --config=cuda -c opt --copt="-mavx" benchmarks_test -- \ # --benchmarks=. # To run a subset of benchmarks using --benchmarks flag. # --benchmarks: the list of benchmarks to run. The specified value is interpreted # as a regular expression and any benchmark whose name contains a partial match # to the regular expression is executed. # e.g. --benchmarks=".*lstm*." will run all lstm layer related benchmarks. py_library( name = "saved_model_benchmark_util", srcs = ["saved_model_benchmark_util.py"], srcs_version = "PY3", deps = [ "//:expect_tensorflow_installed", "//tf_keras/benchmarks:profiler_lib", ], ) cuda_py_test( name = "densenet_benchmark_test", srcs = ["densenet_benchmark_test.py"], tags = [ "no_pip", # b/161253163 "no_windows", # b/160628318 ], deps = [ ":saved_model_benchmark_util", "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", "//tf_keras/benchmarks:profiler_lib", ], ) cuda_py_test( name = "efficientnet_benchmark_test", srcs = ["efficientnet_benchmark_test.py"], tags = [ "no_pip", # b/161253163 "no_windows", # b/160628318 ], deps = [ ":saved_model_benchmark_util", "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", "//tf_keras/benchmarks:profiler_lib", ], ) cuda_py_test( name = "inception_resnet_v2_benchmark_test", srcs = ["inception_resnet_v2_benchmark_test.py"], tags = [ "no_pip", # b/161253163 "no_windows", # b/160628318 ], deps = [ ":saved_model_benchmark_util", "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", "//tf_keras/benchmarks:profiler_lib", ], ) cuda_py_test( name = "mobilenet_benchmark_test", srcs = ["mobilenet_benchmark_test.py"], tags = [ "no_pip", # b/161253163 "no_windows", # b/160628318 ], deps = [ ":saved_model_benchmark_util", "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", "//tf_keras/benchmarks:profiler_lib", ], ) cuda_py_test( name = "nasnet_large_benchmark_test", srcs = ["nasnet_large_benchmark_test.py"], tags = [ "no_pip", # b/161253163 "no_windows", # b/160628318 ], deps = [ ":saved_model_benchmark_util", "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", "//tf_keras/benchmarks:profiler_lib", ], ) cuda_py_test( name = "resnet152_v2_benchmark_test", srcs = ["resnet152_v2_benchmark_test.py"], tags = [ "no_pip", # b/161253163 "no_windows", # b/160628318 ], deps = [ ":saved_model_benchmark_util", "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", "//tf_keras/benchmarks:profiler_lib", ], ) cuda_py_test( name = "vgg_benchmark_test", srcs = ["vgg_benchmark_test.py"], tags = [ "no_pip", # b/161253163 "no_windows", # b/160628318 ], deps = [ ":saved_model_benchmark_util", "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", "//tf_keras/benchmarks:profiler_lib", ], ) cuda_py_test( name = "xception_benchmark_test", srcs = ["xception_benchmark_test.py"], tags = [ "no_pip", # b/161253163 "no_windows", # b/160628318 ], deps = [ ":saved_model_benchmark_util", "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", "//tf_keras/benchmarks:profiler_lib", ], )
tf-keras/tf_keras/benchmarks/saved_model_benchmarks/BUILD/0
{ "file_path": "tf-keras/tf_keras/benchmarks/saved_model_benchmarks/BUILD", "repo_id": "tf-keras", "token_count": 2015 }
185
# Description: # Contains the TF-Keras datasets package (internal TensorFlow version). # Placeholder: load unaliased py_library package( # copybara:uncomment default_applicable_licenses = ["//tf_keras:license"], default_visibility = [ "//tf_keras:friends", ], licenses = ["notice"], ) py_library( name = "datasets", srcs = [ "__init__.py", "boston_housing.py", "cifar.py", "cifar10.py", "cifar100.py", "fashion_mnist.py", "imdb.py", "mnist.py", "reuters.py", ], srcs_version = "PY3", visibility = ["//visibility:public"], deps = [ "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras:backend", "//tf_keras/utils:engine_utils", ], )
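# --- Illustrative usage sketch (not part of the BUILD file above) ---
# The py_library target above bundles the TF-Keras dataset loaders; each
# listed module (mnist.py, cifar10.py, imdb.py, ...) exposes a `load_data()`
# entry point on the public `tf.keras.datasets` namespace. A minimal example
# of what depending on this target enables (assumes network access for the
# initial download):
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
assert x_train.shape == (60000, 28, 28)  # 60k grayscale 28x28 training images
assert y_test.shape == (10000,)  # 10k integer class labels in [0, 9]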
tf-keras/tf_keras/datasets/BUILD/0
{ "file_path": "tf-keras/tf_keras/datasets/BUILD", "repo_id": "tf-keras", "token_count": 398 }
186
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for custom training loops.""" import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized from tf_keras import metrics from tf_keras.distribute import strategy_combinations # isort: off from tensorflow.python.framework import ( test_util as tf_test_utils, ) class KerasMetricsTest(tf.test.TestCase, parameterized.TestCase): @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=strategy_combinations.all_strategies + strategy_combinations.multiworker_strategies, mode=["eager"], ) ) def test_multiple_keras_metrics_experimental_run(self, distribution): with distribution.scope(): loss_metric = metrics.Mean("loss", dtype=np.float32) loss_metric_2 = metrics.Mean("loss_2", dtype=np.float32) @tf.function def train_step(): def step_fn(): loss = tf.constant(5.0, dtype=np.float32) loss_metric.update_state(loss) loss_metric_2.update_state(loss) distribution.run(step_fn) train_step() self.assertEqual( loss_metric.result().numpy(), loss_metric_2.result().numpy() ) self.assertEqual(loss_metric.result().numpy(), 5.0) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=strategy_combinations.all_strategies + strategy_combinations.multiworker_strategies, mode=["eager"], ) ) def test_update_keras_metric_declared_in_strategy_scope(self, distribution): with distribution.scope(): metric = metrics.Mean("test_metric", dtype=np.float32) dataset = tf.data.Dataset.range(10).batch(2) dataset = distribution.experimental_distribute_dataset(dataset) @tf.function def step_fn(i): metric.update_state(i) for i in dataset: distribution.run(step_fn, args=(i,)) # This should be the mean of integers 0-9 which has a sum of 45 and a # count of 10 resulting in mean of 4.5. self.assertEqual(metric.result().numpy(), 4.5) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=strategy_combinations.all_strategies, mode=["eager"] ) ) def test_update_keras_metric_outside_strategy_scope_cross_replica( self, distribution ): metric = metrics.Mean("test_metric", dtype=np.float32) with distribution.scope(): for i in range(10): metric.update_state(i) # This should be the mean of integers 0-9 which has a sum of 45 and a # count of 10 resulting in mean of 4.5. 
self.assertEqual(metric.result().numpy(), 4.5) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=strategy_combinations.all_strategies, mode=["eager"] ) ) @tf_test_utils.disable_mlir_bridge( "TODO(b/168036682): Support dynamic padder" ) def test_update_keras_metrics_dynamic_shape(self, distribution): with distribution.scope(): metric = metrics.Mean("test_metric", dtype=np.float32) dataset = tf.data.Dataset.range(10).batch(2, drop_remainder=False) @tf.function def train_fn(dataset): weights = tf.constant([0.1, 0.1]) def step_fn(i): metric.update_state(i, weights) for i in dataset: distribution.run(step_fn, args=(i,)) train_fn(dataset) # This should be the mean of integers 0-9 which has a sum of 45 and a # count of 10 resulting in mean of 4.5. self.assertEqual(metric.result().numpy(), 4.5) if __name__ == "__main__": tf.__internal__.distribute.multi_process_runner.test_main()
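# --- Illustrative sketch (not part of the original test file) ---
# The tests above all follow the same pattern: create a Keras metric inside
# `strategy.scope()` so its variables are created under the strategy, update
# it from a replica function passed to `strategy.run`, then read the
# aggregated value with `result()`. A minimal standalone version of that
# pattern, assuming a default single-machine MirroredStrategy and the
# hypothetical function name below:
import tensorflow as tf

from tf_keras import metrics


def minimal_distributed_metric_pattern():
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        mean = metrics.Mean("loss")

    @tf.function
    def train_step():
        def step_fn():
            # Each replica contributes the same value, so the mean is
            # unchanged regardless of the replica count.
            mean.update_state(tf.constant(5.0))

        strategy.run(step_fn)

    train_step()
    return mean.result()  # == 5.0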
tf-keras/tf_keras/distribute/custom_training_loop_metrics_test.py/0
{ "file_path": "tf-keras/tf_keras/distribute/custom_training_loop_metrics_test.py", "repo_id": "tf-keras", "token_count": 1964 }
187
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Correctness tests for tf.keras CNN models using DistributionStrategy.""" import numpy as np import tensorflow.compat.v2 as tf import tf_keras as keras from tf_keras.distribute import keras_correctness_test_base from tf_keras.optimizers.legacy import gradient_descent from tf_keras.testing_infra import test_utils @test_utils.run_all_without_tensor_float_32( "Uses Dense layers, which call matmul. Even if Dense layers run in " "float64, the test sometimes fails with TensorFloat-32 enabled for unknown " "reasons" ) @test_utils.run_v2_only() class DistributionStrategyCnnCorrectnessTest( keras_correctness_test_base.TestDistributionStrategyCorrectnessBase ): def get_model( self, initial_weights=None, distribution=None, input_shapes=None ): del input_shapes with keras_correctness_test_base.MaybeDistributionScope(distribution): image = keras.layers.Input(shape=(28, 28, 3), name="image") c1 = keras.layers.Conv2D( name="conv1", filters=16, kernel_size=(3, 3), strides=(4, 4), kernel_regularizer=keras.regularizers.l2(1e-4), )(image) if self.with_batch_norm == "regular": c1 = keras.layers.BatchNormalization(name="bn1")(c1) elif self.with_batch_norm == "sync": # Test with parallel batch norms to verify all-reduce works OK. 
bn1 = keras.layers.BatchNormalization( name="bn1", synchronized=True )(c1) bn2 = keras.layers.BatchNormalization( name="bn2", synchronized=True )(c1) c1 = keras.layers.Add()([bn1, bn2]) c1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(c1) logits = keras.layers.Dense(10, activation="softmax", name="pred")( keras.layers.Flatten()(c1) ) model = keras.Model(inputs=[image], outputs=[logits]) if initial_weights: model.set_weights(initial_weights) model.compile( optimizer=gradient_descent.SGD(learning_rate=0.1), loss="sparse_categorical_crossentropy", metrics=["sparse_categorical_accuracy"], ) return model def _get_data(self, count, shape=(28, 28, 3), num_classes=10): centers = np.random.randn(num_classes, *shape) features = [] labels = [] for _ in range(count): label = np.random.randint(0, num_classes, size=1)[0] offset = np.random.normal(loc=0, scale=0.1, size=np.prod(shape)) offset = offset.reshape(shape) labels.append(label) features.append(centers[label] + offset) x = np.asarray(features, dtype=np.float32) y = np.asarray(labels, dtype=np.float32).reshape((count, 1)) return x, y def get_data(self): x_train, y_train = self._get_data( count=keras_correctness_test_base._GLOBAL_BATCH_SIZE * keras_correctness_test_base._EVAL_STEPS ) x_predict = x_train return x_train, y_train, x_predict def get_data_with_partial_last_batch_eval(self): x_train, y_train = self._get_data(count=1280) x_eval, y_eval = self._get_data(count=1000) return x_train, y_train, x_eval, y_eval, x_eval @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base.all_strategy_and_input_config_combinations() + keras_correctness_test_base.multi_worker_mirrored_eager() ) def test_cnn_correctness( self, distribution, use_numpy, use_validation_data ): if ( distribution == tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu # noqa: E501 ): self.skipTest("b/183958183") self.run_correctness_test(distribution, use_numpy, use_validation_data) @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base.all_strategy_and_input_config_combinations() + keras_correctness_test_base.multi_worker_mirrored_eager() ) def test_cnn_with_batch_norm_correctness( self, distribution, use_numpy, use_validation_data ): self.run_correctness_test( distribution, use_numpy, use_validation_data, with_batch_norm="regular", ) @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base.all_strategy_and_input_config_combinations() + keras_correctness_test_base.multi_worker_mirrored_eager() ) def test_cnn_with_sync_batch_norm_correctness( self, distribution, use_numpy, use_validation_data ): if not tf.executing_eagerly(): self.skipTest( "BatchNorm with `synchronized` is not enabled in graph mode." 
) self.run_correctness_test( distribution, use_numpy, use_validation_data, with_batch_norm="sync" ) @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base.all_strategy_and_input_config_combinations_eager() # noqa: E501 + keras_correctness_test_base.multi_worker_mirrored_eager() + keras_correctness_test_base.test_combinations_with_tpu_strategies_graph() # noqa: E501 ) def test_cnn_correctness_with_partial_last_batch_eval( self, distribution, use_numpy, use_validation_data ): self.run_correctness_test( distribution, use_numpy, use_validation_data, partial_last_batch=True, training_epochs=1, ) @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base.all_strategy_and_input_config_combinations_eager() # noqa: E501 + keras_correctness_test_base.multi_worker_mirrored_eager() + keras_correctness_test_base.test_combinations_with_tpu_strategies_graph() # noqa: E501 ) def test_cnn_with_batch_norm_correctness_and_partial_last_batch_eval( self, distribution, use_numpy, use_validation_data ): self.run_correctness_test( distribution, use_numpy, use_validation_data, with_batch_norm="regular", partial_last_batch=True, ) if __name__ == "__main__": tf.__internal__.distribute.multi_process_runner.test_main()
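# --- Illustrative sketch (not part of the original test file) ---
# The "sync" branch above deliberately builds two parallel synchronized
# BatchNormalization layers over the same tensor and adds their outputs,
# purely to exercise the cross-replica all-reduce twice per step. Outside a
# test, a single synchronized batch norm is the usual form; a hedged sketch
# of that layer stack (hypothetical helper name, normally built inside
# `strategy.scope()`):
import tf_keras as keras


def build_sync_bn_model():
    image = keras.layers.Input(shape=(28, 28, 3), name="image")
    x = keras.layers.Conv2D(16, (3, 3), strides=(4, 4))(image)
    # `synchronized=True` aggregates batch statistics across all replicas
    # of a tf.distribute strategy instead of using per-replica statistics.
    x = keras.layers.BatchNormalization(synchronized=True)(x)
    x = keras.layers.GlobalAveragePooling2D()(x)
    logits = keras.layers.Dense(10, activation="softmax", name="pred")(x)
    return keras.Model(inputs=[image], outputs=[logits])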
tf-keras/tf_keras/distribute/keras_image_model_correctness_test.py/0
{ "file_path": "tf-keras/tf_keras/distribute/keras_image_model_correctness_test.py", "repo_id": "tf-keras", "token_count": 3252 }
188
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test multi-worker TF-Keras.""" import collections import copy import functools import json import os import sys import threading import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras import backend from tf_keras import callbacks from tf_keras import metrics as metrics_module from tf_keras import models from tf_keras.distribute import multi_worker_testing_utils from tf_keras.optimizers import optimizer_v1 from tf_keras.optimizers.legacy import rmsprop from tf_keras.utils import kpl_test_utils def _clone_and_build_model(model, strategy): # The new "original" model in worker 0. with strategy.scope(): cloned_model = models.clone_model(model) # Compile and build model. if isinstance(model.optimizer, optimizer_v1.TFOptimizer): optimizer = model.optimizer # TODO(yuefengz): figure out why the optimizer here is still a # TFOptimizer. while isinstance(optimizer, optimizer_v1.TFOptimizer): optimizer = optimizer.optimizer optimizer = copy.deepcopy(optimizer) else: optimizer_config = model.optimizer.get_config() optimizer = type(model.optimizer).from_config(optimizer_config) cloned_model.compile( optimizer, model.loss, metrics=metrics_module.clone_metrics(model._compile_metrics), loss_weights=model.loss_weights, sample_weight_mode=model.sample_weight_mode, weighted_metrics=metrics_module.clone_metrics( model._compile_weighted_metrics ), ) return cloned_model # TODO(b/123918215): Possibly merge this Callback with keras_test.Counter. class MultiWorkerVerificationCallback(callbacks.Callback): """MultiWorkerVerificationCallback verifies the callbacks in multi-worker scheme. This Callback is intended to be used for verifying the callback is indeed called the correct number of times in various task types. Attributes: _task_dict: A nested dictionary storing the number of times a callback has been called in specific task type, task index, and method name. Look up structure is task_name -> task_id -> tracking_method_name -> invoke_count For example, a _task_dict of { 'ps': { 0: { 'on_epoch_begin': 2 }, 1: { 'on_epoch_begin': 2 } }, 'worker': { 0: { 'on_epoch_begin': 2 }, 1: { 'on_epoch_begin': 2 } } } indicates the ps task has 'on_epoch_begin' called twice on each of the two indices, and likewise for worker task. """ # TODO(rchao): Add other method calls to verify. METHODS_TO_VERIFY = ["on_epoch_begin"] def __init__(self, num_epoch, num_worker): """Initialize a MultiWorkerVerificationCallback. Args: num_epoch: Number of epochs this Callback is expected to be called for. num_worker: Number of workers this Callback is expected to be called from. 
""" super().__init__() self._num_epoch = num_epoch self._num_worker = num_worker self._task_dict = { key: collections.defaultdict(lambda: collections.defaultdict(int)) for key in ["ps", "worker", "chief"] } self._lock = threading.Lock() self._is_between_graph = None self.wrap_methods(self.METHODS_TO_VERIFY) @property def is_between_graph(self): return self._is_between_graph @is_between_graph.setter def is_between_graph(self, is_between_graph): self._is_between_graph = is_between_graph def wrap_methods(self, method_names): """Wrap methods so that the counts of calls are tracked. Args: method_names: A list of names of methods to track calls. """ for method_name in method_names: method = getattr(self, method_name) def wrapped_method(method_to_wrap, name, *arg, **kwargs): # Use lock to ensure += operation is thread-safe. with self._lock: task_config = json.loads(os.environ["TF_CONFIG"])["task"] self._task_dict[task_config["type"]][task_config["index"]][ name ] += 1 method_to_wrap(*arg, **kwargs) setattr( self, method_name, functools.partial(wrapped_method, method, method_name), ) def verify(self, test_case): method_count_dict = { method_name: self._num_epoch for method_name in self.METHODS_TO_VERIFY } assert self._is_between_graph is not None if self._is_between_graph: # TODO(b/124171024): In between-graph replication, by default only # the chief calls callback. Fix this test to cover that, as well as # the rare cases where all workers call. worker_call_count = { i: method_count_dict for i in range(0, self._num_worker) } else: # If in-graph, only the first worker calls callback methods. worker_call_count = {0: method_count_dict} chief_call_count = {0: method_count_dict} task_config = json.loads(os.environ["TF_CONFIG"])["task"]["type"] test_case.assertDictEqual( self._task_dict, { # PS' callback is not supposed to be called. "ps": {}, # Worker or chief should only be called on worker/chief. 
"worker": worker_call_count if task_config == "worker" else {}, "chief": chief_call_count if task_config == "chief" else {}, }, ) class KerasMultiWorkerTestIndependentWorker( tf.test.TestCase, parameterized.TestCase ): @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( mode=["eager"], strategy=[ tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu, # noqa: E501 tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu, # noqa: E501 ], ) ) def testSimpleModelIndependentWorkerSync(self, strategy): verification_callback = MultiWorkerVerificationCallback( num_epoch=2, num_worker=len( json.loads(os.environ["TF_CONFIG"])["cluster"]["worker"] ), ) verification_callback.is_between_graph = ( strategy.extended.experimental_between_graph ) batch_size = 64 steps = 2 train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset( batch_size, steps ) with strategy.scope(): model = multi_worker_testing_utils.get_mnist_model((28, 28, 1)) orig_loss, _ = model.evaluate(train_ds, steps=steps) history = model.fit( x=train_ds, epochs=2, steps_per_epoch=steps, callbacks=[verification_callback], ) self.assertIsInstance(history, keras.callbacks.History) trained_loss, _ = model.evaluate(train_ds, steps=steps) self.assertLess(trained_loss, orig_loss) verification_callback.verify(self) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( mode=["eager"], strategy=[ tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu, # noqa: E501 tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu, # noqa: E501 ], ) ) def test_distribution_reduction_method_auto_default_train_step( self, strategy ): BATCH = 4 EPOCHS = 1 STEPS = 2 # Dataset's targets are [0, 1, 2, 3, 4, 5, 6, 7]: train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset( BATCH, STEPS, target_values="increasing" ) # A model that always outputs `sum(inputs*0) + 1 = 1` with strategy.scope(): inputs = keras.Input(shape=(28, 28, 1)) x = keras.layers.Flatten()(inputs) x = keras.layers.Dense( 1, kernel_initializer="zeros", bias_initializer="ones" )(x) model = keras.Model(inputs=inputs, outputs=x) model.trainable = False # model.distribute_reduction_method = 'auto' model.compile( loss=keras.losses.MeanAbsoluteError( reduction=keras.losses.losses_utils.ReductionV2.NONE ), optimizer=multi_worker_testing_utils.gradient_descent.SGD( learning_rate=0.001 ), metrics=["mse"], ) # For every output x_i = 1, and increasing target values in [0, 8): # loss_i = |i-1| # loss = (|0-1| + |1-1| + |2-1| + ... 
|7-1|) / (BATCH*STEPS) # = (1+0+1+2+3+4+5+6) / 8 = 2.75 orig_loss, _ = model.evaluate(train_ds, steps=STEPS) self.assertEqual(2.75, orig_loss) history = model.fit(train_ds, epochs=EPOCHS, steps_per_epoch=STEPS) self.assertAllClose(history.history["loss"], [2.75] * EPOCHS) trained_loss, _ = model.evaluate(train_ds, steps=STEPS) self.assertEqual(2.75, trained_loss) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( mode=["eager"], strategy=[ tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu, # noqa: E501 tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu, # noqa: E501 ], ) ) def test_distribution_reduction_method_auto_custom_train_step( self, strategy ): BATCH = 4 EPOCHS = 1 STEPS = 2 # Dataset's targets are [0, 1, 2, 3, 4, 5, 6, 7]: train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset( BATCH, STEPS, target_values="increasing" ) # A model that has loss=sum(targets) / BATCH: class MyModel(keras.Model): def train_step(self, data): _, y = data loss_value = tf.cast(y, tf.float32) loss_value = tf.nn.compute_average_loss( loss_value, global_batch_size=BATCH ) return {"loss": loss_value} def test_step(self, data): _, y = data loss_value = tf.cast(y, tf.float32) loss_value = tf.nn.compute_average_loss( loss_value, global_batch_size=BATCH ) return {"loss": loss_value} with strategy.scope(): inputs = keras.Input(shape=(28, 28, 1)) x = keras.layers.Flatten()(inputs) x = keras.layers.Dense( 1, kernel_initializer="ones", bias_initializer="ones" )(x) model = MyModel(inputs=inputs, outputs=x) # model.distribute_reduction_method = 'auto' model.compile( optimizer=multi_worker_testing_utils.gradient_descent.SGD( learning_rate=0.001 ), ) # For epochs=1 steps=2 replicas=2 batch=4, and increasing target vals, # loss_e0_s0_r0 = [0+1]/BATCH = 1/4 # loss_e0_s0_r1 = [2+3]/BATCH = 5/4 # loss_e0_s0 = 1/4 + 5/4 = 1.5 # loss_e0_s1_r0 = [4+5]/BATCH = 9/4 # loss_e0_s2_r1 = [6+7]/BATCH = 13/4 # loss_e0_s1 = 9/4 + 13/4 = 5.5 # loss_e0 = last([1.5, 5.5]) history = model.fit(train_ds, epochs=EPOCHS, steps_per_epoch=STEPS) self.assertAllClose([5.5], history.history["loss"]) eval_output = model.evaluate(train_ds, steps=STEPS) self.assertAllClose(5.5, eval_output) class KPLMultiWorkerTest(tf.test.TestCase, parameterized.TestCase): @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( mode=["eager"], use_adapt=[False], # TODO(b/180742437): Add tests for using adapt. 
strategy=[ tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu, # noqa: E501 # TODO(b/183956672): Re-enable # strategy_combinations.multi_worker_mirrored_2x2_gpu, ], ) ) def testTrainAndServeWithKPL(self, use_adapt, strategy): test_utils_obj = kpl_test_utils.DistributeKplTestUtils() with strategy.scope(): ( feature_mapper, label_mapper, ) = test_utils_obj.define_kpls_for_training(use_adapt) model = test_utils_obj.define_model() optimizer = rmsprop.RMSprop(learning_rate=0.1) accuracy = keras.metrics.Accuracy() def dataset_fn(_): return test_utils_obj.dataset_fn(feature_mapper, label_mapper) @tf.function def train_step(iterator): """The step function for one training step.""" def step_fn(inputs): """The computation to run on each worker.""" features, labels = inputs with tf.GradientTape() as tape: pred = model(features, training=True) loss = keras.losses.binary_crossentropy(labels, pred) loss = tf.nn.compute_average_loss(loss) grads = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients( list(zip(grads, model.trainable_variables)) ) actual_pred = tf.cast(tf.greater(pred, 0.5), tf.int64) accuracy.update_state(labels, actual_pred) strategy.run(step_fn, args=(next(iterator),)) distributed_dataset = strategy.distribute_datasets_from_function( dataset_fn ) distributed_iterator = iter(distributed_dataset) num_epochs = 4 num_steps = 7 for _ in range(num_epochs): accuracy.reset_state() for _ in range(num_steps): train_step(distributed_iterator) self.assertGreater(accuracy.result().numpy(), 0.5) self.assertEqual( optimizer.iterations.numpy(), num_epochs * num_steps ) # Test save/load/serving the trained model. test_utils_obj.test_save_load_serving_model( model, feature_mapper, test_utils_obj.define_reverse_lookup_layer() ) if __name__ == "__main__": # Enable manual variable initialization to make sure variables are # initialized by `init_restore_or_wait_for_variables`. backend.manual_variable_initialization(True) with tf.compat.v1.test.mock.patch.object(sys, "exit", os._exit): tf.__internal__.distribute.multi_process_runner.test_main()
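# --- Illustrative sketch (not part of the original test file) ---
# MultiWorkerVerificationCallback above keys its call counts on the task type
# and index parsed from the TF_CONFIG environment variable, and
# testSimpleModelIndependentWorkerSync reads the worker list from the same
# variable. For reference, a two-worker cluster spec (hypothetical host:port
# values) serializes like this:
import json

example_tf_config = {
    "cluster": {"worker": ["localhost:12345", "localhost:12346"]},
    "task": {"type": "worker", "index": 0},  # this process is worker 0
}
serialized = json.dumps(example_tf_config)
# Each worker process exports this (with its own "index") before creating
# the strategy, e.g. os.environ["TF_CONFIG"] = serialized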
tf-keras/tf_keras/distribute/multi_worker_test.py/0
{ "file_path": "tf-keras/tf_keras/distribute/multi_worker_test.py", "repo_id": "tf-keras", "token_count": 8086 }
189
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Network` is a way to compose layers: the topological form of a `Model`."""

import collections
import copy
import itertools
import warnings

import tensorflow.compat.v2 as tf

from tf_keras import backend
from tf_keras.dtensor import layout_map as layout_map_lib
from tf_keras.engine import base_layer
from tf_keras.engine import base_layer_utils
from tf_keras.engine import functional_utils
from tf_keras.engine import input_layer as input_layer_module
from tf_keras.engine import input_spec
from tf_keras.engine import node as node_module
from tf_keras.engine import training as training_lib
from tf_keras.engine import training_utils
from tf_keras.saving import serialization_lib
from tf_keras.saving.legacy import serialization
from tf_keras.saving.legacy.saved_model import json_utils
from tf_keras.saving.legacy.saved_model import network_serialization
from tf_keras.saving.legacy.saved_model import utils as saved_model_utils
from tf_keras.utils import generic_utils
from tf_keras.utils import tf_inspect
from tf_keras.utils import tf_utils

# isort: off
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.docs import doc_controls


class Functional(training_lib.Model):
    """A `Functional` model is a `Model` defined as a directed graph of layers.

    Three types of `Model` exist: subclassed `Model`, `Functional` model,
    and `Sequential` (a special case of `Functional`).
    In general, more TF-Keras features are supported with `Functional`
    than with subclassed `Model`s, specifically:

    - Model cloning (`keras.models.clone`)
    - Serialization (`model.get_config()/from_config`, `model.to_json()`)
    - Whole-model saving (`model.save()`)

    A `Functional` model can be instantiated by passing two arguments to
    `__init__`. The first argument is the `keras.Input` Tensors that represent
    the inputs to the model. The second argument specifies the output
    tensors that represent the outputs of this model. Both arguments can be a
    nested structure of tensors.

    Example:

    ```
    inputs = {'x1': keras.Input(shape=(10,)), 'x2': keras.Input(shape=(1,))}
    t = keras.layers.Dense(1, activation='relu')(inputs['x1'])
    outputs = keras.layers.Add()([t, inputs['x2']])
    model = keras.Model(inputs, outputs)
    ```

    A `Functional` model constructed using the Functional API can also
    include raw TensorFlow functions, with the exception of functions that
    create Variables or assign ops.

    Example:

    ```python
    inputs = keras.Input(shape=(10,))
    x = keras.layers.Dense(1)(inputs)
    outputs = tf.nn.relu(x)
    model = keras.Model(inputs, outputs)
    ```

    A new `Functional` model can also be created by using the
    intermediate tensors. This enables you to quickly extract sub-components
    of the model.

    Example:

    ```python
    inputs = keras.Input(shape=(None, None, 3))
    processed = keras.layers.RandomCrop(width=32, height=32)(inputs)
    conv = keras.layers.Conv2D(filters=2, kernel_size=3)(processed)
    pooling = keras.layers.GlobalAveragePooling2D()(conv)
    feature = keras.layers.Dense(10)(pooling)

    full_model = keras.Model(inputs, feature)
    backbone = keras.Model(processed, conv)
    activations = keras.Model(conv, feature)
    ```

    Note that the `backbone` and `activations` models are not
    created with `keras.Input` objects, but with the tensors that originate
    from `keras.Input` objects. Under the hood, the layers and weights will
    be shared across these models, so that the user can train the
    `full_model`, and use `backbone` or `activations` to do feature
    extraction. The inputs and outputs of the model can be nested structures
    of tensors as well, and the created models are standard `Functional`
    models that support all the existing APIs.

    Args:
        inputs: List of input tensors (must be created via `tf.keras.Input()`
            or originated from `tf.keras.Input()`).
        outputs: List of output tensors.
        name: String, optional. Name of the model.
        trainable: Boolean, optional. If the model's variables should be
            trainable.
    """

    # See tf.Module for the usage of this property.
    # The key of _layer_call_argspecs is a layer. tf.Module._flatten will fail
    # to flatten the key since it is trying to convert Trackable/Layer to a
    # string.
    _TF_MODULE_IGNORED_PROPERTIES = frozenset(
        itertools.chain(
            (
                "_layer_call_argspecs",
                "_output_mask_cache",
                "_output_tensor_cache",
                "_output_shape_cache",
            ),
            training_lib.Model._TF_MODULE_IGNORED_PROPERTIES,
        )
    )

    @tf.__internal__.tracking.no_automatic_dependency_tracking
    def __init__(self, inputs, outputs, name=None, trainable=True, **kwargs):
        # This is used by the Model class, since we have some logic to swap
        # the class in the __new__ method, which will lead to __init__ getting
        # invoked twice. Use `skip_init` to skip one of the invocations of
        # __init__ to avoid any side effects.
        skip_init = kwargs.pop("skip_init", False)
        if skip_init:
            return
        generic_utils.validate_kwargs(kwargs, {})
        super().__init__(name=name, trainable=trainable)
        # Check if the inputs contain any intermediate `KerasTensor` (not
        # created by tf.keras.Input()). In this case we need to clone the
        # `Node` and `KerasTensor` objects to mimic rebuilding a new model
        # from new inputs. This feature is only enabled in TF2 not in v1
        # graph mode.
        if tf.compat.v1.executing_eagerly_outside_functions():
            if not all(
                [
                    functional_utils.is_input_keras_tensor(t)
                    for t in tf.nest.flatten(inputs)
                ]
            ):
                inputs, outputs = functional_utils.clone_graph_nodes(
                    inputs, outputs
                )

        self._init_graph_network(inputs, outputs)

    @tf.__internal__.tracking.no_automatic_dependency_tracking
    def _init_graph_network(self, inputs, outputs):
        # This method is needed for Sequential to reinitialize graph network
        # when layer is added or removed.
        self._is_graph_network = True

        # Normalize and set self.inputs, self.outputs.
        if isinstance(inputs, list) and len(tf.nest.flatten(inputs)) == 1:
            inputs = inputs[0]
        if isinstance(outputs, list) and len(tf.nest.flatten(outputs)) == 1:
            outputs = outputs[0]
        self._nested_inputs = inputs
        self._nested_outputs = outputs
        self.inputs = tf.nest.flatten(inputs)
        self.outputs = tf.nest.flatten(outputs)

        # Models constructed with a single Tensor or list of Tensors can
        # be called with a dict, where the keys of the dict are the names
        # of the `Input` objects. Extra keys are ignored with a warning.
if not tf.nest.is_nested(self._nested_inputs): self._enable_dict_to_input_mapping = True elif isinstance(self._nested_inputs, (list, tuple)) and not any( tf.nest.is_nested(t) for t in self._nested_inputs ): self._enable_dict_to_input_mapping = True elif isinstance(self._nested_inputs, dict) and not any( tf.nest.is_nested(t) for t in self._nested_inputs.values() ): self._enable_dict_to_input_mapping = True else: self._enable_dict_to_input_mapping = False if not tf.compat.v1.executing_eagerly_outside_functions(): if any( not hasattr(tensor, "_keras_history") for tensor in self.outputs ): base_layer_utils.create_keras_history(self._nested_outputs) self._validate_graph_inputs_and_outputs() # A Network does not create weights of its own, thus it is already # built. self.built = True self._build_input_shape = tf.nest.map_structure( lambda x: x.shape, inputs ) self._compute_output_and_mask_jointly = True # `_expects_training_arg` is True since the `training` argument is # always present in the signature of the `call` method of a graph # network. self._call_spec.expects_training_arg = True self._call_spec.expects_mask_arg = True # A graph network does not autocast inputs, as its layers will cast them # instead. self._autocast = False self._input_layers = [] self._output_layers = [] self._input_coordinates = [] self._output_coordinates = [] # This is for performance optimization when calling the Network on new # inputs. Every time the Network is called on a set on input tensors, we # compute the output tensors, output masks and output shapes in one # pass, then cache them here. When any of these outputs is queried # later, we retrieve it from there instead of recomputing it. self._output_mask_cache = {} self._output_tensor_cache = {} self._output_shape_cache = {} # Build self._output_layers: for x in self.outputs: ( layer, node_index, tensor_index, ) = x._keras_history self._output_layers.append(layer) self._output_coordinates.append((layer, node_index, tensor_index)) # Build self._input_layers: for x in self.inputs: ( layer, node_index, tensor_index, ) = x._keras_history # It's supposed to be an input layer, so only one node # and one tensor output. assert node_index == 0 assert tensor_index == 0 self._input_layers.append(layer) self._input_coordinates.append((layer, node_index, tensor_index)) # Keep track of the network's nodes and layers. nodes, nodes_by_depth, layers, _ = _map_graph_network( self.inputs, self.outputs ) self._network_nodes = nodes self._nodes_by_depth = nodes_by_depth self._self_tracked_trackables = layers self._layer_call_argspecs = {} for layer in self._self_tracked_trackables: self._layer_call_argspecs[layer] = tf_inspect.getfullargspec( layer.call ) # Build self.input_names and self.output_names. self._set_output_names() self.input_names = [] self._feed_input_names = [] self._feed_inputs = [] self._feed_input_shapes = [] for layer in self._input_layers: self.input_names.append(layer.name) if layer.is_placeholder: self._feed_input_names.append(layer.name) # Use batch_input_shape here because non-eager composite tensors # may not have a shape attribute that's meaningful (sparse, for # instance, has a tensor that's non-constant and needs to be # fed). This means that input layers that create placeholders # will need to have the batch_input_shape attr to allow for # input shape validation. 
self._feed_input_shapes.append(layer._batch_input_shape) self._feed_inputs.append(layer.input) self._compute_tensor_usage_count() self._set_save_spec(self._nested_inputs) tf_utils.assert_no_legacy_layers(self.layers) # Note that this method is used by both functional and sequential # models, so we can't just have this method in functional.__init__, # which will miss the coverage of sequential model. if self._layout_map is not None: layout_map_lib._map_functional_model_variable( self, self._layout_map ) @property def input(self): """Retrieves the input tensor(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer. Returns: Input tensor or list of input tensors. Raises: RuntimeError: If called in Eager mode. AttributeError: If no inbound nodes are found. """ return self._nested_inputs @property def input_shape(self): """Retrieves the input shape(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer, or if all inputs have the same shape. Returns: Input shape, as an integer shape tuple (or list of shape tuples, one tuple per input tensor). Raises: AttributeError: if the layer has no defined input_shape. RuntimeError: if called in Eager mode. """ return tf.nest.map_structure(backend.int_shape, self.input) @property def input_spec(self): if hasattr(self, "_manual_input_spec"): return self._manual_input_spec if isinstance(self._nested_inputs, (dict, list, tuple)) and len( self._nested_inputs ) != len(self.inputs): # Case where we have a nested structure. # In such a case we can't safely run any checks. return None if isinstance(self._nested_inputs, dict): # Case where `_nested_inputs` is a plain dict of Inputs. names = sorted(self._nested_inputs.keys()) return [ input_spec.InputSpec( shape=shape_with_no_batch_size(self._nested_inputs[name]), allow_last_axis_squeeze=True, name=name, ) for name in names ] else: # Single input, or list / tuple of inputs. # The data may be passed as a dict keyed by input name. return [ input_spec.InputSpec( shape=shape_with_no_batch_size(x), allow_last_axis_squeeze=True, name=x._keras_history.layer.name, ) for x in self.inputs ] @input_spec.setter def input_spec(self, value): self._manual_input_spec = value @property def output(self): """Retrieves the output tensor(s) of a layer. Only applicable if the layer has exactly one output, i.e. if it is connected to one incoming layer. Returns: Output tensor or list of output tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers. RuntimeError: if called in Eager mode. """ return self._nested_outputs @property def output_shape(self): """Retrieves the output shape(s) of a layer. Only applicable if the layer has one output, or if all outputs have the same shape. Returns: Output shape, as an integer shape tuple (or list of shape tuples, one tuple per output tensor). Raises: AttributeError: if the layer has no defined output shape. RuntimeError: if called in Eager mode. """ return tf.nest.map_structure(backend.int_shape, self.output) def _set_output_names(self): """Assigns unique names to the Network's outputs. Output layers with multiple output tensors would otherwise lead to duplicate names in self.output_names. 
""" uniquified = [] output_names = set() prefix_count = {} for layer in self._output_layers: proposal = layer.name while proposal in output_names: existing_count = prefix_count.get(layer.name, 1) proposal = f"{layer.name}_{existing_count}" prefix_count[layer.name] = existing_count + 1 output_names.add(proposal) uniquified.append(proposal) self.output_names = uniquified @property def _layer_checkpoint_dependencies(self): """Dictionary of layer dependencies to be included in the checkpoint.""" weight_layer_index = 0 dependencies = collections.OrderedDict() for layer_index, layer in enumerate(self.layers): try: if layer.weights: # Keep a separate index for layers which have weights. This # allows users to insert Layers without weights anywhere in # the network without breaking checkpoints. dependencies[ "layer_with_weights-%d" % weight_layer_index ] = layer weight_layer_index += 1 except ValueError: # The layer might have weights, but may not be built yet. We # just treat it as layer without weight. pass # Even if it doesn't have weights, we should still track everything # in case it has/will have Trackable dependencies. dependencies["layer-%d" % layer_index] = layer return dependencies def _trackable_children(self, save_type="checkpoint", **kwargs): dependencies = self._layer_checkpoint_dependencies dependencies.update(super()._trackable_children(save_type, **kwargs)) return dependencies def _lookup_dependency(self, name, cached_dependencies=None): if cached_dependencies: return cached_dependencies.get(name) # Fall back to slow lookup (`layer_checkpoint_dependencies` does a # thorough check of all layer to see if they contain weights.) layer_dependencies = self._layer_checkpoint_dependencies if name in layer_dependencies: return layer_dependencies[name] return super()._lookup_dependency(name) def _handle_deferred_layer_dependencies(self, layers): """Handles layer checkpoint dependencies that are added after init.""" layer_checkpoint_dependencies = self._layer_checkpoint_dependencies layer_to_name = {v: k for k, v in layer_checkpoint_dependencies.items()} for layer in layers: if layer in layer_to_name: self._handle_deferred_dependencies( name=layer_to_name[layer], trackable=layer ) @property def _should_compute_mask(self): return True def compute_mask(self, inputs, mask): # TODO(omalleyt): b/123540974 This function is not really safe to call # by itself because it will duplicate any updates and losses in graph # mode by `call`ing the Layers again. output_tensors = self._run_internal_graph(inputs, mask=mask) return tf.nest.map_structure( lambda t: getattr(t, "_keras_mask", None), output_tensors ) @doc_controls.do_not_doc_inheritable def call(self, inputs, training=None, mask=None): """Calls the model on new inputs. In this case `call` just reapplies all ops in the graph to the new inputs (e.g. build a new computational graph from the provided inputs). Args: inputs: A tensor or list of tensors. training: Boolean or boolean scalar tensor, indicating whether to run the `Network` in training mode or inference mode. mask: A mask or list of masks. A mask can be either a tensor or None (no mask). Returns: A tensor if there is a single output, or a list of tensors if there are more than one outputs. """ return self._run_internal_graph(inputs, training=training, mask=mask) def compute_output_shape(self, input_shape): # Convert any shapes in tuple format to TensorShapes. 
input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False) if len(tf.nest.flatten(input_shape)) != len( tf.nest.flatten(self._input_layers) ): raise ValueError( f"Invalid `input_shape` argument {input_shape}: " f"the model expects {len(self._input_layers)} " "input tensors." ) # Use the tuple of TensorShape as the cache key, since tuple is hashable # and can be used as hash key. try: cache_key = tuple( tf_utils.convert_shapes(input_shape, to_tuples=True) ) if cache_key in self._output_shape_cache: # Cache hit. Return shapes as TensorShapes. return self._output_shape_cache[cache_key] except ValueError: # In case there are unknown TensorShape, eg for sparse tensor input, # We skip the caching since the shape is unknown. pass layers_to_output_shapes = {} for layer, shape in zip( self._input_layers, tf.nest.flatten(input_shape) ): # It's an input layer: then `compute_output_shape` is identity, # and there is only one node and one tensor.. shape_key = layer.name + "_0_0" layers_to_output_shapes[shape_key] = shape depth_keys = list(self._nodes_by_depth.keys()) depth_keys.sort(reverse=True) # Iterate over nodes, by depth level. if len(depth_keys) > 1: for depth in depth_keys: nodes = self._nodes_by_depth[depth] for node in nodes: layer = node.layer if layer in self._input_layers: # We've already covered the input layers # a few lines above. continue # Get the input shapes for the first argument of the node layer_input_shapes = [] layer_inputs = node.call_args[0] for layer_input in tf.nest.flatten(layer_inputs): kh = layer_input._keras_history input_layer_key = kh.layer.name + "_%s_%s" % ( kh.node_index, kh.tensor_index, ) layer_input_shapes.append( layers_to_output_shapes[input_layer_key] ) layer_input_shapes = tf.nest.pack_sequence_as( layer_inputs, layer_input_shapes ) # Layers expect shapes to be tuples for # `compute_output_shape`. layer_input_shapes = tf_utils.convert_shapes( layer_input_shapes, to_tuples=True ) layer_output_shapes = layer.compute_output_shape( layer_input_shapes ) # Convert back to TensorShapes. layer_output_shapes = tf_utils.convert_shapes( layer_output_shapes, to_tuples=False ) node_index = layer._inbound_nodes.index(node) for j, shape in enumerate( tf.nest.flatten(layer_output_shapes) ): shape_key = layer.name + f"_{node_index}_{j}" layers_to_output_shapes[shape_key] = shape # Read final output shapes from layers_to_output_shapes. output_shapes = [] for i in range(len(self._output_layers)): layer, node_index, tensor_index = self._output_coordinates[i] shape_key = layer.name + f"_{node_index}_{tensor_index}" output_shapes.append(layers_to_output_shapes[shape_key]) output_shapes = tf.nest.pack_sequence_as( self._nested_outputs, output_shapes ) # Store in cache. self._output_shape_cache[cache_key] = output_shapes # Return shapes as TensorShapes. return output_shapes def _init_set_name(self, name, zero_based=True): if not name: cls_name = self.__class__.__name__ if self.__class__ == Functional: # Hide the functional class name from user, since its not a # public visible class. Use "Model" instead, cls_name = "Model" self._name = backend.unique_object_name( generic_utils.to_snake_case(cls_name), zero_based=zero_based ) else: self._name = name def _run_internal_graph(self, inputs, training=None, mask=None): """Computes output tensors for new inputs. # Note: - Can be run on non-Keras tensors. Args: inputs: Tensor or nested structure of Tensors. training: Boolean learning phase. mask: (Optional) Tensor or nested structure of Tensors. 
Returns: output_tensors """ inputs = self._flatten_to_reference_inputs(inputs) if mask is None: masks = [None] * len(inputs) else: masks = self._flatten_to_reference_inputs(mask) for input_t, mask in zip(inputs, masks): input_t._keras_mask = mask # Dictionary mapping reference tensors to computed tensors. tensor_dict = {} tensor_usage_count = self._tensor_usage_count for x, y in zip(self.inputs, inputs): y = self._conform_to_reference_input(y, ref_input=x) x_id = str(id(x)) tensor_dict[x_id] = [y] * tensor_usage_count[x_id] nodes_by_depth = self._nodes_by_depth depth_keys = list(nodes_by_depth.keys()) depth_keys.sort(reverse=True) for depth in depth_keys: nodes = nodes_by_depth[depth] for node in nodes: if node.is_input: continue # Input tensors already exist. if any(t_id not in tensor_dict for t_id in node.flat_input_ids): continue # Node is not computable, try skipping. args, kwargs = node.map_arguments(tensor_dict) outputs = node.layer(*args, **kwargs) # Update tensor_dict. for x_id, y in zip( node.flat_output_ids, tf.nest.flatten(outputs) ): tensor_dict[x_id] = [y] * tensor_usage_count[x_id] output_tensors = [] for x in self.outputs: x_id = str(id(x)) assert x_id in tensor_dict, "Could not compute output " + str(x) output_tensors.append(tensor_dict[x_id].pop()) return tf.nest.pack_sequence_as(self._nested_outputs, output_tensors) def _flatten_to_reference_inputs(self, tensors): """Maps `tensors` to their respective `keras.Input`.""" if self._enable_dict_to_input_mapping and isinstance(tensors, dict): ref_inputs = self._nested_inputs if not tf.nest.is_nested(ref_inputs): ref_inputs = [self._nested_inputs] if isinstance(ref_inputs, dict): # In the case that the graph is constructed with dict input # tensors, We will use the original dict key to map with the # keys in the input data. Note that the model.inputs is using # nest.flatten to process the input tensors, which means the # dict input tensors are ordered by their keys. ref_input_names = sorted(ref_inputs.keys()) else: ref_input_names = [ inp._keras_history.layer.name for inp in ref_inputs ] # Raise an warning if there are more input data comparing to input # tensor if len(tensors) > len(ref_input_names): warnings.warn( "Input dict contained keys {} which did not match any " "model input. They will be ignored by the model.".format( [n for n in tensors.keys() if n not in ref_input_names] ), stacklevel=2, ) try: # Flatten in the order `Input`s were passed during Model # construction. return [tensors[n] for n in ref_input_names] except KeyError: # TODO(b/151582614) return tf.nest.flatten(tensors) # Otherwise both self.inputs and tensors will already be in same order. return tf.nest.flatten(tensors) def _conform_to_reference_input(self, tensor, ref_input): """Set shape and dtype based on `keras.Input`s.""" if isinstance(tensor, tf.Tensor): # Allow (None,) and (None, 1) Tensors to be passed interchangeably. # Use the shape specified by the `keras.Input`. t_shape = tensor.shape t_rank = t_shape.rank ref_shape = ref_input.shape ref_rank = ref_shape.rank keras_history = getattr(tensor, "_keras_history", None) if t_rank is not None and ref_rank is not None: # Should squeeze last dimension. True if tensor is (BATCH, ..., # 1) and reference is (BATCH, ...). if t_rank == ref_rank + 1 and t_shape[-1] == 1: tensor = tf.squeeze(tensor, axis=-1) # Should expand last_dimension. True if tensor is (BATCH, ...) # and reference is (BATCH, ..., 1). 
elif t_rank == ref_rank - 1 and ref_shape[-1] == 1: tensor = tf.expand_dims(tensor, axis=-1) if keras_history is not None: # Restore keras history. tensor._keras_history = keras_history # Dtype casting. tensor = tf.cast(tensor, dtype=ref_input.dtype) elif tf_utils.is_extension_type(tensor): # Dtype casting (If the extension type has a non-variant dtype and # supports being cast). Only cast if necessary (since some # extension types may not implement tf.cast). tensor_dtype = getattr(tensor, "dtype", None) ref_input_dtype = getattr(ref_input, "dtype", None) if ( ref_input_dtype is not None and tensor_dtype is not None and tensor_dtype != ref_input_dtype and ref_input_dtype != tf.variant ): tensor = tf.cast(tensor, dtype=ref_input_dtype) return tensor @generic_utils.default def get_config(self): # Prepare base arguments config = { "name": self.name, "trainable": self.trainable, } if saved_model_utils.in_tf_saved_model_scope(): # SavedModel special case: need to preserve legacy (potentially # incorrect) behavior. return copy.deepcopy(get_network_config(self, config=config)) # Check whether the class has a constructor compatible with a Functional # model or if it has a custom constructor. if has_functional_like_constructor(self.__class__): # Only return a Functional config if the constructor is the same # as that of a Functional model. This excludes subclassed Functional # models with a custom __init__. config = copy.deepcopy(get_network_config(self, config=config)) else: # Try to autogenerate config xtra_args = set(config.keys()) if getattr(self, "_auto_get_config", False): config.update(self._auto_config.config) # Remove args non explicitly supported argspec = tf_inspect.getfullargspec(self.__init__) if argspec.varkw != "kwargs": for key in xtra_args - xtra_args.intersection(argspec.args[1:]): config.pop(key, None) return config def get_weight_paths(self): result = {} for layer in self.layers: ( descendants, object_paths_dict, ) = tf.__internal__.tracking.ObjectGraphView( layer ).breadth_first_traversal() for descendant in descendants: if isinstance(descendant, tf.Variable): trackable_references = object_paths_dict[descendant] object_path = ".".join( [t.name for t in trackable_references] ) result[layer.name + "." + object_path] = descendant return result def _validate_graph_inputs_and_outputs(self): """Validates the inputs and outputs of a Graph Network.""" # Check for redundancy in inputs. if len({id(i) for i in self.inputs}) != len(self.inputs): raise ValueError( "The list of inputs passed to the model " "contains the same input multiple times. " "All inputs should only appear once." f"Received inputs={self.inputs}" ) for x in self.inputs: # Check that x has appropriate `_keras_history` metadata. if not hasattr(x, "_keras_history"): cls_name = self.__class__.__name__ raise ValueError( f"Input tensors to a {cls_name} model " "must come from `tf.keras.Input`. " f"Received inputs={x} (missing previous layer metadata)." ) # Check that x is an input tensor. layer = x._keras_history.layer if len(layer._inbound_nodes) > 1 or ( layer._inbound_nodes and not layer._inbound_nodes[0].is_input ): cls_name = self.__class__.__name__ logging.warning( f"{cls_name} model inputs must come from " "`tf.keras.Input` (thus holding past layer metadata). " "They cannot be the output of " "a previous non-Input layer. 
" "Here, a tensor specified as " f'input to "{self.name}" was not an Input tensor, ' f'it was generated by layer "{layer.name}".\n' "Note that input tensors are " "instantiated via `tensor = tf.keras.Input(shape)`.\n" f"The tensor that caused the issue was: {x}" ) # Check compatibility of batch sizes of Input Layers. input_batch_sizes = set( [ training_utils.get_static_batch_size(x._keras_history.layer) for x in self.inputs ] ) input_batch_sizes.discard(None) if len(input_batch_sizes) > 1: logging.warning( "Found incompatible static batch sizes among the " f"inputs. Batch sizes: {sorted(input_batch_sizes)}" ) for x in self.outputs: if not hasattr(x, "_keras_history"): cls_name = self.__class__.__name__ raise ValueError( f"Output tensors of a {cls_name} model must be " "the output of a TensorFlow `Layer` " f"(thus holding past layer metadata). Found: {x}" ) def _insert_layers(self, layers, relevant_nodes=None): """Inserts Layers into the Network after Network creation. This is only valid for TF-Keras Graph Networks. Layers added via this function will be included in the `call` computation and `get_config` of this Network. They will not be added to the Network's outputs. Args: layers: Arbitrary nested structure of Layers. Layers must be reachable from one or more of the `keras.Input` Tensors that correspond to this Network's inputs. relevant_nodes: Nodes from the Layers that should be considered part of this Network. If `None`, all Nodes will be considered part of this Network. Raises: ValueError: If the layers depend on `Input`s not found in this Model. """ layers = tf.nest.flatten(layers) tf_utils.assert_no_legacy_layers(layers) node_to_depth = {} for depth, nodes in self._nodes_by_depth.items(): node_to_depth.update({node: depth for node in nodes}) # The nodes of these Layers that are relevant to this Network. If not # provided, assume all Nodes are relevant if not relevant_nodes: relevant_nodes = tf.nest.flatten( [layer._inbound_nodes for layer in layers] ) network_nodes = set(relevant_nodes + list(node_to_depth.keys())) def _get_min_depth(node): """Gets the minimum depth at which node can be computed.""" min_depth = 0 for layer, node_id, _, _ in node.iterate_inbound(): inbound_node = layer._inbound_nodes[node_id] if inbound_node in node_to_depth: min_depth = min(min_depth, node_to_depth[inbound_node]) elif inbound_node not in network_nodes: continue else: # Previous relevant nodes haven't been processed yet. return None # New node is one shallower than its shallowest input. return min_depth - 1 # Insert nodes into `_nodes_by_depth` and other node attrs. unprocessed_nodes = copy.copy(relevant_nodes) i = 0 while unprocessed_nodes: i += 1 # Do a sanity check. This can occur if `Input`s from outside this # Model are being relied on. if i > 10000: raise ValueError( "Layers could not be added due to missing dependencies." ) node = unprocessed_nodes.pop(0) depth = _get_min_depth(node) if depth is None: # Defer until inbound nodes are processed. unprocessed_nodes.append(node) continue node_key = _make_node_key( node.layer.name, node.layer._inbound_nodes.index(node) ) if node_key not in self._network_nodes: node_to_depth[node] = depth self._network_nodes.add(node_key) self._nodes_by_depth[depth].append(node) # Insert layers and update other layer attrs. 
layer_set = set(self._self_tracked_trackables) deferred_layers = [] for layer in layers: if layer not in layer_set: self._self_tracked_trackables.append(layer) deferred_layers.append(layer) self._layer_call_argspecs[layer] = tf_inspect.getfullargspec( layer.call ) layer_set.add(layer) self._handle_deferred_layer_dependencies(deferred_layers) self._compute_tensor_usage_count() def _compute_tensor_usage_count(self): """Compute the #. of tensor usages for all the output tensors of layers. The computed tensor usage count is saved as `self._tensor_usage_count`. This is later used for saving memory in eager computation by releasing no-longer-needed tensors as early as possible. """ tensor_usage_count = collections.Counter() available_tensors = set(str(id(tensor)) for tensor in self.inputs) depth_keys = list(self._nodes_by_depth.keys()) depth_keys.sort(reverse=True) depth_keys = depth_keys[1:] for depth in depth_keys: for node in self._nodes_by_depth[depth]: input_tensors = { str(id(tensor)) for tensor in tf.nest.flatten(node.keras_inputs) } if input_tensors.issubset(available_tensors): for tensor in tf.nest.flatten(node.keras_inputs): tensor_usage_count[str(id(tensor))] += 1 for output_tensor in tf.nest.flatten(node.outputs): available_tensors.add(str(id(output_tensor))) for tensor in self.outputs: tensor_usage_count[str(id(tensor))] += 1 self._tensor_usage_count = tensor_usage_count def _assert_weights_created(self): # Override the implementation in Model. # The Functional model should always have weight created already. return def _graph_network_add_loss(self, symbolic_loss): new_nodes, new_layers = _map_subgraph_network( self.inputs, [symbolic_loss] ) # Losses must be keyed on inputs no matter what in order to be supported # in DistributionStrategy. add_loss_layer = base_layer.AddLoss( unconditional=False, dtype=symbolic_loss.dtype ) add_loss_layer(symbolic_loss) new_nodes.extend(add_loss_layer.inbound_nodes) new_layers.append(add_loss_layer) self._insert_layers(new_layers, new_nodes) def _graph_network_add_metric(self, value, aggregation, name): new_nodes, new_layers = _map_subgraph_network(self.inputs, [value]) add_metric_layer = base_layer.AddMetric( aggregation, name, dtype=value.dtype ) add_metric_layer(value) new_nodes.extend(add_metric_layer.inbound_nodes) new_layers.append(add_metric_layer) self._insert_layers(new_layers, new_nodes) @property def _trackable_saved_model_saver(self): return network_serialization.NetworkSavedModelSaver(self) def _get_save_spec(self, dynamic_batch=True, inputs_only=True): if getattr(self, "_has_explicit_input_shape", True): # Functional models and Sequential models that have an explicit # input shape should use the batch size set by the input layer. dynamic_batch = False return super()._get_save_spec(dynamic_batch, inputs_only) def _make_node_key(layer_name, node_index): return layer_name + "_ib-" + str(node_index) def _map_graph_network(inputs, outputs): """Validates a network's topology and gather its layers and nodes. Args: inputs: List of input tensors. outputs: List of outputs tensors. Returns: A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`. - nodes: list of Node instances. - nodes_by_depth: dict mapping ints (depth) to lists of node instances. - layers: list of Layer instances. - layers_by_depth: dict mapping ints (depth) to lists of layer instances. Raises: ValueError: In case the network is not valid (e.g. disconnected graph). """ # "depth" is number of layers between output Node and the Node. 
# Nodes are ordered from inputs -> outputs. nodes_in_decreasing_depth, layer_indices = _build_map(outputs) network_nodes = { _make_node_key(node.layer.name, node.layer._inbound_nodes.index(node)) for node in nodes_in_decreasing_depth } nodes_depths = {} # dict {node: depth value} layers_depths = {} # dict {layer: depth value} for node in reversed(nodes_in_decreasing_depth): # If the depth is not set, the node has no outbound nodes (depth 0). depth = nodes_depths.setdefault(node, 0) # Update the depth of the corresponding layer previous_depth = layers_depths.get(node.layer, 0) # If we've seen this layer before at a higher depth, # we should use that depth instead of the node depth. # This is necessary for shared layers that have inputs at different # depth levels in the graph. depth = max(depth, previous_depth) layers_depths[node.layer] = depth nodes_depths[node] = depth # Update the depth of inbound nodes. # The "depth" of a node is the max of the depths # of all nodes it is connected to + 1. for node_dep in node.parent_nodes: previous_depth = nodes_depths.get(node_dep, 0) nodes_depths[node_dep] = max(depth + 1, previous_depth) # Handle inputs that are not connected to outputs. # We do not error out here because the inputs may be used to compute losses # and metrics. for input_t in inputs: input_layer = input_t._keras_history[0] if input_layer not in layers_depths: layers_depths[input_layer] = 0 layer_indices[input_layer] = -1 nodes_depths[input_layer._inbound_nodes[0]] = 0 network_nodes.add(_make_node_key(input_layer.name, 0)) # Build a dict {depth: list of nodes with this depth} nodes_by_depth = collections.defaultdict(list) for node, depth in nodes_depths.items(): nodes_by_depth[depth].append(node) # Build a dict {depth: list of layers with this depth} layers_by_depth = collections.defaultdict(list) for layer, depth in layers_depths.items(): layers_by_depth[depth].append(layer) # Get sorted list of layer depths. depth_keys = list(layers_by_depth.keys()) depth_keys.sort(reverse=True) # Set self.layers ordered by depth. layers = [] for depth in depth_keys: layers_for_depth = layers_by_depth[depth] # Network.layers needs to have a deterministic order: # here we order them by traversal order. layers_for_depth.sort(key=lambda x: layer_indices[x]) layers.extend(layers_for_depth) # Get sorted list of node depths. depth_keys = list(nodes_by_depth.keys()) depth_keys.sort(reverse=True) # Check that all tensors required are computable. # computable_tensors: all tensors in the graph # that can be computed from the inputs provided. computable_tensors = set() for x in inputs: computable_tensors.add(id(x)) layers_with_complete_input = [] # To provide a better error msg. for depth in depth_keys: for node in nodes_by_depth[depth]: layer = node.layer if layer and not node.is_input: for x in tf.nest.flatten(node.keras_inputs): if id(x) not in computable_tensors: raise ValueError( "Graph disconnected: cannot obtain value for " f'tensor {x} at layer "{layer.name}". ' "The following previous layers were accessed " f"without issue: {layers_with_complete_input}" ) for x in tf.nest.flatten(node.outputs): computable_tensors.add(id(x)) layers_with_complete_input.append(layer.name) # Ensure name unicity, which will be crucial for serialization # (since serialized nodes refer to layers by their name). all_names = [layer.name for layer in layers] for name in all_names: if all_names.count(name) != 1: raise ValueError( f'The name "{name}" is used {all_names.count(name)} ' "times in the model. 
All layer names should be unique."
            )
    return network_nodes, nodes_by_depth, layers, layers_by_depth


def _build_map(outputs):
    """This method topologically sorts nodes in order from inputs to outputs.

    It uses a depth-first search to topologically sort nodes that appear in the
    _keras_history connectivity metadata of `outputs`.

    Args:
        outputs: the output tensors whose _keras_history metadata should be
            walked. This may be an arbitrary nested structure.

    Returns:
        A tuple like (ordered_nodes, layer_to_first_traversal_index)
        ordered_nodes: list of nodes appearing in the keras history,
            topologically sorted from original inputs to the `outputs`.
            (If outputs have different sets of ancestors, the inputs to one
            output may appear after a different output).
        layer_to_first_traversal_index: A dict mapping layer to the traversal
            index in the DFS where it is seen. Note: if a layer is shared by
            several nodes, the dict will only store the index corresponding
            to the *first* time the layer is seen.
    """
    finished_nodes = set()
    nodes_in_progress = set()
    nodes_in_decreasing_depth = []  # nodes from inputs -> outputs.
    layer_indices = {}  # layer -> index in traversal order.
    for output in tf.nest.flatten(outputs):
        _build_map_helper(
            output,
            finished_nodes,
            nodes_in_progress,
            nodes_in_decreasing_depth,
            layer_indices,
        )
    return nodes_in_decreasing_depth, layer_indices


def _build_map_helper(
    tensor,
    finished_nodes,
    nodes_in_progress,
    nodes_in_decreasing_depth,
    layer_indices,
):
    """Recursive helper for `_build_map`."""
    (
        layer,
        node_index,
        _,
    ) = tensor._keras_history
    node = layer._inbound_nodes[node_index]

    # Don't repeat work for shared subgraphs.
    if node in finished_nodes:
        return

    # Prevent cycles.
    if node in nodes_in_progress:
        raise ValueError(
            f'Tensor {tensor} from layer "{layer.name}" is part of a cycle.'
        )

    # Store the traversal order for layer sorting.
    if layer not in layer_indices:
        layer_indices[layer] = len(layer_indices)

    # Propagate to all previous tensors connected to this node.
    nodes_in_progress.add(node)
    if not node.is_input:
        for tensor in node.keras_inputs:
            _build_map_helper(
                tensor,
                finished_nodes,
                nodes_in_progress,
                nodes_in_decreasing_depth,
                layer_indices,
            )

    finished_nodes.add(node)
    nodes_in_progress.remove(node)
    nodes_in_decreasing_depth.append(node)


def _map_subgraph_network(inputs, outputs):
    """Returns the nodes and layers in the topology from `inputs` to `outputs`.

    Args:
        inputs: List of input tensors.
        outputs: List of output tensors.

    Returns:
        A tuple of List[Node] and List[Layer].
    """
    if not tf.compat.v1.executing_eagerly_outside_functions():
        base_layer_utils.create_keras_history(outputs)
    # Keep only nodes and layers in the topology between inputs and outputs.
    _, nodes_by_depth, layers, _ = _map_graph_network(inputs, outputs)
    return tf.nest.flatten([nodes for nodes in nodes_by_depth.values()]), layers


def _should_skip_first_node(layer):
    """Returns True if the first layer node should not be saved or loaded."""
    # Networks that are constructed with an Input layer/shape start with a
    # pre-existing node linking their input to output. This node is excluded
    # from the network config.
    if not hasattr(layer, "_self_tracked_trackables"):
        # Special case for serialization of Functional models without
        # defined input shape argument.
        return isinstance(layer, Functional)
    if layer._self_tracked_trackables:
        return (
            isinstance(layer, Functional)
            # Filter out Sequential models without an input shape.
            and isinstance(
                layer._self_tracked_trackables[0],
                input_layer_module.InputLayer,
            )
        )
    else:
        return isinstance(layer, Functional)


def connect_ancillary_layers(model, created_layers):
    """Adds layers that are not connected to the outputs to the model."""
    # Layers not connected to outputs, such as those added in `add_loss`.
    ancillary_layers = [
        layer for layer in created_layers.values() if layer not in model.layers
    ]
    if ancillary_layers:
        relevant_nodes = tf.nest.flatten(
            [
                layer.inbound_nodes[1:]
                if _should_skip_first_node(layer)
                else layer.inbound_nodes
                for layer in created_layers.values()
            ]
        )
        model._insert_layers(ancillary_layers, relevant_nodes)
    return model


def reconstruct_from_config(config, custom_objects=None, created_layers=None):
    """Reconstructs graph from config object.

    Args:
        config: Dictionary returned from Network.get_config()
        custom_objects: Optional dictionary mapping names (strings) to custom
            classes or functions to be considered during deserialization.
        created_layers: Optional dictionary mapping names to Layer objects. Any
            layer not in this dictionary will be created and added to the dict.
            This function will add new nodes to all layers (excluding
            InputLayers), instead of re-using pre-existing nodes in the layers.

    Returns:
        Tuple of (input tensors, output tensors, dictionary of created layers)
    """
    # Layer instances created during the graph reconstruction process.
    created_layers = created_layers or collections.OrderedDict()

    # Maps input data (tuple of inbound layer name, node index) from the config
    # to node indices in the newly generated model. The node indices may be
    # different if the layers have already been called previously.
    node_index_map = {}
    node_count_by_layer = {}

    # Dictionary mapping layer instances to
    # node data that specifies a layer call.
    # It acts as a queue that maintains any unprocessed
    # layer call until it becomes possible to process it
    # (i.e. until the input tensors to the call all exist).
    unprocessed_nodes = collections.defaultdict(list)

    def get_node_index(layer, config_node_index):
        """Returns node index in layer (might differ from config_node_index)."""
        if isinstance(layer, input_layer_module.InputLayer):
            return 0
        return node_index_map.get((layer.name, config_node_index), None)

    def _deserialize_keras_tensors(kwargs, layer_map):
        """Deserializes TF-Keras Tensors passed to `call`."""

        def _deserialize_keras_tensor(t):
            """Deserializes a single TF-Keras Tensor passed to `call`."""
            if isinstance(t, tf_utils.ListWrapper):
                t = t.as_list()
                layer_name = t[0]
                node_index = t[1]
                tensor_index = t[2]

                layer = layer_map[layer_name]
                new_node_index = get_node_index(layer, node_index)
                if new_node_index is None:
                    # The inbound node may not have been processed yet.
                    # (This can happen e.g. if it depends on a different set
                    # of inputs than those that have been processed already.)
                    # Raise an IndexError so that the current node puts itself
                    # back on the unprocessed queue.
                    # Caution: This may lead to infinite loops for malformed
                    # network configurations! (or when there is a bug in
                    # the network config loading code).
                    raise IndexError
                node = layer._inbound_nodes[new_node_index]
                return tf.nest.flatten(node.outputs)[tensor_index]
            return t

        kwargs = tf_utils.convert_inner_node_data(kwargs, wrap=True)
        return tf.nest.map_structure(_deserialize_keras_tensor, kwargs)

    def process_node(layer, node_data):
        """Deserializes a node.

        Args:
            layer: layer instance.
            node_data: Nested structure of `ListWrapper`.

        Returns:
            Whether the node was processed (i.e. the layer was called on the
            inputs specified by the node data).

        Raises:
            ValueError: In case of improperly formatted `node_data`.
        """
        input_tensors = []
        for input_data in tf.nest.flatten(node_data):
            input_data = input_data.as_list()
            if len(input_data) == 3:
                kwargs = {}
            elif len(input_data) == 4:
                kwargs = input_data[3]
                try:
                    kwargs = _deserialize_keras_tensors(kwargs, created_layers)
                except IndexError:
                    # Happens if keras tensors in kwargs are still unprocessed.
                    return False
            else:
                raise ValueError("Improperly formatted model config.")

            if input_data[0] != node_module._CONSTANT_VALUE:
                inbound_layer_name = input_data[0]
                inbound_node_index = input_data[1]
                inbound_tensor_index = input_data[2]
                inbound_layer = created_layers[inbound_layer_name]
                inbound_node_index = get_node_index(
                    inbound_layer, inbound_node_index
                )

                if inbound_node_index is None:
                    return False
                inbound_node = inbound_layer._inbound_nodes[inbound_node_index]
                input_tensors.append(
                    tf.nest.flatten(inbound_node.outputs)[inbound_tensor_index]
                )
            else:
                # We received a constant with no TF-Keras history attached,
                # which means it is a constant tensor input.
                # Format = [_CONSTANT_VALUE, -1, const_val, kwargs]
                assert input_data[1] == -1
                assert len(input_data) >= 3
                const_val = input_data[2]
                if (
                    isinstance(const_val, tuple)
                    and len(const_val) == 2
                    and const_val[0] == node_module._COMPOSITE_TYPE
                ):
                    # It is a composite tensor.
                    input_tensors.append(json_utils.decode(const_val[1]))
                else:
                    input_tensors.append(const_val)
        input_tensors = tf.nest.pack_sequence_as(node_data, input_tensors)
        # Call layer on its inputs, thus creating the node
        # and building the layer if needed.
        if input_tensors is not None:
            if (
                not hasattr(layer, "_preserve_input_structure_in_config")
                or not layer._preserve_input_structure_in_config
            ):
                input_tensors = base_layer_utils.unnest_if_single_tensor(
                    input_tensors
                )
            output_tensors = layer(input_tensors, **kwargs)

            # Update node index map.
            output_index = tf.nest.flatten(output_tensors)[
                0
            ]._keras_history.node_index
            node_index_map[
                (layer.name, node_count_by_layer[layer])
            ] = output_index
            node_count_by_layer[layer] += 1
        return True

    def process_layer(layer_data):
        """Deserializes a layer, then calls it on appropriate inputs.

        Args:
            layer_data: layer config dict.

        Raises:
            ValueError: In case of improperly formatted `layer_data` dict.
        """
        layer_name = layer_data["name"]

        if layer_name in created_layers:
            layer = created_layers[layer_name]
        else:
            # Instantiate layer.
            from tf_keras.layers import deserialize as deserialize_layer

            layer = deserialize_layer(layer_data, custom_objects=custom_objects)
            created_layers[layer_name] = layer

        node_count_by_layer[layer] = int(_should_skip_first_node(layer))

        # Gather layer inputs and convert to `ListWrapper` objects.
        inbound_nodes_data = layer_data["inbound_nodes"]
        inbound_nodes_data = tf_utils.convert_inner_node_data(
            inbound_nodes_data, wrap=True
        )
        for node_data in inbound_nodes_data:
            # We don't process nodes (i.e. make layer calls)
            # on the fly because the inbound node may not yet exist,
            # in case of layers shared at different topological depths
            # (e.g. a model such as A(B(A(B(x)))))
            unprocessed_nodes[layer].append(node_data)

    # First, we create all layers and enqueue nodes to be processed
    for layer_data in config["layers"]:
        process_layer(layer_data)

    # Then we process nodes in order of layer depth.
    # Nodes that cannot yet be processed (if the inbound node
    # does not yet exist) are re-enqueued, and the process
    # is repeated until all nodes are processed.
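    # For illustration (the layer name "dense" here is hypothetical), an
    # `inbound_nodes` entry such as `[["dense", 0, 0, {}]]` describes a call of
    # the current layer on output tensor 0 of node 0 of the layer named
    # "dense", with no extra call kwargs; this is the
    # `[layer_name, node_index, tensor_index(, kwargs)]` layout that
    # `process_node` above consumes.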
while unprocessed_nodes: for layer_data in config["layers"]: layer = created_layers[layer_data["name"]] if layer in unprocessed_nodes: layer_nodes = unprocessed_nodes.pop(layer) while layer_nodes: node_data = layer_nodes[0] if process_node(layer, node_data): layer_nodes.pop(0) else: # If a node can't be processed, stop processing the # nodes of the current layer to maintain node ordering. unprocessed_nodes[layer] = layer_nodes break input_tensors = [] output_tensors = [] input_layers = tf_utils.convert_inner_node_data( config["input_layers"], wrap=True ) for layer_data in tf.nest.flatten(input_layers): layer_name, node_index, tensor_index = layer_data.as_list() assert layer_name in created_layers layer = created_layers[layer_name] node_index = get_node_index(layer, node_index) layer_output_tensors = layer._inbound_nodes[node_index].output_tensors input_tensors.append( tf.nest.flatten(layer_output_tensors)[tensor_index] ) output_layers = tf_utils.convert_inner_node_data( config["output_layers"], wrap=True ) for layer_data in tf.nest.flatten(output_layers): layer_name, node_index, tensor_index = layer_data.as_list() assert layer_name in created_layers layer = created_layers[layer_name] node_index = get_node_index(layer, node_index) layer_output_tensors = layer._inbound_nodes[node_index].output_tensors output_tensors.append( tf.nest.flatten(layer_output_tensors)[tensor_index] ) input_tensors = tf.nest.pack_sequence_as(input_layers, input_tensors) output_tensors = tf.nest.pack_sequence_as(output_layers, output_tensors) return input_tensors, output_tensors, created_layers def get_network_config(network, serialize_layer_fn=None, config=None): """Build the config, which consists of the node graph and serialized layers. Args: network: A Network object. serialize_layer_fn: Function used to serialize layers. config: A dict to append more config entries into. If None, start with a new dict for the config. Returns: Config dictionary. """ config = config or {} serialize_obj_fn = serialization_lib.serialize_keras_object set_layers_legacy = False # To be removed after full affected g3 user migration to TF-Keras V3 Saving. if getattr(network, "use_legacy_config", False): serialize_obj_fn = serialization.serialize_keras_object set_layers_legacy = True serialize_layer_fn = serialize_layer_fn or serialize_obj_fn config["name"] = network.name node_conversion_map = {} for layer in network.layers: kept_nodes = 1 if _should_skip_first_node(layer) else 0 for original_node_index, node in enumerate(layer._inbound_nodes): node_key = _make_node_key(layer.name, original_node_index) if node_key in network._network_nodes: node_conversion_map[node_key] = kept_nodes kept_nodes += 1 layer_configs = [] with serialization.SharedObjectSavingScope(): for layer in network.layers: # From the earliest layers on. filtered_inbound_nodes = [] for original_node_index, node in enumerate(layer._inbound_nodes): node_key = _make_node_key(layer.name, original_node_index) if node_key in network._network_nodes and not node.is_input: # The node is relevant to the model: # add to filtered_inbound_nodes. node_data = node.serialize( _make_node_key, node_conversion_map ) filtered_inbound_nodes.append(node_data) if isinstance(layer, Functional) and set_layers_legacy: layer.use_legacy_config = True layer_config = serialize_layer_fn(layer) layer_config["name"] = layer.name layer_config["inbound_nodes"] = filtered_inbound_nodes layer_configs.append(layer_config) config["layers"] = layer_configs # Gather info about inputs and outputs. 
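    # As a hypothetical illustration, a model with a single input layer named
    # "input_1" serializes below as
    # `config["input_layers"] = [["input_1", 0, 0]]`, i.e. one
    # (layer name, new node index, tensor index) triple per model input.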
    model_inputs = []
    for i in range(len(network._input_layers)):
        layer, node_index, tensor_index = network._input_coordinates[i]

        node_key = _make_node_key(layer.name, node_index)
        if node_key not in network._network_nodes:
            continue
        new_node_index = node_conversion_map[node_key]
        model_inputs.append(
            tf_utils.ListWrapper([layer.name, new_node_index, tensor_index])
        )
    model_inputs = tf.nest.pack_sequence_as(
        network._nested_inputs, model_inputs
    )
    # Preserve external TF-Keras compat for Models with single input.
    if not tf.nest.is_nested(model_inputs):
        model_inputs = [model_inputs]
    model_inputs = tf_utils.convert_inner_node_data(model_inputs)
    config["input_layers"] = model_inputs

    model_outputs = []
    for i in range(len(network._output_layers)):
        layer, node_index, tensor_index = network._output_coordinates[i]

        node_key = _make_node_key(layer.name, node_index)
        if node_key not in network._network_nodes:
            continue
        new_node_index = node_conversion_map[node_key]
        model_outputs.append(
            tf_utils.ListWrapper([layer.name, new_node_index, tensor_index])
        )
    model_outputs = tf.nest.pack_sequence_as(
        network._nested_outputs, model_outputs
    )
    # Preserve external TF-Keras compat for Models with single output.
    if not tf.nest.is_nested(model_outputs):
        model_outputs = [model_outputs]
    model_outputs = tf_utils.convert_inner_node_data(model_outputs)
    config["output_layers"] = model_outputs
    return config


def shape_with_no_batch_size(x):
    if x.shape.rank is None:
        return None
    shape = x.shape.as_list()
    if shape:
        shape[0] = None
    return shape


class ModuleWrapper(base_layer.Layer):
    """Wrapper for `tf.Module`s to support the Functional and Sequential API."""

    def __init__(self, module, method_name=None, **kwargs):
        """Initializes the wrapper Layer for this module.

        Args:
            module: The `tf.Module` instance to be wrapped.
            method_name: (Optional) str. The name of the method to use as the
                forward pass of the module. If not set, '__call__' is used if
                defined, otherwise 'call'. Defaults to `None`.
            **kwargs: Additional keyword arguments. See
                `tf.keras.layers.Layer`.

        Raises:
            ValueError: If `method_name` is not defined on `module`.
        """
        super().__init__(**kwargs)
        if method_name is None:
            if hasattr(module, "__call__"):
                method_name = "__call__"
            elif hasattr(module, "call"):
                method_name = "call"
        if method_name is None or not hasattr(module, method_name):
            raise ValueError(f"{method_name} is not defined on object {module}")

        self._module = module
        self._method_name = method_name

        # Check if module.__call__ has a `training` arg or accepts `**kwargs`.
        method = getattr(module, method_name)
        method_arg_spec = tf_inspect.getfullargspec(method)
        self._call_spec.expects_training_arg = (
            "training" in method_arg_spec.args
            or method_arg_spec.varkw is not None
        )
        self._call_spec.expects_mask_arg = (
            "mask" in method_arg_spec.args
            or method_arg_spec.varkw is not None
        )

    def call(self, *args, **kwargs):
        if "training" in kwargs and not self._expects_training_arg:
            kwargs.pop("training")
        if "mask" in kwargs and not self._expects_mask_arg:
            kwargs.pop("mask")
        return getattr(self._module, self._method_name)(*args, **kwargs)


def has_functional_like_constructor(cls):
    init_args = tf_inspect.getfullargspec(cls.__init__).args[1:]
    functional_init_args = tf_inspect.getfullargspec(Functional.__init__).args[
        1:
    ]
    if init_args == functional_init_args:
        return True
    return False
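
# A minimal usage sketch, assuming TF-Keras is importable as `tf_keras` (the
# `Scale` module below is hypothetical): a plain `tf.Module` added to a
# `Sequential` model is routed through `ModuleWrapper` above, which resolves
# the forward-pass method and filters the `training`/`mask` call arguments.
#
#     import tensorflow as tf
#     import tf_keras as keras
#
#     class Scale(tf.Module):
#         def __init__(self):
#             self.v = tf.Variable(2.0)
#
#         def __call__(self, x):
#             return self.v * x
#
#     model = keras.Sequential()
#     model.add(Scale())  # internally wrapped in a ModuleWrapper layer
#     model.compile("sgd", "mse")
#     model.fit(tf.ones((10, 1)), tf.ones((10, 1)), batch_size=2)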
tf-keras/tf_keras/engine/functional.py/0
{ "file_path": "tf-keras/tf_keras/engine/functional.py", "repo_id": "tf-keras", "token_count": 31491 }
190
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests specific to `Sequential` model.""" import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils # isort: off from tensorflow.python.framework import ( test_util as tf_test_utils, ) class TestSequential(test_combinations.TestCase): """Most Sequential model API tests are covered in `training_test.py`.""" @test_combinations.run_all_keras_modes def test_basic_methods(self): model = keras.models.Sequential() model.add(keras.layers.Dense(1, input_dim=2)) model.add(keras.layers.Dropout(0.3, name="dp")) model.add( keras.layers.Dense( 2, kernel_regularizer="l2", kernel_constraint="max_norm" ) ) self.assertEqual(len(model.layers), 3) self.assertEqual(len(model.weights), 2 * 2) self.assertEqual(model.get_layer(name="dp").name, "dp") @test_combinations.run_all_keras_modes def test_input_defined_first_layer(self): model = keras.models.Sequential() model.add(keras.Input(shape=(2,), name="input_layer")) model.add(keras.layers.Dense(1)) model.add(keras.layers.Dropout(0.3, name="dp")) model.add( keras.layers.Dense( 2, kernel_regularizer="l2", kernel_constraint="max_norm" ) ) self.assertLen(model.layers, 3) self.assertLen(model.weights, 2 * 2) self.assertEqual(model.get_layer(name="dp").name, "dp") @test_combinations.run_all_keras_modes def test_single_layer_in_init(self): model = keras.models.Sequential(keras.layers.Dense(1)) self.assertLen(model.layers, 1) @test_combinations.run_all_keras_modes def test_sequential_pop(self): num_hidden = 5 input_dim = 3 batch_size = 5 num_classes = 2 model = test_utils.get_small_sequential_mlp( num_hidden, num_classes, input_dim ) model.compile( loss="mse", optimizer="rmsprop", run_eagerly=test_utils.should_run_eagerly(), ) x = np.random.random((batch_size, input_dim)) y = np.random.random((batch_size, num_classes)) model.fit(x, y, epochs=1) model.pop() self.assertEqual(len(model.layers), 1) self.assertEqual(model.output_shape, (None, num_hidden)) model.compile( loss="mse", optimizer="rmsprop", run_eagerly=test_utils.should_run_eagerly(), ) y = np.random.random((batch_size, num_hidden)) model.fit(x, y, epochs=1) # Test popping single-layer model model = keras.models.Sequential() model.add(keras.layers.Dense(num_hidden, input_dim=input_dim)) model.pop() self.assertEqual(model.layers, []) self.assertEqual(model.outputs, None) # Invalid use case model = keras.models.Sequential() with self.assertRaises(TypeError): model.pop() @test_combinations.run_all_keras_modes def test_sequential_deferred_build_with_np_arrays(self): num_hidden = 5 input_dim = 3 batch_size = 5 num_classes = 2 model = test_utils.get_small_sequential_mlp(num_hidden, num_classes) model.compile( loss="mse", optimizer="rmsprop", metrics=[keras.metrics.CategoricalAccuracy()], 
run_eagerly=test_utils.should_run_eagerly(), ) self.assertEqual(len(model.layers), 2) with self.assertRaisesRegex( ValueError, "Weights for model .* have not yet been created" ): len(model.weights) self.assertFalse(model.built) x = np.random.random((batch_size, input_dim)) y = np.random.random((batch_size, num_classes)) model.fit(x, y, epochs=1) self.assertTrue(model.built) self.assertEqual(len(model.weights), 2 * 2) @test_combinations.run_all_keras_modes def test_sequential_deferred_build_with_dataset_iterators(self): num_hidden = 5 input_dim = 3 num_classes = 2 num_samples = 50 steps_per_epoch = 10 model = test_utils.get_small_sequential_mlp(num_hidden, num_classes) model.compile( loss="mse", optimizer="rmsprop", metrics=[keras.metrics.CategoricalAccuracy()], run_eagerly=test_utils.should_run_eagerly(), ) self.assertEqual(len(model.layers), 2) with self.assertRaisesRegex( ValueError, "Weights for model .* have not yet been created" ): len(model.weights) self.assertFalse(model.built) x = tf.ones((num_samples, input_dim)) y = tf.zeros((num_samples, num_classes)) dataset = tf.data.Dataset.from_tensor_slices((x, y)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=steps_per_epoch) self.assertTrue(model.built) self.assertEqual(len(model.weights), 2 * 2) # TODO(kaftan) This test fails w/ run_with_all_keras_modes. File ticket @parameterized.parameters((True,), (False,)) def test_training_and_eval_methods_on_symbolic_tensors(self, deferred): with tf.Graph().as_default(), self.cached_session(): def get_model(): if deferred: model = test_utils.get_small_sequential_mlp(10, 4) else: model = test_utils.get_small_sequential_mlp( 10, 4, input_dim=3 ) model.compile( optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"], ) return model inputs = keras.backend.zeros(shape=(10, 3)) targets = keras.backend.zeros(shape=(10, 4)) model = get_model() model.fit(inputs, targets, epochs=10, steps_per_epoch=30) model = get_model() model.evaluate(inputs, targets, steps=2, verbose=0) model = get_model() model.predict(inputs, steps=2) model = get_model() model.train_on_batch(inputs, targets) model = get_model() model.test_on_batch(inputs, targets) model = get_model() model.fit( inputs, targets, epochs=1, steps_per_epoch=2, verbose=0, validation_data=(inputs, targets), validation_steps=2, ) @test_combinations.run_all_keras_modes def test_invalid_use_cases(self): # Added objects must be layer instances with self.assertRaises(TypeError): model = keras.models.Sequential() model.add(None) @test_combinations.run_all_keras_modes def test_nested_sequential_trainability(self): input_dim = 20 num_units = 10 num_classes = 2 inner_model = keras.models.Sequential() inner_model.add(keras.layers.Dense(num_units, input_shape=(input_dim,))) model = keras.models.Sequential() model.add(inner_model) model.add(keras.layers.Dense(num_classes)) self.assertEqual(len(model.layers), 2) self.assertEqual(len(model.trainable_weights), 4) inner_model.trainable = False self.assertEqual(len(model.trainable_weights), 2) inner_model.trainable = True self.assertEqual(len(model.trainable_weights), 4) @test_combinations.run_all_keras_modes def test_sequential_update_disabling(self): val_a = np.random.random((10, 4)) val_out = np.random.random((10, 4)) model = keras.models.Sequential() model.add(keras.layers.BatchNormalization(input_shape=(4,))) model.trainable = False model.compile("sgd", "mse") x1 = model.predict(val_a) model.train_on_batch(val_a, val_out) x2 = 
model.predict(val_a) self.assertAllClose(x1, x2, atol=1e-7) model.trainable = True model.compile("sgd", "mse") model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) assert np.abs(np.sum(x1 - x2)) > 1e-5 @test_combinations.run_all_keras_modes def test_sequential_deferred_build_serialization(self): num_hidden = 5 input_dim = 3 batch_size = 5 num_classes = 2 model = test_utils.get_small_sequential_mlp(num_hidden, num_classes) model.compile( loss="mse", optimizer="rmsprop", metrics=[keras.metrics.CategoricalAccuracy()], run_eagerly=test_utils.should_run_eagerly(), ) self.assertFalse(model.built) x = np.random.random((batch_size, input_dim)) y = np.random.random((batch_size, num_classes)) model.train_on_batch(x, y) self.assertTrue(model.built) config = model.get_config() new_model = keras.models.Sequential.from_config(config) new_model.compile( loss="mse", optimizer="rmsprop", metrics=[keras.metrics.CategoricalAccuracy()], run_eagerly=test_utils.should_run_eagerly(), ) x = np.random.random((batch_size, input_dim)) y = np.random.random((batch_size, num_classes)) new_model.train_on_batch(x, y) self.assertEqual(len(new_model.layers), 2) self.assertEqual(len(new_model.weights), 4) @test_combinations.run_all_keras_modes def test_sequential_shape_inference_deferred(self): model = test_utils.get_small_sequential_mlp(4, 5) output_shape = model.compute_output_shape((None, 7)) self.assertEqual(tuple(output_shape.as_list()), (None, 5)) @test_combinations.run_all_keras_modes def test_sequential_build_deferred(self): model = test_utils.get_small_sequential_mlp(4, 5) model.build((None, 10)) self.assertTrue(model.built) self.assertEqual(len(model.weights), 4) # Test with nested model model = test_utils.get_small_sequential_mlp(4, 3) inner_model = test_utils.get_small_sequential_mlp(4, 5) model.add(inner_model) model.build((None, 10)) self.assertTrue(model.built) self.assertEqual(len(model.weights), 8) @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_sequential_deferred_manual_build(self): model = test_utils.get_small_sequential_mlp(4, 5) self.assertFalse(model.built) model(tf.zeros([1, 2])) self.assertTrue(model.built) model.compile( "rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly() ) model.train_on_batch(np.zeros((1, 2)), np.zeros((1, 5))) @test_combinations.run_all_keras_modes def test_sequential_nesting(self): model = test_utils.get_small_sequential_mlp(4, 3) inner_model = test_utils.get_small_sequential_mlp(4, 5) model.add(inner_model) model.compile( loss="mse", optimizer="rmsprop", run_eagerly=test_utils.should_run_eagerly(), ) x = np.random.random((2, 6)) y = np.random.random((2, 5)) model.fit(x, y, epochs=1) @tf_test_utils.run_v1_only("Behavior changed in V2.") def test_variable_names_deferred(self): model = keras.models.Sequential([keras.layers.Dense(3)]) model.add(keras.layers.Dense(2)) model(tf.ones([2, 4])) # Note that for regular sequential models (wrapping graph network), # the layers' weights are built # without the model name as prefix (because the Functional API __call__ # reset the name scope). This is fixable, but it would be # backwards incompatible. 
self.assertEqual( [ "sequential/dense/kernel:0", "sequential/dense/bias:0", "sequential/dense_1/kernel:0", "sequential/dense_1/bias:0", ], [v.name for v in model.variables], ) @test_combinations.run_all_keras_modes def test_input_assumptions_propagation(self): model = keras.models.Sequential() model.add(keras.layers.Dense(1)) if tf.executing_eagerly(): with self.assertRaisesRegex( ValueError, "expected min_ndim=2, found ndim=0" ): model(1.0) @test_combinations.run_all_keras_modes def test_string_input(self): seq = keras.Sequential( [ keras.layers.InputLayer(input_shape=(1,), dtype=tf.string), keras.layers.Lambda(lambda x: x[0]), ] ) seq.run_eagerly = test_utils.should_run_eagerly() preds = seq.predict([["tensorflow eager"]]) self.assertEqual(preds.shape, (1,)) @test_combinations.run_all_keras_modes def test_multi_output_layer_not_accepted(self): class MultiOutputLayer(keras.layers.Layer): def call(self, inputs): return inputs, inputs with self.assertRaisesRegex( ValueError, "should have a single output tensor" ): keras.Sequential([MultiOutputLayer(input_shape=(3,))]) with self.assertRaisesRegex( ValueError, "should have a single output tensor" ): keras.Sequential( [keras.layers.Dense(1, input_shape=(3,)), MultiOutputLayer()] ) @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_layer_add_after_compile_deferred(self): model = keras.Sequential([keras.layers.Dense(3)]) self.assertFalse(model.built) model.compile("adam", loss="mse") model.fit(np.random.random((1, 3)), np.random.random((1, 3))) self.assertTrue(model.built) model.add(keras.layers.Dense(3)) model.compile("adam", loss="mse") model.fit(np.random.random((1, 3)), np.random.random((1, 3))) self.assertTrue(model.built) def test_sequential_layer_tracking(self): """Test that Sequential only tracks layers added in init or `.add`.""" layer = keras.layers.Dense(1) model = keras.Sequential([layer]) self.assertEqual( list(model._flatten_layers(include_self=False, recursive=False))[ -1 ], layer, ) model.a = [ keras.layers.Dense(3) ] # should not be added to the layers list. self.assertEqual( list(model._flatten_layers(include_self=False, recursive=False))[ -1 ], layer, ) layer2 = keras.layers.Dense(2) model.add(layer2) self.assertEqual( list(model._flatten_layers(include_self=False, recursive=False))[ -1 ], layer2, ) model.a = [ keras.layers.Dense(3) ] # should not be added to the layers list. 
self.assertEqual( list(model._flatten_layers(include_self=False, recursive=False))[ -1 ], layer2, ) model.pop() self.assertEqual( list(model._flatten_layers(include_self=False, recursive=False))[ -1 ], layer, ) def test_config_preserves_input_layer(self): model = keras.Sequential( [ keras.Input((None,), name="my_embedding_input", dtype="int32"), keras.layers.Embedding(32, 32), keras.layers.Dense(3), ] ) config = model.get_config() new_model = keras.Sequential.from_config(config) self.assertTrue(new_model.built) layers = list( new_model._flatten_layers(include_self=False, recursive=False) ) self.assertEqual(layers[0].dtype, "int32") self.assertEqual(layers[0].name, "my_embedding_input") def test_name_unicity(self): model = keras.Sequential() model.add(keras.layers.Dense(3, name="specific_name")) with self.assertRaisesRegex(ValueError, "should have unique names"): model.add(keras.layers.Dense(3, name="specific_name")) @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_tf_module_call(self): class MyModule(tf.Module): def __init__(self): self.v = tf.Variable(2.0) def __call__(self, x): return self.v * x model = keras.Sequential() model.add(MyModule()) model.compile("sgd", "mse") x, y = np.ones((10, 1)), np.ones((10, 1)) model.fit(x, y, batch_size=2) self.assertLen(model.trainable_variables, 1) @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_tf_module_training(self): class MyModule(tf.Module): def __init__(self): self.v = tf.Variable(2.0) def call(self, x, training=None): # training should be set by Sequential. assert training is not None return self.v * x model = keras.Sequential() model.add(MyModule()) model.compile("sgd", "mse") x, y = np.ones((10, 1)), np.ones((10, 1)) model.fit(x, y, batch_size=2) self.assertLen(model.trainable_variables, 1) @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_tf_module_error(self): class MyModule(tf.Module): def __init__(self): self.v = tf.Variable(2.0) model = keras.Sequential() with self.assertRaisesRegex(ValueError, "is not defined"): model.add(MyModule()) @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_multi_inputs_outputs(self): model = keras.Sequential( [ ImageAugmentLayer(), ImageAugmentLayer(), ] ) image_inputs = tf.ones((2, 512, 512, 3)) label_inputs = tf.ones((2, 2)) output = model({"images": image_inputs, "labels": label_inputs}) self.assertAllClose(output["images"], image_inputs) self.assertAllClose(output["labels"], label_inputs) model.compile(loss="mse") model.fit( x={"images": image_inputs, "labels": label_inputs}, y={"images": image_inputs, "labels": label_inputs}, steps_per_epoch=1, ) self.assertIsNone(model.inputs) self.assertIsNone(model.outputs) # Use the same model with image input only model({"images": image_inputs}) model.fit( x={"images": image_inputs}, y={"images": image_inputs}, steps_per_epoch=1, ) model(image_inputs) model.fit(x=image_inputs, y=image_inputs, steps_per_epoch=1) class TestSequentialEagerIntegration(test_combinations.TestCase): @test_combinations.run_all_keras_modes def test_defun_on_call(self): # Check that one can subclass Sequential and place the `call` in a # `defun`. 
class MySequential(keras.Sequential): def __init__(self, name=None): super().__init__(name=name) self.call = tf.function(self.call) model = MySequential() model.add(keras.layers.Dense(4, activation="relu")) model.add(keras.layers.Dense(5, activation="softmax")) model.compile( loss="mse", optimizer="rmsprop", run_eagerly=test_utils.should_run_eagerly(), ) x = np.random.random((2, 6)) y = np.random.random((2, 5)) model.fit(x, y, epochs=1) @test_combinations.run_all_keras_modes def test_build_before_fit(self): # Fix for b/112433577 model = test_utils.get_small_sequential_mlp(4, 5) model.compile( loss="mse", optimizer="rmsprop", run_eagerly=test_utils.should_run_eagerly(), ) model.build((None, 6)) x = np.random.random((2, 6)) y = np.random.random((2, 5)) model.fit(x, y, epochs=1) @test_combinations.run_all_keras_modes def test_build_empty_network(self): x = np.random.random((2, 6)) y = np.random.random((2, 5)) model = keras.Sequential() # Make sure an empty sequential model can still work with build(). model.build((None, 6)) self.assertTrue(model.built) model.add(keras.layers.Dense(5, input_shape=(6,))) model.compile( loss="mse", optimizer="rmsprop", run_eagerly=test_utils.should_run_eagerly(), ) model.fit(x, y) model.pop() self.assertFalse(model.built) model.build((None, 6)) self.assertTrue(model.built) class ImageAugmentLayer(keras.layers.Layer): def call(self, inputs): return inputs if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/engine/sequential_test.py/0
{ "file_path": "tf-keras/tf_keras/engine/sequential_test.py", "repo_id": "tf-keras", "token_count": 10831 }
191
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """V1 Training-related part of the TF-Keras engine.""" import collections import warnings import numpy as np import tensorflow.compat.v2 as tf from tf_keras import backend from tf_keras import losses from tf_keras import metrics as metrics_module from tf_keras import optimizers from tf_keras.distribute import distributed_training_utils from tf_keras.distribute import distributed_training_utils_v1 from tf_keras.engine import base_layer from tf_keras.engine import training as training_lib from tf_keras.engine import training_arrays_v1 from tf_keras.engine import training_distributed_v1 from tf_keras.engine import training_eager_v1 from tf_keras.engine import training_generator_v1 from tf_keras.engine import training_utils from tf_keras.engine import training_utils_v1 from tf_keras.mixed_precision import loss_scale_optimizer from tf_keras.optimizers import optimizer_v1 from tf_keras.optimizers.legacy import optimizer_v2 from tf_keras.saving.legacy import saving_utils from tf_keras.saving.legacy.saved_model import model_serialization from tf_keras.utils import data_utils from tf_keras.utils import layer_utils from tf_keras.utils import losses_utils from tf_keras.utils import tf_inspect from tf_keras.utils import tf_utils from tf_keras.utils.mode_keys import ModeKeys # isort: off from tensorflow.python.platform import tf_logging as logging try: from scipy.sparse import issparse except ImportError: issparse = None class Model(training_lib.Model): """A model groups layers into an object with training & inference features. There are two ways to instantiate a `Model`: 1 - With the "functional API", where you start from `Input`, you chain layer calls to specify the model's forward pass, and finally you create your model from inputs and outputs: ```python import tensorflow as tf inputs = tf.keras.Input(shape=(3,)) x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs) outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x) model = tf.keras.Model(inputs=inputs, outputs=outputs) ``` 2 - By subclassing the `Model` class: in that case, you should define your layers in `__init__` and you should implement the model's forward pass in `call`. 
```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): super().__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) def call(self, inputs): x = self.dense1(inputs) return self.dense2(x) model = MyModel() ``` If you subclass `Model`, you can optionally have a `training` argument (boolean) in `call`, which you can use to specify a different behavior in training and inference: ```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): super().__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) self.dropout = tf.keras.layers.Dropout(0.5) def call(self, inputs, training=False): x = self.dense1(inputs) if training: x = self.dropout(x, training=training) return self.dense2(x) model = MyModel() ``` """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # initializing _distribution_strategy here since it is possible to call # predict on a model without compiling it. self._distribution_strategy = None self._compile_time_distribution_strategy = None if ( tf.compat.v1.executing_eagerly_outside_functions() and tf.distribute.has_strategy() ): self._set_strategy(tf.distribute.get_strategy()) # This flag is used to track if the user is using the deprecated path of # passing distribution strategy to compile rather than creating the # model under distribution strategy scope. self._compile_distribution = False self._run_eagerly = None self._experimental_run_tf_function = ( tf.compat.v1.executing_eagerly_outside_functions() ) self._v1_compile_was_called = False def _init_batch_counters(self): pass # Batch counters should not be created in legacy graph mode. @tf.__internal__.tracking.no_automatic_dependency_tracking def _set_strategy(self, strategy): self._compile_time_distribution_strategy = strategy def get_weights(self): """Retrieves the weights of the model. Returns: A flat list of Numpy arrays. """ strategy = ( self._distribution_strategy or self._compile_time_distribution_strategy ) if strategy: with strategy.scope(): return base_layer.Layer.get_weights(self) return base_layer.Layer.get_weights(self) def load_weights(self, filepath, by_name=False, skip_mismatch=False): """Loads all layer weights, either from a TensorFlow or an HDF5 file. If `by_name` is False weights are loaded based on the network's topology. This means the architecture should be the same as when the weights were saved. Note that layers that don't have weights are not taken into account in the topological ordering, so adding or removing layers is fine as long as they don't have weights. If `by_name` is True, weights are loaded into layers only if they share the same name. This is useful for fine-tuning or transfer-learning models where some of the layers have changed. Only topological loading (`by_name=False`) is supported when loading weights from the TensorFlow format. Note that topological loading differs slightly between TensorFlow and HDF5 formats for user-defined classes inheriting from `tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the TensorFlow format loads based on the object-local names of attributes to which layers are assigned in the `Model`'s constructor. Args: filepath: String, path to the weights file to load. For weight files in TensorFlow format, this is the file prefix (the same as was passed to `save_weights`). 
            by_name: Boolean, whether to load weights by name or by
                topological order. Only topological loading is supported for
                weight files in TensorFlow format.
            skip_mismatch: Boolean, whether to skip loading of layers where
                there is a mismatch in the number of weights, or a mismatch in
                the shape of the weight (only valid when `by_name=True`).

        Returns:
            When loading a weight file in TensorFlow format, returns the same
            status object as `tf.train.Checkpoint.restore`. When graph
            building, restore ops are run automatically as soon as the network
            is built (on first call for user-defined classes inheriting from
            `Model`, immediately if it is already built).

            When loading weights in HDF5 format, returns `None`.

        Raises:
            ImportError: If h5py is not available and the weight file is in
                HDF5 format.
            ValueError: If `skip_mismatch` is set to `True` when `by_name` is
                `False`.
        """
        if backend.is_tpu_strategy(self._distribution_strategy):
            if self._distribution_strategy.extended.steps_per_run > 1 and (
                not saving_utils.is_hdf5_filepath(filepath)
            ):
                raise ValueError(
                    "Load weights is not yet supported with TPUStrategy "
                    "with steps_per_run greater than 1."
                )
        return super().load_weights(
            filepath, by_name=by_name, skip_mismatch=skip_mismatch
        )

    @tf.__internal__.tracking.no_automatic_dependency_tracking
    def compile(
        self,
        optimizer="rmsprop",
        loss=None,
        metrics=None,
        loss_weights=None,
        sample_weight_mode=None,
        weighted_metrics=None,
        target_tensors=None,
        distribute=None,
        **kwargs,
    ):
        """Configures the model for training.

        Args:
            optimizer: String (name of optimizer) or optimizer instance. See
                `tf.keras.optimizers`.
            loss: String (name of objective function), objective function or
                `tf.keras.losses.Loss` instance. See `tf.keras.losses`. An
                objective function is any callable with the signature
                `scalar_loss = fn(y_true, y_pred)`. If the model has multiple
                outputs, you can use a different loss on each output by
                passing a dictionary or a list of losses. The loss value that
                will be minimized by the model will then be the sum of all
                individual losses.
            metrics: List of metrics to be evaluated by the model during
                training and testing. Typically you will use
                `metrics=['accuracy']`. To specify different metrics for
                different outputs of a multi-output model, you could also pass
                a dictionary, such as `metrics={'output_a': 'accuracy',
                'output_b': ['accuracy', 'mse']}`. You can also pass a list
                (len = len(outputs)) of lists of metrics such as
                `metrics=[['accuracy'], ['accuracy', 'mse']]` or
                `metrics=['accuracy', ['accuracy', 'mse']]`.
            loss_weights: Optional list or dictionary specifying scalar
                coefficients (Python floats) to weight the loss contributions
                of different model outputs. The loss value that will be
                minimized by the model will then be the *weighted sum* of all
                individual losses, weighted by the `loss_weights` coefficients.
                If a list, it is expected to have a 1:1 mapping to the model's
                outputs. If a dict, it is expected to map output names
                (strings) to scalar coefficients.
            sample_weight_mode: If you need to do timestep-wise sample
                weighting (2D weights), set this to `"temporal"`. `None`
                defaults to sample-wise weights (1D). If the model has
                multiple outputs, you can use a different `sample_weight_mode`
                on each output by passing a dictionary or a list of modes.
                Defaults to `None`.
            weighted_metrics: List of metrics to be evaluated and weighted by
                sample_weight or class_weight during training and testing.
            target_tensors: By default, TF-Keras will create placeholders for
                the model's target, which will be fed with the target data
                during training. If instead you would like to use your own
                target tensors (in turn, TF-Keras will not expect external
                Numpy data for these targets at training time), you can
                specify them via the `target_tensors` argument. It can be a
                single tensor (for a single-output model), a list of tensors,
                or a dict mapping output names to target tensors.
            distribute: NOT SUPPORTED IN TF 2.0, please create and compile the
                model under distribution strategy scope instead of passing it
                to compile.
            **kwargs: Any additional arguments.

        Raises:
            ValueError: In case of invalid arguments for
                `optimizer`, `loss`, `metrics` or `sample_weight_mode`.
        """
        self._assert_built_as_v1()
        self._run_eagerly = kwargs.pop("run_eagerly", None)
        self._experimental_run_tf_function = kwargs.pop(
            "experimental_run_tf_function", True
        )
        self._v1_compile_was_called = True

        # Prepare Session arguments (legacy).
        kwargs.pop("cloning", None)  # Legacy DistStrat argument, never used.
        self._from_serialized = kwargs.pop("from_serialized", False)
        allowed_kwargs = {"feed_dict", "fetches", "options", "run_metadata"}
        unknown_kwargs = set(kwargs.keys()) - allowed_kwargs
        if unknown_kwargs:
            raise TypeError(
                f"Invalid keyword argument(s) in `compile`: {unknown_kwargs}"
            )
        self._function_kwargs = kwargs
        if self._function_kwargs:
            self._experimental_run_tf_function = False
            if self.run_eagerly:
                raise ValueError(
                    "Session keyword arguments are not supported "
                    "when `run_eagerly=True`. You passed the following "
                    "Session arguments: %s" % (self._function_kwargs,)
                )

        self._set_optimizer(optimizer)
        is_any_keras_optimizer_v1 = any(
            (
                isinstance(opt, optimizer_v1.Optimizer)
                and not isinstance(opt, optimizer_v1.TFOptimizer)
            )
            for opt in tf.nest.flatten(self.optimizer)
        )

        if (
            is_any_keras_optimizer_v1
            and tf.compat.v1.executing_eagerly_outside_functions()
        ):
            raise ValueError(
                f"`tf.compat.v1.keras` Optimizer ({optimizer}) is "
                "not supported when eager execution is enabled. Use a "
                "`tf.keras` Optimizer instead, or disable eager "
                "execution."
            )

        if (
            target_tensors is not None
        ) or not tf.compat.v1.executing_eagerly_outside_functions():
            # Fallback out of things that aren't supported with v2 loops
            self._experimental_run_tf_function = False

        if distribute is not None:
            if (
                tf.__internal__.tf2.enabled()
                or self._experimental_run_tf_function
            ):
                raise ValueError(
                    "Distribute argument in compile is not available in "
                    "TF 2.0. Please create the model under the distribution "
                    "strategy scope."
                )
            logging.warning(
                "Distribute argument in compile is deprecated. Please "
                "create the model under the distribution strategy scope."
            )
            self._distribution_strategy = distribute
            self._compile_distribution = True
        else:
            if tf.distribute.has_strategy():
                # When the user builds the model in the DS scope and cross
                # replica context we want distribution strategy to be set, but
                # when building the replica copies of the models internally we
                # should not compile with distribution strategy and should
                # use the default compilation path.
if tf.distribute.in_cross_replica_context(): self._distribution_strategy = tf.distribute.get_strategy() if isinstance( self._distribution_strategy, tf.compat.v1.distribute.experimental.ParameterServerStrategy, ): raise NotImplementedError( "`tf.compat.v1.distribute.experimental.ParameterServerStrategy`" " currently only works with the tf.Estimator API" ) if isinstance( self._distribution_strategy, tf.distribute.experimental.ParameterServerStrategy, ): raise NotImplementedError( "`tf.distribute.experimental.ParameterServerStrategy` is only " "supported in TF2." ) if not self._experimental_run_tf_function: self._validate_compile_param_for_distribution_strategy( self.run_eagerly, sample_weight_mode, target_tensors, weighted_metrics, ) # We've disabled automatic dependency tracking for this method, but do # want to add a checkpoint dependency on the optimizer if it's # trackable. if isinstance(self.optimizer, tf.__internal__.tracking.Trackable): self._track_trackable( self.optimizer, name="optimizer", overwrite=True ) self.loss = loss or {} self.loss_weights = loss_weights self.sample_weight_mode = sample_weight_mode self._compile_metrics = metrics or [] self._compile_weighted_metrics = weighted_metrics if self.run_eagerly and target_tensors is not None: raise ValueError( "target_tensors argument is not supported when " "running a model eagerly." ) # _training_endpoints contains a list of _TrainingEndpoint object, which # has all the model output/target/loss and related metadata. self._training_endpoints = [] # Used to freeze the behavior of the Model once `compile` has been # called. self._compiled_trainable_state = self._get_trainable_state() # Set tf.distribute.Strategy specific parameters. self._distributed_model_cache = {} self._distributed_function_cache = {} # Clear any `_eager_losses` that was added. self._clear_losses() if ( not tf.executing_eagerly() and self._distribution_strategy is not None ): # Ensures a Session is created and configured correctly for # Distribution Strategy. backend.configure_and_create_distributed_session( self._distribution_strategy ) # Initialize model metric attributes. self._init_metric_attributes() if not self.built or not self.inputs or not self.outputs: # Model is not compilable because it does not know its number of # inputs and outputs, nor their shapes and names. We will compile # after the first time the model gets called on training data. return self._is_compiled = True # Prepare list of loss functions, same size of model outputs. self.loss_functions = training_utils_v1.prepare_loss_functions( self.loss, self.output_names ) target_tensors = self._process_target_tensor_for_compile(target_tensors) for o, n, l, t in zip( self.outputs, self.output_names, self.loss_functions, target_tensors ): endpoint = _TrainingEndpoint(o, n, l) endpoint.create_training_target(t, run_eagerly=self.run_eagerly) self._training_endpoints.append(endpoint) # Prepare list loss weights, same size of model outputs. training_utils_v1.prepare_loss_weights( self._training_endpoints, loss_weights ) # Initialization for Eager mode execution. if self.run_eagerly: self._compile_eagerly(metrics, weighted_metrics, sample_weight_mode) return with backend.get_graph().as_default(): # Save all metric attributes per output of the model. self._cache_output_metric_attributes(metrics, weighted_metrics) # Set metric attributes on model. self._set_metric_attributes() # Invoke metric functions (unweighted) for all the outputs. 
self._handle_metrics( self.outputs, targets=self._targets, skip_target_masks=self._prepare_skip_target_masks(), masks=self._prepare_output_masks(), ) # Prepare sample weight modes. List with the same length as model # outputs. training_utils_v1.prepare_sample_weight_modes( self._training_endpoints, sample_weight_mode ) # Creates the model loss and weighted metrics sub-graphs. self._compile_weights_loss_and_weighted_metrics() # Functions for train, test and predict will # be compiled lazily when required. # This saves time when the user is not using all functions. self.train_function = None self.test_function = None self.predict_function = None # Collected trainable weights, sorted in topological order. self._collected_trainable_weights = self.trainable_weights # Validate all variables were correctly created in distribution # scope. if self._distribution_strategy and not self._compile_distribution: for v in self.variables: strategy = self._distribution_strategy if not strategy.extended.variable_created_in_scope(v): raise ValueError( "Variable (%s) was not created in the distribution " "strategy scope of (%s). It is most likely due to " "not all layers or the model or optimizer being " "created outside the distribution strategy scope. " "Try to make sure your code looks similar " "to the following.\n" "with strategy.scope():\n" " model=_create_model()\n" " model.compile(...)" % (v, strategy) ) @tf.__internal__.tracking.no_automatic_dependency_tracking def _init_distributed_function_cache_if_not_compiled(self): if not hasattr(self, "_distributed_function_cache"): self._distributed_function_cache = {} @property def metrics(self): """Returns the model's metrics added using `compile`, `add_metric` APIs.""" metrics = [] if self._is_compiled: if not hasattr(self, "_v1_compile_was_called"): # See b/155687393 for more details, the model is created as a v2 # instance but converted to v1. Fallback to use base Model to # retrieve the metrics. return super().metrics metrics += self._compile_metric_functions metrics.extend(self._metrics) metrics.extend( _get_metrics_from_layers( list(self._flatten_layers(include_self=False, recursive=False)) ) ) return metrics @property def metrics_names(self): """Returns the model's display labels for all outputs.""" # This property includes all output names including `loss` and # per-output losses for backward compatibility. metrics_names = ["loss"] if self._is_compiled: if not hasattr(self, "_v1_compile_was_called"): # See b/155687393 for more details, the model is created as a v2 # instance but converted to v1. Fallback to use base Model to # retrieve the metrics name return super().metrics_names # Add output loss metric names to the metric names list. if len(self._training_endpoints) > 1: metrics_names.extend( [ e.loss_name() for e in self._training_endpoints if not e.should_skip_target() ] ) # Add all metric names. metrics_names += [m.name for m in self.metrics] return metrics_names @property def run_eagerly(self): """Settable attribute indicating whether the model should run eagerly. Running eagerly means that your model will be run step by step, like Python code. Your model might run slower, but it should become easier for you to debug it by stepping into individual layer calls. By default, we will attempt to compile your model to a static graph to deliver the best execution performance. Returns: Boolean, whether the model should run eagerly. 
""" if self._run_eagerly is True and not tf.executing_eagerly(): raise ValueError( "You can only set `run_eagerly=True` if eager execution " "is enabled." ) if not self.dynamic: if self._run_eagerly is None: # Respect `tf.config.run_functions_eagerly` unless # `run_eagerly` was explicitly passed to `compile`. return tf.config.functions_run_eagerly() else: return self._run_eagerly else: if not tf.executing_eagerly(): raise ValueError( "Your model contains layers that can only be " "successfully run in eager execution (layers " "constructed with `dynamic=True`). " "You must enable eager execution with " "`tf.enable_eager_execution()`." ) if self._run_eagerly is False: # TODO(fchollet): consider using py_func to enable this. raise ValueError( "Your model contains layers that can only be " "successfully run in eager execution (layers " "constructed with `dynamic=True`). " "You cannot set `run_eagerly=False`." ) return tf.executing_eagerly() @run_eagerly.setter def run_eagerly(self, value): self._run_eagerly = value def _select_training_loop(self, inputs): """Select training loop for fit/eval/predict based on the inputs.""" # TODO(kaftan) or TODO(scottzhu): This check should eventually be nicely # integrated into the data adapters in the v2 loop. We can't do this yet # because we currently have to fall back for unhandled data types. if isinstance(inputs, (tf.compat.v1.data.Iterator, tf.data.Iterator)): raise ValueError( "For performance reasons TF-Keras `fit`, `evaluate` and" "`predict` accept tf.data `Datasets` as input but not " "iterators that have been manually generated from " "Datasets by users. Please directly pass in the " "original `Dataset` object instead of passing in " "`iter(dataset)`." ) # Case 1: distribution strategy. if self._distribution_strategy: if self._in_multi_worker_mode(): return training_distributed_v1.DistributionMultiWorkerTrainingLoop( # noqa: E501 training_distributed_v1.DistributionSingleWorkerTrainingLoop() # noqa: E501 ) else: return ( training_distributed_v1.DistributionSingleWorkerTrainingLoop() # noqa: E501 ) # Case 2: generator-like. Input is Python generator, or Sequence object, # or a non-distributed Dataset or iterator in eager execution. if data_utils.is_generator_or_sequence(inputs): return training_generator_v1.GeneratorOrSequenceTrainingLoop() if training_utils_v1.is_eager_dataset_or_iterator(inputs): return training_generator_v1.EagerDatasetOrIteratorTrainingLoop() # Case 3: Symbolic tensors or Numpy array-like. # This includes Datasets and iterators in graph mode (since they # generate symbolic tensors). if self.run_eagerly: return training_generator_v1.GeneratorLikeTrainingLoop() else: return training_arrays_v1.ArrayLikeTrainingLoop() def fit( self, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, max_queue_size=10, workers=1, use_multiprocessing=False, **kwargs, ): """Trains the model for a fixed number of epochs (dataset iterations). Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset. Should return a tuple of either `(inputs, targets)` or `(inputs, targets, sample_weights)`. 
              - A generator or `keras.utils.Sequence` returning
                `(inputs, targets)` or `(inputs, targets, sample_weights)`.
            y: Target data. Like the input data `x`,
                it could be either Numpy array(s) or TensorFlow tensor(s).
                It should be consistent with `x` (you cannot have Numpy
                inputs and tensor targets, or inversely). If `x` is a
                dataset, generator, or `keras.utils.Sequence` instance, `y`
                should not be specified (since targets will be obtained
                from `x`).
            batch_size: Integer or `None`.
                Number of samples per gradient update.
                If unspecified, `batch_size` will default to 32.
                Do not specify the `batch_size` if your data is in the
                form of symbolic tensors, datasets, generators, or
                `keras.utils.Sequence` instances (since they generate
                batches).
            epochs: Integer. Number of epochs to train the model.
                An epoch is an iteration over the entire `x` and `y`
                data provided.
                Note that in conjunction with `initial_epoch`,
                `epochs` is to be understood as "final epoch".
                The model is not trained for a number of iterations
                given by `epochs`, but merely until the epoch
                of index `epochs` is reached.
            verbose: 0, 1, or 2. Verbosity mode.
                0 = silent, 1 = progress bar, 2 = one line per epoch.
                Note that the progress bar is not particularly useful when
                logged to a file, so verbose=2 is recommended when not
                running interactively (e.g., in a production environment).
            callbacks: List of `keras.callbacks.Callback` instances.
                List of callbacks to apply during training.
                See `tf.keras.callbacks`.
            validation_split: Float between 0 and 1.
                Fraction of the training data to be used as validation data.
                The model will set apart this fraction of the training data,
                will not train on it, and will evaluate
                the loss and any model metrics
                on this data at the end of each epoch.
                The validation data is selected from the last samples
                in the `x` and `y` data provided, before shuffling. This
                argument is not supported when `x` is a dataset, generator
                or `keras.utils.Sequence` instance.
            validation_data: Data on which to evaluate
                the loss and any model metrics at the end of each epoch.
                The model will not be trained on this data.
                `validation_data` will override `validation_split`.
                `validation_data` could be:
                  - tuple `(x_val, y_val)` of Numpy arrays or tensors
                  - tuple `(x_val, y_val, val_sample_weights)` of Numpy
                    arrays
                  - dataset
                For the first two cases, `batch_size` must be provided.
                For the last case, `validation_steps` could be provided.
            shuffle: Boolean (whether to shuffle the training data
                before each epoch) or str (for 'batch').
                'batch' is a special option for dealing with the
                limitations of HDF5 data; it shuffles in batch-sized chunks.
                Has no effect when `steps_per_epoch` is not `None`.
            class_weight: Optional dictionary mapping class indices
                (integers) to a weight (float) value, used for weighting the
                loss function (during training only). This can be useful to
                tell the model to "pay more attention" to samples from an
                under-represented class.
            sample_weight: Optional Numpy array of weights for
                the training samples, used for weighting the loss function
                (during training only). You can either pass a flat (1D)
                Numpy array with the same length as the input samples
                (1:1 mapping between weights and samples),
                or in the case of temporal data,
                you can pass a 2D array with shape
                `(samples, sequence_length)`,
                to apply a different weight to every timestep of every
                sample. In this case you should make sure to specify
                `sample_weight_mode="temporal"` in `compile()`.
                This argument is not supported when `x` is a dataset,
                generator, or `keras.utils.Sequence` instance, instead
                provide the sample_weights as the third element of `x`.
            initial_epoch: Integer.
                Epoch at which to start training
                (useful for resuming a previous training run).
            steps_per_epoch: Integer or `None`.
                Total number of steps (batches of samples)
                before declaring one epoch finished and starting the
                next epoch. When training with input tensors such as
                TensorFlow data tensors, the default `None` is equal to
                the number of samples in your dataset divided by
                the batch size, or 1 if that cannot be determined. If x is a
                `tf.data` dataset, and 'steps_per_epoch'
                is None, the epoch will run until the input dataset is
                exhausted. This argument is not supported with array inputs.
            validation_steps: Only relevant if `validation_data` is provided
                and is a `tf.data` dataset. Total number of steps (batches
                of samples) to draw before stopping when performing
                validation at the end of every epoch. If 'validation_steps'
                is None, validation will run until the `validation_data`
                dataset is exhausted. In the case of an infinite dataset, it
                will run into an infinite loop. If 'validation_steps' is
                specified and only part of the dataset will be consumed, the
                evaluation will start from the beginning of the dataset at
                each epoch. This ensures that the same validation samples
                are used every time.
            validation_freq: Only relevant if validation data is provided.
                Integer or `collections.abc.Container` instance (e.g. list,
                tuple, etc.). If an integer, specifies how many training
                epochs to run before a new validation run is performed, e.g.
                `validation_freq=2` runs validation every 2 epochs. If a
                Container, specifies the epochs on which to run validation,
                e.g. `validation_freq=[1, 2, 10]` runs validation at the end
                of the 1st, 2nd, and 10th epochs.
            max_queue_size: Integer. Used for generator or
                `keras.utils.Sequence` input only. Maximum size for the
                generator queue. If unspecified, `max_queue_size` will
                default to 10.
            workers: Integer. Used for generator or `keras.utils.Sequence`
                input only. Maximum number of processes to spin up when
                using process-based threading. If unspecified, `workers`
                will default to 1. If 0, will execute the generator on the
                main thread.
            use_multiprocessing: Boolean. Used for generator or
                `keras.utils.Sequence` input only. If `True`, use
                process-based threading. If unspecified,
                `use_multiprocessing` will default to `False`. Note that
                because this implementation relies on multiprocessing, you
                should not pass non-pickleable arguments to the generator as
                they can't be passed easily to children processes.
            **kwargs: Used for backwards compatibility.

        Returns:
            A `History` object. Its `History.history` attribute is
            a record of training loss values and metrics values
            at successive epochs, as well as validation loss values
            and validation metrics values (if applicable).

        Raises:
            RuntimeError: If the model was never compiled.
            ValueError: In case of mismatch between the provided input data
                and what the model expects.
        """
        self._assert_built_as_v1()
        # Legacy support
        if "nb_epoch" in kwargs:
            logging.warning(
                "The `nb_epoch` argument in `fit` has been renamed `epochs`."
) epochs = kwargs.pop("nb_epoch") if kwargs: raise TypeError("Unrecognized keyword arguments: " + str(kwargs)) self._assert_compile_was_called() self._check_call_args("fit") func = self._select_training_loop(x) return func.fit( self, x=x, y=y, batch_size=batch_size, epochs=epochs, verbose=verbose, callbacks=callbacks, validation_split=validation_split, validation_data=validation_data, shuffle=shuffle, class_weight=class_weight, sample_weight=sample_weight, initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, validation_freq=validation_freq, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, ) def evaluate( self, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, ): """Returns the loss value & metrics values for the model in test mode. Computation is done in batches (see the `batch_size` arg.) Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset. - A generator or `keras.utils.Sequence` instance. y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset, generator or `keras.utils.Sequence` instance, `y` should not be specified (since targets will be obtained from the iterator/dataset). batch_size: Integer or `None`. Number of samples per batch of computation. If unspecified, `batch_size` will default to 32. Do not specify the `batch_size` if your data is in the form of symbolic tensors, dataset, generators, or `keras.utils.Sequence` instances (since they generate batches). verbose: 0 or 1. Verbosity mode. 0 = silent, 1 = progress bar. sample_weight: Optional Numpy array of weights for the test samples, used for weighting the loss function. You can either pass a flat (1D) Numpy array with the same length as the input samples (1:1 mapping between weights and samples), or in the case of temporal data, you can pass a 2D array with shape `(samples, sequence_length)`, to apply a different weight to every timestep of every sample. In this case you should make sure to specify `sample_weight_mode="temporal"` in `compile()`. This argument is not supported when `x` is a dataset, instead pass sample weights as the third element of `x`. steps: Integer or `None`. Total number of steps (batches of samples) before declaring the evaluation round finished. Ignored with the default value of `None`. If x is a `tf.data` dataset and `steps` is None, 'evaluate' will run until the dataset is exhausted. This argument is not supported with array inputs. callbacks: List of `keras.callbacks.Callback` instances. List of callbacks to apply during evaluation. See [callbacks](/api_docs/python/tf/tf_keras/callbacks). max_queue_size: Integer. Used for generator or `keras.utils.Sequence` input only. Maximum size for the generator queue. If unspecified, `max_queue_size` will default to 10. workers: Integer. Used for generator or `keras.utils.Sequence` input only. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. 
If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. Used for generator or `keras.utils.Sequence` input only. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-pickleable arguments to the generator as they can't be passed easily to children processes. Returns: Scalar test loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: in case of invalid arguments. """ self._assert_built_as_v1() self._assert_compile_was_called() self._check_call_args("evaluate") func = self._select_training_loop(x) return func.evaluate( self, x=x, y=y, batch_size=batch_size, verbose=verbose, sample_weight=sample_weight, steps=steps, callbacks=callbacks, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, ) def predict( self, x, batch_size=None, verbose=0, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, ): """Generates output predictions for the input samples. Computation is done in batches (see the `batch_size` arg.) Args: x: Input samples. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A `tf.data` dataset. - A generator or `keras.utils.Sequence` instance. batch_size: Integer or `None`. Number of samples per batch of computation. If unspecified, `batch_size` will default to 32. Do not specify the `batch_size` if your data is in the form of symbolic tensors, dataset, generators, or `keras.utils.Sequence` instances (since they generate batches). verbose: Verbosity mode, 0 or 1. steps: Total number of steps (batches of samples) before declaring the prediction round finished. Ignored with the default value of `None`. If x is a `tf.data` dataset and `steps` is None, `predict` will run until the input dataset is exhausted. callbacks: List of `keras.callbacks.Callback` instances. List of callbacks to apply during prediction. See [callbacks](/api_docs/python/tf/tf_keras/callbacks). max_queue_size: Integer. Used for generator or `keras.utils.Sequence` input only. Maximum size for the generator queue. If unspecified, `max_queue_size` will default to 10. workers: Integer. Used for generator or `keras.utils.Sequence` input only. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. Used for generator or `keras.utils.Sequence` input only. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-pickleable arguments to the generator as they can't be passed easily to children processes. Returns: Numpy array(s) of predictions. Raises: ValueError: In case of mismatch between the provided input data and the model's expectations, or in case a stateful model receives a number of samples that is not a multiple of the batch size. 
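
        Example (a minimal sketch; the shapes and data are illustrative):

        ```python
        import numpy as np
        predictions = model.predict(np.random.rand(16, 4), batch_size=8)
        # `predictions` is a Numpy array with one row per input sample.
        ```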
""" self._assert_built_as_v1() self._check_call_args("predict") func = self._select_training_loop(x) return func.predict( self, x=x, batch_size=batch_size, verbose=verbose, steps=steps, callbacks=callbacks, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, ) def reset_metrics(self): """Resets the state of metrics.""" metrics = self._get_training_eval_metrics() for m in metrics: m.reset_state() # Reset metrics on all the distributed (cloned) models. if self._distribution_strategy: distributed_training_utils_v1._reset_metrics(self) def train_on_batch( self, x, y=None, sample_weight=None, class_weight=None, reset_metrics=True, ): """Runs a single gradient update on a single batch of data. Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset. y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset, `y` should not be specified (since targets will be obtained from the iterator). sample_weight: Optional array of the same length as x, containing weights to apply to the model's loss for each sample. In the case of temporal data, you can pass a 2D array with shape (samples, sequence_length), to apply a different weight to every timestep of every sample. In this case you should make sure to specify sample_weight_mode="temporal" in compile(). This argument is not supported when `x` is a dataset. class_weight: Optional dictionary mapping class indices (integers) to a weight (float) to apply to the model's loss for the samples from this class during training. This can be useful to tell the model to "pay more attention" to samples from an under-represented class. reset_metrics: If `True`, the metrics returned will be only for this batch. If `False`, the metrics will be statefully accumulated across batches. Returns: Scalar training loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: In case of invalid user-provided arguments. """ self._assert_compile_was_called() self._check_call_args("train_on_batch") # If at this point we are in the replica context, then it is okay to # execute the Eager code path. The expected way to get here is to call # `fit` that calls `train_on_batch` on each replica. if ( self._distribution_strategy and tf.distribute.in_cross_replica_context() ): raise NotImplementedError( "`train_on_batch` is not supported for models " "distributed with tf.distribute.Strategy." ) # Validate and standardize user data. x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight, class_weight=class_weight, extract_tensors_from_dataset=True, ) # If `self._distribution_strategy` is True, then we are in a replica # context at this point because of the check above. `train_on_batch` is # being run for each replica by `self._distribution_strategy` and the # same code path as Eager is expected to be taken. 
if self.run_eagerly or self._distribution_strategy: output_dict = training_eager_v1.train_on_batch( self, x, y, sample_weights=sample_weights, output_loss_metrics=self._output_loss_metrics, ) outputs = ( output_dict["total_loss"] + output_dict["output_losses"] + output_dict["metrics"] ) outputs = [_non_none_constant_value(v) for v in outputs] else: x = training_utils_v1.ModelInputs(x).as_list() ins = x + list(y or []) + list(sample_weights or []) if not isinstance(backend.symbolic_learning_phase(), int): ins += [True] # Add learning phase value. self._update_sample_weight_modes(sample_weights=sample_weights) self._make_train_function() outputs = self.train_function(ins) if reset_metrics: self.reset_metrics() if len(outputs) == 1: return outputs[0] return outputs def test_on_batch(self, x, y=None, sample_weight=None, reset_metrics=True): """Test the model on a single batch of samples. Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset. y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset `y` should not be specified (since targets will be obtained from the iterator). sample_weight: Optional array of the same length as x, containing weights to apply to the model's loss for each sample. In the case of temporal data, you can pass a 2D array with shape (samples, sequence_length), to apply a different weight to every timestep of every sample. In this case you should make sure to specify sample_weight_mode="temporal" in compile(). This argument is not supported when `x` is a dataset. reset_metrics: If `True`, the metrics returned will be only for this batch. If `False`, the metrics will be statefully accumulated across batches. Returns: Scalar test loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: In case of invalid user-provided arguments. """ self._assert_compile_was_called() self._check_call_args("test_on_batch") if ( self._distribution_strategy and tf.distribute.in_cross_replica_context() ): raise NotImplementedError( "`test_on_batch` is not supported for models " "distributed with tf.distribute.Strategy." ) # Validate and standardize user data. x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight, extract_tensors_from_dataset=True ) # If `self._distribution_strategy` is True, then we are in a replica # context at this point. 
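        # A sketch of the eager path below (inputs are illustrative); with
        # `reset_metrics=False`, repeated calls accumulate metric state:
        #   results = model.test_on_batch(x_batch, y_batch,
        #                                 reset_metrics=False)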
if self.run_eagerly or self._distribution_strategy: output_dict = training_eager_v1.test_on_batch( self, x, y, sample_weights=sample_weights, output_loss_metrics=self._output_loss_metrics, ) outputs = ( output_dict["total_loss"] + output_dict["output_losses"] + output_dict["metrics"] ) outputs = [_non_none_constant_value(v) for v in outputs] else: x = training_utils_v1.ModelInputs(x).as_list() inputs = x + list(y or []) + list(sample_weights or []) self._update_sample_weight_modes(sample_weights=sample_weights) self._make_test_function() outputs = self.test_function(inputs) if reset_metrics: self.reset_metrics() if len(outputs) == 1: return outputs[0] return outputs def predict_on_batch(self, x): """Returns predictions for a single batch of samples. Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A `tf.data` dataset. Returns: Numpy array(s) of predictions. Raises: ValueError: In case of mismatch between given number of inputs and expectations of the model. """ self._check_call_args("predict_on_batch") if ( self._distribution_strategy and tf.distribute.in_cross_replica_context() ): raise NotImplementedError( "`predict_on_batch` is not supported for models distributed " "with tf.distribute.Strategy." ) # Validate and standardize user data. inputs, _, _ = self._standardize_user_data( x, extract_tensors_from_dataset=True ) # If `self._distribution_strategy` is True, then we are in a replica # context at this point. if self.run_eagerly or self._distribution_strategy: inputs = training_utils_v1.cast_if_floating_dtype(inputs) if isinstance(inputs, collections.abc.Sequence): # Unwrap lists with only one input, as we do when training on # batch if len(inputs) == 1: inputs = inputs[0] return self(inputs) self._make_predict_function() outputs = self.predict_function(inputs) if len(outputs) == 1: return outputs[0] return outputs def fit_generator( self, generator, steps_per_epoch=None, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, validation_freq=1, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0, ): """Fits the model on data yielded batch-by-batch by a Python generator. DEPRECATED: `Model.fit` now supports generators, so there is no longer any need to use this endpoint. """ warnings.warn( "`model.fit_generator` is deprecated and " "will be removed in a future version. " "Please use `Model.fit`, which supports generators.", stacklevel=2, ) return self.fit( generator, steps_per_epoch=steps_per_epoch, epochs=epochs, verbose=verbose, callbacks=callbacks, validation_data=validation_data, validation_steps=validation_steps, validation_freq=validation_freq, class_weight=class_weight, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, shuffle=shuffle, initial_epoch=initial_epoch, ) def evaluate_generator( self, generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0, ): """Evaluates the model on a data generator. DEPRECATED: `Model.evaluate` now supports generators, so there is no longer any need to use this endpoint. """ warnings.warn( "`Model.evaluate_generator` is deprecated and " "will be removed in a future version. 
" "Please use `Model.evaluate`, which supports generators.", stacklevel=2, ) self._check_call_args("evaluate_generator") return self.evaluate( generator, steps=steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, verbose=verbose, callbacks=callbacks, ) def predict_generator( self, generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0, ): """Generates predictions for the input samples from a data generator. DEPRECATED: `Model.predict` now supports generators, so there is no longer any need to use this endpoint. """ warnings.warn( "`Model.predict_generator` is deprecated and " "will be removed in a future version. " "Please use `Model.predict`, which supports generators.", stacklevel=2, ) return self.predict( generator, steps=steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, verbose=verbose, callbacks=callbacks, ) def _check_call_args(self, method_name): """Check that `call` has only one positional arg.""" # Always allow first arg, regardless of arg name. fullargspec = self._call_spec.full_argspec if fullargspec.defaults: positional_args = fullargspec.args[: -len(fullargspec.defaults)] else: positional_args = fullargspec.args if "training" in positional_args: positional_args.remove("training") # self and first arg can be positional. if len(positional_args) > 2: extra_args = positional_args[2:] raise ValueError( "Models passed to `" + method_name + "` can only have `training` " "and the first argument in `call` as positional arguments, " "found: " + str(extra_args) + "." ) def _set_optimizer(self, optimizer): """Sets self.optimizer. Sets self.optimizer to `optimizer`, potentially wrapping it with a LossScaleOptimizer. Args: optimizer: The optimizer(s) to assign to self.optimizer. """ if isinstance(optimizer, (list, tuple)): self.optimizer = [optimizers.get(opt) for opt in optimizer] else: self.optimizer = optimizers.get(optimizer) if self._dtype_policy.name == "mixed_float16" and not isinstance( self.optimizer, loss_scale_optimizer.LossScaleOptimizer ): if isinstance(self.optimizer, list): raise ValueError( 'When the "mixed_float16" dtype policy is used, you ' "can only pass a single optimizer. Using policy %s " "and got optimizers: %s" % self._dtype_policy, self.optimizer, ) if not isinstance(self.optimizer, optimizer_v2.OptimizerV2): raise ValueError( '"optimizer" must be an instance of ' "tf.keras.optimizers.legacy.Optimizer when a dype policy " "with a loss scale is used, but got: %s. Using policy: " "%s" % (self.optimizer, self._dtype_policy) ) self.optimizer = loss_scale_optimizer.LossScaleOptimizer( self.optimizer ) def _prepare_validation_data( self, validation_data, batch_size, validation_steps ): """Unpack and check the validation data.""" ( val_x, val_y, val_sample_weights, ) = training_utils_v1.unpack_validation_data(validation_data) return self._standardize_user_data( val_x, val_y, sample_weight=val_sample_weights, batch_size=batch_size, steps=validation_steps, steps_name="validation_steps", ) def _validate_compile_param_for_distribution_strategy( self, run_eagerly, sample_weight_mode, target_tensors, weighted_metrics ): # Validate that arguments passed by the user to `compile` are supported # by tf.distribute.Strategy. if self._distribution_strategy: if sample_weight_mode: raise NotImplementedError( "sample_weight_mode is not supported with " "tf.distribute.Strategy." 
) if weighted_metrics: raise NotImplementedError( "weighted_metrics is not supported with " "tf.distribute.Strategy." ) if target_tensors: raise ValueError( "target_tensors is not supported with " "tf.distribute.Strategy." ) if run_eagerly: raise ValueError( "We currently do not support enabling `run_eagerly` with " "distribution strategy." ) if distributed_training_utils_v1.is_distributing_by_cloning( self ) and (not self.built or not self.inputs or not self.outputs): raise ValueError( "We currently do not support distribution strategy with a " "`Sequential` model that is created without `input_shape`/" "`input_dim` set in its first layer or a subclassed model." ) def _process_target_tensor_for_compile(self, target_tensors): if self.run_eagerly: # target tensor is not supported with run_eagerly. Create a list # with None as placeholder for each output. return [None for _ in self.output_names] if target_tensors is not None and not ( isinstance(target_tensors, list) and target_tensors == [] ): if isinstance(target_tensors, list): if len(target_tensors) != len(self.outputs): raise ValueError( "When passing a list as `target_tensors`, " "it should have one entry per model output. " "The model has %s outputs, " "but you passed target_tensors=%s" % (len(self.outputs), target_tensors) ) elif isinstance(target_tensors, dict): unexpected_target_tensor_names = set( target_tensors.keys() ).difference(self.output_names) if unexpected_target_tensor_names: raise ValueError( "Unknown entry in `target_tensors` dictionary: " '"{name}". ' "Only expected the following keys: {keys}".format( name=unexpected_target_tensor_names, keys=str(self.output_names), ) ) tmp_target_tensors = [] for name in self.output_names: tmp_target_tensors.append(target_tensors.get(name, None)) target_tensors = tmp_target_tensors elif tf.is_tensor(target_tensors): target_tensors = [target_tensors] else: raise TypeError( "Expected `target_tensors` to be a list or tuple or " "dict or a single tensor, but got:", target_tensors, ) else: # In case target tensor is empty or None, create a list with Nones # that has same length as self.output_names. With that, the None # check of target tensor can be skipped downstream. target_tensors = [None for _ in self.output_names] return target_tensors def _compile_eagerly(self, metrics, weighted_metrics, sample_weight_mode): # Prepare sample weight modes. List with the same length as model # outputs. training_utils_v1.prepare_sample_weight_modes( self._training_endpoints, sample_weight_mode ) # Prepare sample weights. self._prepare_sample_weights() # Save all metric attributes per output of the model. self._cache_output_metric_attributes(metrics, weighted_metrics) self.total_loss = None # Set metric attributes on model. self._set_metric_attributes() self._collected_trainable_weights = self.trainable_weights def _update_sample_weight_modes(self, sample_weights=None): """Updates sample weight modes based on training/eval inputs. Sample weight placeholders will be created for all or no outputs based on whether sample_weight is provided for any output. If model contains `_sample_weight_modes` we check if the input `sample_weights` corresponds to the sample weight modes. 1. Set sample weight mode to be 'temporal' for output i, if `compile` sample_weight_mode was set to `temporal` and sample weight inputs are given for one or more outputs. 2. Set sample weight mode to be 'samplewise' for output i, if `compile` sample_weight_mode was not set and sample weight inputs are given for one or more outputs. 3. 
Reset sample weight mode to None for output i if sample weight mode
        was set but there is no sample weight input.

        Args:
            sample_weights: List of sample weights of the same length as
                model outputs or None.
        """
        if not self._is_compiled:
            return
        if sample_weights and any(s is not None for s in sample_weights):
            for endpoint in self._training_endpoints:
                endpoint.sample_weight_mode = (
                    endpoint.sample_weight_mode or "samplewise"
                )
        else:
            for endpoint in self._training_endpoints:
                endpoint.sample_weight_mode = None

    def _recompile_weights_loss_and_weighted_metrics(self):
        if not self._is_compiled:
            return False
        recompile = any(
            e.sample_weights_mismatch() for e in self._training_endpoints
        )

        if recompile:
            self._compile_weights_loss_and_weighted_metrics()

        return recompile

    @tf.__internal__.tracking.no_automatic_dependency_tracking
    def _compile_weights_loss_and_weighted_metrics(self, sample_weights=None):
        """Compiles the model loss and weighted metric sub-graphs.

        This may be used to set graph tensors as sample weights (instead of
        creating placeholders). This functionality is necessary for
        `tf.keras.estimator.model_to_estimator`, which calls TF-Keras models
        in a v1 graph, and creates iterator tensors for inputs, targets, and
        sample weights.

        Args:
            sample_weights: List of tensors to use as the sample weights.
                Must be the same length as the number of outputs. If left as
                `None`, placeholders are used instead.
        """
        with backend.get_graph().as_default():
            if sample_weights is not None:
                self._update_sample_weight_modes(sample_weights)
            self._prepare_sample_weights(sample_weights)

            masks = self._prepare_output_masks()

            # Compute weighted metrics.
            self._handle_metrics(
                self.outputs,
                targets=self._targets,
                skip_target_masks=self._prepare_skip_target_masks(),
                sample_weights=self.sample_weights,
                masks=masks,
                return_weighted_metrics=True,
            )

            # Compute total loss.
            # Used to keep track of the total loss value (stateless).
            # e.g., total_loss = loss_weight_1 * output_1_loss_fn(...) +
            #                    loss_weight_2 * output_2_loss_fn(...) +
            #                    layer losses.
            self.total_loss = self._prepare_total_loss(masks)

    def _prepare_skip_target_masks(self):
        """Boolean mask for whether target in output list should be skipped.

        If the loss function corresponding to a model output is None, then
        this output will be skipped during total loss calculation and feed
        targets preparation.

        Returns:
            A boolean list for whether the corresponding target in the
            output list should be skipped during loss calculation.
        """
        return [l is None for l in self.loss_functions]

    def _prepare_output_masks(self):
        """Returns masks corresponding to model outputs."""
        return [getattr(x, "_keras_mask", None) for x in self.outputs]

    def _prepare_total_loss(self, masks):
        """Computes total loss from loss functions.

        Args:
            masks: List of mask values corresponding to each model output.

        Returns:
            A scalar tensor with the model's total loss (or `0.0` if the
            model has no losses).

        Raises:
            TypeError: If model run_eagerly is True.
        """
        if self.run_eagerly:
            raise TypeError(
                "total loss cannot be computed when compiled with "
                "run_eagerly = True."
            )
        loss_list = []
        with backend.name_scope("loss"):
            for endpoint, mask in zip(self._training_endpoints, masks):
                if endpoint.should_skip_target():
                    continue
                y_true = endpoint.training_target.target
                y_pred = endpoint.output
                loss_fn = endpoint.loss_fn
                loss_weight = endpoint.loss_weight
                loss_name = endpoint.loss_name()
                sample_weight = endpoint.sample_weight

                with backend.name_scope(loss_name):
                    if mask is not None:
                        mask = tf.cast(mask, y_pred.dtype)
                        # Update weights with mask.
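                        # (Conceptually, masked-out positions get zero
                        # weight; when both a mask and sample weights are
                        # present they are combined multiplicatively once
                        # their shapes are aligned.)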
if sample_weight is None: sample_weight = mask else: # Update dimensions of weights to match with mask if # possible. ( mask, _, sample_weight, ) = losses_utils.squeeze_or_expand_dimensions( mask, sample_weight=sample_weight ) if hasattr(loss_fn, "reduction"): per_sample_losses = loss_fn.call(y_true, y_pred) sample_weight = losses_utils.apply_valid_mask( per_sample_losses, sample_weight, mask, loss_fn.reduction, ) weighted_losses = losses_utils.compute_weighted_loss( per_sample_losses, sample_weight=sample_weight, reduction=losses_utils.ReductionV2.NONE, ) loss_reduction = loss_fn.reduction # `AUTO` loss reduction defaults to # `SUM_OVER_BATCH_SIZE` for all compile use cases. if loss_reduction == losses_utils.ReductionV2.AUTO: loss_reduction = ( losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE ) # Compute the stateless loss value. output_loss = losses_utils.reduce_weighted_loss( weighted_losses, reduction=loss_reduction ) else: # Compute the stateless loss value for a custom loss # class. Here we assume that the class takes care of # loss reduction because if this class returns a vector # value we cannot differentiate between use case where a # custom optimizer expects a vector loss value vs # unreduced per-sample loss value. output_loss = loss_fn( y_true, y_pred, sample_weight=sample_weight ) loss_reduction = ( losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE ) if len(self.outputs) > 1: # Keep track of stateful result tensor for the loss. endpoint.output_loss_metric(output_loss) # Scale output loss for distribution. For custom losses we # assume reduction was mean. if ( loss_reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE ): output_loss = losses_utils.scale_loss_for_distribution( output_loss ) loss_list.append(loss_weight * output_loss) if not loss_list and not self.losses: raise ValueError( "The model cannot be compiled " "because it has no loss to optimize." ) # Add regularization penalties and other layer-specific losses. custom_losses = self.get_losses_for(None) + self.get_losses_for( self.inputs ) if custom_losses: total_custom_loss = tf.add_n( losses_utils.cast_losses_to_common_dtype(custom_losses) ) loss_list.append( losses_utils.scale_loss_for_distribution(total_custom_loss) ) loss_list = losses_utils.cast_losses_to_common_dtype(loss_list) if loss_list: total_loss = tf.add_n(loss_list) else: total_loss = 0.0 return total_loss def _get_callback_model(self): """Returns the Callback Model for this Model.""" if hasattr(self, "_replicated_model") and self._replicated_model: # When using training_distributed, we set the callback model # to an instance of the `DistributedModel` that we create in # the `compile` call. The `DistributedModel` is initialized # with the first replicated model. We need to set the callback # model to a DistributedModel to allow us to override saving # and loading weights when we checkpoint the model during training. return self._replicated_model if hasattr(self, "callback_model") and self.callback_model: return self.callback_model return self @tf.__internal__.tracking.no_automatic_dependency_tracking def _make_callback_model(self, grouped_model): first_replicated_model = self._distribution_strategy.unwrap( grouped_model )[0] # We initialize the callback model with the first replicated model. self._replicated_model = DistributedCallbackModel( first_replicated_model ) self._replicated_model.set_original_model(self) def _validate_or_infer_batch_size(self, batch_size, steps, x): """Validates that `batch_size` provided is consistent with InputLayer. 
It's possible that the user specified a static batch size in their
        InputLayer. If so, this method checks the provided `batch_size` and
        `x` arguments are consistent with this static batch size. Also, if
        `batch_size` is `None`, this method will attempt to infer the batch
        size from the static batch size of the InputLayer. Lastly,
        ValueError will be raised if `x` is a tf.data.Dataset and
        `batch_size` is specified as we expect users to provide batched
        datasets.

        Args:
            batch_size: The batch_size provided as an argument to
                fit/evaluate/predict.
            steps: The steps provided as an argument to
                fit/evaluate/predict.
            x: The data passed as `x` to fit/evaluate/predict.

        Returns:
            The validated batch_size, auto-inferred from the first layer if
            not provided.
        """
        if isinstance(
            x, (tf.compat.v1.data.Dataset, tf.data.Dataset, data_utils.Sequence)
        ) or tf_inspect.isgenerator(x):
            if batch_size is not None:
                raise ValueError(
                    "The `batch_size` argument must not be specified for "
                    "the given input type. Received input: "
                    "{}, batch_size: {}".format(x, batch_size)
                )
            return

        # Avoids the override in Sequential.layers which filters Input layers.
        # (Which are often the very layers that we're after.)
        layers = self._flatten_layers(include_self=False, recursive=False)
        first_layer = next(layers, None)
        if first_layer:
            # The per-replica static batch size.
            static_batch_size = training_utils.get_static_batch_size(
                first_layer
            )
            if static_batch_size is not None:
                # Determine number of times the user-supplied batch size
                # will be split.
                if (
                    self._distribution_strategy
                    and distributed_training_utils.global_batch_size_supported(
                        self._distribution_strategy
                    )
                ):
                    num_splits_for_ds = (
                        self._distribution_strategy.num_replicas_in_sync
                    )
                else:
                    num_splits_for_ds = 1

                # Check `batch_size` argument is consistent with InputLayer.
                if batch_size is not None:
                    if batch_size % num_splits_for_ds != 0:
                        raise ValueError(
                            "The `batch_size` argument ({}) must be "
                            "divisible by the number of replicas "
                            "({})".format(batch_size, num_splits_for_ds)
                        )
                    per_replica_batch_size = batch_size // num_splits_for_ds

                    if per_replica_batch_size != static_batch_size:
                        raise ValueError(
                            "The `batch_size` argument value {} is "
                            "incompatible with the specified batch size of "
                            "your Input Layer: {}".format(
                                per_replica_batch_size, static_batch_size
                            )
                        )

                # Check Dataset/Iterator batch size is consistent with
                # InputLayer.
                if isinstance(
                    x,
                    (
                        tf.data.Dataset,
                        tf.compat.v1.data.Iterator,
                        tf.data.Iterator,
                    ),
                ):
                    ds_batch_size = tf.compat.v1.Dimension(
                        tf.nest.flatten(tf.compat.v1.data.get_output_shapes(x))[
                            0
                        ][0]
                    ).value
                    if ds_batch_size is not None:
                        if ds_batch_size % num_splits_for_ds != 0:
                            raise ValueError(
                                "The batch output shape of your `Dataset` "
                                "{} is not divisible by the number of "
                                "replicas {}".format(
                                    ds_batch_size, num_splits_for_ds
                                )
                            )

                        ds_per_replica_batch_size = (
                            ds_batch_size // num_splits_for_ds
                        )
                        if ds_per_replica_batch_size != static_batch_size:
                            raise ValueError(
                                "The batch output shape of your `Dataset` "
                                "is {}, which is incompatible with the "
                                "specified batch size of your Input Layer: "
                                "{}".format(
                                    ds_per_replica_batch_size,
                                    static_batch_size,
                                )
                            )

                # Set inferred batch size from the InputLayer.
                if steps is None:
                    batch_size = static_batch_size * num_splits_for_ds

        if batch_size is None and steps is None:
            # Backwards compatibility
            batch_size = 32
        return batch_size

    def _prepare_sample_weights(self, sample_weights=None):
        """Sets sample weight attribute on the model."""
        # List with the same length as model outputs.
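        # For example (purely illustrative): a two-output model could pass
        # `sample_weights=[w, None]` so that only the first endpoint gets a
        # concrete weight tensor; the second falls back to its default.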
        if sample_weights is not None:
            if len(sample_weights) != len(self._training_endpoints):
                raise ValueError(
                    "Provided sample weights must have same length as the "
                    "number of outputs. Expected: {}, got: {}.".format(
                        len(self._training_endpoints), len(sample_weights)
                    )
                )
        else:
            sample_weights = [None] * len(self._training_endpoints)
        for endpoint, weight in zip(self._training_endpoints, sample_weights):
            endpoint.populate_sample_weight(weight, endpoint.sample_weight_mode)

    def _cache_output_metric_attributes(self, metrics, weighted_metrics):
        """Caches metric name and function attributes for every model
        output."""
        output_shapes = []
        for output in self.outputs:
            if output is None or output.shape.rank is None:
                output_shapes.append(None)
            else:
                output_shapes.append(output.shape.as_list())
        self._per_output_metrics = (
            training_utils_v1.collect_per_output_metric_info(
                metrics,
                self.output_names,
                output_shapes,
                self.loss_functions,
                from_serialized=self._from_serialized,
            )
        )
        self._per_output_weighted_metrics = (
            training_utils_v1.collect_per_output_metric_info(
                weighted_metrics,
                self.output_names,
                output_shapes,
                self.loss_functions,
                from_serialized=self._from_serialized,
                is_weighted=True,
            )
        )

    def _add_unique_metric_name(self, metric_name, metric_fn, output_index):
        """Makes the metric name unique.

        If there are multiple outputs for which the metrics are calculated,
        the metric names have to be made unique by appending an integer.

        Args:
            metric_name: Metric name that corresponds to the metric
                specified by the user. For example: 'acc'.
            metric_fn: The Metric object.
            output_index: The index of the model output for which the metric
                name is being added.

        Returns:
            String, the unique metric name for the given output.
        """
        # For multi-output models, prepend the output names to the metric
        # name.
        if len(self.output_names) > 1:
            # If we're loading from an already-serialized model, we've
            # already prepended the output name, and we don't want to do it
            # again.
            #
            # Alternatively, we may be receiving a stateless metric (e.g.
            # the string "accuracy") rather than a `Metric` object, in which
            # case we want to prepend the output name even if we are loading
            # a serialized model.
            if not getattr(metric_fn, "_from_serialized", False):
                metric_name = f"{self.output_names[output_index]}_{metric_name}"

        j = 1
        base_metric_name = metric_name
        while metric_name in self.metrics_names:
            metric_name = "%s_%d" % (base_metric_name, j)
            j += 1

        return metric_name

    def _init_metric_attributes(self):
        """Initializes model metric attributes."""
        # List of stateful metric functions. Used for resetting metric state
        # during training/eval.
        self._compile_metric_functions = []

    def _set_per_output_metric_attributes(self, metrics_dict, output_index):
        """Sets the metric attributes on the model for the given output.

        Args:
            metrics_dict: A dict with metric names as keys and metric fns as
                values.
            output_index: The index of the model output for which the metric
                attributes are added.

        Returns:
            Metrics dict updated with unique metric names as keys.
        """
        updated_metrics_dict = collections.OrderedDict()
        for metric_name, metric_fn in metrics_dict.items():
            metric_name = self._add_unique_metric_name(
                metric_name, metric_fn, output_index
            )

            # Update the name on the metric class to be the unique generated
            # name.
            metric_fn._name = metric_name
            updated_metrics_dict[metric_name] = metric_fn
            # Keep track of metric name and function.
self._compile_metric_functions.append(metric_fn) return updated_metrics_dict def _set_metric_attributes(self): """Sets the metric attributes on the model for all the model outputs.""" updated_per_output_metrics = [] updated_per_output_weighted_metrics = [] for i, endpoint in enumerate(self._training_endpoints): if endpoint.should_skip_target(): updated_per_output_metrics.append(self._per_output_metrics[i]) updated_per_output_weighted_metrics.append( self._per_output_weighted_metrics[i] ) continue updated_per_output_metrics.append( self._set_per_output_metric_attributes( self._per_output_metrics[i], i ) ) updated_per_output_weighted_metrics.append( self._set_per_output_metric_attributes( self._per_output_weighted_metrics[i], i ) ) # Create a metric wrapper for each output loss. This computes mean of an # output loss across mini-batches (irrespective of how we reduce within # a batch). if len(self._training_endpoints) > 1: for endpoint in self._training_endpoints: if not endpoint.should_skip_target(): endpoint.output_loss_metric = metrics_module.Mean( name=endpoint.loss_name() ) self._per_output_metrics = updated_per_output_metrics self._per_output_weighted_metrics = updated_per_output_weighted_metrics def _handle_per_output_metrics( self, metrics_dict, y_true, y_pred, mask, weights=None ): """Calls metric functions for a single output. Args: metrics_dict: A dict with metric names as keys and metric fns as values. y_true: Target output. y_pred: Predicted output. mask: Computed mask value for the current output. weights: Weights to be applied on the current output. Returns: A list of metric result tensors. """ metric_results = [] for metric_name, metric_fn in metrics_dict.items(): with backend.name_scope(metric_name): metric_result = training_utils_v1.call_metric_function( metric_fn, y_true, y_pred, weights=weights, mask=mask ) metric_results.append(metric_result) return metric_results def _handle_metrics( self, outputs, targets=None, skip_target_masks=None, sample_weights=None, masks=None, return_weighted_metrics=False, return_weighted_and_unweighted_metrics=False, ): """Handles calling metric functions. Args: outputs: List of outputs (predictions). targets: List of targets. skip_target_masks: Optional. List of boolean for whether the corresponding target should be ignored or not. sample_weights: Optional list of sample weight arrays. masks: List of computed output mask values. return_weighted_metrics: Flag that indicates whether weighted metrics should be computed instead of unweighted metrics. This flag is ignored when `return_weighted_and_unweighted_metrics` is enabled. return_weighted_and_unweighted_metrics: Flag that is used to indicate whether both weighted and unweighted metrics should be computed. When this is not enabled, we use `return_weighted_metrics` param to indicate whether weighted or unweighted metrics should be returned. Returns: A list of metric result tensors. """ # TODO(scottzhu): Update this to use the new training_endpoints. # Currently the eager and graph logic is bit different. skip_target_masks = skip_target_masks or [False] * len(outputs) metric_results = [] with backend.name_scope("metrics"): # Invoke all metrics added using `compile`. 
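            # For example (names illustrative): a two-output model compiled
            # with `metrics=["mae"]` gets per-output metric fns whose result
            # tensors are collected here (e.g. `out1_mae`, `out2_mae`).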
            for i in range(len(outputs)):
                if skip_target_masks[i]:
                    continue
                output = outputs[i] if outputs else None
                target = targets[i] if targets else None
                output_mask = masks[i] if masks else None

                if (
                    return_weighted_and_unweighted_metrics
                    or not return_weighted_metrics
                ):
                    metric_results.extend(
                        self._handle_per_output_metrics(
                            self._per_output_metrics[i],
                            target,
                            output,
                            output_mask,
                        )
                    )
                if (
                    return_weighted_and_unweighted_metrics
                    or return_weighted_metrics
                ):
                    metric_results.extend(
                        self._handle_per_output_metrics(
                            self._per_output_weighted_metrics[i],
                            target,
                            output,
                            output_mask,
                            weights=sample_weights[i]
                            if sample_weights
                            else None,
                        )
                    )
        return metric_results

    def _check_trainable_weights_consistency(self):
        """Check trainable weights count consistency.

        This will raise a warning if `trainable_weights` and
        `_collected_trainable_weights` are inconsistent (i.e. have different
        number of parameters).
        Inconsistency will typically arise when one modifies
        `model.trainable` without calling `model.compile` again.
        """
        if not hasattr(self, "_collected_trainable_weights"):
            return

        if len(self.trainable_weights) != len(
            self._collected_trainable_weights
        ):
            logging.log_first_n(
                logging.WARN,
                "Discrepancy between trainable weights and collected"
                " trainable weights, did you set `model.trainable`"
                " without calling `model.compile` afterwards?",
                1,
            )

    def _make_train_function(self):
        has_recompiled = self._recompile_weights_loss_and_weighted_metrics()
        self._check_trainable_weights_consistency()
        if isinstance(self.optimizer, list):
            raise ValueError(
                "The `optimizer` in `compile` should be a single optimizer."
            )
        # If we have re-compiled the loss/weighted metric sub-graphs then
        # create train function even if one exists already. This is because
        # `_feed_sample_weights` list has been updated on re-compile.
        if getattr(self, "train_function", None) is None or has_recompiled:
            # Restore the compiled trainable state.
            current_trainable_state = self._get_trainable_state()
            self._set_trainable_state(self._compiled_trainable_state)

            inputs = (
                self._feed_inputs
                + self._feed_targets
                + self._feed_sample_weights
            )
            if not isinstance(backend.symbolic_learning_phase(), int):
                inputs += [backend.symbolic_learning_phase()]

            with backend.get_graph().as_default():
                with backend.name_scope("training"):
                    # Training updates
                    updates = self.optimizer.get_updates(
                        params=self._collected_trainable_weights,
                        loss=self.total_loss,
                    )
                    # Unconditional updates
                    updates += self.get_updates_for(None)
                    # Conditional updates relevant to this model
                    updates += self.get_updates_for(self.inputs)

                metrics = self._get_training_eval_metrics()
                metrics_tensors = [
                    m._call_result
                    for m in metrics
                    if hasattr(m, "_call_result")
                ]

            with backend.name_scope("training"):
                # Gets loss and metrics. Updates weights at each call.
                fn = backend.function(
                    inputs,
                    [self.total_loss] + metrics_tensors,
                    updates=updates,
                    name="train_function",
                    **self._function_kwargs,
                )
                setattr(self, "train_function", fn)

            # Restore the current trainable state
            self._set_trainable_state(current_trainable_state)

    def _make_test_function(self):
        has_recompiled = self._recompile_weights_loss_and_weighted_metrics()
        # If we have re-compiled the loss/weighted metric sub-graphs then
        # create test function even if one exists already. This is because
        # `_feed_sample_weights` list has been updated on re-compile.
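        # The function built below returns [total_loss] + metric results and
        # runs only `self.state_updates` (no gradient updates), mirroring
        # `_make_train_function` above.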
if getattr(self, "test_function", None) is None or has_recompiled: inputs = ( self._feed_inputs + self._feed_targets + self._feed_sample_weights ) with backend.get_graph().as_default(): metrics = self._get_training_eval_metrics() metrics_tensors = [ m._call_result for m in metrics if hasattr(m, "_call_result") ] with backend.name_scope("evaluation"): updates = self.state_updates # Return loss and metrics, no gradient updates. # Does update the network states. fn = backend.function( inputs, [self.total_loss] + metrics_tensors, updates=updates, name="test_function", **self._function_kwargs, ) setattr(self, "test_function", fn) def _make_predict_function(self): if not hasattr(self, "predict_function"): self.predict_function = None if self.predict_function is None: inputs = self._feed_inputs # Gets network outputs. Does not update weights. # Does update the network states. kwargs = getattr(self, "_function_kwargs", {}) with backend.name_scope(ModeKeys.PREDICT): self.predict_function = backend.function( inputs, self.outputs, updates=self.state_updates, name="predict_function", **kwargs, ) def _make_execution_function(self, mode): if mode == ModeKeys.TRAIN: self._make_train_function() return self.train_function if mode == ModeKeys.TEST: self._make_test_function() return self.test_function if mode == ModeKeys.PREDICT: self._make_predict_function() return self.predict_function def _distribution_standardize_user_data( self, x, y=None, sample_weight=None, class_weight=None, batch_size=None, validation_split=0.0, shuffle=False, epochs=1, allow_partial_batch=False, ): """Runs validation checks on input and target data passed by the user. This is called when using tf.distribute.Strategy to train, evaluate or serve the model. Args: x: Input data. A numpy array or `tf.data` dataset. y: Target data. A numpy array or None if x is a `tf.data` dataset. sample_weight: An optional sample-weight array passed by the user to weight the importance of each sample in `x`. class_weight: An optional class-weight array by the user to weight the importance of samples in `x` based on the class they belong to, as conveyed by `y`. batch_size: Integer batch size. If provided, it is used to run additional validation checks on stateful models. validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. shuffle: Boolean whether to shuffle the training data before each epoch. epochs: Integer epochs. If > 1, repeat the numpy training data epochs times when converting to training dataset. allow_partial_batch: Boolean whether to enforce that all batches have the same size. Returns: Dataset instance. Raises: ValueError: In case of invalid user-provided data. RuntimeError: If the model was never compiled. """ if class_weight: raise NotImplementedError( "`class_weight` is currently not supported " "when using tf.distribute.Strategy." ) if ( sample_weight is not None and sample_weight.all() and backend.is_tpu_strategy(self._distribution_strategy) ): raise NotImplementedError( "`sample_weight` is currently not supported " "when using TPUStrategy." ) # Validates `steps` and `shuffle` arguments right at the beginning # since we use it to construct the dataset object. # TODO(anjalisridhar): Remove this check once we refactor the # _standardize_user_data code path. This check is already present # elsewhere in the codebase. 
if isinstance(x, tf.data.Dataset): if shuffle: training_utils_v1.verify_dataset_shuffled(x) strategy = self._distribution_strategy with strategy.scope(): # We should be sure to call get_session() inside the # strategy.scope() so the strategy can affect the session options. if tf.compat.v1.executing_eagerly_outside_functions(): session = None else: session = backend.get_session() first_x_value = tf.nest.flatten(x)[0] if isinstance(first_x_value, np.ndarray): x = training_utils.list_to_tuple(x) if y is not None: y = training_utils.list_to_tuple(y) if sample_weight is not None: sample_weight = training_utils.list_to_tuple( sample_weight ) in_tuple = (x, y, sample_weight) else: in_tuple = (x, y) else: in_tuple = x ds = strategy.extended.experimental_make_numpy_dataset( in_tuple, session=session ) if shuffle: # We want a buffer size that is larger than the batch size # provided by the user and provides sufficient randomness. # Note that larger numbers introduce more memory usage based # on the size of each sample. ds = ds.shuffle(max(1024, batch_size * 8)) if epochs > 1: ds = ds.repeat(epochs) # We need to use the drop_remainder argument to get a known # static input shape which is required for TPUs. drop_remainder = ( not allow_partial_batch and strategy.extended.experimental_require_static_shapes ) # TODO(b/131720208): We still drop remainder here if number of # examples is divisible by batch size, as sometimes dynamic # padder will time out with keras.metrics.CategoricalAccuracy() # metric. if backend.is_tpu_strategy(strategy) and not drop_remainder: dataset_size = first_x_value.shape[0] if dataset_size % batch_size == 0: drop_remainder = True x = ds.batch(batch_size, drop_remainder=drop_remainder) else: assert isinstance(x, tf.data.Dataset) training_utils_v1.validate_dataset_input( x, y, sample_weight, validation_split ) return x def _standardize_user_data( self, x, y=None, sample_weight=None, class_weight=None, batch_size=None, check_steps=False, steps_name="steps", steps=None, validation_split=0.0, shuffle=False, extract_tensors_from_dataset=False, ): """Runs validation checks on input and target data passed by the user. Also standardizes the data to lists of arrays, in order. Also builds and compiles the model on the fly if it is a subclassed model that has never been called before (and thus has no inputs/outputs). This is a purely internal method, subject to refactoring at any time. Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset. y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset, `y` should not be specified (since targets will be obtained from the iterator). sample_weight: An optional sample-weight array passed by the user to weight the importance of each sample in `x`. class_weight: An optional class-weight array by the user to weight the importance of samples in `x` based on the class they belong to, as conveyed by `y`. If both `sample_weight` and `class_weight` are provided, the weights are multiplied. batch_size: Integer batch size. If provided, it is used to run additional validation checks on stateful models. 
check_steps: boolean, True if we want to check for validity of `steps` and False, otherwise. For example, when we are standardizing one batch of data for train_on_batch/predict_on_batch/test_on_batch APIs, `steps` value is not required and we should not check for its validity in these cases. steps_name: The public API's parameter name for `steps`. steps: Integer or `None`. Total number of steps (batches of samples) to execute. validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. shuffle: Boolean whether to shuffle the training data before each epoch. extract_tensors_from_dataset: Boolean. When `x` is a dataset instance, this indicates whether to extract actual tensors from the dataset or instead output the dataset instance itself. Set to True when calling from `train_on_batch`/etc. Returns: A tuple of 3: inputs (arrays or dicts, depending on whether `x` was a dict or not), target arrays, sample-weight arrays. If the model's input and targets are symbolic, these lists are empty (since the model takes no user-provided data, instead the data comes from the symbolic inputs/targets). Raises: ValueError: In case of invalid user-provided data. RuntimeError: If the model was never compiled. """ if isinstance(x, (tf.compat.v1.data.Dataset, tf.data.Dataset)): # Graph mode dataset. We'll pass the dataset as-is (unless # `extract_tensors_from_dataset` is True, in which case we extract # the tensors from the dataset and we output them. training_utils_v1.validate_dataset_input( x, y, sample_weight, validation_split ) if shuffle: training_utils_v1.verify_dataset_shuffled(x) is_dataset = True if extract_tensors_from_dataset: # We do this for `train_on_batch`/etc. ( x, y, sample_weight, ) = training_utils_v1.extract_tensors_from_dataset(x) elif isinstance(x, tf.compat.v1.data.Iterator): # Graph mode iterator. We extract the symbolic tensors. training_utils_v1.validate_dataset_input( x, y, sample_weight, validation_split ) iterator = x x, y, sample_weight = training_utils_v1.unpack_iterator_input( iterator ) is_dataset = True else: is_dataset = False # Validates `steps` argument based on x's type. if check_steps: training_utils_v1.check_steps_argument(x, steps, steps_name) # First, we build the model on the fly if necessary. if not self.inputs: all_inputs, y_input, dict_inputs = self._build_model_with_inputs( x, y ) is_build_called = True else: all_inputs = [] # Whether this is a subclassed model that expects dictionary inputs # rather than list inputs (e.g. FeatureColumn-based models). dict_inputs = isinstance(self.inputs, dict) is_build_called = False y_input = y # Second, we compile the model on the fly if necessary, mostly for # subclass models. is_compile_called = False if not self._is_compiled and self.optimizer: self._compile_from_inputs(all_inputs, y_input, x, y) is_compile_called = True # In graph mode, if we had just set inputs and targets as symbolic # tensors by invoking build and compile on the model respectively, we do # not have to feed anything to the model. Model already has input and # target data as part of the graph. Note: in this case, `any` and `all` # are equivalent since we disallow mixed symbolic/value inputs. # self.run_eagerly is not free to compute, so we want to reuse the # value. 
run_eagerly = self.run_eagerly if ( not run_eagerly and is_build_called and is_compile_called and not is_dataset and any(_is_symbolic_tensor(v) for v in all_inputs) ): return [], [], None return self._standardize_tensors( x, y, sample_weight, run_eagerly=run_eagerly, dict_inputs=dict_inputs, is_dataset=is_dataset, class_weight=class_weight, batch_size=batch_size, ) def _standardize_tensors( self, x, y, sample_weight, run_eagerly, dict_inputs, is_dataset, class_weight=None, batch_size=None, ): if run_eagerly: # In eager mode, do not do shape validation # since the network has no input nodes (placeholders) to be fed. feed_input_names = self.input_names feed_input_shapes = None elif not self._is_graph_network: # Case: symbolic-mode subclassed network. Do not do shape # validation. feed_input_names = self._feed_input_names feed_input_shapes = None else: # Case: symbolic-mode graph network. # In this case, we run extensive shape validation checks. feed_input_names = self._feed_input_names feed_input_shapes = self._feed_input_shapes # Standardize the inputs. if not isinstance(x, (tf.compat.v1.data.Dataset, tf.data.Dataset)): # TODO(fchollet): run static checks with dataset output shape(s). x = training_utils_v1.standardize_input_data( x, feed_input_names, feed_input_shapes, check_batch_axis=False, # Don't enforce the batch size. exception_prefix="input", ) # Get typespecs for the input data and sanitize it if necessary. # TODO(momernick): This should be capable of doing full input validation # at all times - validate that this is so and refactor the # standardization code. if isinstance(x, tf.data.Dataset): x_shapes = tf.data.experimental.get_structure(x) if isinstance(x_shapes, tuple): # If the output of a Dataset is a tuple, we assume it's either # of the form (x_data, y_data) or (x_data, y_data, # sample_weights). In either case, we only care about x_data # here. x_shapes = x_shapes[0] else: flat_inputs = tf.nest.flatten(x) flat_expected_inputs = tf.nest.flatten(self.inputs) converted_x = [] for a, b in zip(flat_inputs, flat_expected_inputs): converted_x.append(_convert_scipy_sparse_tensor(a, b)) x = tf.nest.pack_sequence_as(x, converted_x) # Convert ResourceVariables to tensors so nest.assert_same_structure # below won't fail with Variable and Tensor. x_tensors = tf_utils.convert_variables_to_tensors(x) x_shapes = tf.nest.map_structure( tf_utils.type_spec_from_value, x_tensors ) flat_inputs = tf.nest.flatten(x_shapes) # Convert ResourceVariables to tensors so nest.assert_same_structure # below won't fail with Variable and Tensor. flat_expected_inputs = tf.nest.flatten( tf_utils.convert_variables_to_tensors(self.inputs) ) for a, b in zip(flat_inputs, flat_expected_inputs): tf.nest.assert_same_structure(a, b, expand_composites=True) if y is not None: # Prepare self._sample_weight_modes. List with the same length as # model outputs. training_utils_v1.prepare_sample_weight_modes( self._training_endpoints, self.sample_weight_mode ) feed_output_names = self._feed_output_names feed_sample_weight_modes = self._sample_weight_modes if not self._is_graph_network: feed_output_shapes = None else: feed_output_shapes = self._feed_output_shapes # Standardize the outputs. y = training_utils_v1.standardize_input_data( y, feed_output_names, # Don't enforce target shapes to match output shapes. # Precise checks will be run in # `check_loss_and_target_compatibility`. shapes=None, check_batch_axis=False, # Don't enforce the batch size. 
exception_prefix="target", ) # Generate sample-wise weight values given the `sample_weight` and # `class_weight` arguments. sample_weights = training_utils_v1.standardize_sample_weights( sample_weight, feed_output_names ) class_weights = training_utils_v1.standardize_class_weights( class_weight, feed_output_names ) sample_weights = [ training_utils_v1.standardize_weights(ref, sw, cw, mode) for (ref, sw, cw, mode) in zip( y, sample_weights, class_weights, feed_sample_weight_modes ) ] # Check that all arrays have the same length. if not self._distribution_strategy: training_utils_v1.check_array_lengths(x, y, sample_weights) if self._is_graph_network and not run_eagerly: # Additional checks to avoid users mistakenly using improper # loss fns. training_utils_v1.check_loss_and_target_compatibility( y, self._feed_loss_fns, feed_output_shapes ) sample_weights, _, _ = training_utils.handle_partial_sample_weights( y, sample_weights, feed_sample_weight_modes, check_all_flat=True ) else: y = [] sample_weights = None if self.stateful and batch_size and not is_dataset: # Check that for stateful networks, number of samples is a multiple # of the static batch size. if x[0].shape[0] % batch_size != 0: raise ValueError( "In a stateful network, " "you should only pass inputs with " "a number of samples that can be " "divided by the batch size. Found: " + str(x[0].shape[0]) + " samples" ) # If dictionary inputs were provided, we return a dictionary as well. if dict_inputs and not isinstance( x, (tf.compat.v1.data.Dataset, tf.data.Dataset) ): x = dict(zip(feed_input_names, x)) return x, y, sample_weights def _build_model_with_inputs(self, inputs, targets): """Build the model (set model inputs/outputs), mainly for subclass model.""" processed_inputs = [] is_dict_inputs = False orig_inputs = inputs # We need to use `inputs` to set the model inputs. # If input data is a dataset iterator in graph mode or if it is an eager # iterator and only one batch of samples is required, we fetch the data # tensors from the iterator and then standardize them. if isinstance(inputs, (tf.compat.v1.data.Dataset, tf.data.Dataset)): inputs, targets, _ = training_utils_v1.extract_tensors_from_dataset( inputs ) # We type-check that `inputs` and `targets` are either single arrays # or lists of arrays, and extract a flat list of inputs from the passed # structure. training_utils_v1.validate_input_types(inputs, orig_inputs) if isinstance(inputs, (list, tuple)): processed_inputs += list(inputs) elif isinstance(inputs, dict): is_dict_inputs = True keys = sorted(inputs.keys()) processed_inputs = [inputs[k] for k in keys] else: processed_inputs.append(inputs) # Now that we have a flat set of inputs, we make sure that none of them # are CompositeTensors or CompositeTensorValues of any type (or scipy # sparse arrays, which we treat as SparseTensor values). We cannot # safely infer input data from an arbitrary composite tensor, so we # don't try - users should explicitly add composite tensor inputs to # their subclassed models. for input_tensor in processed_inputs: if training_utils_v1.is_composite_or_composite_value( input_tensor ) and not isinstance(input_tensor, tf.Variable): # TODO(b/132691975): Document subclass-model CT input handling. raise ValueError( "All SparseTensor and RaggedTensor inputs must be " "explicitly declared using a keras.Input() with " "sparse=True or ragged=True. We found an undeclared " "input %s. For Sequential models, please add a " "keras.Input() as your first Layer. 
For subclassed models, " "please call self._set_inputs() on your input set, which " "you can create using keras.Input() for each input to your " "model." % (input_tensor,) ) # Build the model using the retrieved inputs (value or symbolic). # If values are generated from a dataset, then in symbolic-mode # placeholders will be created to match the value shapes. if isinstance( orig_inputs, ( tf.compat.v1.data.Dataset, tf.data.Dataset, tf.compat.v1.data.Iterator, ), ): if not self.inputs: # For subclassed models, a robust input spec is not available so # we must cast to the model dtype. inputs = training_utils_v1.cast_if_floating_dtype( inputs, self.dtype ) def create_tensor_spec(t): return tf.TensorSpec(t.shape, t.dtype) cast_inputs = tf.nest.map_structure(create_tensor_spec, inputs) elif training_utils_v1.has_tensors(inputs): cast_inputs = training_utils_v1.cast_if_floating_dtype(inputs) else: cast_inputs = inputs self._set_inputs(cast_inputs) return processed_inputs, targets, is_dict_inputs def _compile_from_inputs( self, all_inputs, target, orig_inputs, orig_target ): if target is not None: # We need to use `y` to set the model targets. if training_utils_v1.has_tensors(target): target = training_utils_v1.cast_if_floating_dtype_and_mismatch( target, self.outputs ) training_utils_v1.validate_input_types( target, orig_target, allow_dict=False, field_name="target" ) if isinstance(target, (list, tuple)): all_inputs += list(target) else: all_inputs.append(target) # Type check that all inputs are *either* value *or* symbolic. # TODO(fchollet): this check could be removed in Eager mode? if any(tf.is_tensor(v) for v in all_inputs): if not all(tf.is_tensor(v) for v in all_inputs): raise ValueError( "Do not pass inputs that mix Numpy arrays and " "TensorFlow tensors. " "You passed: x=" + str(orig_inputs) + "; y=" + str(orig_target) ) is_dataset = isinstance( orig_inputs, ( tf.compat.v1.data.Dataset, tf.data.Dataset, tf.compat.v1.data.Iterator, ), ) if is_dataset or tf.executing_eagerly(): target_tensors = None else: # Handle target tensors if any passed. if target is not None: if not isinstance(target, (list, tuple)): target = [target] target_tensors = [v for v in target if _is_symbolic_tensor(v)] else: target_tensors = None self.compile( optimizer=self.optimizer, loss=self.loss, metrics=self._compile_metrics, weighted_metrics=self._compile_weighted_metrics, loss_weights=self.loss_weights, target_tensors=target_tensors, sample_weight_mode=self.sample_weight_mode, run_eagerly=self.run_eagerly, experimental_run_tf_function=self._experimental_run_tf_function, ) # TODO(omalleyt): Consider changing to a more descriptive function name. def _set_inputs(self, inputs, outputs=None, training=None): """Set model's input and output specs based on the input data received. This is to be used for Model subclasses, which do not know at instantiation time what their inputs look like. Args: inputs: Single array, or list of arrays. The arrays could be placeholders, Numpy arrays, data tensors, or TensorSpecs. - if placeholders: the model is built on top of these placeholders, and we expect Numpy data to be fed for them when calling `fit`/etc. - if Numpy data or TensorShapes: we create placeholders matching the TensorShapes or shapes of the Numpy arrays. We expect Numpy data to be fed for these placeholders when calling `fit`/etc. - if data tensors: the model is built on top of these tensors. We do not expect any Numpy data to be provided when calling `fit`/etc. outputs: None, a data tensor, or a list of tensors. 
If None, the outputs
                will be determined by invoking `self.call()`, otherwise the
                provided value will be used.
            training: Boolean or None. Only relevant in symbolic mode.
                Specifies whether to build the model's graph in inference
                mode (False), training mode (True), or using the TF-Keras
                learning phase (None).

        Raises:
            ValueError: If dict inputs are passed to a Sequential Model
                where the first layer isn't FeatureLayer.
        """
        self._set_save_spec(inputs)
        inputs = self._set_input_attrs(inputs)

        if outputs is None:
            kwargs = {}
            if self._expects_training_arg:
                # In V2 mode, feeding `training=None` is not allowed because
                # any value explicitly passed by the user is respected, even
                # `None`.
                if (
                    training is None
                    and not tf.compat.v1.executing_eagerly_outside_functions()
                ):
                    training = backend.learning_phase()
                if training is not None:
                    kwargs["training"] = training
            try:
                outputs = self(inputs, **kwargs)
            except NotImplementedError:
                # This Model or a submodel is dynamic and hasn't overridden
                # `compute_output_shape`.
                outputs = None

        self._set_output_attrs(outputs)

    @tf.__internal__.tracking.no_automatic_dependency_tracking
    def _set_input_attrs(self, inputs):
        """Sets attributes related to the inputs of the Model."""
        if self.inputs:
            raise ValueError("Model inputs are already set.")

        if self.__class__.__name__ == "Sequential" and not self.built:
            if tf.is_tensor(inputs):
                input_shape = (None,) + tuple(inputs.shape.as_list()[1:])
            elif isinstance(inputs, tf.TensorShape):
                input_shape = (None,) + tuple(inputs.as_list()[1:])
            elif isinstance(inputs, dict):
                # We assert that the first layer is a FeatureLayer.
                if not training_utils_v1.is_feature_layer(self.layers[0]):
                    raise ValueError(
                        "Passing a dictionary input to a Sequential Model "
                        "which doesn't have FeatureLayer as the first layer"
                        " is an error."
                    )
                input_shape = (None,)
            else:
                input_shape = (None,) + tuple(inputs.shape[1:])
            self._build_input_shape = input_shape

        # Cast inputs to the compute dtype. This is primarily used
        # when saving to determine the correct dtype in the input signature.
        inputs = self._maybe_cast_inputs(inputs)

        # On-the-fly setting of symbolic model inputs (either by using the
        # tensor provided, or by creating a placeholder if Numpy data was
        # provided).
        model_inputs = training_utils_v1.ModelInputs(inputs)
        inputs = model_inputs.get_symbolic_inputs()
        self.inputs = model_inputs.get_symbolic_inputs(
            return_single_as_list=True
        )
        self.input_names = model_inputs.get_input_names()

        self._feed_inputs = []
        self._feed_input_names = []
        self._feed_input_shapes = []

        for k, v in model_inputs.as_dict():
            if backend.is_placeholder(v):
                self._feed_input_names.append(k)
                self._feed_inputs.append(v)
                self._feed_input_shapes.append(backend.int_shape(v))

        return inputs

    @tf.__internal__.tracking.no_automatic_dependency_tracking
    def _set_output_attrs(self, outputs):
        """Sets attributes related to the outputs of the Model."""
        # NOTE(taylorrobie): This convention cannot be changed without
        # updating the data adapter, since it assumes nest.flatten ordering.
        outputs = tf.nest.flatten(outputs)
        self.outputs = outputs
        self.output_names = training_utils_v1.generic_output_names(outputs)
        # TODO(scottzhu): Should we clean up the self._training_endpoints
        # here?
        self.built = True

    @property
    def _targets(self):
        """The output target tensors for the model."""
        return [
            e.training_target.target
            for e in self._training_endpoints
            if e.has_training_target()
        ]

    @property
    def _feed_targets(self):
        return [
            e.training_target.target
            for e in self._training_endpoints
            if e.has_feedable_training_target()
        ]

    @property
    def _feed_output_names(self):
        return [
            e.output_name
            for e in self._training_endpoints
            if e.has_feedable_training_target()
        ]

    @property
    def _feed_output_shapes(self):
        return [
            e.feed_output_shape
            for e in self._training_endpoints
            if e.has_feedable_training_target()
        ]

    @property
    def _feed_loss_fns(self):
        return [
            e.loss_fn
            for e in self._training_endpoints
            if e.has_feedable_training_target()
        ]

    @property
    def _loss_weights_list(self):
        return [e.loss_weight for e in self._training_endpoints]

    @property
    def _output_loss_metrics(self):
        if hasattr(self, "_training_endpoints"):
            return [
                e.output_loss_metric
                for e in self._training_endpoints
                if e.output_loss_metric is not None
            ]
        return None

    @property
    def sample_weights(self):
        return [e.sample_weight for e in self._training_endpoints]

    @property
    def _sample_weight_modes(self):
        return [e.sample_weight_mode for e in self._training_endpoints]

    @property
    def _feed_sample_weights(self):
        return [
            e.sample_weight
            for e in self._training_endpoints
            if e.sample_weight is not None
        ]

    def _maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode):
        """Maybe load the initial epoch from a checkpoint, for worker recovery.

        Refer to tensorflow/python/tf_keras/distribute/worker_training_state.py
        for more information.

        Args:
            initial_epoch: The original `initial_epoch` the user passed to
                `fit()`.
            mode: The mode for running `model.fit()`.

        Returns:
            If the training is recovering from a previous failure under a
            multi-worker training setting, the epoch at which training is
            supposed to continue. Otherwise, the `initial_epoch` the user
            passed in.
        """
        if self._training_state is not None:
            return self._training_state.maybe_load_initial_epoch_from_ckpt(
                initial_epoch, mode
            )
        return initial_epoch

    def _get_training_eval_metrics(self):
        """Returns all the metrics that are to be reported.

        This includes the output loss metrics, compile metrics/weighted
        metrics, and `add_metric` metrics.
        """
        metrics = []
        metrics.extend(getattr(self, "_output_loss_metrics", None) or [])
        metrics.extend(getattr(self, "metrics", None) or [])
        return metrics

    def _assert_compile_was_called(self):
        # Checks whether `compile` has been called. If it has, then the
        # optimizer is set. This is different from whether the model is
        # compiled (i.e. whether the model is built and its inputs/outputs
        # are set).
        if not self._compile_was_called:
            raise RuntimeError(
                "You must compile your model before "
                "training/testing. "
                "Use `model.compile(optimizer, loss)`."
            )

    def _in_multi_worker_mode(self):
        """Method to infer if this `Model` is working in multi-worker settings.

        Multi-worker training refers to the setup where the training is
        distributed across multiple workers, as opposed to the case where
        only a local process performs the training. This function is used to
        infer, for example, whether or not a distribute coordinator should be
        run (and thus TensorFlow servers should be started for communication
        with other servers in the cluster), or whether or not
        saving/restoring checkpoints is relevant for preemption fault
        tolerance.

        Experimental. Signature and implementation are subject to change.

        Returns:
            Whether this model indicates it's working in multi-worker
            settings.
""" strategy = self._distribution_strategy # Otherwise, use the strategy whose scope this is in. if not strategy and tf.distribute.has_strategy(): strategy = tf.distribute.get_strategy() return strategy and strategy.extended._in_multi_worker_mode() @property def _trackable_saved_model_saver(self): return model_serialization.ModelSavedModelSaver(self) def _get_compile_args(self, user_metrics=True): del user_metrics self._assert_compile_was_called() kwargs = { "loss": self.loss, "metrics": self._compile_metrics, "loss_weights": self.loss_weights, "sample_weight_mode": self.sample_weight_mode, "weighted_metrics": self._compile_weighted_metrics, } return kwargs @property def _compile_was_called(self): return self._v1_compile_was_called class DistributedCallbackModel(Model): """Model that is used for callbacks with tf.distribute.Strategy.""" def __init__(self, model): super().__init__() self.optimizer = model.optimizer def set_original_model(self, orig_model): self._original_model = orig_model def save_weights(self, filepath, overwrite=True, save_format=None): self._replicated_model.save_weights( filepath, overwrite=overwrite, save_format=save_format ) def save(self, filepath, overwrite=True, include_optimizer=True): # save weights from the distributed model to the original model distributed_model_weights = self.get_weights() self._original_model.set_weights(distributed_model_weights) # TODO(anjalisridhar): Do we need to save the original model here? # Saving the first replicated model works as well. self._original_model.save( filepath, overwrite=True, include_optimizer=False ) def load_weights(self, filepath, by_name=False): self._original_model.load_weights(filepath, by_name=False) # Copy the weights from the original model to each of the replicated # models. orig_model_weights = self._original_model.get_weights() distributed_training_utils_v1.set_weights( self._original_model._distribution_strategy, self, orig_model_weights, ) def __getattr__(self, item): # Allowed attributes of the model that can be accessed by the user # during a callback. if item not in ("_setattr_tracking", "_layers"): logging.warning( "You are accessing attribute " + item + " of the " "DistributedCallbackModel that may not have been set " "correctly." ) return super().__getattr__(item) class _TrainingEndpoint: """A container for the training output/target and related entities. In the case of model with multiple outputs, there is a one-to-one mapping between model output (y_pred), model target (y_true), loss, metrics etc. By unifying these entities into one class, different entity can access information between each other, rather than currently access different list of attributes of the model. """ def __init__( self, output, output_name, loss_fn, loss_weight=None, training_target=None, output_loss_metric=None, sample_weight=None, sample_weight_mode=None, ): """Initialize the _TrainingEndpoint. Note that the output and output_name should be stable as long as the model structure doesn't change. The training_target suppose to be mutable since the information is provided via `compile()` Args: output: the output tensor of the model. output_name: the unique name of the output tensor. loss_fn: the loss function for the output tensor. loss_weight: float, the weights for the loss. training_target: the _TrainingTarget for the model. output_loss_metric: the metric object for the loss function. sample_weight: the weights for how a sample is weighted during metric and loss calculation. Could be None. 
sample_weight_mode: string, 'temporal', 'samplewise' or None. The mode for how the sample_weight is populated. """ self._output = output self._output_name = output_name self._loss_fn = loss_fn self._loss_weight = loss_weight self._training_target = training_target self._output_loss_metric = output_loss_metric self._sample_weight = sample_weight self._sample_weight_mode = sample_weight_mode @property def output(self): return self._output @property def output_name(self): return self._output_name @property def shape(self): return backend.int_shape(self.output) @property def loss_fn(self): return self._loss_fn @property def loss_weight(self): return self._loss_weight @loss_weight.setter def loss_weight(self, value): self._loss_weight = value @property def training_target(self): return self._training_target @training_target.setter def training_target(self, value): self._training_target = value def create_training_target(self, target, run_eagerly=False): """Create training_target instance and update the self.training_target. Note that the input target should just be a tensor or None, and corresponding training target will be created based on the output and loss_fn. Args: target: the target tensor for the current output. Could be None. run_eagerly: boolean, whether the model is in run_eagerly mode. Raises: ValueError if the training_target field for the current instance has already been populated. """ if self.has_training_target(): raise ValueError( "The training_target field for the _TrainingEndpoint " "instance has already been populated" ) if run_eagerly: # When run_eagerly, the target tensor is ignored, and the None # placeholder is created instead. self.training_target = _TrainingTarget( None, feedable=True, skip_target_weights=False ) return if self.should_skip_target(): self.training_target = _TrainingTarget(None) else: if target is not None and not backend.is_placeholder(target): feedable = False skip_target_weights = True else: feedable = True skip_target_weights = False if target is None: target_dtype = losses.LABEL_DTYPES_FOR_LOSSES.get( self.loss_fn, backend.dtype(self.output) ) target = backend.placeholder( ndim=len(self.shape), name=self.output_name + "_target", sparse=backend.is_sparse(self.output), dtype=target_dtype, ) self.training_target = _TrainingTarget( target, feedable=feedable, skip_target_weights=skip_target_weights, ) @property def output_loss_metric(self): return self._output_loss_metric @output_loss_metric.setter def output_loss_metric(self, value): self._output_loss_metric = value @property def sample_weight(self): return self._sample_weight @sample_weight.setter def sample_weight(self, value): self._sample_weight = value @property def sample_weight_mode(self): return self._sample_weight_mode @sample_weight_mode.setter def sample_weight_mode(self, value): self._sample_weight_mode = value def should_skip_target(self): return self._loss_fn is None def should_skip_target_weights(self): return ( self.should_skip_target() or self.training_target is None or self.training_target.skip_target_weights ) def has_training_target(self): return self.training_target is not None def has_feedable_training_target(self): return ( not self.should_skip_target() and self.training_target is not None and self.training_target.feedable ) def loss_name(self): if self._loss_fn is not None: return self._output_name + "_loss" return None @property def feed_output_shape(self): """The output shape for the feedable target.""" if not self.has_feedable_training_target(): return None if ( ( 
isinstance(self.loss_fn, losses.LossFunctionWrapper) and self.loss_fn.fn == losses.sparse_categorical_crossentropy ) ) or (isinstance(self.loss_fn, losses.SparseCategoricalCrossentropy)): if backend.image_data_format() == "channels_first": return (self.shape[0], 1) + self.shape[2:] else: return self.shape[:-1] + (1,) elif not isinstance(self.loss_fn, losses.Loss) or ( isinstance(self.loss_fn, losses.LossFunctionWrapper) and (getattr(losses, self.loss_fn.fn.__name__, None) is None) ): # If the given loss is not an instance of the `Loss` class (custom # class) or if the loss function that is wrapped is not in the # `losses` module, then it is a user-defined loss and we make no # assumptions about it. return None else: return self.shape def sample_weights_mismatch(self): """Check if the sample weight and the mode match or not.""" # If there is a mismatch between sample weight mode and the placeholders # created, then recompile the sub-graphs that depend on sample weights. return ( self.sample_weight_mode is not None and self.sample_weight is None ) or ( self.sample_weight_mode is None and self.sample_weight is not None ) def populate_sample_weight(self, sample_weight, sample_weight_mode): """Populate the sample weight and based on the sample weight mode.""" if sample_weight is None and ( self.should_skip_target_weights() or sample_weight_mode is None or tf.executing_eagerly() ): self._sample_weight = None return assert sample_weight_mode in ["temporal", "samplewise"] if sample_weight_mode == "temporal": default_value = [[1.0]] shape = [None, None] else: # sample_weight_mode == 'samplewise' default_value = [1.0] shape = [None] if sample_weight is not None: if not sample_weight.shape.is_compatible_with(shape): raise ValueError( "Received sample weight with shape {}. Expected shape " "{}.".format(sample_weight.shape, shape) ) self._sample_weight = sample_weight else: self._sample_weight = tf.compat.v1.placeholder_with_default( tf.constant(default_value, dtype=backend.floatx()), shape=shape, name=self.output_name + "_sample_weights", ) class _TrainingTarget: """Container for a target tensor (y_true) and its metadata (shape, loss...). Args: target: A target tensor for the model. It may be `None` if the output is excluded from loss computation. It is still kept as None since each output of the model should have a corresponding target. If the target is None, the rest of the attributes will be None as well. feedable: Boolean, whether the target is feedable (requires data to be passed in `fit` or `train_on_batch`), or not (model compiled with `target_tensors` argument). skip_target_weights: Boolean, whether the target should be skipped during weights calculation. """ def __init__(self, target, feedable=False, skip_target_weights=True): self._target = target self._feedable = feedable self._skip_target_weights = skip_target_weights @property def target(self): return self._target @property def feedable(self): return self._feedable @property def skip_target_weights(self): return self._skip_target_weights def _is_symbolic_tensor(x): return tf.is_tensor(x) def _convert_scipy_sparse_tensor(value, expected_input): """Handle scipy sparse tensor conversions. This method takes a value 'value' and returns the proper conversion. If value is a scipy sparse tensor and the expected input is a dense tensor, we densify 'value'. If value is a scipy sparse tensor and the expected input is a TF SparseTensor, we convert 'value' to a SparseTensor. 
    If 'value' is not a scipy sparse tensor, or scipy is not imported, we
    pass it through unchanged.

    Args:
        value: An object that may be a scipy sparse tensor.
        expected_input: The expected input placeholder.

    Returns:
        The possibly-converted 'value'.
    """
    if issparse is not None and issparse(value):
        if backend.is_sparse(expected_input):
            sparse_coo = value.tocoo()
            row, col = sparse_coo.row, sparse_coo.col
            data, shape = sparse_coo.data, sparse_coo.shape
            indices = np.concatenate(
                (np.expand_dims(row, 1), np.expand_dims(col, 1)), 1
            )
            return tf.SparseTensor(indices, data, shape)
        else:
            if tf.compat.v1.executing_eagerly_outside_functions():
                # In TF2 we do not silently densify sparse matrices.
                raise ValueError(
                    "A SciPy sparse matrix was passed to a model "
                    "that expects dense inputs. Please densify your "
                    "inputs first, such as by calling `x.toarray()`."
                )
            return value.toarray()
    else:
        return value


def _get_metrics_from_layers(layers):
    """Returns the list of metrics from the given layers.

    This will not include the `compile` metrics of a model layer.

    Args:
        layers: List of layers.

    Returns:
        List of metrics.
    """
    metrics = []
    layers = layer_utils.filter_empty_layer_containers(layers)
    for layer in layers:
        if isinstance(layer, Model):
            # We cannot call 'metrics' on the model because we do not want
            # to include the metrics that were added in the compile API of a
            # nested model.
            metrics.extend(layer._metrics)
            metrics.extend(_get_metrics_from_layers(layer.layers))
        else:
            metrics.extend(layer.metrics)
    return metrics


def _non_none_constant_value(v):
    constant_value = tf.get_static_value(v)
    return constant_value if constant_value is not None else v
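

# A minimal, self-contained sketch (editorial addition, not part of the
# upstream TF-Keras module) of the scipy COO -> `tf.SparseTensor` mapping
# that `_convert_scipy_sparse_tensor` performs above. It assumes `scipy`
# is installed; the variable names are illustrative only.
if __name__ == "__main__":
    from scipy.sparse import coo_matrix

    dense = np.array([[1.0, 0.0], [0.0, 2.0]])
    coo = coo_matrix(dense)
    # Stack the row/column coordinates into an (nnz, 2) index matrix, the
    # same way `_convert_scipy_sparse_tensor` does.
    demo_indices = np.concatenate(
        (np.expand_dims(coo.row, 1), np.expand_dims(coo.col, 1)), 1
    )
    demo_sparse = tf.SparseTensor(demo_indices, coo.data, coo.shape)
    # Densifying the result recovers the original matrix.
    print(tf.sparse.to_dense(demo_sparse).numpy())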
tf-keras/tf_keras/engine/training_v1.py/0
{ "file_path": "tf-keras/tf_keras/engine/training_v1.py", "repo_id": "tf-keras", "token_count": 70865 }
192
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for sequential_feature_column.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.feature_column import sequence_feature_column as ksfc from tf_keras.saving.legacy import model_config from tf_keras.testing_infra import test_combinations def _initialized_session(config=None): sess = tf.compat.v1.Session(config=config) sess.run(tf.compat.v1.global_variables_initializer()) sess.run(tf.compat.v1.tables_initializer()) return sess @test_combinations.generate(test_combinations.combine(mode=["graph", "eager"])) class SequenceFeaturesTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters( { "testcase_name": "2D", "sparse_input_args_a": { # example 0, ids [2] # example 1, ids [0, 1] "indices": ((0, 0), (1, 0), (1, 1)), "values": (2, 0, 1), "dense_shape": (2, 2), }, "sparse_input_args_b": { # example 0, ids [1] # example 1, ids [2, 0] "indices": ((0, 0), (1, 0), (1, 1)), "values": (1, 2, 0), "dense_shape": (2, 2), }, "expected_input_layer": [ # example 0, ids_a [2], ids_b [1] [[5.0, 6.0, 14.0, 15.0, 16.0], [0.0, 0.0, 0.0, 0.0, 0.0]], # example 1, ids_a [0, 1], ids_b [2, 0] [[1.0, 2.0, 17.0, 18.0, 19.0], [3.0, 4.0, 11.0, 12.0, 13.0]], ], "expected_sequence_length": [1, 2], }, { "testcase_name": "3D", "sparse_input_args_a": { # feature 0, ids [[2], [0, 1]] # feature 1, ids [[0, 0], [1]] "indices": ( (0, 0, 0), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), ), "values": (2, 0, 1, 0, 0, 1), "dense_shape": (2, 2, 2), }, "sparse_input_args_b": { # feature 0, ids [[1, 1], [1]] # feature 1, ids [[2], [0]] "indices": ( (0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0), ), "values": (1, 1, 1, 2, 0), "dense_shape": (2, 2, 2), }, "expected_input_layer": [ # feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -] [[5.0, 6.0, 14.0, 15.0, 16.0], [2.0, 3.0, 14.0, 15.0, 16.0]], # feature 1, [a: 0, 0, b: 2, -], [a: 1, -, b: 0, -] [[1.0, 2.0, 17.0, 18.0, 19.0], [3.0, 4.0, 11.0, 12.0, 13.0]], ], "expected_sequence_length": [2, 2], }, ) def test_embedding_column( self, sparse_input_args_a, sparse_input_args_b, expected_input_layer, expected_sequence_length, ): sparse_input_a = tf.compat.v1.SparseTensorValue(**sparse_input_args_a) sparse_input_b = tf.compat.v1.SparseTensorValue(**sparse_input_args_b) vocabulary_size = 3 embedding_dimension_a = 2 embedding_values_a = ( (1.0, 2.0), # id 0 (3.0, 4.0), # id 1 (5.0, 6.0), # id 2 ) embedding_dimension_b = 3 embedding_values_b = ( (11.0, 12.0, 13.0), # id 0 (14.0, 15.0, 16.0), # id 1 (17.0, 18.0, 19.0), # id 2 ) def _get_initializer(embedding_dimension, embedding_values): def _initializer(shape, dtype, partition_info=None): self.assertAllEqual( (vocabulary_size, 
embedding_dimension), shape ) self.assertEqual(tf.float32, dtype) self.assertIsNone(partition_info) return embedding_values return _initializer categorical_column_a = ( tf.feature_column.sequence_categorical_column_with_identity( key="aaa", num_buckets=vocabulary_size ) ) embedding_column_a = tf.feature_column.embedding_column( categorical_column_a, dimension=embedding_dimension_a, initializer=_get_initializer( embedding_dimension_a, embedding_values_a ), ) categorical_column_b = ( tf.feature_column.sequence_categorical_column_with_identity( key="bbb", num_buckets=vocabulary_size ) ) embedding_column_b = tf.feature_column.embedding_column( categorical_column_b, dimension=embedding_dimension_b, initializer=_get_initializer( embedding_dimension_b, embedding_values_b ), ) # Test that columns are reordered alphabetically. sequence_input_layer = ksfc.SequenceFeatures( [embedding_column_b, embedding_column_a] ) input_layer, sequence_length = sequence_input_layer( { "aaa": sparse_input_a, "bbb": sparse_input_b, } ) self.evaluate(tf.compat.v1.global_variables_initializer()) weights = sequence_input_layer.weights self.assertCountEqual( ( "sequence_features/aaa_embedding/embedding_weights:0", "sequence_features/bbb_embedding/embedding_weights:0", ), tuple([v.name for v in weights]), ) self.assertAllEqual(embedding_values_a, self.evaluate(weights[0])) self.assertAllEqual(embedding_values_b, self.evaluate(weights[1])) self.assertAllEqual(expected_input_layer, self.evaluate(input_layer)) self.assertAllEqual( expected_sequence_length, self.evaluate(sequence_length) ) def test_embedding_column_with_non_sequence_categorical(self): """Tests that error is raised for non-sequence embedding column.""" vocabulary_size = 3 sparse_input = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) categorical_column_a = ( tf.feature_column.categorical_column_with_identity( key="aaa", num_buckets=vocabulary_size ) ) embedding_column_a = tf.feature_column.embedding_column( categorical_column_a, dimension=2 ) sequence_input_layer = ksfc.SequenceFeatures([embedding_column_a]) with self.assertRaisesRegex( ValueError, r"In embedding_column: aaa_embedding\. 
categorical_column must be " r"of type SequenceCategoricalColumn to use SequenceFeatures\.", ): _, _ = sequence_input_layer({"aaa": sparse_input}) def test_shared_embedding_column(self): with tf.Graph().as_default(): vocabulary_size = 3 sparse_input_a = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) sparse_input_b = tf.compat.v1.SparseTensorValue( # example 0, ids [1] # example 1, ids [2, 0] indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 0), dense_shape=(2, 2), ) embedding_dimension = 2 embedding_values = ( (1.0, 2.0), # id 0 (3.0, 4.0), # id 1 (5.0, 6.0), # id 2 ) def _get_initializer(embedding_dimension, embedding_values): def _initializer(shape, dtype, partition_info=None): self.assertAllEqual( (vocabulary_size, embedding_dimension), shape ) self.assertEqual(tf.float32, dtype) self.assertIsNone(partition_info) return embedding_values return _initializer expected_input_layer = [ # example 0, ids_a [2], ids_b [1] [[5.0, 6.0, 3.0, 4.0], [0.0, 0.0, 0.0, 0.0]], # example 1, ids_a [0, 1], ids_b [2, 0] [[1.0, 2.0, 5.0, 6.0], [3.0, 4.0, 1.0, 2.0]], ] expected_sequence_length = [1, 2] categorical_column_a = ( tf.feature_column.sequence_categorical_column_with_identity( key="aaa", num_buckets=vocabulary_size ) ) categorical_column_b = ( tf.feature_column.sequence_categorical_column_with_identity( key="bbb", num_buckets=vocabulary_size ) ) # Test that columns are reordered alphabetically. shared_embedding_columns = tf.feature_column.shared_embeddings( [categorical_column_b, categorical_column_a], dimension=embedding_dimension, initializer=_get_initializer( embedding_dimension, embedding_values ), ) sequence_input_layer = ksfc.SequenceFeatures( shared_embedding_columns ) input_layer, sequence_length = sequence_input_layer( {"aaa": sparse_input_a, "bbb": sparse_input_b} ) global_vars = tf.compat.v1.get_collection( tf.compat.v1.GraphKeys.GLOBAL_VARIABLES ) self.assertCountEqual( ("aaa_bbb_shared_embedding:0",), tuple([v.name for v in global_vars]), ) with _initialized_session() as sess: self.assertAllEqual( embedding_values, global_vars[0].eval(session=sess) ) self.assertAllEqual( expected_input_layer, input_layer.eval(session=sess) ) self.assertAllEqual( expected_sequence_length, sequence_length.eval(session=sess) ) def test_shared_embedding_column_with_non_sequence_categorical(self): """Tests that error is raised for non-sequence shared embedding column.""" with tf.Graph().as_default(): vocabulary_size = 3 sparse_input_a = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) sparse_input_b = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) categorical_column_a = ( tf.feature_column.categorical_column_with_identity( key="aaa", num_buckets=vocabulary_size ) ) categorical_column_b = ( tf.feature_column.categorical_column_with_identity( key="bbb", num_buckets=vocabulary_size ) ) shared_embedding_columns = tf.feature_column.shared_embeddings( [categorical_column_a, categorical_column_b], dimension=2 ) sequence_input_layer = ksfc.SequenceFeatures( shared_embedding_columns ) with self.assertRaisesRegex( ValueError, r"In embedding_column: aaa_shared_embedding\. 
" r"categorical_column must " r"be of type SequenceCategoricalColumn to use " r"SequenceFeatures\.", ): _, _ = sequence_input_layer( {"aaa": sparse_input_a, "bbb": sparse_input_b} ) @parameterized.named_parameters( { "testcase_name": "2D", "sparse_input_args_a": { # example 0, ids [2] # example 1, ids [0, 1] "indices": ((0, 0), (1, 0), (1, 1)), "values": (2, 0, 1), "dense_shape": (2, 2), }, "sparse_input_args_b": { # example 0, ids [1] # example 1, ids [1, 0] "indices": ((0, 0), (1, 0), (1, 1)), "values": (1, 1, 0), "dense_shape": (2, 2), }, "expected_input_layer": [ # example 0, ids_a [2], ids_b [1] [[0.0, 0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0]], # example 1, ids_a [0, 1], ids_b [1, 0] [[1.0, 0.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 1.0, 0.0]], ], "expected_sequence_length": [1, 2], }, { "testcase_name": "3D", "sparse_input_args_a": { # feature 0, ids [[2], [0, 1]] # feature 1, ids [[0, 0], [1]] "indices": ( (0, 0, 0), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), ), "values": (2, 0, 1, 0, 0, 1), "dense_shape": (2, 2, 2), }, "sparse_input_args_b": { # feature 0, ids [[1, 1], [1]] # feature 1, ids [[1], [0]] "indices": ( (0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0), ), "values": (1, 1, 1, 1, 0), "dense_shape": (2, 2, 2), }, "expected_input_layer": [ # feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -] [[0.0, 0.0, 1.0, 0.0, 2.0], [1.0, 1.0, 0.0, 0.0, 1.0]], # feature 1, [a: 0, 0, b: 1, -], [a: 1, -, b: 0, -] [[2.0, 0.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 1.0, 0.0]], ], "expected_sequence_length": [2, 2], }, ) def test_indicator_column( self, sparse_input_args_a, sparse_input_args_b, expected_input_layer, expected_sequence_length, ): sparse_input_a = tf.compat.v1.SparseTensorValue(**sparse_input_args_a) sparse_input_b = tf.compat.v1.SparseTensorValue(**sparse_input_args_b) vocabulary_size_a = 3 vocabulary_size_b = 2 categorical_column_a = ( tf.feature_column.sequence_categorical_column_with_identity( key="aaa", num_buckets=vocabulary_size_a ) ) indicator_column_a = tf.feature_column.indicator_column( categorical_column_a ) categorical_column_b = ( tf.feature_column.sequence_categorical_column_with_identity( key="bbb", num_buckets=vocabulary_size_b ) ) indicator_column_b = tf.feature_column.indicator_column( categorical_column_b ) # Test that columns are reordered alphabetically. sequence_input_layer = ksfc.SequenceFeatures( [indicator_column_b, indicator_column_a] ) input_layer, sequence_length = sequence_input_layer( {"aaa": sparse_input_a, "bbb": sparse_input_b} ) self.assertAllEqual(expected_input_layer, self.evaluate(input_layer)) self.assertAllEqual( expected_sequence_length, self.evaluate(sequence_length) ) def test_indicator_column_with_non_sequence_categorical(self): """Tests that error is raised for non-sequence categorical column.""" vocabulary_size = 3 sparse_input = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) categorical_column_a = ( tf.feature_column.categorical_column_with_identity( key="aaa", num_buckets=vocabulary_size ) ) indicator_column_a = tf.feature_column.indicator_column( categorical_column_a ) sequence_input_layer = ksfc.SequenceFeatures([indicator_column_a]) with self.assertRaisesRegex( ValueError, r"In indicator_column: aaa_indicator\. 
categorical_column must be " r"of type SequenceCategoricalColumn to use SequenceFeatures\.", ): _, _ = sequence_input_layer({"aaa": sparse_input}) @parameterized.named_parameters( { "testcase_name": "2D", "sparse_input_args": { # example 0, values [0., 1] # example 1, [10.] "indices": ((0, 0), (0, 1), (1, 0)), "values": (0.0, 1.0, 10.0), "dense_shape": (2, 2), }, "expected_input_layer": [[[0.0], [1.0]], [[10.0], [0.0]]], "expected_sequence_length": [2, 1], }, { "testcase_name": "3D", "sparse_input_args": { # feature 0, ids [[20, 3], [5]] # feature 1, ids [[3], [8]] "indices": ( (0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0), ), "values": (20.0, 3.0, 5.0, 3.0, 8.0), "dense_shape": (2, 2, 2), }, "expected_input_layer": [ [[20.0], [3.0], [5.0], [0.0]], [[3.0], [0.0], [8.0], [0.0]], ], "expected_sequence_length": [2, 2], }, ) def test_numeric_column( self, sparse_input_args, expected_input_layer, expected_sequence_length ): sparse_input = tf.compat.v1.SparseTensorValue(**sparse_input_args) numeric_column = tf.feature_column.sequence_numeric_column("aaa") sequence_input_layer = ksfc.SequenceFeatures([numeric_column]) input_layer, sequence_length = sequence_input_layer( {"aaa": sparse_input} ) self.assertAllEqual(expected_input_layer, self.evaluate(input_layer)) self.assertAllEqual( expected_sequence_length, self.evaluate(sequence_length) ) @parameterized.named_parameters( { "testcase_name": "2D", "sparse_input_args": { # example 0, values [0., 1., 2., 3., 4., 5., 6., 7.] # example 1, [10., 11., 12., 13.] "indices": ( (0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (1, 0), (1, 1), (1, 2), (1, 3), ), "values": ( 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0, 11.0, 12.0, 13.0, ), "dense_shape": (2, 8), }, "expected_input_layer": [ # The output of numeric_column._get_dense_tensor should be # flattened. [[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0]], [[10.0, 11.0, 12.0, 13.0], [0.0, 0.0, 0.0, 0.0]], ], "expected_sequence_length": [2, 1], }, { "testcase_name": "3D", "sparse_input_args": { # example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]] # example 1, [[10., 11., 12., 13.], []] "indices": ( (0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3), (0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3), (1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3), ), "values": ( 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0, 11.0, 12.0, 13.0, ), "dense_shape": (2, 2, 4), }, "expected_input_layer": [ # The output of numeric_column._get_dense_tensor should be # flattened. 
[[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0]], [[10.0, 11.0, 12.0, 13.0], [0.0, 0.0, 0.0, 0.0]], ], "expected_sequence_length": [2, 1], }, ) def test_numeric_column_multi_dim( self, sparse_input_args, expected_input_layer, expected_sequence_length ): """Tests SequenceFeatures for multi-dimensional numeric_column.""" sparse_input = tf.compat.v1.SparseTensorValue(**sparse_input_args) numeric_column = tf.feature_column.sequence_numeric_column( "aaa", shape=(2, 2) ) sequence_input_layer = ksfc.SequenceFeatures([numeric_column]) input_layer, sequence_length = sequence_input_layer( {"aaa": sparse_input} ) self.assertAllEqual(expected_input_layer, self.evaluate(input_layer)) self.assertAllEqual( expected_sequence_length, self.evaluate(sequence_length) ) def test_sequence_length_not_equal(self): """Tests that an error is raised when sequence lengths are not equal.""" # Input a with sequence_length = [2, 1] sparse_input_a = tf.compat.v1.SparseTensorValue( indices=((0, 0), (0, 1), (1, 0)), values=(0.0, 1.0, 10.0), dense_shape=(2, 2), ) # Input b with sequence_length = [1, 1] sparse_input_b = tf.compat.v1.SparseTensorValue( indices=((0, 0), (1, 0)), values=(1.0, 10.0), dense_shape=(2, 2) ) numeric_column_a = tf.feature_column.sequence_numeric_column("aaa") numeric_column_b = tf.feature_column.sequence_numeric_column("bbb") sequence_input_layer = ksfc.SequenceFeatures( [numeric_column_a, numeric_column_b] ) with self.assertRaisesRegex( tf.errors.InvalidArgumentError, r"Condition x == y did not hold.*" ): _, sequence_length = sequence_input_layer( {"aaa": sparse_input_a, "bbb": sparse_input_b} ) self.evaluate(sequence_length) @parameterized.named_parameters( { "testcase_name": "2D", "sparse_input_args": { # example 0, values [[[0., 1.], [2., 3.]], [[4., 5.], [6., # 7.]]] # example 1, [[[10., 11.], [12., 13.]]] "indices": ( (0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (1, 0), (1, 1), (1, 2), (1, 3), ), "values": ( 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0, 11.0, 12.0, 13.0, ), "dense_shape": (2, 8), }, "expected_shape": [2, 2, 4], }, { "testcase_name": "3D", "sparse_input_args": { # example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]] # example 1, [[10., 11., 12., 13.], []] "indices": ( (0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3), (0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3), (1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3), ), "values": ( 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0, 11.0, 12.0, 13.0, ), "dense_shape": (2, 2, 4), }, "expected_shape": [2, 2, 4], }, ) def test_static_shape_from_tensors_numeric( self, sparse_input_args, expected_shape ): """Tests that we return a known static shape when we have one.""" sparse_input = tf.compat.v1.SparseTensorValue(**sparse_input_args) numeric_column = tf.feature_column.sequence_numeric_column( "aaa", shape=(2, 2) ) sequence_input_layer = ksfc.SequenceFeatures([numeric_column]) input_layer, _ = sequence_input_layer({"aaa": sparse_input}) shape = input_layer.get_shape() self.assertEqual(shape, expected_shape) @parameterized.named_parameters( { "testcase_name": "2D", "sparse_input_args": { # example 0, ids [2] # example 1, ids [0, 1] # example 2, ids [] # example 3, ids [1] "indices": ((0, 0), (1, 0), (1, 1), (3, 0)), "values": (2, 0, 1, 1), "dense_shape": (4, 2), }, "expected_shape": [4, 2, 3], }, { "testcase_name": "3D", "sparse_input_args": { # example 0, ids [[2]] # example 1, ids [[0, 1], [2]] # example 2, ids [] # example 3, ids [[1], [0, 2]] "indices": ( (0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0), (3, 0, 0), (3, 1, 0), (3, 1, 1), ), 
"values": (2, 0, 1, 2, 1, 0, 2), "dense_shape": (4, 2, 2), }, "expected_shape": [4, 2, 3], }, ) def test_static_shape_from_tensors_indicator( self, sparse_input_args, expected_shape ): """Tests that we return a known static shape when we have one.""" sparse_input = tf.compat.v1.SparseTensorValue(**sparse_input_args) categorical_column = ( tf.feature_column.sequence_categorical_column_with_identity( key="aaa", num_buckets=3 ) ) indicator_column = tf.feature_column.indicator_column( categorical_column ) sequence_input_layer = ksfc.SequenceFeatures([indicator_column]) input_layer, _ = sequence_input_layer({"aaa": sparse_input}) shape = input_layer.get_shape() self.assertEqual(shape, expected_shape) def test_compute_output_shape(self): price1 = tf.feature_column.sequence_numeric_column("price1", shape=2) price2 = tf.feature_column.sequence_numeric_column("price2") features = { "price1": tf.SparseTensor( indices=[ [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [2, 0, 0], [2, 0, 1], [3, 0, 0], [3, 0, 1], ], values=[ 0.0, 1.0, 10.0, 11.0, 100.0, 101.0, 200.0, 201.0, 300.0, 301.0, ], dense_shape=(4, 3, 2), ), "price2": tf.SparseTensor( indices=[[0, 0], [0, 1], [1, 0], [2, 0], [3, 0]], values=[10.0, 11.0, 20.0, 30.0, 40.0], dense_shape=(4, 3), ), } sequence_features = ksfc.SequenceFeatures([price1, price2]) seq_input, seq_len = sequence_features(features) self.assertEqual( sequence_features.compute_output_shape((None, None)), (None, None, 3), ) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(tf.compat.v1.tables_initializer()) self.assertAllClose( [ [[0.0, 1.0, 10.0], [10.0, 11.0, 11.0], [0.0, 0.0, 0.0]], [[100.0, 101.0, 20.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[200.0, 201.0, 30.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[300.0, 301.0, 40.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], ], self.evaluate(seq_input), ) self.assertAllClose([2, 1, 1, 1], self.evaluate(seq_len)) @test_combinations.generate(test_combinations.combine(mode=["graph", "eager"])) class SequenceFeaturesSerializationTest( tf.test.TestCase, parameterized.TestCase ): @parameterized.named_parameters( ("trainable", True, "trainable"), ("not_trainable", False, "frozen") ) def test_get_config(self, trainable, name): cols = [tf.feature_column.sequence_numeric_column("a")] orig_layer = ksfc.SequenceFeatures(cols, trainable=trainable, name=name) config = orig_layer.get_config() self.assertEqual(config["name"], orig_layer.name) self.assertEqual(config["trainable"], trainable) self.assertLen(config["feature_columns"], 1) self.assertEqual( config["feature_columns"][0]["class_name"], "SequenceNumericColumn" ) self.assertEqual(config["feature_columns"][0]["config"]["shape"], (1,)) @parameterized.named_parameters( ("trainable", True, "trainable"), ("not_trainable", False, "frozen") ) def test_from_config(self, trainable, name): cols = [tf.feature_column.sequence_numeric_column("a")] orig_layer = ksfc.SequenceFeatures(cols, trainable=trainable, name=name) config = orig_layer.get_config() new_layer = ksfc.SequenceFeatures.from_config(config) self.assertEqual(new_layer.name, orig_layer.name) self.assertEqual(new_layer.trainable, trainable) self.assertLen(new_layer._feature_columns, 1) self.assertEqual(new_layer._feature_columns[0].name, "a") def test_serialization_sequence_features(self): rating = tf.feature_column.sequence_numeric_column("rating") sequence_feature = ksfc.SequenceFeatures([rating]) config = keras.layers.serialize(sequence_feature) revived = keras.layers.deserialize(config) 
self.assertIsInstance(revived, ksfc.SequenceFeatures) class SequenceFeaturesSavingTest(tf.test.TestCase, parameterized.TestCase): @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def test_saving_with_sequence_features(self): cols = [ tf.feature_column.sequence_numeric_column("a"), tf.feature_column.indicator_column( tf.feature_column.sequence_categorical_column_with_vocabulary_list( # noqa: E501 "b", ["one", "two"] ) ), ] input_layers = { "a": keras.layers.Input(shape=(None, 1), sparse=True, name="a"), "b": keras.layers.Input( shape=(None, 1), sparse=True, name="b", dtype="string" ), } fc_layer, _ = ksfc.SequenceFeatures(cols)(input_layers) # TODO(tibell): Figure out the right dtype and apply masking. # sequence_length_mask = array_ops.sequence_mask(sequence_length) # x = keras.layers.GRU(32)(fc_layer, mask=sequence_length_mask) x = keras.layers.GRU(32)(fc_layer) output = keras.layers.Dense(10)(x) model = keras.models.Model(input_layers, output) model.compile( loss=keras.losses.MSE, optimizer="rmsprop", metrics=[keras.metrics.categorical_accuracy], ) config = model.to_json() loaded_model = model_config.model_from_json(config) batch_size = 10 timesteps = 1 values_a = np.arange(10, dtype=np.float32) indices_a = np.zeros((10, 3), dtype=np.int64) indices_a[:, 0] = np.arange(10) inputs_a = tf.SparseTensor( indices_a, values_a, (batch_size, timesteps, 1) ) values_b = np.zeros(10, dtype=str) indices_b = np.zeros((10, 3), dtype=np.int64) indices_b[:, 0] = np.arange(10) inputs_b = tf.SparseTensor( indices_b, values_b, (batch_size, timesteps, 1) ) with self.cached_session(): # Initialize tables for V1 lookup. if not tf.executing_eagerly(): self.evaluate(tf.compat.v1.tables_initializer()) self.assertLen( loaded_model.predict({"a": inputs_a, "b": inputs_b}, steps=1), batch_size, ) if __name__ == "__main__": tf.test.main()
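
# A minimal sketch of the round trip these tests exercise (illustrative
# only, and intentionally not wired into the test suite; it reuses this
# module's `tf` and `ksfc` imports):
def _sequence_features_example():
    column = tf.feature_column.sequence_numeric_column("aaa")
    layer = ksfc.SequenceFeatures([column])
    sparse = tf.SparseTensor(
        indices=((0, 0), (0, 1), (1, 0)),
        values=(0.0, 1.0, 10.0),
        dense_shape=(2, 2),
    )
    # `dense` has shape [2, 2, 1]; `sequence_length` is [2, 1].
    dense, sequence_length = layer({"aaa": sparse})
    return dense, sequence_length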
tf-keras/tf_keras/feature_column/sequence_feature_column_test.py/0
{ "file_path": "tf-keras/tf_keras/feature_column/sequence_feature_column_test.py", "repo_id": "tf-keras", "token_count": 20779 }
193
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys

import tensorflow.compat.v2 as tf

import tf_keras as keras


class MiniModel(keras.Model):
    """Minimal model for MNIST.

    Useful for testing and debugging on slow TPU simulators.
    """

    def __init__(self):
        super().__init__(name="")
        self.fc = keras.layers.Dense(
            1, name="fc", kernel_initializer="ones", bias_initializer="ones"
        )

    def call(self, inputs, training=True):
        return self.fc(inputs)


class DefunnedMiniModel(MiniModel):
    @tf.function
    def call(self, inputs, training=True):
        return super(DefunnedMiniModel, self).call(inputs, training=training)


class ModelWithOptimizer(keras.Model):
    def __init__(self):
        super().__init__()
        self.dense = keras.layers.Dense(1)
        self.optimizer = keras.optimizers.Adam(0.01)

    @tf.function(
        input_signature=(
            tf.TensorSpec([None, 2], tf.float32),
            tf.TensorSpec([None], tf.float32),
        )
    )
    def call(self, x, y):
        with tf.GradientTape() as tape:
            loss = tf.math.reduce_mean((self.dense(x) - y) ** 2.0)
        trainable_variables = self.trainable_variables
        gradients = tape.gradient(loss, trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, trainable_variables))
        return {"loss": loss}


class FunctionTest(tf.test.TestCase):
    def testFunctionRelaxationLosesInnerDimWithKerasLayer(self):
        layer = keras.layers.Dense(1)
        fn = tf.function(reduce_retracing=True)(layer)

        with self.captureWritesToStream(sys.stderr) as printed:
            fn(tf.ones((3, 2)))
        self.assertNotIn("ValueError", printed.contents())

        with self.captureWritesToStream(sys.stderr) as printed:
            # Use batch size 2 to trigger a second cache miss on the shape.
            fn(tf.ones((2, 2)))
        self.assertNotIn("ValueError", printed.contents())

        # Shape relaxation passes TensorShape([None, None]), which causes the
        # layer's matmul to fail due to incompatible inner dimensions. What
        # would have been a graph build time error (the layer complaining
        # about the inner dim being 4) is instead raised when the relaxed
        # function is called below.
with self.captureWritesToStream(sys.stderr) as printed: with self.assertRaisesRegex( tf.errors.InvalidArgumentError, r"Matrix size-incompatible" ): fn(tf.ones((3, 4))) def testDefunKerasModelCall(self): model = MiniModel() model.call = tf.function(model.call) x = tf.ones([1, 2]) y = model(x) self.assertAllEqual([[3.0]], self.evaluate(y)) # Break the reference cycle between the MiniModel and the defun: # `MiniModel` --(through its `call` method)--> `Function` # `Function` --(instancemethod on `MiniModel`)--> `MiniModel` del model.call def testDecoratedMethod(self): m = DefunnedMiniModel() instance_call_one = m.call(tf.ones([1, 2]), training=True) instance_call_two = m.call(inputs=tf.ones([1, 2]), training=True) class_call = DefunnedMiniModel.call(m, tf.ones([1, 2]), training=True) self.assertAllEqual(instance_call_one, instance_call_two) self.assertAllEqual(instance_call_one, class_call) def testDecoratedMethodUniqueFunctionPerInstance(self): m = DefunnedMiniModel() n = DefunnedMiniModel() class_method_one = DefunnedMiniModel.call class_method_two = DefunnedMiniModel.call m_method_one = m.call m_method_two = m.call n_method_one = n.call n_method_two = n.call self.assertEqual(class_method_one, class_method_two) self.assertEqual(m_method_one, m_method_two) self.assertEqual(n_method_one, n_method_two) self.assertNotEqual(m.call, n.call) def testDecoratedMethodGetConcreteFunction(self): m = DefunnedMiniModel() instance_call_one = m.call.get_concrete_function( tf.ones([1, 2]), training=False ) instance_call_two = m.call.get_concrete_function( inputs=tf.ones([1, 2]), training=False ) self.assertAllEqual( instance_call_one(tf.ones([1, 2])), instance_call_two(tf.ones([1, 2])), ) # Also make sure get_concrete_function works on the class method DefunnedMiniModel.call.get_concrete_function( m, tf.ones([1, 2]), training=False ) DefunnedMiniModel.call.get_concrete_function( m, inputs=tf.ones([1, 2]), training=True ) def testDecoratedMethodVariableCleanup(self): m = DefunnedMiniModel() m(tf.ones([1, 2])) variable_refs = list({v.ref() for v in m.variables}) self.assertLen(variable_refs, 2) del m # Verifying if the variables are only referenced from variable_refs. # We expect the reference counter to be 1, but `sys.getrefcount` reports # one higher reference counter because a temporary is created when we # call sys.getrefcount(). Hence check if the number returned is 2. 
# https://docs.python.org/3/library/sys.html#sys.getrefcount self.assertEqual(sys.getrefcount(variable_refs[0].deref()), 2) self.assertEqual(sys.getrefcount(variable_refs[1].deref()), 2) def testStandardTrainingLoopInFunction(self): layer = keras.layers.Dense(2) dataset = ( tf.data.Dataset.from_tensors( (tf.ones([784]), tf.ones([], tf.int32)) ) .map(lambda x, y: (x, y)) .repeat(10) .batch(32) ) optimizer = keras.optimizers.Adam() @tf.function def train(): for x, y in dataset: with tf.GradientTape() as tape: out = layer(x) loss = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits( logits=out, labels=y ) ) layer_variables = layer.trainable_variables gradients = tape.gradient(loss, layer_variables) optimizer.apply_gradients(zip(gradients, layer_variables)) train() def testEarlyStoppingTrainingLoopInFunction(self): layer = keras.layers.Dense(2) dataset = ( tf.data.Dataset.from_tensors( (tf.ones([784]), tf.ones([], tf.int32)) ) .map(lambda x, y: (x, y)) .repeat(10) .batch(32) ) optimizer = keras.optimizers.Adam() @tf.function def train(): for x, y in dataset: with tf.GradientTape() as tape: out = layer(x) loss = tf.math.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits( logits=out, labels=y ) ) layer_variables = layer.trainable_variables gradients = tape.gradient(loss, layer_variables) optimizer.apply_gradients(zip(gradients, layer_variables)) if optimizer.iterations > 3: break train() def test_optimizer(self): x = tf.constant([[3.0, 4.0]]) y = tf.constant([2.0]) model = ModelWithOptimizer() model(x, y) class AutomaticControlDependenciesTest(tf.test.TestCase): def testVariableInitializersCanBeLifted(self): # The initializer is a stateful op, but using it inside a function # should *not* create additional dependencies. That's what we're # testing. layer = keras.layers.Dense(1, kernel_initializer="glorot_uniform") @tf.function def fn(x): # Stateful operation tf.debugging.Assert(x, ["Error"]) # Variable initialization should be lifted. Prior to the change # that added this test, the lifting would crash because of an auto # control dep added on `x`. Note, the error did not happen if we # manually created a tf.Variable outside of function and used it # here. Alternatively, creating a tf.Variable inside fn() causes a # different sort of error that is out of scope for this test. return layer(tf.convert_to_tensor([[1.0, 1.0]])) true = tf.convert_to_tensor(True) concrete = fn.get_concrete_function( tf.TensorSpec(shape=(), dtype=tf.bool) ) self.evaluate(concrete(true)) self.evaluate(fn(True)) if __name__ == "__main__": if tf.__internal__.tf2.enabled(): tf.test.main()
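
# A minimal sketch of the shape relaxation behaviour tested in
# `testFunctionRelaxationLosesInnerDimWithKerasLayer` above (illustrative
# only; reuses this module's `tf` and `keras` imports):
def _shape_relaxation_example():
    layer = keras.layers.Dense(1)
    fn = tf.function(reduce_retracing=True)(layer)
    fn(tf.ones((3, 2)))
    # A second distinct batch size relaxes the traced input signature
    # towards [None, None], so incompatible inner dims only fail at call
    # time rather than at trace time.
    fn(tf.ones((2, 2)))
    return fn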
tf-keras/tf_keras/integration_test/function_test.py/0
{ "file_path": "tf-keras/tf_keras/integration_test/function_test.py", "repo_id": "tf-keras", "token_count": 4301 }
194
import tensorflow as tf from tensorflow import keras from tf_keras.integration_test.models.input_spec import InputSpec def get_data_spec(batch_size): return ( { "num_cat_feat": InputSpec( (batch_size,), dtype="int32", range=[0, 5] ), "string_cat_feat": InputSpec((batch_size,), dtype="string"), "num_feat": InputSpec((batch_size,)), }, InputSpec((batch_size, 1), dtype="int32", range=[0, 2]), ) def get_input_preprocessor(): dataset = tf.data.Dataset.from_tensor_slices( { "num_cat_feat": [0, 1, 2, 3, 4, 5], "string_cat_feat": ["zero", "one", "two", "three", "four", "five"], "num_feat": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5], } ).batch(3) num_cat_feat = keras.Input(shape=(1,), name="num_cat_feat", dtype="int64") string_cat_feat = keras.Input( shape=(1,), name="string_cat_feat", dtype="string" ) num_feat = keras.Input(shape=(1,), name="num_feat", dtype="float32") all_inputs = [ num_cat_feat, string_cat_feat, num_feat, ] all_features = keras.layers.concatenate( [ encode_categorical_feature( num_cat_feat, "num_cat_feat", dataset, False ), encode_categorical_feature( string_cat_feat, "string_cat_feat", dataset, True ), encode_numerical_feature(num_feat, "num_feat", dataset), ] ) preprocessor = keras.Model(all_inputs, all_features) return preprocessor def encode_numerical_feature(feature, name, dataset): normalizer = keras.layers.Normalization(mean=[1.0], variance=[2.0]) encoded_feature = normalizer(feature) return encoded_feature def encode_categorical_feature(feature, name, dataset, is_string): lookup_class = ( keras.layers.StringLookup if is_string else keras.layers.IntegerLookup ) lookup = lookup_class(output_mode="binary") feature_ds = dataset.map(lambda x: x[name]) feature_ds = feature_ds.map(lambda x: tf.expand_dims(x, -1)) lookup.adapt(feature_ds) encoded_feature = lookup(feature) return encoded_feature def get_model( build=False, compile=False, jit_compile=False, include_preprocessing=True ): preprocessor = get_input_preprocessor() if include_preprocessing: all_inputs = preprocessor.inputs all_features = preprocessor.outputs[0] else: all_inputs = keras.Input(shape=preprocessor.outputs[0].shape) all_features = all_inputs x = keras.layers.Dense(32, activation="relu")(all_features) x = keras.layers.Dropout(0.5)(x) output = keras.layers.Dense(1, activation="sigmoid")(x) model = keras.Model(all_inputs, output) if compile: model.compile( "adam", "binary_crossentropy", metrics=["accuracy"], jit_compile=jit_compile, ) return model def get_custom_objects(): return {}
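
# A minimal sketch of what `encode_categorical_feature` does for string
# inputs (illustrative only; `output_mode="binary"` is the multi-hot mode
# used above, renamed "multi_hot" in newer TF releases):
def _string_lookup_example():
    lookup = keras.layers.StringLookup(output_mode="binary")
    lookup.adapt(tf.constant([["zero"], ["one"], ["two"]]))
    # Each row becomes a multi-hot vector over the adapted vocabulary,
    # with the first slot reserved for out-of-vocabulary values.
    return lookup(tf.constant([["one"], ["unseen"]]))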
tf-keras/tf_keras/integration_test/models/structured_data_classification.py/0
{ "file_path": "tf-keras/tf_keras/integration_test/models/structured_data_classification.py", "repo_id": "tf-keras", "token_count": 1396 }
195
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import os import tempfile import tensorflow.compat.v2 as tf from absl.testing import parameterized def cycle(obj, cycles, signatures=None): to_save = obj # TODO(vbardiovsky): It would be nice if exported protos reached a fixed # point w.r.t. saving/restoring, ideally after 2nd saving. for _ in range(cycles): path = tempfile.mkdtemp(prefix=tf.compat.v1.test.get_temp_dir()) # If available, we'll run the save and restore preferring the GPU. This # just makes sure we aren't throwing errors and have enough # device("CPU") blocks to satisfy the placer. device = ( "/device:GPU:0" if tf.test.is_gpu_available() else "/device:CPU:0" ) with tf.device(device): tf.saved_model.save(to_save, path, signatures) loaded = tf.saved_model.load(path) to_save = loaded return loaded class _ModelWithOptimizer(tf.train.Checkpoint): def __init__(self): self.dense = tf.keras.layers.Dense(1) self.optimizer = tf.keras.optimizers.Adam(0.01) @tf.function( input_signature=( tf.TensorSpec([None, 2], tf.float32), tf.TensorSpec([None], tf.float32), ) ) def call(self, x, y): with tf.GradientTape() as tape: loss = tf.math.reduce_mean((self.dense(x) - y) ** 2.0) trainable_variables = self.dense.trainable_variables gradients = tape.gradient(loss, trainable_variables) self.optimizer.apply_gradients(zip(gradients, trainable_variables)) return {"loss": loss} def _import_and_infer(save_dir, inputs, signature_key="serving_default"): """Import a SavedModel into a TF 1.x-style graph and run `signature_key`.""" graph = tf.Graph() with graph.as_default(), tf.compat.v1.Session() as session: model = tf.compat.v1.saved_model.load(session, ["serve"], save_dir) return _run_signature(session, model, inputs, signature_key) def _run_signature(session, meta_graph_def, inputs, signature_key): signature = meta_graph_def.signature_def[signature_key] assert set(inputs.keys()) == set(signature.inputs.keys()) feed_dict = {} for arg_name in inputs.keys(): input_tensor = session.graph.get_tensor_by_name( signature.inputs[arg_name].name ) feed_dict[input_tensor] = inputs[arg_name] output_dict = {} for output_name, output_tensor_info in signature.outputs.items(): output_dict[output_name] = session.graph.get_tensor_by_name( output_tensor_info.name ) return session.run(output_dict, feed_dict=feed_dict) class SaveTest(tf.test.TestCase): def test_unbuilt_model_does_not_prevent_saving(self): root = tf.train.Checkpoint( model=tf.keras.Sequential([tf.keras.layers.Dense(2)]) ) tf.saved_model.save( root, os.path.join(self.get_temp_dir(), "saved_model") ) def test_optimizer(self): x = tf.constant([[3.0, 4.0]]) y = tf.constant([2.0]) model = _ModelWithOptimizer() first_loss = model.call(x, y) save_dir = os.path.join(self.get_temp_dir(), "saved_model") tf.saved_model.save(model, save_dir, model.call) second_loss = model.call(x, y) self.assertNotEqual(first_loss, second_loss) self.assertAllClose( 
second_loss, _import_and_infer(save_dir, {"x": [[3.0, 4.0]], "y": [2.0]}), ) def test_single_method_default_signature(self): model = _ModelWithOptimizer() x = tf.constant([[3.0, 4.0]]) y = tf.constant([2.0]) model.call(x, y) save_dir = os.path.join(self.get_temp_dir(), "saved_model") tf.saved_model.save(model, save_dir) self.assertIn( "loss", _import_and_infer(save_dir, {"x": [[3.0, 4.0]], "y": [2.0]}) ) @parameterized.named_parameters( dict(testcase_name="ReloadOnce", cycles=1), dict(testcase_name="ReloadTwice", cycles=2), dict(testcase_name="ReloadThrice", cycles=3), ) class LoadTest(tf.test.TestCase, parameterized.TestCase): def test_optimizer(self, cycles): class _HasOptimizer(tf.Module): def __init__(self): super().__init__() self.layer = tf.keras.layers.Dense(1) self.optimizer = tf.keras.optimizers.Adam(0.01) @tf.function def __call__(self, x): return self.layer(x) @tf.function def train(self, x, y): with tf.GradientTape() as tape: predicted = self(x) loss = tf.math.reduce_sum(tf.math.abs(y - predicted)) train_vars = self.layer.trainable_variables grads = tape.gradient(loss, train_vars) self.optimizer.apply_gradients(zip(grads, train_vars)) root = _HasOptimizer() train_input = dict(x=tf.constant([[1.0]]), y=tf.constant([[2.0]])) root.train(**train_input) imported = cycle(root, cycles) self.assertAllClose( root.optimizer.learning_rate.numpy(), imported.optimizer.learning_rate.numpy(), ) self.assertAllClose( root(tf.constant([[-0.5]])), imported(tf.constant([[-0.5]])) ) root.train(**train_input) imported.train(**train_input) self.assertAllClose( root(tf.constant([[-0.5]])), imported(tf.constant([[-0.5]])) ) def test_model_with_custom_function_attached(self, cycles): root = tf.train.Checkpoint( model=tf.keras.Sequential([tf.keras.layers.Dense(2)]) ) @tf.function def _use_sequential(x): return root.model.call(x) root.model.traced_call = _use_sequential original = root.model.traced_call(tf.zeros([1, 1])).numpy() root = cycle(root, cycles) self.assertAllEqual( original, root.model.traced_call(tf.zeros([1, 1])).numpy() ) @parameterized.named_parameters( dict(testcase_name="ReloadOnce", cycles=1), dict(testcase_name="ReloadTwice", cycles=2), dict(testcase_name="ReloadThrice", cycles=3), ) class KerasLoadTest(tf.test.TestCase, parameterized.TestCase): def test_dense_features_layer(self, cycles): columns = [ tf.feature_column.numeric_column("x"), tf.feature_column.numeric_column("y"), ] layer = tf.keras.layers.DenseFeatures(columns) model = tf.keras.Sequential([layer]) model_input = {"x": tf.constant([[1.0]]), "y": tf.constant([[2.0]])} self.assertAllClose([[1.0, 2.0]], model.predict(model_input, steps=1)) loaded = cycle(model, cycles) (output,) = loaded._default_save_signature(model_input).values() self.assertAllClose([[1.0, 2.0]], output) (signature_output,) = loaded.signatures["serving_default"]( **model_input ).values() self.assertAllClose([[1.0, 2.0]], signature_output) def test_dense_features_layer_fit(self, cycles): columns = [tf.feature_column.numeric_column("x")] model = tf.keras.Sequential( [tf.keras.layers.DenseFeatures(columns), tf.keras.layers.Dense(1)] ) model_input = {"x": tf.constant([[1.0]])} model.compile(optimizer="adam", loss="mse", run_eagerly=True) model.fit(model_input, tf.constant([[3.0]])) loaded = cycle(model, cycles) loaded._default_save_signature(model_input) loaded.signatures["serving_default"](**model_input) def test_multi_output_layer(self, cycles): inp = tf.keras.Input(name="inp", shape=(None,), dtype=tf.float32) class _MultiOutput(tf.keras.layers.Layer): def 
call(self, x): return x + 1.0, x + 2.0 out = _MultiOutput(name="out")(inp) model = tf.keras.Model(inp, out) loaded = cycle(model, cycles) self.assertAllClose( dict(out=2.0, out_1=3.0), loaded.signatures["serving_default"](tf.constant(1.0)), ) def test_functional_model_with_conv(self, cycles): x = tf.keras.Input(name="x", shape=(None, None, 3), dtype=tf.float32) conved = tf.keras.layers.Conv2D( filters=3, kernel_size=3, dilation_rate=2 )(x) model = tf.keras.Model([x], conved) model_input = tf.ones((1, 10, 10, 3)) initial_output = model.predict([model_input]) model = cycle(model, cycles) self.assertAllClose( [initial_output], list(model.signatures["serving_default"](model_input).values()), ) if __name__ == "__main__": if tf.__internal__.tf2.enabled(): tf.test.main()
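
# A minimal sketch of the single save/load round trip that `cycle()` above
# repeats `cycles` times (illustrative only; the path is a placeholder):
def _single_cycle_example(obj, path="/tmp/saved_model_example"):
    tf.saved_model.save(obj, path)
    return tf.saved_model.load(path)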
tf-keras/tf_keras/integration_test/saved_model_test.py/0
{ "file_path": "tf-keras/tf_keras/integration_test/saved_model_test.py", "repo_id": "tf-keras", "token_count": 4332 }
196
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests Attention layer.""" import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.layers import core from tf_keras.testing_infra import test_combinations @test_combinations.generate(test_combinations.combine(mode=["graph", "eager"])) class AttentionTest(tf.test.TestCase, parameterized.TestCase): def test_calculate_scores_one_dim(self): # Query tensor of shape [1, 1, 1] q = np.array([[[1.1]]], dtype=np.float32) # Key tensor of shape [1, 1, 1] k = np.array([[[1.6]]], dtype=np.float32) attention_layer = keras.layers.Attention() attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1])) actual = attention_layer._calculate_scores(query=q, key=k) # Expected tensor of shape [1, 1, 1]. # expected000 = 1.1*1.6 = 1.76 expected = np.array([[[1.76]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_calculate_scores_multi_dim(self): # Query tensor of shape [1, 2, 4] q = np.array( [[[1.0, 1.1, 1.2, 1.3], [2.0, 2.1, 2.2, 2.3]]], dtype=np.float32 ) # Key tensor of shape [1, 3, 4] k = np.array( [ [ [1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8], ] ], dtype=np.float32, ) attention_layer = keras.layers.Attention() attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4])) actual = attention_layer._calculate_scores(query=q, key=k) # Expected tensor of shape [1, 2, 3]. 
# expected000 = 1.*1.5+1.1*1.6+1.2*1.7+1.3*1.8 = 7.64 # expected001 = 1.*2.5+1.1*2.6+1.2*2.7+1.3*2.8 = 12.24 # expected002 = 1.*3.5+1.1*3.6+1.2*3.7+1.3*3.8 = 16.84 # expected010 = 2.*1.5+2.1*1.6+2.2*1.7+2.3*1.8 = 14.24 # expected011 = 2.*2.5+2.1*2.6+2.2*2.7+2.3*2.8 = 22.84 # expected012 = 2.*3.5+2.1*3.6+2.2*3.7+2.3*3.8 = 31.44 expected = np.array( [[[7.64, 12.24, 16.84], [14.24, 22.84, 31.44]]], dtype=np.float32 ) self.assertAllClose(expected, actual) def test_calculate_scores_multi_dim_concat(self): # Query tensor of shape [1, 2, 4] q = np.array( [[[1.0, 1.1, 1.2, 1.3], [2.0, 2.1, 2.2, 2.3]]], dtype=np.float32 ) # Key tensor of shape [1, 3, 4] k = np.array( [ [ [1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8], ] ], dtype=np.float32, ) attention_layer = keras.layers.Attention(score_mode="concat") attention_layer.concat_score_weight = 1 attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4])) actual = keras.backend.get_value( attention_layer._calculate_scores(query=q, key=k) ) # expected000 = tanh(1.+1.5) + tanh(1.1+1.6) + \ # tanh(1.2+1.7) + tanh(1.3+1.8) = 3.96753427840 # expected001 = tanh(1.+2.5) + tanh(1.1+2.6) + \ # tanh(1.2+2.7) + tanh(1.3+2.8) = 3.99558784825 # expected002 = tanh(1.+3.5) + tanh(1.1+3.6) + \ # tanh(1.2+3.7) + tanh(1.3+3.8) = 3.99940254147 # expected010 = tanh(2.+1.5) + tanh(2.1+1.6) + \ # tanh(2.2+1.7) + tanh(2.3+1.8) = 3.99558784825 # expected011 = tanh(2.+2.5) + tanh(2.1+2.6) + \ # tanh(2.2+2.7) + tanh(2.3+2.8) = 3.99940254147 # expected012 = tanh(2.+3.5) + tanh(2.1+3.6) + \ # tanh(2.2+3.7) + tanh(2.3+3.8) = 3.99991913657 expected = np.array( [ [ [3.96753427840, 3.99558784825, 3.99940254147], [3.99558784825, 3.99940254147, 3.99991913657], ] ], dtype=np.float32, ) self.assertAllClose(expected, actual) def test_calculate_scores_one_dim_batch_size_two(self): # Query tensor of shape [2, 1, 1] q = np.array([[[1.1]], [[2.1]]], dtype=np.float32) # Key tensor of shape [2, 1, 1] k = np.array([[[1.6]], [[2.6]]], dtype=np.float32) attention_layer = keras.layers.Attention() attention_layer.build(input_shape=([2, 1, 1], [2, 1, 1])) actual = attention_layer._calculate_scores(query=q, key=k) # Expected tensor of shape [2, 1, 1]. # expected000 = 1.1*1.6 = 1.76 # expected100 = 2.1*2.6 = 5.46 expected = np.array([[[1.76]], [[5.46]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_calculate_scores_one_dim_with_scale(self): """Tests that scores are multiplied by scale.""" # Query tensor of shape [1, 1, 1] q = np.array([[[1.1]]], dtype=np.float32) # Key tensor of shape [1, 1, 1] k = np.array([[[1.6]]], dtype=np.float32) attention_layer = keras.layers.Attention(use_scale=True) attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1])) attention_layer.scale = -2.0 actual = attention_layer._calculate_scores(query=q, key=k) # Expected tensor of shape [1, 1, 1]. 
# expected000 = -2*1.1*1.6 = -3.52 expected = np.array([[[-3.52]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_calculate_scores_one_dim_with_scale_concat(self): """Tests that scores are multiplied by scale.""" # Query tensor of shape [1, 1, 1] q = np.array([[[1.1]]], dtype=np.float32) # Key tensor of shape [1, 1, 1] k = np.array([[[1.6]]], dtype=np.float32) attention_layer = keras.layers.Attention( use_scale=True, score_mode="concat" ) attention_layer.concat_score_weight = 1 attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1])) attention_layer.scale = 2.0 actual = keras.backend.get_value( attention_layer._calculate_scores(query=q, key=k) ) # Expected tensor of shape [1, 1, 1]. # expected000 = tanh(2*(1.1+1.6)) = 0.9999592018254402 expected = np.array([[[0.999959202]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_shape(self): # Query tensor of shape [1, 2, 4] q = np.array( [[[1.0, 1.1, 1.2, 1.3], [2.0, 2.1, 2.2, 2.3]]], dtype=np.float32 ) # Value tensor of shape [1, 3, 4] v = np.array( [ [ [1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8], ] ], dtype=np.float32, ) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = keras.layers.Attention() actual = attention_layer([q, v], mask=[None, v_mask]) expected_shape = [1, 2, 4] self.assertAllEqual(expected_shape, tf.shape(actual)) def test_shape_concat(self): # Query tensor of shape [1, 2, 4] q = np.array( [[[1.0, 1.1, 1.2, 1.3], [2.0, 2.1, 2.2, 2.3]]], dtype=np.float32 ) # Value tensor of shape [1, 3, 4] v = np.array( [ [ [1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8], ] ], dtype=np.float32, ) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = keras.layers.Attention(score_mode="concat") attention_layer.concat_score_weight = 1 actual = attention_layer([q, v], mask=[None, v_mask]) expected_shape = [1, 2, 4] self.assertAllEqual(expected_shape, tf.shape(actual)) def test_shape_with_key(self): # Query tensor of shape [1, 2, 4] q = np.array( [[[1.0, 1.1, 1.2, 1.3], [2.0, 2.1, 2.2, 2.3]]], dtype=np.float32 ) # Value tensor of shape [1, 3, 4] v = np.array( [ [ [1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8], ] ], dtype=np.float32, ) # Key tensor of shape [1, 3, 4] k = np.array( [ [ [1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8], ] ], dtype=np.float32, ) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = keras.layers.Attention() actual = attention_layer([q, v, k], mask=[None, v_mask]) expected_shape = [1, 2, 4] self.assertAllEqual(expected_shape, tf.shape(actual)) def test_shape_with_key_concat(self): # Query tensor of shape [1, 2, 4] q = np.array( [[[1.0, 1.1, 1.2, 1.3], [2.0, 2.1, 2.2, 2.3]]], dtype=np.float32 ) # Value tensor of shape [1, 3, 4] v = np.array( [ [ [1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8], ] ], dtype=np.float32, ) # Key tensor of shape [1, 3, 4] k = np.array( [ [ [1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8], ] ], dtype=np.float32, ) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = keras.layers.Attention(score_mode="concat") attention_layer.concat_score_weight = 1 actual = attention_layer([q, v, k], mask=[None, v_mask]) expected_shape = [1, 2, 4] self.assertAllEqual(expected_shape, tf.shape(actual)) def test_multi_dim(self): # Query tensor of shape 
[1, 1, 1] q = np.array([[[1.1]]], dtype=np.float32) # Value tensor of shape [1, 3, 1] v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = keras.layers.Attention() actual = attention_layer([q, v], mask=[None, v_mask]) # Expected scores of shape [1, 1, 3] # scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8]]] = [[[1.76, 0.77, -0.88]]] # Expected attention distribution = softmax(scores) with zeros in # positions where v_mask == False. # => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77)) # = 0.72908792234 # attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77)) # = 0.27091207765 # attention_distribution002 = 0 # # Expected tensor of shape [1, 1, 1]. # expected000 = 0.72908792234 * 1.6 + 0.27091207765 * 0.7 - 0 * 0.8 # = 1.3561791301 expected = np.array([[[1.3561791301]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_multi_dim_with_key(self): # Query tensor of shape [1, 1, 1] q = np.array([[[1.1]]], dtype=np.float32) # Value tensor of shape [1, 3, 1] v = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32) # Key tensor of shape [1, 3, 1] k = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = keras.layers.Attention() actual = attention_layer([q, v, k], mask=[None, v_mask]) # Expected scores of shape [1, 1, 3] # scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8]]] = [[[1.76, 0.77, -0.88]]] # Expected attention distribution = softmax(scores) with zeros in # positions where v_mask == False. # => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77)) # = 0.72908792234 # attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77)) # = 0.27091207765 # attention_distribution002 = 0 # # Expected tensor of shape [1, 1, 1]. # expected000 = 0.72908792234 * 0.5 + 0.27091207765 * 0.8 - 0 * 0.3 # = 0.58127362329 expected = np.array([[[0.58127362329]]], dtype=np.float32) self.assertAllClose(expected, actual) @parameterized.named_parameters( ("", False), ("return_attention_scores", True), ) def test_multi_dim_with_query_mask(self, return_attention_scores): # Query tensor of shape [1, 2, 1] q = np.array([[[1.1], [-0.5]]], dtype=np.float32) # Value tensor of shape [1, 3, 1] v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32) # Query mask tensor of shape [1, 2] q_mask = np.array([[True, False]], dtype=np.bool_) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = keras.layers.Attention() if return_attention_scores: actual, actual_scores = attention_layer( [q, v], mask=[q_mask, v_mask], return_attention_scores=return_attention_scores, ) else: actual = attention_layer( [q, v], mask=[q_mask, v_mask], return_attention_scores=return_attention_scores, ) # Expected scores of shape [1, 2, 3] # scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8], # [-0.5*1.6, -0.5*0.7, 0.5*0.8]]] # = [[[1.76, 0.77, -0.88], [-0.8, -0.35, 0.4]]] # Expected attention distribution = softmax(scores) with zeros in # positions where v_mask == False. 
        # => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))
        #                              = 0.72908792234
        #    attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))
        #                              = 0.27091207765
        #    attention_distribution002 = 0
        # => attention_distribution010 = exp(-0.8)/(exp(-0.8) + exp(-0.35))
        #                              = 0.38936076605
        #    attention_distribution011 = exp(-0.35)/(exp(-0.8) + exp(-0.35))
        #                              = 0.61063923394
        #    attention_distribution012 = 0
        if return_attention_scores:
            expected_scores = np.array(
                [
                    [
                        [0.72908792234, 0.27091207765, 0.0],
                        [0.38936076605, 0.61063923394, 0.0],
                    ]
                ],
                dtype=np.float32,
            )
            self.assertAllClose(expected_scores, actual_scores)
        # Expected tensor of shape [1, 2, 1] with zeros where q_mask == False.
        # expected000 = 0.72908792234 * 1.6 + 0.27091207765 * 0.7 - 0 * 0.8
        #             = 1.3561791301
        # expected010 = 0 (the second query position is masked by q_mask)
        expected = np.array([[[1.3561791301], [0.0]]], dtype=np.float32)
        self.assertAllClose(expected, actual)

    def test_scale_none(self):
        """Tests that scale is None by default."""
        attention_layer = keras.layers.Attention()
        attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
        self.assertIsNone(attention_layer.scale)

    def test_scale_init_eager(self):
        """Tests that scale initializes to 1 when use_scale=True."""
        if not tf.executing_eagerly():
            self.skipTest("Only run in eager mode")
        attention_layer = keras.layers.Attention(use_scale=True)
        attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
        self.assertAllClose(1.0, attention_layer.scale.value())

    def test_scale_init_graph(self):
        """Tests that scale initializes to 1 when use_scale=True."""
        with self.cached_session() as sess:
            attention_layer = keras.layers.Attention(use_scale=True)
            attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
            sess.run(attention_layer.scale.initializer)
            self.assertAllClose(1.0, attention_layer.scale.value())

    @parameterized.named_parameters(
        ("", False),
        ("return_attention_scores", True),
    )
    def test_self_attention_causal(self, return_attention_scores):
        # Query-value tensor of shape [1, 3, 1]
        q = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)
        attention_layer = keras.layers.Attention()
        if return_attention_scores:
            actual, actual_scores = attention_layer(
                [q, q],
                return_attention_scores=return_attention_scores,
                use_causal_mask=True,
            )
        else:
            actual = attention_layer(
                [q, q],
                return_attention_scores=return_attention_scores,
                use_causal_mask=True,
            )
        # Expected scores of shape [1, 3, 3]
        # scores = [[0.25, 0.4, -0.15],
        #           [0.4, 0.64, -0.24],
        #           [-0.15, -0.24, 0.09]]
        # Expected attention distribution = softmax(scores) lower triangular
        # => attention_distribution00 = [1., 0., 0.]
        #    attention_distribution01
        #      = [exp(0.4), exp(0.64), 0.] / (exp(0.4) + exp(0.64))
        #      = [0.44028635073, 0.55971364926, 0.]
        #    attention_distribution02
        #      = [exp(-0.15), exp(-0.24), exp(0.09)]
        #        / (exp(-0.15) + exp(-0.24) + exp(0.09))
        #      = [0.31395396638, 0.28693232061, 0.399113713]
        if return_attention_scores:
            expected_scores = np.array(
                [
                    [
                        [1.0, 0.0, 0.0],
                        [0.44028635073, 0.55971364926, 0.0],
                        [0.31395396638, 0.28693232061, 0.399113713],
                    ]
                ],
                dtype=np.float32,
            )
            self.assertAllClose(expected_scores, actual_scores)
        # Expected tensor of shape [1, 3, 1].
# expected000 = 0.5 # expected010 = 0.44028635073 * 0.5 + 0.55971364926 * 0.8 # = 0.66791409477 # expected020 = 0.31395396638 * 0.5 + \ # 0.28693232061 * 0.8 -0.399113713 * 0.3 # = 0.26678872577 expected = np.array( [[[0.5], [0.66791409477], [0.26678872577]]], dtype=np.float32 ) self.assertAllClose(expected, actual) def test_self_attention_causal_deprecated(self): """Verify deprecated specification of causal masking still works.""" # Query-value tensor of shape [1, 3, 1] q = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32) attention_layer_new = keras.layers.Attention() new_scores = attention_layer_new( [q, q], use_causal_mask=True, ) attention_layer_old = keras.layers.Attention(causal=True) old_scores = attention_layer_old( [q, q], ) self.assertAllClose(new_scores, old_scores) def test_inputs_not_list(self): attention_layer = keras.layers.Attention() q = np.array([[[1.1]]], dtype=np.float32) with self.assertRaisesRegex( ValueError, "Attention layer must be called on a list of inputs" ): attention_layer(q) def test_inputs_too_short(self): attention_layer = keras.layers.Attention() q = np.array([[[1.1]]], dtype=np.float32) with self.assertRaisesRegex( ValueError, "Attention layer accepts inputs list of length 2 or 3" ): attention_layer([q]) def test_inputs_too_long(self): attention_layer = keras.layers.Attention() q = np.array([[[1.1]]], dtype=np.float32) with self.assertRaisesRegex( ValueError, "Attention layer accepts inputs list of length 2 or 3" ): attention_layer([q, q, q, q]) def test_mask_not_list(self): attention_layer = keras.layers.Attention() q = np.array([[[1.1]]], dtype=np.float32) mask = np.array([[True]], dtype=np.bool_) with self.assertRaisesRegex( ValueError, "Attention layer mask must be a list" ): attention_layer([q, q], mask=mask) def test_mask_too_short(self): attention_layer = keras.layers.Attention() q = np.array([[[1.1]]], dtype=np.float32) mask = np.array([[True]], dtype=np.bool_) with self.assertRaisesRegex( ValueError, "Attention layer mask must be a list of length 2" ): attention_layer([q, q], mask=[mask]) def test_mask_too_long(self): attention_layer = keras.layers.Attention() q = np.array([[[1.1]]], dtype=np.float32) mask = np.array([[True]], dtype=np.bool_) with self.assertRaisesRegex( ValueError, "Attention layer mask must be a list of length 2" ): attention_layer([q, q], mask=[mask, mask, mask]) def test_override_mask(self): attention_layer = keras.layers.Attention() q = core.Masking()(np.array([[[1.1]]], dtype=np.float32)) mask = np.array([[False]], dtype=np.bool_) actual = attention_layer([q, q], mask=[mask, mask]) self.assertAllClose([[[0]]], actual) def test_implicit_mask(self): attention_layer = keras.layers.Attention() q = core.Masking(1.1)(np.array([[[1.1], [1]]], dtype=np.float32)) v = core.Masking(1.2)(np.array([[[1.2], [1]]], dtype=np.float32)) actual = attention_layer([q, v]) self.assertAllClose([[[0], [1]]], actual) @parameterized.named_parameters( ("", False), ("use_scale", True), ) def test_serialization(self, use_scale): # Test serialization with use_scale layer = keras.layers.Attention(use_scale=use_scale) config = keras.layers.serialize(layer) new_layer = keras.layers.deserialize(config) self.assertEqual(new_layer.use_scale, use_scale) config = layer.get_config() new_layer = keras.layers.Attention.from_config(config) self.assertEqual(new_layer.use_scale, use_scale) if __name__ == "__main__": tf.test.main()
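
# A plain-NumPy sketch of the masked dot-product arithmetic behind the
# expected constants in `test_multi_dim` above (illustrative only; the
# layer itself masks by adding a large negative bias before the softmax,
# which is numerically equivalent to zeroing the masked exponentials):
def _masked_attention_reference():
    q = np.array([[1.1]])  # query, shape [Tq=1, dim=1]
    v = np.array([[1.6], [0.7], [-0.8]])  # values, shape [Tv=3, dim=1]
    scores = q @ v.T  # [[1.76, 0.77, -0.88]]
    exp = np.exp(scores) * np.array([1.0, 1.0, 0.0])  # mask the last value
    weights = exp / exp.sum()  # [[0.72908..., 0.27091..., 0.]]
    return weights @ v  # [[1.3561791301]]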
tf-keras/tf_keras/layers/attention/attention_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/attention/attention_test.py", "repo_id": "tf-keras", "token_count": 12767 }
197
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for convolutional layers.""" import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils # isort: off from tensorflow.python.framework import ( test_util as tf_test_utils, ) @test_combinations.run_all_keras_modes class Conv1DTest(test_combinations.TestCase): def _run_test(self, kwargs, expected_output_shape): num_samples = 2 stack_size = 3 length = 7 with self.cached_session(): test_utils.layer_test( keras.layers.Conv1D, kwargs=kwargs, input_shape=(num_samples, length, stack_size), expected_output_shape=expected_output_shape, ) def _run_test_extra_batch_dim(self, kwargs, expected_output_shape): batch_shape = (2, 11) stack_size = 3 length = 7 with self.cached_session(): if expected_output_shape is not None: expected_output_shape = (None,) + expected_output_shape test_utils.layer_test( keras.layers.Conv1D, kwargs=kwargs, input_shape=batch_shape + (length, stack_size), expected_output_shape=expected_output_shape, ) @parameterized.named_parameters( ("padding_valid", {"padding": "valid"}, (None, 5, 2)), ("padding_same", {"padding": "same"}, (None, 7, 2)), ( "padding_same_dilation_2", {"padding": "same", "dilation_rate": 2}, (None, 7, 2), ), ( "padding_same_dilation_3", {"padding": "same", "dilation_rate": 3}, (None, 7, 2), ), ("padding_causal", {"padding": "causal"}, (None, 7, 2)), ("strides", {"strides": 2}, (None, 3, 2)), ("dilation_rate", {"dilation_rate": 2}, (None, 3, 2)), ("group", {"groups": 3, "filters": 6}, (None, 5, 6)), ) def test_conv1d(self, kwargs, expected_output_shape): kwargs["filters"] = kwargs.get("filters", 2) kwargs["kernel_size"] = 3 self._run_test(kwargs, expected_output_shape) self._run_test_extra_batch_dim(kwargs, expected_output_shape) def test_conv1d_regularizers(self): kwargs = { "filters": 3, "kernel_size": 3, "padding": "valid", "kernel_regularizer": "l2", "bias_regularizer": "l2", "activity_regularizer": "l2", "strides": 1, } with self.cached_session(): layer = keras.layers.Conv1D(**kwargs) layer.build((None, 5, 2)) self.assertEqual(len(layer.losses), 2) layer(keras.backend.variable(np.ones((1, 5, 2)))) self.assertEqual(len(layer.losses), 3) def test_conv1d_constraints(self): k_constraint = lambda x: x b_constraint = lambda x: x kwargs = { "filters": 3, "kernel_size": 3, "padding": "valid", "kernel_constraint": k_constraint, "bias_constraint": b_constraint, "strides": 1, } with self.cached_session(): layer = keras.layers.Conv1D(**kwargs) layer.build((None, 5, 2)) self.assertEqual(layer.kernel.constraint, k_constraint) self.assertEqual(layer.bias.constraint, b_constraint) def test_conv1d_recreate_conv(self): with self.cached_session(): layer = keras.layers.Conv1D( filters=1, kernel_size=3, strides=1, dilation_rate=2, 
padding="causal", ) inpt1 = np.random.normal(size=[1, 2, 1]) inpt2 = np.random.normal(size=[1, 1, 1]) outp1_shape = layer(inpt1).shape _ = layer(inpt2).shape self.assertEqual(outp1_shape, layer(inpt1).shape) def test_conv1d_recreate_conv_unknown_dims(self): with self.cached_session(): layer = keras.layers.Conv1D( filters=1, kernel_size=3, strides=1, dilation_rate=2, padding="causal", ) inpt1 = np.random.normal(size=[1, 9, 1]).astype(np.float32) inpt2 = np.random.normal(size=[1, 2, 1]).astype(np.float32) outp1_shape = layer(inpt1).shape @tf.function(input_signature=[tf.TensorSpec([1, None, 1])]) def fn(inpt): return layer(inpt) fn(inpt2) self.assertEqual(outp1_shape, layer(inpt1).shape) def test_conv1d_invalid_output_shapes(self): kwargs = {"filters": 2, "kernel_size": 20} with self.assertRaisesRegex( ValueError, r"""One of the dimensions in the output is <= 0""" ): layer = keras.layers.Conv1D(**kwargs) layer.build((None, 5, 2)) def test_conv1d_invalid_strides_and_dilation_rate(self): kwargs = {"strides": 2, "dilation_rate": 2} with self.assertRaisesRegex( ValueError, r"""`strides > 1` not supported in conjunction""" ): keras.layers.Conv1D(filters=1, kernel_size=2, **kwargs) @test_combinations.run_all_keras_modes class Conv2DTest(test_combinations.TestCase): def _run_test(self, kwargs, expected_output_shape, spatial_shape=(7, 6)): num_samples = 2 stack_size = 3 num_row, num_col = spatial_shape input_data = None # Generate valid input data. if None in spatial_shape: input_data_shape = ( num_samples, num_row or 7, num_col or 6, stack_size, ) input_data = 10 * np.random.random(input_data_shape).astype( np.float32 ) with self.cached_session(): test_utils.layer_test( keras.layers.Conv2D, kwargs=kwargs, input_shape=(num_samples, num_row, num_col, stack_size), input_data=input_data, expected_output_shape=expected_output_shape, ) def _run_test_extra_batch_dim( self, kwargs, expected_output_shape, spatial_shape=(7, 6) ): batch_shape = (2, 11) stack_size = 3 num_row, num_col = spatial_shape input_data = None # Generate valid input data. if None in spatial_shape: input_data_shape = batch_shape + ( num_row or 7, num_col or 6, stack_size, ) input_data = 10 * np.random.random(input_data_shape).astype( np.float32 ) with self.cached_session(): if expected_output_shape is not None: expected_output_shape = (None,) + expected_output_shape test_utils.layer_test( keras.layers.Conv2D, kwargs=kwargs, input_shape=batch_shape + (num_row, num_col, stack_size), input_data=input_data, expected_output_shape=expected_output_shape, ) @parameterized.named_parameters( ("padding_valid", {"padding": "valid"}, (None, 5, 4, 2)), ("padding_same", {"padding": "same"}, (None, 7, 6, 2)), ( "padding_same_dilation_2", {"padding": "same", "dilation_rate": 2}, (None, 7, 6, 2), ), ("strides", {"strides": (2, 2)}, (None, 3, 2, 2)), ("dilation_rate", {"dilation_rate": (2, 2)}, (None, 3, 2, 2)), # Only runs on GPU with CUDA, channels_first is not supported on CPU. # TODO(b/62340061): Support channels_first on CPU. 
("data_format", {"data_format": "channels_first"}, None, True), ("group", {"groups": 3, "filters": 6}, (None, 5, 4, 6), False), ( "dilation_2_unknown_width", {"dilation_rate": (2, 2)}, (None, None, 2, 2), False, (None, 6), ), ( "dilation_2_unknown_height", {"dilation_rate": (2, 2)}, (None, 3, None, 2), False, (7, None), ), ) def test_conv2d( self, kwargs, expected_output_shape=None, requires_gpu=False, spatial_shape=(7, 6), ): kwargs["filters"] = kwargs.get("filters", 2) kwargs["kernel_size"] = (3, 3) if not requires_gpu or tf.test.is_gpu_available(cuda_only=True): self._run_test(kwargs, expected_output_shape, spatial_shape) self._run_test_extra_batch_dim( kwargs, expected_output_shape, spatial_shape ) def test_conv2d_regularizers(self): kwargs = { "filters": 3, "kernel_size": 3, "padding": "valid", "kernel_regularizer": "l2", "bias_regularizer": "l2", "activity_regularizer": "l2", "strides": 1, } with self.cached_session(): layer = keras.layers.Conv2D(**kwargs) layer.build((None, 5, 5, 2)) self.assertEqual(len(layer.losses), 2) layer(keras.backend.variable(np.ones((1, 5, 5, 2)))) self.assertEqual(len(layer.losses), 3) def test_conv2d_constraints(self): k_constraint = lambda x: x b_constraint = lambda x: x kwargs = { "filters": 3, "kernel_size": 3, "padding": "valid", "kernel_constraint": k_constraint, "bias_constraint": b_constraint, "strides": 1, } with self.cached_session(): layer = keras.layers.Conv2D(**kwargs) layer.build((None, 5, 5, 2)) self.assertEqual(layer.kernel.constraint, k_constraint) self.assertEqual(layer.bias.constraint, b_constraint) def test_conv2d_zero_kernel_size(self): kwargs = {"filters": 2, "kernel_size": 0} with self.assertRaises(ValueError): keras.layers.Conv2D(**kwargs) def test_conv2d_invalid_output_shapes(self): kwargs = {"filters": 2, "kernel_size": 20} with self.assertRaisesRegex( ValueError, r"""One of the dimensions in the output is <= 0""" ): layer = keras.layers.Conv2D(**kwargs) layer.build((None, 5, 5, 2)) def test_conv2d_invalid_strides_and_dilation_rate(self): kwargs = {"strides": [1, 2], "dilation_rate": [2, 1]} with self.assertRaisesRegex( ValueError, r"""`strides > 1` not supported in conjunction""" ): keras.layers.Conv2D(filters=1, kernel_size=2, **kwargs) @test_combinations.run_all_keras_modes class Conv3DTest(test_combinations.TestCase): def _run_test(self, kwargs, expected_output_shape, validate_training=True): num_samples = 2 stack_size = 3 num_row = 7 num_col = 6 depth = 5 with self.cached_session(): test_utils.layer_test( keras.layers.Conv3D, kwargs=kwargs, input_shape=(num_samples, depth, num_row, num_col, stack_size), expected_output_shape=expected_output_shape, validate_training=validate_training, ) def _run_test_extra_batch_dim( self, kwargs, expected_output_shape, validate_training=True ): batch_shape = (2, 11) stack_size = 3 num_row = 7 num_col = 6 depth = 5 with self.cached_session(): if expected_output_shape is not None: expected_output_shape = (None,) + expected_output_shape test_utils.layer_test( keras.layers.Conv3D, kwargs=kwargs, input_shape=batch_shape + (depth, num_row, num_col, stack_size), expected_output_shape=expected_output_shape, validate_training=validate_training, ) @parameterized.named_parameters( ("padding_valid", {"padding": "valid"}, (None, 3, 5, 4, 2)), ("padding_same", {"padding": "same"}, (None, 5, 7, 6, 2)), ("strides", {"strides": (2, 2, 2)}, (None, 2, 3, 2, 2)), ("dilation_rate", {"dilation_rate": (2, 2, 2)}, (None, 1, 3, 2, 2)), # Only runs on GPU with CUDA, channels_first is not supported on CPU. 
        # TODO(b/62340061): Support channels_first on CPU.
        ("data_format", {"data_format": "channels_first"}, None, True),
        ("group", {"groups": 3, "filters": 6}, (None, 3, 5, 4, 6)),
    )
    def test_conv3d(
        self, kwargs, expected_output_shape=None, requires_gpu=False
    ):
        kwargs["filters"] = kwargs.get("filters", 2)
        kwargs["kernel_size"] = (3, 3, 3)
        # train_on_batch currently fails with XLA enabled on GPUs
        test_training = (
            "groups" not in kwargs or not tf_test_utils.is_xla_enabled()
        )
        if not requires_gpu or tf.test.is_gpu_available(cuda_only=True):
            self._run_test(kwargs, expected_output_shape, test_training)
            self._run_test_extra_batch_dim(
                kwargs, expected_output_shape, test_training
            )

    def test_conv3d_regularizers(self):
        kwargs = {
            "filters": 3,
            "kernel_size": 3,
            "padding": "valid",
            "kernel_regularizer": "l2",
            "bias_regularizer": "l2",
            "activity_regularizer": "l2",
            "strides": 1,
        }
        with self.cached_session():
            layer = keras.layers.Conv3D(**kwargs)
            layer.build((None, 5, 5, 5, 2))
            self.assertEqual(len(layer.losses), 2)
            layer(keras.backend.variable(np.ones((1, 5, 5, 5, 2))))
            self.assertEqual(len(layer.losses), 3)

    def test_conv3d_constraints(self):
        k_constraint = lambda x: x
        b_constraint = lambda x: x

        kwargs = {
            "filters": 3,
            "kernel_size": 3,
            "padding": "valid",
            "kernel_constraint": k_constraint,
            "bias_constraint": b_constraint,
            "strides": 1,
        }
        with self.cached_session():
            layer = keras.layers.Conv3D(**kwargs)
            layer.build((None, 5, 5, 5, 2))
            self.assertEqual(layer.kernel.constraint, k_constraint)
            self.assertEqual(layer.bias.constraint, b_constraint)

    def test_conv3d_dynamic_shape(self):
        input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32)
        with self.cached_session():
            # Won't raise error here.
            test_utils.layer_test(
                keras.layers.Conv3D,
                kwargs={
                    "data_format": "channels_last",
                    "filters": 3,
                    "kernel_size": 3,
                },
                input_shape=(None, None, None, None, 3),
                input_data=input_data,
            )
            if tf.test.is_gpu_available(cuda_only=True):
                test_utils.layer_test(
                    keras.layers.Conv3D,
                    kwargs={
                        "data_format": "channels_first",
                        "filters": 3,
                        "kernel_size": 3,
                    },
                    input_shape=(None, 3, None, None, None),
                    input_data=input_data,
                )

    def test_conv3d_invalid_output_shapes(self):
        kwargs = {"filters": 2, "kernel_size": 20}
        with self.assertRaisesRegex(
            ValueError, r"""One of the dimensions in the output is <= 0"""
        ):
            layer = keras.layers.Conv3D(**kwargs)
            layer.build((None, 5, 5, 5, 2))

    def test_conv3d_zero_dim_output(self):
        conv = keras.layers.Convolution3DTranspose(2, [3, 3, 3], padding="same")
        x = tf.random.uniform([1, 32, 32, 0, 3], dtype=tf.float32)
        # The layer doesn't crash with 0 dim input
        _ = conv(x)

    def test_conv3d_invalid_strides_and_dilation_rate(self):
        kwargs = {"strides": [1, 1, 2], "dilation_rate": [1, 2, 1]}
        with self.assertRaisesRegex(
            ValueError, r"""`strides > 1` not supported in conjunction"""
        ):
            keras.layers.Conv3D(filters=1, kernel_size=2, **kwargs)


@test_combinations.run_all_keras_modes(always_skip_v1=True)
class GroupedConvTest(test_combinations.TestCase):
    @parameterized.named_parameters(
        ("Conv1D", keras.layers.Conv1D),
        ("Conv2D", keras.layers.Conv2D),
        ("Conv3D", keras.layers.Conv3D),
    )
    def test_group_conv_incorrect_use(self, layer):
        with self.assertRaisesRegex(ValueError, "The number of filters"):
            layer(16, 3, groups=3)
        with self.assertRaisesRegex(ValueError, "The number of input channels"):
            layer(16, 3, groups=4).build((32, 12, 12, 3))

    @parameterized.named_parameters(
        ("Conv1D", keras.layers.Conv1D, (32, 12, 32)),
        ("Conv2D", keras.layers.Conv2D, (32, 12, 12, 32)),
("Conv3D", keras.layers.Conv3D, (32, 12, 12, 12, 32)), ) def test_group_conv(self, layer_cls, input_shape): if tf.test.is_gpu_available(cuda_only=True): with test_utils.use_gpu(): inputs = tf.random.uniform(shape=input_shape) layer = layer_cls(16, 3, groups=4, use_bias=False) layer.build(input_shape) input_slices = tf.split(inputs, 4, axis=-1) weight_slices = tf.split(layer.kernel, 4, axis=-1) expected_outputs = tf.concat( [ tf.nn.convolution(inputs, weights) for inputs, weights in zip(input_slices, weight_slices) ], axis=-1, ) self.assertAllClose( layer(inputs), expected_outputs, rtol=3e-5, atol=3e-5 ) def test_group_conv_depthwise(self): if tf.test.is_gpu_available(cuda_only=True): with test_utils.use_gpu(): inputs = tf.random.uniform(shape=(3, 27, 27, 32)) layer = keras.layers.Conv2D(32, 3, groups=32, use_bias=False) layer.build((3, 27, 27, 32)) weights_dw = tf.reshape(layer.kernel, [3, 3, 32, 1]) expected_outputs = tf.compat.v1.nn.depthwise_conv2d( inputs, weights_dw, strides=[1, 1, 1, 1], padding="VALID" ) self.assertAllClose(layer(inputs), expected_outputs, rtol=1e-5) @test_combinations.run_all_keras_modes class ConvSequentialTest(test_combinations.TestCase): def _run_test( self, conv_layer_cls, kwargs, input_shape1, input_shape2, expected_output_shape1, expected_output_shape2, ): kwargs["filters"] = 1 kwargs["kernel_size"] = 3 kwargs["dilation_rate"] = 2 with self.cached_session(): layer = conv_layer_cls(**kwargs) output1 = layer(np.zeros(input_shape1)) self.assertEqual(output1.shape, expected_output_shape1) output2 = layer(np.zeros(input_shape2)) self.assertEqual(output2.shape, expected_output_shape2) @parameterized.named_parameters( ( "padding_valid", {"padding": "valid"}, (1, 8, 2), (1, 5, 2), (1, 4, 1), (1, 1, 1), ), ( "padding_same", {"padding": "same"}, (1, 8, 2), (1, 5, 2), (1, 8, 1), (1, 5, 1), ), ( "padding_causal", {"padding": "causal"}, (1, 8, 2), (1, 5, 2), (1, 8, 1), (1, 5, 1), ), ) def test_conv1d( self, kwargs, input_shape1, input_shape2, expected_output_shape1, expected_output_shape2, ): self._run_test( keras.layers.Conv1D, kwargs, input_shape1, input_shape2, expected_output_shape1, expected_output_shape2, ) @parameterized.named_parameters( ( "padding_valid", {"padding": "valid"}, (1, 7, 6, 2), (1, 6, 5, 2), (1, 3, 2, 1), (1, 2, 1, 1), ), ( "padding_same", {"padding": "same"}, (1, 7, 6, 2), (1, 6, 5, 2), (1, 7, 6, 1), (1, 6, 5, 1), ), ) def test_conv2d( self, kwargs, input_shape1, input_shape2, expected_output_shape1, expected_output_shape2, ): self._run_test( keras.layers.Conv2D, kwargs, input_shape1, input_shape2, expected_output_shape1, expected_output_shape2, ) @parameterized.named_parameters( ( "padding_valid", {"padding": "valid"}, (1, 5, 7, 6, 2), (1, 8, 6, 5, 2), (1, 1, 3, 2, 1), (1, 4, 2, 1, 1), ), ( "padding_same", {"padding": "same"}, (1, 5, 7, 6, 2), (1, 8, 6, 5, 2), (1, 5, 7, 6, 1), (1, 8, 6, 5, 1), ), ) def test_conv3d( self, kwargs, input_shape1, input_shape2, expected_output_shape1, expected_output_shape2, ): self._run_test( keras.layers.Conv3D, kwargs, input_shape1, input_shape2, expected_output_shape1, expected_output_shape2, ) def test_dynamic_shape(self): with self.cached_session(): layer = keras.layers.Conv3D(2, 3) input_shape = (5, None, None, 2) inputs = keras.Input(shape=input_shape) x = layer(inputs) # Won't raise error here with None values in input shape # (b/144282043). layer(x) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/layers/convolutional/conv_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/convolutional/conv_test.py", "repo_id": "tf-keras", "token_count": 12416 }
198
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for embedding layer.""" import os import numpy as np import tensorflow.compat.v2 as tf import tf_keras as keras from tf_keras.mixed_precision import policy from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils class EmbeddingTest(test_combinations.TestCase): @test_combinations.run_all_keras_modes def test_embedding(self): if tf.test.is_gpu_available(): self.skipTest("Only test embedding on CPU.") test_utils.layer_test( keras.layers.Embedding, kwargs={"output_dim": 4, "input_dim": 10, "input_length": 2}, input_shape=(3, 2), input_dtype="int32", expected_output_dtype="float32", ) test_utils.layer_test( keras.layers.Embedding, kwargs={"output_dim": 4, "input_dim": 10, "mask_zero": True}, input_shape=(3, 2), input_dtype="int32", expected_output_dtype="float32", ) test_utils.layer_test( keras.layers.Embedding, kwargs={"output_dim": 4, "input_dim": 10, "mask_zero": True}, input_shape=(3, 4, 2), input_dtype="int32", expected_output_dtype="float32", ) test_utils.layer_test( keras.layers.Embedding, kwargs={ "output_dim": 4, "input_dim": 10, "mask_zero": True, "input_length": (None, 2), }, input_shape=(3, 4, 2), input_dtype="int32", expected_output_dtype="float32", ) @test_combinations.run_all_keras_modes def test_embedding_correctness(self): layer = keras.layers.Embedding(output_dim=2, input_dim=2) model = keras.models.Sequential([layer]) layer.set_weights([np.array([[1, 1], [2, 2]])]) model.run_eagerly = test_utils.should_run_eagerly() outputs = model.predict(np.array([[0, 1, 0]], dtype="int32")) self.assertAllClose(outputs, [[[1, 1], [2, 2], [1, 1]]]) def test_embedding_incorrect_dimension(self): with self.assertRaises(ValueError): keras.layers.Embedding(input_dim=0, output_dim=1) with self.assertRaises(ValueError): keras.layers.Embedding(input_dim=1, output_dim=0) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def test_eager_gpu_cpu(self): l = keras.layers.Embedding(output_dim=2, input_dim=2) l.build((None, 2)) inputs = keras.backend.constant([[0, 1, 0]], dtype="int32") with tf.GradientTape() as tape: output = l(inputs) gs = tape.gradient(output, l.weights) opt = tf.compat.v1.train.AdagradOptimizer(0.1) opt.apply_gradients(zip(gs, l.weights)) self.assertAllEqual(len(gs), 1) @test_combinations.run_all_keras_modes def test_embedding_with_ragged_input(self): layer = keras.layers.Embedding( input_dim=3, output_dim=2, weights=[np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])], ) inputs = keras.layers.Input( shape=(None,), dtype=tf.float32, ragged=True ) outputs = keras.layers.Lambda( lambda args: keras.backend.identity(args) )(inputs) outputs = layer(outputs) model = keras.Model(inputs, outputs) model.run_eagerly = test_utils.should_run_eagerly() outputs = model.predict( tf.ragged.constant( [[1.0, 2.0, 2.0], [0.0], [1.0, 2.0]], ragged_rank=1 ) 
        )
        self.assertAllClose(
            outputs,
            tf.ragged.constant(
                [
                    [[1.0, 1.0], [2.0, 2.0], [2.0, 2.0]],
                    [[0.0, 0.0]],
                    [[1.0, 1.0], [2.0, 2.0]],
                ],
                ragged_rank=1,
            ),
        )

    @test_utils.enable_v2_dtype_behavior
    def test_mixed_precision_embedding(self):
        try:
            policy.set_global_policy("mixed_float16")
            layer = keras.layers.Embedding(input_dim=5, output_dim=2)
            self.assertEqual(layer._dtype_policy.name, "mixed_float16")
            outputs = layer(np.array([0, 1, 2]))
            self.assertEqual(outputs.dtype, "float16")
        finally:
            policy.set_global_policy("float32")

    @test_combinations.run_all_keras_modes
    def test_embedding_with_sparse_input_sparse_output(self):
        layer = keras.layers.Embedding(
            input_dim=3,
            output_dim=2,
            weights=[np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])],
            sparse=True,
        )
        input = tf.SparseTensor(
            indices=[[0, 1], [1, 2]], values=[1, 2], dense_shape=[3, 3]
        )
        output = layer(input)
        expected_output = tf.SparseTensor(
            indices=[[0, 1, 0], [0, 1, 1], [1, 2, 0], [1, 2, 1]],
            values=[1.0, 1.0, 2.0, 2.0],
            dense_shape=[3, 3, 2],
        )
        self.assertAllClose(output.indices, expected_output.indices)
        self.assertAllClose(output.values, expected_output.values)
        self.assertAllClose(output.dense_shape, expected_output.dense_shape)

    @test_combinations.run_all_keras_modes
    def test_embedding_with_sparse_input_dense_output(self):
        layer = keras.layers.Embedding(
            input_dim=3,
            output_dim=2,
            weights=[np.array([[0.1, 0.1], [1.0, 1.0], [2.0, 2.0]])],
            sparse=False,
        )
        input = tf.SparseTensor(
            indices=[[0, 1], [1, 2]], values=[1, 2], dense_shape=[3, 3]
        )
        output = layer(input)
        expected_output = tf.constant(
            [
                [[0.1, 0.1], [1.0, 1.0], [0.1, 0.1]],
                [[0.1, 0.1], [0.1, 0.1], [2.0, 2.0]],
                [[0.1, 0.1], [0.1, 0.1], [0.1, 0.1]],
            ]
        )
        self.assertAllClose(output, expected_output)

    @test_combinations.run_all_keras_modes
    def test_error_message_for_mask_zero_enabled_with_sparse_tensor(self):
        with self.assertRaisesRegex(
            ValueError,
            "`mask_zero` cannot be enabled when "
            "`tf.keras.layers.Embedding` is used with `tf.SparseTensor` "
            "input.",
        ):
            layer = keras.layers.Embedding(
                input_dim=3,
                output_dim=2,
                weights=[np.array([[0.1, 0.1], [1.0, 1.0], [2.0, 2.0]])],
                sparse=True,
                mask_zero=True,
            )
            inputs = tf.SparseTensor(
                indices=[[0, 1], [1, 2]], values=[1, 2], dense_shape=[3, 3]
            )
            layer(inputs)

    @test_combinations.run_all_keras_modes
    def test_embedding_with_dense_input_sparse_output(self):
        layer = keras.layers.Embedding(
            input_dim=3,
            output_dim=2,
            weights=[np.array([[0, 0], [1.0, 1.0], [2.0, 2.0]])],
            sparse=True,
            mask_zero=False,
        )
        inputs = tf.constant([0, 0, 0, 2, 1])
        output = layer(inputs)
        expected_output = tf.SparseTensor(
            indices=[[3, 0], [3, 1], [4, 0], [4, 1]],
            values=[2.0, 2.0, 1.0, 1.0],
            dense_shape=[5, 2],
        )
        self.assertAllClose(output.indices, expected_output.indices)
        self.assertAllClose(output.values, expected_output.values)
        self.assertAllClose(output.dense_shape, expected_output.dense_shape)

    @test_combinations.run_all_keras_modes(always_skip_v1=True)
    def test_use_one_hot(self):
        batch = 8
        input_length = 10
        layer = keras.layers.Embedding(input_dim=100, output_dim=16)
        self.assertFalse(layer._use_one_hot_matmul)
        inputs = tf.random.uniform(
            shape=[batch, input_length], minval=0, maxval=9, dtype=tf.int64
        )
        output_1 = layer(inputs)
        layer._use_one_hot_matmul = True
        output_2 = layer(inputs)
        self.assertAllClose(output_1, output_2)
        self.assertEqual(output_1.dtype, output_2.dtype)

        # Make sure the layer can be created with hidden kwargs, and not
        # serialize it into config (for now).
layer = keras.layers.Embedding( input_dim=100, output_dim=16, use_one_hot_matmul=True ) self.assertTrue(layer._use_one_hot_matmul) self.assertNotIn("use_one_hot_matmul", layer.get_config()) @test_combinations.run_all_keras_modes def test_tensor_dim_serialization(self): self.skipTest("Disable the failing test.") embedding_size = tf.constant(16000) layer = keras.layers.Embedding(input_dim=embedding_size, output_dim=128) model = keras.models.Sequential([layer]) temp_filepath = os.path.join(self.get_temp_dir(), "model.keras") model.save(temp_filepath) loaded = keras.models.load_model(temp_filepath) self.assertEqual(loaded.layers[0].input_dim, tf.constant(16000)) if __name__ == "__main__": tf.test.main()
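
# A minimal sketch of the lookup semantics that `test_embedding_correctness`
# above verifies: an `Embedding` layer is essentially a row gather from its
# weight matrix, so index i returns row i. The helper name below is
# hypothetical and is not part of the test suite.
def _embedding_lookup_sketch():
    weights = np.array([[1.0, 1.0], [2.0, 2.0]])
    ids = np.array([0, 1, 0])
    # Gathering rows by id reproduces the expected output asserted above:
    # [[1, 1], [2, 2], [1, 1]].
    return weights[ids]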
tf-keras/tf_keras/layers/core/embedding_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/core/embedding_test.py", "repo_id": "tf-keras", "token_count": 5002 }
199
# Description: # Contains the TF-Keras locally-connected layers. # Placeholder: load unaliased py_library load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test") package( # copybara:uncomment default_applicable_licenses = ["//tf_keras:license"], default_visibility = [ "//tf_keras:friends", ], licenses = ["notice"], ) py_library( name = "locally_connected", srcs = [ "__init__.py", ], srcs_version = "PY3", deps = [ ":locally_connected1d", ":locally_connected2d", ], ) py_library( name = "locally_connected_utils", srcs = ["locally_connected_utils.py"], srcs_version = "PY3", deps = [ "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras:backend", "//tf_keras/utils:engine_utils", ], ) py_library( name = "locally_connected1d", srcs = ["locally_connected1d.py"], srcs_version = "PY3", deps = [ ":locally_connected_utils", "//tf_keras:activations", "//tf_keras:backend", "//tf_keras:constraints", "//tf_keras:regularizers", "//tf_keras/engine:base_layer", "//tf_keras/engine:input_spec", "//tf_keras/initializers", "//tf_keras/utils:engine_utils", "//tf_keras/utils:tf_utils", ], ) py_library( name = "locally_connected2d", srcs = ["locally_connected2d.py"], srcs_version = "PY3", deps = [ ":locally_connected_utils", "//tf_keras:activations", "//tf_keras:backend", "//tf_keras:constraints", "//tf_keras:regularizers", "//tf_keras/engine:base_layer", "//tf_keras/engine:input_spec", "//tf_keras/initializers", "//tf_keras/utils:engine_utils", "//tf_keras/utils:tf_utils", ], ) tf_py_test( name = "locally_connected_test", size = "medium", srcs = ["locally_connected_test.py"], python_version = "PY3", shard_count = 4, tags = ["no_windows"], deps = [ "//:expect_absl_installed", # absl/testing:parameterized "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/optimizers/legacy:optimizers", "//tf_keras/testing_infra:test_combinations", "//tf_keras/testing_infra:test_utils", ], )
tf-keras/tf_keras/layers/locally_connected/BUILD/0
{ "file_path": "tf-keras/tf_keras/layers/locally_connected/BUILD", "repo_id": "tf-keras", "token_count": 1159 }
200
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Layer that multiplies (element-wise) several inputs.""" from tf_keras.layers.merging.base_merge import _Merge # isort: off from tensorflow.python.util.tf_export import keras_export @keras_export("keras.layers.Multiply") class Multiply(_Merge): """Layer that multiplies (element-wise) a list of inputs. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). >>> tf.keras.layers.Multiply()([np.arange(5).reshape(5, 1), ... np.arange(5, 10).reshape(5, 1)]) <tf.Tensor: shape=(5, 1), dtype=int64, numpy= array([[ 0], [ 6], [14], [24], [36]])> >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2)) >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) >>> multiplied = tf.keras.layers.Multiply()([x1, x2]) >>> multiplied.shape TensorShape([5, 8]) """ def _merge_function(self, inputs): output = inputs[0] for i in range(1, len(inputs)): output = output * inputs[i] return output @keras_export("keras.layers.multiply") def multiply(inputs, **kwargs): """Functional interface to the `Multiply` layer. Example: >>> x1 = np.arange(3.0) >>> x2 = np.arange(3.0) >>> tf.keras.layers.multiply([x1, x2]) <tf.Tensor: shape=(3,), dtype=float32, numpy=array([0., 1., 4.], ...)> Usage in a functional model: >>> input1 = tf.keras.layers.Input(shape=(16,)) >>> x1 = tf.keras.layers.Dense( ... 8, activation='relu')(input1) #shape=(None, 8) >>> input2 = tf.keras.layers.Input(shape=(32,)) >>> x2 = tf.keras.layers.Dense( ... 8, activation='relu')(input2) #shape=(None, 8) >>> out = tf.keras.layers.multiply([x1,x2]) #shape=(None, 8) >>> out = tf.keras.layers.Dense(4)(out) >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out) Args: inputs: A list of input tensors. **kwargs: Standard layer keyword arguments. Returns: A tensor, the element-wise product of the inputs. """ return Multiply(**kwargs)(inputs)
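
# A small illustrative sketch (not part of this module's public API): the
# `_merge_function` above folds the element-wise product left to right, so a
# list of three inputs multiplies through pairwise. The helper name is
# hypothetical.
def _multiply_three_inputs_sketch():
    import numpy as np  # local import; this module does not import numpy

    x1 = np.array([1.0, 2.0, 3.0])
    x2 = np.array([4.0, 5.0, 6.0])
    x3 = np.array([2.0, 2.0, 2.0])
    # Mirrors Multiply._merge_function: output = ((x1 * x2) * x3).
    output = x1
    for x in (x2, x3):
        output = output * x
    return output  # [8., 20., 36.]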
tf-keras/tf_keras/layers/merging/multiply.py/0
{ "file_path": "tf-keras/tf_keras/layers/merging/multiply.py", "repo_id": "tf-keras", "token_count": 1140 }
201
# Copyright 2022 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Unit Normalization layer.""" import tensorflow.compat.v2 as tf import tf_keras as keras from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils def squared_l2_norm(x): return tf.reduce_sum(x**2) @test_utils.run_v2_only class UnitNormalizationTest(test_combinations.TestCase): @test_combinations.run_all_keras_modes def test_basics(self): test_utils.layer_test( keras.layers.UnitNormalization, kwargs={"axis": -1}, input_shape=(2, 3), ) test_utils.layer_test( keras.layers.UnitNormalization, kwargs={"axis": (1, 2)}, input_shape=(1, 3, 3), ) def test_correctness(self): layer = keras.layers.UnitNormalization(axis=-1) inputs = tf.random.normal(shape=(2, 3)) outputs = layer(inputs).numpy() self.assertAllClose(squared_l2_norm(outputs[0, :]), 1.0) self.assertAllClose(squared_l2_norm(outputs[1, :]), 1.0) layer = keras.layers.UnitNormalization(axis=(1, 2)) inputs = tf.random.normal(shape=(2, 3, 3)) outputs = layer(inputs).numpy() self.assertAllClose(squared_l2_norm(outputs[0, :, :]), 1.0) self.assertAllClose(squared_l2_norm(outputs[1, :, :]), 1.0) layer = keras.layers.UnitNormalization(axis=1) inputs = tf.random.normal(shape=(2, 3, 2)) outputs = layer(inputs).numpy() self.assertAllClose(squared_l2_norm(outputs[0, :, 0]), 1.0) self.assertAllClose(squared_l2_norm(outputs[1, :, 0]), 1.0) self.assertAllClose(squared_l2_norm(outputs[0, :, 1]), 1.0) self.assertAllClose(squared_l2_norm(outputs[1, :, 1]), 1.0) def testInvalidAxis(self): with self.assertRaisesRegex( TypeError, r"Invalid value for `axis` argument" ): layer = keras.layers.UnitNormalization(axis=None) with self.assertRaisesRegex( ValueError, r"Invalid value for `axis` argument" ): layer = keras.layers.UnitNormalization(axis=3) layer.build(input_shape=(2, 2, 2)) if __name__ == "__main__": tf.test.main()
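
# A minimal sketch (not a registered test case) of the arithmetic that
# `test_correctness` above checks: unit normalization divides each sample by
# its L2 norm along the given axis, so `squared_l2_norm` of the result is 1.
# The helper name is hypothetical.
def _manual_unit_normalization_sketch():
    x = tf.constant([[3.0, 4.0]])
    normalized = x / tf.norm(x, axis=-1, keepdims=True)
    # squared_l2_norm(normalized[0, :]) == 1.0, matching the layer output.
    return normalized  # [[0.6, 0.8]]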
tf-keras/tf_keras/layers/normalization/unit_normalization_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/normalization/unit_normalization_test.py", "repo_id": "tf-keras", "token_count": 1201 }
202
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for global average pooling layers.""" import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.mixed_precision import policy from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils @test_combinations.generate(test_combinations.combine(mode=["graph", "eager"])) class GlobalAveragePoolingTest(tf.test.TestCase, parameterized.TestCase): @test_utils.enable_v2_dtype_behavior def test_mixed_float16_policy(self): with policy.policy_scope("mixed_float16"): inputs1 = keras.Input(shape=(36, 512), dtype="float16") inputs2 = keras.Input(shape=(36,), dtype="bool") average_layer = keras.layers.GlobalAveragePooling1D() _ = average_layer(inputs1, inputs2) def test_global_average_pooling_1d(self): test_utils.layer_test( keras.layers.GlobalAveragePooling1D, input_shape=(3, 4, 5) ) test_utils.layer_test( keras.layers.GlobalAveragePooling1D, kwargs={"data_format": "channels_first"}, input_shape=(3, 4, 5), ) def test_global_average_pooling_1d_masking_support(self): model = keras.Sequential() model.add(keras.layers.Masking(mask_value=0.0, input_shape=(None, 4))) model.add(keras.layers.GlobalAveragePooling1D()) model.compile(loss="mae", optimizer="rmsprop") model_input = np.random.random((2, 3, 4)) model_input[0, 1:, :] = 0 output = model.predict(model_input) self.assertAllClose(output[0], model_input[0, 0, :]) def test_global_average_pooling_1d_with_ragged(self): ragged_data = tf.ragged.constant( [[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]], [[1.0, 1.0], [2.0, 2.0]]], ragged_rank=1, ) dense_data = ragged_data.to_tensor() inputs = keras.Input(shape=(None, 2), dtype="float32", ragged=True) out = keras.layers.GlobalAveragePooling1D()(inputs) model = keras.models.Model(inputs=inputs, outputs=out) output_ragged = model.predict(ragged_data, steps=1) inputs = keras.Input(shape=(None, 2), dtype="float32") masking = keras.layers.Masking(mask_value=0.0, input_shape=(3, 2))( inputs ) out = keras.layers.GlobalAveragePooling1D()(masking) model = keras.models.Model(inputs=inputs, outputs=out) output_dense = model.predict(dense_data, steps=1) self.assertAllEqual(output_ragged, output_dense) def test_global_average_pooling_2d(self): test_utils.layer_test( keras.layers.GlobalAveragePooling2D, kwargs={"data_format": "channels_first"}, input_shape=(3, 4, 5, 6), ) test_utils.layer_test( keras.layers.GlobalAveragePooling2D, kwargs={"data_format": "channels_last"}, input_shape=(3, 5, 6, 4), ) def test_global_average_pooling_3d(self): test_utils.layer_test( keras.layers.GlobalAveragePooling3D, kwargs={"data_format": "channels_first"}, input_shape=(3, 4, 3, 4, 3), ) test_utils.layer_test( keras.layers.GlobalAveragePooling3D, kwargs={"data_format": "channels_last"}, input_shape=(3, 4, 3, 4, 3), ) def test_global_average_pooling_1d_keepdims(self): 
test_utils.layer_test( keras.layers.GlobalAveragePooling1D, kwargs={"keepdims": True}, input_shape=(3, 4, 5), expected_output_shape=(None, 1, 5), ) test_utils.layer_test( keras.layers.GlobalAveragePooling1D, kwargs={"data_format": "channels_first", "keepdims": True}, input_shape=(3, 4, 5), expected_output_shape=(None, 4, 1), ) def test_global_average_pooling_2d_keepdims(self): test_utils.layer_test( keras.layers.GlobalAveragePooling2D, kwargs={"data_format": "channels_first", "keepdims": True}, input_shape=(3, 4, 5, 6), expected_output_shape=(None, 4, 1, 1), ) test_utils.layer_test( keras.layers.GlobalAveragePooling2D, kwargs={"data_format": "channels_last", "keepdims": True}, input_shape=(3, 4, 5, 6), expected_output_shape=(None, 1, 1, 6), ) def test_global_average_pooling_3d_keepdims(self): test_utils.layer_test( keras.layers.GlobalAveragePooling3D, kwargs={"data_format": "channels_first", "keepdims": True}, input_shape=(3, 4, 3, 4, 3), expected_output_shape=(None, 4, 1, 1, 1), ) test_utils.layer_test( keras.layers.GlobalAveragePooling3D, kwargs={"data_format": "channels_last", "keepdims": True}, input_shape=(3, 4, 3, 4, 3), expected_output_shape=(None, 1, 1, 1, 3), ) def test_global_average_pooling_1d_keepdims_masking_support(self): model = keras.Sequential() model.add(keras.layers.Masking(mask_value=0.0, input_shape=(None, 4))) model.add(keras.layers.GlobalAveragePooling1D(keepdims=True)) model.compile(loss="mae", optimizer="rmsprop") model_input = np.random.random((2, 3, 4)) model_input[0, 1:, :] = 0 output = model.predict(model_input) self.assertAllEqual((2, 1, 4), output.shape) self.assertAllClose(output[0, 0], model_input[0, 0, :]) def test_global_average_pooling_1d_invalid_input_dimension(self): with self.assertRaisesRegex(ValueError, r"""Incorrect input shape"""): layer = keras.layers.GlobalAveragePooling1D() layer.build((None, 0, 2)) def test_global_average_pooling_3d_invalid_input_dimension(self): with self.assertRaisesRegex(ValueError, r"""Incorrect input shape"""): layer = keras.layers.GlobalAveragePooling3D(keepdims=True) layer.build((None, 0, 16, 16, 3)) if __name__ == "__main__": tf.test.main()
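
# An illustrative sketch (not a registered test case) of the masking
# behaviour exercised by `test_global_average_pooling_1d_masking_support`
# above: masked timesteps are excluded from the average, so the pooled value
# is the sum over valid steps divided by the valid-step count. The helper
# name is hypothetical.
def _masked_mean_sketch():
    values = np.array([[[1.0, 1.0], [3.0, 3.0], [0.0, 0.0]]])
    mask = np.array([[True, True, False]])
    valid = mask[..., np.newaxis]
    pooled = (values * valid).sum(axis=1) / valid.sum(axis=1)
    return pooled  # [[2., 2.]] -- the mean of the two unmasked timesteps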
tf-keras/tf_keras/layers/pooling/global_average_pooling_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/pooling/global_average_pooling_test.py", "repo_id": "tf-keras", "token_count": 3137 }
203
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmark for TF-Keras text vectorization preprocessing layer's adapt method. """ import time import numpy as np import tensorflow.compat.v2 as tf import tf_keras as keras from tf_keras.layers.preprocessing import normalization def reduce_fn(state, values): """tf.data.Dataset-friendly implementation of mean and variance.""" k, n, ex, ex2 = state # If this is the first iteration, we pick the first value to be 'k', # which helps with precision - we assume that k is close to an average # value and calculate mean and variance with respect to that. k = tf.cond(tf.equal(n, 0), lambda: values[0], lambda: k) sum_v = tf.reduce_sum(values, axis=0) sum_v2 = tf.reduce_sum(tf.square(values), axis=0) ones = tf.ones_like(values, dtype=tf.int32) batch_size = tf.reduce_sum(ones, axis=0) batch_size_f = tf.cast(batch_size, tf.float32) ex = 0 + sum_v - tf.multiply(batch_size_f, k) ex2 = ( 0 + sum_v2 + tf.multiply( batch_size_f, (tf.square(k) - tf.multiply(tf.multiply(2.0, k), sum_v)), ) ) return (k, n + batch_size, ex, ex2) class BenchmarkAdapt(tf.test.Benchmark): """Benchmark adapt.""" def run_dataset_implementation(self, num_elements, batch_size): input_t = keras.Input(shape=(1,)) layer = normalization.Normalization() _ = layer(input_t) num_repeats = 5 starts = [] ends = [] for _ in range(num_repeats): ds = tf.data.Dataset.range(num_elements) ds = ds.map(lambda x: tf.expand_dims(tf.cast(x, tf.float32), -1)) ds = ds.batch(batch_size) starts.append(time.time()) # Benchmarked code begins here. k, n, ex, ex2 = ds.reduce((0.0, 0, 0.0, 0.0), reduce_fn) mean = k.numpy() + ex.numpy() / n.numpy() var = (ex2.numpy() - (ex.numpy() * ex.numpy()) / n.numpy()) / ( n.numpy() - 1 ) layer.set_weights([mean, var]) # Benchmarked code ends here. ends.append(time.time()) avg_time = np.mean(np.array(ends) - np.array(starts)) return avg_time def bm_adapt_implementation(self, num_elements, batch_size): """Test the KPL adapt implementation.""" input_t = keras.Input(shape=(1,), dtype=tf.float32) layer = normalization.Normalization() _ = layer(input_t) num_repeats = 5 starts = [] ends = [] for _ in range(num_repeats): ds = tf.data.Dataset.range(num_elements) ds = ds.map(lambda x: tf.expand_dims(tf.cast(x, tf.float32), -1)) ds = ds.batch(batch_size) starts.append(time.time()) # Benchmarked code begins here. layer.adapt(ds) # Benchmarked code ends here. 
ends.append(time.time()) avg_time = np.mean(np.array(ends) - np.array(starts)) name = f"normalization_adapt|{num_elements}_elements|batch_{batch_size}" baseline = self.run_dataset_implementation(num_elements, batch_size) extras = { "tf.data implementation baseline": baseline, "delta seconds": (baseline - avg_time), "delta percent": ((baseline - avg_time) / baseline) * 100, } self.report_benchmark( iters=num_repeats, wall_time=avg_time, extras=extras, name=name ) def benchmark_vocab_size_by_batch(self): for vocab_size in [100, 1000, 10000, 100000, 1000000]: for batch in [1, 16, 2048]: self.bm_adapt_implementation(vocab_size, batch) if __name__ == "__main__": tf.test.main()
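
# A quick numeric check (illustrative only, not part of the benchmark) of
# the shifted-data moment identities that the `reduce_fn` baseline above
# relies on: with shift k, ex = sum(x - k) and ex2 = sum((x - k) ** 2), so
# mean = k + ex / n and var = (ex2 - ex ** 2 / n) / (n - 1). The helper name
# is hypothetical.
def _shifted_moments_sketch():
    x = np.array([1.0, 2.0, 3.0, 4.0])
    k, n = x[0], x.size
    ex = np.sum(x - k)
    ex2 = np.sum((x - k) ** 2)
    mean = k + ex / n  # == np.mean(x) == 2.5
    var = (ex2 - ex**2 / n) / (n - 1)  # == np.var(x, ddof=1)
    return mean, var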
tf-keras/tf_keras/layers/preprocessing/benchmarks/normalization_adapt_benchmark.py/0
{ "file_path": "tf-keras/tf_keras/layers/preprocessing/benchmarks/normalization_adapt_benchmark.py", "repo_id": "tf-keras", "token_count": 1916 }
204
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras index lookup preprocessing layer.""" import collections import numpy as np import tensorflow.compat.v2 as tf from tf_keras import backend from tf_keras.engine import base_layer_utils from tf_keras.engine import base_preprocessing_layer from tf_keras.layers.preprocessing import preprocessing_utils as utils from tf_keras.saving.legacy.saved_model import layer_serialization from tf_keras.utils import layer_utils from tf_keras.utils import tf_utils # isort: off from tensorflow.python.platform import tf_logging as logging INT = utils.INT MULTI_HOT = utils.MULTI_HOT ONE_HOT = utils.ONE_HOT COUNT = utils.COUNT TF_IDF = utils.TF_IDF _VOCAB_NAME = "vocab" _IDF_WEIGHTS_NAME = "idf_weights" class NullInitializer(tf.lookup.KeyValueTensorInitializer): """A placeholder initializer for restoring this layer from a SavedModel.""" def __init__(self, key_dtype, value_dtype): """Construct a table initializer object. Args: key_dtype: Type of the table keys. value_dtype: Type of the table values. """ self._key_dtype = key_dtype self._value_dtype = value_dtype @property def key_dtype(self): """The expected table key dtype.""" return self._key_dtype @property def value_dtype(self): """The expected table value dtype.""" return self._value_dtype def initialize(self, table): """Returns the table initialization op.""" pass class VocabWeightHandler(base_layer_utils.TrackableWeightHandler): """Adds the vocabulary as a layer weight during serialization.""" def __init__(self, lookup_layer): # Note that this class doesn't call super().__init__() in order to # have customized behavior. The fileds like '_dtype' and # '_distribute_strategy' are required by the parent class, as well as # tf.distribute. See `strategy.extended.variable_created_in_scope` self._layer = lookup_layer self._dtype = lookup_layer.vocabulary_dtype self._distribute_strategy = tf.distribute.get_strategy() @property def num_tensors(self): return 1 def set_weights(self, weights): tokens = tf.convert_to_tensor(weights[0], self._dtype) self._layer.lookup_table = self._layer._lookup_table_from_tokens(tokens) def get_tensors(self): # Just save the non-config part of the vocab (no special tokens). tokens = self._layer.get_vocabulary(include_special_tokens=False) tokens = tf.convert_to_tensor(tokens, self._dtype) return [tokens] class IndexLookup(base_preprocessing_layer.PreprocessingLayer): """Maps values from a vocabulary to integer indices. This layer translates a set of arbitrary hashables into an integer output via a table-based lookup, with optional out-of-vocabulary handling. This is the basis layer for both IntegerLookup and StringLookup; it holds the common logic but is not intended to be exported as part of the TF-Keras API. Args: max_tokens: The maximum size of the vocabulary for this layer. If None, there is no cap on the size of the vocabulary. 
Note that this size includes the OOV
            and mask tokens.
        num_oov_indices: The number of out-of-vocabulary tokens to use. If
            this value is more than 1, OOV inputs are hashed to determine
            their OOV value. If this value is 0, OOV inputs will cause an
            error when calling the layer.
        mask_token: A token that represents masked inputs. When
            `output_mode` is `"int"`, the token is included in vocabulary
            and mapped to index 0. In other output modes, the token will
            not appear in the vocabulary and instances of the mask token in
            the input will be dropped. If set to None, no mask term will be
            added.
        oov_token: Only used when `invert` is True. The token to return for
            OOV indices.
        vocabulary: Optional. Either an array or a string path to a text
            file. If passing an array, can pass a tuple, list, 1D numpy
            array, or 1D tensor containing the vocabulary terms. If passing
            a file path, the file should contain one line per term in the
            vocabulary. If this argument is set, there is no need to
            `adapt` the layer.
        vocabulary_dtype: The dtype of the vocabulary terms. For example,
            `"int64"` or `"string"`.
        idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple,
            list, 1D numpy array, or 1D tensor of the same length as the
            vocabulary, containing the floating point inverse document
            frequency weights, which will be multiplied by per sample term
            counts for the final `tf_idf` weight. If the `vocabulary`
            argument is set, and `output_mode` is `"tf_idf"`, this argument
            must be supplied.
        invert: Only valid when `output_mode` is `"int"`. If True, this
            layer will map indices to vocabulary items instead of mapping
            vocabulary items to indices. Defaults to `False`.
        output_mode: Specification for the output of the layer. Values can
            be `"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or
            `"tf_idf"` configuring the layer as follows:
            - `"int"`: Return the raw integer indices of the input tokens.
            - `"one_hot"`: Encodes each individual element in the input
              into an array the same size as the vocabulary, containing a 1
              at the element index. If the last dimension is size 1, will
              encode on that dimension. If the last dimension is not size
              1, will append a new dimension for the encoded output.
            - `"multi_hot"`: Encodes each sample in the input into a single
              array the same size as the vocabulary, containing a 1 for
              each vocabulary term present in the sample. Treats the last
              dimension as the sample dimension, if input shape is
              (..., sample_length), output shape will be (..., num_tokens).
            - `"count"`: As `"multi_hot"`, but the int array contains a
              count of the number of times the token at that index appeared
              in the sample.
            - `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is
              applied to find the value in each token slot.
            Defaults to `"int"`.
        pad_to_max_tokens: Only valid when `output_mode` is `"multi_hot"`,
            `"count"`, or `"tf_idf"`. If True, the output will have its
            feature axis padded to `max_tokens` even if the number of
            unique tokens in the vocabulary is less than max_tokens,
            resulting in a tensor of shape [batch_size, max_tokens]
            regardless of vocabulary size. Defaults to False.
        sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`,
            `"count"` and `"tf-idf"` output modes. If True, returns a
            `SparseTensor` instead of a dense `Tensor`. Defaults to
            `False`.
""" def __init__( self, max_tokens, num_oov_indices, mask_token, oov_token, vocabulary_dtype, vocabulary=None, idf_weights=None, invert=False, output_mode="int", sparse=False, pad_to_max_tokens=False, **kwargs, ): # If max_tokens is set, the value must be greater than 1 - otherwise we # are creating a 0-element vocab, which doesn't make sense. if max_tokens is not None and max_tokens <= 1: raise ValueError( "If set, `max_tokens` must be greater than 1. " f"Received: max_tokens={max_tokens}" ) if pad_to_max_tokens and max_tokens is None: raise ValueError( "If pad_to_max_tokens is True, must set `max_tokens`. " f"Received: max_tokens={max_tokens}" ) if num_oov_indices < 0: raise ValueError( "`num_oov_indices` must be greater than or equal to 0. " f"Received: num_oov_indices={num_oov_indices}" ) # Support deprecated names for output_modes. if output_mode == "binary": output_mode = MULTI_HOT if output_mode == "tf-idf": output_mode = TF_IDF # 'output_mode' must be one of (INT, ONE_HOT, MULTI_HOT, COUNT, TF_IDF) layer_utils.validate_string_arg( output_mode, allowable_strings=(INT, ONE_HOT, MULTI_HOT, COUNT, TF_IDF), layer_name=self.__class__.__name__, arg_name="output_mode", ) if invert and output_mode != INT: raise ValueError( "`output_mode` must be `'int'` when `invert` is true. " f"Received: output_mode={output_mode}" ) if sparse and output_mode == INT: raise ValueError( "`sparse` may only be true if `output_mode` is " "`'one_hot'`, `'multi_hot'`, `'count'` or `'tf_idf'`. " f"Received: sparse={sparse} and " f"output_mode={output_mode}" ) if idf_weights is not None and output_mode != TF_IDF: raise ValueError( "`idf_weights` should only be set if `output_mode` is " f"`'tf_idf'`. Received: idf_weights={idf_weights} and " f"output_mode={output_mode}" ) self.invert = invert self.max_tokens = max_tokens self.num_oov_indices = num_oov_indices self.mask_token = mask_token self.oov_token = oov_token self.output_mode = output_mode self.sparse = sparse self.pad_to_max_tokens = pad_to_max_tokens self.vocabulary_dtype = vocabulary_dtype self._frozen_vocab_size = kwargs.pop("vocabulary_size", None) self.input_vocabulary = vocabulary self.input_idf_weights = idf_weights # VocabularySavedModelSaver will clear the config vocabulary to restore # the lookup table ops directly. We persist this hidden option to # persist the fact that we have have a non-adaptable layer with a # manually set vocab. self._has_input_vocabulary = kwargs.pop( "has_input_vocabulary", (vocabulary is not None) ) # Drop deprecated config options. kwargs.pop("has_static_table", None) # By default, output int64 when output_mode='int' and floats otherwise. if "dtype" not in kwargs: kwargs["dtype"] = ( tf.int64 if output_mode == INT else backend.floatx() ) super().__init__(**kwargs) # Check dtype only after base layer parses it; dtype parsing is complex. if ( output_mode == INT and not tf.as_dtype(self.compute_dtype).is_integer ): input_dtype = kwargs["dtype"] raise ValueError( "When `output_mode='int'`, `dtype` should be an integer " f"type. Received: dtype={input_dtype}" ) if invert: self._key_dtype = self.dtype if output_mode == INT else tf.int64 self._value_dtype = tf.as_dtype(self.vocabulary_dtype) mask_key = 0 mask_value = mask_token self._default_value = self.oov_token else: self._key_dtype = tf.as_dtype(self.vocabulary_dtype) self._value_dtype = self.dtype if output_mode == INT else tf.int64 mask_key = mask_token # Masks should map to 0 for int output and be dropped otherwise. Max # ints will be dropped from the bincount op. 
mask_value = 0 if self.output_mode == INT else self._value_dtype.max if self.num_oov_indices == 0: # If there are no OOV indices, we map OOV tokens to -1 and error # out during call if we find a negative index. self._default_value = -1 elif self.num_oov_indices == 1: # If there is only one OOV index, we can set that index as the # default value of the index_lookup table. self._default_value = self._oov_start_index() else: # If we have multiple OOV values, we need to do a further # hashing step; to make this easier, we set the OOV value to -1. # (This lets us do a vectorized add and cast to boolean to # determine locations where we need to do extra hashing.) self._default_value = -1 if self.mask_token is not None: self._mask_key = tf.convert_to_tensor(mask_key, self._key_dtype) self._mask_value = tf.convert_to_tensor( mask_value, self._value_dtype ) if self.output_mode == TF_IDF: self.idf_weights = tf.Variable( [0] * self._token_start_index(), shape=(None,), dtype=self.compute_dtype, trainable=False, ) self.idf_weights_const = self.idf_weights.value() if vocabulary is not None: self.set_vocabulary(vocabulary, idf_weights) else: # When restoring from a keras SavedModel, the loading code will # expect to find and restore a lookup_table attribute on the layer. # This table needs to be uninitialized as a StaticHashTable cannot # be initialized twice. self.lookup_table = self._uninitialized_lookup_table() # Only set up adapt state if we did not receive a vocab on construction. if not self._has_input_vocabulary: # Add custom weight handler to return the layer's vocab as a weight. self._add_trackable(VocabWeightHandler(self), False) # Set adapt state. self.token_counts = tf.lookup.experimental.MutableHashTable( key_dtype=vocabulary_dtype, value_dtype=tf.int64, default_value=0, ) if self.output_mode == TF_IDF: self.token_document_counts = ( tf.lookup.experimental.MutableHashTable( key_dtype=vocabulary_dtype, value_dtype=tf.int64, default_value=0, ) ) self.num_documents = tf.Variable( 0, dtype=tf.int64, trainable=False ) def compute_output_shape(self, input_shape): if self.output_mode == INT: return input_shape depth = ( self.max_tokens if self.pad_to_max_tokens else self._frozen_vocab_size ) return tf.TensorShape([input_shape[0], depth]) def compute_output_signature(self, input_spec): output_shape = self.compute_output_shape(input_spec.shape.as_list()) output_dtype = ( self.vocabulary_dtype if self.invert else self.compute_dtype ) return tf.TensorSpec(shape=output_shape, dtype=output_dtype) def get_vocabulary(self, include_special_tokens=True): """Returns the current vocabulary of the layer. Args: include_special_tokens: If True, the returned vocabulary will include mask and OOV tokens, and a term's index in the vocabulary will equal the term's index when calling the layer. If False, the returned vocabulary will not include any mask or OOV tokens. """ # The lookup table data will not be sorted, so we will create a inverted # lookup here, and use that to lookup a range of indices [0, # vocab_size). 
if self.lookup_table.size() == 0:
            vocab, indices = [], []
        else:
            keys, values = self.lookup_table.export()
            vocab, indices = (
                (values, keys) if self.invert else (keys, values)
            )
            vocab, indices = (
                self._tensor_vocab_to_numpy(vocab),
                indices.numpy(),
            )
        lookup = collections.defaultdict(
            lambda: self.oov_token, zip(indices, vocab)
        )
        vocab = [lookup[x] for x in range(self.vocabulary_size())]
        if self.mask_token is not None and self.output_mode == INT:
            vocab[0] = self.mask_token
        if not include_special_tokens:
            vocab = vocab[self._token_start_index() :]
        return vocab

    def vocabulary_size(self):
        """Gets the current size of the layer's vocabulary.

        Returns:
          The integer size of the vocabulary, including optional mask and
          oov indices.
        """
        if tf.executing_eagerly():
            return (
                int(self.lookup_table.size().numpy())
                + self._token_start_index()
            )
        else:
            return self.lookup_table.size() + self._token_start_index()

    def vocab_size(self):
        logging.warning("vocab_size is deprecated, please use vocabulary_size.")
        return self.vocabulary_size()

    def get_config(self):
        config = {
            "invert": self.invert,
            "max_tokens": self.max_tokens,
            "num_oov_indices": self.num_oov_indices,
            "oov_token": self.oov_token,
            "mask_token": self.mask_token,
            "output_mode": self.output_mode,
            "sparse": self.sparse,
            "pad_to_max_tokens": self.pad_to_max_tokens,
            "vocabulary_dtype": self.vocabulary_dtype,
            "idf_weights": utils.listify_tensors(self.input_idf_weights),
            "vocabulary": utils.listify_tensors(self.input_vocabulary),
            "vocabulary_size": self._frozen_vocab_size,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def _record_vocabulary_size(self):
        self._ensure_vocab_size_unchanged()
        with tf.init_scope():
            self._frozen_vocab_size = self.vocabulary_size()

    def set_vocabulary(self, vocabulary, idf_weights=None):
        """Sets vocabulary (and optionally document frequency) for this layer.

        This method sets the vocabulary and idf weights for this layer
        directly, instead of analyzing a dataset through `adapt`. It should
        be used whenever the vocab (and optionally document frequency)
        information is already known. If vocabulary data is already present
        in the layer, this method will replace it.

        Args:
          vocabulary: Either an array or a string path to a text file. If
            passing an array, can pass a tuple, list, 1D numpy array, or 1D
            tensor containing the vocabulary terms. If passing a file path,
            the file should contain one line per term in the vocabulary.
          idf_weights: A tuple, list, 1D numpy array, or 1D tensor of
            inverse document frequency weights with equal length to
            vocabulary. Must be set if `output_mode` is `"tf_idf"`. Should
            not be set otherwise.

        Raises:
          ValueError: If there are too many inputs, the inputs do not
            match, or input data is missing.
          RuntimeError: If the vocabulary cannot be set when this function
            is called. This happens in `"multi_hot"`, `"count"`, and
            `"tf_idf"` modes if `pad_to_max_tokens` is False and the layer
            itself has already been called.
          RuntimeError: If a tensor vocabulary is passed outside of eager
            execution.
        """
        if self.output_mode == TF_IDF:
            if idf_weights is None:
                raise ValueError(
                    "`idf_weights` must be set if output_mode is TF_IDF"
                )
        elif idf_weights is not None:
            raise ValueError(
                "`idf_weights` should only be set if output_mode is "
                f"`'tf_idf'`. Received: output_mode={self.output_mode} "
                f"and idf_weights={idf_weights}"
            )

        if isinstance(vocabulary, str):
            if not tf.io.gfile.exists(vocabulary):
                raise ValueError(
                    f"Vocabulary file {vocabulary} does not exist."
) if self.output_mode == TF_IDF: raise ValueError( "output_mode `'tf_idf'` does not support loading a " "vocabulary from file." ) self.lookup_table = self._lookup_table_from_file(vocabulary) self._record_vocabulary_size() return if not tf.executing_eagerly() and ( tf.is_tensor(vocabulary) or tf.is_tensor(idf_weights) ): raise RuntimeError( "Cannot set a tensor vocabulary on {} layer {} when not " "executing eagerly. Create this layer or call `set_vocabulary` " "outside of any `tf.function`s and with eager execution " "enabled.".format(self.__class__.__name__, self.name) ) # TODO(mattdangerw): for better performance we should rewrite this # entire function to operate on tensors and convert vocabulary to a # tensor here. if tf.is_tensor(vocabulary): vocabulary = self._tensor_vocab_to_numpy(vocabulary) elif isinstance(vocabulary, (list, tuple)): vocabulary = np.array(vocabulary) if tf.is_tensor(idf_weights): idf_weights = idf_weights.numpy() elif isinstance(idf_weights, (list, tuple)): idf_weights = np.array(idf_weights) if vocabulary.size == 0: raise ValueError( f"Cannot set an empty vocabulary, you passed {vocabulary}." ) oov_start = self._oov_start_index() token_start = self._token_start_index() special_tokens = [self.mask_token] * oov_start + [ self.oov_token ] * self.num_oov_indices found_special_tokens = np.array_equal( special_tokens, vocabulary[:token_start] ) if found_special_tokens: tokens = vocabulary[token_start:] else: tokens = vocabulary repeated_tokens = self._find_repeated_tokens(tokens) if repeated_tokens: raise ValueError( "The passed vocabulary has at least one repeated " "term. Please uniquify your dataset. The repeated terms " "are {}".format(repeated_tokens) ) if self.mask_token is not None and self.mask_token in tokens: mask_index = np.argwhere(vocabulary == self.mask_token)[-1] raise ValueError( "Found reserved mask token at unexpected location in " "`vocabulary`. Note that passed `vocabulary` does not need to " "include the OOV and mask tokens. Either remove all mask and " "OOV tokens, or include them only at the start of the " f"vocabulary in precisely this order: {special_tokens}. " f"Received: mask_token={self.mask_token} at " f"vocabulary index {mask_index}" ) # Only error out for oov_token when invert=True. When invert=False, # oov_token is unused during lookup. if ( self.oov_token is not None and self.invert and self.oov_token in tokens ): oov_index = np.argwhere(vocabulary == self.oov_token)[-1] raise ValueError( "Found reserved OOV token at unexpected location in " "`vocabulary`. Note that passed `vocabulary` does not need to " "include the OOV and mask tokens. Either remove all mask and " "OOV tokens, or include them only at the start of the " f"vocabulary in precisely this order: {special_tokens}. " f"Received: oov_token={self.oov_token} at " f"vocabulary index {oov_index}" ) new_vocab_size = token_start + len(tokens) if self.max_tokens is not None and (new_vocab_size > self.max_tokens): raise ValueError( "Attempted to set a vocabulary larger than the maximum vocab " "size. Passed vocab size is {}, max vocab size is {}.".format( new_vocab_size, self.max_tokens ) ) self.lookup_table = self._lookup_table_from_tokens(tokens) self._record_vocabulary_size() if self.output_mode == TF_IDF and idf_weights is not False: if len(vocabulary) != len(idf_weights): raise ValueError( "`idf_weights` must be the same length as vocabulary. 
" "len(idf_weights) is {}, len(vocabulary) is {}".format( len(vocabulary), len(idf_weights) ) ) idf_weights = self._convert_to_ndarray(idf_weights) if idf_weights.ndim != 1: raise ValueError( "TF-IDF data must be a 1-index array, " "but received {}".format(type(idf_weights)) ) # If the passed vocabulary has no special tokens, we need to pad the # front of idf_weights. We don't have real document frequencies for # these tokens so we will use an average of all idf_weights passed # in as a reasonable default. if found_special_tokens: front_padding = 0 front_padding_value = 0 else: front_padding = token_start front_padding_value = np.average(idf_weights) # If pad_to_max_tokens is true, and max_tokens is greater than our # total vocab size, we need to pad the back of idf_weights with # zeros as well. back_padding_value = 0 if self.pad_to_max_tokens and self.max_tokens is not None: back_padding = ( self.max_tokens - front_padding - len(idf_weights) ) else: back_padding = 0 weights = np.pad( idf_weights, (front_padding, back_padding), "constant", constant_values=(front_padding_value, back_padding_value), ) weights = tf.convert_to_tensor(weights, dtype=self.compute_dtype) self.idf_weights.assign(weights) self.idf_weights_const = self.idf_weights.value() def update_state(self, data): if self._has_input_vocabulary: raise ValueError( "Cannot adapt {} layer after setting a static vocabulary via " "init argument " "or `set_vocabulary`.".format(self.__class__.__name__) ) data = utils.ensure_tensor(data, dtype=self.vocabulary_dtype) if data.shape.rank == 0: data = tf.expand_dims(data, 0) if data.shape.rank == 1: # Expand dims on axis 0 for tf-idf. A 1-d tensor is a single # document. data = tf.expand_dims(data, 0) tokens, counts = self._num_tokens(data) self.token_counts.insert( tokens, counts + self.token_counts.lookup(tokens) ) if self.output_mode == TF_IDF: # Dedupe each row of our dataset. deduped_doc_data = tf.map_fn(lambda x: tf.unique(x)[0], data) # Flatten and count tokens. tokens, doc_counts = self._num_tokens(deduped_doc_data) self.token_document_counts.insert( tokens, doc_counts + self.token_document_counts.lookup(tokens) ) if tf_utils.is_ragged(data): self.num_documents.assign_add(data.nrows()) else: self.num_documents.assign_add( tf.shape(data, out_type=tf.int64)[0] ) def finalize_state(self): if self._has_input_vocabulary or tf.equal(self.token_counts.size(), 0): # Finalize idf_weights to a const for call even if we don't need to # compute a new vocabulary. if self.output_mode == TF_IDF: self.idf_weights_const = self.idf_weights.value() self._record_vocabulary_size() return # Remove special tokens from our counts. if self.mask_token is not None: self.token_counts.remove( tf.convert_to_tensor([self.mask_token], self.vocabulary_dtype) ) if self.oov_token is not None: self.token_counts.remove( tf.convert_to_tensor([self.oov_token], self.vocabulary_dtype) ) tokens, counts = self.token_counts.export() # To keep vocabs deterministic, we sort our tokens by count and break # ties by sorting the tokens themselves. Tensorflow has no ops for # sorting strings, so we need to use numpy for the sort. 
sorted_indices = np.lexsort((tokens.numpy(), counts.numpy()))[::-1] token_start = self._token_start_index() if self.max_tokens: max_learned_tokens = self.max_tokens - token_start sorted_indices = sorted_indices[:max_learned_tokens] tokens = tf.gather(tokens, sorted_indices) self.lookup_table = self._lookup_table_from_tokens(tokens) if self.output_mode == TF_IDF: token_document_counts = self.token_document_counts.lookup(tokens) idf_weights = self._inverse_document_frequency( token_document_counts, self.num_documents ) idf_weights = tf.cast(idf_weights, self.compute_dtype) # Pad the front of idf_weights with the average idf weight for OOV # tokens. We cannot compute the real idf weight of OOV in a single # pass. idf_weights = tf.pad( idf_weights, [[self._token_start_index(), 0]], constant_values=tf.reduce_mean(idf_weights), ) if self.pad_to_max_tokens and self.max_tokens is not None: # Pad the back of idf_weights with zeros. idf_weights = tf.pad( idf_weights, [[0, self.max_tokens - tf.size(idf_weights)]], constant_values=0, ) self.idf_weights.assign(idf_weights) self.idf_weights_const = self.idf_weights.value() # We call this here to save memory, now that we've built our vocabulary, # we don't want to keep every token we've seen in separate lookup # tables. self.reset_state() self._record_vocabulary_size() def reset_state(self): if self._has_input_vocabulary: return self.token_counts.remove(self.token_counts.export()[0]) if self.output_mode == TF_IDF: self.token_document_counts.remove( self.token_document_counts.export()[0] ) self.num_documents.assign(0) def call(self, inputs): self._ensure_known_vocab_size() inputs = utils.ensure_tensor(inputs, dtype=self._key_dtype) original_shape = inputs.shape # Some ops will not handle scalar input, so uprank to rank 1. if inputs.shape.rank == 0: inputs = self._expand_dims(inputs, -1) if tf_utils.is_sparse(inputs): lookups = tf.SparseTensor( inputs.indices, self._lookup_dense(inputs.values), inputs.dense_shape, ) elif tf_utils.is_ragged(inputs): lookups = tf.ragged.map_flat_values(self._lookup_dense, inputs) else: lookups = self._lookup_dense(inputs) if self.output_mode == INT: # If we received a scalar input, downrank back to a scalar. if original_shape.rank == 0: lookups = tf.squeeze(lookups, -1) return lookups depth = ( self.max_tokens if self.pad_to_max_tokens else self._frozen_vocab_size ) idf_weights = ( self.idf_weights_const if self.output_mode == TF_IDF else None ) return utils.encode_categorical_inputs( lookups, output_mode=self.output_mode, depth=depth, dtype=self.compute_dtype, sparse=self.sparse, idf_weights=idf_weights, ) def _lookup_dense(self, inputs): """Lookup table values for a dense Tensor, handling masking and OOV.""" # When executing eagerly and tracing keras.Input objects, # do not call lookup. # This is critical for restoring SavedModel, which will first trace # layer.call and then attempt to restore the table. We need the table to # be uninitialized for the restore to work, but calling the table # uninitialized would error. if tf.executing_eagerly() and backend.is_keras_tensor(inputs): lookups = tf.zeros_like(inputs, dtype=self._value_dtype) else: lookups = self.lookup_table.lookup(inputs) if self.mask_token is not None: mask_locations = tf.equal(inputs, self._mask_key) lookups = tf.where(mask_locations, self._mask_value, lookups) if self.invert: return lookups lookup_checks = [] if self.num_oov_indices == 0: # If we have zero oov indices, we need to check for oov inputs. 
oov_indices = tf.where(tf.equal(lookups, -1)) oov_inputs = tf.gather_nd(inputs, oov_indices) msg = tf.strings.format( "When `num_oov_indices=0` all inputs should be in vocabulary, " "found OOV values {}, consider setting `num_oov_indices=1`.", (oov_inputs,), ) assertion = tf.Assert(tf.equal(tf.size(oov_indices), 0), [msg]) lookup_checks.append(assertion) elif self.num_oov_indices > 1: # If we have multiple oov indices, we need a further hashing step. if self._key_dtype.is_integer: oov_indices = tf.math.floormod(inputs, self.num_oov_indices) else: oov_indices = tf.strings.to_hash_bucket_fast( inputs, num_buckets=self.num_oov_indices ) oov_indices = oov_indices + self._oov_start_index() oov_locations = tf.equal(lookups, self._default_value) lookups = tf.where(oov_locations, oov_indices, lookups) with tf.control_dependencies(lookup_checks): return tf.identity(lookups) def save_own_variables(self, store): if self.output_mode == TF_IDF: store["idf_weights"] = self.idf_weights_const.numpy() def load_own_variables(self, store): if self.output_mode == TF_IDF: self.idf_weights.assign(store["idf_weights"]) self.idf_weights_const = self.idf_weights.value() def save_assets(self, dir_path): if self.input_vocabulary: # Vocab saved in config. # TODO: consider unifying both paths. return vocabulary = self.get_vocabulary(include_special_tokens=True) vocabulary_filepath = tf.io.gfile.join(dir_path, "vocabulary.txt") with open(vocabulary_filepath, "w") as f: f.write("\n".join([str(w) for w in vocabulary])) def load_assets(self, dir_path): if self.input_vocabulary: # Vocab saved in config. # TODO: consider unifying both paths. return vocabulary_filepath = tf.io.gfile.join(dir_path, "vocabulary.txt") # TODO: fix bug with include_special_tokens and set reload from file. 
with open(vocabulary_filepath, "r") as f:
            lines = f.read().split("\n")
            if tf.as_dtype(self.vocabulary_dtype) == tf.string:
                values = [str(line) for line in lines]
            else:
                values = [int(line) for line in lines]
            if self.output_mode == TF_IDF:
                self.set_vocabulary(values, idf_weights=False)
            else:
                self.set_vocabulary(values)

    def _uninitialized_lookup_table(self):
        with tf.init_scope():
            initializer = NullInitializer(self._key_dtype, self._value_dtype)
            return tf.lookup.StaticHashTable(initializer, self._default_value)

    def _lookup_table_from_tokens(self, tokens):
        with tf.init_scope():
            token_start = self._token_start_index()
            token_end = token_start + tf.size(tokens)
            indices_dtype = (
                self._key_dtype if self.invert else self._value_dtype
            )
            indices = tf.range(token_start, token_end, dtype=indices_dtype)
            keys, values = (
                (indices, tokens) if self.invert else (tokens, indices)
            )
            initializer = tf.lookup.KeyValueTensorInitializer(
                keys, values, self._key_dtype, self._value_dtype
            )
            return tf.lookup.StaticHashTable(initializer, self._default_value)

    def _lookup_table_from_file(self, filename):
        if self.invert:
            key_index = tf.lookup.TextFileIndex.LINE_NUMBER
            value_index = tf.lookup.TextFileIndex.WHOLE_LINE
        else:
            key_index = tf.lookup.TextFileIndex.WHOLE_LINE
            value_index = tf.lookup.TextFileIndex.LINE_NUMBER
        with tf.init_scope():
            initializer = tf.lookup.TextFileInitializer(
                filename=filename,
                key_dtype=self._key_dtype,
                key_index=key_index,
                value_dtype=self._value_dtype,
                value_index=value_index,
                value_index_offset=self._token_start_index(),
            )
            return tf.lookup.StaticHashTable(initializer, self._default_value)

    def _convert_to_ndarray(self, x):
        return np.array(x) if isinstance(x, (list, tuple)) else x

    def _expand_dims(self, inputs, axis):
        if tf_utils.is_sparse(inputs):
            return tf.sparse.expand_dims(inputs, axis)
        else:
            return tf.expand_dims(inputs, axis)

    def _oov_start_index(self):
        return (
            1 if self.mask_token is not None and self.output_mode == INT else 0
        )

    def _token_start_index(self):
        return self._oov_start_index() + self.num_oov_indices

    def _ensure_known_vocab_size(self):
        if self.output_mode == INT or self.pad_to_max_tokens:
            return
        if self._frozen_vocab_size is None:
            raise RuntimeError(
                f"When using `output_mode={self.output_mode}` "
                "and `pad_to_max_tokens=False`, "
                "you must set the layer's vocabulary before calling it. Either "
                "pass a `vocabulary` argument to the layer, or call `adapt` "
                "with some sample data."
            )

    def _ensure_vocab_size_unchanged(self):
        if self.output_mode == INT or self.pad_to_max_tokens:
            return
        with tf.init_scope():
            new_vocab_size = self.vocabulary_size()
            if (
                self._frozen_vocab_size is not None
                and new_vocab_size != self._frozen_vocab_size
            ):
                raise RuntimeError(
                    f"When using `output_mode={self.output_mode}` "
                    "and `pad_to_max_tokens=False`, "
                    "the vocabulary size cannot be changed after the layer is "
                    f"called. 
Old vocab size is {self._frozen_vocab_size}, " f"new vocab size is {new_vocab_size}" ) def _find_repeated_tokens(self, vocabulary): """Return all repeated tokens in a vocabulary.""" vocabulary_set = set(vocabulary) if len(vocabulary) != len(vocabulary_set): return [ item for item, count in collections.Counter(vocabulary).items() if count > 1 ] else: return [] def _num_tokens(self, data): """Count the number of tokens in a ragged, sparse or dense tensor.""" if tf_utils.is_sparse(data): flat_values = data.values elif tf_utils.is_ragged(data): flat_values = data.flat_values else: flat_values = tf.reshape(data, [-1]) tokens, _, counts = tf.unique_with_counts(flat_values, out_idx=tf.int64) return tokens, counts def _inverse_document_frequency(self, token_document_counts, num_documents): """Computes the inverse-document-frequency (IDF) component of "tf_idf". Uses the default weighting scheme described in https://en.wikipedia.org/wiki/Tf%E2%80%93idf. Args: token_document_counts: An array of the # of documents each token appears in. num_documents: An int representing the total number of documents Returns: An array of "inverse document frequency" weights. """ return tf.math.log(1 + num_documents / (1 + token_document_counts)) @property def _trackable_saved_model_saver(self): return layer_serialization.VocabularySavedModelSaver(self) # Override points for IntegerLookup and StringLookup. def _tensor_vocab_to_numpy(self, vocabulary): """Converts a tensor vocabulary to a numpy vocabulary.""" return vocabulary.numpy()
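
# A small worked example (illustrative only, not used by the layer) of the
# default IDF weighting computed by `_inverse_document_frequency` above:
# tokens appearing in fewer documents receive larger weights. The helper
# name is hypothetical.
def _idf_weights_sketch():
    num_documents = 100
    token_document_counts = np.array([99.0, 9.0, 0.0])
    # idf = log(1 + num_documents / (1 + document_count)):
    # roughly [0.69, 2.40, 4.62] -- the rarest token is weighted highest.
    return np.log(1 + num_documents / (1 + token_document_counts))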
tf-keras/tf_keras/layers/preprocessing/index_lookup.py/0
{ "file_path": "tf-keras/tf_keras/layers/preprocessing/index_lookup.py", "repo_id": "tf-keras", "token_count": 19702 }
205
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras text vectorization preprocessing layer.""" import numpy as np import tensorflow.compat.v2 as tf from tf_keras import backend from tf_keras.engine import base_preprocessing_layer from tf_keras.layers.preprocessing import preprocessing_utils as utils from tf_keras.layers.preprocessing import string_lookup from tf_keras.saving.legacy.saved_model import layer_serialization from tf_keras.saving.serialization_lib import deserialize_keras_object from tf_keras.utils import layer_utils from tf_keras.utils import tf_utils # isort: off from tensorflow.python.util.tf_export import keras_export LOWER_AND_STRIP_PUNCTUATION = "lower_and_strip_punctuation" STRIP_PUNCTUATION = "strip_punctuation" LOWER = "lower" WHITESPACE = "whitespace" CHARACTER = "character" TF_IDF = utils.TF_IDF INT = utils.INT MULTI_HOT = utils.MULTI_HOT COUNT = utils.COUNT # This is an explicit regex of all the tokens that will be stripped if # LOWER_AND_STRIP_PUNCTUATION is set. If an application requires other # stripping, a Callable should be passed into the 'standardize' arg. DEFAULT_STRIP_REGEX = r'[!"#$%&()\*\+,-\./:;<=>?@\[\\\]^_`{|}~\']' @keras_export( "keras.layers.TextVectorization", "keras.layers.experimental.preprocessing.TextVectorization", v1=[], ) class TextVectorization(base_preprocessing_layer.PreprocessingLayer): """A preprocessing layer which maps text features to integer sequences. This layer has basic options for managing text in a TF-Keras model. It transforms a batch of strings (one example = one string) into either a list of token indices (one example = 1D tensor of integer token indices) or a dense representation (one example = 1D tensor of float values representing data about the example's tokens). This layer is meant to handle natural language inputs. To handle simple string inputs (categorical strings or pre-tokenized strings) see `tf.keras.layers.StringLookup`. The vocabulary for the layer must be either supplied on construction or learned via `adapt()`. When this layer is adapted, it will analyze the dataset, determine the frequency of individual string values, and create a vocabulary from them. This vocabulary can have unlimited size or be capped, depending on the configuration options for this layer; if there are more unique values in the input than the maximum vocabulary size, the most frequent terms will be used to create the vocabulary. The processing of each example contains the following steps: 1. Standardize each example (usually lowercasing + punctuation stripping) 2. Split each example into substrings (usually words) 3. Recombine substrings into tokens (usually ngrams) 4. Index tokens (associate a unique int value with each token) 5. Transform each example using this index, either into a vector of ints or a dense float vector. 
    Some notes on passing callables to customize splitting and normalization
    for this layer:

    1. Any callable can be passed to this Layer, but if you want to serialize
       this object you should only pass functions that are registered Keras
       serializables (see `tf.keras.saving.register_keras_serializable` for
       more details).
    2. When using a custom callable for `standardize`, the data received
       by the callable will be exactly as passed to this layer. The callable
       should return a tensor of the same shape as the input.
    3. When using a custom callable for `split`, the data received by the
       callable will have the 1st dimension squeezed out - instead of
       `[["string to split"], ["another string to split"]]`, the Callable will
       see `["string to split", "another string to split"]`. The callable
       should return a Tensor with the first dimension containing the split
       tokens - in this example, we should see something like `[["string",
       "to", "split"], ["another", "string", "to", "split"]]`. This makes the
       callable site natively compatible with `tf.strings.split()`.

    For an overview and full list of preprocessing layers, see the
    preprocessing [guide](
    https://www.tensorflow.org/guide/keras/preprocessing_layers).

    Args:
      max_tokens: Maximum size of the vocabulary for this layer. This should
        only be specified when adapting a vocabulary or when setting
        `pad_to_max_tokens=True`. Note that this vocabulary
        contains 1 OOV token, so the effective number of tokens is
        `(max_tokens - 1 - (1 if output_mode == "int" else 0))`.
      standardize: Optional specification for standardization to apply to the
        input text. Values can be:
          - `None`: No standardization.
          - `"lower_and_strip_punctuation"`: Text will be lowercased and all
            punctuation removed.
          - `"lower"`: Text will be lowercased.
          - `"strip_punctuation"`: All punctuation will be removed.
          - Callable: Inputs will be passed to the callable function, which
            should be standardized and returned.
      split: Optional specification for splitting the input text. Values can
        be:
          - `None`: No splitting.
          - `"whitespace"`: Split on whitespace.
          - `"character"`: Split on each unicode character.
          - Callable: Standardized inputs will be passed to the callable
            function, which should be split and returned.
      ngrams: Optional specification for ngrams to create from the
        possibly-split input text. Values can be `None`, an integer, or a
        tuple of integers; passing an integer will create ngrams up to that
        integer, and passing a tuple of integers will create ngrams for the
        specified values in the tuple. Passing `None` means that no ngrams
        will be created.
      output_mode: Optional specification for the output of the layer. Values
        can be `"int"`, `"multi_hot"`, `"count"` or `"tf_idf"`, configuring
        the layer as follows:
          - `"int"`: Outputs integer indices, one integer index per split
            string token. When `output_mode == "int"`, 0 is reserved for
            masked locations; this reduces the vocab size to
            `max_tokens - 2` instead of `max_tokens - 1`.
          - `"multi_hot"`: Outputs a single int array per batch, of either
            vocab_size or max_tokens size, containing 1s in all elements
            where the token mapped to that index exists at least once in the
            batch item.
          - `"count"`: Like `"multi_hot"`, but the int array contains a count
            of the number of times the token at that index appeared in the
            batch item.
          - `"tf_idf"`: Like `"multi_hot"`, but the TF-IDF algorithm is
            applied to find the value in each token slot.
        For `"int"` output, any shape of input and output is supported.
For all other output modes, currently only rank 1 inputs (and rank 2 outputs after splitting) are supported. output_sequence_length: Only valid in INT mode. If set, the output will have its time dimension padded or truncated to exactly `output_sequence_length` values, resulting in a tensor of shape `(batch_size, output_sequence_length)` regardless of how many tokens resulted from the splitting step. Defaults to `None`. pad_to_max_tokens: Only valid in `"multi_hot"`, `"count"`, and `"tf_idf"` modes. If True, the output will have its feature axis padded to `max_tokens` even if the number of unique tokens in the vocabulary is less than max_tokens, resulting in a tensor of shape `(batch_size, max_tokens)` regardless of vocabulary size. Defaults to `False`. vocabulary: Optional. Either an array of strings or a string path to a text file. If passing an array, can pass a tuple, list, 1D numpy array, or 1D tensor containing the string vocabulary terms. If passing a file path, the file should contain one line per term in the vocabulary. If this argument is set, there is no need to `adapt()` the layer. idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple, list, 1D numpy array, or 1D tensor of the same length as the vocabulary, containing the floating point inverse document frequency weights, which will be multiplied by per sample term counts for the final `tf_idf` weight. If the `vocabulary` argument is set, and `output_mode` is `"tf_idf"`, this argument must be supplied. ragged: Boolean. Only applicable to `"int"` output mode. If True, returns a `RaggedTensor` instead of a dense `Tensor`, where each sequence may have a different length after string splitting. Defaults to `False`. sparse: Boolean. Only applicable to `"multi_hot"`, `"count"`, and `"tf_idf"` output modes. If True, returns a `SparseTensor` instead of a dense `Tensor`. Defaults to `False`. encoding: Optional. The text encoding to use to interpret the input strings. Defaults to `"utf-8"`. Example: This example instantiates a `TextVectorization` layer that lowercases text, splits on whitespace, strips punctuation, and outputs integer vocab indices. >>> text_dataset = tf.data.Dataset.from_tensor_slices(["foo", "bar", "baz"]) >>> max_features = 5000 # Maximum vocab size. >>> max_len = 4 # Sequence length to pad the outputs to. >>> >>> # Create the layer. >>> vectorize_layer = tf.keras.layers.TextVectorization( ... max_tokens=max_features, ... output_mode='int', ... output_sequence_length=max_len) >>> >>> # Now that the vocab layer has been created, call `adapt` on the >>> # text-only dataset to create the vocabulary. You don't have to batch, >>> # but for large datasets this means we're not keeping spare copies of >>> # the dataset. >>> vectorize_layer.adapt(text_dataset.batch(64)) >>> >>> # Create the model that uses the vectorize text layer >>> model = tf.keras.models.Sequential() >>> >>> # Start by creating an explicit input layer. It needs to have a shape of >>> # (1,) (because we need to guarantee that there is exactly one string >>> # input per batch), and the dtype needs to be 'string'. >>> model.add(tf.keras.Input(shape=(1,), dtype=tf.string)) >>> >>> # The first layer in our model is the vectorization layer. After this >>> # layer, we have a tensor of shape (batch_size, max_len) containing >>> # vocab indices. >>> model.add(vectorize_layer) >>> >>> # Now, the model can map strings to integers, and you can add an >>> # embedding layer to map these integers to learned embeddings. 
>>> input_data = [["foo qux bar"], ["qux baz"]] >>> model.predict(input_data) array([[2, 1, 4, 0], [1, 3, 0, 0]]) Example: This example instantiates a `TextVectorization` layer by passing a list of vocabulary terms to the layer's `__init__()` method. >>> vocab_data = ["earth", "wind", "and", "fire"] >>> max_len = 4 # Sequence length to pad the outputs to. >>> >>> # Create the layer, passing the vocab directly. You can also pass the >>> # vocabulary arg a path to a file containing one vocabulary word per >>> # line. >>> vectorize_layer = tf.keras.layers.TextVectorization( ... max_tokens=max_features, ... output_mode='int', ... output_sequence_length=max_len, ... vocabulary=vocab_data) >>> >>> # Because we've passed the vocabulary directly, we don't need to adapt >>> # the layer - the vocabulary is already set. The vocabulary contains the >>> # padding token ('') and OOV token ('[UNK]') as well as the passed >>> # tokens. >>> vectorize_layer.get_vocabulary() ['', '[UNK]', 'earth', 'wind', 'and', 'fire'] """ def __init__( self, max_tokens=None, standardize="lower_and_strip_punctuation", split="whitespace", ngrams=None, output_mode="int", output_sequence_length=None, pad_to_max_tokens=False, vocabulary=None, idf_weights=None, sparse=False, ragged=False, encoding="utf-8", **kwargs, ): # This layer only applies to string processing, and so should only have # a dtype of 'string'. if "dtype" in kwargs and kwargs["dtype"] != tf.string: raise ValueError( "`TextVectorization` may only have a dtype of string. " f"Received dtype: {kwargs['dtype']}." ) elif "dtype" not in kwargs: kwargs["dtype"] = tf.string # 'standardize' must be one of # (None, LOWER_AND_STRIP_PUNCTUATION, LOWER, STRIP_PUNCTUATION, # callable) layer_utils.validate_string_arg( standardize, allowable_strings=( LOWER_AND_STRIP_PUNCTUATION, LOWER, STRIP_PUNCTUATION, ), layer_name="TextVectorization", arg_name="standardize", allow_none=True, allow_callables=True, ) # 'split' must be one of (None, WHITESPACE, CHARACTER, callable) layer_utils.validate_string_arg( split, allowable_strings=(WHITESPACE, CHARACTER), layer_name="TextVectorization", arg_name="split", allow_none=True, allow_callables=True, ) # Support deprecated names for output_modes. if output_mode == "binary": output_mode = MULTI_HOT if output_mode == "tf-idf": output_mode = TF_IDF # 'output_mode' must be one of (None, INT, COUNT, MULTI_HOT, TF_IDF) layer_utils.validate_string_arg( output_mode, allowable_strings=(INT, COUNT, MULTI_HOT, TF_IDF), layer_name="TextVectorization", arg_name="output_mode", allow_none=True, ) # 'ngrams' must be one of (None, int, tuple(int)) if not ( ngrams is None or isinstance(ngrams, int) or isinstance(ngrams, tuple) and all(isinstance(item, int) for item in ngrams) ): raise ValueError( "`ngrams` must be None, an integer, or a tuple of " f"integers. Received: ngrams={ngrams}" ) # 'output_sequence_length' must be one of (None, int) and is only # set if output_mode is INT. if output_mode == INT and not ( isinstance(output_sequence_length, int) or (output_sequence_length is None) ): raise ValueError( "`output_sequence_length` must be either None or an " "integer when `output_mode` is 'int'. Received: " f"output_sequence_length={output_sequence_length}" ) if output_mode != INT and output_sequence_length is not None: raise ValueError( "`output_sequence_length` must not be set if `output_mode` is " "not 'int'. " f"Received output_sequence_length={output_sequence_length}." 
            )

        if ragged and output_mode != INT:
            raise ValueError(
                "`ragged` must not be true unless `output_mode` is "
                f"`'int'`. Received: ragged={ragged} and "
                f"output_mode={output_mode}"
            )

        if ragged and output_sequence_length is not None:
            raise ValueError(
                "`output_sequence_length` must not be set if ragged "
                f"is True. Received: ragged={ragged} and "
                f"output_sequence_length={output_sequence_length}"
            )

        self._max_tokens = max_tokens
        self._standardize = standardize
        self._split = split
        self._ngrams_arg = ngrams
        if isinstance(ngrams, int):
            self._ngrams = tuple(range(1, ngrams + 1))
        else:
            self._ngrams = ngrams
        self._ragged = ragged

        self._output_mode = output_mode
        self._output_sequence_length = output_sequence_length
        self._encoding = encoding

        # VocabularySavedModelSaver will clear the config vocabulary to
        # restore the lookup table ops directly. We persist this hidden
        # option to record the fact that we have a non-adaptable layer with
        # a manually set vocab.
        self._has_input_vocabulary = kwargs.pop(
            "has_input_vocabulary", (vocabulary is not None)
        )
        vocabulary_size = kwargs.pop("vocabulary_size", None)

        super().__init__(**kwargs)

        self._lookup_layer = string_lookup.StringLookup(
            max_tokens=max_tokens,
            vocabulary=vocabulary,
            idf_weights=idf_weights,
            pad_to_max_tokens=pad_to_max_tokens,
            mask_token="",
            output_mode=output_mode if output_mode is not None else INT,
            sparse=sparse,
            has_input_vocabulary=self._has_input_vocabulary,
            encoding=encoding,
            vocabulary_size=vocabulary_size,
        )

    def compute_output_shape(self, input_shape):
        if self._output_mode == INT:
            return tf.TensorShape(
                [input_shape[0], self._output_sequence_length]
            )
        if self._split is None:
            if len(input_shape) <= 1:
                input_shape = tuple(input_shape) + (1,)
        else:
            input_shape = tuple(input_shape) + (None,)
        return self._lookup_layer.compute_output_shape(input_shape)

    def compute_output_signature(self, input_spec):
        output_shape = self.compute_output_shape(input_spec.shape.as_list())
        output_dtype = (
            tf.int64 if self._output_mode == INT else backend.floatx()
        )
        return tf.TensorSpec(shape=output_shape, dtype=output_dtype)

    # We override this method solely to generate a docstring.
    def adapt(self, data, batch_size=None, steps=None):
        """Computes a vocabulary of string terms from tokens in a dataset.

        Calling `adapt()` on a `TextVectorization` layer is an alternative to
        passing in a precomputed vocabulary on construction via the
        `vocabulary` argument. A `TextVectorization` layer should always be
        either adapted over a dataset or supplied with a vocabulary.

        During `adapt()`, the layer will build a vocabulary of all string
        tokens seen in the dataset, sorted by occurrence count, with ties
        broken by sort order of the tokens (high to low). At the end of
        `adapt()`, if `max_tokens` is set, the vocabulary will be truncated
        to `max_tokens` size. For example, adapting a layer with
        `max_tokens=1000` will compute the 1000 most frequent tokens
        occurring in the input dataset. If `output_mode='tf-idf'`, `adapt()`
        will also learn the document frequencies of each token in the input
        dataset.

        In order to make `TextVectorization` efficient in any distribution
        context, the vocabulary is kept static with respect to any compiled
        `tf.Graph`s that call the layer. As a consequence, if the layer is
        adapted a second time, any models using the layer should be
        re-compiled. For more information see
        `tf.keras.layers.experimental.preprocessing.PreprocessingLayer.adapt`.

        `adapt()` is meant only as a single machine utility to compute layer
        state.
        To analyze a dataset that cannot fit on a single machine, see
        [Tensorflow Transform](
        https://www.tensorflow.org/tfx/transform/get_started) for a
        multi-machine, map-reduce solution.

        Arguments:
          data: The data to train on. It can be passed either as a
              `tf.data.Dataset`, or as a numpy array.
          batch_size: Integer or `None`. Number of samples per state update.
              If unspecified, `batch_size` will default to 32. Do not specify
              the `batch_size` if your data is in the form of datasets,
              generators, or `keras.utils.Sequence` instances (since they
              generate batches).
          steps: Integer or `None`. Total number of steps (batches of
              samples). When training with input tensors such as TensorFlow
              data tensors, the default `None` is equal to the number of
              samples in your dataset divided by the batch size, or 1 if that
              cannot be determined. If x is a `tf.data` dataset, and 'steps'
              is None, the epoch will run until the input dataset is
              exhausted. When passing an infinitely repeating dataset, you
              must specify the `steps` argument. This argument is not
              supported with array inputs.
        """
        super().adapt(data, batch_size=batch_size, steps=steps)

    def update_state(self, data):
        self._lookup_layer.update_state(self._preprocess(data))

    def finalize_state(self):
        self._lookup_layer.finalize_state()

    def reset_state(self):
        self._lookup_layer.reset_state()

    def get_vocabulary(self, include_special_tokens=True):
        """Returns the current vocabulary of the layer.

        Args:
          include_special_tokens: If True, the returned vocabulary will
            include the padding and OOV tokens, and a term's index in the
            vocabulary will equal the term's index when calling the layer.
            If False, the returned vocabulary will not include any padding
            or OOV tokens.
        """
        return self._lookup_layer.get_vocabulary(include_special_tokens)

    def vocabulary_size(self):
        """Gets the current size of the layer's vocabulary.

        Returns:
          The integer size of the vocabulary, including optional mask and
          OOV indices.
        """
        return self._lookup_layer.vocabulary_size()

    def get_config(self):
        config = {
            "max_tokens": self._lookup_layer.max_tokens,
            "standardize": self._standardize,
            "split": self._split,
            "ngrams": self._ngrams_arg,
            "output_mode": self._output_mode,
            "output_sequence_length": self._output_sequence_length,
            "pad_to_max_tokens": self._lookup_layer.pad_to_max_tokens,
            "sparse": self._lookup_layer.sparse,
            "ragged": self._ragged,
            "vocabulary": utils.listify_tensors(
                self._lookup_layer.input_vocabulary
            ),
            "idf_weights": utils.listify_tensors(
                self._lookup_layer.input_idf_weights
            ),
            "encoding": self._encoding,
            "vocabulary_size": self.vocabulary_size(),
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config):
        if config["standardize"] not in (
            LOWER_AND_STRIP_PUNCTUATION,
            LOWER,
            STRIP_PUNCTUATION,
        ):
            config["standardize"] = deserialize_keras_object(
                config["standardize"]
            )
        if config["split"] not in (WHITESPACE, CHARACTER):
            config["split"] = deserialize_keras_object(config["split"])
        return cls(**config)

    def set_vocabulary(self, vocabulary, idf_weights=None):
        """Sets vocabulary (and optionally document frequency) for this layer.

        This method sets the vocabulary and idf weights for this layer
        directly, instead of analyzing a dataset through 'adapt'. It should
        be used whenever the vocab (and optionally document frequency)
        information is already known. If vocabulary data is already present
        in the layer, this method will replace it.

        Args:
          vocabulary: Either an array or a string path to a text file.
            If passing an array, can pass a tuple, list, 1D numpy array, or
            1D tensor containing the vocabulary terms. If passing a file
            path, the file should contain one line per term in the
            vocabulary.
          idf_weights: A tuple, list, 1D numpy array, or 1D tensor of inverse
            document frequency weights with equal length to vocabulary. Must
            be set if `output_mode` is `"tf_idf"`. Should not be set
            otherwise.

        Raises:
          ValueError: If there are too many inputs, the inputs do not match,
            or input data is missing.
          RuntimeError: If the vocabulary cannot be set when this function is
            called. This happens in `"multi_hot"`, `"count"`, and `"tf_idf"`
            modes if `pad_to_max_tokens` is False and the layer itself has
            already been called.
        """
        self._lookup_layer.set_vocabulary(vocabulary, idf_weights=idf_weights)

    def _preprocess(self, inputs):
        inputs = utils.ensure_tensor(inputs, dtype=tf.string)
        if self._standardize in (LOWER, LOWER_AND_STRIP_PUNCTUATION):
            inputs = tf.strings.lower(inputs)
        if self._standardize in (
            STRIP_PUNCTUATION,
            LOWER_AND_STRIP_PUNCTUATION,
        ):
            inputs = tf.strings.regex_replace(inputs, DEFAULT_STRIP_REGEX, "")
        if callable(self._standardize):
            inputs = self._standardize(inputs)

        if self._split is not None:
            # If we are splitting, we validate that the last axis is of
            # dimension 1 and so can be squeezed out. We do this here instead
            # of after splitting for performance reasons - it's more
            # expensive to squeeze a ragged tensor.
            if inputs.shape.rank > 1:
                if inputs.shape[-1] != 1:
                    raise ValueError(
                        "When using `TextVectorization` to tokenize strings, "
                        "the input rank must be 1 or the last shape dimension "
                        f"must be 1. Received: inputs.shape={inputs.shape} "
                        f"with rank={inputs.shape.rank}"
                    )
                else:
                    inputs = tf.squeeze(inputs, axis=-1)
            if self._split == WHITESPACE:
                # This treats multiple whitespaces as one whitespace, and
                # strips leading and trailing whitespace.
                inputs = tf.strings.split(inputs)
            elif self._split == CHARACTER:
                inputs = tf.strings.unicode_split(inputs, "UTF-8")
            elif callable(self._split):
                inputs = self._split(inputs)
            else:
                raise ValueError(
                    f"{self._split} is not a supported splitting option. "
                    "TextVectorization supports the following options "
                    "for `split`: None, 'whitespace', 'character', or a "
                    "Callable."
                )

        # Note that 'inputs' here can be either ragged or dense depending on
        # the configuration choices for this Layer. The strings.ngrams op,
        # however, does support both ragged and dense inputs.
        if self._ngrams is not None:
            inputs = tf.strings.ngrams(
                inputs, ngram_width=self._ngrams, separator=" "
            )

        return inputs

    def call(self, inputs):
        if isinstance(inputs, (list, tuple, np.ndarray)):
            inputs = tf.convert_to_tensor(inputs)

        inputs = self._preprocess(inputs)

        # If we're not doing any output processing, return right away.
        if self._output_mode is None:
            return inputs

        lookup_data = self._lookup_layer(inputs)

        # For any non-int output, we can return directly from the underlying
        # layer.
        if self._output_mode != INT:
            return lookup_data

        if self._ragged:
            return lookup_data

        # If we have a ragged tensor, we can pad during the conversion to
        # dense.
        if tf_utils.is_ragged(lookup_data):
            shape = lookup_data.shape.as_list()
            # If output sequence length is None, to_tensor will pad the last
            # dimension to the bounding shape of the ragged dimension.
            shape[-1] = self._output_sequence_length
            return lookup_data.to_tensor(default_value=0, shape=shape)

        # If we have a dense tensor, we need to pad/trim directly.
        if self._output_sequence_length is not None:
            # Maybe trim the output.
lookup_data = lookup_data[..., : self._output_sequence_length] # Maybe pad the output. We need to be careful to use dynamic shape # here as required_space_to_batch_paddings requires a fully known # shape. shape = tf.shape(lookup_data) padded_shape = tf.concat( (shape[:-1], [self._output_sequence_length]), 0 ) padding, _ = tf.required_space_to_batch_paddings( shape, padded_shape ) return tf.pad(lookup_data, padding) return lookup_data @property def _trackable_saved_model_saver(self): return layer_serialization.VocabularySavedModelSaver(self) def save_own_variables(self, store): self._lookup_layer.save_own_variables(store) def load_own_variables(self, store): self._lookup_layer.load_own_variables(store) def save_assets(self, dir_path): self._lookup_layer.save_assets(dir_path) def load_assets(self, dir_path): self._lookup_layer.load_assets(dir_path)
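
# Illustrative usage (editor's sketch, not part of the original module),
# showing the pad/trim behavior implemented in `call` above when
# `output_sequence_length` is set. "the", "quick", "brown", "jumped", and
# "a" are out-of-vocabulary and map to index 1; "fox" maps to index 2.
if __name__ == "__main__":
    layer = TextVectorization(
        vocabulary=["fox", "dog"], output_sequence_length=4
    )
    # Five tokens are trimmed down to `output_sequence_length` tokens.
    print(layer([["the quick brown fox jumped"]]))  # [[1 1 1 2]]
    # Two tokens are right-padded with the mask index 0 up to length four.
    print(layer([["the fox"]]))  # [[1 2 0 0]]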
tf-keras/tf_keras/layers/preprocessing/text_vectorization.py/0
{ "file_path": "tf-keras/tf_keras/layers/preprocessing/text_vectorization.py", "repo_id": "tf-keras", "token_count": 12182 }
206
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the SpatialDropout2D layer."""

import tensorflow.compat.v2 as tf

from tf_keras import backend
from tf_keras.engine.input_spec import InputSpec
from tf_keras.layers.regularization.dropout import Dropout

# isort: off
from tensorflow.python.util.tf_export import keras_export


@keras_export("keras.layers.SpatialDropout2D")
class SpatialDropout2D(Dropout):
    """Spatial 2D version of Dropout.

    This version performs the same function as Dropout, however, it drops
    entire 2D feature maps instead of individual elements. If adjacent pixels
    within feature maps are strongly correlated (as is normally the case in
    early convolution layers) then regular dropout will not regularize the
    activations and will otherwise just result in an effective learning rate
    decrease. In this case, SpatialDropout2D will help promote independence
    between feature maps and should be used instead.

    Args:
      rate: Float between 0 and 1. Fraction of the input units to drop.
      data_format: 'channels_first' or 'channels_last'. In 'channels_first'
        mode, the channels dimension (the depth) is at index 1, in
        'channels_last' mode it is at index 3. When unspecified, uses
        `image_data_format` value found in your TF-Keras config file at
        `~/.keras/keras.json` (if exists) else 'channels_last'. Defaults to
        'channels_last'.

    Call arguments:
      inputs: A 4D tensor.
      training: Python boolean indicating whether the layer should behave in
        training mode (adding dropout) or in inference mode (doing nothing).

    Input shape:
      4D tensor with shape: `(samples, channels, rows, cols)` if
        data_format='channels_first'
      or 4D tensor with shape: `(samples, rows, cols, channels)` if
        data_format='channels_last'.

    Output shape: Same as input.

    References:
      - [Efficient Object Localization Using Convolutional
        Networks](https://arxiv.org/abs/1411.4280)
    """

    def __init__(self, rate, data_format=None, **kwargs):
        super().__init__(rate, **kwargs)
        if data_format is None:
            data_format = backend.image_data_format()
        if data_format not in {"channels_last", "channels_first"}:
            raise ValueError(
                '`data_format` must be "channels_last" or "channels_first". '
                f"Received: data_format={data_format}."
            )
        self.data_format = data_format
        self.input_spec = InputSpec(ndim=4)

    def _get_noise_shape(self, inputs):
        input_shape = tf.shape(inputs)
        if self.data_format == "channels_first":
            return (input_shape[0], input_shape[1], 1, 1)
        elif self.data_format == "channels_last":
            return (input_shape[0], 1, 1, input_shape[3])
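
# Illustrative usage (editor's sketch, not part of the original module):
# SpatialDropout2D zeroes entire feature maps, so within one channel every
# spatial position shares the same keep/drop decision.
if __name__ == "__main__":
    layer = SpatialDropout2D(0.5)
    images = tf.ones((2, 4, 4, 3))  # (samples, rows, cols, channels)
    dropped = layer(images, training=True)
    # Each (sample, channel) slice is either all zeros or all scaled by
    # 1 / (1 - rate) == 2.0, because the noise shape broadcasts one
    # keep/drop decision over the whole 4x4 map.
    print(dropped[0, :, :, 0])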
tf-keras/tf_keras/layers/regularization/spatial_dropout2d.py/0
{ "file_path": "tf-keras/tf_keras/layers/regularization/spatial_dropout2d.py", "repo_id": "tf-keras", "token_count": 1204 }
207
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for recurrent layers functionality other than GRU, LSTM, SimpleRNN. See also: lstm_test.py, gru_test.py, simplernn_test.py. """ import collections import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.engine import base_layer_utils from tf_keras.layers.rnn import gru from tf_keras.layers.rnn import gru_v1 from tf_keras.layers.rnn import lstm from tf_keras.layers.rnn import lstm_v1 from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils # isort: off from tensorflow.python.checkpoint import ( checkpoint as trackable_util, ) # Used for nested input/output/state RNN test. NestedInput = collections.namedtuple("NestedInput", ["t1", "t2"]) NestedState = collections.namedtuple("NestedState", ["s1", "s2"]) @test_combinations.run_all_keras_modes class RNNTest(test_combinations.TestCase): def test_minimal_rnn_cell_non_layer(self): class MinimalRNNCell: def __init__(self, units, input_dim): self.units = units self.state_size = units self.kernel = keras.backend.variable( np.random.random((input_dim, units)) ) def call(self, inputs, states): prev_output = states[0] output = keras.backend.dot(inputs, self.kernel) + prev_output return output, [output] # Basic test case. cell = MinimalRNNCell(32, 5) x = keras.Input((None, 5)) layer = keras.layers.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. cells = [ MinimalRNNCell(8, 5), MinimalRNNCell(32, 8), MinimalRNNCell(32, 32), ] layer = keras.layers.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) def test_minimal_rnn_cell_non_layer_multiple_states(self): class MinimalRNNCell: def __init__(self, units, input_dim): self.units = units self.state_size = (units, units) self.kernel = keras.backend.variable( np.random.random((input_dim, units)) ) def call(self, inputs, states): prev_output_1 = states[0] prev_output_2 = states[1] output = keras.backend.dot(inputs, self.kernel) output += prev_output_1 output -= prev_output_2 return output, [output * 2, output * 3] # Basic test case. cell = MinimalRNNCell(32, 5) x = keras.Input((None, 5)) layer = keras.layers.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. 
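        # (Editorial note: stacking wraps the cells in a StackedRNNCells,
        # whose state_size is the tuple of per-cell state sizes - each cell
        # here carries two states of `units` each, hence the nested tuples
        # asserted in the multi-state test below.)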
cells = [ MinimalRNNCell(8, 5), MinimalRNNCell(16, 8), MinimalRNNCell(32, 16), ] layer = keras.layers.RNN(cells) self.assertEqual(layer.cell.state_size, ((8, 8), (16, 16), (32, 32))) self.assertEqual(layer.cell.output_size, 32) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) def test_minimal_rnn_cell_layer(self): class MinimalRNNCell(keras.layers.Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super().__init__(**kwargs) def build(self, input_shape): self.kernel = self.add_weight( shape=(input_shape[-1], self.units), initializer="uniform", name="kernel", ) self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer="uniform", name="recurrent_kernel", ) self.built = True def call(self, inputs, states): prev_output = states[0] h = keras.backend.dot(inputs, self.kernel) output = h + keras.backend.dot( prev_output, self.recurrent_kernel ) return output, [output] def get_config(self): config = {"units": self.units} base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) # Test basic case. x = keras.Input((None, 5)) cell = MinimalRNNCell(32) layer = keras.layers.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test basic case serialization. x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() with keras.utils.CustomObjectScope({"MinimalRNNCell": MinimalRNNCell}): layer = keras.layers.RNN.from_config(config) y = layer(x) model = keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) self.assertAllClose(y_np, y_np_2, atol=1e-4) # Test stacking. cells = [MinimalRNNCell(8), MinimalRNNCell(12), MinimalRNNCell(32)] layer = keras.layers.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacked RNN serialization. 
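        # (Editorial note: the round-trip below rebuilds the stacked layer
        # via get_config()/from_config() and checks that, with the original
        # weights restored, it reproduces the original predictions.)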
x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() with keras.utils.CustomObjectScope({"MinimalRNNCell": MinimalRNNCell}): layer = keras.layers.RNN.from_config(config) y = layer(x) model = keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) self.assertAllClose(y_np, y_np_2, atol=1e-4) def test_minimal_rnn_cell_abstract_rnn_cell(self): class MinimalRNNCell(keras.layers.AbstractRNNCell): def __init__(self, units, **kwargs): self.units = units super().__init__(**kwargs) @property def state_size(self): return self.units def build(self, input_shape): self.kernel = self.add_weight( shape=(input_shape[-1], self.units), initializer="uniform", name="kernel", ) self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer="uniform", name="recurrent_kernel", ) self.built = True def call(self, inputs, states): prev_output = states[0] h = keras.backend.dot(inputs, self.kernel) output = h + keras.backend.dot( prev_output, self.recurrent_kernel ) return output, output @property def output_size(self): return self.units cell = MinimalRNNCell(32) x = keras.Input((None, 5)) layer = keras.layers.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. cells = [MinimalRNNCell(8), MinimalRNNCell(16), MinimalRNNCell(32)] layer = keras.layers.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) def test_rnn_with_time_major(self): batch = 10 time_step = 5 embedding_dim = 4 units = 3 # Test basic case. x = keras.Input((time_step, embedding_dim)) time_major_x = keras.layers.Lambda( lambda t: tf.transpose(t, [1, 0, 2]) )(x) layer = keras.layers.SimpleRNN( units, time_major=True, return_sequences=True ) self.assertEqual( layer.compute_output_shape( (time_step, None, embedding_dim) ).as_list(), [time_step, None, units], ) y = layer(time_major_x) self.assertEqual(layer.output_shape, (time_step, None, units)) y = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(y) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( np.zeros((batch, time_step, embedding_dim)), np.zeros((batch, time_step, units)), ) # Test stacking. x = keras.Input((time_step, embedding_dim)) time_major_x = keras.layers.Lambda( lambda t: tf.transpose(t, [1, 0, 2]) )(x) cell_units = [10, 8, 6] cells = [keras.layers.SimpleRNNCell(cell_units[i]) for i in range(3)] layer = keras.layers.RNN(cells, time_major=True, return_sequences=True) y = layer(time_major_x) self.assertEqual(layer.output_shape, (time_step, None, cell_units[-1])) y = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(y) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( np.zeros((batch, time_step, embedding_dim)), np.zeros((batch, time_step, cell_units[-1])), ) # Test masking. 
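        # (Editorial note: here Masking is applied to the transposed
        # (time, batch, feature) tensor, so the mask must compose correctly
        # with time_major=True.)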
x = keras.Input((time_step, embedding_dim)) time_major = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))( x ) mask = keras.layers.Masking()(time_major) rnn = keras.layers.SimpleRNN( units, time_major=True, return_sequences=True )(mask) y = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(rnn) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( np.zeros((batch, time_step, embedding_dim)), np.zeros((batch, time_step, units)), ) # Test layer output x = keras.Input((time_step, embedding_dim)) rnn_1 = keras.layers.SimpleRNN(units, return_sequences=True) y = rnn_1(x) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( np.zeros((batch, time_step, embedding_dim)), np.zeros((batch, time_step, units)), ) x_np = np.random.random((batch, time_step, embedding_dim)) y_np_1 = model.predict(x_np) time_major = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))( x ) rnn_2 = keras.layers.SimpleRNN( units, time_major=True, return_sequences=True ) y_2 = rnn_2(time_major) y_2 = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(y_2) model_2 = keras.models.Model(x, y_2) rnn_2.set_weights(rnn_1.get_weights()) y_np_2 = model_2.predict(x_np) self.assertAllClose(y_np_1, y_np_2, atol=1e-4) def test_rnn_cell_with_constants_layer(self): # Test basic case. x = keras.Input((None, 5)) c = keras.Input((3,)) cell = RNNCellWithConstants(32, constant_size=3) layer = keras.layers.RNN(cell) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32)) ) # Test basic case serialization. x_np = np.random.random((6, 5, 5)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, c_np]) weights = model.get_weights() config = layer.get_config() custom_objects = {"RNNCellWithConstants": RNNCellWithConstants} with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.RNN.from_config(config.copy()) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, c_np]) self.assertAllClose(y_np, y_np_2, atol=1e-4) # test flat list inputs. with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.RNN.from_config(config.copy()) y = layer([x, c]) model = keras.models.Model([x, c], y) model.set_weights(weights) y_np_3 = model.predict([x_np, c_np]) self.assertAllClose(y_np, y_np_3, atol=1e-4) # Test stacking. cells = [ gru.GRUCell(8), RNNCellWithConstants(12, constant_size=3), RNNCellWithConstants(32, constant_size=3), ] layer = keras.layers.RNN(cells) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32)) ) # Test GRUCell reset_after property. 
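        # (Editorial note: reset_after=True applies the reset gate after the
        # recurrent matrix multiplication - the CuDNN-compatible GRU variant,
        # which carries a second recurrent bias.)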
x = keras.Input((None, 5)) c = keras.Input((3,)) cells = [gru.GRUCell(32, reset_after=True)] layer = keras.layers.RNN(cells) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32)) ) # Test stacked RNN serialization x_np = np.random.random((6, 5, 5)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, c_np]) weights = model.get_weights() config = layer.get_config() with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.RNN.from_config(config.copy()) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, c_np]) self.assertAllClose(y_np, y_np_2, atol=1e-4) def test_rnn_cell_with_non_keras_constants(self): # Test basic case. x = keras.Input((None, 5)) c = tf.zeros([6, 3], dtype=tf.float32) cell = RNNCellWithConstants(32, constant_size=3) layer = keras.layers.RNN(cell) y = layer(x, constants=c) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. cells = [ gru.GRUCell(8), RNNCellWithConstants(12, constant_size=3), RNNCellWithConstants(32, constant_size=3), ] layer = keras.layers.RNN(cells) y = layer(x, constants=c) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) def test_rnn_cell_with_constants_layer_passing_initial_state(self): # Test basic case. x = keras.Input((None, 5)) c = keras.Input((3,)) s = keras.Input((32,)) cell = RNNCellWithConstants(32, constant_size=3) layer = keras.layers.RNN(cell) y = layer(x, initial_state=s, constants=c) model = keras.models.Model([x, s, c], y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 3))], np.zeros((6, 32)), ) # Test basic case serialization. x_np = np.random.random((6, 5, 5)) s_np = np.random.random((6, 32)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, s_np, c_np]) weights = model.get_weights() config = layer.get_config() custom_objects = {"RNNCellWithConstants": RNNCellWithConstants} with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.RNN.from_config(config.copy()) y = layer(x, initial_state=s, constants=c) model = keras.models.Model([x, s, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, s_np, c_np]) self.assertAllClose(y_np, y_np_2, atol=1e-4) # verify that state is used y_np_2_different_s = model.predict([x_np, s_np + 10.0, c_np]) with self.assertRaises(AssertionError): self.assertAllClose(y_np, y_np_2_different_s, atol=1e-4) # test flat list inputs with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.RNN.from_config(config.copy()) y = layer([x, s, c]) model = keras.models.Model([x, s, c], y) model.set_weights(weights) y_np_3 = model.predict([x_np, s_np, c_np]) self.assertAllClose(y_np, y_np_3, atol=1e-4) def test_rnn_cell_with_non_keras_constants_and_initial_state(self): # Test basic case. 
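        # (Editorial note: the constants and initial state here are plain
        # tensors rather than Keras Inputs, so they are captured by the layer
        # instead of being fed as model inputs.)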
x = keras.Input((None, 5)) c = tf.zeros([6, 3], dtype=tf.float32) s = tf.zeros([6, 32], dtype=tf.float32) cell = RNNCellWithConstants(32, constant_size=3) layer = keras.layers.RNN(cell) y = layer(x, initial_state=s, constants=c) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. cells = [ gru.GRUCell(8), RNNCellWithConstants(12, constant_size=3), RNNCellWithConstants(32, constant_size=3), ] layer = keras.layers.RNN(cells) s = [ tf.zeros([6, 8], dtype=tf.float32), tf.zeros([6, 12], dtype=tf.float32), tf.zeros([6, 32], dtype=tf.float32), ] y = layer(x, initial_state=s, constants=c) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) def test_stacked_rnn_attributes(self): if tf.executing_eagerly(): self.skipTest("reduce_sum is not available in eager mode.") cells = [keras.layers.LSTMCell(1), keras.layers.LSTMCell(1)] layer = keras.layers.RNN(cells) layer.build((None, None, 1)) # Test weights self.assertEqual(len(layer.trainable_weights), 6) cells[0].trainable = False self.assertEqual(len(layer.trainable_weights), 3) self.assertEqual(len(layer.non_trainable_weights), 3) # Test `get_losses_for` and `losses` x = keras.Input((None, 1)) loss_1 = tf.reduce_sum(x) loss_2 = tf.reduce_sum(cells[0].kernel) cells[0].add_loss(loss_1, inputs=x) cells[0].add_loss(loss_2) self.assertEqual(len(layer.losses), 2) self.assertEqual(layer.get_losses_for(None), [loss_2]) self.assertEqual(layer.get_losses_for(x), [loss_1]) # Test `updates` cells = [keras.layers.LSTMCell(1), keras.layers.LSTMCell(1)] layer = keras.layers.RNN(cells) x = keras.Input((None, 1)) _ = layer(x) update_1 = tf.compat.v1.assign_add( cells[0].kernel, x[0, 0, 0] * cells[0].kernel ) update_2 = tf.compat.v1.assign_add( cells[0].kernel, tf.ones_like(cells[0].kernel) ) # TODO(b/128682878): Remove when RNNCells are __call__'d. with base_layer_utils.call_context().enter(layer, x, True, None): cells[0].add_update(update_1) cells[0].add_update(update_2) self.assertEqual(len(layer.updates), 2) def test_rnn_dynamic_trainability(self): layer_class = keras.layers.SimpleRNN embedding_dim = 4 units = 3 layer = layer_class(units) layer.build((None, None, embedding_dim)) self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.trainable_weights), 3) self.assertEqual(len(layer.non_trainable_weights), 0) layer.trainable = False self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.trainable_weights), 0) self.assertEqual(len(layer.non_trainable_weights), 3) layer.trainable = True self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.trainable_weights), 3) self.assertEqual(len(layer.non_trainable_weights), 0) @parameterized.parameters( [keras.layers.SimpleRNN, keras.layers.GRU, keras.layers.LSTM] ) def test_rnn_cell_trainability(self, layer_cls): # https://github.com/tensorflow/tensorflow/issues/32369. 
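        # (Editorial note: `trainable` passed to the layer constructor must
        # propagate to the wrapped cell, both at construction time and when
        # it is toggled afterwards.)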
layer = layer_cls(3, trainable=False) self.assertFalse(layer.cell.trainable) layer.trainable = True self.assertTrue(layer.cell.trainable) def test_state_reuse_with_dropout(self): layer_class = keras.layers.SimpleRNN embedding_dim = 4 units = 3 timesteps = 2 num_samples = 2 input1 = keras.Input( batch_shape=(num_samples, timesteps, embedding_dim) ) layer = layer_class( units, return_state=True, return_sequences=True, dropout=0.2 ) state = layer(input1)[1:] input2 = keras.Input( batch_shape=(num_samples, timesteps, embedding_dim) ) output = layer_class(units)(input2, initial_state=state) model = keras.Model([input1, input2], output) inputs = [ np.random.random((num_samples, timesteps, embedding_dim)), np.random.random((num_samples, timesteps, embedding_dim)), ] model.predict(inputs) def test_builtin_and_custom_rnn_cell_serialization(self): @keras.utils.register_keras_serializable(package="TestOnly") class CustomRNNCell(keras.layers.Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super().__init__(**kwargs) def build(self, input_shape): self.kernel = self.add_weight( shape=(input_shape[-1], self.units), initializer="uniform", name="kernel", ) self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer="uniform", name="recurrent_kernel", ) self.built = True def call(self, inputs, states): prev_output = states[0] h = keras.backend.dot(inputs, self.kernel) output = h + keras.backend.dot( prev_output, self.recurrent_kernel ) return output, [output] def get_config(self): config = {"units": self.units} base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) for cell_class in [ keras.layers.SimpleRNNCell, keras.layers.GRUCell, keras.layers.LSTMCell, CustomRNNCell, ]: # Test basic case. x = keras.Input((None, 5)) cell = cell_class(32) layer = keras.layers.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) # Test basic case serialization. x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() layer = keras.layers.RNN.from_config(config) y = layer(x) model = keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) self.assertAllClose(y_np, y_np_2, atol=1e-4) # Test stacking. cells = [cell_class(8), cell_class(12), cell_class(32)] layer = keras.layers.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) # Test stacked RNN serialization. 
            x_np = np.random.random((6, 5, 5))
            y_np = model.predict(x_np)
            weights = model.get_weights()
            config = layer.get_config()
            layer = keras.layers.RNN.from_config(config)
            y = layer(x)
            model = keras.models.Model(x, y)
            model.set_weights(weights)
            y_np_2 = model.predict(x_np)
            self.assertAllClose(y_np, y_np_2, atol=1e-4)

    @parameterized.named_parameters(
        *test_utils.generate_combinations_with_testcase_name(
            layer=[
                keras.layers.SimpleRNN,
                gru_v1.GRU,
                lstm_v1.LSTM,
                gru.GRU,
                lstm.LSTM,
            ],
            unroll=[True, False],
        )
    )
    def test_rnn_dropout(self, layer, unroll):
        rnn_layer = layer(3, dropout=0.1, recurrent_dropout=0.1, unroll=unroll)
        if not unroll:
            x = keras.Input((None, 5))
        else:
            x = keras.Input((5, 5))
        y = rnn_layer(x)
        model = keras.models.Model(x, y)
        model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
        x_np = np.random.random((6, 5, 5))
        y_np = np.random.random((6, 3))
        model.train_on_batch(x_np, y_np)

    @parameterized.named_parameters(
        *test_utils.generate_combinations_with_testcase_name(
            cell=[
                keras.layers.SimpleRNNCell,
                keras.layers.GRUCell,
                keras.layers.LSTMCell,
            ],
            unroll=[True, False],
        )
    )
    def test_stacked_rnn_dropout(self, cell, unroll):
        cells = [
            cell(3, dropout=0.1, recurrent_dropout=0.1),
            cell(3, dropout=0.1, recurrent_dropout=0.1),
        ]
        layer = keras.layers.RNN(cells, unroll=unroll)

        if not unroll:
            x = keras.Input((None, 5))
        else:
            x = keras.Input((5, 5))
        y = layer(x)
        model = keras.models.Model(x, y)
        model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
        x_np = np.random.random((6, 5, 5))
        y_np = np.random.random((6, 3))
        model.train_on_batch(x_np, y_np)

    def test_dropout_mask_reuse(self):
        # The layer is created with recurrent_initializer = zero, so that
        # the recurrent state won't affect the output. By doing this, we can
        # verify the output and see if the same mask is applied for each
        # timestep.
        layer_1 = keras.layers.SimpleRNN(
            3,
            dropout=0.5,
            kernel_initializer="ones",
            recurrent_initializer="zeros",
            return_sequences=True,
            unroll=True,
        )
        layer_2 = keras.layers.RNN(
            keras.layers.SimpleRNNCell(
                3,
                dropout=0.5,
                kernel_initializer="ones",
                recurrent_initializer="zeros",
            ),
            return_sequences=True,
            unroll=True,
        )
        layer_3 = keras.layers.RNN(
            [
                keras.layers.SimpleRNNCell(
                    3,
                    dropout=0.5,
                    kernel_initializer="ones",
                    recurrent_initializer="zeros",
                ),
                keras.layers.SimpleRNNCell(
                    3,
                    dropout=0.5,
                    kernel_initializer="ones",
                    recurrent_initializer="zeros",
                ),
            ],
            return_sequences=True,
            unroll=True,
        )

        def verify(rnn_layer):
            inputs = tf.constant(1.0, shape=(6, 2, 5))
            out = rnn_layer(inputs, training=True)
            if not tf.executing_eagerly():
                self.evaluate(tf.compat.v1.global_variables_initializer())
            batch_1 = self.evaluate(out)
            batch_1_t0, batch_1_t1 = batch_1[:, 0, :], batch_1[:, 1, :]
            self.assertAllClose(batch_1_t0, batch_1_t1)

            # This simulates the layer being called with multiple batches in
            # eager mode.
            if tf.executing_eagerly():
                out2 = rnn_layer(inputs, training=True)
            else:
                out2 = out
            batch_2 = self.evaluate(out2)
            batch_2_t0, batch_2_t1 = batch_2[:, 0, :], batch_2[:, 1, :]
            self.assertAllClose(batch_2_t0, batch_2_t1)

            # Also validate that different dropout is used between batches.
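            # (Dropout masks are held fixed across the timesteps of a single
            # forward pass but are resampled on each new forward pass, which
            # is what the two assertions below rely on.)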
self.assertNotAllClose(batch_1_t0, batch_2_t0) self.assertNotAllClose(batch_1_t1, batch_2_t1) for l in [layer_1, layer_2, layer_3]: verify(l) def test_stacked_rnn_compute_output_shape(self): cells = [keras.layers.LSTMCell(3), keras.layers.LSTMCell(6)] embedding_dim = 4 timesteps = 2 layer = keras.layers.RNN( cells, return_state=True, return_sequences=True ) output_shape = layer.compute_output_shape( (None, timesteps, embedding_dim) ) expected_output_shape = [ (None, timesteps, 6), (None, 3), (None, 3), (None, 6), (None, 6), ] self.assertEqual( [tuple(o.as_list()) for o in output_shape], expected_output_shape ) # Test reverse_state_order = True for stacked cell. stacked_cell = keras.layers.StackedRNNCells( cells, reverse_state_order=True ) layer = keras.layers.RNN( stacked_cell, return_state=True, return_sequences=True ) output_shape = layer.compute_output_shape( (None, timesteps, embedding_dim) ) expected_output_shape = [ (None, timesteps, 6), (None, 6), (None, 6), (None, 3), (None, 3), ] self.assertEqual( [tuple(o.as_list()) for o in output_shape], expected_output_shape ) def test_stacked_rnn_with_training_param(self): # See https://github.com/tensorflow/tensorflow/issues/32586 class CellWrapper(keras.layers.AbstractRNNCell): def __init__(self, cell): super().__init__() self.cell = cell @property def state_size(self): return self.cell.state_size @property def output_size(self): return self.cell.output_size def build(self, input_shape): self.cell.build(input_shape) self.built = True def get_initial_state( self, inputs=None, batch_size=None, dtype=None ): return self.cell.get_initial_state( inputs=inputs, batch_size=batch_size, dtype=dtype ) def call(self, inputs, states, training=None, **kwargs): assert training is not None return self.cell(inputs, states=states, training=training) cell = keras.layers.LSTMCell(32) cell = CellWrapper(cell) cell = keras.layers.StackedRNNCells([cell]) rnn = keras.layers.RNN(cell) inputs = np.ones((8, 4, 16), dtype=np.float32) rnn(inputs, training=True) def test_stacked_rnn_with_nested_cell(self): batch = 10 t = 5 i1, i2, i3 = 3, 4, 5 o11, o12, o13 = 2, 3, 4 o21, o22, o23 = 4, 5, 6 # test 1: use_tuple=False cells = [NestedCell(o11, o12, o13), NestedCell(o21, o22, o23)] rnn = keras.layers.RNN(cells, return_sequences=True, return_state=True) input_1 = keras.Input((t, i1)) input_2 = keras.Input((t, i2, i3)) output1, output2, state1, state2 = rnn((input_1, input_2)) s11, s12 = state1 s21, s22 = state2 self.assertEqual(output1.shape.as_list(), [None, t, o21]) self.assertEqual(output2.shape.as_list(), [None, t, o22, o23]) self.assertEqual(s11.shape.as_list(), [None, o11]) self.assertEqual(s12.shape.as_list(), [None, o12, o13]) self.assertEqual(s21.shape.as_list(), [None, o21]) self.assertEqual(s22.shape.as_list(), [None, o22, o23]) model = keras.models.Model([input_1, input_2], [output1, output2]) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))], [np.zeros((batch, t, o21)), np.zeros((batch, t, o22, o23))], ) self.assertEqual( model.output_shape, [(None, t, o21), (None, t, o22, o23)] ) # test 2: use_tuple=True cells = [ NestedCell(o11, o12, o13, use_tuple=True), NestedCell(o21, o22, o23), ] rnn = keras.layers.RNN(cells, return_sequences=True, return_state=True) input_1 = keras.Input((t, i1)) input_2 = keras.Input((t, i2, i3)) output1, output2, state1, state2 = rnn( NestedInput(t1=input_1, t2=input_2) ) s11, s12 = state1 s21, s22 = state2 
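        # (Editorial note: the outputs come from the last cell in the stack,
        # while the returned states preserve each cell's own nested
        # structure, as the shape checks below verify.)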
self.assertEqual(output1.shape.as_list(), [None, t, o21]) self.assertEqual(output2.shape.as_list(), [None, t, o22, o23]) self.assertEqual(s11.shape.as_list(), [None, o11]) self.assertEqual(s12.shape.as_list(), [None, o12, o13]) self.assertEqual(s21.shape.as_list(), [None, o21]) self.assertEqual(s22.shape.as_list(), [None, o22, o23]) model = keras.models.Model([input_1, input_2], [output1, output2]) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))], [np.zeros((batch, t, o21)), np.zeros((batch, t, o22, o23))], ) self.assertEqual( model.output_shape, [(None, t, o21), (None, t, o22, o23)] ) def test_trackable_dependencies(self): rnn = keras.layers.SimpleRNN x = np.random.random((2, 2, 2)) y = np.random.random((2, 2)) model = keras.models.Sequential() model.add(rnn(2)) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.fit(x, y, epochs=1, batch_size=1) # check whether the model variables are present in the # trackable list of objects checkpointed_objects = { id(o) for o in trackable_util.list_objects(model) } for v in model.variables: self.assertIn(id(v), checkpointed_objects) def test_high_dimension_RNN(self): # Basic test case. unit_a = 10 unit_b = 20 input_a = 5 input_b = 10 batch = 32 time_step = 4 cell = Minimal2DRNNCell(unit_a, unit_b) x = keras.Input((None, input_a, input_b)) layer = keras.layers.RNN(cell) y = layer(x) self.assertEqual(cell.state_size.as_list(), [unit_a, unit_b]) if not tf.executing_eagerly(): init_state = layer.get_initial_state(x) self.assertEqual(len(init_state), 1) self.assertEqual( init_state[0].shape.as_list(), [None, unit_a, unit_b] ) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( np.zeros((batch, time_step, input_a, input_b)), np.zeros((batch, unit_a, unit_b)), ) self.assertEqual(model.output_shape, (None, unit_a, unit_b)) # Test stacking. cells = [ Minimal2DRNNCell(unit_a, unit_b), Minimal2DRNNCell(unit_a * 2, unit_b * 2), Minimal2DRNNCell(unit_a * 4, unit_b * 4), ] layer = keras.layers.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( np.zeros((batch, time_step, input_a, input_b)), np.zeros((batch, unit_a * 4, unit_b * 4)), ) self.assertEqual(model.output_shape, (None, unit_a * 4, unit_b * 4)) def test_high_dimension_RNN_with_init_state(self): unit_a = 10 unit_b = 20 input_a = 5 input_b = 10 batch = 32 time_step = 4 # Basic test case. cell = Minimal2DRNNCell(unit_a, unit_b) x = keras.Input((None, input_a, input_b)) s = keras.Input((unit_a, unit_b)) layer = keras.layers.RNN(cell) y = layer(x, initial_state=s) model = keras.models.Model([x, s], y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( [ np.zeros((batch, time_step, input_a, input_b)), np.zeros((batch, unit_a, unit_b)), ], np.zeros((batch, unit_a, unit_b)), ) self.assertEqual(model.output_shape, (None, unit_a, unit_b)) # Bad init state shape. 
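        # (Editorial note: an initial state whose shape disagrees with
        # `cell.state_size` should raise a ValueError up front rather than
        # fail inside the recurrent loop.)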
bad_shape_a = unit_a * 2 bad_shape_b = unit_b * 2 cell = Minimal2DRNNCell(unit_a, unit_b) x = keras.Input((None, input_a, input_b)) s = keras.Input((bad_shape_a, bad_shape_b)) layer = keras.layers.RNN(cell) with self.assertRaisesWithPredicateMatch( ValueError, "however `cell.state_size` is" ): layer(x, initial_state=s) def test_inconsistent_output_state_size(self): batch = 32 time_step = 4 state_size = 5 input_size = 6 cell = PlusOneRNNCell(state_size) x = keras.Input((None, input_size)) layer = keras.layers.RNN(cell) y = layer(x) self.assertEqual(cell.state_size, state_size) if not tf.executing_eagerly(): init_state = layer.get_initial_state(x) self.assertEqual(len(init_state), 1) self.assertEqual(init_state[0].shape.as_list(), [None, state_size]) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( np.zeros((batch, time_step, input_size)), np.zeros((batch, input_size)), ) self.assertEqual(model.output_shape, (None, input_size)) def test_get_initial_state(self): cell = keras.layers.SimpleRNNCell(5) with self.assertRaisesRegex( ValueError, "batch_size and dtype cannot be None" ): cell.get_initial_state(None, None, None) if not tf.executing_eagerly(): inputs = keras.Input((None, 10)) initial_state = cell.get_initial_state(inputs, None, None) self.assertEqual(initial_state.shape.as_list(), [None, 5]) self.assertEqual(initial_state.dtype, inputs.dtype) batch = tf.shape(inputs)[0] dtype = inputs.dtype initial_state = cell.get_initial_state(None, batch, dtype) self.assertEqual(initial_state.shape.as_list(), [None, 5]) self.assertEqual(initial_state.dtype, inputs.dtype) else: batch = 8 inputs = np.random.random((batch, 10)) initial_state = cell.get_initial_state(inputs, None, None) self.assertEqual(initial_state.shape.as_list(), [8, 5]) self.assertEqual(initial_state.dtype, inputs.dtype) dtype = inputs.dtype initial_state = cell.get_initial_state(None, batch, dtype) self.assertEqual(initial_state.shape.as_list(), [batch, 5]) self.assertEqual(initial_state.dtype, inputs.dtype) @parameterized.parameters([True, False]) def test_nested_input_output(self, stateful): batch = 10 t = 5 i1, i2, i3 = 3, 4, 5 o1, o2, o3 = 2, 3, 4 cell = NestedCell(o1, o2, o3) rnn = keras.layers.RNN(cell, stateful=stateful) batch_size = batch if stateful else None input_1 = keras.Input((t, i1), batch_size=batch_size) input_2 = keras.Input((t, i2, i3), batch_size=batch_size) outputs = rnn((input_1, input_2)) self.assertEqual(len(outputs), 2) self.assertEqual(outputs[0].shape.as_list(), [batch_size, o1]) self.assertEqual(outputs[1].shape.as_list(), [batch_size, o2, o3]) model = keras.models.Model((input_1, input_2), outputs) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))], [np.zeros((batch, o1)), np.zeros((batch, o2, o3))], ) self.assertEqual( model.output_shape, [(batch_size, o1), (batch_size, o2, o3)] ) cell = NestedCell(o1, o2, o3, use_tuple=True) rnn = keras.layers.RNN(cell, stateful=stateful) input_1 = keras.Input((t, i1), batch_size=batch_size) input_2 = keras.Input((t, i2, i3), batch_size=batch_size) outputs = rnn(NestedInput(t1=input_1, t2=input_2)) self.assertEqual(len(outputs), 2) self.assertEqual(outputs[0].shape.as_list(), [batch_size, o1]) self.assertEqual(outputs[1].shape.as_list(), [batch_size, o2, o3]) model = keras.models.Model([input_1, input_2], outputs) model.compile( 
optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))], [np.zeros((batch, o1)), np.zeros((batch, o2, o3))], ) self.assertEqual( model.output_shape, [(batch_size, o1), (batch_size, o2, o3)] ) def test_nested_input_output_with_state(self): batch = 10 t = 5 i1, i2, i3 = 3, 4, 5 o1, o2, o3 = 2, 3, 4 cell = NestedCell(o1, o2, o3) rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True) input_1 = keras.Input((t, i1)) input_2 = keras.Input((t, i2, i3)) output1, output2, s1, s2 = rnn((input_1, input_2)) self.assertEqual(output1.shape.as_list(), [None, t, o1]) self.assertEqual(output2.shape.as_list(), [None, t, o2, o3]) self.assertEqual(s1.shape.as_list(), [None, o1]) self.assertEqual(s2.shape.as_list(), [None, o2, o3]) model = keras.models.Model([input_1, input_2], [output1, output2]) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))], [np.zeros((batch, t, o1)), np.zeros((batch, t, o2, o3))], ) self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)]) cell = NestedCell(o1, o2, o3, use_tuple=True) rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True) input_1 = keras.Input((t, i1)) input_2 = keras.Input((t, i2, i3)) output1, output2, s1, s2 = rnn(NestedInput(t1=input_1, t2=input_2)) self.assertEqual(output1.shape.as_list(), [None, t, o1]) self.assertEqual(output2.shape.as_list(), [None, t, o2, o3]) self.assertEqual(s1.shape.as_list(), [None, o1]) self.assertEqual(s2.shape.as_list(), [None, o2, o3]) model = keras.models.Model([input_1, input_2], [output1, output2]) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))], [np.zeros((batch, t, o1)), np.zeros((batch, t, o2, o3))], ) self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)]) def test_nest_input_output_with_init_state(self): batch = 10 t = 5 i1, i2, i3 = 3, 4, 5 o1, o2, o3 = 2, 3, 4 cell = NestedCell(o1, o2, o3) rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True) input_1 = keras.Input((t, i1)) input_2 = keras.Input((t, i2, i3)) init_s1 = keras.Input((o1,)) init_s2 = keras.Input((o2, o3)) output1, output2, s1, s2 = rnn( (input_1, input_2), initial_state=(init_s1, init_s2) ) self.assertEqual(output1.shape.as_list(), [None, t, o1]) self.assertEqual(output2.shape.as_list(), [None, t, o2, o3]) self.assertEqual(s1.shape.as_list(), [None, o1]) self.assertEqual(s2.shape.as_list(), [None, o2, o3]) model = keras.models.Model( [input_1, input_2, init_s1, init_s2], [output1, output2] ) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( [ np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3)), np.zeros((batch, o1)), np.zeros((batch, o2, o3)), ], [np.zeros((batch, t, o1)), np.zeros((batch, t, o2, o3))], ) self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)]) cell = NestedCell(o1, o2, o3, use_tuple=True) rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True) input_1 = keras.Input((t, i1)) input_2 = keras.Input((t, i2, i3)) init_s1 = keras.Input((o1,)) init_s2 = keras.Input((o2, o3)) init_state = NestedState(s1=init_s1, s2=init_s2) output1, output2, s1, s2 = rnn( NestedInput(t1=input_1, t2=input_2), initial_state=init_state ) 
self.assertEqual(output1.shape.as_list(), [None, t, o1])
        self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
        self.assertEqual(s1.shape.as_list(), [None, o1])
        self.assertEqual(s2.shape.as_list(), [None, o2, o3])

        model = keras.models.Model(
            [input_1, input_2, init_s1, init_s2], [output1, output2]
        )
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            [
                np.zeros((batch, t, i1)),
                np.zeros((batch, t, i2, i3)),
                np.zeros((batch, o1)),
                np.zeros((batch, o2, o3)),
            ],
            [np.zeros((batch, t, o1)), np.zeros((batch, t, o2, o3))],
        )
        self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])

    def test_masking_rnn_with_output_and_states(self):
        class Cell(keras.layers.Layer):
            def __init__(self):
                self.state_size = None
                self.output_size = None
                super().__init__()

            def build(self, input_shape):
                self.state_size = input_shape[-1]
                self.output_size = input_shape[-1]

            def call(self, inputs, states):
                return inputs, [s + 1 for s in states]

        x = keras.Input((3, 1), name="x")
        x_masked = keras.layers.Masking()(x)
        s_0 = keras.Input((1,), name="s_0")
        y, s = keras.layers.RNN(Cell(), return_state=True)(
            x_masked, initial_state=s_0
        )
        model = keras.models.Model([x, s_0], [y, s])
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )

        # The last time step is masked.
        x_np = np.array([[[1.0], [2.0], [0.0]]])
        s_0_np = np.array([[10.0]])
        y_np, s_np = model.predict([x_np, s_0_np])

        # 1 is added to the initial state two times.
        self.assertAllClose(s_np, s_0_np + 2)
        # Expect the last output to be the same as the last output before
        # masking.
        self.assertAllClose(y_np, x_np[:, 1, :])

    def test_zero_output_for_masking(self):
        for unroll in [True, False]:
            cell = keras.layers.SimpleRNNCell(5)
            x = keras.Input((5, 5))
            mask = keras.layers.Masking()
            layer = keras.layers.RNN(
                cell,
                return_sequences=True,
                zero_output_for_mask=True,
                unroll=unroll,
            )
            masked_input = mask(x)
            y = layer(masked_input)
            model = keras.models.Model(x, y)
            model.compile(
                optimizer="rmsprop",
                loss="mse",
                run_eagerly=test_utils.should_run_eagerly(),
            )

            np_x = np.ones((6, 5, 5))
            result_1 = model.predict(np_x)

            # Set timesteps 4 and 5 of the last record to zero (masked).
            np_x[5, 3:] = 0
            result_2 = model.predict(np_x)

            # Expect result_2 to have the same output, except at timesteps 4
            # and 5 of the last record.
result_1[5, 3:] = 0 self.assertAllClose(result_1, result_2) def test_unroll_single_step(self): """Even if the time dimension is only one, we should be able to unroll.""" cell = keras.layers.SimpleRNNCell(5) x = keras.Input((1, 5)) layer = keras.layers.RNN(cell, return_sequences=True, unroll=True) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) np_x = np.ones((6, 1, 5)) result = model.predict(np_x) self.assertEqual((6, 1, 5), result.shape) def test_unroll_zero_step(self): """If the time dimension is None, we should fail to unroll.""" cell = keras.layers.SimpleRNNCell(5) x = keras.Input((None, 5)) layer = keras.layers.RNN(cell, return_sequences=True, unroll=True) with self.assertRaisesRegex(ValueError, "Cannot unroll a RNN.*"): layer(x) def test_full_input_spec(self): # See https://github.com/tensorflow/tensorflow/issues/25985 inputs = keras.layers.Input(batch_shape=(1, 1, 1)) state_h = keras.layers.Input(batch_shape=(1, 1)) state_c = keras.layers.Input(batch_shape=(1, 1)) states = [state_h, state_c] decoder_out = keras.layers.LSTM(1, stateful=True)( inputs, initial_state=states ) model = keras.Model([inputs, state_h, state_c], decoder_out) output1 = model.predict( [np.ones((1, 1, 1)), np.ones((1, 1)), np.ones((1, 1))] ) output2 = model.predict( [np.ones((1, 1, 1)), np.ones((1, 1)), np.ones((1, 1))] ) model.reset_states() output3 = model.predict( [np.ones((1, 1, 1)), np.ones((1, 1)), np.ones((1, 1))] ) self.assertAllClose(output1, output3) self.assertNotAllClose(output1, output2) def test_reset_states(self): # See https://github.com/tensorflow/tensorflow/issues/25852 with self.assertRaisesRegex( ValueError, "it needs to know its batch size" ): simple_rnn = keras.layers.SimpleRNN(1, stateful=True) simple_rnn.reset_states() with self.assertRaisesRegex( ValueError, "it needs to know its batch size" ): cell = Minimal2DRNNCell(1, 2) custom_rnn = keras.layers.RNN(cell, stateful=True) custom_rnn.reset_states() @parameterized.parameters( [ keras.layers.SimpleRNNCell, keras.layers.GRUCell, keras.layers.LSTMCell, ] ) def test_stateful_rnn_with_stacking(self, cell): # See https://github.com/tensorflow/tensorflow/issues/28614. batch = 12 timesteps = 10 input_dim = 8 output_dim = 64 cells = [cell(32), cell(64)] x = keras.Input(batch_shape=(batch, None, input_dim)) layer = keras.layers.RNN(cells, stateful=True) y = layer(x) model = keras.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( np.zeros((batch, timesteps, input_dim)), np.zeros((batch, output_dim)), ) model.predict(np.ones((batch, timesteps, input_dim))) model.reset_states() model.predict(np.ones((batch, timesteps, input_dim))) new_states = tf.nest.map_structure( lambda s: np.ones((batch, s)), layer.cell.state_size ) layer.reset_states(new_states) model.predict(np.ones((batch, timesteps, input_dim))) def test_stateful_rnn_with_initial_state(self): # See https://github.com/tensorflow/tensorflow/issues/32299. 
batch = 12 timesteps = 1 input_dim = 8 output_dim = 16 test_inputs = np.full((batch, timesteps, input_dim), 0.5) def make_model(stateful=False, with_initial_state=False): input_layer = keras.Input(shape=(None, input_dim), batch_size=batch) if with_initial_state: initial_states = keras.backend.constant( np.ones((batch, output_dim)) ) else: initial_states = None rnn_output = keras.layers.GRU( units=output_dim, return_sequences=True, stateful=stateful )(input_layer, initial_state=initial_states) model = keras.Model(input_layer, rnn_output) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) return model # Define a model with a constant state initialization model = make_model(stateful=True, with_initial_state=True) layer_weights = model.layers[1].get_weights() model.reset_states() predict_1 = model.predict(test_inputs) predict_2 = model.predict(test_inputs) model.reset_states() predict_3 = model.predict(test_inputs) # predict 1 and 2 should be different since the batch 2 should use the # state from batch 1 as the initial state. self.assertNotAllClose(predict_1, predict_2) self.assertAllClose(predict_1, predict_3) # Create a new model with same weights but without initial states. Make # sure the predict value is different from the model with non-zero # initial state. model_2 = make_model(stateful=True, with_initial_state=False) model_2.layers[1].set_weights(layer_weights) model_2.reset_states() predict_4 = model_2.predict(test_inputs) predict_5 = model_2.predict(test_inputs) self.assertNotAllClose(predict_1, predict_4) self.assertNotAllClose(predict_4, predict_5) # Create models with stateful=False, and make sure they handle init # state correctly. model_3 = make_model(stateful=False, with_initial_state=True) model_3.layers[1].set_weights(layer_weights) model_3.reset_states() predict_6 = model_3.predict(test_inputs) predict_7 = model_3.predict(test_inputs) self.assertAllClose(predict_1, predict_6) self.assertAllClose(predict_6, predict_7) def test_stateful_rnn_with_customized_get_initial_state(self): class TestCell(keras.layers.AbstractRNNCell): state_size = 1 output_size = 2 def get_initial_state( self, inputs=None, batch_size=None, dtype=None ): return np.ones((batch_size, 1), dtype=dtype) def call(self, inputs, states): return inputs, states layer = keras.layers.RNN(TestCell(), stateful=True, return_state=True) inputs = keras.Input(shape=(10, 2), batch_size=4) model = keras.Model(inputs, layer(inputs)) x = np.ones((4, 10, 2), dtype=np.float32) output, state = model.predict(x) self.assertAllClose(output, np.ones((4, 2))) self.assertAllClose(state, np.ones((4, 1))) def test_stateful_rnn_with_customized_dtype(self): class TestCell(keras.layers.AbstractRNNCell): state_size = 1 output_size = 2 def get_initial_state( self, inputs=None, batch_size=None, dtype=None ): return np.ones((batch_size, 1), dtype=np.float16) def call(self, inputs, states): return inputs, states layer = keras.layers.RNN(TestCell(), stateful=True, return_state=True) inputs = keras.Input(shape=(10, 2), batch_size=4) model = keras.Model(inputs, layer(inputs)) x = np.ones((4, 10, 2), dtype=np.float16) output, state = model.predict(x) self.assertAllClose(output, np.ones((4, 2), dtype=np.float16)) self.assertAllClose(state, np.ones((4, 1), dtype=np.float16)) def test_input_dim_length(self): simple_rnn = keras.layers.SimpleRNN(5, input_length=10, input_dim=8) self.assertEqual(simple_rnn._batch_input_shape, (None, 10, 8)) simple_rnn = keras.layers.SimpleRNN(5, input_dim=8) 
self.assertEqual(simple_rnn._batch_input_shape, (None, None, 8)) simple_rnn = keras.layers.SimpleRNN(5, input_length=10) self.assertEqual(simple_rnn._batch_input_shape, (None, 10, None)) @parameterized.parameters( [ keras.layers.SimpleRNNCell, keras.layers.GRUCell, keras.layers.LSTMCell, ] ) def test_state_spec_with_stack_cell(self, cell): # See https://github.com/tensorflow/tensorflow/issues/27817 for more # detail. batch = 12 timesteps = 10 input_dim = 8 output_dim = 8 def create_cell(): return [cell(output_dim), cell(output_dim), cell(output_dim)] inputs = keras.Input((timesteps, input_dim)) encoder_output = keras.layers.RNN(create_cell(), return_state=True)( inputs ) states = encoder_output[1:] decoder_output = keras.layers.RNN(create_cell())( inputs, initial_state=states ) model = keras.models.Model(inputs, decoder_output) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch( np.zeros((batch, timesteps, input_dim)), np.zeros((batch, output_dim)), ) model.predict(np.ones((batch, timesteps, input_dim))) @parameterized.named_parameters( *test_utils.generate_combinations_with_testcase_name( layer=[ keras.layers.SimpleRNN, gru_v1.GRU, lstm_v1.LSTM, gru.GRU, lstm.LSTM, ] ) ) def test_rnn_with_ragged_input(self, layer): ragged_data = tf.ragged.constant( [ [[1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 2.0, 3.0, 1.0, 1.0]], [[2.0, 4.0, 1.0, 3.0, 1.0]], [ [2.0, 3.0, 4.0, 1.0, 5.0], [2.0, 3.0, 1.0, 1.0, 1.0], [1.0, 2.0, 3.0, 4.0, 5.0], ], ], ragged_rank=1, ) label_data = np.array([[1, 0, 1], [1, 1, 0], [0, 0, 1]]) # Test results in feed forward np.random.seed(100) rnn_layer = layer(4, activation="sigmoid") x_ragged = keras.Input(shape=(None, 5), ragged=True) y_ragged = rnn_layer(x_ragged) model = keras.models.Model(x_ragged, y_ragged) output_ragged = model.predict(ragged_data, steps=1) x_dense = keras.Input(shape=(3, 5)) masking = keras.layers.Masking()(x_dense) y_dense = rnn_layer(masking) model_2 = keras.models.Model(x_dense, y_dense) dense_data = ragged_data.to_tensor() output_dense = model_2.predict(dense_data, steps=1) self.assertAllClose(output_dense, output_ragged) # Test results with go backwards np.random.seed(200) back_rnn_layer = layer(8, go_backwards=True, activation="sigmoid") x_ragged = keras.Input(shape=(None, 5), ragged=True) y_ragged = back_rnn_layer(x_ragged) model = keras.models.Model(x_ragged, y_ragged) output_ragged = model.predict(ragged_data, steps=1) x_dense = keras.Input(shape=(3, 5)) masking = keras.layers.Masking()(x_dense) y_dense = back_rnn_layer(masking) model_2 = keras.models.Model(x_dense, y_dense) dense_data = ragged_data.to_tensor() output_dense = model_2.predict(dense_data, steps=1) self.assertAllClose(output_dense, output_ragged) # Test densification of the ragged input dense_tensor, row_lengths = keras.backend.convert_inputs_if_ragged( ragged_data ) self.assertAllClose(dense_data, dense_tensor) # Test optional params, all should work except unrolling inputs = keras.Input(shape=(None, 5), dtype=tf.float32, ragged=True) custom_rnn_layer = layer( 3, zero_output_for_mask=True, dropout=0.1, use_bias=True ) outputs = custom_rnn_layer(inputs) model = keras.models.Model(inputs, outputs) model.compile( optimizer="sgd", loss="mse", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch(ragged_data, label_data) # Test stateful and full shape specification inputs = keras.Input( shape=(None, 5), batch_size=3, dtype=tf.float32, ragged=True ) stateful_rnn_layer = layer(3, stateful=True) outputs = 
stateful_rnn_layer(inputs)
        model = keras.models.Model(inputs, outputs)
        model.compile(
            optimizer="sgd",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(ragged_data, label_data)

        # Must raise an error when unroll is set to True.
        unroll_rnn_layer = layer(3, unroll=True)
        with self.assertRaisesRegex(
            ValueError, "The input received contains RaggedTensors *"
        ):
            unroll_rnn_layer(inputs)

        # Check that the return_sequences outputs are correct.
        np.random.seed(100)
        returning_rnn_layer = layer(4, return_sequences=True)

        x_ragged = keras.Input(shape=(None, 5), ragged=True)
        y_ragged = returning_rnn_layer(x_ragged)
        model = keras.models.Model(x_ragged, y_ragged)
        output_ragged = model.predict(ragged_data, steps=1)
        self.assertAllClose(output_ragged.ragged_rank, ragged_data.ragged_rank)
        self.assertAllClose(output_ragged.row_splits, ragged_data.row_splits)

        x_dense = keras.Input(shape=(3, 5))
        masking = keras.layers.Masking()(x_dense)
        y_dense = returning_rnn_layer(masking)
        model_2 = keras.models.Model(x_dense, y_dense)
        dense_data = ragged_data.to_tensor()
        output_dense = model_2.predict(dense_data, steps=1)
        # Convert the dense output to ragged for value comparison.
        output_dense = tf.RaggedTensor.from_tensor(
            output_dense, lengths=row_lengths
        )
        self.assertAllClose(output_ragged, output_dense)

        # Check that the return_sequences and go_backwards outputs are
        # correct.
        np.random.seed(100)
        returning_rnn_layer = layer(
            4, go_backwards=True, return_sequences=True
        )

        x_ragged = keras.Input(shape=(None, 5), ragged=True)
        y_ragged = returning_rnn_layer(x_ragged)
        model = keras.models.Model(x_ragged, y_ragged)
        output_ragged = model.predict(ragged_data, steps=1)
        self.assertAllClose(output_ragged.ragged_rank, ragged_data.ragged_rank)
        self.assertAllClose(output_ragged.row_splits, ragged_data.row_splits)

        x_dense = keras.Input(shape=(3, 5))
        masking = keras.layers.Masking()(x_dense)
        y_dense = returning_rnn_layer(masking)
        model_2 = keras.models.Model(x_dense, y_dense)
        dense_data = ragged_data.to_tensor()
        output_dense = model_2.predict(dense_data, steps=1)
        # Note that the raw outputs for dense and ragged input will differ
        # when go_backwards=True. Consider the input
        # [[a, b, 0], [c, 0, 0], [d, e, f]] where the 0s are masked values.
        # The dense output will be [[0, b, a], [0, 0, c], [f, e, d]] since it
        # processes the whole sequence from the end.
        # The ragged output will be [[b, a], [c], [f, e, d]] since it simply
        # ignores the 0s. If we densify the ragged output, 0s are by default
        # appended at the end (rather than at the beginning), which makes
        # the output [[b, a, 0], [c, 0, 0], [f, e, d]].
        # With this, we need to verify that
        # reverse(ragged_output.to_tensor()) == reverse(dense_output).
        output_dense = keras.backend.reverse(output_dense, [1])
        output_dense = tf.RaggedTensor.from_tensor(
            output_dense, lengths=row_lengths
        )
        self.assertAllClose(
            keras.backend.reverse(output_ragged, [1]), output_dense
        )

    def test_stateless_rnn_cell(self):
        class StatelessCell(keras.layers.Layer):
            def __init__(self):
                self.state_size = ((), [], ())
                self.output_size = None
                super().__init__()

            def build(self, input_shape):
                self.output_size = input_shape[-1]

            def call(self, inputs, states):
                return inputs, states

        x = keras.Input((None, 5))
        cell = StatelessCell()
        initial_state = tf.nest.map_structure(lambda t: None, cell.state_size)
        layer = keras.layers.RNN(cell)
        y = layer(x, initial_state=initial_state)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 5)))

    @parameterized.parameters(
        [keras.layers.SimpleRNN, gru_v1.GRU, lstm_v1.LSTM, gru.GRU, lstm.LSTM]
    )
    def test_for_enable_caching_device_for_layer(self, layer_cls):
        expected_caching_device = (
            tf.compat.v1.executing_eagerly_outside_functions()
        )
        layer = layer_cls(1)
        self.assertEqual(
            layer.cell._enable_caching_device, expected_caching_device
        )
        # Make sure the config only appears when a non-default value is used.
        config = layer.get_config()
        self.assertNotIn("enable_caching_device", config)

        non_default_value = not expected_caching_device
        layer = layer_cls(1, enable_caching_device=non_default_value)
        self.assertEqual(layer.cell._enable_caching_device, non_default_value)
        config = layer.get_config()
        self.assertEqual(config["enable_caching_device"], non_default_value)

    @parameterized.parameters(
        [
            keras.layers.SimpleRNNCell,
            gru_v1.GRUCell,
            lstm_v1.LSTMCell,
            gru.GRUCell,
            lstm.LSTMCell,
        ]
    )
    def test_for_enable_caching_device_for_cell(self, cell_cls):
        expected_caching_device = (
            tf.compat.v1.executing_eagerly_outside_functions()
        )
        cell = cell_cls(1)
        self.assertEqual(cell._enable_caching_device, expected_caching_device)
        # Make sure the config only appears when a non-default value is used.
config = cell.get_config() self.assertNotIn("enable_caching_device", config) non_default_value = not expected_caching_device cell = cell_cls(1, enable_caching_device=non_default_value) self.assertEqual(cell._enable_caching_device, non_default_value) config = cell.get_config() self.assertEqual(config["enable_caching_device"], non_default_value) class RNNCellWithConstants(keras.layers.Layer): def __init__(self, units, constant_size, **kwargs): self.units = units self.state_size = units self.constant_size = constant_size super().__init__(**kwargs) def build(self, input_shape): self.input_kernel = self.add_weight( shape=(input_shape[-1], self.units), initializer="uniform", name="kernel", ) self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer="uniform", name="recurrent_kernel", ) self.constant_kernel = self.add_weight( shape=(self.constant_size, self.units), initializer="uniform", name="constant_kernel", ) self.built = True def call(self, inputs, states, constants): [prev_output] = states [constant] = constants h_input = keras.backend.dot(inputs, self.input_kernel) h_state = keras.backend.dot(prev_output, self.recurrent_kernel) h_const = keras.backend.dot(constant, self.constant_kernel) output = h_input + h_state + h_const return output, [output] def get_config(self): config = {"units": self.units, "constant_size": self.constant_size} base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) class Minimal2DRNNCell(keras.layers.Layer): """The minimal 2D RNN cell is a simple combination of 2 1-D RNN cell. Both internal state and output have 2 dimensions and are orthogonal between each other. """ def __init__(self, unit_a, unit_b, **kwargs): self.unit_a = unit_a self.unit_b = unit_b self.state_size = tf.TensorShape([unit_a, unit_b]) self.output_size = tf.TensorShape([unit_a, unit_b]) super().__init__(**kwargs) def build(self, input_shape): input_a = input_shape[-2] input_b = input_shape[-1] self.kernel = self.add_weight( shape=(input_a, input_b, self.unit_a, self.unit_b), initializer="uniform", name="kernel", ) self.recurring_kernel = self.add_weight( shape=(self.unit_a, self.unit_b, self.unit_a, self.unit_b), initializer="uniform", name="recurring_kernel", ) self.bias = self.add_weight( shape=(self.unit_a, self.unit_b), initializer="uniform", name="bias" ) self.built = True def call(self, inputs, states): prev_output = states[0] h = tf.einsum("bij,ijkl->bkl", inputs, self.kernel) h += tf.expand_dims(self.bias, axis=0) output = h + tf.einsum( "bij,ijkl->bkl", prev_output, self.recurring_kernel ) return output, [output] class PlusOneRNNCell(keras.layers.Layer): """Add one to the input and state. This cell is used for testing state_size and output_size. """ def __init__(self, num_unit, **kwargs): self.state_size = num_unit super().__init__(**kwargs) def build(self, input_shape): self.output_size = input_shape[-1] def call(self, inputs, states): return inputs + 1, [states[0] + 1] class NestedCell(keras.layers.Layer): def __init__(self, unit_1, unit_2, unit_3, use_tuple=False, **kwargs): self.unit_1 = unit_1 self.unit_2 = unit_2 self.unit_3 = unit_3 self.use_tuple = use_tuple super().__init__(**kwargs) # A nested state. 
if use_tuple: self.state_size = NestedState( s1=unit_1, s2=tf.TensorShape([unit_2, unit_3]) ) else: self.state_size = (unit_1, tf.TensorShape([unit_2, unit_3])) self.output_size = (unit_1, tf.TensorShape([unit_2, unit_3])) def build(self, inputs_shape): # expect input_shape to contain 2 items, [(batch, i1), (batch, i2, i3)] if self.use_tuple: input_1 = inputs_shape.t1[1] input_2, input_3 = inputs_shape.t2[1:] else: input_1 = inputs_shape[0][1] input_2, input_3 = inputs_shape[1][1:] self.kernel_1 = self.add_weight( shape=(input_1, self.unit_1), initializer="uniform", name="kernel_1" ) self.kernel_2_3 = self.add_weight( shape=(input_2, input_3, self.unit_2, self.unit_3), initializer="uniform", name="kernel_2_3", ) def call(self, inputs, states): # inputs should be in [(batch, input_1), (batch, input_2, input_3)] # state should be in shape [(batch, unit_1), (batch, unit_2, unit_3)] flatten_inputs = tf.nest.flatten(inputs) s1, s2 = states output_1 = tf.matmul(flatten_inputs[0], self.kernel_1) output_2_3 = tf.einsum( "bij,ijkl->bkl", flatten_inputs[1], self.kernel_2_3 ) state_1 = s1 + output_1 state_2_3 = s2 + output_2_3 output = [output_1, output_2_3] new_states = NestedState(s1=state_1, s2=state_2_3) return output, new_states if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/layers/rnn/base_rnn_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/rnn/base_rnn_test.py", "repo_id": "tf-keras", "token_count": 40626 }
208
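The custom-cell tests above all rely on the same contract: a cell is any `Layer` that exposes `state_size` (and optionally `output_size`) and implements `call(inputs, states)` returning `(output, new_states)`, which `keras.layers.RNN` then steps over the time dimension. Below is a minimal sketch of that contract; the cell name `EchoCell` and all sizes are illustrative placeholders rather than anything from the tests, and it assumes a TF 2.x environment with the `tf_keras` package installed:

```python
import numpy as np
import tensorflow as tf
import tf_keras as keras


class EchoCell(keras.layers.Layer):
    """Hypothetical minimal cell: output = x.W + h.U, reused as new state."""

    def __init__(self, units, **kwargs):
        self.units = units
        self.state_size = units  # single flat state, like SimpleRNNCell
        self.output_size = units
        super().__init__(**kwargs)

    def build(self, input_shape):
        self.kernel = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="uniform",
            name="kernel",
        )
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer="uniform",
            name="recurrent_kernel",
        )
        self.built = True

    def call(self, inputs, states):
        prev_output = states[0]
        output = tf.matmul(inputs, self.kernel) + tf.matmul(
            prev_output, self.recurrent_kernel
        )
        return output, [output]


x = keras.Input((None, 8))  # (batch, time, features)
y = keras.layers.RNN(EchoCell(4))(x)  # last output, shape (batch, 4)
model = keras.models.Model(x, y)
model.predict(np.zeros((2, 5, 8)))
```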
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests that are common for GRU and LSTM. See also: lstm_test.py, gru_test.py. """ import os import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.layers.rnn import gru from tf_keras.layers.rnn import lstm from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils @test_combinations.run_all_keras_modes class RNNV2Test(test_combinations.TestCase): @parameterized.parameters([lstm.LSTM, gru.GRU]) def test_device_placement(self, layer): if not tf.test.is_gpu_available(): self.skipTest("Need GPU for testing.") vocab_size = 20 embedding_dim = 10 batch_size = 8 timestep = 12 units = 5 x = np.random.randint(0, vocab_size, size=(batch_size, timestep)) y = np.random.randint(0, vocab_size, size=(batch_size, timestep)) # Test when GPU is available but not used, the graph should be properly # created with CPU ops. with test_utils.device(should_use_gpu=False): model = keras.Sequential( [ keras.layers.Embedding( vocab_size, embedding_dim, batch_input_shape=[batch_size, timestep], ), layer(units, return_sequences=True, stateful=True), keras.layers.Dense(vocab_size), ] ) model.compile( optimizer="adam", loss="sparse_categorical_crossentropy", run_eagerly=test_utils.should_run_eagerly(), ) model.fit(x, y, epochs=1, shuffle=False) @parameterized.parameters([lstm.LSTM, gru.GRU]) def test_reset_dropout_mask_between_batch(self, layer): # See https://github.com/tensorflow/tensorflow/issues/29187 for more # details batch_size = 8 timestep = 12 embedding_dim = 10 units = 5 layer = layer(units, dropout=0.5, recurrent_dropout=0.5) inputs = np.random.random((batch_size, timestep, embedding_dim)).astype( np.float32 ) previous_dropout, previous_recurrent_dropout = None, None for _ in range(5): layer(inputs, training=True) dropout = layer.cell.get_dropout_mask_for_cell( inputs, training=True ) recurrent_dropout = layer.cell.get_recurrent_dropout_mask_for_cell( inputs, training=True ) if previous_dropout is not None: self.assertNotAllClose( self.evaluate(previous_dropout), self.evaluate(dropout) ) previous_dropout = dropout if previous_recurrent_dropout is not None: self.assertNotAllClose( self.evaluate(previous_recurrent_dropout), self.evaluate(recurrent_dropout), ) previous_recurrent_dropout = recurrent_dropout @parameterized.parameters([lstm.LSTM, gru.GRU]) def test_recurrent_dropout_with_stateful_RNN(self, layer): # See https://github.com/tensorflow/tensorflow/issues/27829 for details. 
# The issue was caused by using inplace mul for a variable, which was a # warning for RefVariable, but an error for ResourceVariable in 2.0 keras.models.Sequential( [ layer( 128, stateful=True, return_sequences=True, dropout=0.2, batch_input_shape=[32, None, 5], recurrent_dropout=0.2, ) ] ) @parameterized.parameters([lstm.LSTM, gru.GRU]) def test_recurrent_dropout_saved_model(self, layer): if not tf.executing_eagerly(): self.skipTest("v2-only test") inputs = keras.Input(shape=(784, 3), name="digits") x = layer(64, activation="relu", name="RNN", dropout=0.1)(inputs) x = keras.layers.Dense(64, activation="relu", name="dense")(x) outputs = keras.layers.Dense( 10, activation="softmax", name="predictions" )(x) model = keras.Model(inputs=inputs, outputs=outputs, name="3_layer") model.save(os.path.join(self.get_temp_dir(), "model"), save_format="tf") @parameterized.parameters([lstm.LSTM, gru.GRU]) def test_ragged(self, layer): vocab_size = 100 inputs = tf.ragged.constant( np.random.RandomState(0).randint(0, vocab_size, [128, 25]) ) embedder = keras.layers.Embedding(input_dim=vocab_size, output_dim=16) embedded_inputs = embedder(inputs) layer = layer(32) layer(embedded_inputs) @parameterized.parameters([lstm.LSTM, gru.GRU]) @test_utils.run_v2_only def test_compare_ragged_with_masks(self, layer): vocab_size = 100 timestep = 20 units = 32 embedder = keras.layers.Embedding( input_dim=vocab_size, output_dim=units ) layer = layer(units, return_sequences=True) data = tf.constant( np.random.RandomState(0).randint( 0, vocab_size, [timestep, timestep] ) ) mask = tf.sequence_mask(tf.range(1, timestep + 1)) data_ragged = tf.ragged.boolean_mask(data, mask) outputs = [] devices = [test_utils.device(should_use_gpu=False)] if tf.test.is_gpu_available(): devices.append(test_utils.device(should_use_gpu=True)) for device in devices: with device: outputs.append( tf.boolean_mask(layer(embedder(data), mask=mask), mask) ) outputs.append(layer(embedder(data_ragged)).values) for i in range(len(outputs) - 1): self.assertAllClose(outputs[i], outputs[i + 1], atol=1e-4) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/layers/rnn/gru_lstm_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/rnn/gru_lstm_test.py", "repo_id": "tf-keras", "token_count": 3243 }
209
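A minimal sketch of the ragged-input path that `test_ragged` and `test_compare_ragged_with_masks` above exercise; the vocabulary size and sequences are arbitrary placeholders, and it assumes eager TF 2.x with `tf_keras` installed:

```python
import tensorflow as tf
import tf_keras as keras

vocab_size = 100
# Variable-length integer sequences as a RaggedTensor.
inputs = tf.ragged.constant([[3, 1, 4, 1], [5, 9], [2, 6, 5]])
embedded = keras.layers.Embedding(input_dim=vocab_size, output_dim=16)(inputs)
# The LSTM consumes the ragged embeddings directly; rows shorter than the
# longest one behave as if their trailing timesteps were masked.
outputs = keras.layers.LSTM(32)(embedded)
print(outputs.shape)  # (3, 32)
```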
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Wrapper layer to apply every temporal slice of an input.""" import tensorflow.compat.v2 as tf from tf_keras import backend from tf_keras.engine.base_layer import Layer from tf_keras.engine.input_spec import InputSpec from tf_keras.layers.rnn.base_wrapper import Wrapper from tf_keras.utils import generic_utils from tf_keras.utils import layer_utils from tf_keras.utils import tf_utils # isort: off from tensorflow.python.util.tf_export import keras_export @keras_export("keras.layers.TimeDistributed") class TimeDistributed(Wrapper): """This wrapper allows to apply a layer to every temporal slice of an input. Every input should be at least 3D, and the dimension of index one of the first input will be considered to be the temporal dimension. Consider a batch of 32 video samples, where each sample is a 128x128 RGB image with `channels_last` data format, across 10 timesteps. The batch input shape is `(32, 10, 128, 128, 3)`. You can then use `TimeDistributed` to apply the same `Conv2D` layer to each of the 10 timesteps, independently: >>> inputs = tf.keras.Input(shape=(10, 128, 128, 3)) >>> conv_2d_layer = tf.keras.layers.Conv2D(64, (3, 3)) >>> outputs = tf.keras.layers.TimeDistributed(conv_2d_layer)(inputs) >>> outputs.shape TensorShape([None, 10, 126, 126, 64]) Because `TimeDistributed` applies the same instance of `Conv2D` to each of the timestamps, the same set of weights are used at each timestamp. Args: layer: a `tf.keras.layers.Layer` instance. Call arguments: inputs: Input tensor of shape (batch, time, ...) or nested tensors, and each of which has shape (batch, time, ...). training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the wrapped layer (only if the layer supports this argument). mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. This argument is passed to the wrapped layer (only if the layer supports this argument). Raises: ValueError: If not initialized with a `tf.keras.layers.Layer` instance. """ def __init__(self, layer, **kwargs): if not isinstance(layer, Layer): raise ValueError( "Please initialize `TimeDistributed` layer with a " f"`tf.keras.layers.Layer` instance. Received: {layer}" ) super().__init__(layer, **kwargs) self.supports_masking = True # It is safe to use the fast, reshape-based approach with all of our # built-in Layers. self._always_use_reshape = layer_utils.is_builtin_layer( layer ) and not getattr(layer, "stateful", False) def _get_shape_tuple(self, init_tuple, tensor, start_idx): """Finds non-specific dimensions in the static shapes. The static shapes are replaced with the corresponding dynamic shapes of the tensor. 
        Args:
          init_tuple: a tuple, the first part of the output shape
          tensor: the tensor from which to get the (static and dynamic) shapes
            as the last part of the output shape
          start_idx: int, which indicates the first dimension to take from
            the static shape of the tensor

        Returns:
          The new shape with the first part from `init_tuple` and the last
          part from `tensor.shape`, where every `None` is replaced by the
          corresponding dimension from `tf.shape(tensor)`.
        """
        # Replace every None in int_shape with the corresponding dynamic dim.
        int_shape = backend.int_shape(tensor)[start_idx:]
        if not any(s is None for s in int_shape):
            return init_tuple + int_shape
        shape = backend.shape(tensor)
        int_shape = list(int_shape)
        for i, s in enumerate(int_shape):
            if s is None:
                int_shape[i] = shape[start_idx + i]
        return init_tuple + tuple(int_shape)

    def _remove_timesteps(self, dims):
        dims = dims.as_list()
        return tf.TensorShape([dims[0]] + dims[2:])

    def build(self, input_shape):
        input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
        input_dims = tf.nest.flatten(
            tf.nest.map_structure(lambda x: x.ndims, input_shape)
        )
        if any(dim < 3 for dim in input_dims):
            raise ValueError(
                "`TimeDistributed` Layer should be passed an `input_shape` "
                f"with at least 3 dimensions, received: {input_shape}"
            )
        # Don't enforce the batch or time dimension.
        self.input_spec = tf.nest.map_structure(
            lambda x: InputSpec(shape=[None, None] + x.as_list()[2:]),
            input_shape,
        )
        child_input_shape = tf.nest.map_structure(
            self._remove_timesteps, input_shape
        )
        child_input_shape = tf_utils.convert_shapes(child_input_shape)
        super().build(tuple(child_input_shape))
        self.built = True

    def compute_output_shape(self, input_shape):
        input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)

        child_input_shape = tf.nest.map_structure(
            self._remove_timesteps, input_shape
        )
        child_output_shape = self.layer.compute_output_shape(child_input_shape)
        child_output_shape = tf_utils.convert_shapes(
            child_output_shape, to_tuples=False
        )
        timesteps = tf_utils.convert_shapes(input_shape)
        timesteps = tf.nest.flatten(timesteps)[1]

        def insert_timesteps(dims):
            dims = dims.as_list()
            return tf.TensorShape([dims[0], timesteps] + dims[1:])

        return tf.nest.map_structure(insert_timesteps, child_output_shape)

    def call(self, inputs, training=None, mask=None):
        kwargs = {}
        if generic_utils.has_arg(self.layer.call, "training"):
            kwargs["training"] = training

        input_shape = tf.nest.map_structure(
            lambda x: tf.TensorShape(backend.int_shape(x)), inputs
        )
        batch_size = tf_utils.convert_shapes(input_shape)
        batch_size = tf.nest.flatten(batch_size)[0]
        if batch_size and not self._always_use_reshape:
            inputs, row_lengths = backend.convert_inputs_if_ragged(inputs)
            is_ragged_input = row_lengths is not None
            input_length = tf_utils.convert_shapes(input_shape)
            input_length = tf.nest.flatten(input_length)[1]

            # Batch size matters, so use the rnn-based implementation.
            def step(x, _):
                output = self.layer(x, **kwargs)
                return output, []

            _, outputs, _ = backend.rnn(
                step,
                inputs,
                initial_states=[],
                input_length=row_lengths[0]
                if is_ragged_input
                else input_length,
                mask=mask,
                unroll=False,
            )

            y = tf.nest.map_structure(
                lambda output: backend.maybe_convert_to_ragged(
                    is_ragged_input, output, row_lengths
                ),
                outputs,
            )
        else:
            # No batch size specified, therefore the layer will be able
            # to process batches of any size.
            # We can go with a reshape-based implementation for performance.
            is_ragged_input = tf.nest.map_structure(
                lambda x: isinstance(x, tf.RaggedTensor), inputs
            )
            is_ragged_input = tf.nest.flatten(is_ragged_input)
            if all(is_ragged_input):
                input_values = tf.nest.map_structure(
                    lambda x: x.values, inputs
                )
                input_row_lengths = tf.nest.map_structure(
                    lambda x: x.nested_row_lengths()[0], inputs
                )
                y = self.layer(input_values, **kwargs)
                y = tf.nest.map_structure(
                    tf.RaggedTensor.from_row_lengths, y, input_row_lengths
                )
            elif any(is_ragged_input):
                raise ValueError(
                    "All inputs have to be either ragged or not, "
                    f"but not mixed. Received: {inputs}"
                )
            else:
                input_length = tf_utils.convert_shapes(input_shape)
                input_length = tf.nest.flatten(input_length)[1]
                if not input_length:
                    input_length = tf.nest.map_structure(
                        lambda x: tf.shape(x)[1], inputs
                    )
                    input_length = generic_utils.to_list(
                        tf.nest.flatten(input_length)
                    )[0]

                inner_input_shape = tf.nest.map_structure(
                    lambda x: self._get_shape_tuple((-1,), x, 2), inputs
                )
                # Shape: (num_samples * timesteps, ...).
                inputs = tf.__internal__.nest.map_structure_up_to(
                    inputs, tf.reshape, inputs, inner_input_shape
                )
                # (num_samples * timesteps, ...)
                if (
                    generic_utils.has_arg(self.layer.call, "mask")
                    and mask is not None
                ):
                    inner_mask_shape = self._get_shape_tuple((-1,), mask, 2)
                    kwargs["mask"] = backend.reshape(mask, inner_mask_shape)

                y = self.layer(inputs, **kwargs)

                # Reconstruct the output shape by re-splitting the 0th
                # dimension back into (num_samples, timesteps, ...).
                # We use batch_size when available so that the 0th dimension
                # is set in the static shape of the reshaped output.
                reshape_batch_size = batch_size if batch_size else -1
                output_shape = tf.nest.map_structure(
                    lambda tensor: self._get_shape_tuple(
                        (reshape_batch_size, input_length), tensor, 1
                    ),
                    y,
                )
                y = tf.__internal__.nest.map_structure_up_to(
                    y, tf.reshape, y, output_shape
                )
        return y

    def compute_mask(self, inputs, mask=None):
        """Computes an output mask tensor for the TimeDistributed layer.

        This is based on the inputs, mask, and the inner layer.
        If batch size is specified:
        Simply return the input `mask`. (An rnn-based implementation with
        more than one rnn input is required but not supported in tf.keras
        yet.)
        Otherwise we call `compute_mask` of the inner layer at each time step.
        If the output mask at each time step is not `None`:
        (E.g., inner layer is Masking or RNN)
        Concatenate all of them and return the concatenation.
        If the output mask at each time step is `None` and the input mask is
        not `None`: (E.g., inner layer is Dense)
        Reduce the input_mask to 2 dimensions and return it.
        Otherwise (both the output mask and the input mask are `None`):
        (E.g., `mask` is not used at all)
        Return `None`.

        Args:
          inputs: Tensor with shape [batch size, timesteps, ...] indicating
            the input to TimeDistributed. If static shape information is
            available for "batch size", `mask` is returned unmodified.
          mask: Either None (indicating no masking) or a Tensor indicating
            the input mask for TimeDistributed. The shape can be static or
            dynamic.

        Returns:
          Either None (no masking), or a [batch size, timesteps, ...] Tensor
          with an output mask for the TimeDistributed layer. The shape beyond
          the first dimension follows the input mask shape (if the computed
          output mask is None), the given `mask` shape (if `mask` is not
          None), or the computed output shape.
""" # cases need to call the layer.compute_mask when input_mask is None: # Masking layer and Embedding layer with mask_zero input_shape = tf.nest.map_structure( lambda x: tf.TensorShape(backend.int_shape(x)), inputs ) input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False) batch_size = tf_utils.convert_shapes(input_shape) batch_size = tf.nest.flatten(batch_size)[0] is_ragged_input = tf.nest.map_structure( lambda x: isinstance(x, tf.RaggedTensor), inputs ) is_ragged_input = generic_utils.to_list( tf.nest.flatten(is_ragged_input) ) if batch_size and not self._always_use_reshape or any(is_ragged_input): # batch size matters, we currently do not handle mask explicitly, or # if the layer always uses reshape approach, or the input is a # ragged tensor. return mask inner_mask = mask if inner_mask is not None: inner_mask_shape = self._get_shape_tuple((-1,), mask, 2) inner_mask = backend.reshape(inner_mask, inner_mask_shape) inner_input_shape = tf.nest.map_structure( lambda tensor: self._get_shape_tuple((-1,), tensor, 2), inputs ) inner_inputs = tf.__internal__.nest.map_structure_up_to( inputs, tf.reshape, inputs, inner_input_shape ) output_mask = self.layer.compute_mask(inner_inputs, inner_mask) if output_mask is None: if mask is None: return None # input_mask is not None, and output_mask is None: # we should return a not-None mask output_mask = mask for _ in range(2, len(backend.int_shape(mask))): output_mask = backend.any(output_mask, axis=-1) else: # output_mask is not None. We need to reshape it input_length = tf_utils.convert_shapes(input_shape) input_length = tf.nest.flatten(input_length)[1] if not input_length: input_length = tf.nest.map_structure( lambda x: backend.shape(x)[1], inputs ) input_length = tf.nest.flatten(input_length)[0] reshape_batch_size = batch_size if batch_size else -1 output_mask_shape = self._get_shape_tuple( (reshape_batch_size, input_length), output_mask, 1 ) output_mask = backend.reshape(output_mask, output_mask_shape) return output_mask
tf-keras/tf_keras/layers/rnn/time_distributed.py/0
{ "file_path": "tf-keras/tf_keras/layers/rnn/time_distributed.py", "repo_id": "tf-keras", "token_count": 6969 }
210
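The reshape-vs-rnn dispatch and the mask handling in `TimeDistributed` above can be exercised end to end with a small functional model; the shapes here are arbitrary, and this is only a sketch assuming a TF 2.x environment with `tf_keras` installed:

```python
import numpy as np
import tf_keras as keras

inputs = keras.Input(shape=(4, 3))  # (batch, timesteps, features)
masked = keras.layers.Masking(mask_value=0.0)(inputs)
# The same Dense instance (and weights) is applied at every timestep;
# the timestep mask computed by Masking propagates through the wrapper.
outputs = keras.layers.TimeDistributed(keras.layers.Dense(2))(masked)
model = keras.Model(inputs, outputs)

x = np.ones((1, 4, 3), dtype="float32")
x[0, 2:] = 0.0  # last two timesteps fully zero -> masked
print(model(x).shape)  # (1, 4, 2)
```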
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Contains the normalization layer classes and their functional aliases.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import warnings import tensorflow.compat.v2 as tf from tf_keras.layers.normalization import batch_normalization_v1 from tf_keras.legacy_tf_layers import base # isort: off from tensorflow.python.util.tf_export import keras_export from tensorflow.python.util.tf_export import tf_export @keras_export(v1=["keras.__internal__.legacy.layers.BatchNormalization"]) @tf_export(v1=["layers.BatchNormalization"]) class BatchNormalization(batch_normalization_v1.BatchNormalization, base.Layer): """Batch Normalization layer from (Ioffe et al., 2015). TF-Keras APIs handle BatchNormalization updates to the moving_mean and moving_variance as part of their `fit()` and `evaluate()` loops. However, if a custom training loop is used with an instance of `Model`, these updates need to be explicitly included. Here's a simple example of how it can be done: ```python # model is an instance of Model that contains BatchNormalization layer. update_ops = model.get_updates_for(None) + model.get_updates_for(features) train_op = optimizer.minimize(loss) train_op = tf.group([train_op, update_ops]) ``` Args: axis: An `int` or list of `int`, the axis or axes that should be normalized, typically the features axis/axes. For instance, after a `Conv2D` layer with `data_format="channels_first"`, set `axis=1`. If a list of axes is provided, each axis in `axis` will be normalized simultaneously. Default is `-1` which uses the last axis. Note: when using multi-axis batch norm, the `beta`, `gamma`, `moving_mean`, and `moving_variance` variables are the same rank as the input Tensor, with dimension size 1 in all reduced (non-axis) dimensions). momentum: Momentum for the moving average. epsilon: Small float added to variance to avoid dividing by zero. center: If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored. scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the next layer is linear (also e.g. `nn.relu`), this can be disabled since the scaling can be done by the next layer. beta_initializer: Initializer for the beta weight. gamma_initializer: Initializer for the gamma weight. moving_mean_initializer: Initializer for the moving mean. moving_variance_initializer: Initializer for the moving variance. beta_regularizer: Optional regularizer for the beta weight. gamma_regularizer: Optional regularizer for the gamma weight. beta_constraint: An optional projection function to be applied to the `beta` weight after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). 
The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. gamma_constraint: An optional projection function to be applied to the `gamma` weight after being updated by an `Optimizer`. renorm: Whether to use Batch Renormalization (Ioffe, 2017). This adds extra variables during training. The inference is the same for either value of this parameter. renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar `Tensors` used to clip the renorm correction. The correction `(r, d)` is used as `corrected_value = normalized_value * r + d`, with `r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin, dmax are set to inf, 0, inf, respectively. renorm_momentum: Momentum used to update the moving means and standard deviations with renorm. Unlike `momentum`, this affects training and should be neither too small (which would add noise) nor too large (which would give stale estimates). Note that `momentum` is still applied to get the means and variances for inference. fused: if `None` or `True`, use a faster, fused implementation if possible. If `False`, use the system recommended implementation. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`, which means batch normalization is performed across the whole batch. When `virtual_batch_size` is not `None`, instead perform "Ghost Batch Normalization", which creates virtual sub-batches which are each normalized separately (with shared gamma, beta, and moving statistics). Must divide the actual batch size during execution. adjustment: A function taking the `Tensor` containing the (dynamic) shape of the input tensor and returning a pair (scale, bias) to apply to the normalized values (before gamma and beta), only during training. For example, if axis==-1, `adjustment = lambda shape: ( tf.random.uniform(shape[-1:], 0.93, 1.07), tf.random.uniform(shape[-1:], -0.1, 0.1))` will scale the normalized value by up to 7% up or down, then shift the result by up to 0.1 (with independent scaling and bias for each feature but shared across all examples), and finally apply gamma and/or beta. If `None`, no adjustment is applied. Cannot be specified if virtual_batch_size is specified. name: A string, the name of the layer. References: Batch Normalization - Accelerating Deep Network Training by Reducing Internal Covariate Shift: [Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html) ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf)) Batch Renormalization - Towards Reducing Minibatch Dependence in Batch-Normalized Models: [Ioffe, 2017](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models) ([pdf](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models.pdf)) @compatibility(TF2) This API is a legacy api that is only compatible with eager execution and `tf.function` if you combine it with `tf.compat.v1.keras.utils.track_tf1_style_variables` Please refer to [tf.layers model mapping section of the migration guide] (https://www.tensorflow.org/guide/migrate/model_mapping) to learn how to use your TensorFlow v1 model in TF2 with TF-Keras. 
    The corresponding TensorFlow v2 layer is
    `tf.keras.layers.BatchNormalization`.

    #### Structural Mapping to Native TF2

    None of the supported arguments have changed name.

    Before:

    ```python
    bn = tf.compat.v1.layers.BatchNormalization()
    ```

    After:

    ```python
    bn = tf.keras.layers.BatchNormalization()
    ```

    #### How to Map Arguments

    TF1 Arg Name              | TF2 Arg Name              | Note
    :------------------------ | :------------------------ | :---------------
    `name`                    | `name`                    | Layer base class
    `trainable`               | `trainable`               | Layer base class
    `axis`                    | `axis`                    | -
    `momentum`                | `momentum`                | -
    `epsilon`                 | `epsilon`                 | -
    `center`                  | `center`                  | -
    `scale`                   | `scale`                   | -
    `beta_initializer`        | `beta_initializer`        | -
    `gamma_initializer`       | `gamma_initializer`       | -
    `moving_mean_initializer` | `moving_mean_initializer` | -
    `beta_regularizer`        | `beta_regularizer`        | -
    `gamma_regularizer`       | `gamma_regularizer`       | -
    `beta_constraint`         | `beta_constraint`         | -
    `gamma_constraint`        | `gamma_constraint`        | -
    `renorm`                  | Not supported             | -
    `renorm_clipping`         | Not supported             | -
    `renorm_momentum`         | Not supported             | -
    `fused`                   | Not supported             | -
    `virtual_batch_size`      | Not supported             | -
    `adjustment`              | Not supported             | -

    @end_compatibility
    """

    def __init__(
        self,
        axis=-1,
        momentum=0.99,
        epsilon=1e-3,
        center=True,
        scale=True,
        beta_initializer=tf.compat.v1.zeros_initializer(),
        gamma_initializer=tf.compat.v1.ones_initializer(),
        moving_mean_initializer=tf.compat.v1.zeros_initializer(),
        moving_variance_initializer=tf.compat.v1.ones_initializer(),
        beta_regularizer=None,
        gamma_regularizer=None,
        beta_constraint=None,
        gamma_constraint=None,
        renorm=False,
        renorm_clipping=None,
        renorm_momentum=0.99,
        fused=None,
        trainable=True,
        virtual_batch_size=None,
        adjustment=None,
        name=None,
        **kwargs
    ):
        super().__init__(
            axis=axis,
            momentum=momentum,
            epsilon=epsilon,
            center=center,
            scale=scale,
            beta_initializer=beta_initializer,
            gamma_initializer=gamma_initializer,
            moving_mean_initializer=moving_mean_initializer,
            moving_variance_initializer=moving_variance_initializer,
            beta_regularizer=beta_regularizer,
            gamma_regularizer=gamma_regularizer,
            beta_constraint=beta_constraint,
            gamma_constraint=gamma_constraint,
            renorm=renorm,
            renorm_clipping=renorm_clipping,
            renorm_momentum=renorm_momentum,
            fused=fused,
            trainable=trainable,
            virtual_batch_size=virtual_batch_size,
            adjustment=adjustment,
            name=name,
            **kwargs
        )

    def call(self, inputs, training=False, mask=None):
        return super().call(inputs, training=training, mask=mask)


@keras_export(v1=["keras.__internal__.legacy.layers.batch_normalization"])
@tf_export(v1=["layers.batch_normalization"])
def batch_normalization(
    inputs,
    axis=-1,
    momentum=0.99,
    epsilon=1e-3,
    center=True,
    scale=True,
    beta_initializer=tf.compat.v1.zeros_initializer(),
    gamma_initializer=tf.compat.v1.ones_initializer(),
    moving_mean_initializer=tf.compat.v1.zeros_initializer(),
    moving_variance_initializer=tf.compat.v1.ones_initializer(),
    beta_regularizer=None,
    gamma_regularizer=None,
    beta_constraint=None,
    gamma_constraint=None,
    training=False,
    trainable=True,
    name=None,
    reuse=None,
    renorm=False,
    renorm_clipping=None,
    renorm_momentum=0.99,
    fused=None,
    virtual_batch_size=None,
    adjustment=None,
):
    """Functional interface for the batch normalization layer (Ioffe et al.,
    2015).

    Note: when training, the moving_mean and moving_variance need to be
    updated. By default the update ops are placed in
    `tf.GraphKeys.UPDATE_OPS`, so they need to be executed alongside the
    `train_op`. Also, be sure to add any batch_normalization ops before
    getting the update_ops collection.
Otherwise, update_ops will be empty, and training/inference will not work properly. For example: ```python x_norm = tf.compat.v1.layers.batch_normalization(x, training=training) # ... update_ops = tf.compat.v1.get_collection(tf.GraphKeys.UPDATE_OPS) train_op = optimizer.minimize(loss) train_op = tf.group([train_op, update_ops]) ``` Args: inputs: Tensor input. axis: An `int`, the axis that should be normalized (typically the features axis). For instance, after a `Convolution2D` layer with `data_format="channels_first"`, set `axis=1` in `BatchNormalization`. momentum: Momentum for the moving average. epsilon: Small float added to variance to avoid dividing by zero. center: If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored. scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the next layer is linear (also e.g. `nn.relu`), this can be disabled since the scaling can be done by the next layer. beta_initializer: Initializer for the beta weight. gamma_initializer: Initializer for the gamma weight. moving_mean_initializer: Initializer for the moving mean. moving_variance_initializer: Initializer for the moving variance. beta_regularizer: Optional regularizer for the beta weight. gamma_regularizer: Optional regularizer for the gamma weight. beta_constraint: An optional projection function to be applied to the `beta` weight after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. gamma_constraint: An optional projection function to be applied to the `gamma` weight after being updated by an `Optimizer`. training: Either a Python boolean, or a TensorFlow boolean scalar tensor (e.g. a placeholder). Whether to return the output in training mode (normalized with statistics of the current batch) or in inference mode (normalized with moving statistics). **NOTE**: make sure to set this parameter correctly, or else your training/inference will not work properly. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). name: String, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. renorm: Whether to use Batch Renormalization (Ioffe, 2017). This adds extra variables during training. The inference is the same for either value of this parameter. renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar `Tensors` used to clip the renorm correction. The correction `(r, d)` is used as `corrected_value = normalized_value * r + d`, with `r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin, dmax are set to inf, 0, inf, respectively. renorm_momentum: Momentum used to update the moving means and standard deviations with renorm. Unlike `momentum`, this affects training and should be neither too small (which would add noise) nor too large (which would give stale estimates). Note that `momentum` is still applied to get the means and variances for inference. fused: if `None` or `True`, use a faster, fused implementation if possible. If `False`, use the system recommended implementation. virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`, which means batch normalization is performed across the whole batch. 
When `virtual_batch_size` is not `None`, instead perform "Ghost Batch Normalization", which creates virtual sub-batches which are each normalized separately (with shared gamma, beta, and moving statistics). Must divide the actual batch size during execution. adjustment: A function taking the `Tensor` containing the (dynamic) shape of the input tensor and returning a pair (scale, bias) to apply to the normalized values (before gamma and beta), only during training. For example, if axis==-1, `adjustment = lambda shape: ( tf.random.uniform(shape[-1:], 0.93, 1.07), tf.random.uniform(shape[-1:], -0.1, 0.1))` will scale the normalized value by up to 7% up or down, then shift the result by up to 0.1 (with independent scaling and bias for each feature but shared across all examples), and finally apply gamma and/or beta. If `None`, no adjustment is applied. Cannot be specified if virtual_batch_size is specified. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. References: Batch Normalization - Accelerating Deep Network Training by Reducing Internal Covariate Shift: [Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html) ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf)) Batch Renormalization - Towards Reducing Minibatch Dependence in Batch-Normalized Models: [Ioffe, 2017](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models) ([pdf](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models.pdf)) @compatibility(TF2) This API is a legacy api that is only compatible with eager execution and `tf.function` if you combine it with `tf.compat.v1.keras.utils.track_tf1_style_variables` Please refer to [tf.layers model mapping section of the migration guide] (https://www.tensorflow.org/guide/migrate/model_mapping) to learn how to use your TensorFlow v1 model in TF2 with TF-Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.BatchNormalization`. The batch updating pattern with `tf.control_dependencies(tf.GraphKeys.UPDATE_OPS)` should not be used in native TF2. Consult the `tf.keras.layers.BatchNormalization` documentation for further information. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. 
    Before:

    ```python
    x_norm = tf.compat.v1.layers.batch_normalization(x)
    ```

    After:

    To migrate code using TF1 functional layers use the [Keras Functional API]
    (https://www.tensorflow.org/guide/keras/functional):

    ```python
    x = tf.keras.Input(shape=(28, 28, 1),)
    y = tf.keras.layers.BatchNormalization()(x)
    model = tf.keras.Model(x, y)
    ```

    #### How to Map Arguments

    TF1 Arg Name              | TF2 Arg Name              | Note
    :------------------------ | :------------------------ | :---------------
    `name`                    | `name`                    | Layer base class
    `trainable`               | `trainable`               | Layer base class
    `axis`                    | `axis`                    | -
    `momentum`                | `momentum`                | -
    `epsilon`                 | `epsilon`                 | -
    `center`                  | `center`                  | -
    `scale`                   | `scale`                   | -
    `beta_initializer`        | `beta_initializer`        | -
    `gamma_initializer`       | `gamma_initializer`       | -
    `moving_mean_initializer` | `moving_mean_initializer` | -
    `beta_regularizer`        | `beta_regularizer`        | -
    `gamma_regularizer`       | `gamma_regularizer`       | -
    `beta_constraint`         | `beta_constraint`         | -
    `gamma_constraint`        | `gamma_constraint`        | -
    `renorm`                  | Not supported             | -
    `renorm_clipping`         | Not supported             | -
    `renorm_momentum`         | Not supported             | -
    `fused`                   | Not supported             | -
    `virtual_batch_size`      | Not supported             | -
    `adjustment`              | Not supported             | -

    @end_compatibility
    """
    warnings.warn(
        "`tf.layers.batch_normalization` is deprecated and "
        "will be removed in a future version. "
        "Please use `tf.keras.layers.BatchNormalization` instead. "
        "In particular, `tf.control_dependencies(tf.GraphKeys.UPDATE_OPS)` "
        "should not be used (consult the `tf.keras.layers.BatchNormalization` "
        "documentation).",
        stacklevel=2,
    )
    layer = BatchNormalization(
        axis=axis,
        momentum=momentum,
        epsilon=epsilon,
        center=center,
        scale=scale,
        beta_initializer=beta_initializer,
        gamma_initializer=gamma_initializer,
        moving_mean_initializer=moving_mean_initializer,
        moving_variance_initializer=moving_variance_initializer,
        beta_regularizer=beta_regularizer,
        gamma_regularizer=gamma_regularizer,
        beta_constraint=beta_constraint,
        gamma_constraint=gamma_constraint,
        renorm=renorm,
        renorm_clipping=renorm_clipping,
        renorm_momentum=renorm_momentum,
        fused=fused,
        trainable=trainable,
        virtual_batch_size=virtual_batch_size,
        adjustment=adjustment,
        name=name,
        _reuse=reuse,
        _scope=name,
    )
    return layer(inputs, training=training)


# Aliases

BatchNorm = BatchNormalization
batch_norm = batch_normalization
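The migration tables above map argument-for-argument, so the before/after pair extends naturally to a complete model. The sketch below is a minimal illustration rather than part of the module; the input shape, head, and optimizer are placeholder choices, and it assumes a standard TF2 installation where `tf.keras.layers.BatchNormalization` updates its moving statistics automatically (no `UPDATE_OPS` bookkeeping).

```python
import tensorflow as tf

# Native TF2 equivalent of the legacy functional layer above. The moving
# mean/variance updates run automatically inside fit(); there is no need
# to collect tf.GraphKeys.UPDATE_OPS or group them with the train op.
inputs = tf.keras.Input(shape=(28, 28, 1))
x = tf.keras.layers.BatchNormalization()(inputs)
x = tf.keras.layers.Flatten()(x)
outputs = tf.keras.layers.Dense(10)(x)
model = tf.keras.Model(inputs, outputs)
model.compile(
    optimizer="sgd",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)

# When calling the model directly, pass training=False at inference so the
# layer normalizes with its moving statistics rather than batch statistics.
sample = tf.random.normal([4, 28, 28, 1])
_ = model(sample, training=False)
```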
tf-keras/tf_keras/legacy_tf_layers/normalization.py/0
{ "file_path": "tf-keras/tf_keras/legacy_tf_layers/normalization.py", "repo_id": "tf-keras", "token_count": 8887 }
211
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""F-Score metrics."""

import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export

from tf_keras.dtensor import utils as dtensor_utils
from tf_keras.metrics import base_metric


# Adapted from TF-Addons implementation.
@keras_export("keras.metrics.FBetaScore")
class FBetaScore(base_metric.Metric):
    """Computes F-Beta score.

    This is the weighted harmonic mean of precision and recall. Its output
    range is `[0, 1]`. It works for both multi-class and multi-label
    classification.

    It is defined as:

    ```python
    b2 = beta ** 2
    f_beta_score = (1 + b2) * (precision * recall) / (precision * b2 + recall)
    ```

    Args:
        average: Type of averaging to be performed across per-class results
            in the multi-class case.
            Acceptable values are `None`, `"micro"`, `"macro"` and
            `"weighted"`. Default value is `None`.
            If `None`, no averaging is performed and `result()` will return
            the score for each class.
            If `"micro"`, compute metrics globally by counting the total
            true positives, false negatives and false positives.
            If `"macro"`, compute metrics for each label,
            and return their unweighted mean.
            This does not take label imbalance into account.
            If `"weighted"`, compute metrics for each label,
            and return their average weighted by support
            (the number of true instances for each label).
            This alters `"macro"` to account for label imbalance.
            It can result in a score that is not between precision and
            recall.
        beta: Determines the weight given to recall in the harmonic mean
            between precision and recall (see pseudocode equation above).
            Default value is 1.
        threshold: Elements of `y_pred` greater than `threshold` are
            converted to be 1, and the rest 0. If `threshold` is
            `None`, the argmax of `y_pred` is converted to 1, and the rest
            to 0.
        name: Optional. String name of the metric instance.
        dtype: Optional. Data type of the metric result.

    Returns:
        F-Beta Score: float.

    Example:

    >>> metric = tf.keras.metrics.FBetaScore(beta=2.0, threshold=0.5)
    >>> y_true = np.array([[1, 1, 1],
    ...                    [1, 0, 0],
    ...                    [1, 1, 0]], np.int32)
    >>> y_pred = np.array([[0.2, 0.6, 0.7],
    ...                    [0.2, 0.6, 0.6],
    ...                    [0.6, 0.8, 0.0]], np.float32)
    >>> metric.update_state(y_true, y_pred)
    >>> result = metric.result()
    >>> result.numpy()
    array([0.3846154 , 0.90909094, 0.8333334 ], dtype=float32)
    """

    @dtensor_utils.inject_mesh
    def __init__(
        self,
        average=None,
        beta=1.0,
        threshold=None,
        name="fbeta_score",
        dtype=None,
    ):
        super().__init__(name=name, dtype=dtype)
        if average not in (None, "micro", "macro", "weighted"):
            raise ValueError(
                "Invalid `average` argument value. Expected one of: "
                "{None, 'micro', 'macro', 'weighted'}. "
                f"Received: average={average}"
            )

        if not isinstance(beta, float):
            raise ValueError(
                "Invalid `beta` argument value. "
                "It should be a Python float. 
" f"Received: beta={beta} of type '{type(beta)}'" ) if beta <= 0.0: raise ValueError( "Invalid `beta` argument value. " "It should be > 0. " f"Received: beta={beta}" ) if threshold is not None: if not isinstance(threshold, float): raise ValueError( "Invalid `threshold` argument value. " "It should be a Python float. " f"Received: threshold={threshold} " f"of type '{type(threshold)}'" ) if threshold > 1.0 or threshold <= 0.0: raise ValueError( "Invalid `threshold` argument value. " "It should verify 0 < threshold <= 1. " f"Received: threshold={threshold}" ) self.average = average self.beta = beta self.threshold = threshold self.axis = None self.built = False if self.average != "micro": self.axis = 0 def build(self, y_true_shape, y_pred_shape): if len(y_pred_shape) != 2 or len(y_true_shape) != 2: raise ValueError( "FBetaScore expects 2D inputs with shape " "(batch_size, output_dim). Received input " f"shapes: y_pred.shape={y_pred_shape} and " f"y_true.shape={y_true_shape}." ) if y_pred_shape[-1] is None or y_true_shape[-1] is None: raise ValueError( "FBetaScore expects 2D inputs with shape " "(batch_size, output_dim), with output_dim fully " "defined (not None). Received input " f"shapes: y_pred.shape={y_pred_shape} and " f"y_true.shape={y_true_shape}." ) num_classes = y_pred_shape[-1] if self.average != "micro": init_shape = [num_classes] else: init_shape = [] def _add_zeros_weight(name): return self.add_weight( name, shape=init_shape, initializer="zeros", dtype=self.dtype, ) self.true_positives = _add_zeros_weight("true_positives") self.false_positives = _add_zeros_weight("false_positives") self.false_negatives = _add_zeros_weight("false_negatives") self.intermediate_weights = _add_zeros_weight("intermediate_weights") self.built = True def update_state(self, y_true, y_pred, sample_weight=None): y_true = tf.convert_to_tensor(y_true, dtype=self.dtype) y_pred = tf.convert_to_tensor(y_pred, dtype=self.dtype) if not self.built: self.build(y_true.shape, y_pred.shape) if self.threshold is None: threshold = tf.reduce_max(y_pred, axis=-1, keepdims=True) # make sure [0, 0, 0] doesn't become [1, 1, 1] # Use abs(x) > eps, instead of x != 0 to check for zero y_pred = tf.logical_and(y_pred >= threshold, tf.abs(y_pred) > 1e-9) else: y_pred = y_pred > self.threshold y_pred = tf.cast(y_pred, dtype=self.dtype) def _weighted_sum(val, sample_weight): if sample_weight is not None: val = tf.math.multiply(val, tf.expand_dims(sample_weight, 1)) return tf.reduce_sum(val, axis=self.axis) self.true_positives.assign_add( _weighted_sum(y_pred * y_true, sample_weight) ) self.false_positives.assign_add( _weighted_sum(y_pred * (1 - y_true), sample_weight) ) self.false_negatives.assign_add( _weighted_sum((1 - y_pred) * y_true, sample_weight) ) self.intermediate_weights.assign_add( _weighted_sum(y_true, sample_weight) ) def result(self): precision = tf.math.divide_no_nan( self.true_positives, self.true_positives + self.false_positives ) recall = tf.math.divide_no_nan( self.true_positives, self.true_positives + self.false_negatives ) mul_value = precision * recall add_value = (tf.math.square(self.beta) * precision) + recall mean = tf.math.divide_no_nan(mul_value, add_value) f1_score = mean * (1 + tf.math.square(self.beta)) if self.average == "weighted": weights = tf.math.divide_no_nan( self.intermediate_weights, tf.reduce_sum(self.intermediate_weights), ) f1_score = tf.reduce_sum(f1_score * weights) elif self.average is not None: # [micro, macro] f1_score = tf.reduce_mean(f1_score) return f1_score def get_config(self): 
"""Returns the serializable config of the metric.""" config = { "average": self.average, "beta": self.beta, "threshold": self.threshold, } base_config = super().get_config() return {**base_config, **config} def reset_state(self): for v in self.variables: v.assign(tf.zeros(v.shape, dtype=v.dtype)) @keras_export("keras.metrics.F1Score") class F1Score(FBetaScore): r"""Computes F-1 Score. This is the harmonic mean of precision and recall. Its output range is `[0, 1]`. It works for both multi-class and multi-label classification. It is defined as: ```python f1_score = 2 * (precision * recall) / (precision + recall) ``` Args: average: Type of averaging to be performed on data. Acceptable values are `None`, `"micro"`, `"macro"` and `"weighted"`. Default value is `None`. If `None`, no averaging is performed and `result()` will return the score for each class. If `"micro"`, compute metrics globally by counting the total true positives, false negatives and false positives. If `"macro"`, compute metrics for each label, and return their unweighted mean. This does not take label imbalance into account. If `"weighted"`, compute metrics for each label, and return their average weighted by support (the number of true instances for each label). This alters `"macro"` to account for label imbalance. It can result in an score that is not between precision and recall. threshold: Elements of `y_pred` greater than `threshold` are converted to be 1, and the rest 0. If `threshold` is `None`, the argmax of `y_pred` is converted to 1, and the rest to 0. name: Optional. String name of the metric instance. dtype: Optional. Data type of the metric result. Returns: F-1 Score: float. Example: >>> metric = tf.keras.metrics.F1Score(threshold=0.5) >>> y_true = np.array([[1, 1, 1], ... [1, 0, 0], ... [1, 1, 0]], np.int32) >>> y_pred = np.array([[0.2, 0.6, 0.7], ... [0.2, 0.6, 0.6], ... [0.6, 0.8, 0.0]], np.float32) >>> metric.update_state(y_true, y_pred) >>> result = metric.result() >>> result.numpy() array([0.5 , 0.8 , 0.6666667], dtype=float32) """ @dtensor_utils.inject_mesh def __init__( self, average=None, threshold=None, name="f1_score", dtype=None, ): super().__init__( average=average, beta=1.0, threshold=threshold, name=name, dtype=dtype, ) def get_config(self): base_config = super().get_config() del base_config["beta"] return base_config
tf-keras/tf_keras/metrics/f_score_metrics.py/0
{ "file_path": "tf-keras/tf_keras/metrics/f_score_metrics.py", "repo_id": "tf-keras", "token_count": 5569 }
212
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains AutoCastVariable, a variable which automatically casts itself.""" import threading from typing import Optional import tensorflow.compat.v2 as tf from tf_keras.distribute import distributed_training_utils # _autocast_dtype.dtype is the dtype AutoCastVariables should be cast to, or # None if AutoCastVariables should not be cast. _autocast_dtype = threading.local() def numpy_text(tensor, is_repr=False): """Human readable representation of a tensor's numpy value.""" if tensor.dtype.is_numpy_compatible: text = repr(tensor._numpy()) if is_repr else str(tensor._numpy()) else: text = "<unprintable>" if "\n" in text: text = "\n" + text return text class AutoCastVariableSpec(tf.types.experimental.TraceType): """TraceType for AutoCastVariableSpec for tracing with tf.function. This class implements the Type for AutoCastVariable used in tracing. """ def __init__(self, value): self._value = value def is_subtype_of(self, other) -> bool: """If the other spec is the same as `self`, return True.""" return self == other def most_specific_common_supertype(self, others): """`self` is the common supertype if all input types match it.""" return self if all(self == other for other in others) else None def placeholder_value(self, placeholder_context=None): """Use the AutoCastVariable value itself as a placeholder.""" return self._value def cast(self, value, _): return value def to_tensors(self, value): return [] def __hash__(self) -> int: return hash(id(self._value)) def __eq__(self, other) -> bool: return self is other class AutoCastVariable(tf.Variable, tf.__internal__.types.Tensor): """Variable that casts itself to a different dtype in applicable contexts. This class wraps a floating-point `tf.Variable`. It emulates the variable interface and delegates to the wrapped variable, but it additionally will cast the wrapped variable under an `enable_auto_cast_variables(dtype)` context manager. For example: >>> v = tf.Variable(1.0, dtype=tf.float32) >>> v = AutoCastVariable(v) >>> tf.identity(v).dtype tf.float32 >>> with enable_auto_cast_variables(tf.float16): ... tf.identity(v).dtype tf.float16 The purpose of this class is to allow TF-Keras layers to create variables in float32, and automatically cast them to float16 or bfloat16 when the layer is called. """ def __init__(self, variable): """Creates an AutoCastVariable instance. Args: variable: A floating-point resource variable to wrap. 
Raises: ValueError: If `variable` is not a floating-point resource variable """ if not isinstance(variable, tf.Variable): raise ValueError( "variable must be of type tf.ResourceVariable, but got: %s" % variable ) if not variable.dtype.is_floating: raise ValueError( "variable must be a floating point variable but has type: %s" % variable.dtype.name ) self._variable = variable # 'delegate' means AutoCastVariable.op return self._variable.op, which # will raise an AttributeError in Eager (as intended). If set to any # other value, AutoCastVariable.op returns that value instead, which is # used to set the op attribute in AutoCastVariable.assign(). self._op = "delegate" def _should_cast(self): """Returns True if this variable should be casted when accessed.""" autocast_dtype = getattr(_autocast_dtype, "dtype", None) return autocast_dtype is not None and self.dtype != autocast_dtype @property def dtype(self): """The dtype of the underlying variable, before any casts are done.""" return self._variable.dtype @property def true_dtype(self): """Deprecated alias of `dtype`.""" return self._variable.dtype @property def _cast_dtype(self): dtype = getattr(_autocast_dtype, "dtype", None) return dtype or self._variable.dtype def value(self): val = self._variable.value() if not self._should_cast(): return val return tf.cast(val, self._cast_dtype) def read_value(self): val = self._variable.read_value() return tf.cast(val, self._cast_dtype) def sparse_read(self, indices, name=None): """Reads the value of this variable sparsely, using `gather`.""" val = self._variable.sparse_read(indices, name=name) return tf.cast(val, self._cast_dtype) def gather_nd(self, indices, name=None): """Gather slices of the variable into a Tensor.""" val = self._variable.gather_nd(indices, name=name) return tf.cast(val, self._cast_dtype) def __getattr__(self, name): return getattr(self._variable, name) def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False): """Converts this variable to a tensor.""" if as_ref: # This ValueError should not occur in practice since it is # impossible to pass as_ref=True using public APIs. raise ValueError( "Cannot convert AutoCastVariable to a tensor if " "as_ref=True is passed to convert_to_tensor" ) if not self._should_cast(): return tf.convert_to_tensor(self._variable, dtype=dtype, name=name) if dtype is not None and not dtype.is_compatible_with(self._cast_dtype): raise ValueError( "Incompatible type conversion requested to type {!r} for " "AutoCastVariable which is casted to type {!r}".format( dtype.name, self._cast_dtype.name ) ) val = tf.convert_to_tensor( self._variable, dtype=self._variable.dtype, name=name ) return tf.cast(val, self._cast_dtype) def __tf_tensor__( self, dtype: Optional[tf.dtypes.DType] = None, name: Optional[str] = None, ) -> tf.Tensor: return self._dense_var_to_tensor(dtype=dtype, name=name) def _should_act_as_resource_variable(self): """Pass resource_variable_ops.is_resource_variable check.""" pass def __repr__(self): if tf.executing_eagerly() and not self._in_graph_mode: repr_str = ( "<AutoCastVariable '{v.name}' shape={v.shape} " "dtype={v.dtype.name} dtype_to_cast_to={v._cast_dtype.name}, " "numpy={np_repr}>" ) return repr_str.format( v=self, np_repr=numpy_text(self.read_value(), is_repr=True) ) else: repr_str = ( "<AutoCastVariable '{v.name}' shape={v.shape} " "dtype={v.dtype.name} dtype_to_cast_to={v._cast_dtype.name}>" ) return repr_str.format(v=self) # Method delegations: We delegate the following methods to self._variable. 
# Each of these methods simply calls the same method on self._variable. The
    # base Variable raises NotImplementedError for most of these, so we must
    # override them.
    #
    # We do not define the following methods from Variable for the following
    # reasons:
    # * 'count_up_to': This method only applies to int variables, which
    #   cannot be wrapped with an AutoCastVariable.
    # * 'ref': Instead we inherit the definition from Variable.
    #   If we defined and delegated to Variable, the ref of an
    #   AutoCastVariable would be the same as the ref of the underlying
    #   variable, which would be strange as they are different Python objects.

    def set_shape(self, shape):
        return self._variable.set_shape(shape)

    @property
    def trainable(self):
        return self._variable.trainable

    @property
    def synchronization(self):
        return self._variable.synchronization

    @property
    def aggregation(self):
        return self._variable.aggregation

    def eval(self, session=None):
        return self._variable.eval(session)

    def initialized_value(self):
        return self._variable.initialized_value()

    @property
    def initial_value(self):
        return self._variable.initial_value

    @property
    def constraint(self):
        return self._variable.constraint

    def _apply_assign_update(
        self, update_fn, value, use_locking=None, name=None, read_value=True
    ):
        # TODO(b/146181571): This logic can be simplified once
        # DistributedVariable.assign returns a DistributedVariable. Currently
        # for MirroredStrategy, it returns a Mirrored value.
        if tf.compat.v1.executing_eagerly_outside_functions():
            assign_op = update_fn(value, use_locking, name, False)
            if read_value:
                # We create a new AutoCastVariable with the same underlying
                # tf.Variable. The new AutoCastVariable is identical except the
                # 'op' attribute is defined. This matches the behavior of
                # tf.Variable.assign.
                var = create_autocast_variable(self._variable)
                var._op = assign_op
                return var
            return assign_op

        # Fallback to wrapping the returned variable in graph mode if possible
        assign_var = update_fn(value, use_locking, name, read_value)
        if read_value and tf.__internal__.ops.is_resource_variable(assign_var):
            return create_autocast_variable(assign_var)
        return assign_var

    def _apply_update(self, update_fn, *args, **kwargs):
        update_var = update_fn(*args, **kwargs)
        if tf.compat.v1.executing_eagerly_outside_functions():
            return self

        # Fallback to wrapping the returned variable in graph mode if possible
        if tf.__internal__.ops.is_resource_variable(update_var):
            return create_autocast_variable(update_var)
        return update_var

    def assign(self, value, use_locking=None, name=None, read_value=True):
        return self._apply_assign_update(
            self._variable.assign, value, use_locking, name, read_value
        )

    def assign_add(self, delta, use_locking=None, name=None, read_value=True):
        return self._apply_assign_update(
            self._variable.assign_add, delta, use_locking, name, read_value
        )

    def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
        return self._apply_assign_update(
            self._variable.assign_sub, delta, use_locking, name, read_value
        )

    def scatter_sub(self, sparse_delta, use_locking=False, name=None):
        return self._apply_update(
            self._variable.scatter_sub, sparse_delta, use_locking, name
        )

    def scatter_add(self, sparse_delta, use_locking=False, name=None):
        return self._apply_update(
            self._variable.scatter_add, sparse_delta, use_locking, name
        )

    def scatter_max(self, sparse_delta, use_locking=False, name=None):
        return self._apply_update(
            self._variable.scatter_max, sparse_delta, use_locking, name
        )

    def scatter_min(self, sparse_delta, use_locking=False, name=None):
        return 
self._apply_update( self._variable.scatter_min, sparse_delta, use_locking, name ) def scatter_mul(self, sparse_delta, use_locking=False, name=None): return self._apply_update( self._variable.scatter_mul, sparse_delta, use_locking, name ) def scatter_div(self, sparse_delta, use_locking=False, name=None): return self._apply_update( self._variable.scatter_div, sparse_delta, use_locking, name ) def scatter_update(self, sparse_delta, use_locking=False, name=None): return self._apply_update( self._variable.scatter_update, sparse_delta, use_locking, name ) def batch_scatter_update(self, sparse_delta, use_locking=False, name=None): return self._apply_update( self._variable.batch_scatter_update, sparse_delta, use_locking, name ) def scatter_nd_sub(self, indices, updates, name=None): return self._apply_update( self._variable.scatter_nd_sub, indices, updates, name ) def scatter_nd_add(self, indices, updates, name=None): return self._apply_update( self._variable.scatter_nd_add, indices, updates, name ) def scatter_nd_update(self, indices, updates, name=None): return self._apply_update( self._variable.scatter_nd_update, indices, updates, name ) def load(self, value, session=None): return self._variable.load(value, session) @property def name(self): return self._variable.name @property def _shared_name(self): return self._variable._shared_name @property def initializer(self): return self._variable.initializer @property def device(self): return self._variable.device @property def op(self): if self._op == "delegate": return self._variable.op return self._op def _as_graph_element(self): graph_element = self._variable._as_graph_element() if graph_element is None: return self._op return graph_element @property def graph(self): return self._variable.graph @property def shape(self): return self._variable.shape def get_shape(self): return self._variable.get_shape() def __tf_tracing_type__(self, context): return AutoCastVariableSpec(self) def _gather_saveables_for_checkpoint(self): # By delegating this method to the wrapped variable, checkpoints with # AutoCastVariables are identical to checkpoints with normal variables. # Therefore models checkpointed with AutoCastVariables can be restored # on models with normal variables, and vice versa. return self._variable._gather_saveables_for_checkpoint() def _export_to_saved_model_graph( self, object_map, tensor_map, options, **kwargs ): # By delegating this method to the wrapped variable, SavedModel with # AutoCastVariables are identical to SavedModel with normal variables. resource_list = self._variable._export_to_saved_model_graph( object_map, tensor_map, options, **kwargs ) object_map[self] = object_map[self._variable] return resource_list def _copy_trackable_to_cpu(self, object_map): """For implementing `Trackable`.""" # Create a copy of `self._variable` to object_map, then create a new # copy of self that wraps the **copy** of `self._variable`. # When updating value, only the lowest-level variable will actually # update, since `AutoCastVariable` here is like a shell. self._variable._copy_trackable_to_cpu( object_map ) # pylint:disable=protected-access if self not in object_map: # If not populated already, populate self into the object map object_map[self] = AutoCastVariable(object_map[self._variable]) # TODO(reedwm): Maybe encode the fact the variable is an AutoCastVariable in # to_proto(). 
def to_proto(self, export_scope=None): return self._variable.to_proto(export_scope) def from_proto(self, variable_def, import_scope=None): return self._variable.from_proto(variable_def, import_scope) # Delegate the private attributes _handle_name and _initializer_op to # self._variable. SavedModel sets these attributes when loading a model. For # example, it sets _handle_name here: # https://github.com/tensorflow/tensorflow/blob/db26bd574fa95b5bdd53c08463dd19407cc0297e/tensorflow/python/tf_keras/saving/saved_model/load.py#L211 # We need to expose these attributes on AutoCastVariable as well for # SavedModel to work properly. # TODO(reedwm/kathywu): Find a better way to support SavedModel. Exposing # private attributes is hacky and difficult to maintain. @property def _handle_name(self): return self._variable._handle_name @_handle_name.setter def _handle_name(self, handle_name): self._variable._handle_name = handle_name @property def _initializer_op(self): return self._variable._initializer_op @_initializer_op.setter def _initializer_op(self, initializer_op): self._variable._initializer_op = initializer_op # Operator overloads: # Note we only overload operators that support floating-point types, as # non-float variables cannot be wrapped with an AutoCastVariable. # Also note: We call read_value() instead of value(), because value() causes # gradients not to work properly when TPUStrategy is used: b/143380936 def __add__(self, o): return self.read_value() + o def __radd__(self, o): return o + self.read_value() def __sub__(self, o): return self.read_value() - o def __rsub__(self, o): return o - self.read_value() def __mul__(self, o): return self.read_value() * o def __rmul__(self, o): return o * self.read_value() def __truediv__(self, o): return self.read_value() / o def __rtruediv__(self, o): return o / self.read_value() def __floordiv__(self, o): return self.read_value() // o def __rfloordiv__(self, o): return o // self.read_value() def __mod__(self, o): return self.read_value() % o def __rmod__(self, o): return o % self.read_value() def __lt__(self, o): return self.read_value() < o def __le__(self, o): return self.read_value() <= o def __gt__(self, o): return self.read_value() > o def __ge__(self, o): return self.read_value() >= o def __getitem__(self, o): return self.read_value()[o] def __pow__(self, o, modulo=None): return pow(self.read_value(), o, modulo) def __rpow__(self, o): return pow(o, self.read_value()) def __neg__(self): return -self.read_value() def __abs__(self): return abs(self.read_value()) def __div__(self, o): try: return self.read_value().__div__(o) except AttributeError: # See # https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __rdiv__(self, o): try: return self.read_value().__rdiv__(o) except AttributeError: # See # https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __matmul__(self, o): try: return self.read_value().__matmul__(o) except AttributeError: # See # https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __rmatmul__(self, o): try: return self.read_value().__rmatmul__(o) except AttributeError: # See # https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented tf.register_tensor_conversion_function( AutoCastVariable, AutoCastVariable._dense_var_to_tensor ) def create_autocast_variable(variable): """Creates an AutoCastVariable that wraps another variable. This typically just returns `AutoCastVariable(variable)`. 
But, if the variable is a DistributedVariable or one of its subclasses, we instead dynamically create a class that subclasses from both AutoCastVariable and variable.__class__. This is so the returned variable will still pass `isinstance(variable, variable.__class__)`, which is required for DistributedVariables and its subclasses to work properly. Args: variable: A floating-point resource variable to wrap. Returns: An AutoCastVariable that wraps the variable. """ if not distributed_training_utils.is_distributed_variable(variable): return AutoCastVariable(variable) class AutoCastDistributedVariable(AutoCastVariable, variable.__class__): """An AutoCastVariable that also subclasses from variable.__class__. variable.__class__ is either a DistributedVariable or an AggregatingVariable. """ def __repr__(self): return ( "<AutoCastDistributedVariable dtype={v.dtype.name} " "dtype_to_cast_to={v._cast_dtype.name} " "inner_variable={v._variable}>" ).format(v=self) return AutoCastDistributedVariable(variable) class enable_auto_cast_variables: """Context manager which enables the autocasting of `AutoCastVariable`s. Under this context manager, `AutoCastVariable`s will be cast to `dtype` if `dtype` is floating-point. Otherwise, `AutoCastVariable`s will not be cast. """ __slots__ = ["_dtype", "_prev_dtype"] def __init__(self, dtype): if dtype and not dtype.is_floating: dtype = None self._dtype = dtype def __enter__(self): self._prev_dtype = getattr(_autocast_dtype, "dtype", None) _autocast_dtype.dtype = self._dtype def __exit__(self, type_arg, value_arg, traceback_arg): _autocast_dtype.dtype = self._prev_dtype
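To make the casting contract concrete, here is a small sketch that exercises `create_autocast_variable` together with `enable_auto_cast_variables`. It assumes these internal helpers are importable from `tf_keras.mixed_precision.autocast_variable` (paths may differ across packagings); user code would normally trigger the same behavior indirectly through a mixed-precision dtype policy.

```python
import tensorflow as tf
from tf_keras.mixed_precision import autocast_variable

# Wrap a float32 variable; reads are cast only inside the context manager.
v = autocast_variable.create_autocast_variable(tf.Variable(1.0))
print(tf.identity(v).dtype)  # float32: no active autocast dtype

with autocast_variable.enable_auto_cast_variables(tf.float16):
    print(tf.identity(v).dtype)  # float16: reads are cast on access
    print(v.dtype)               # float32: the storage dtype is unchanged

# Assignments always happen in the storage dtype, so updates stay float32.
v.assign_add(1.0)
print(v.read_value().dtype)  # float32 again outside the context
```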
tf-keras/tf_keras/mixed_precision/autocast_variable.py/0
{ "file_path": "tf-keras/tf_keras/mixed_precision/autocast_variable.py", "repo_id": "tf-keras", "token_count": 8979 }
213
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adafactor optimizer implementation."""

import tensorflow.compat.v2 as tf

from tf_keras.optimizers import optimizer
from tf_keras.optimizers.schedules import learning_rate_schedule
from tf_keras.saving.object_registration import register_keras_serializable

# isort: off
from tensorflow.python.util.tf_export import keras_export


@register_keras_serializable()
@keras_export(
    "keras.optimizers.Adafactor",
    "keras.optimizers.experimental.Adafactor",
    v1=[],
)
class Adafactor(optimizer.Optimizer):
    """Optimizer that implements the Adafactor algorithm.

    Adafactor is commonly used in NLP tasks, and has the advantage
    of taking less memory because it only saves partial information of
    previous gradients.

    The default argument setup is based on the original paper (see reference).
    When gradients are of dimension > 2, Adafactor optimizer will delete the
    last 2 dimensions separately in its accumulator variables.

    Args:
        learning_rate: Initial value for the learning rate: either a floating
            point value, or a
            `tf.keras.optimizers.schedules.LearningRateSchedule` instance.
            Defaults to 0.001.
        beta_2_decay: float, defaults to -0.8. The decay rate of `beta_2`.
        epsilon_1: float, defaults to 1e-30. A small offset to keep
            denominator away from 0.
        epsilon_2: float, defaults to 1e-3. A small offset to avoid learning
            rate becoming too small by time.
        clip_threshold: float, defaults to 1.0. Clipping threshold. This is a
            part of Adafactor algorithm, independent from `clipnorm`,
            `clipvalue` and `global_clipnorm`.
        relative_step: bool, defaults to True. If `learning_rate` is a
            constant and `relative_step=True`, learning rate will be adjusted
            based on current iterations. This is a default learning rate decay
            in Adafactor.
        {{base_optimizer_keyword_args}}

    Reference:
        - [Shazeer, Noam et al., 2018](https://arxiv.org/abs/1804.04235).

    """

    def __init__(
        self,
        learning_rate=0.001,
        beta_2_decay=-0.8,
        epsilon_1=1e-30,
        epsilon_2=1e-3,
        clip_threshold=1.0,
        relative_step=True,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        jit_compile=True,
        name="Adafactor",
        **kwargs,
    ):
        super().__init__(
            name=name,
            weight_decay=weight_decay,
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            jit_compile=jit_compile,
            **kwargs,
        )
        self._learning_rate = self._build_learning_rate(learning_rate)
        self.beta_2_decay = beta_2_decay
        self.epsilon_1 = epsilon_1
        self.epsilon_2 = epsilon_2
        self.clip_threshold = clip_threshold
        self.relative_step = relative_step

    def build(self, var_list):
        """Initialize optimizer variables.

        Adafactor optimizer has 3 types of variables: row sum accumulators
        (`r`), column sum accumulators (`c`), and factored second-moment
        estimates (`v`).

        Args:
            var_list: list of model variables to build Adafactor variables
                on.
""" super().build(var_list) if hasattr(self, "_built") and self._built: return self._built = True self._r = [] self._c = [] self._v = [] for var in var_list: if len(var.shape) < 2: # Don't factor if variable is of dimension < 2, but we still # need to create dummy variables as placeholder. self._r.append(tf.Variable(0, name=f"r/{var._shared_name}")) self._c.append(tf.Variable(0, name=f"r/{var._shared_name}")) else: # Always factor the last 2 dimenstions. r_shape = var.shape[:-1] c_shape = var.shape[:-2] + var.shape[-1] self._r.append( self.add_variable( shape=r_shape, dtype=var.dtype, name=f"r/{var._shared_name}", ) ) self._c.append( self.add_variable( shape=c_shape, dtype=var.dtype, name=f"c/{var._shared_name}", ) ) self._v.append( self.add_variable_from_reference( model_variable=var, variable_name="v" ) ) def _rms(self, x): return tf.sqrt(tf.reduce_mean(tf.square(x))) def update_step(self, gradient, variable): """Update step given gradient and the associated model variable.""" lr = tf.cast(self.learning_rate, variable.dtype) epsilon_2 = tf.cast(self.epsilon_2, variable.dtype) one = tf.cast(1.0, variable.dtype) local_step = tf.cast(self.iterations + 1, variable.dtype) if ( not isinstance( self._learning_rate, learning_rate_schedule.LearningRateSchedule ) and self.relative_step ): # If `relative_step=True` and learning rate is a constant, we # apply the relative step algorithm. lr = tf.minimum(lr, tf.math.rsqrt(local_step)) var_key = self._var_key(variable) r = self._r[self._index_dict[var_key]] c = self._c[self._index_dict[var_key]] v = self._v[self._index_dict[var_key]] rho_t = tf.minimum(lr, tf.math.rsqrt(local_step)) alpha_t = tf.maximum(epsilon_2, self._rms(variable)) * rho_t regulated_grad_square = tf.square(gradient) + self.epsilon_1 beta_2_t = 1 - tf.pow(local_step, self.beta_2_decay) if len(variable.shape) >= 2: # `r` deletes the last dimension of gradient, so it is of shape # `gradient.shape[:-1]`. r.assign( beta_2_t * r + (1 - beta_2_t) * tf.reduce_mean(regulated_grad_square, axis=-1) ) # `c` deletes the second last dimension of gradient, so it is of # shape `gradient.shape[:-2] + gradient.shape[-1]`. c.assign( beta_2_t * c + (1 - beta_2_t) * tf.reduce_mean(regulated_grad_square, axis=-2) ) v.assign( tf.expand_dims( r / tf.reduce_mean(r, axis=-1, keepdims=True), axis=-1 ) * tf.expand_dims(c, -2) ) else: v.assign(beta_2_t * v + (1 - beta_2_t) * regulated_grad_square) # `convert_to_tensor` unifies the handling of sparse and dense grads. u_t = tf.convert_to_tensor(gradient) * tf.math.rsqrt(v) u_t_hat = u_t / tf.maximum(one, (self._rms(u_t) / self.clip_threshold)) variable.assign_add(-alpha_t * u_t_hat) def get_config(self): config = super().get_config() config.update( { "learning_rate": self._serialize_hyperparameter( self._learning_rate ), "beta_2_decay": self.beta_2_decay, "epsilon_1": self.epsilon_1, "epsilon_2": self.epsilon_2, "clip_threshold": self.clip_threshold, "relative_step": self.relative_step, } ) return config Adafactor.__doc__ = Adafactor.__doc__.replace( "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args )
tf-keras/tf_keras/optimizers/adafactor.py/0
{ "file_path": "tf-keras/tf_keras/optimizers/adafactor.py", "repo_id": "tf-keras", "token_count": 4143 }
214
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ftrl-proximal optimizer implementation.""" import tensorflow.compat.v2 as tf from tf_keras.optimizers.legacy import optimizer_v2 # isort: off from tensorflow.python.util.tf_export import keras_export @keras_export( "keras.optimizers.legacy.Ftrl", v1=["keras.optimizers.Ftrl", "keras.optimizers.legacy.Ftrl"], ) class Ftrl(optimizer_v2.OptimizerV2): r"""Optimizer that implements the FTRL algorithm. "Follow The Regularized Leader" (FTRL) is an optimization algorithm developed at Google for click-through rate prediction in the early 2010s. It is most suitable for shallow models with large and sparse feature spaces. The algorithm is described by [McMahan et al., 2013](https://research.google.com/pubs/archive/41159.pdf). The TF-Keras version has support for both online L2 regularization (the L2 regularization described in the paper above) and shrinkage-type L2 regularization (which is the addition of an L2 penalty to the loss function). Initialization: ```python n = 0 sigma = 0 z = 0 ``` Update rule for one variable `w`: ```python prev_n = n n = n + g ** 2 sigma = (sqrt(n) - sqrt(prev_n)) / lr z = z + g - sigma * w if abs(z) < lambda_1: w = 0 else: w = (sgn(z) * lambda_1 - z) / ((beta + sqrt(n)) / alpha + lambda_2) ``` Notation: - `lr` is the learning rate - `g` is the gradient for the variable - `lambda_1` is the L1 regularization strength - `lambda_2` is the L2 regularization strength Check the documentation for the `l2_shrinkage_regularization_strength` parameter for more details when shrinkage is enabled, in which case gradient is replaced with a gradient with shrinkage. Args: learning_rate: A `Tensor`, floating point value, or a schedule that is a `tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate. learning_rate_power: A float value, must be less or equal to zero. Controls how the learning rate decreases during training. Use zero for a fixed learning rate. initial_accumulator_value: The starting value for accumulators. Only zero or positive values are allowed. l1_regularization_strength: A float value, must be greater than or equal to zero. Defaults to `0.0`. l2_regularization_strength: A float value, must be greater than or equal to zero. Defaults to `0.0`. name: Optional name prefix for the operations created when applying gradients. Defaults to `"Ftrl"`. l2_shrinkage_regularization_strength: A float value, must be greater than or equal to zero. This differs from L2 above in that the L2 above is a stabilization penalty, whereas this L2 shrinkage is a magnitude penalty. When input is sparse shrinkage will only happen on the active weights. beta: A float value, representing the beta value from the paper. Defaults to `0.0`. **kwargs: keyword arguments. Allowed arguments are `clipvalue`, `clipnorm`, `global_clipnorm`. 
If `clipvalue` (float) is set, the gradient of each weight is clipped to be no higher than this value. If `clipnorm` (float) is set, the gradient of each weight is individually clipped so that its norm is no higher than this value. If `global_clipnorm` (float) is set the gradient of all weights is clipped so that their global norm is no higher than this value. Reference: - [McMahan et al., 2013]( https://research.google.com/pubs/archive/41159.pdf) """ def __init__( self, learning_rate=0.001, learning_rate_power=-0.5, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0, name="Ftrl", l2_shrinkage_regularization_strength=0.0, beta=0.0, **kwargs, ): super().__init__(name, **kwargs) if initial_accumulator_value < 0.0: raise ValueError( "`initial_accumulator_value` needs to be " "positive or zero. Received: " f"initial_accumulator_value={initial_accumulator_value}." ) if learning_rate_power > 0.0: raise ValueError( "`learning_rate_power` needs to be " "negative or zero. Received: " f"learning_rate_power={learning_rate_power}." ) if l1_regularization_strength < 0.0: raise ValueError( "`l1_regularization_strength` needs to be positive or zero. " "Received: l1_regularization_strength=" f"{l1_regularization_strength}." ) if l2_regularization_strength < 0.0: raise ValueError( "`l2_regularization_strength` needs to be positive or zero. " "Received: l2_regularization_strength=" f"{l2_regularization_strength}." ) if l2_shrinkage_regularization_strength < 0.0: raise ValueError( "`l2_shrinkage_regularization_strength` needs to be positive " "or zero. Received: l2_shrinkage_regularization_strength" f"={l2_shrinkage_regularization_strength}." ) self._set_hyper("learning_rate", learning_rate) self._set_hyper("decay", self._initial_decay) self._set_hyper("learning_rate_power", learning_rate_power) self._set_hyper( "l1_regularization_strength", l1_regularization_strength ) self._set_hyper( "l2_regularization_strength", l2_regularization_strength ) self._set_hyper("beta", beta) self._initial_accumulator_value = initial_accumulator_value self._l2_shrinkage_regularization_strength = ( l2_shrinkage_regularization_strength ) def _create_slots(self, var_list): # Create the "accum" and "linear" slots. for var in var_list: dtype = var.dtype.base_dtype init = tf.compat.v1.constant_initializer( self._initial_accumulator_value, dtype=dtype ) self.add_slot(var, "accumulator", init) self.add_slot(var, "linear") def _prepare_local(self, var_device, var_dtype, apply_state): super()._prepare_local(var_device, var_dtype, apply_state) apply_state[(var_device, var_dtype)].update( dict( learning_rate_power=tf.identity( self._get_hyper("learning_rate_power", var_dtype) ), l1_regularization_strength=tf.identity( self._get_hyper("l1_regularization_strength", var_dtype) ), l2_regularization_strength=tf.identity( self._get_hyper("l2_regularization_strength", var_dtype) ), beta=tf.identity(self._get_hyper("beta", var_dtype)), l2_shrinkage_regularization_strength=tf.cast( self._l2_shrinkage_regularization_strength, var_dtype ), ) ) def _resource_apply_dense(self, grad, var, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = (apply_state or {}).get( (var_device, var_dtype) ) or self._fallback_apply_state(var_device, var_dtype) # Adjust L2 regularization strength to include beta to avoid the # underlying TensorFlow ops needing to include it. 
adjusted_l2_regularization_strength = coefficients[ "l2_regularization_strength" ] + coefficients["beta"] / (2.0 * coefficients["lr_t"]) accum = self.get_slot(var, "accumulator") linear = self.get_slot(var, "linear") if self._l2_shrinkage_regularization_strength <= 0.0: return tf.raw_ops.ResourceApplyFtrl( var=var.handle, accum=accum.handle, linear=linear.handle, grad=grad, lr=coefficients["lr_t"], l1=coefficients["l1_regularization_strength"], l2=adjusted_l2_regularization_strength, lr_power=coefficients["learning_rate_power"], use_locking=self._use_locking, ) else: return tf.raw_ops.ResourceApplyFtrlV2( var=var.handle, accum=accum.handle, linear=linear.handle, grad=grad, lr=coefficients["lr_t"], l1=coefficients["l1_regularization_strength"], l2=adjusted_l2_regularization_strength, l2_shrinkage=coefficients[ "l2_shrinkage_regularization_strength" ], lr_power=coefficients["learning_rate_power"], use_locking=self._use_locking, ) def _resource_apply_sparse(self, grad, var, indices, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = (apply_state or {}).get( (var_device, var_dtype) ) or self._fallback_apply_state(var_device, var_dtype) # Adjust L2 regularization strength to include beta to avoid the # underlying TensorFlow ops needing to include it. adjusted_l2_regularization_strength = coefficients[ "l2_regularization_strength" ] + coefficients["beta"] / (2.0 * coefficients["lr_t"]) accum = self.get_slot(var, "accumulator") linear = self.get_slot(var, "linear") if self._l2_shrinkage_regularization_strength <= 0.0: return tf.raw_ops.ResourceSparseApplyFtrl( var=var.handle, accum=accum.handle, linear=linear.handle, grad=grad, indices=indices, lr=coefficients["lr_t"], l1=coefficients["l1_regularization_strength"], l2=adjusted_l2_regularization_strength, lr_power=coefficients["learning_rate_power"], use_locking=self._use_locking, ) else: return tf.raw_ops.ResourceSparseApplyFtrlV2( var=var.handle, accum=accum.handle, linear=linear.handle, grad=grad, indices=indices, lr=coefficients["lr_t"], l1=coefficients["l1_regularization_strength"], l2=adjusted_l2_regularization_strength, l2_shrinkage=coefficients[ "l2_shrinkage_regularization_strength" ], lr_power=coefficients["learning_rate_power"], use_locking=self._use_locking, ) def get_config(self): config = super().get_config() config.update( { "learning_rate": self._serialize_hyperparameter( "learning_rate" ), "decay": self._initial_decay, "initial_accumulator_value": self._initial_accumulator_value, "learning_rate_power": self._serialize_hyperparameter( "learning_rate_power" ), "l1_regularization_strength": self._serialize_hyperparameter( "l1_regularization_strength" ), "l2_regularization_strength": self._serialize_hyperparameter( "l2_regularization_strength" ), "beta": self._serialize_hyperparameter("beta"), "l2_shrinkage_regularization_strength": self._l2_shrinkage_regularization_strength, # noqa: E501 } ) return config
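The L1 term in the update rule above zeroes a weight outright whenever `abs(z) < lambda_1`, which is what makes FTRL attractive for sparse linear models. The snippet below is a minimal single-variable illustration of that behavior; the learning rate, regularization strength, and quadratic loss are arbitrary choices, not values from the paper.

```python
import tensorflow as tf

opt = tf.keras.optimizers.legacy.Ftrl(
    learning_rate=0.05,
    l1_regularization_strength=0.1,
)
var = tf.Variable([0.5, -0.5, 0.01])

# Minimize a simple quadratic; with L1 regularization, small weights are
# driven to exactly zero rather than merely shrunk toward it.
for _ in range(200):
    opt.minimize(lambda: tf.reduce_sum(tf.square(var)), var_list=[var])
print(var.numpy())
```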
tf-keras/tf_keras/optimizers/legacy/ftrl.py/0
{ "file_path": "tf-keras/tf_keras/optimizers/legacy/ftrl.py", "repo_id": "tf-keras", "token_count": 5730 }
215
"""Tests for calling optimizer on ParameterServerStrategy.""" import os import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.optimizers import adadelta from tf_keras.optimizers import adagrad from tf_keras.optimizers import adam from tf_keras.optimizers import adamax from tf_keras.optimizers import adamw from tf_keras.optimizers import ftrl from tf_keras.optimizers import lion from tf_keras.optimizers import nadam from tf_keras.optimizers import rmsprop from tf_keras.optimizers import sgd from tf_keras.utils import dataset_creator from tf_keras.utils import losses_utils ds_combinations = tf.__internal__.distribute.combinations STRATEGIES = [ ds_combinations.parameter_server_strategy_3worker_2ps_cpu, ds_combinations.parameter_server_strategy_3worker_2ps_1gpu, ] adadelta_fn = tf.__internal__.test.combinations.NamedObject( "adadelta", lambda: adadelta.Adadelta( 0.002, use_ema=True, ema_overwrite_frequency=None ), ) adagrad_fn = tf.__internal__.test.combinations.NamedObject( "adagrad", lambda: adagrad.Adagrad(0.002) ) adam_fn = tf.__internal__.test.combinations.NamedObject( "adam", lambda: adam.Adam(0.002) ) adamax_fn = tf.__internal__.test.combinations.NamedObject( "adamax", lambda: adamax.Adamax(0.002) ) adamw_fn = tf.__internal__.test.combinations.NamedObject( "adamw", lambda: adamw.AdamW(0.002, weight_decay=0.004) ) ftrl_fn = tf.__internal__.test.combinations.NamedObject( "ftrl", lambda: ftrl.Ftrl(0.002) ) lion_fn = tf.__internal__.test.combinations.NamedObject( "lion", lambda: lion.Lion(0.002) ) nadam_fn = tf.__internal__.test.combinations.NamedObject( "experimentnadam", lambda: nadam.Nadam(0.002) ) rmsprop_fn = tf.__internal__.test.combinations.NamedObject( "rmsprop", lambda: rmsprop.RMSprop(0.002) ) sgd_fn = tf.__internal__.test.combinations.NamedObject( "sgdaverage", lambda: sgd.SGD(0.002, use_ema=True, ema_overwrite_frequency=1), ) OPTIMIZER_FN = [ adadelta_fn, adagrad_fn, adam_fn, adamax_fn, adamw_fn, ftrl_fn, lion_fn, nadam_fn, rmsprop_fn, sgd_fn, ] # TODO(b/228209527): Combine this test with optimizer_test after # fixing the NCCL issue. class OptimizerPssTest(tf.test.TestCase, parameterized.TestCase): def _get_model(self): return keras.Sequential( [keras.layers.Input(shape=(1,)), keras.layers.Dense(1)] ) def _get_dataset_fn(self): def dataset_fn(_): x, y = [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0] ds = tf.data.Dataset.from_tensor_slices((x, y)) ds = ds.repeat().batch(6) return ds return dataset_fn def _verify_accumulators_updated(self, optimizer): variables = optimizer.variables for var in variables: if "iteration" not in var.name and "learning_rate" not in var.name: # Find a variable not iteration or learning_rate, and verify its # value is updated (not 0). 
if isinstance(var, tf.__internal__.distribute.ShardedVariable): for shard in var.variables: self.assertNotAllEqual(shard, 0) else: self.assertNotAllEqual(var, 0) @ds_combinations.generate( tf.__internal__.test.combinations.combine( strategy=STRATEGIES, optimizer_fn=OPTIMIZER_FN ) ) def testGetGradientsInModelPss(self, strategy, optimizer_fn): with strategy.scope(): model = self._get_model() optimizer = optimizer_fn() ds_fn = self._get_dataset_fn() if isinstance(strategy, tf.distribute.ParameterServerStrategy): ds = dataset_creator.DatasetCreator(ds_fn) else: ds = ds_fn(None) model.compile(loss="mse", optimizer=optimizer) model.fit(ds, epochs=1, steps_per_epoch=5) self._verify_accumulators_updated(optimizer) @ds_combinations.generate( tf.__internal__.test.combinations.combine( strategy=STRATEGIES, optimizer_fn=OPTIMIZER_FN ) ) def testGetGradientsInCustomTrainingLoopPss(self, strategy, optimizer_fn): coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator( strategy ) with strategy.scope(): model = self._get_model() optimizer = optimizer_fn() def per_worker_dataset_fn(): return strategy.distribute_datasets_from_function( self._get_dataset_fn() ) ds = coordinator.create_per_worker_dataset(per_worker_dataset_fn) @tf.function def train_step(iterator): def replica_fn(data): features, labels = data with tf.GradientTape() as tape: output = model(tf.expand_dims(features, axis=1)) loss = keras.losses.MeanSquaredError( reduction=losses_utils.ReductionV2.NONE )(labels, output) grads = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients( zip(grads, model.trainable_variables) ) strategy.run(replica_fn, args=(next(iterator),)) for _ in range(3): coordinator.schedule(train_step, args=(iter(ds),)) coordinator.join() self.assertEqual(self.evaluate(optimizer.iterations), 3) self._verify_accumulators_updated(optimizer) @ds_combinations.generate( tf.__internal__.test.combinations.combine( strategy=STRATEGIES, shard_config=[ [2, 2], [2, 3], [3, 2], [2, 1], [1, 1], [1, 2], [1, 3], ], ) ) def testCheckpointShardedVariable(self, strategy, shard_config): # Data are embedding indices near shard boundaries for 2 or 3 shards test_indices = [33, 34, 49, 50, 66, 67] def dataset_fn(_): x, y = [[index] for index in test_indices], [1, 1, 1, 0, 0, 0] ds = tf.data.Dataset.from_tensor_slices((x, y)) ds = ds.repeat().batch(6) return ds vocab_size = 100 embed_dim = 32 def get_model(): return keras.Sequential( [ keras.layers.Embedding(vocab_size, embed_dim), keras.layers.Dense(1, activation="sigmoid"), ] ) # Override partitioning if shard_config[0] == 1: strategy._extended._variable_partitioner = None else: strategy._extended._variable_partitioner = ( tf.distribute.experimental.partitioners.FixedShardsPartitioner( shard_config[0] ) ) # Create model and optimizer with strategy.scope(): model = get_model() optimizer = adam.Adam(0.002) model.compile(loss="mse", optimizer=optimizer) model.build(input_shape=(None, 1)) model.optimizer.build(model.trainable_variables) ds = dataset_creator.DatasetCreator(dataset_fn) # Train a bit to update optimizer variables model.fit(ds, epochs=1, steps_per_epoch=5) self._verify_accumulators_updated(optimizer) # Extract optimizer variables to later check they restore properly pre_ckpt_optimizer_values = [] for var in model.optimizer.variables: # Just check the embedding variables if var.shape == [vocab_size, embed_dim]: for index in test_indices: pre_ckpt_optimizer_values.append(var[index]) # Adam has 2 slot variables, momentum and velocity 
self.assertLen(pre_ckpt_optimizer_values, 2 * len(test_indices)) checkpoint_path = os.path.join(self.get_temp_dir(), "model_weights") model.save_weights(checkpoint_path) # Create new model under different sharding and load checkpoint if shard_config[1] == 1: strategy._extended._variable_partitioner = None else: strategy._extended._variable_partitioner = ( tf.distribute.experimental.partitioners.FixedShardsPartitioner( shard_config[1] ) ) with strategy.scope(): model_2 = get_model() optimizer_2 = adam.Adam(0.002) model_2.compile(loss="mse", optimizer=optimizer_2) model_2.build(input_shape=(None, 1)) model_2.optimizer.build(model_2.trainable_variables) model_2.load_weights(checkpoint_path) post_ckpt_optimizer_values = [] for var in model_2.optimizer.variables: if var.shape == [vocab_size, embed_dim]: for index in test_indices: post_ckpt_optimizer_values.append(var[index]) self.assertLen(post_ckpt_optimizer_values, 2 * len(test_indices)) for pre_val, post_val in zip( pre_ckpt_optimizer_values, post_ckpt_optimizer_values ): self.assertAllEqual(pre_val, post_val) # Confirm training still functional ds = dataset_creator.DatasetCreator(dataset_fn) model_2.fit(ds, epochs=1, steps_per_epoch=5) if __name__ == "__main__": tf.__internal__.distribute.multi_process_runner.test_main()
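The `_verify_accumulators_updated` check that anchors these tests has a simple single-process analogue, sketched below with a plain Adam step and no ParameterServerStrategy cluster; it is an illustrative simplification, not a substitute for the distributed coverage above.

```python
import tensorflow as tf

var = tf.Variable([1.0, 2.0])
opt = tf.keras.optimizers.Adam(0.002)

with tf.GradientTape() as tape:
    loss = tf.reduce_sum(tf.square(var))
grads = tape.gradient(loss, [var])
opt.apply_gradients(zip(grads, [var]))

# After one step, Adam's momentum/velocity slots should have moved away
# from zero, mirroring the assertion in _verify_accumulators_updated.
for v in opt.variables:
    if "iteration" not in v.name and "learning_rate" not in v.name:
        assert not bool(tf.reduce_all(v == 0)), v.name
```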
tf-keras/tf_keras/optimizers/optimizer_pss_test.py/0
{ "file_path": "tf-keras/tf_keras/optimizers/optimizer_pss_test.py", "repo_id": "tf-keras", "token_count": 4723 }
216
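The sharded-variable checks above reduce, in the single-process case, to verifying that an optimizer's slot variables actually move after training. A minimal sketch of that check, assuming only public TF-Keras APIs (the model, data sizes, and variable filter are illustrative, not taken from the test suite):

import numpy as np
import tf_keras as keras

# A small regression model; Adam creates one momentum and one velocity
# slot per trainable variable once training starts.
model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
optimizer = keras.optimizers.Adam(learning_rate=0.01)
model.compile(loss="mse", optimizer=optimizer)

x = np.random.uniform(size=(8, 4)).astype("float32")
y = np.random.uniform(size=(8, 1)).astype("float32")
model.fit(x, y, epochs=1, verbose=0)

# Mirrors the spirit of _verify_accumulators_updated above: after a
# training step, no non-scalar optimizer variable (i.e. no slot) should
# still be all zeros.
for var in optimizer.variables:
    if len(var.shape) > 0:
        assert np.any(np.asarray(var) != 0.0), var.name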
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for TF-Keras Premade WideNDeep models.""" import numpy as np import tensorflow.compat.v2 as tf from tf_keras.engine import input_layer from tf_keras.engine import sequential from tf_keras.engine import training from tf_keras.feature_column import dense_features_v2 from tf_keras.layers import core from tf_keras.optimizers.legacy import gradient_descent from tf_keras.premade_models import linear from tf_keras.premade_models import wide_deep from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils @test_combinations.run_all_keras_modes(always_skip_v1=True) class WideDeepModelTest(test_combinations.TestCase): def test_wide_deep_model(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) linear_inp = np.random.uniform(low=-5.0, high=5.0, size=(64, 2)) dnn_inp = np.random.uniform(low=-5.0, high=5.0, size=(64, 3)) inputs = [linear_inp, dnn_inp] output = 0.3 * linear_inp[:, 0] + 0.2 * dnn_inp[:, 1] wide_deep_model.compile( optimizer=["sgd", "adam"], loss="mse", metrics=[], run_eagerly=test_utils.should_run_eagerly(), ) wide_deep_model.fit(inputs, output, epochs=5) self.assertTrue(wide_deep_model.built) def test_wide_deep_model_backprop(self): with self.cached_session(): linear_model = linear.LinearModel( units=1, kernel_initializer="zeros" ) dnn_model = sequential.Sequential( [core.Dense(units=1, kernel_initializer="zeros")] ) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) linear_inp = np.array([[1.0]]) dnn_inp = np.array([[1.0]]) inputs = [linear_inp, dnn_inp] output = linear_inp + 2 * dnn_inp linear_opt = gradient_descent.SGD(learning_rate=0.1) dnn_opt = gradient_descent.SGD(learning_rate=0.3) wide_deep_model.compile( optimizer=[linear_opt, dnn_opt], loss="mse", metrics=[], run_eagerly=test_utils.should_run_eagerly(), ) self.evaluate(tf.compat.v1.global_variables_initializer()) wide_deep_model.fit(inputs, output, epochs=1) self.assertAllClose( [[0.6]], self.evaluate( wide_deep_model.linear_model.dense_layers[0].kernel ), ) self.assertAllClose( [[1.8]], self.evaluate(wide_deep_model.dnn_model.layers[0].kernel), ) def test_wide_deep_model_with_single_input(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) inputs = np.random.uniform(low=-5.0, high=5.0, size=(64, 3)) output = 0.3 * inputs[:, 0] wide_deep_model.compile( optimizer=["sgd", "adam"], loss="mse", metrics=[], run_eagerly=test_utils.should_run_eagerly(), ) wide_deep_model.fit(inputs, output, epochs=5) def test_wide_deep_model_with_multi_outputs(self): inp = input_layer.Input(shape=(1,), name="linear") l = linear.LinearModel(units=2, 
use_bias=False)(inp) l1, l2 = tf.split(l, num_or_size_splits=2, axis=1) linear_model = training.Model(inp, [l1, l2]) linear_model.set_weights([np.asarray([[0.5, 0.3]])]) h = core.Dense(units=2, use_bias=False)(inp) h1, h2 = tf.split(h, num_or_size_splits=2, axis=1) dnn_model = training.Model(inp, [h1, h2]) dnn_model.set_weights([np.asarray([[0.1, -0.5]])]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) inp_np = np.asarray([[1.0]]) out1, out2 = wide_deep_model(inp_np) # output should be (0.5 + 0.1), and (0.3 - 0.5) self.assertAllClose([[0.6]], out1) self.assertAllClose([[-0.2]], out2) wide_deep_model = wide_deep.WideDeepModel( linear_model, dnn_model, activation="relu" ) out1, out2 = wide_deep_model(inp_np) # output should be relu((0.5 + 0.1)), and relu((0.3 - 0.5)) self.assertAllClose([[0.6]], out1) self.assertAllClose([[0.0]], out2) def test_wide_deep_model_with_single_optimizer(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) linear_inp = np.random.uniform(low=-5.0, high=5.0, size=(64, 2)) dnn_inp = np.random.uniform(low=-5.0, high=5.0, size=(64, 3)) inputs = [linear_inp, dnn_inp] output = 0.3 * linear_inp[:, 0] + 0.2 * dnn_inp[:, 1] wide_deep_model.compile( optimizer="sgd", loss="mse", metrics=[], run_eagerly=test_utils.should_run_eagerly(), ) wide_deep_model.fit(inputs, output, epochs=5) self.assertTrue(wide_deep_model.built) def test_wide_deep_model_as_layer(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1)]) linear_input = input_layer.Input(shape=(3,), name="linear") dnn_input = input_layer.Input(shape=(5,), name="dnn") wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) wide_deep_output = wide_deep_model((linear_input, dnn_input)) input_b = input_layer.Input(shape=(1,), name="b") output_b = core.Dense(units=1)(input_b) model = training.Model( inputs=[linear_input, dnn_input, input_b], outputs=[wide_deep_output + output_b], ) linear_input_np = np.random.uniform(low=-5.0, high=5.0, size=(64, 3)) dnn_input_np = np.random.uniform(low=-5.0, high=5.0, size=(64, 5)) input_b_np = np.random.uniform(low=-5.0, high=5.0, size=(64,)) output_np = ( linear_input_np[:, 0] + 0.2 * dnn_input_np[:, 1] + input_b_np ) model.compile( optimizer="sgd", loss="mse", metrics=[], run_eagerly=test_utils.should_run_eagerly(), ) model.fit( [linear_input_np, dnn_input_np, input_b_np], output_np, epochs=5 ) def test_wide_deep_model_with_sub_model_trained(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel( linear.LinearModel(units=1), sequential.Sequential([core.Dense(units=1, input_dim=3)]), ) linear_inp = np.random.uniform(low=-5.0, high=5.0, size=(64, 2)) dnn_inp = np.random.uniform(low=-5.0, high=5.0, size=(64, 3)) inputs = [linear_inp, dnn_inp] output = 0.3 * linear_inp[:, 0] + 0.2 * dnn_inp[:, 1] linear_model.compile( optimizer="sgd", loss="mse", metrics=[], run_eagerly=test_utils.should_run_eagerly(), ) dnn_model.compile( optimizer="adam", loss="mse", metrics=[], run_eagerly=test_utils.should_run_eagerly(), ) linear_model.fit(linear_inp, output, epochs=50) dnn_model.fit(dnn_inp, output, epochs=50) wide_deep_model.compile( optimizer=["sgd", "adam"], loss="mse", metrics=[], run_eagerly=test_utils.should_run_eagerly(), ) wide_deep_model.fit(inputs, output, epochs=50) # This test 
is an example for cases where linear and dnn models accept # same raw input and same transformed inputs, i.e., the raw input is # categorical, and both linear and dnn models accept one-hot encoding. def test_wide_deep_model_with_single_feature_column(self): vocab_list = ["alpha", "beta", "gamma"] vocab_val = [0.4, 0.6, 0.9] data = np.random.choice(vocab_list, size=256) y = np.zeros_like(data, dtype=np.float32) for vocab, val in zip(vocab_list, vocab_val): indices = np.where(data == vocab) y[indices] = val + np.random.uniform( low=-0.01, high=0.01, size=indices[0].shape ) cat_column = tf.feature_column.categorical_column_with_vocabulary_list( key="symbol", vocabulary_list=vocab_list ) ind_column = tf.feature_column.indicator_column(cat_column) dense_feature_layer = dense_features_v2.DenseFeatures([ind_column]) linear_model = linear.LinearModel( use_bias=False, kernel_initializer="zeros" ) dnn_model = sequential.Sequential([core.Dense(units=1)]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) combined = sequential.Sequential([dense_feature_layer, wide_deep_model]) opt = gradient_descent.SGD(learning_rate=0.1) combined.compile( opt, "mse", [], run_eagerly=test_utils.should_run_eagerly() ) combined.fit(x={"symbol": data}, y=y, batch_size=32, epochs=10) # This test is an example for cases where linear and dnn models accept # same raw input but different transformed inputs, i.e., the raw input is # categorical, and linear model accepts one-hot encoding, while dnn model # accepts embedding encoding. def test_wide_deep_model_with_two_feature_columns(self): vocab_list = ["alpha", "beta", "gamma"] vocab_val = [0.4, 0.6, 0.9] data = np.random.choice(vocab_list, size=256) y = np.zeros_like(data, dtype=np.float32) for vocab, val in zip(vocab_list, vocab_val): indices = np.where(data == vocab) y[indices] = val + np.random.uniform( low=-0.01, high=0.01, size=indices[0].shape ) cat_column = tf.feature_column.categorical_column_with_vocabulary_list( key="symbol", vocabulary_list=vocab_list ) ind_column = tf.feature_column.indicator_column(cat_column) emb_column = tf.feature_column.embedding_column(cat_column, dimension=5) linear_feature_layer = dense_features_v2.DenseFeatures([ind_column]) linear_model = linear.LinearModel( use_bias=False, kernel_initializer="zeros" ) combined_linear = sequential.Sequential( [linear_feature_layer, linear_model] ) dnn_model = sequential.Sequential([core.Dense(units=1)]) dnn_feature_layer = dense_features_v2.DenseFeatures([emb_column]) combined_dnn = sequential.Sequential([dnn_feature_layer, dnn_model]) wide_deep_model = wide_deep.WideDeepModel(combined_linear, combined_dnn) opt = gradient_descent.SGD(learning_rate=0.1) wide_deep_model.compile( opt, "mse", [], run_eagerly=test_utils.should_run_eagerly() ) wide_deep_model.fit(x={"symbol": data}, y=y, batch_size=32, epochs=10) def test_config(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) config = wide_deep_model.get_config() cloned_wide_deep_model = wide_deep.WideDeepModel.from_config(config) self.assertEqual( linear_model.units, cloned_wide_deep_model.linear_model.units ) self.assertEqual( dnn_model.layers[0].units, cloned_wide_deep_model.dnn_model.layers[0].units, ) def test_config_with_custom_objects(self): def my_activation(x): return x linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model =
wide_deep.WideDeepModel( linear_model, dnn_model, activation=my_activation ) config = wide_deep_model.get_config() cloned_wide_deep_model = wide_deep.WideDeepModel.from_config( config, custom_objects={"my_activation": my_activation} ) self.assertEqual(cloned_wide_deep_model.activation, my_activation) def test_export(self): input1 = input_layer.Input(shape=(1,)) output1 = linear.LinearModel()(input1) linear_model = training.Model(input1, output1) input2 = input_layer.Input(shape=(1,)) output2 = core.Dense(units=1)(input2) dnn_model = training.Model(input2, output2) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) wide_deep_model.compile(optimizer=["adam", "adam"]) output = wide_deep_model([input1, input2]) model = training.Model([input1, input2], output) model.compile() model.export(self.get_temp_dir()) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/premade_models/wide_deep_test.py/0
{ "file_path": "tf-keras/tf_keras/premade_models/wide_deep_test.py", "repo_id": "tf-keras", "token_count": 6609 }
217
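For reference, the wide-and-deep pattern these tests exercise can be set up in a few lines. A hedged sketch (layer sizes and synthetic data are illustrative assumptions):

import numpy as np
import tf_keras as keras
from tf_keras.premade_models import linear, wide_deep

linear_model = linear.LinearModel(units=1)
dnn_model = keras.Sequential(
    [keras.layers.Dense(8, activation="relu"), keras.layers.Dense(1)]
)
model = wide_deep.WideDeepModel(linear_model, dnn_model)

# A two-element optimizer list trains the linear branch with the first
# optimizer and the DNN branch with the second; a single optimizer string
# would train both jointly.
model.compile(optimizer=["sgd", "adam"], loss="mse")

wide_inputs = np.random.uniform(size=(32, 2)).astype("float32")
deep_inputs = np.random.uniform(size=(32, 3)).astype("float32")
targets = 0.3 * wide_inputs[:, :1] + 0.2 * deep_inputs[:, :1]
model.fit([wide_inputs, deep_inputs], targets, epochs=1, verbose=0)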
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for unit-testing TF-Keras.""" import collections import contextlib import functools import itertools import threading import unittest import numpy as np import tensorflow.compat.v2 as tf from tf_keras import backend from tf_keras import layers from tf_keras import models from tf_keras.engine import base_layer_utils from tf_keras.optimizers.legacy import adadelta as adadelta_v2 from tf_keras.optimizers.legacy import adagrad as adagrad_v2 from tf_keras.optimizers.legacy import adam as adam_v2 from tf_keras.optimizers.legacy import adamax as adamax_v2 from tf_keras.optimizers.legacy import gradient_descent as gradient_descent_v2 from tf_keras.optimizers.legacy import nadam as nadam_v2 from tf_keras.optimizers.legacy import rmsprop as rmsprop_v2 from tf_keras.utils import tf_contextlib from tf_keras.utils import tf_inspect # isort: off from tensorflow.python.framework import ( test_util as tf_test_utils, ) from tensorflow.python.util.tf_export import keras_export def string_test(actual, expected): np.testing.assert_array_equal(actual, expected) def numeric_test(actual, expected): np.testing.assert_allclose(actual, expected, rtol=1e-3, atol=1e-6) def get_test_data( train_samples, test_samples, input_shape, num_classes, random_seed=None ): """Generates test data to train a model on. Args: train_samples: Integer, how many training samples to generate. test_samples: Integer, how many test samples to generate. input_shape: Tuple of integers, shape of the inputs. num_classes: Integer, number of classes for the data and targets. random_seed: Integer, random seed used by numpy to generate data. Returns: A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. """ if random_seed is not None: np.random.seed(random_seed) num_sample = train_samples + test_samples templates = 2 * num_classes * np.random.random((num_classes,) + input_shape) y = np.random.randint(0, num_classes, size=(num_sample,)) x = np.zeros((num_sample,) + input_shape, dtype=np.float32) for i in range(num_sample): x[i] = templates[y[i]] + np.random.normal( loc=0, scale=1.0, size=input_shape ) return ( (x[:train_samples], y[:train_samples]), (x[train_samples:], y[train_samples:]), ) @keras_export("keras.__internal__.utils.layer_test", v1=[]) @tf_test_utils.disable_cudnn_autotune def layer_test( layer_cls, kwargs=None, input_shape=None, input_dtype=None, input_data=None, expected_output=None, expected_output_dtype=None, expected_output_shape=None, validate_training=True, adapt_data=None, custom_objects=None, test_harness=None, supports_masking=None, ): """Test routine for a layer with a single input and single output. Args: layer_cls: Layer class object. kwargs: Optional dictionary of keyword arguments for instantiating the layer. input_shape: Input shape tuple. input_dtype: Data type of the input data. input_data: Numpy array of input data. 
expected_output: Numpy array of the expected output. expected_output_dtype: Data type expected for the output. expected_output_shape: Shape tuple for the expected shape of the output. validate_training: Whether to attempt to validate training on this layer. This might be set to False for non-differentiable layers that output string or integer values. adapt_data: Optional data for an 'adapt' call. If None, adapt() will not be tested for this layer. This is only relevant for PreprocessingLayers. custom_objects: Optional dictionary mapping name strings to custom objects in the layer class. This is helpful for testing custom layers. test_harness: The Tensorflow test, if any, that this function is being called in. supports_masking: Optional boolean to check the `supports_masking` property of the layer. If None, the check will not be performed. Returns: The output data (Numpy array) returned by the layer, for additional checks to be done by the calling code. Raises: ValueError: if `input_shape` is None. """ if input_data is None: if input_shape is None: raise ValueError("input_shape is None") if not input_dtype: input_dtype = "float32" input_data_shape = list(input_shape) for i, e in enumerate(input_data_shape): if e is None: input_data_shape[i] = np.random.randint(1, 4) input_data = 10 * np.random.random(input_data_shape) if input_dtype[:5] == "float": input_data -= 0.5 input_data = input_data.astype(input_dtype) elif input_shape is None: input_shape = input_data.shape if input_dtype is None: input_dtype = input_data.dtype if expected_output_dtype is None: expected_output_dtype = input_dtype if tf.as_dtype(expected_output_dtype) == tf.string: if test_harness: assert_equal = test_harness.assertAllEqual else: assert_equal = string_test else: if test_harness: assert_equal = test_harness.assertAllClose else: assert_equal = numeric_test # instantiation kwargs = kwargs or {} layer = layer_cls(**kwargs) if ( supports_masking is not None and layer.supports_masking != supports_masking ): raise AssertionError( "When testing layer %s, the `supports_masking` property is %r " "but expected to be %r.\nFull kwargs: %s" % ( layer_cls.__name__, layer.supports_masking, supports_masking, kwargs, ) ) # Test adapt, if data was passed.
if adapt_data is not None: layer.adapt(adapt_data) # test get_weights, set_weights at layer level weights = layer.get_weights() layer.set_weights(weights) # test instantiation from weights if "weights" in tf_inspect.getargspec(layer_cls.__init__): kwargs["weights"] = weights layer = layer_cls(**kwargs) # test in functional API x = layers.Input(shape=input_shape[1:], dtype=input_dtype) y = layer(x) if backend.dtype(y) != expected_output_dtype: raise AssertionError( "When testing layer %s, for input %s, found output " "dtype=%s but expected to find %s.\nFull kwargs: %s" % ( layer_cls.__name__, x, backend.dtype(y), expected_output_dtype, kwargs, ) ) def assert_shapes_equal(expected, actual): """Asserts that the output shape from the layer matches the actual shape.""" if len(expected) != len(actual): raise AssertionError( "When testing layer %s, for input %s, found output_shape=" "%s but expected to find %s.\nFull kwargs: %s" % (layer_cls.__name__, x, actual, expected, kwargs) ) for expected_dim, actual_dim in zip(expected, actual): if isinstance(expected_dim, tf.compat.v1.Dimension): expected_dim = expected_dim.value if isinstance(actual_dim, tf.compat.v1.Dimension): actual_dim = actual_dim.value if expected_dim is not None and expected_dim != actual_dim: raise AssertionError( "When testing layer %s, for input %s, found output_shape=" "%s but expected to find %s.\nFull kwargs: %s" % (layer_cls.__name__, x, actual, expected, kwargs) ) if expected_output_shape is not None: assert_shapes_equal(tf.TensorShape(expected_output_shape), y.shape) # check shape inference model = models.Model(x, y) computed_output_shape = tuple( layer.compute_output_shape(tf.TensorShape(input_shape)).as_list() ) computed_output_signature = layer.compute_output_signature( tf.TensorSpec(shape=input_shape, dtype=input_dtype) ) actual_output = model.predict(input_data) actual_output_shape = actual_output.shape assert_shapes_equal(computed_output_shape, actual_output_shape) assert_shapes_equal(computed_output_signature.shape, actual_output_shape) if computed_output_signature.dtype != actual_output.dtype: raise AssertionError( "When testing layer %s, for input %s, found output_dtype=" "%s but expected to find %s.\nFull kwargs: %s" % ( layer_cls.__name__, x, actual_output.dtype, computed_output_signature.dtype, kwargs, ) ) if expected_output is not None: assert_equal(actual_output, expected_output) # test serialization, weight setting at model level model_config = model.get_config() recovered_model = models.Model.from_config(model_config, custom_objects) if model.weights: weights = model.get_weights() recovered_model.set_weights(weights) output = recovered_model.predict(input_data) assert_equal(output, actual_output) # test training mode (e.g. useful for dropout tests) # Rebuild the model to avoid the graph being reused between predict() and # train_on_batch(). See b/120160788 for more details. This should be mitigated after 2.0. layer_weights = ( layer.get_weights() ) # Get the layer weights BEFORE training. if validate_training: model = models.Model(x, layer(x)) if _thread_local_data.run_eagerly is not None: model.compile( "rmsprop", "mse", weighted_metrics=["acc"], run_eagerly=should_run_eagerly(), ) else: model.compile("rmsprop", "mse", weighted_metrics=["acc"]) model.train_on_batch(input_data, actual_output) # test as first layer in Sequential API layer_config = layer.get_config() layer_config["batch_input_shape"] = input_shape layer = layer.__class__.from_config(layer_config) # Test adapt, if data was passed.
if adapt_data is not None: layer.adapt(adapt_data) model = models.Sequential() model.add(layers.Input(shape=input_shape[1:], dtype=input_dtype)) model.add(layer) layer.set_weights(layer_weights) actual_output = model.predict(input_data) actual_output_shape = actual_output.shape for expected_dim, actual_dim in zip( computed_output_shape, actual_output_shape ): if expected_dim is not None: if expected_dim != actual_dim: raise AssertionError( "When testing layer %s **after deserialization**, " "for input %s, found output_shape=" "%s but expected to find inferred shape %s.\n" "Full kwargs: %s" % ( layer_cls.__name__, x, actual_output_shape, computed_output_shape, kwargs, ) ) if expected_output is not None: assert_equal(actual_output, expected_output) # test serialization, weight setting at model level model_config = model.get_config() recovered_model = models.Sequential.from_config( model_config, custom_objects ) if model.weights: weights = model.get_weights() recovered_model.set_weights(weights) output = recovered_model.predict(input_data) assert_equal(output, actual_output) # for further checks in the caller function return actual_output _thread_local_data = threading.local() _thread_local_data.model_type = None _thread_local_data.run_eagerly = None _thread_local_data.saved_model_format = None _thread_local_data.save_kwargs = None @tf_contextlib.contextmanager def model_type_scope(value): """Provides a scope within which the model type to test is equal to `value`. The model type gets restored to its original value upon exiting the scope. Args: value: model type value Yields: The provided value. """ previous_value = _thread_local_data.model_type try: _thread_local_data.model_type = value yield value finally: # Restore model type to initial value. _thread_local_data.model_type = previous_value @tf_contextlib.contextmanager def run_eagerly_scope(value): """Provides a scope within which we compile models to run eagerly or not. The boolean gets restored to its original value upon exiting the scope. Args: value: Bool specifying if we should run models eagerly in the active test. Should be True or False. Yields: The provided value. """ previous_value = _thread_local_data.run_eagerly try: _thread_local_data.run_eagerly = value yield value finally: # Restore run_eagerly to initial value. _thread_local_data.run_eagerly = previous_value def should_run_eagerly(): """Returns whether the models we are testing should be run eagerly.""" if _thread_local_data.run_eagerly is None: raise ValueError( "Cannot call `should_run_eagerly()` outside of a " "`run_eagerly_scope()` or `run_all_keras_modes` " "decorator." ) return _thread_local_data.run_eagerly and tf.executing_eagerly() @tf_contextlib.contextmanager def saved_model_format_scope(value, **kwargs): """Provides a scope within which the saved model format to test is `value`. The saved model format gets restored to its original value upon exiting the scope. Args: value: saved model format value **kwargs: optional kwargs to pass to the save function. Yields: The provided value. """ previous_format = _thread_local_data.saved_model_format previous_kwargs = _thread_local_data.save_kwargs try: _thread_local_data.saved_model_format = value _thread_local_data.save_kwargs = kwargs yield finally: # Restore saved model format to initial value.
_thread_local_data.saved_model_format = previous_format _thread_local_data.save_kwargs = previous_kwargs def get_save_format(): if _thread_local_data.saved_model_format is None: raise ValueError( "Cannot call `get_save_format()` outside of a " "`saved_model_format_scope()` or " "`run_with_all_saved_model_formats` decorator." ) return _thread_local_data.saved_model_format def get_save_kwargs(): if _thread_local_data.save_kwargs is None: raise ValueError( "Cannot call `get_save_kwargs()` outside of a " "`saved_model_format_scope()` or " "`run_with_all_saved_model_formats` decorator." ) return _thread_local_data.save_kwargs or {} def get_model_type(): """Gets the model type that should be tested.""" if _thread_local_data.model_type is None: raise ValueError( "Cannot call `get_model_type()` outside of a " "`model_type_scope()` or `run_with_all_model_types` " "decorator." ) return _thread_local_data.model_type def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None): model = models.Sequential() if input_dim: model.add( layers.Dense(num_hidden, activation="relu", input_dim=input_dim) ) else: model.add(layers.Dense(num_hidden, activation="relu")) activation = "sigmoid" if num_classes == 1 else "softmax" model.add(layers.Dense(num_classes, activation=activation)) return model def get_small_functional_mlp(num_hidden, num_classes, input_dim): inputs = layers.Input(shape=(input_dim,)) outputs = layers.Dense(num_hidden, activation="relu")(inputs) activation = "sigmoid" if num_classes == 1 else "softmax" outputs = layers.Dense(num_classes, activation=activation)(outputs) return models.Model(inputs, outputs) class SmallSubclassMLP(models.Model): """A subclass model based small MLP.""" def __init__( self, num_hidden, num_classes, use_bn=False, use_dp=False, **kwargs ): super().__init__(name="test_model", **kwargs) self.num_hidden = num_hidden self.num_classes = num_classes self.use_bn = use_bn self.use_dp = use_dp self.layer_a = layers.Dense(num_hidden, activation="relu") activation = "sigmoid" if num_classes == 1 else "softmax" self.layer_b = layers.Dense(num_classes, activation=activation) if self.use_dp: self.dp = layers.Dropout(0.5) if self.use_bn: self.bn = layers.BatchNormalization(axis=-1) def call(self, inputs, **kwargs): x = self.layer_a(inputs) if self.use_dp: x = self.dp(x) if self.use_bn: x = self.bn(x) return self.layer_b(x) def get_config(self): config = super().get_config() config.update( { "num_hidden": self.num_hidden, "num_classes": self.num_classes, "use_bn": self.use_bn, "use_dp": self.use_dp, } ) return config class _SmallSubclassMLPCustomBuild(models.Model): """A subclass model small MLP that uses a custom build method.""" def __init__(self, num_hidden, num_classes): super().__init__() self.layer_a = None self.layer_b = None self.num_hidden = num_hidden self.num_classes = num_classes def build(self, input_shape): self.layer_a = layers.Dense(self.num_hidden, activation="relu") activation = "sigmoid" if self.num_classes == 1 else "softmax" self.layer_b = layers.Dense(self.num_classes, activation=activation) def call(self, inputs, **kwargs): x = self.layer_a(inputs) return self.layer_b(x) def get_small_subclass_mlp(num_hidden, num_classes): return SmallSubclassMLP(num_hidden, num_classes) def get_small_subclass_mlp_with_custom_build(num_hidden, num_classes): return _SmallSubclassMLPCustomBuild(num_hidden, num_classes) def get_small_mlp(num_hidden, num_classes, input_dim): """Get a small mlp of the model type specified by `get_model_type`.""" model_type = get_model_type() 
if model_type == "subclass": return get_small_subclass_mlp(num_hidden, num_classes) if model_type == "subclass_custom_build": return get_small_subclass_mlp_with_custom_build(num_hidden, num_classes) if model_type == "sequential": return get_small_sequential_mlp(num_hidden, num_classes, input_dim) if model_type == "functional": return get_small_functional_mlp(num_hidden, num_classes, input_dim) raise ValueError(f"Unknown model type {model_type}") class _SubclassModel(models.Model): """A TF-Keras subclass model.""" def __init__(self, model_layers, *args, **kwargs): """Instantiate a model. Args: model_layers: a list of layers to be added to the model. *args: Model's args **kwargs: Model's keyword args, at most one of input_tensor -> the input tensor required for ragged/sparse input. """ inputs = kwargs.pop("input_tensor", None) super().__init__(*args, **kwargs) # Note that clone and build doesn't support lists of layers in # subclassed models. Adding each layer directly here. for i, layer in enumerate(model_layers): setattr(self, self._layer_name_for_i(i), layer) self.num_layers = len(model_layers) if inputs is not None: self._set_inputs(inputs) def _layer_name_for_i(self, i): return f"layer{i}" def call(self, inputs, **kwargs): x = inputs for i in range(self.num_layers): layer = getattr(self, self._layer_name_for_i(i)) x = layer(x) return x def get_config(self): # This test model relies on the default TF-Keras serialization of a # model, rather than providing the details of `model_layers`. raise NotImplementedError class _SubclassModelCustomBuild(models.Model): """A TF-Keras subclass model that uses a custom build method.""" def __init__(self, layer_generating_func, *args, **kwargs): super().__init__(*args, **kwargs) self.all_layers = None self._layer_generating_func = layer_generating_func def build(self, input_shape): model_layers = [] for layer in self._layer_generating_func(): model_layers.append(layer) self.all_layers = model_layers def call(self, inputs, **kwargs): x = inputs for layer in self.all_layers: x = layer(x) return x def get_model_from_layers( model_layers, input_shape=None, input_dtype=None, name=None, input_ragged=None, input_sparse=None, model_type=None, ): """Builds a model from a sequence of layers. Args: model_layers: The layers used to build the network. input_shape: Shape tuple of the input or 'TensorShape' instance. input_dtype: Datatype of the input. name: Name for the model. input_ragged: Boolean, whether the input data is a ragged tensor. input_sparse: Boolean, whether the input data is a sparse tensor. model_type: One of "subclass", "subclass_custom_build", "sequential", or "functional". When None, defaults to `get_model_type`. Returns: A TF-Keras model. 
""" if model_type is None: model_type = get_model_type() if model_type == "subclass": inputs = None if input_ragged or input_sparse: inputs = layers.Input( shape=input_shape, dtype=input_dtype, ragged=input_ragged, sparse=input_sparse, ) return _SubclassModel(model_layers, name=name, input_tensor=inputs) if model_type == "subclass_custom_build": layer_generating_func = lambda: model_layers return _SubclassModelCustomBuild(layer_generating_func, name=name) if model_type == "sequential": model = models.Sequential(name=name) if input_shape: model.add( layers.InputLayer( input_shape=input_shape, dtype=input_dtype, ragged=input_ragged, sparse=input_sparse, ) ) for layer in model_layers: model.add(layer) return model if model_type == "functional": if not input_shape: raise ValueError( "Cannot create a functional model from layers with no " "input shape." ) inputs = layers.Input( shape=input_shape, dtype=input_dtype, ragged=input_ragged, sparse=input_sparse, ) outputs = inputs for layer in model_layers: outputs = layer(outputs) return models.Model(inputs, outputs, name=name) raise ValueError(f"Unknown model type {model_type}") class Bias(layers.Layer): def build(self, input_shape): self.bias = self.add_weight("bias", (1,), initializer="zeros") def call(self, inputs): return inputs + self.bias class _MultiIOSubclassModel(models.Model): """Multi IO TF-Keras subclass model.""" def __init__( self, branch_a, branch_b, shared_input_branch=None, shared_output_branch=None, name=None, ): super().__init__(name=name) self._shared_input_branch = shared_input_branch self._branch_a = branch_a self._branch_b = branch_b self._shared_output_branch = shared_output_branch def call(self, inputs, **kwargs): if self._shared_input_branch: for layer in self._shared_input_branch: inputs = layer(inputs) a = inputs b = inputs elif isinstance(inputs, dict): a = inputs["input_1"] b = inputs["input_2"] else: a, b = inputs for layer in self._branch_a: a = layer(a) for layer in self._branch_b: b = layer(b) outs = [a, b] if self._shared_output_branch: for layer in self._shared_output_branch: outs = layer(outs) return outs class _MultiIOSubclassModelCustomBuild(models.Model): """Multi IO TF-Keras subclass model that uses a custom build method.""" def __init__( self, branch_a_func, branch_b_func, shared_input_branch_func=None, shared_output_branch_func=None, ): super().__init__() self._shared_input_branch_func = shared_input_branch_func self._branch_a_func = branch_a_func self._branch_b_func = branch_b_func self._shared_output_branch_func = shared_output_branch_func self._shared_input_branch = None self._branch_a = None self._branch_b = None self._shared_output_branch = None def build(self, input_shape): if self._shared_input_branch_func(): self._shared_input_branch = self._shared_input_branch_func() self._branch_a = self._branch_a_func() self._branch_b = self._branch_b_func() if self._shared_output_branch_func(): self._shared_output_branch = self._shared_output_branch_func() def call(self, inputs, **kwargs): if self._shared_input_branch: for layer in self._shared_input_branch: inputs = layer(inputs) a = inputs b = inputs else: a, b = inputs for layer in self._branch_a: a = layer(a) for layer in self._branch_b: b = layer(b) outs = a, b if self._shared_output_branch: for layer in self._shared_output_branch: outs = layer(outs) return outs def get_multi_io_model( branch_a, branch_b, shared_input_branch=None, shared_output_branch=None ): """Builds a multi-io model that contains two branches. 
The produced model will be of the type specified by `get_model_type`. To build a two-input, two-output model: Specify a list of layers for branch a and branch b, but do not specify any shared input branch or shared output branch. The resulting model will apply each branch to a different input, to produce two outputs. The first value in branch_a must be the TF-Keras 'Input' layer for branch a, and the first value in branch_b must be the TF-Keras 'Input' layer for branch b. example usage: ``` branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()] branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()] model = get_multi_io_model(branch_a, branch_b) ``` To build a two-input, one-output model: Specify a list of layers for branch a and branch b, and specify a shared output branch. The resulting model will apply each branch to a different input. It will then apply the shared output branch to a tuple containing the intermediate outputs of each branch, to produce a single output. The first layer in the shared_output_branch must be able to merge a tuple of two tensors. The first value in branch_a must be the TF-Keras 'Input' layer for branch a, and the first value in branch_b must be the TF-Keras 'Input' layer for branch b. example usage: ``` input_branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()] input_branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()] shared_output_branch = [Concatenate(), Dense(), Dense()] model = get_multi_io_model(input_branch_a, input_branch_b, shared_output_branch=shared_output_branch) ``` To build a one-input, two-output model: Specify a list of layers for branch a and branch b, and specify a shared input branch. The resulting model will take one input, and apply the shared input branch to it. It will then respectively apply each branch to that intermediate result in parallel, to produce two outputs. The first value in the shared_input_branch must be the TF-Keras 'Input' layer for the whole model. Branch a and branch b should not contain any Input layers. example usage: ``` shared_input_branch = [Input(shape=(2,), name='in'), Dense(), Dense()] output_branch_a = [Dense(), Dense()] output_branch_b = [Dense(), Dense()] model = get_multi_io_model(output_branch_a, output_branch_b, shared_input_branch=shared_input_branch) ``` Args: branch_a: A sequence of layers for branch a of the model. branch_b: A sequence of layers for branch b of the model. shared_input_branch: An optional sequence of layers to apply to a single input, before applying both branches to that intermediate result. If set, the model will take only one input instead of two. Defaults to `None`. shared_output_branch: An optional sequence of layers to merge the intermediate results produced by branch a and branch b. If set, the model will produce only one output instead of two. Defaults to `None`. Returns: A multi-io model of the type specified by `get_model_type`, specified by the different branches.
""" # Extract the functional inputs from the layer lists if shared_input_branch: inputs = shared_input_branch[0] shared_input_branch = shared_input_branch[1:] else: inputs = branch_a[0], branch_b[0] branch_a = branch_a[1:] branch_b = branch_b[1:] model_type = get_model_type() if model_type == "subclass": return _MultiIOSubclassModel( branch_a, branch_b, shared_input_branch, shared_output_branch ) if model_type == "subclass_custom_build": return _MultiIOSubclassModelCustomBuild( (lambda: branch_a), (lambda: branch_b), (lambda: shared_input_branch), (lambda: shared_output_branch), ) if model_type == "sequential": raise ValueError( "Cannot use `get_multi_io_model` to construct sequential models" ) if model_type == "functional": if shared_input_branch: a_and_b = inputs for layer in shared_input_branch: a_and_b = layer(a_and_b) a = a_and_b b = a_and_b else: a, b = inputs for layer in branch_a: a = layer(a) for layer in branch_b: b = layer(b) outputs = a, b if shared_output_branch: for layer in shared_output_branch: outputs = layer(outputs) return models.Model(inputs, outputs) raise ValueError(f"Unknown model type {model_type}") _V2_OPTIMIZER_MAP = { "adadelta": adadelta_v2.Adadelta, "adagrad": adagrad_v2.Adagrad, "adam": adam_v2.Adam, "adamax": adamax_v2.Adamax, "nadam": nadam_v2.Nadam, "rmsprop": rmsprop_v2.RMSprop, "sgd": gradient_descent_v2.SGD, } def get_v2_optimizer(name, **kwargs): """Get the v2 optimizer requested. This is only necessary until v2 are the default, as we are testing in Eager, and Eager + v1 optimizers fail tests. When we are in v2, the strings alone should be sufficient, and this mapping can theoretically be removed. Args: name: string name of TF-Keras v2 optimizer. **kwargs: any kwargs to pass to the optimizer constructor. Returns: Initialized TF-Keras v2 optimizer. Raises: ValueError: if an unknown name was passed. """ try: return _V2_OPTIMIZER_MAP[name](**kwargs) except KeyError: raise ValueError( "Could not find requested v2 optimizer: " "{}\nValid choices: {}".format(name, list(_V2_OPTIMIZER_MAP.keys())) ) def get_expected_metric_variable_names(var_names, name_suffix=""): """Returns expected metric variable names given names and prefix/suffix.""" if tf.__internal__.tf2.enabled() or tf.executing_eagerly(): # In V1 eager mode and V2 variable names are not made unique. return [n + ":0" for n in var_names] # In V1 graph mode variable names are made unique using a suffix. 
return [n + name_suffix + ":0" for n in var_names] def enable_v2_dtype_behavior(fn): """Decorator for enabling the layer V2 dtype behavior on a test.""" return _set_v2_dtype_behavior(fn, True) def disable_v2_dtype_behavior(fn): """Decorator for disabling the layer V2 dtype behavior on a test.""" return _set_v2_dtype_behavior(fn, False) def _set_v2_dtype_behavior(fn, enabled): """Returns version of 'fn' that runs with v2 dtype behavior on or off.""" @functools.wraps(fn) def wrapper(*args, **kwargs): v2_dtype_behavior = base_layer_utils.V2_DTYPE_BEHAVIOR base_layer_utils.V2_DTYPE_BEHAVIOR = enabled try: return fn(*args, **kwargs) finally: base_layer_utils.V2_DTYPE_BEHAVIOR = v2_dtype_behavior return tf.__internal__.decorator.make_decorator(fn, wrapper) @contextlib.contextmanager def device(should_use_gpu): """Uses gpu when requested and available.""" if should_use_gpu and tf.test.is_gpu_available(): dev = "/device:GPU:0" else: dev = "/device:CPU:0" with tf.device(dev): yield @contextlib.contextmanager def use_gpu(): """Uses gpu when requested and available.""" with device(should_use_gpu=True): yield def for_all_test_methods(decorator, *args, **kwargs): """Generate class-level decorator from given method-level decorator. It is expected for the given decorator to take some arguments and return a method that is then called on the test method to produce a decorated method. Args: decorator: The decorator to apply. *args: Positional arguments **kwargs: Keyword arguments Returns: Function that will decorate a given classes test methods with the decorator. """ def all_test_methods_impl(cls): """Apply decorator to all test methods in class.""" for name in dir(cls): value = getattr(cls, name) if ( callable(value) and name.startswith("test") and (name != "test_session") ): setattr(cls, name, decorator(*args, **kwargs)(value)) return cls return all_test_methods_impl # The description is just for documentation purposes. def run_without_tensor_float_32(description): """Execute test with TensorFloat-32 disabled. While almost every real-world deep learning model runs fine with TensorFloat-32, many tests use assertAllClose or similar methods. TensorFloat-32 matmuls typically will cause such methods to fail with the default tolerances. Args: description: A description used for documentation purposes, describing why the test requires TensorFloat-32 to be disabled. Returns: Decorator which runs a test with TensorFloat-32 disabled. """ def decorator(f): @functools.wraps(f) def decorated(self, *args, **kwargs): allowed = tf.config.experimental.tensor_float_32_execution_enabled() try: tf.config.experimental.enable_tensor_float_32_execution(False) f(self, *args, **kwargs) finally: tf.config.experimental.enable_tensor_float_32_execution(allowed) return decorated return decorator # The description is just for documentation purposes. def run_all_without_tensor_float_32( description, ): """Execute all tests in a class with TensorFloat-32 disabled.""" return for_all_test_methods(run_without_tensor_float_32, description) def run_v2_only(obj=None): """Execute the decorated test only if running in v2 mode. This function is intended to be applied to tests that exercise v2 only functionality. If the test is run in v1 mode it will simply be skipped. See go/tf-test-decorator-cheatsheet for the decorators to use in different v1/v2/eager/graph combinations. Args: obj: function to be annotated. If None, return a decorator the can be applied to a function or class. If `obj` is not None, return the decorator applied to `obj`. 
Returns: Returns a decorator that will conditionally skip the decorated test method. """ condition = not tf.__internal__.tf2.enabled() reason = "Test is only compatible with TF v2." def decorator(f): if tf_inspect.isclass(f): return unittest.skipIf(condition=condition, reason=reason)(obj) def decorated(self, *args, **kwargs): if condition: self.skipTest(reason) return f(self, *args, **kwargs) return decorated if obj is not None: return decorator(obj) return decorator def generate_combinations_with_testcase_name(**kwargs): """Generate combinations based on its keyword arguments using combine(). This function calls combine() and appends a testcase name to the list of dictionaries returned. The 'testcase_name' key is a required for named parameterized tests. Args: **kwargs: keyword arguments of form `option=[possibilities, ...]` or `option=the_only_possibility`. Returns: a list of dictionaries for each combination. Keys in the dictionaries are the keyword argument names. Each key has one value - one of the corresponding keyword argument values. """ sort_by_key = lambda k: k[0] combinations = [] for key, values in sorted(kwargs.items(), key=sort_by_key): if not isinstance(values, list): values = [values] combinations.append([(key, value) for value in values]) combinations = [ collections.OrderedDict(result) for result in itertools.product(*combinations) ] named_combinations = [] for combination in combinations: assert isinstance(combination, collections.OrderedDict) name = "".join( [ "_{}_{}".format( "".join(filter(str.isalnum, key)), "".join(filter(str.isalnum, str(value))), ) for key, value in combination.items() ] ) named_combinations.append( collections.OrderedDict( list(combination.items()) + [("testcase_name", f"_test{name}")] ) ) return named_combinations
tf-keras/tf_keras/testing_infra/test_utils.py/0
{ "file_path": "tf-keras/tf_keras/testing_infra/test_utils.py", "repo_id": "tf-keras", "token_count": 17000 }
218
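The harness above is easiest to read from a call site. A minimal sketch using only names defined in this file (the layer choice and shapes are arbitrary):

import tf_keras as keras
from tf_keras.testing_infra import test_utils

# Runs a Dense layer through the shared checks: functional-API build,
# dtype and shape inference, config round-trips, and one training step.
test_utils.layer_test(
    keras.layers.Dense,
    kwargs={"units": 3},
    input_shape=(2, 4),
    expected_output_shape=(None, 3),
)

# The thread-local scopes drive parameterized tests; inside a scope, the
# model-building helpers pick the requested flavor.
with test_utils.model_type_scope("functional"):
    model = test_utils.get_small_mlp(num_hidden=8, num_classes=2, input_dim=4)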
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for compiled Model subclassing.""" import os import numpy as np import tensorflow.compat.v2 as tf import tf_keras as keras from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils from tf_keras.tests import model_subclassing_test_util as model_util try: import h5py except ImportError: h5py = None @test_combinations.run_all_keras_modes class ModelSubclassCompiledTest(test_combinations.TestCase): def test_single_io_workflow_with_np_arrays(self): num_classes = 2 num_samples = 100 input_dim = 50 model = test_utils.SmallSubclassMLP( num_hidden=32, num_classes=num_classes, use_dp=True, use_bn=True ) model.compile( loss="mse", optimizer="rmsprop", metrics=["acc", keras.metrics.CategoricalAccuracy()], run_eagerly=test_utils.should_run_eagerly(), ) x = np.ones((num_samples, input_dim)) y = np.zeros((num_samples, num_classes)) model.fit(x, y, epochs=2, batch_size=32, verbose=0) _ = model.evaluate(x, y, verbose=0) def test_multi_io_workflow_with_np_arrays(self): num_classes = (2, 3) num_samples = 1000 input_dim = 50 model = model_util.get_multi_io_subclass_model( num_classes=num_classes, use_dp=True, use_bn=True ) model.compile( loss="mse", optimizer="rmsprop", metrics=["acc"], run_eagerly=test_utils.should_run_eagerly(), ) x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0) _ = model.evaluate([x1, x2], [y1, y2], verbose=0) def test_single_io_workflow_with_datasets(self): num_classes = 2 num_samples = 10 input_dim = 50 with self.cached_session(): model = test_utils.SmallSubclassMLP( num_hidden=32, num_classes=num_classes, use_dp=True, use_bn=True ) model.compile( loss="mse", optimizer="rmsprop", run_eagerly=test_utils.should_run_eagerly(), ) x = np.ones((num_samples, input_dim), dtype=np.float32) y = np.zeros((num_samples, num_classes), dtype=np.float32) dataset = tf.data.Dataset.from_tensor_slices((x, y)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=2, steps_per_epoch=10, verbose=0) _ = model.evaluate(dataset, steps=10, verbose=0) def test_attributes(self): # layers, weights, trainable_weights, non_trainable_weights, inputs, # outputs num_classes = (2, 3) num_samples = 100 input_dim = 50 model = model_util.get_multi_io_subclass_model( num_classes=num_classes, use_bn=True ) x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) self.assertEqual(model.name, "test_model") self.assertEqual(model.built, False) self.assertEqual(len(model.weights), 0) model.compile( loss="mse", optimizer="rmsprop", run_eagerly=test_utils.should_run_eagerly(), ) 
model.train_on_batch([x1, x2], [y1, y2]) self.assertEqual(model.built, True) self.assertEqual(len(model.layers), 4) self.assertEqual(len(model.weights), 10) self.assertEqual(len(model.trainable_weights), 8) self.assertEqual(len(model.non_trainable_weights), 2) def test_updates(self): # test that updates get run during training num_samples = 100 input_dim = 50 class BNNet(keras.Model): def __init__(self): super().__init__() self.bn = keras.layers.BatchNormalization( beta_initializer="ones", gamma_initializer="ones" ) def call(self, inputs): return self.bn(inputs) x = np.ones((num_samples, input_dim)) y = np.ones((num_samples, input_dim)) model = BNNet() model.compile( loss="mse", optimizer="rmsprop", run_eagerly=test_utils.should_run_eagerly(), ) y_ref = model.predict(x) model.train_on_batch(x, y) y_new = model.predict(x) self.assertGreater(np.sum(np.abs(y_ref - y_new)), 0.1) def test_training_and_inference_behavior(self): # test that dropout is applied in training and not inference num_samples = 100 input_dim = 50 class DPNet(keras.Model): def __init__(self): super().__init__() self.dp = keras.layers.Dropout(0.5) self.dense = keras.layers.Dense( 1, use_bias=False, kernel_initializer="ones" ) def call(self, inputs): x = self.dp(inputs) return self.dense(x) model = DPNet() x = np.ones((num_samples, input_dim)) y = model.predict(x) self.assertEqual(np.sum(y), np.sum(x)) model.compile( loss="mse", optimizer="rmsprop", run_eagerly=test_utils.should_run_eagerly(), ) loss = model.train_on_batch(x, y) self.assertGreater(loss, 0.1) def test_training_methods(self): # test fit, train_on_batch # on different input types: list, dict num_classes = (2, 3) num_samples = 100 input_dim = 50 x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) model = model_util.get_multi_io_subclass_model( num_classes=num_classes, use_bn=True ) model.compile( loss="mse", optimizer="rmsprop", run_eagerly=test_utils.should_run_eagerly(), ) model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0) model.fit( {"input_1": x1, "input_2": x2}, {"output_1": y1, "output_2": y2}, epochs=2, batch_size=32, ) model.fit( [x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0, validation_data=([x1, x2], [y1, y2]), ) model = model_util.get_multi_io_subclass_model( num_classes=num_classes, use_bn=True ) model.compile( loss="mse", optimizer="rmsprop", run_eagerly=test_utils.should_run_eagerly(), ) model.train_on_batch([x1, x2], [y1, y2]) model.train_on_batch( {"input_1": x1, "input_2": x2}, {"output_1": y1, "output_2": y2} ) def test_inference_methods(self): # test predict, evaluate, test_on_batch, predict_on_batch # on different input types: list, dict num_classes = (2, 3) num_samples = 100 input_dim = 50 x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) model = model_util.get_multi_io_subclass_model( num_classes=num_classes, use_bn=True ) model.compile( loss="mse", optimizer="rmsprop", run_eagerly=test_utils.should_run_eagerly(), ) model.evaluate([x1, x2], [y1, y2]) model.test_on_batch([x1, x2], [y1, y2]) model = model_util.get_multi_io_subclass_model( num_classes=num_classes, use_bn=True ) model.predict([x1, x2]) model = model_util.get_multi_io_subclass_model( num_classes=num_classes, use_bn=True ) model.predict_on_batch([x1, x2]) def test_saving(self): num_classes = (2, 3) num_samples = 100 input_dim 
= 50 x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) model = model_util.get_multi_io_subclass_model( num_classes=num_classes, use_bn=True ) model.compile( loss="mse", optimizer="rmsprop", run_eagerly=test_utils.should_run_eagerly(), ) model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0) y_ref_1, y_ref_2 = model.predict([x1, x2]) tf_format_name = os.path.join(self.get_temp_dir(), "ckpt") model.save_weights(tf_format_name) if h5py is not None: hdf5_format_name = os.path.join(self.get_temp_dir(), "weights.h5") model.save_weights(hdf5_format_name) model = model_util.get_multi_io_subclass_model( num_classes=num_classes, use_bn=True ) if h5py is not None: with self.assertRaises(ValueError): model.load_weights(hdf5_format_name) model.load_weights(tf_format_name) y1, y2 = model.predict([x1, x2]) self.assertAllClose(y_ref_1, y1, atol=1e-5) self.assertAllClose(y_ref_2, y2, atol=1e-5) if h5py is not None: model.load_weights(hdf5_format_name) y1, y2 = model.predict([x1, x2]) self.assertAllClose(y_ref_1, y1, atol=1e-5) self.assertAllClose(y_ref_2, y2, atol=1e-5) def test_subclass_nested_in_subclass(self): num_classes = 2 num_samples = 100 input_dim = 50 model = model_util.NestedTestModel1(num_classes=num_classes) model.compile( loss="mse", optimizer="rmsprop", metrics=["acc"], run_eagerly=test_utils.should_run_eagerly(), ) x = np.ones((num_samples, input_dim)) y = np.zeros((num_samples, num_classes)) model.fit(x, y, epochs=2, batch_size=32, verbose=0) _ = model.evaluate(x, y, verbose=0) self.assertEqual(len(model.weights), 8 + len(model.test_net.weights)) self.assertEqual( len(model.non_trainable_weights), 2 + len(model.test_net.non_trainable_weights), ) self.assertEqual( len(model.trainable_weights), 6 + len(model.test_net.trainable_weights), ) def test_graph_nested_in_subclass(self): num_classes = 2 num_samples = 100 input_dim = 50 model = model_util.NestedTestModel2(num_classes=num_classes) model.compile( loss="mse", optimizer="rmsprop", metrics=["acc"], run_eagerly=test_utils.should_run_eagerly(), ) x = np.ones((num_samples, input_dim)) y = np.zeros((num_samples, num_classes)) model.fit(x, y, epochs=2, batch_size=32, verbose=0) _ = model.evaluate(x, y, verbose=0) self.assertEqual(len(model.weights), 8 + len(model.test_net.weights)) self.assertEqual( len(model.non_trainable_weights), 2 + len(model.test_net.non_trainable_weights), ) self.assertEqual( len(model.trainable_weights), 6 + len(model.test_net.trainable_weights), ) def test_subclass_nested_in_graph(self): num_classes = 2 num_samples = 100 input_dim = 50 model = model_util.get_nested_model_3( input_dim=input_dim, num_classes=num_classes ) model.compile( loss="mse", optimizer="rmsprop", metrics=["acc"], run_eagerly=test_utils.should_run_eagerly(), ) x = np.ones((num_samples, input_dim)) y = np.zeros((num_samples, num_classes)) model.fit(x, y, epochs=2, batch_size=32, verbose=0) _ = model.evaluate(x, y, verbose=0) self.assertEqual(len(model.weights), 16) self.assertEqual(len(model.non_trainable_weights), 4) self.assertEqual(len(model.trainable_weights), 12) def test_subclass_nested_in_sequential(self): num_classes = 2 num_samples = 100 input_dim = 50 class Inner(keras.Model): def __init__(self): super().__init__() self.dense1 = keras.layers.Dense(32, activation="relu") self.dense2 = keras.layers.Dense(num_classes, activation="relu") self.bn = keras.layers.BatchNormalization() def call(self, inputs): x = 
self.dense1(inputs) x = self.dense2(x) return self.bn(x) model = keras.Sequential([Inner()]) model.compile( loss="mse", optimizer="rmsprop", metrics=["acc"], run_eagerly=test_utils.should_run_eagerly(), ) x = np.ones((num_samples, input_dim)) y = np.zeros((num_samples, num_classes)) model.fit(x, y, epochs=2, batch_size=32, verbose=0) _ = model.evaluate(x, y, verbose=0) self.assertEqual(len(model.weights), 8) self.assertEqual(len(model.non_trainable_weights), 2) self.assertEqual(len(model.trainable_weights), 6) def test_support_for_manual_training_arg(self): # In most cases, the `training` argument is left unspecified, in which # case it defaults to value corresponding to the Model method being used # (fit -> True, predict -> False, etc). # If the user writes their model `call` method to take # an explicit `training` argument, we must check that the correct value # is being passed to the model for each method call. class DPNet(keras.Model): def __init__(self): super().__init__() self.dp = keras.layers.Dropout(0.5) self.dense = keras.layers.Dense( 1, use_bias=False, kernel_initializer="ones" ) def call(self, inputs, training=False): x = self.dp(inputs, training=training) return self.dense(x) model = DPNet() x = np.ones((10, 10)) y = model.predict(x) self.assertEqual(np.sum(y), np.sum(x)) model.compile( loss="mse", optimizer="rmsprop", run_eagerly=test_utils.should_run_eagerly(), ) loss = model.train_on_batch(x, y) self.assertGreater(loss, 0.1) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/tests/model_subclassing_compiled_test.py/0
{ "file_path": "tf-keras/tf_keras/tests/model_subclassing_compiled_test.py", "repo_id": "tf-keras", "token_count": 8038 }
219
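The final test in the file above exercises the explicit `training` flag protocol: TF-Keras passes `training=False` for inference entry points such as `predict`, and `training=True` for training entry points such as `train_on_batch` and `fit`. Below is a minimal standalone sketch of the same behavior, assuming only a stock TensorFlow 2.x install; the class name is illustrative and not part of the test suite.

```python
import numpy as np
import tensorflow as tf


class DropoutNet(tf.keras.Model):
    """Toy model whose `call` accepts an explicit `training` flag."""

    def __init__(self):
        super().__init__()
        self.dp = tf.keras.layers.Dropout(0.5)
        # All-ones kernel so the output is just the (possibly dropped-out)
        # row sum, which makes the training/inference difference observable.
        self.dense = tf.keras.layers.Dense(
            1, use_bias=False, kernel_initializer="ones"
        )

    def call(self, inputs, training=False):
        x = self.dp(inputs, training=training)
        return self.dense(x)


model = DropoutNet()
x = np.ones((10, 10), dtype="float32")

# `predict` implies training=False: dropout is a no-op, so each output row
# equals the input row sum and the totals match exactly.
y = model.predict(x)
assert np.isclose(np.sum(y), np.sum(x))

# `train_on_batch` implies training=True: dropout perturbs the activations,
# so the MSE against the inference-mode targets is typically nonzero.
model.compile(loss="mse", optimizer="rmsprop")
loss = model.train_on_batch(x, y)
print("training loss:", float(loss))
```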
#!/usr/bin/env bash # Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== set -e function is_absolute { [[ "$1" = /* ]] || [[ "$1" =~ ^[a-zA-Z]:[/\\].* ]] } function real_path() { is_absolute "$1" && echo "$1" || echo "$PWD/${1#./}" } function prepare_src() { TMPDIR="$1" mkdir -p "$TMPDIR" echo $(date) : "=== Preparing sources in dir: ${TMPDIR}" if [ ! -d bazel-bin/tf_keras ]; then echo "Could not find bazel-bin. Did you run from the root of the build tree?" exit 1 fi cp -r "bazel-bin/tf_keras/tools/pip_package/build_pip_package.runfiles/org_keras/tf_keras" "$TMPDIR" cp tf_keras/tools/pip_package/setup.py "$TMPDIR" cp LICENSE "$TMPDIR" # Verifies all expected files are in pip. # Creates init files in all directories in pip. python tf_keras/tools/pip_package/create_pip_helper.py --pip-root "${TMPDIR}/tf_keras/" --bazel-root "./tf_keras" } function build_wheel() { if [ $# -lt 2 ] ; then echo "No src and dest dir provided" exit 1 fi TMPDIR="$1" DEST="$2" PROJECT_NAME="$3" pushd ${TMPDIR} > /dev/null echo $(date) : "=== Building wheel" "${PYTHON_BIN_PATH:-python}" setup.py bdist_wheel --universal --project_name $PROJECT_NAME mkdir -p ${DEST} cp dist/* ${DEST} popd > /dev/null echo $(date) : "=== Output wheel file is in: ${DEST}" } function usage() { echo "Usage:" echo "$0 [--src srcdir] [--dst dstdir] [options]" echo "$0 dstdir [options]" echo "" echo " --src prepare sources in srcdir" echo " will use temporary dir if not specified" echo "" echo " --dst build wheel in dstdir" echo " if dstdir is not set do not build, only prepare sources" echo "" echo " Options:" echo " --project_name <name> set project name to name" echo " --nightly build the tf-keras-nightly package" echo "" exit 1 } function main() { NIGHTLY_BUILD=0 PROJECT_NAME="" SRCDIR="" DSTDIR="" CLEANSRC=1 while true; do if [[ -z "$1" ]]; then break elif [[ "$1" == "--help" ]]; then usage exit 1 elif [[ "$1" == "--nightly" ]]; then NIGHTLY_BUILD=1 elif [[ "$1" == "--project_name" ]]; then shift if [[ -z "$1" ]]; then break fi PROJECT_NAME="$1" elif [[ "$1" == "--src" ]]; then shift if [[ -z "$1" ]]; then break fi SRCDIR="$(real_path $1)" CLEANSRC=0 elif [[ "$1" == "--dst" ]]; then shift if [[ -z "$1" ]]; then break fi DSTDIR="$(real_path $1)" else DSTDIR="$(real_path $1)" fi shift done if [[ -z ${PROJECT_NAME} ]]; then PROJECT_NAME="tf-keras" if [[ ${NIGHTLY_BUILD} == "1" ]]; then PROJECT_NAME="tf-keras-nightly" fi fi if [[ -z "$DSTDIR" ]] && [[ -z "$SRCDIR" ]]; then echo "No destination dir provided" usage exit 1 fi if [[ -z "$SRCDIR" ]]; then # make temp srcdir if none set SRCDIR="$(mktemp -d -t tmp.XXXXXXXXXX)" fi prepare_src "$SRCDIR" if [[ -z "$DSTDIR" ]]; then # only want to prepare sources exit fi build_wheel "$SRCDIR" "$DSTDIR" "$PROJECT_NAME" if [[ $CLEANSRC -ne 0 ]]; then rm -rf "${TMPDIR}" fi } main "$@"
tf-keras/tf_keras/tools/pip_package/build_pip_package.sh/0
{ "file_path": "tf-keras/tf_keras/tools/pip_package/build_pip_package.sh", "repo_id": "tf-keras", "token_count": 1651 }
220
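The script's flag protocol boils down to: `--src` picks (and preserves) the staging directory, `--dst` picks the wheel output directory, and `--nightly` switches the default project name to `tf-keras-nightly`. Below is a hedged sketch of driving it from Python; the paths are placeholders, and it assumes you run from the repository root after `bazel` has already produced `bazel-bin`.

```python
import subprocess

# Placeholder path; the script must be invoked from the repo root so it
# can find bazel-bin/tf_keras.
SCRIPT = "tf_keras/tools/pip_package/build_pip_package.sh"

# Stage sources only (no wheel) into an explicit, preserved directory.
subprocess.run([SCRIPT, "--src", "/tmp/tfk_staging"], check=True)

# Stage into a temp dir (cleaned up afterwards) and build a nightly wheel
# into /tmp/wheels.
subprocess.run([SCRIPT, "--dst", "/tmp/wheels", "--nightly"], check=True)
```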
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras image dataset loading utilities.""" import multiprocessing.pool import os import random import time import warnings import numpy as np import tensorflow.compat.v2 as tf from tf_keras.utils import io_utils # isort: off from tensorflow.python.util.tf_export import keras_export @keras_export("keras.utils.split_dataset", v1=[]) def split_dataset( dataset, left_size=None, right_size=None, shuffle=False, seed=None ): """Split a dataset into a left half and a right half (e.g. train / test). Args: dataset: A `tf.data.Dataset` object, or a list/tuple of arrays with the same length. left_size: If float (in the range `[0, 1]`), it signifies the fraction of the data to pack in the left dataset. If integer, it signifies the number of samples to pack in the left dataset. If `None`, it uses the complement to `right_size`. Defaults to `None`. right_size: If float (in the range `[0, 1]`), it signifies the fraction of the data to pack in the right dataset. If integer, it signifies the number of samples to pack in the right dataset. If `None`, it uses the complement to `left_size`. Defaults to `None`. shuffle: Boolean, whether to shuffle the data before splitting it. seed: A random seed for shuffling. Returns: A tuple of two `tf.data.Dataset` objects: the left and right splits. Example: >>> data = np.random.random(size=(1000, 4)) >>> left_ds, right_ds = tf.keras.utils.split_dataset(data, left_size=0.8) >>> int(left_ds.cardinality()) 800 >>> int(right_ds.cardinality()) 200 """ dataset_type_spec = _get_type_spec(dataset) if dataset_type_spec not in [tf.data.Dataset, list, tuple, np.ndarray]: raise TypeError( "The `dataset` argument must be either a `tf.data.Dataset` " "object or a list/tuple of arrays. " f"Received: dataset={dataset} of type {type(dataset)}" ) if right_size is None and left_size is None: raise ValueError( "At least one of the `left_size` or `right_size` " "must be specified. 
Received: left_size=None and " "right_size=None" ) dataset_as_list = _convert_dataset_to_list(dataset, dataset_type_spec) if shuffle: if seed is None: seed = random.randint(0, int(1e6)) random.seed(seed) random.shuffle(dataset_as_list) total_length = len(dataset_as_list) left_size, right_size = _rescale_dataset_split_sizes( left_size, right_size, total_length ) left_split = list(dataset_as_list[:left_size]) right_split = list(dataset_as_list[-right_size:]) left_split = _restore_dataset_from_list( left_split, dataset_type_spec, dataset ) right_split = _restore_dataset_from_list( right_split, dataset_type_spec, dataset ) left_split = tf.data.Dataset.from_tensor_slices(left_split) right_split = tf.data.Dataset.from_tensor_slices(right_split) # apply batching to the splits if the dataset is batched if dataset_type_spec is tf.data.Dataset and is_batched(dataset): batch_size = get_batch_size(dataset) if batch_size is not None: left_split = left_split.batch(batch_size) right_split = right_split.batch(batch_size) left_split = left_split.prefetch(tf.data.AUTOTUNE) right_split = right_split.prefetch(tf.data.AUTOTUNE) return left_split, right_split def _convert_dataset_to_list( dataset, dataset_type_spec, data_size_warning_flag=True, ensure_shape_similarity=True, ): """Convert `tf.data.Dataset` object or list/tuple of NumPy arrays to a list. Args: dataset : A `tf.data.Dataset` object or a list/tuple of arrays. dataset_type_spec : the type of the dataset data_size_warning_flag (bool, optional): If set to True, a warning will be issued if the dataset takes longer than 10 seconds to iterate. Defaults to `True`. ensure_shape_similarity (bool, optional): If set to True, the shape of the first sample will be used to validate the shape of the rest of the samples. Defaults to `True`. Returns: List: A list of tuples/NumPy arrays. """ dataset_iterator = _get_data_iterator_from_dataset( dataset, dataset_type_spec ) dataset_as_list = [] start_time = time.time() for sample in _get_next_sample( dataset_iterator, ensure_shape_similarity, data_size_warning_flag, start_time, ): if dataset_type_spec in [tuple, list]: # The try-except here is for NumPy 1.24 compatibility, see: # https://numpy.org/neps/nep-0034-infer-dtype-is-object.html try: arr = np.array(sample) except ValueError: arr = np.array(sample, dtype=object) dataset_as_list.append(arr) else: dataset_as_list.append(sample) return dataset_as_list def _get_data_iterator_from_dataset(dataset, dataset_type_spec): """Get the iterator from a dataset. Args: dataset : A `tf.data.Dataset` object or a list/tuple of arrays. dataset_type_spec : the type of the dataset Raises: ValueError: - If the dataset is empty. - If the dataset is not a `tf.data.Dataset` object or a list/tuple of arrays. - If the dataset is a list/tuple of arrays whose lengths do not all match. Returns: iterator: An `iterator` object. """ if dataset_type_spec == list: if len(dataset) == 0: raise ValueError( "Received an empty list dataset. " "Please provide a non-empty list of arrays." ) if _get_type_spec(dataset[0]) is np.ndarray: expected_shape = dataset[0].shape for i, element in enumerate(dataset): if np.array(element).shape[0] != expected_shape[0]: raise ValueError( "Received a list of NumPy arrays with different " f"lengths. Mismatch found at index {i}, " f"Expected shape={expected_shape} " f"Received shape={np.array(element).shape}. " "Please provide a list of NumPy arrays with " "the same length."
) else: raise ValueError( "Expected a list of `numpy.ndarray` objects, " f"Received: {type(dataset[0])}" ) return iter(zip(*dataset)) elif dataset_type_spec == tuple: if len(dataset) == 0: raise ValueError( "Received an empty tuple dataset. " "Please provide a non-empty tuple of arrays." ) if _get_type_spec(dataset[0]) is np.ndarray: expected_shape = dataset[0].shape for i, element in enumerate(dataset): if np.array(element).shape[0] != expected_shape[0]: raise ValueError( "Received a tuple of NumPy arrays with different " f"lengths. Mismatch found at index {i}, " f"Expected shape={expected_shape} " f"Received shape={np.array(element).shape}. " "Please provide a tuple of NumPy arrays with " "the same length." ) else: raise ValueError( "Expected a tuple of `numpy.ndarray` objects, " f"Received: {type(dataset[0])}" ) return iter(zip(*dataset)) elif dataset_type_spec == tf.data.Dataset: if is_batched(dataset): dataset = dataset.unbatch() return iter(dataset) elif dataset_type_spec == np.ndarray: return iter(dataset) def _get_next_sample( dataset_iterator, ensure_shape_similarity, data_size_warning_flag, start_time, ): """Yield data samples from the `dataset_iterator`. Args: dataset_iterator : An `iterator` object. ensure_shape_similarity (bool, optional): If set to True, the shape of the first sample will be used to validate the shape of the rest of the samples. Defaults to `True`. data_size_warning_flag (bool, optional): If set to True, a warning will be issued if the dataset takes longer than 10 seconds to iterate. Defaults to `True`. start_time (float): The start time of the dataset iteration. This is used only if `data_size_warning_flag` is set to `True`. Raises: ValueError: - If the dataset is empty. - If `ensure_shape_similarity` is set to True and the shape of the first sample is not equal to the shape of at least one of the rest of the samples. Yields: data_sample: A tuple/list of numpy arrays. """ try: dataset_iterator = iter(dataset_iterator) first_sample = next(dataset_iterator) if isinstance(first_sample, (tf.Tensor, np.ndarray)): first_sample_shape = np.array(first_sample).shape else: first_sample_shape = None ensure_shape_similarity = False yield first_sample except StopIteration: raise ValueError( "Received an empty Dataset. `dataset` must " "be a non-empty list/tuple of `numpy.ndarray` objects " "or `tf.data.Dataset` objects." ) for i, sample in enumerate(dataset_iterator): if ensure_shape_similarity: if first_sample_shape != np.array(sample).shape: raise ValueError( "All `dataset` samples must have same shape, " f"Expected shape: {np.array(first_sample).shape} " f"Received shape: {np.array(sample).shape} at index " f"{i}." ) if data_size_warning_flag: if i % 10 == 0: cur_time = time.time() # warns user if the dataset is too large to iterate within 10s if int(cur_time - start_time) > 10 and data_size_warning_flag: warnings.warn( "The dataset is taking longer than 10 seconds to " "iterate over. This may be due to the size of the " "dataset. Keep in mind that the `split_dataset` " "utility is only for small in-memory datasets " "(e.g.
< 10,000 samples).", category=ResourceWarning, source="split_dataset", ) data_size_warning_flag = False yield sample def _restore_dataset_from_list( dataset_as_list, dataset_type_spec, original_dataset ): """Restore the dataset from the list of arrays.""" if dataset_type_spec in [tuple, list]: return tuple(np.array(sample) for sample in zip(*dataset_as_list)) elif dataset_type_spec == tf.data.Dataset: if isinstance(original_dataset.element_spec, dict): restored_dataset = {} for d in dataset_as_list: for k, v in d.items(): if k not in restored_dataset: restored_dataset[k] = [v] else: restored_dataset[k].append(v) return restored_dataset else: return tuple(np.array(sample) for sample in zip(*dataset_as_list)) return dataset_as_list def _rescale_dataset_split_sizes(left_size, right_size, total_length): """Rescale the dataset split sizes. We want to ensure that the sum of the split sizes is equal to the total length of the dataset. Args: left_size : The size of the left dataset split. right_size : The size of the right dataset split. total_length : The total length of the dataset. Raises: TypeError: - If `left_size` or `right_size` is not an integer or float. ValueError: - If `left_size` or `right_size` is negative or greater than 1 or greater than `total_length`. Returns: tuple: A tuple of rescaled left_size and right_size """ left_size_type = type(left_size) right_size_type = type(right_size) # check both left_size and right_size are integers or floats if (left_size is not None and left_size_type not in [int, float]) and ( right_size is not None and right_size_type not in [int, float] ): raise TypeError( "Invalid `left_size` and `right_size` Types. Expected: " "integer or float or None, Received: type(left_size)=" f"{left_size_type} and type(right_size)={right_size_type}" ) # check left_size is an integer or float if left_size is not None and left_size_type not in [int, float]: raise TypeError( "Invalid `left_size` Type. Expected: int or float or None, " f"Received: type(left_size)={left_size_type}. " ) # check right_size is an integer or float if right_size is not None and right_size_type not in [int, float]: raise TypeError( "Invalid `right_size` Type. " "Expected: int or float or None, " f"Received: type(right_size)={right_size_type}." ) # check left_size and right_size are non-zero if left_size == 0 and right_size == 0: raise ValueError( "Both `left_size` and `right_size` are zero. " "At least one of the split sizes must be non-zero." ) # check left_size is positive, and less than total_length (int case) # or less than 1 (float case) if ( left_size_type == int and (left_size <= 0 or left_size >= total_length) or left_size_type == float and (left_size <= 0 or left_size >= 1) ): raise ValueError( "`left_size` should be either a positive integer " f"smaller than {total_length}, or a float " "within the range `[0, 1]`. Received: left_size=" f"{left_size}" ) # check right_size is positive, and less than total_length (int case) # or less than 1 (float case) if ( right_size_type == int and (right_size <= 0 or right_size >= total_length) or right_size_type == float and (right_size <= 0 or right_size >= 1) ): raise ValueError( "`right_size` should be either a positive integer " f"and smaller than {total_length} or a float " "within the range `[0, 1]`. Received: right_size=" f"{right_size}" ) # check sum of left_size and right_size is less than or equal to # total_length if ( right_size_type == left_size_type == float and right_size + left_size > 1 ): raise ValueError( "The sum of `left_size` and `right_size` is greater " "than 1.
It must be less than or equal to 1." ) if left_size_type == float: left_size = round(left_size * total_length) elif left_size_type == int: left_size = float(left_size) if right_size_type == float: right_size = round(right_size * total_length) elif right_size_type == int: right_size = float(right_size) if left_size is None: left_size = total_length - right_size elif right_size is None: right_size = total_length - left_size if left_size + right_size > total_length: raise ValueError( "The sum of `left_size` and `right_size` should " f"be smaller than or equal to {total_length}. " f"Received: left_size + right_size = {left_size+right_size} " f"and total_length = {total_length}" ) for split, side in [(left_size, "left"), (right_size, "right")]: if split == 0: raise ValueError( f"With `dataset` of length={total_length}, `left_size`=" f"{left_size} and `right_size`={right_size}. " f"Resulting {side} side dataset split will be empty. " "Adjust any of the aforementioned parameters." ) left_size, right_size = int(left_size), int(right_size) return left_size, right_size def _get_type_spec(dataset): """Get the type spec of the dataset.""" if isinstance(dataset, tuple): return tuple elif isinstance(dataset, list): return list elif isinstance(dataset, np.ndarray): return np.ndarray elif isinstance(dataset, dict): return dict elif isinstance(dataset, tf.data.Dataset): return tf.data.Dataset else: return None def is_batched(tf_dataset): """Check if the `tf.data.Dataset` is batched.""" return hasattr(tf_dataset, "_batch_size") def get_batch_size(tf_dataset): """Get the batch size of the dataset.""" if is_batched(tf_dataset): return tf_dataset._batch_size else: return None def index_directory( directory, labels, formats, class_names=None, shuffle=True, seed=None, follow_links=False, ): """Make list of all files in `directory`, with their labels. Args: directory: Directory where the data is located. If `labels` is "inferred", it should contain subdirectories, each containing files for a class. Otherwise, the directory structure is ignored. labels: Either "inferred" (labels are generated from the directory structure), None (no labels), or a list/tuple of integer labels of the same size as the number of valid files found in the directory. Labels should be sorted according to the alphanumeric order of the file paths (obtained via `os.walk(directory)` in Python). formats: Allowlist of file extensions to index (e.g. ".jpg", ".txt"). class_names: Only valid if "labels" is "inferred". This is the explicit list of class names (must match names of subdirectories). Used to control the order of the classes (otherwise alphanumerical order is used). shuffle: Whether to shuffle the data. Default: True. If set to False, sorts the data in alphanumeric order. seed: Optional random seed for shuffling. follow_links: Whether to visit subdirectories pointed to by symlinks. Returns: tuple (file_paths, labels, class_names). file_paths: list of file paths (strings). labels: list of matching integer labels (same length as file_paths) class_names: names of the classes corresponding to these labels, in order. """ if labels != "inferred": # in the explicit/no-label cases, index from the parent directory down.
subdirs = [""] class_names = subdirs else: subdirs = [] for subdir in sorted(tf.io.gfile.listdir(directory)): if tf.io.gfile.isdir(tf.io.gfile.join(directory, subdir)): if not subdir.startswith("."): if subdir.endswith("/"): subdir = subdir[:-1] subdirs.append(subdir) if not class_names: class_names = subdirs else: if set(class_names) != set(subdirs): raise ValueError( "The `class_names` passed did not match the " "names of the subdirectories of the target directory. " f"Expected: {subdirs}, but received: {class_names}" ) class_indices = dict(zip(class_names, range(len(class_names)))) # Build an index of the files # in the different class subfolders. pool = multiprocessing.pool.ThreadPool() results = [] filenames = [] for dirpath in (tf.io.gfile.join(directory, subdir) for subdir in subdirs): results.append( pool.apply_async( index_subdirectory, (dirpath, class_indices, follow_links, formats), ) ) labels_list = [] for res in results: partial_filenames, partial_labels = res.get() labels_list.append(partial_labels) filenames += partial_filenames if labels not in ("inferred", None): if len(labels) != len(filenames): raise ValueError( "Expected the lengths of `labels` to match the number " "of files in the target directory. len(labels) is " f"{len(labels)} while we found {len(filenames)} files " f"in directory {directory}." ) class_names = sorted(set(labels)) else: i = 0 labels = np.zeros((len(filenames),), dtype="int32") for partial_labels in labels_list: labels[i : i + len(partial_labels)] = partial_labels i += len(partial_labels) if labels is None: io_utils.print_msg(f"Found {len(filenames)} files.") else: io_utils.print_msg( f"Found {len(filenames)} files belonging " f"to {len(class_names)} classes." ) pool.close() pool.join() file_paths = [tf.io.gfile.join(directory, fname) for fname in filenames] if shuffle: # Shuffle globally to erase macro-structure if seed is None: seed = np.random.randint(1e6) rng = np.random.RandomState(seed) rng.shuffle(file_paths) rng = np.random.RandomState(seed) rng.shuffle(labels) return file_paths, labels, class_names def iter_valid_files(directory, follow_links, formats): if not follow_links: walk = tf.io.gfile.walk(directory) else: walk = os.walk(directory, followlinks=follow_links) for root, _, files in sorted(walk, key=lambda x: x[0]): for fname in sorted(files): if fname.lower().endswith(formats): yield root, fname def index_subdirectory(directory, class_indices, follow_links, formats): """Recursively walks `directory` and lists file paths and their class index. Args: directory: string, target directory. class_indices: dict mapping class names to their index. follow_links: boolean, whether to follow subdirectories pointed to by symlinks. formats: Allowlist of file extensions to index (e.g. ".jpg", ".txt"). Returns: tuple `(filenames, labels)`. `filenames` is a list of relative file paths, and `labels` is a list of integer labels corresponding to these files. """ dirname = os.path.basename(directory) valid_files = iter_valid_files(directory, follow_links, formats) labels = [] filenames = [] for root, fname in valid_files: labels.append(class_indices[dirname]) absolute_path = tf.io.gfile.join(root, fname) relative_path = tf.io.gfile.join( dirname, os.path.relpath(absolute_path, directory) ) filenames.append(relative_path) return filenames, labels def get_training_or_validation_split(samples, labels, validation_split, subset): """Potentially restrict samples & labels to a training or validation split.
Args: samples: List of elements. labels: List of corresponding labels. validation_split: Float, fraction of data to reserve for validation. subset: Subset of the data to return. Either "training", "validation", or None. If None, we return all of the data. Returns: tuple (samples, labels), potentially restricted to the specified subset. """ if not validation_split: return samples, labels num_val_samples = int(validation_split * len(samples)) if subset == "training": io_utils.print_msg( f"Using {len(samples) - num_val_samples} " f"files for training." ) samples = samples[:-num_val_samples] labels = labels[:-num_val_samples] elif subset == "validation": io_utils.print_msg(f"Using {num_val_samples} files for validation.") samples = samples[-num_val_samples:] labels = labels[-num_val_samples:] else: raise ValueError( '`subset` must be either "training" ' f'or "validation", received: {subset}' ) return samples, labels def labels_to_dataset(labels, label_mode, num_classes): """Create a tf.data.Dataset from the list/tuple of labels. Args: labels: list/tuple of labels to be converted into a tf.data.Dataset. label_mode: String describing the encoding of `labels`. Options are: - 'binary' indicates that the labels (there can be only 2) are encoded as `float32` scalars with values 0 or 1 (e.g. for `binary_crossentropy`). - 'categorical' means that the labels are mapped into a categorical vector (e.g. for `categorical_crossentropy` loss). num_classes: number of classes of labels. Returns: A `Dataset` instance. """ label_ds = tf.data.Dataset.from_tensor_slices(labels) if label_mode == "binary": label_ds = label_ds.map( lambda x: tf.expand_dims(tf.cast(x, "float32"), axis=-1), num_parallel_calls=tf.data.AUTOTUNE, ) elif label_mode == "categorical": label_ds = label_ds.map( lambda x: tf.one_hot(x, num_classes), num_parallel_calls=tf.data.AUTOTUNE, ) return label_ds def check_validation_split_arg(validation_split, subset, shuffle, seed): """Raise errors in case of invalid argument values. Args: validation_split: float between 0 and 1, fraction of data to reserve for validation. subset: One of "training", "validation" or "both". Only used if `validation_split` is set. shuffle: Whether to shuffle the data. Either True or False. seed: random seed for shuffling and transformations. """ if validation_split and not 0 < validation_split < 1: raise ValueError( "`validation_split` must be between 0 and 1, " f"received: {validation_split}" ) if (validation_split or subset) and not (validation_split and subset): raise ValueError( "If `subset` is set, `validation_split` must be set, and vice versa." ) if subset not in ("training", "validation", "both", None): raise ValueError( '`subset` must be either "training", ' f'"validation" or "both", received: {subset}' ) if validation_split and shuffle and seed is None: raise ValueError( "If using `validation_split` and shuffling the data, you must " "provide a `seed` argument, to make sure that there is no " "overlap between the training and validation subset." )
tf-keras/tf_keras/utils/dataset_utils.py/0
{ "file_path": "tf-keras/tf_keras/utils/dataset_utils.py", "repo_id": "tf-keras", "token_count": 12231 }
221
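Since `split_dataset` accepts a tuple of same-length arrays and keeps their pairing intact, a typical train/validation split looks like the hedged sketch below; the array contents are invented for illustration, and it assumes a TensorFlow version that exports `tf.keras.utils.split_dataset` (roughly TF 2.10+).

```python
import numpy as np
import tensorflow as tf

x = np.arange(20, dtype="float32").reshape(10, 2)  # 10 samples, 2 features
y = (np.arange(10) % 2).astype("int32")            # 10 matching labels

# An 8/2 split; each element of the resulting datasets is an (x, y) pair.
train_ds, val_ds = tf.keras.utils.split_dataset(
    (x, y), left_size=8, shuffle=True, seed=42
)
print(int(train_ds.cardinality()), int(val_ds.cardinality()))  # 8 2

for features, label in val_ds:
    print(features.numpy(), int(label))
```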
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities related to layer/model functionality.""" import copy import functools import re import weakref import numpy as np import tensorflow.compat.v2 as tf from tf_keras import initializers from tf_keras.utils import io_utils # isort: off from tensorflow.python.util.tf_export import keras_export @keras_export("keras.utils.get_source_inputs") def get_source_inputs(tensor, layer=None, node_index=None): """Returns the list of input tensors necessary to compute `tensor`. Output will always be a list of tensors (potentially with 1 element). Args: tensor: The tensor to start from. layer: Origin layer of the tensor. Will be determined via tensor._keras_history if not provided. node_index: Origin node index of the tensor. Returns: List of input tensors. """ if not hasattr(tensor, "_keras_history"): return tensor if layer is None or node_index: layer, node_index, _ = tensor._keras_history if not layer._inbound_nodes: return [tensor] else: node = layer._inbound_nodes[node_index] if node.is_input: # Reached an Input layer, stop recursion. return tf.nest.flatten(node.input_tensors) else: source_tensors = [] for layer, node_index, _, tensor in node.iterate_inbound(): previous_sources = get_source_inputs(tensor, layer, node_index) # Avoid input redundancy. for x in previous_sources: if all(x is not t for t in source_tensors): source_tensors.append(x) return source_tensors def validate_string_arg( input_data, allowable_strings, layer_name, arg_name, allow_none=False, allow_callables=False, ): """Validates the correctness of a string-based arg.""" if allow_none and input_data is None: return elif allow_callables and callable(input_data): return elif isinstance(input_data, str) and input_data in allowable_strings: return else: allowed_args = "`None`, " if allow_none else "" allowed_args += "a `Callable`, " if allow_callables else "" allowed_args += f"or one of the following values: {allowable_strings}" if allow_callables: callable_note = ( f"If restoring a model and `{arg_name}` is a custom callable, " "please ensure the callable is registered as a custom object. " "See " "https://www.tensorflow.org/guide/keras/save_and_serialize" "#registering_the_custom_object for details. " ) else: callable_note = "" raise ValueError( f"Unknown value for `{arg_name}` argument of layer {layer_name}. " f"{callable_note}Allowed values are: {allowed_args}. Received: " f"{input_data}" ) def count_params(weights): """Count the total number of scalars composing the weights. Args: weights: An iterable containing the weights on which to compute params Returns: The total number of scalars composing the weights """ unique_weights = {id(w): w for w in weights}.values() # Ignore TrackableWeightHandlers, which will not have a shape defined.
unique_weights = [w for w in unique_weights if hasattr(w, "shape")] weight_shapes = [w.shape.as_list() for w in unique_weights] standardized_weight_shapes = [ [0 if w_i is None else w_i for w_i in w] for w in weight_shapes ] return int(sum(np.prod(p) for p in standardized_weight_shapes)) def weight_memory_size(weights): """Calculate the memory footprint for weights based on their dtypes. Args: weights: An iterable containing the weights whose memory size to compute. Returns: The total memory size (in Bytes) of the weights. """ unique_weights = {id(w): w for w in weights}.values() total_memory_size = 0 for w in unique_weights: # Ignore TrackableWeightHandlers, which will not have a shape defined. if not hasattr(w, "shape"): continue elif None in w.shape.as_list(): continue weight_shape = np.prod(w.shape.as_list()) per_param_size = w.dtype.size total_memory_size += weight_shape * per_param_size return total_memory_size def dtensor_variable_summary(weights): """Group and calculate DTensor based weights memory size. Since DTensor weights can be sharded across multiple devices, the result will be grouped by the layout/sharding spec for the variables, so that the accurate per-device memory size can be calculated. Args: weights: An iterable containing the weights whose memory size to compute. Returns: total_weight_count, total_memory_size and per_sharing_spec_result which is a dict with normalized layout spec as key and tuple of weight count and weight size as value. """ unique_weights = {id(w): w for w in weights}.values() total_weight_count = 0 total_memory_size = 0 per_sharing_spec_result = {} for w in unique_weights: # Ignore TrackableWeightHandlers, which will not have a shape defined. if not hasattr(w, "shape"): continue if not isinstance(w, tf.experimental.dtensor.DVariable): continue layout = w.layout # Remove duplicate axes and sort the sharding spec names. # 1D replicated and 2D replicated variables will still be fully # replicated, and [batch, model] sharding will have same memory # footprint as the [model, batch] layout.
reduced_sharding_spec = list(sorted(set(layout.sharding_specs))) if tf.experimental.dtensor.UNSHARDED in reduced_sharding_spec: reduced_sharding_spec.remove(tf.experimental.dtensor.UNSHARDED) reduced_sharding_spec = tuple(reduced_sharding_spec) # For dict key weight_count, memory_size = per_sharing_spec_result.get( reduced_sharding_spec, (0, 0) ) reduced_weight_shape = np.prod(w.shape.as_list()) per_param_size = w.dtype.size weight_count += reduced_weight_shape memory_size += reduced_weight_shape * per_param_size per_sharing_spec_result[reduced_sharding_spec] = ( weight_count, memory_size, ) total_weight_count += reduced_weight_shape total_memory_size += reduced_weight_shape * per_param_size return total_weight_count, total_memory_size, per_sharing_spec_result def print_dtensor_variable_summary(model, print_fn, line_length): if getattr(model, "_layout_map", None) is not None: mesh = model._layout_map.get_default_mesh() elif hasattr(model, "distribute_strategy") and hasattr( model.distribute_strategy, "_mesh" ): mesh = model.distribute_strategy._mesh else: # Not running with DTensor mesh = None if mesh: ( total_weight_count, total_memory_size, per_sharing_spec_result, ) = dtensor_variable_summary(model.weights) total_per_device_memory_size = 0 for sharding_spec in sorted(per_sharing_spec_result.keys()): count, memory_size = per_sharing_spec_result[sharding_spec] if len(sharding_spec) == 0: print_fn( f"{count} / {total_weight_count} params " f"({readable_memory_size(memory_size)}) " "are fully replicated" ) per_device_size = memory_size else: sharding_factor = np.prod( [mesh.dim_size(s) for s in sharding_spec] ) per_device_size = memory_size / sharding_factor print_fn( f"{count} / {total_weight_count} params " f"({readable_memory_size(memory_size)}) are sharded based " f"on spec '{sharding_spec}' and across {sharding_factor} " f"devices." ) total_per_device_memory_size += per_device_size print_fn( "Overall per device memory usage: " f"{readable_memory_size(total_per_device_memory_size)}" ) print_fn( "Overall sharding factor: {:.2f}".format( total_memory_size / total_per_device_memory_size ) ) print_fn("_" * line_length) def readable_memory_size(weight_memory_size): """Convert the weight memory size (Bytes) to a readable string.""" units = ["Byte", "KB", "MB", "GB", "TB", "PB"] scale = 1024 for unit in units: if weight_memory_size / scale < 1: return "{:.2f} {}".format(weight_memory_size, unit) else: weight_memory_size /= scale return "{:.2f} {}".format(weight_memory_size, units[-1]) def get_layer_index_bound_by_layer_name(model, layer_range=None): """Get the layer indexes from the model based on layer names. The layer indexes can be used to slice the model into sub-models for display. Args: model: `tf.keras.Model` instance. layer_range: a list or tuple of 2 strings, the starting layer name and ending layer name (both inclusive) for the result. All layers will be included when `None` is provided. Returns: The layer indexes corresponding to the layer names in `layer_range`. Output will be [first_layer_index, last_layer_index + 1]. """ if layer_range is not None: if len(layer_range) != 2: raise ValueError( "layer_range must be a list or tuple of length 2. Received: " f"layer_range = {layer_range} of length {len(layer_range)}" ) if not isinstance(layer_range[0], str) or not isinstance( layer_range[1], str ): raise ValueError( "layer_range should contain string type only.
" f"Received: {layer_range}" ) else: return [0, len(model.layers)] lower_index = [ idx for idx, layer in enumerate(model.layers) if re.match(layer_range[0], layer.name) ] upper_index = [ idx for idx, layer in enumerate(model.layers) if re.match(layer_range[1], layer.name) ] if not lower_index or not upper_index: raise ValueError( "Passed layer_names do not match the layer names in the model. " f"Received: {layer_range}" ) if min(lower_index) > max(upper_index): return [min(upper_index), max(lower_index) + 1] return [min(lower_index), max(upper_index) + 1] def print_summary( model, line_length=None, positions=None, print_fn=None, expand_nested=False, show_trainable=False, layer_range=None, ): """Prints a summary of a model. Args: model: TF-Keras model instance. line_length: Total length of printed lines (e.g. set this to adapt the display to different terminal window sizes). positions: Relative or absolute positions of log elements in each line. If not provided, defaults to `[0.3, 0.6, 0.70, 1.]`. print_fn: Print function to use. It will be called on each line of the summary. You can set it to a custom function in order to capture the string summary. When `None`, uses `print` (prints to stdout). Defaults to `None`. expand_nested: Whether to expand the nested models. Defaults to `False`. show_trainable: Whether to show if a layer is trainable. Defaults to `False`. layer_range: List or tuple containing two strings, the starting layer name and ending layer name (both inclusive), indicating the range of layers to be printed in the summary. The strings could also be regexes instead of an exact name. In this case, the starting layer will be the first layer that matches `layer_range[0]` and the ending layer will be the last element that matches `layer_range[1]`. By default (`None`) all layers in the model are included in the summary. """ if print_fn is None: print_fn = io_utils.print_msg if model.__class__.__name__ == "Sequential": sequential_like = True elif not model._is_graph_network: # We treat subclassed models as a simple sequence of layers, for logging # purposes. 
sequential_like = True else: sequential_like = True nodes_by_depth = model._nodes_by_depth.values() nodes = [] for v in nodes_by_depth: if (len(v) > 1) or ( len(v) == 1 and len(tf.nest.flatten(v[0].keras_inputs)) > 1 ): # if the model has multiple nodes # or if the nodes have multiple inbound_layers # the model is no longer sequential sequential_like = False break nodes += v if sequential_like: # search for shared layers for layer in model.layers: flag = False for node in layer._inbound_nodes: if node in nodes: if flag: sequential_like = False break else: flag = True if not sequential_like: break if sequential_like: line_length = line_length or 65 positions = positions or [0.45, 0.85, 1.0] if positions[-1] <= 1: positions = [int(line_length * p) for p in positions] # header names for the different log elements to_display = ["Layer (type)", "Output Shape", "Param #"] else: line_length = line_length or 98 positions = positions or [0.3, 0.6, 0.70, 1.0] if positions[-1] <= 1: positions = [int(line_length * p) for p in positions] # header names for the different log elements to_display = ["Layer (type)", "Output Shape", "Param #", "Connected to"] relevant_nodes = [] for v in model._nodes_by_depth.values(): relevant_nodes += v if show_trainable: line_length += 11 positions.append(line_length) to_display.append("Trainable") layer_range = get_layer_index_bound_by_layer_name(model, layer_range) def print_row(fields, positions, nested_level=0): left_to_print = [str(x) for x in fields] while any(left_to_print): line = "" for col in range(len(left_to_print)): if col > 0: start_pos = positions[col - 1] else: start_pos = 0 end_pos = positions[col] # Leave room for 2 spaces to delineate columns # we don't need any if we are printing the last column space = 2 if col != len(positions) - 1 else 0 cutoff = end_pos - start_pos - space # Except for last col, offset by one to align the start of col if col != len(positions) - 1: cutoff -= 1 if col == 0: cutoff -= nested_level fit_into_line = left_to_print[col][:cutoff] # For nicer formatting we line-break on seeing end of # tuple/dict etc. line_break_conditions = ("),", "},", "],", "',") candidate_cutoffs = [ fit_into_line.find(x) + len(x) for x in line_break_conditions if fit_into_line.find(x) >= 0 ] if candidate_cutoffs: cutoff = min(candidate_cutoffs) fit_into_line = fit_into_line[:cutoff] if col == 0: line += "|" * nested_level + " " line += fit_into_line line += " " * space if space else "" left_to_print[col] = left_to_print[col][cutoff:] # Pad out to the next position # Make space for nested_level for last column if nested_level and col == len(positions) - 1: line += " " * (positions[col] - len(line) - nested_level) else: line += " " * (positions[col] - len(line)) line += "|" * nested_level print_fn(line) print_fn(f'Model: "{model.name}"') print_fn("_" * line_length) print_row(to_display, positions) print_fn("=" * line_length) def print_layer_summary(layer, nested_level=0): """Prints a summary for a single layer. Args: layer: target layer. nested_level: level of nesting of the layer inside its parent layer (e.g. 0 for a top-level layer, 1 for a nested layer). """ try: output_shape = layer.output_shape except AttributeError: output_shape = "multiple" except RuntimeError: # output_shape unknown in Eager mode. output_shape = "?" 
name = layer.name cls_name = layer.__class__.__name__ if not layer.built and not getattr(layer, "_is_graph_network", False): # If a subclassed model has a layer that is not called in # Model.call, the layer will not be built and we cannot call # layer.count_params(). params = "0 (unused)" else: params = layer.count_params() fields = [name + " (" + cls_name + ")", output_shape, params] if show_trainable: fields.append("Y" if layer.trainable else "N") print_row(fields, positions, nested_level) def print_layer_summary_with_connections(layer, nested_level=0): """Prints a summary for a single layer (including its connections). Args: layer: target layer. nested_level: level of nesting of the layer inside its parent layer (e.g. 0 for a top-level layer, 1 for a nested layer). """ try: output_shape = layer.output_shape except AttributeError: output_shape = "multiple" connections = [] for node in layer._inbound_nodes: if relevant_nodes and node not in relevant_nodes: # node is not part of the current network continue for ( inbound_layer, node_index, tensor_index, _, ) in node.iterate_inbound(): connections.append( f"{inbound_layer.name}[{node_index}][{tensor_index}]" ) name = layer.name cls_name = layer.__class__.__name__ fields = [ name + " (" + cls_name + ")", output_shape, layer.count_params(), connections, ] if show_trainable: fields.append("Y" if layer.trainable else "N") print_row(fields, positions, nested_level) def print_layer(layer, nested_level=0, is_nested_last=False): if sequential_like: print_layer_summary(layer, nested_level) else: print_layer_summary_with_connections(layer, nested_level) if expand_nested and hasattr(layer, "layers") and layer.layers: print_fn( "|" * (nested_level + 1) + "¯" * (line_length - 2 * nested_level - 2) + "|" * (nested_level + 1) ) nested_layer = layer.layers is_nested_last = False for i in range(len(nested_layer)): if i == len(nested_layer) - 1: is_nested_last = True print_layer(nested_layer[i], nested_level + 1, is_nested_last) print_fn( "|" * nested_level + "¯" * (line_length - 2 * nested_level) + "|" * nested_level ) if not is_nested_last: print_fn( "|" * nested_level + " " * (line_length - 2 * nested_level) + "|" * nested_level ) for layer in model.layers[layer_range[0] : layer_range[1]]: print_layer(layer) print_fn("=" * line_length) if hasattr(model, "_collected_trainable_weights"): trainable_count = count_params(model._collected_trainable_weights) trainable_memory_size = weight_memory_size( model._collected_trainable_weights ) else: trainable_count = count_params(model.trainable_weights) trainable_memory_size = weight_memory_size(model.trainable_weights) non_trainable_count = count_params(model.non_trainable_weights) non_trainable_memory_size = weight_memory_size(model.non_trainable_weights) total_memory_size = trainable_memory_size + non_trainable_memory_size print_fn( f"Total params: {trainable_count + non_trainable_count} " f"({readable_memory_size(total_memory_size)})" ) print_fn( f"Trainable params: {trainable_count} " f"({readable_memory_size(trainable_memory_size)})" ) print_fn( f"Non-trainable params: {non_trainable_count} " f"({readable_memory_size(non_trainable_memory_size)})" ) print_fn("_" * line_length) print_dtensor_variable_summary(model, print_fn, line_length) def convert_dense_weights_data_format( dense, previous_feature_map_shape, target_data_format="channels_first" ): """Utility useful when changing a convnet's `data_format`. 
When porting the weights of a convnet from one data format to the other, if the convnet includes a `Flatten` layer (applied to the last convolutional feature map) followed by a `Dense` layer, the weights of that `Dense` layer should be updated to reflect the new dimension ordering. Args: dense: The target `Dense` layer. previous_feature_map_shape: A shape tuple of 3 integers, e.g. `(512, 7, 7)`. The shape of the convolutional feature map right before the `Flatten` layer that came before the target `Dense` layer. target_data_format: One of "channels_last", "channels_first". Set it "channels_last" if converting a "channels_first" model to "channels_last", or reciprocally. """ assert target_data_format in {"channels_last", "channels_first"} kernel, bias = dense.get_weights() for i in range(kernel.shape[1]): if target_data_format == "channels_first": c, h, w = previous_feature_map_shape original_fm_shape = (h, w, c) ki = kernel[:, i].reshape(original_fm_shape) ki = np.transpose(ki, (2, 0, 1)) # last -> first else: h, w, c = previous_feature_map_shape original_fm_shape = (c, h, w) ki = kernel[:, i].reshape(original_fm_shape) ki = np.transpose(ki, (1, 2, 0)) # first -> last kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),)) dense.set_weights([kernel, bias]) def is_builtin_layer(layer): if not getattr(layer, "_keras_api_names", None): return False # Subclasses of `Layer` that are not exported inherit the export name # of the base layer class. return layer._keras_api_names != ( "keras.layers.Layer", ) and layer._keras_api_names_v1 != ("keras.layers.Layer",) def cached_per_instance(f): """Lightweight decorator for caching lazily constructed properties. When to use: This decorator provides simple caching with minimal overhead. It is designed for properties which are expensive to compute and static over the life of a class instance, and provides no mechanism for cache invalidation. Thus it is best suited for lazily exposing derived properties of other static data. For classes with custom getattr / setattr behavior (such as trackable objects), storing cache results as object attributes is not performant. Instead, a specialized cache can significantly reduce property lookup overhead. (While still allowing the decorated property to be lazily computed.) Consider the following class: ``` class MyClass: def __setattr__(self, key, value): # Some expensive class specific code # ... # ... super(MyClass, self).__setattr__(key, value) @property def thing(self): # `thing` is expensive to compute (and may not even be requested), so we # want to lazily compute it and then cache it. output = getattr(self, '_thing', None) if output is None: self._thing = output = compute_thing(self) return output ``` It's also worth noting that ANY overriding of __setattr__, even something as simple as: ``` def __setattr__(self, key, value): super(MyClass, self).__setattr__(key, value) ``` Slows down attribute assignment by nearly 10x. By contrast, replacing the definition of `thing` with the following sidesteps the expensive __setattr__ altogether: ''' @property @tracking.cached_per_instance def thing(self): # `thing` is expensive to compute (and may not even be requested), so we # want to lazily compute it and then cache it. return compute_thing(self) ''' Performance: The overhead for this decorator is ~0.4 us / call. 
A much lower overhead implementation (~0.085 us / call) can be achieved by using a custom dict type: ``` def dict_based_cache(f): class Cache(dict): __slots__ = () def __missing__(self, key): self[key] = output = f(key) return output return property(Cache().__getitem__) ``` However, that implementation holds class instances as keys, and as a result blocks garbage collection. (And modifying it to use weakrefs as keys raises the lookup overhead to ~0.4 us.) As a result, the WeakKeyDictionary implementation below turns out to be more prudent. Args: f: The function to cache. Returns: f decorated with simple caching behavior. """ cache = weakref.WeakKeyDictionary() @functools.wraps(f) def wrapped(item): output = cache.get(item) if output is None: cache[item] = output = f(item) return output wrapped.cache = cache return wrapped def filter_empty_layer_containers(layer_list): """Filter out empty Layer-like containers and uniquify.""" # TODO(b/130381733): Make this an attribute in base_layer.Layer. existing = set() to_visit = layer_list[::-1] while to_visit: obj = to_visit.pop() if id(obj) in existing: continue existing.add(id(obj)) if hasattr(obj, "_is_layer") and not isinstance(obj, type): yield obj else: sub_layers = getattr(obj, "layers", None) or [] # Trackable data structures will not show up in ".layers" lists, but # the layers they contain will. to_visit.extend(sub_layers[::-1]) class CallFunctionSpec: """Caches the spec and provides utilities for handling call function args.""" def __init__(self, full_argspec): """Initializes a `CallFunctionSpec`. Args: full_argspec: the FullArgSpec of a call function of a layer. """ self._full_argspec = full_argspec self._arg_names = list(self._full_argspec.args) # Scrub `self` that appears if a decorator was applied. if self._arg_names and self._arg_names[0] == "self": self._arg_names = self._arg_names[1:] self._arg_names += self._full_argspec.kwonlyargs or [] call_accepts_kwargs = self._full_argspec.varkw is not None self._expects_training_arg = ( "training" in self._arg_names or call_accepts_kwargs ) self._expects_mask_arg = ( "mask" in self._arg_names or call_accepts_kwargs ) call_fn_defaults = self._full_argspec.defaults or [] defaults = dict() # The call arg defaults are an n-tuple of the last n elements of the # args list. (n = # of elements that have a default argument) for i in range(-1 * len(call_fn_defaults), 0): defaults[self._arg_names[i]] = call_fn_defaults[i] # The default training arg will be any (non-None) default specified in # the method signature, or None if no value is specified. defaults.update(self._full_argspec.kwonlydefaults or {}) self._default_training_arg = defaults.get("training") @property def full_argspec(self): """Returns the FullArgSpec of the call function.""" return self._full_argspec @property def arg_names(self): """List of names of args and kwonlyargs.""" # `arg_names` is not accurate if the layer has variable positional args. return self._arg_names @arg_names.setter def arg_names(self, value): self._arg_names = value @property @cached_per_instance def arg_positions(self): """Returns a dict mapping arg names to their index positions.""" # `arg_positions` is not accurate if the layer has variable positional # args.
call_fn_arg_positions = dict() for pos, arg in enumerate(self._arg_names): call_fn_arg_positions[arg] = pos return call_fn_arg_positions @property def expects_training_arg(self): """Whether the call function uses 'training' as a parameter.""" return self._expects_training_arg @expects_training_arg.setter def expects_training_arg(self, value): self._expects_training_arg = value @property def expects_mask_arg(self): """Whether the call function uses `mask` as a parameter.""" return self._expects_mask_arg @expects_mask_arg.setter def expects_mask_arg(self, value): self._expects_mask_arg = value @property def default_training_arg(self): """The default value given to the "training" argument.""" return self._default_training_arg def arg_was_passed(self, arg_name, args, kwargs, inputs_in_args=False): """Returns true if argument is present in `args` or `kwargs`. Args: arg_name: String name of the argument to find. args: Tuple of args passed to the call function. kwargs: Dictionary of kwargs passed to the call function. inputs_in_args: Whether the input argument (the first argument in the call function) is included in `args`. Defaults to `False`. Returns: True if argument with `arg_name` is present in `args` or `kwargs`. """ # Performance optimization: do no work in most common case. if not args and not kwargs: return False if arg_name in kwargs: return True call_fn_args = self._arg_names if not inputs_in_args: # Ignore `inputs` arg. call_fn_args = call_fn_args[1:] return arg_name in dict(zip(call_fn_args, args)) def get_arg_value(self, arg_name, args, kwargs, inputs_in_args=False): """Retrieves the value for the argument with name `arg_name`. Args: arg_name: String name of the argument to find. args: Tuple of args passed to the call function. kwargs: Dictionary of kwargs passed to the call function. inputs_in_args: Whether the input argument (the first argument in the call function) is included in `args`. Defaults to `False`. Returns: The value of the argument with name `arg_name`, extracted from `args` or `kwargs`. Raises: KeyError if the value of `arg_name` cannot be found. """ if arg_name in kwargs: return kwargs[arg_name] call_fn_args = self._arg_names if not inputs_in_args: # Ignore `inputs` arg. call_fn_args = call_fn_args[1:] args_dict = dict(zip(call_fn_args, args)) return args_dict[arg_name] def set_arg_value( self, arg_name, new_value, args, kwargs, inputs_in_args=False, pop_kwarg_if_none=False, ): """Sets the value of an argument into the given args/kwargs. Args: arg_name: String name of the argument to find. new_value: New value to give to the argument. args: Tuple of args passed to the call function. kwargs: Dictionary of kwargs passed to the call function. inputs_in_args: Whether the input argument (the first argument in the call function) is included in `args`. Defaults to `False`. pop_kwarg_if_none: If the new value is `None`, and this is `True`, then the argument is deleted from `kwargs`. Returns: The updated `(args, kwargs)`. """ if self.full_argspec.varargs: try: arg_pos = self.full_argspec.args.index(arg_name) if self.full_argspec.args[0] == "self": arg_pos -= 1 except ValueError: arg_pos = None else: arg_pos = self.arg_positions.get(arg_name, None) if arg_pos is not None: if not inputs_in_args: # Ignore `inputs` arg. 
arg_pos = arg_pos - 1 if len(args) > arg_pos: args = list(args) args[arg_pos] = new_value return tuple(args), kwargs if new_value is None and pop_kwarg_if_none: kwargs.pop(arg_name, None) else: kwargs[arg_name] = new_value return args, kwargs def split_out_first_arg(self, args, kwargs): """Splits (args, kwargs) into (inputs, args, kwargs).""" # Grab the argument corresponding to the first argument in the # layer's `call` method spec. This will either be the first positional # argument, or it will be provided as a keyword argument. if args: inputs = args[0] args = args[1:] elif self._arg_names[0] in kwargs: kwargs = copy.copy(kwargs) inputs = kwargs.pop(self._arg_names[0]) else: raise ValueError( "The first argument to `Layer.call` must always be passed." ) return inputs, args, kwargs @keras_export("keras.utils.warmstart_embedding_matrix") def warmstart_embedding_matrix( base_vocabulary, new_vocabulary, base_embeddings, new_embeddings_initializer="uniform", ): """Warm start embedding matrix with changing vocab. This util can be used to warmstart the embedding layer matrix when the vocabulary changes between a previously saved checkpoint and the new model. A vocabulary change could mean that the new vocabulary differs in size, is reordered, or adds new terms to the old vocabulary. If the vocabulary size changes, the size of the embedding layer matrix also changes. This util remaps the old vocabulary embeddings to the new embedding layer matrix. Example: Here is an example that demonstrates how to use the `warmstart_embedding_matrix` util. >>> import tf_keras as keras >>> vocab_base = tf.convert_to_tensor(["unk", "a", "b", "c"]) >>> vocab_new = tf.convert_to_tensor( ... ["unk", "unk", "a", "b", "c", "d", "e"]) >>> vectorized_vocab_base = np.random.rand(vocab_base.shape[0], 3) >>> vectorized_vocab_new = np.random.rand(vocab_new.shape[0], 3) >>> warmstarted_embedding_matrix = warmstart_embedding_matrix( ... base_vocabulary=vocab_base, ... new_vocabulary=vocab_new, ... base_embeddings=vectorized_vocab_base, ... new_embeddings_initializer=keras.initializers.Constant( ... vectorized_vocab_new)) Here is an example that demonstrates how to get vocabulary and embedding weights from layers, use the `warmstart_embedding_matrix` util to remap the layer embeddings and continue with model training. ``` # get old and new vocabulary by using layer.get_vocabulary() # for example assume TextVectorization layer is used base_vocabulary = old_text_vectorization_layer.get_vocabulary() new_vocabulary = new_text_vectorization_layer.get_vocabulary() # get previous embedding layer weights embedding_weights_base = model.get_layer('embedding').get_weights()[0] warmstarted_embedding = keras.utils.warmstart_embedding_matrix( base_vocabulary, new_vocabulary, base_embeddings=embedding_weights_base, new_embeddings_initializer="uniform") updated_embedding_variable = tf.Variable(warmstarted_embedding) # update embedding layer weights model.layers[1].embeddings = updated_embedding_variable model.fit(...) # continue with model training ``` Args: base_vocabulary: The list of vocabulary terms that the preexisting embedding matrix `base_embeddings` represents. It can be either a 1D array/tensor or a tuple/list of vocabulary terms (strings), or a path to a vocabulary text file. If passing a file path, the file should contain one line per term in the vocabulary. new_vocabulary: The list of vocabulary terms for the new vocabulary (same format as above).
        base_embeddings: NumPy array or tensor representing the preexisting
            embedding matrix.
        new_embeddings_initializer: Initializer for the embedding vectors of
            terms that are not present in the base vocabulary (see
            `keras.initializers`). To fully specify the new embedding
            matrix, pass a `keras.initializers.Constant` built from that
            matrix. Defaults to `"uniform"`.

    Returns:
        A `tf.Tensor` containing the remapped embedding matrix.
    """
    # Convert the vocabularies to lists.
    base_vocabulary = convert_vocab_to_list(base_vocabulary)
    new_vocabulary = convert_vocab_to_list(new_vocabulary)

    # Initialize the new embedding matrix.
    new_embeddings_initializer = initializers.get(new_embeddings_initializer)
    new_embedding = new_embeddings_initializer(
        shape=(len(new_vocabulary), base_embeddings.shape[1]),
        dtype=base_embeddings.dtype,
    )

    # Create a mapping dict {vocab_term: index}.
    base_vocabulary_dict = dict(
        zip(base_vocabulary, range(len(base_vocabulary)))
    )

    indices_base_vocabulary = []
    indices_new_vocabulary = []
    for index, key in enumerate(new_vocabulary):
        if key in base_vocabulary_dict:
            indices_base_vocabulary.append(base_vocabulary_dict[key])
            indices_new_vocabulary.append(int(index))

    # Copy the embeddings of shared terms into the new matrix.
    if indices_base_vocabulary:
        values_to_update = tf.gather(base_embeddings, indices_base_vocabulary)
        new_embedding = tf.tensor_scatter_nd_update(
            new_embedding,
            tf.expand_dims(indices_new_vocabulary, axis=1),
            values_to_update,
        )
    return new_embedding


def convert_vocab_to_list(vocab):
    """Convert the input vocabulary to a list."""
    vocab_list = []
    if tf.is_tensor(vocab):
        vocab_list = list(vocab.numpy())
    elif isinstance(vocab, (np.ndarray, tuple, list)):
        vocab_list = list(vocab)
    elif isinstance(vocab, str):
        if not tf.io.gfile.exists(vocab):
            raise ValueError(f"Vocabulary file {vocab} does not exist.")
        with tf.io.gfile.GFile(vocab, "r") as vocabulary_file:
            vocab_list = vocabulary_file.read().splitlines()
    else:
        raise ValueError(
            "Vocabulary is expected to be either a NumPy array, "
            "list, 1D tensor or a vocabulary text file. Instead type "
            f"{type(vocab)} was received."
        )
    if len(vocab_list) == 0:
        raise ValueError(
            "Vocabulary is expected to be either a NumPy array, "
            "list, 1D tensor or a vocabulary text file with at least one "
            "token. Received 0 instead."
        )
    return vocab_list
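
# A minimal, illustrative sketch of how `warmstart_embedding_matrix` behaves
# (not part of the library; the vocabularies and the embedding width of 4
# below are made-up assumptions):
#
#   import numpy as np
#
#   base_vocab = ["unk", "a", "b", "c"]
#   new_vocab = ["unk", "a", "b", "c", "d"]
#   base_embeddings = np.random.rand(len(base_vocab), 4).astype("float32")
#
#   new_matrix = warmstart_embedding_matrix(
#       base_vocabulary=base_vocab,
#       new_vocabulary=new_vocab,
#       base_embeddings=base_embeddings,
#       new_embeddings_initializer="uniform",
#   )
#   # The rows for "unk", "a", "b" and "c" are gathered from
#   # `base_embeddings`; the row for the unseen term "d" keeps its
#   # "uniform" initialization.
#   assert new_matrix.shape == (5, 4)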
tf-keras/tf_keras/utils/layer_utils.py/0
{ "file_path": "tf-keras/tf_keras/utils/layer_utils.py", "repo_id": "tf-keras", "token_count": 18042 }
222
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for text_dataset."""

import os
import random
import shutil
import string

import tensorflow.compat.v2 as tf

from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import text_dataset


@test_utils.run_v2_only
class TextDatasetFromDirectoryTest(test_combinations.TestCase):
    def _prepare_directory(
        self, num_classes=2, nested_dirs=False, count=16, length=20
    ):
        # Get a unique temp directory. Use an integer bound for `randint`;
        # float arguments are rejected by newer Python versions.
        temp_dir = os.path.join(
            self.get_temp_dir(), str(random.randint(0, 10**6))
        )
        os.mkdir(temp_dir)
        self.addCleanup(shutil.rmtree, temp_dir)

        # Generate paths to class subdirectories.
        paths = []
        for class_index in range(num_classes):
            class_directory = f"class_{class_index}"
            if nested_dirs:
                class_paths = [
                    class_directory,
                    os.path.join(class_directory, "subfolder_1"),
                    os.path.join(class_directory, "subfolder_2"),
                    os.path.join(
                        class_directory, "subfolder_1", "sub-subfolder"
                    ),
                ]
            else:
                class_paths = [class_directory]
            for path in class_paths:
                os.mkdir(os.path.join(temp_dir, path))
            paths += class_paths

        for i in range(count):
            path = paths[i % len(paths)]
            filename = os.path.join(path, f"text_{i}.txt")
            with open(os.path.join(temp_dir, filename), "w") as f:
                text = "".join(
                    [random.choice(string.printable) for _ in range(length)]
                )
                f.write(text)
        return temp_dir

    def test_text_dataset_from_directory_standalone(self):
        # Test retrieving txt files without labels from a directory and its
        # subdirs. Save a few extra files in the parent directory.
        directory = self._prepare_directory(count=7, num_classes=2)
        for i in range(3):
            filename = f"text_{i}.txt"
            with open(os.path.join(directory, filename), "w") as f:
                text = "".join(
                    [random.choice(string.printable) for _ in range(20)]
                )
                f.write(text)

        dataset = text_dataset.text_dataset_from_directory(
            directory, batch_size=5, label_mode=None, max_length=10
        )
        batch = next(iter(dataset))
        # We just return the texts, no labels.
        self.assertEqual(batch.shape, (5,))
        self.assertEqual(batch.dtype.name, "string")
        # Count samples.
        batch_count = 0
        sample_count = 0
        for batch in dataset:
            batch_count += 1
            sample_count += batch.shape[0]
        self.assertEqual(batch_count, 2)
        self.assertEqual(sample_count, 10)

    def test_text_dataset_from_directory_binary(self):
        directory = self._prepare_directory(num_classes=2)
        dataset = text_dataset.text_dataset_from_directory(
            directory, batch_size=8, label_mode="int", max_length=10
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8,))
        self.assertEqual(batch[0].dtype.name, "string")
        self.assertEqual(len(batch[0].numpy()[0]), 10)  # Test max_length.
        self.assertEqual(batch[1].shape, (8,))
        self.assertEqual(batch[1].dtype.name, "int32")

        dataset = text_dataset.text_dataset_from_directory(
            directory, batch_size=8, label_mode="binary"
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8,))
        self.assertEqual(batch[0].dtype.name, "string")
        self.assertEqual(batch[1].shape, (8, 1))
        self.assertEqual(batch[1].dtype.name, "float32")

        dataset = text_dataset.text_dataset_from_directory(
            directory, batch_size=8, label_mode="categorical"
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8,))
        self.assertEqual(batch[0].dtype.name, "string")
        self.assertEqual(batch[1].shape, (8, 2))
        self.assertEqual(batch[1].dtype.name, "float32")

    def test_sample_count(self):
        directory = self._prepare_directory(num_classes=4, count=15)
        dataset = text_dataset.text_dataset_from_directory(
            directory, batch_size=8, label_mode=None
        )
        sample_count = 0
        for batch in dataset:
            sample_count += batch.shape[0]
        self.assertEqual(sample_count, 15)

    def test_text_dataset_from_directory_multiclass(self):
        directory = self._prepare_directory(num_classes=4, count=15)

        dataset = text_dataset.text_dataset_from_directory(
            directory, batch_size=8, label_mode=None
        )
        batch = next(iter(dataset))
        self.assertEqual(batch.shape, (8,))

        dataset = text_dataset.text_dataset_from_directory(
            directory, batch_size=8, label_mode=None
        )
        sample_count = 0
        for batch in dataset:
            sample_count += batch.shape[0]
        self.assertEqual(sample_count, 15)

        dataset = text_dataset.text_dataset_from_directory(
            directory, batch_size=8, label_mode="int"
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8,))
        self.assertEqual(batch[0].dtype.name, "string")
        self.assertEqual(batch[1].shape, (8,))
        self.assertEqual(batch[1].dtype.name, "int32")

        dataset = text_dataset.text_dataset_from_directory(
            directory, batch_size=8, label_mode="categorical"
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8,))
        self.assertEqual(batch[0].dtype.name, "string")
        self.assertEqual(batch[1].shape, (8, 4))
        self.assertEqual(batch[1].dtype.name, "float32")

    def test_text_dataset_from_directory_validation_split(self):
        directory = self._prepare_directory(num_classes=2, count=10)
        dataset = text_dataset.text_dataset_from_directory(
            directory,
            batch_size=10,
            validation_split=0.2,
            subset="training",
            seed=1337,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8,))

        dataset = text_dataset.text_dataset_from_directory(
            directory,
            batch_size=10,
            validation_split=0.2,
            subset="validation",
            seed=1337,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (2,))

        train_dataset, val_dataset = text_dataset.text_dataset_from_directory(
            directory,
            batch_size=10,
            validation_split=0.2,
            subset="both",
            seed=1337,
        )
        batch = next(iter(train_dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8,))
        batch = next(iter(val_dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (2,))

    def test_text_dataset_from_directory_manual_labels(self):
        directory = self._prepare_directory(num_classes=2, count=2)
        dataset = text_dataset.text_dataset_from_directory(
            directory, batch_size=8, labels=[0, 1], shuffle=False
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertAllClose(batch[1], [0, 1])

    def test_text_dataset_from_directory_follow_links(self):
        directory = self._prepare_directory(
            num_classes=2, count=25, nested_dirs=True
        )
        dataset = text_dataset.text_dataset_from_directory(
            directory, batch_size=8, label_mode=None, follow_links=True
        )
        sample_count = 0
        for batch in dataset:
            sample_count += batch.shape[0]
        self.assertEqual(sample_count, 25)

    def test_text_dataset_from_directory_no_files(self):
        directory = self._prepare_directory(num_classes=2, count=0)
        with self.assertRaisesRegex(ValueError, "No text files found"):
            _ = text_dataset.text_dataset_from_directory(directory)

    def test_text_dataset_from_directory_errors(self):
        directory = self._prepare_directory(num_classes=3, count=5)

        with self.assertRaisesRegex(ValueError, "`labels` argument should be"):
            _ = text_dataset.text_dataset_from_directory(
                directory, labels="other"
            )

        with self.assertRaisesRegex(
            ValueError, "`label_mode` argument must be"
        ):
            _ = text_dataset.text_dataset_from_directory(
                directory, label_mode="other"
            )

        with self.assertRaisesRegex(
            ValueError, 'only pass `class_names` if `labels="inferred"`'
        ):
            _ = text_dataset.text_dataset_from_directory(
                directory,
                labels=[0, 0, 1, 1, 1],
                class_names=["class_0", "class_1", "class_2"],
            )

        with self.assertRaisesRegex(
            ValueError,
            "Expected the lengths of `labels` to match the number of files",
        ):
            _ = text_dataset.text_dataset_from_directory(
                directory, labels=[0, 0, 1, 1]
            )

        with self.assertRaisesRegex(
            ValueError, "`class_names` passed did not match"
        ):
            _ = text_dataset.text_dataset_from_directory(
                directory, class_names=["class_0", "class_2"]
            )

        with self.assertRaisesRegex(ValueError, "there must be exactly 2"):
            _ = text_dataset.text_dataset_from_directory(
                directory, label_mode="binary"
            )

        with self.assertRaisesRegex(
            ValueError, "`validation_split` must be between 0 and 1"
        ):
            _ = text_dataset.text_dataset_from_directory(
                directory, validation_split=2
            )

        with self.assertRaisesRegex(
            ValueError,
            '`subset` must be either "training", "validation" or "both"',
        ):
            _ = text_dataset.text_dataset_from_directory(
                directory, validation_split=0.2, subset="other"
            )

        with self.assertRaisesRegex(
            ValueError, "`validation_split` must be set"
        ):
            _ = text_dataset.text_dataset_from_directory(
                directory, validation_split=0, subset="training"
            )

        with self.assertRaisesRegex(ValueError, "must provide a `seed`"):
            _ = text_dataset.text_dataset_from_directory(
                directory, validation_split=0.2, subset="training"
            )

    def test_text_dataset_from_directory_not_batched(self):
        directory = self._prepare_directory()
        dataset = text_dataset.text_dataset_from_directory(
            directory, batch_size=None, label_mode=None, follow_links=True
        )
        sample = next(iter(dataset))
        self.assertEqual(len(sample.shape), 0)


if __name__ == "__main__":
    tf.test.main()
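
# An illustrative, non-test usage sketch of the API exercised above (the
# directory path is a hypothetical placeholder; it must contain one
# subdirectory of .txt files per class, e.g. class_0/ and class_1/):
#
#   import tf_keras as keras
#
#   train_ds = keras.utils.text_dataset_from_directory(
#       "/path/to/texts",
#       batch_size=32,
#       label_mode="int",
#       validation_split=0.2,
#       subset="training",
#       seed=1337,
#   )
#   for texts, labels in train_ds.take(1):
#       # For a full batch: texts.shape == (32,), labels.shape == (32,).
#       print(texts.shape, labels.shape)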
tf-keras/tf_keras/utils/text_dataset_test.py/0
{ "file_path": "tf-keras/tf_keras/utils/text_dataset_test.py", "repo_id": "tf-keras", "token_count": 5771 }
223