repo_name stringlengths 9-109 | hexsha stringlengths 40-40 | code stringlengths 547-141k | apis sequence | file_path stringlengths 6-143 | api_extract stringlengths 142-58.4k
---|---|---|---|---|---|
hyhieu/tensor2tensor | fd9b3150ad72140c05dfad7a4ebc4577be6c1c08 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T2TModel Base Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import math
import time
# Dependency imports
import six
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators.problem import problem_hparams_to_features
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import beam_search
from tensor2tensor.utils import decoding
from tensor2tensor.utils import expert_utils as eu
from tensor2tensor.utils import learning_rate
from tensor2tensor.utils import metrics
from tensor2tensor.utils import optimize
from tensor2tensor.utils import registry
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.layers import base
from tensorflow.python.ops import variable_scope
_no_problem_err_str = (
"The default implementation of %s requires that the "
"model be used with a Problem. If using a Problem, augment the "
"hparams object with trainer_lib.add_problem_hparams. If not, "
"override %s.")
_no_problem_err = (
lambda method_name: _no_problem_err_str % (method_name, method_name))
class T2TModel(base.Layer):
"""Abstract base class for models.
  Subclasses generally only need to override `body`.
"""
REGISTERED_NAME = None # Updated on registration.
def __init__(self,
hparams,
mode=tf.estimator.ModeKeys.TRAIN,
problem_hparams=None,
data_parallelism=None,
decode_hparams=None):
"""Create a T2TModel.
Args:
hparams: tf.contrib.training.HParams, model hyperparameters.
mode: tf.estimator.ModeKeys, the execution mode.
problem_hparams: tf.contrib.training.HParams, hyperparameters for the
Problem. If provided here or in hparams.problems, the model will
automatically determine bottom, top, and loss methods. If not provided,
calling the model will only invoke body.
      data_parallelism: an expert_utils.Parallelism object, which
        specifies devices for data parallelism.
decode_hparams: a hyperparameter object with decoding parameters.
See decoding.decode_hparams.
Returns:
a T2TModel
"""
# Determine name first: use registered name if possible, class name else.
default_name = registry.default_name(type(self))
name = self.REGISTERED_NAME or default_name
super(T2TModel, self).__init__(
trainable=mode == tf.estimator.ModeKeys.TRAIN, name=name)
if not problem_hparams and hasattr(hparams, "problems"):
problem_hparams = hparams.problems[0]
self._problem_hparams = problem_hparams
# Setup hparams
# If vocabularies differ, unset shared_embedding_and_softmax_weights.
hparams = copy.copy(hparams)
if self._problem_hparams and hparams.shared_embedding_and_softmax_weights:
same_vocab_sizes = True
if "inputs" in self._problem_hparams.input_modality:
if (self._problem_hparams.input_modality["inputs"] !=
self._problem_hparams.target_modality):
same_vocab_sizes = False
if not same_vocab_sizes:
log_info("Unsetting shared_embedding_and_softmax_weights.")
hparams.shared_embedding_and_softmax_weights = 0
self._original_hparams = hparams
self.set_mode(mode)
self._decode_hparams = copy.copy(decode_hparams or
decoding.decode_hparams())
self._data_parallelism = data_parallelism or eu.Parallelism([""])
self._num_datashards = self._data_parallelism.n
self._ps_devices = self._data_parallelism.ps_devices
self._eager_var_store = create_eager_var_store()
if self._problem_hparams:
self._create_modalities(self._problem_hparams, self._hparams)
@property
def hparams(self):
return self._hparams
@property
def has_input(self):
if self._problem_hparams:
return "inputs" in self._problem_hparams.input_modality
else:
return True
def call(self, features):
tf.get_variable_scope().set_initializer(
optimize.get_variable_initializer(self.hparams))
with self._eager_var_store.as_default():
self._fill_problem_hparams_features(features)
sharded_features = self._shard_features(features)
sharded_logits, losses = self.model_fn_sharded(sharded_features)
if isinstance(sharded_logits, dict):
concat_logits = {}
        for k, v in six.iteritems(sharded_logits):
concat_logits[k] = tf.concat(v, 0)
return concat_logits, losses
else:
return tf.concat(sharded_logits, 0), losses
@property
def use_body_sharded(self):
return False
def body_sharded(self, sharded_features):
raise NotImplementedError("Models that wish to manually control sharding, "
"e.g. MoE models, should override body_sharded "
"and set use_body_sharded to True.")
def model_fn_sharded(self, sharded_features):
dp = self._data_parallelism
summarize_features(sharded_features, num_shards=dp.n)
datashard_to_features = self._to_features_per_datashard(sharded_features)
if self.use_body_sharded:
# MoE models override body_sharded
transformed_features = dp(self.bottom, datashard_to_features)
body_out = self.body_sharded(
self._to_single_features_dict(transformed_features))
body_out, losses = self._normalize_body_output(body_out)
if "training" in losses:
log_info("Skipping T2TModel top and loss because training loss "
"returned from body")
sharded_logits = body_out
else:
if isinstance(body_out, dict):
sharded_logits = {}
sharded_losses = {}
          for k, v in six.iteritems(body_out):
sharded_logits[k] = dp(self.top, v, datashard_to_features)
sharded_losses[k] = dp(self.loss, sharded_logits[k],
datashard_to_features)
          training_loss_dict = average_sharded_losses([{
              "training": l
          } for loss in sharded_losses.values() for l in loss])
losses.update(training_loss_dict)
else:
sharded_logits = dp(self.top, body_out, datashard_to_features)
sharded_losses = dp(self.loss, sharded_logits, datashard_to_features)
training_loss_dict = average_sharded_losses([{
"training": loss
} for loss in sharded_losses])
losses.update(training_loss_dict)
else:
sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features)
if isinstance(sharded_logits[0], dict):
        temp_dict = {k: [] for k, _ in six.iteritems(sharded_logits[0])}
        for k, _ in six.iteritems(sharded_logits[0]):
for l in sharded_logits:
temp_dict[k].append(l[k])
sharded_logits = temp_dict
losses = average_sharded_losses(sharded_losses)
# TODO(rsepassi): Reenable scheduled sampling
# Disabled because of model_fn_sharded refactor
#
# do_scheduled_sampling = ( # Only do it if training and set for it.
# self.hparams.scheduled_sampling_prob > 0.0 and
# self.hparams.mode == tf.estimator.ModeKeys.TRAIN)
# if do_scheduled_sampling:
# sharded_logits, losses = scheduled_sampling(
# self.hparams, self._problem_hparams, dp,
# sharded_logits, losses, sharded_features,
# transformed_features, self)
return sharded_logits, losses
def model_fn(self, features):
transformed_features = self.bottom(features)
with tf.variable_scope("body"):
log_info("Building model body")
body_out = self.body(transformed_features)
output, losses = self._normalize_body_output(body_out)
if "training" in losses:
log_info("Skipping T2TModel top and loss because training loss "
"returned from body")
logits = output
else:
logits = self.top(output, features)
losses["training"] = self.loss(logits, features)
return logits, losses
def bottom(self, features):
"""Transform features to feed into body."""
if not self._problem_hparams:
log_warn("Without a Problem, T2TModel.bottom is a passthrough.")
return features
transformed_features = {}
all_previous_modalities = []
# Transform the input features
for key, input_modality in six.iteritems(
self._problem_hparams.input_modality):
if key not in features:
tf.logging.warning("Missing feature %s - ignoring." % key)
continue
do_reuse = input_modality.name in all_previous_modalities
with tf.variable_scope(input_modality.name, reuse=do_reuse):
log_info("Transforming feature '%s' with %s.bottom", key,
input_modality.name)
transformed_features[key] = input_modality.bottom(features[key])
all_previous_modalities.append(input_modality.name)
# Transform the targets (for autoregressive models)
target_modality = self._problem_hparams.target_modality
with tf.variable_scope(target_modality.name):
log_info("Transforming 'targets' with %s.targets_bottom",
target_modality.name)
transformed_features["targets"] = target_modality.targets_bottom(
features["targets"])
for key in features:
if key not in transformed_features:
# For features without a modality, we pass them along as is
transformed_features[key] = features[key]
else:
# Other features get passed along with the "raw" suffix
transformed_features[key + "_raw"] = features[key]
return transformed_features
def body(self, features):
"""Most models will override this function.
Compute label logits for one shard as a function of the transformed
features.
Args:
features: A dictionary of key to Tensor. Each Tensor has shape
[batch_size, ?, ?, hidden_size].
Returns:
      output: tensor of logits with shape [batch_size, O, P, body_output_size].
losses: either single loss as a scalar, a list, a tensor (to be averaged)
or a dictionary of losses.
"""
raise NotImplementedError("Abstract Method")
def _top_single(self, body_output, features):
if not self._problem_hparams:
log_warn("Without a Problem, T2TModel.top is a passthrough.")
return body_output
target_modality = self._problem_hparams.target_modality
with tf.variable_scope(target_modality.name):
log_info("Transforming body output with %s.top", target_modality.name)
last_only = (
target_modality.top_is_pointwise and
self.hparams.mode == tf.estimator.ModeKeys.PREDICT and
not self.hparams.force_full_predict)
if not last_only:
logits = target_modality.top(body_output, features["targets"])
else:
# Take body outputs for the last position only, and targets too.
last_position_body_output = tf.expand_dims(
body_output[:, -1, :, :], axis=[1])
last_position_targets = tf.expand_dims(
features["targets"][:, -1:, :, :], axis=[1])
logits = target_modality.top(last_position_body_output,
last_position_targets)
return logits
def top(self, body_output, features):
if isinstance(body_output, dict):
logits = {}
      for k, v in six.iteritems(body_output):
logits[k] = self._top_single(v, features)
return logits
else:
return self._top_single(body_output, features)
def _loss_single(self, logits, features):
if not self._problem_hparams:
log_warn(_no_problem_err("loss"))
return (tf.constant(0., dtype=tf.float32),
tf.constant(1., dtype=tf.float32))
target_modality = self._problem_hparams.target_modality
loss_num, loss_den = target_modality.loss(logits, features["targets"])
loss_num *= self._problem_hparams.loss_multiplier
return loss_num, loss_den
def loss(self, logits, features):
if isinstance(logits, dict):
losses = {}
      for k, v in six.iteritems(logits):
        losses[k] = self._loss_single(v, features)
      return tf.add_n([n / d for n, d in losses.values()])
else:
return self._loss_single(logits, features)
def optimize(self, loss, num_async_replicas=1):
"""Return a training op minimizing loss."""
log_info("Base learning rate: %f", self.hparams.learning_rate)
lr = learning_rate.learning_rate_schedule(self.hparams)
if num_async_replicas > 1:
log_info("Dividing learning rate by num_async_replicas: %d",
num_async_replicas)
lr /= math.sqrt(float(num_async_replicas))
train_op = optimize.optimize(
loss, lr, self.hparams, use_tpu=common_layers.is_on_tpu())
return train_op
def set_mode(self, mode):
"""Set hparams with the given mode."""
log_info("Setting T2TModel mode to '%s'", mode)
hparams = copy.copy(self._original_hparams)
hparams.add_hparam("mode", mode)
# When not in training mode, set all forms of dropout to zero.
if mode != tf.estimator.ModeKeys.TRAIN:
for key in hparams.values():
if key.endswith("dropout"):
log_info("Setting hparams.%s to 0.0", key)
setattr(hparams, key, 0.0)
self._hparams = hparams
def _create_modalities(self, problem_hparams, hparams):
"""Construct modalities in problem_hparams."""
input_modality_overrides = {}
for override_str in hparams.input_modalities.split(";"):
if override_str != "default":
parts = override_str.split(":")
feature_name = parts[0]
modality_name = ":".join(parts[1:])
input_modality_overrides[feature_name] = modality_name
target_modality_name = None
if hparams.target_modality and hparams.target_modality != "default":
target_modality_name = hparams.target_modality
input_modality = {}
for f, modality_spec in six.iteritems(problem_hparams.input_modality):
if f in input_modality_overrides:
_warn_changed_modality_type(input_modality_overrides[f],
modality_spec[0], f)
modality_spec = (input_modality_overrides[f], modality_spec[1])
input_modality[f] = registry.create_modality(modality_spec, hparams)
problem_hparams.input_modality = input_modality
target_modality_spec = problem_hparams.target_modality
if target_modality_name:
_warn_changed_modality_type(target_modality_name, target_modality_spec[0],
"target")
target_modality_spec = (target_modality_name, target_modality_spec[1])
target_modality = registry.create_modality(target_modality_spec, hparams)
problem_hparams.target_modality = target_modality
def prepare_features_for_infer(self, features):
"""Called before inference to allow adding infer-specific features."""
pass
def eval_autoregressive(self, features=None, decode_length=50):
"""Autoregressive eval.
Quadratic time in decode_length.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
logits: `Tensor`
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
Contains a single key "training".
"""
results = self._slow_greedy_infer(features, decode_length=decode_length)
return results["logits"], results["losses"]
def _fill_problem_hparams_features(self, features):
if features is not None:
for k, v in six.iteritems(
problem_hparams_to_features(self._problem_hparams)):
if k not in features:
features[k] = tf.constant(v, name=k)
def infer(self,
features=None,
decode_length=50,
beam_size=1,
top_beams=1,
alpha=0.0):
"""A inference method.
Quadratic time in decode_length.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
      alpha: Float that controls the length penalty. The larger the alpha, the
        stronger the preference for longer translations.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
if slow greedy decoding is used then the dict will also contain {
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`
}
"""
with self._eager_var_store.as_default():
# TODO(rsepassi): Make decoding work with real-valued model outputs
# (i.e. if the target modality is RealModality).
self.prepare_features_for_infer(features)
if not self.has_input and beam_size > 1:
log_warn("Beam searching for a model with no inputs.")
if not self.has_input and self.hparams.sampling_method != "random":
log_warn("Non-random sampling for a model with no inputs.")
self._fill_problem_hparams_features(features)
if self._problem_hparams:
target_modality = self._problem_hparams.target_modality
if target_modality.is_class_modality:
beam_size = 1 # No use to run beam-search for a single class.
if beam_size == 1:
log_info("Greedy Decoding")
results = self._greedy_infer(features, decode_length)
else:
log_info("Beam Decoding with beam size %d" % beam_size)
results = self._beam_decode(features, decode_length, beam_size,
top_beams, alpha)
return results
def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha):
"""Beam search decoding.
Models should ideally implement a more efficient version of this function.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
      alpha: Float that controls the length penalty. The larger the alpha, the
        stronger the preference for longer translations.
Returns:
samples: an integer `Tensor`. Top samples from the beam search
"""
return self._beam_decode_slow(features, decode_length, beam_size, top_beams,
alpha)
def _beam_decode_slow(self, features, decode_length, beam_size, top_beams,
alpha):
"""Slow version of Beam search decoding.
Quadratic time in decode_length.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
      alpha: Float that controls the length penalty. The larger the alpha, the
        stronger the preference for longer translations.
Returns:
samples: an integer `Tensor`. Top samples from the beam search
"""
batch_size = common_layers.shape_list(features["inputs"])[0]
def symbols_to_logits_fn(ids):
"""Go from ids to logits."""
ids = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
ids = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0], [0, 0]])
if "partial_targets" in features:
pt = features["partial_targets"]
pt_length = common_layers.shape_list(pt)[1]
pt = tf.tile(pt, [1, beam_size])
pt = tf.reshape(pt, [batch_size * beam_size, pt_length, 1, 1])
ids = tf.concat([pt, ids], axis=1)
features["targets"] = ids
self._coverage = None
logits, _ = self(features) # pylint: disable=not-callable
# now self._coverage is a coverage tensor for the first datashard.
# it has shape [batch_size] and contains floats between 0 and
# source_length.
if self._problem_hparams:
modality = self._problem_hparams.target_modality
if modality.top_is_pointwise:
return tf.squeeze(logits, axis=[1, 2, 3])
# -1 due to the pad above.
current_output_position = common_layers.shape_list(ids)[1] - 1
logits = logits[:, current_output_position, :, :]
return tf.squeeze(logits, axis=[1, 2])
initial_ids = tf.zeros([batch_size], dtype=tf.int32)
if self.has_input:
inputs_old = features["inputs"]
features["inputs"] = tf.expand_dims(features["inputs"], 1)
if len(features["inputs"].shape) < 5:
features["inputs"] = tf.expand_dims(features["inputs"], 4)
# Expand the inputs in to the beam size.
features["inputs"] = tf.tile(features["inputs"], [1, beam_size, 1, 1, 1])
s = common_layers.shape_list(features["inputs"])
features["inputs"] = tf.reshape(features["inputs"],
[s[0] * s[1], s[2], s[3], s[4]])
target_modality = self._problem_hparams.target_modality
vocab_size = target_modality.top_dimensionality
# Setting decode length to input length + decode_length
decode_length = tf.constant(decode_length)
if "partial_targets" not in features:
decode_length += common_layers.shape_list(features["inputs"])[1]
ids, scores = beam_search.beam_search(
symbols_to_logits_fn,
initial_ids,
beam_size,
decode_length,
vocab_size,
alpha,
stop_early=(top_beams == 1))
    # Set inputs back to the unexpanded inputs so as not to confuse the Estimator.
if self.has_input:
features["inputs"] = inputs_old
# Return `top_beams` decodings (also remove initial id from the beam search)
# TODO(lukaszkaiser): make it work multi-problem.
if top_beams == 1:
samples = ids[:, 0, 1:]
else:
      samples = ids[:, :top_beams, 1:]
return {"outputs": samples, "scores": scores}
def _greedy_infer(self, features, decode_length):
"""A greedy inference method.
Models should ideally implement a more efficient version of this function.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
"""
return self._slow_greedy_infer(features, decode_length)
def _slow_greedy_infer(self, features, decode_length):
"""A slow greedy inference method.
Quadratic time in decode_length.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
"""
if not features:
features = {}
inputs_old = None
if "inputs" in features and len(features["inputs"].shape) < 4:
inputs_old = features["inputs"]
features["inputs"] = tf.expand_dims(features["inputs"], 2)
if not self.has_input:
features["partial_targets"] = tf.to_int64(features["inputs"])
# Save the targets in a var and reassign it after the tf.while loop to avoid
    # having targets being in a 'while' frame. This ensures that targets, when
    # used in metric functions, stay in the same frame as other vars.
targets_old = features.get("targets", None)
target_modality = self._problem_hparams.target_modality
def infer_step(recent_output, recent_logits, unused_loss):
"""Inference step."""
if not context.in_eager_mode():
recent_output.set_shape([None, None, None, 1])
padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]])
features["targets"] = padded
# This is inefficient in that it generates samples at all timesteps,
# not just the last one, except if target_modality is pointwise.
samples, logits, losses = self.sample(features)
# Concatenate the already-generated recent_output with last timestep
# of the newly-generated samples.
if target_modality.top_is_pointwise:
cur_sample = samples[:, -1, :, :]
else:
cur_sample = samples[:,
common_layers.shape_list(recent_output)[1], :, :]
cur_sample = tf.to_int64(tf.expand_dims(cur_sample, axis=1))
samples = tf.concat([recent_output, cur_sample], axis=1)
if not context.in_eager_mode():
samples.set_shape([None, None, None, 1])
# Assuming we have one shard for logits.
logits = tf.concat([recent_logits, logits[:, -1:]], 1)
loss = sum([l for l in losses.values() if l is not None])
return samples, logits, loss
# Create an initial output tensor. This will be passed
# to the infer_step, which adds one timestep at every iteration.
if "partial_targets" in features:
initial_output = tf.to_int64(features["partial_targets"])
while len(initial_output.get_shape().as_list()) < 4:
initial_output = tf.expand_dims(initial_output, 2)
batch_size = common_layers.shape_list(initial_output)[0]
else:
batch_size = common_layers.shape_list(features["inputs"])[0]
initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64)
# Hack: foldl complains when the output shape is less specified than the
# input shape, so we confuse it about the input shape.
initial_output = tf.slice(initial_output, [0, 0, 0, 0],
common_layers.shape_list(initial_output))
target_modality = self._problem_hparams.target_modality
if target_modality.is_class_modality:
decode_length = 1
else:
decode_length = common_layers.shape_list(
features["inputs"])[1] + decode_length
# Initial values of result, logits and loss.
result = initial_output
# tensor of shape [batch_size, time, 1, 1, vocab_size]
logits = tf.zeros((batch_size, 0, 1, 1, target_modality.top_dimensionality))
if not context.in_eager_mode():
logits.set_shape([None, None, None, None, None])
loss = 0.0
def while_exit_cond(result, logits, loss): # pylint: disable=unused-argument
"""Exit the loop either if reach decode_length or EOS."""
length = common_layers.shape_list(result)[1]
not_overflow = length < decode_length
if self._problem_hparams.stop_at_eos:
def fn_not_eos():
          return tf.not_equal(  # Check if the last predicted element is an EOS
tf.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID)
not_eos = tf.cond(
            # We only check for early stopping if there is at least 1 element
            # (otherwise not_eos will crash).
tf.not_equal(length, 0),
fn_not_eos,
lambda: True,
)
return tf.cond(
tf.equal(batch_size, 1),
          # If batch_size == 1, we check EOS for early stopping.
lambda: tf.logical_and(not_overflow, not_eos),
# Else, just wait for max length
lambda: not_overflow)
return not_overflow
result, logits, loss = tf.while_loop(
while_exit_cond,
infer_step, [result, logits, loss],
shape_invariants=[
tf.TensorShape([None, None, None, None]),
tf.TensorShape([None, None, None, None, None]),
tf.TensorShape([]),
],
back_prop=False,
parallel_iterations=1)
if inputs_old is not None: # Restore to not confuse Estimator.
features["inputs"] = inputs_old
# Reassign targets back to the previous value.
if targets_old is not None:
features["targets"] = targets_old
losses = {"training": loss}
if "partial_targets" in features:
partial_target_length = common_layers.shape_list(
features["partial_targets"])[1]
result = tf.slice(result, [0, partial_target_length, 0, 0],
[-1, -1, -1, -1])
return {
"outputs": result,
"scores": None,
"logits": logits,
"losses": losses,
}
def sample(self, features):
"""Run the model and extract samples.
Args:
      features: a map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
"""
logits, losses = self(features) # pylint: disable=not-callable
if self.hparams.sampling_method == "argmax":
samples = tf.argmax(logits, axis=-1)
else:
assert self.hparams.sampling_method == "random"
def multinomial_squeeze(logits, temperature=1.0):
logits_shape = common_layers.shape_list(logits)
reshaped_logits = (
tf.reshape(logits, [-1, logits_shape[-1]]) / temperature)
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices, logits_shape[:-1])
return choices
samples = multinomial_squeeze(logits, self.hparams.sampling_temp)
return samples, logits, losses
def _shard_features(self, features): # pylint: disable=missing-docstring
sharded_features = dict()
for k, v in six.iteritems(features):
v = tf.convert_to_tensor(v)
v_shape = common_layers.shape_list(v)
if not v_shape:
v = tf.expand_dims(v, axis=-1)
v_shape = [1]
if v_shape == [1]:
v = tf.tile(v, [self._num_datashards])
sharded_features[k] = self._data_parallelism(tf.identity,
tf.split(
v, self._num_datashards,
0))
return sharded_features
def _to_features_per_datashard(self, features):
datashard_features = []
assert len(features[list(features.keys())[0]]) == self._num_datashards
for d in range(self._num_datashards):
f = {k: v[d] for k, v in six.iteritems(features)}
datashard_features.append(f)
return datashard_features
def _to_single_features_dict(self, datashard_features):
assert len(datashard_features) == self._num_datashards
features = collections.defaultdict(list)
for feats in datashard_features:
for k, v in six.iteritems(feats):
features[k].append(v)
return features
@staticmethod
def make_estimator_model_fn(model_name,
hparams,
decode_hparams=None,
use_tpu=False):
model_cls = registry.model(model_name)
def wrapping_model_fn(features, labels, mode, params=None, config=None):
return model_cls.estimator_model_fn(
hparams,
features,
labels,
mode,
config=config,
params=params,
decode_hparams=decode_hparams,
use_tpu=use_tpu)
return wrapping_model_fn
@classmethod
def estimator_model_fn(cls,
hparams,
features,
labels,
mode,
config=None,
params=None,
decode_hparams=None,
use_tpu=False):
"""Model fn for Estimator.
Args:
hparams: HParams, model hyperparameters
features: dict<str name, Tensor feature>
labels: Tensor
mode: tf.estimator.ModeKeys
config: RunConfig, possibly with data_parallelism attribute
params: dict, may include batch_size
decode_hparams: HParams, used when mode == PREDICT.
use_tpu: bool, whether using TPU
Returns:
TPUEstimatorSpec if use tpu else EstimatorSpec
"""
_create_dummy_vars()
hparams = copy.deepcopy(hparams)
# Instantiate model
data_parallelism = None
if not use_tpu and config:
data_parallelism = config.data_parallelism
model = cls(
hparams,
mode,
data_parallelism=data_parallelism,
decode_hparams=decode_hparams)
# PREDICT mode
if mode == tf.estimator.ModeKeys.PREDICT:
assert not use_tpu
return model.estimator_spec_predict(features)
# TRAIN and EVAL modes
if hparams.eval_run_autoregressive and mode == tf.estimator.ModeKeys.EVAL:
logits, losses_dict = model.eval_autoregressive(features)
else:
logits, losses_dict = model(features) # pylint: disable=not-callable
# Set known shapes
if use_tpu:
if isinstance(logits, dict):
        for k, v in six.iteritems(logits):
if "scalar/" in k:
continue
shape = v.get_shape().as_list()
if shape[0] is None:
shape[0] = params["batch_size"]
if shape[1] is None:
shape[1] = hparams.max_length
v.set_shape(shape)
else:
shape = logits.get_shape().as_list()
if shape[0] is None:
shape[0] = params["batch_size"]
if shape[1] is None:
shape[1] = hparams.max_length
logits.set_shape(shape)
assert "training" in losses_dict
# Summarize losses
with tf.name_scope("losses"):
for loss_name, loss_val in losses_dict.items():
tf.summary.scalar(loss_name, loss_val)
# Accumulate losses
loss = sum(losses_dict.values())
# EVAL mode
if mode == tf.estimator.ModeKeys.EVAL:
return model.estimator_spec_eval(features, logits, labels, loss,
losses_dict)
# TRAIN mode
assert mode == tf.estimator.ModeKeys.TRAIN
num_async_replicas = (1 if (use_tpu or not config) else
config.t2t_device_info["num_async_replicas"])
return model.estimator_spec_train(
loss, num_async_replicas=num_async_replicas)
def estimator_spec_train(self, loss, num_async_replicas=1):
"""Construct EstimatorSpec for TRAIN mode."""
train_op = self.optimize(loss, num_async_replicas=num_async_replicas)
if common_layers.is_on_tpu():
_remove_summaries() # summaries not currently working on TPU
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op)
else:
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op)
def estimator_spec_eval(self, features, logits, labels, loss, losses_dict):
"""Construct EstimatorSpec for EVAL mode."""
hparams = self.hparams
if not hasattr(hparams, "problem_instances"):
raise NotImplementedError(_no_problem_err("estimator_spec_eval"))
problem = hparams.problem_instances[0]
if common_layers.is_on_tpu():
eval_metrics_fn = _create_tpu_eval_metrics_fn(problem, hparams)
_remove_summaries()
if isinstance(logits, dict):
# For TPU, logits dict will be passed as keyword arguments to
# eval_metrics_fn. Here we add the labels to those arguments.
logits.update({"labels": labels})
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.EVAL,
eval_metrics=(eval_metrics_fn, logits),
loss=loss)
else:
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.EVAL,
eval_metrics=(eval_metrics_fn, [logits, labels]),
loss=loss)
else:
eval_metrics_fns = metrics.create_evaluation_metrics([problem], hparams)
eval_metrics = {}
for metric_name, metric_fn in six.iteritems(eval_metrics_fns):
eval_metrics[metric_name] = metric_fn(logits, features)
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.EVAL,
predictions={"predictions": logits},
eval_metric_ops=eval_metrics,
loss=loss)
def estimator_spec_predict(self, features):
"""Construct EstimatorSpec for PREDICT mode."""
decode_hparams = self._decode_hparams
infer_out = self.infer(
features,
beam_size=decode_hparams.beam_size,
top_beams=(decode_hparams.beam_size
if decode_hparams.return_beams else 1),
alpha=decode_hparams.alpha,
decode_length=decode_hparams.extra_length)
if isinstance(infer_out, dict):
outputs = infer_out["outputs"]
scores = infer_out["scores"]
else:
outputs = infer_out
scores = None
batched_problem_choice = (
features["problem_choice"] * tf.ones(
(common_layers.shape_list(features["inputs"])[0],), dtype=tf.int32))
predictions = {
"outputs": outputs,
"scores": scores,
"inputs": features.get("inputs"),
"targets": features.get("infer_targets"),
"problem_choice": batched_problem_choice,
}
_del_dict_nones(predictions)
export_out = {"outputs": predictions["outputs"]}
if "scores" in predictions:
export_out["scores"] = predictions["scores"]
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
"output": tf.estimator.export.PredictOutput(export_out)
})
def _normalize_body_output(self, body_out):
if isinstance(body_out, tuple):
output, losses = body_out
if not isinstance(losses, dict):
losses = {"extra": tf.reduce_mean(losses)}
else:
output = body_out
losses = {"extra": 0.0}
return output, losses
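# Illustrative sketch only (not part of the original file): the T2TModel
# docstring above notes that subclasses generally only need to override `body`.
# A hypothetical minimal model could be registered and defined as below; the
# class name, the single dense layer, and the use of the standard `hidden_size`
# hparam are assumptions made purely for illustration.
@registry.register_model
class ExampleDenseModel(T2TModel):
  """Toy example: one dense layer over the transformed inputs."""

  def body(self, features):
    # `features` holds transformed tensors of shape
    # [batch_size, ?, ?, hidden_size]; returning a tensor of the same rank
    # lets the default `top` project it to logits.
    inputs = features["inputs"]
    return tf.layers.dense(inputs, self.hparams.hidden_size)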
def _warn_changed_modality_type(new_name, old_name, feature_name):
new_type, new_name = registry.parse_modality_name(new_name)
old_type, old_name = registry.parse_modality_name(old_name)
if new_type != old_type:
log_warn("%s has a designated modality type %s (%s) but has been "
"overridden with a modality of type %s (%s).", feature_name,
old_type, old_name, new_type, new_name)
def _with_timing(fn, msg, silent=False):
def fn_with_timing(*args, **kwargs):
start_time = time.time()
res = fn(*args, **kwargs)
if not silent:
log_info("Doing %s took %.3f sec." % (msg, time.time() - start_time))
return res
return fn_with_timing
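# Illustrative usage (an assumption, not in the original file): `_with_timing`
# wraps any callable and logs its wall-clock duration under the given message,
# e.g.
#   timed_infer = _with_timing(model.infer, "inference")
#   outputs = timed_infer(features)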
def _create_dummy_vars():
"""Dummy vars for restore to work when not using TPU codepath."""
var_names = set([v.name for v in tf.global_variables()])
if "losses_avg/problem_0/total_loss:0" in var_names:
return
with tf.variable_scope("losses_avg"):
with tf.variable_scope("problem_0"):
for var_name in ["total", "extra", "training"]:
tf.get_variable(
"%s_loss" % var_name, initializer=100.0, trainable=False)
with tf.variable_scope("train_stats"):
tf.get_variable("problem_0_steps", initializer=0, trainable=False)
# These metrics are implemented with py_funcs and therefore do not work on TPU.
TPU_METRIC_BLACKLIST = set([
metrics.Metrics.APPROX_BLEU,
metrics.Metrics.ROUGE_2_F,
metrics.Metrics.ROUGE_L_F,
])
def _create_tpu_eval_metrics_fn(problem, hparams):
"""Create the metrics_fn that TPUEstimatorSpec expects."""
tm = problem.get_hparams().target_modality
if isinstance(tm, tuple):
tm = registry.create_modality(tm, hparams)
weights_fn = tm.targets_weights_fn
def make_metric_fn(metric_fn):
def wrapped_metric_fn(logits, labels):
num, den = metric_fn(logits, labels, weights_fn=weights_fn)
return tf.metrics.mean(num, den)
return wrapped_metric_fn
metric_fns = []
eval_metrics = problem.eval_metrics()
for metric in eval_metrics:
if metric in TPU_METRIC_BLACKLIST:
log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric)
continue
name = "metrics-%s/%s" % (problem.name, metric)
metric_fns.append((name, make_metric_fn(metrics.METRICS_FNS[metric])))
def all_metrics_fn(logits=None, labels=None, **kwargs):
"""Construct metrics dictionary."""
metrics_dict = {}
if logits is None:
logits = kwargs
for name, fn in metric_fns:
if isinstance(logits, dict):
        for k, v in six.iteritems(logits):
metrics_dict["%s/%s" % (name, k)] = fn(v, labels)
else:
metrics_dict[name] = fn(logits, labels)
return metrics_dict
return all_metrics_fn
def _remove_summaries():
g = tf.get_default_graph()
key = tf.GraphKeys.SUMMARIES
del g.get_collection_ref(key)[:]
assert not g.get_collection(key)
def _del_dict_nones(d):
for k in list(d.keys()):
if d[k] is None:
del d[k]
class DummyVariableStore(object):
@contextlib.contextmanager
def as_default(self):
yield
def create_eager_var_store():
if context.in_eager_mode():
return variable_scope.EagerVariableStore()
else:
return DummyVariableStore()
def scheduled_sampling(hparams, problem_hparams, dp, sharded_logits, losses,
sharded_features, transformed_features, model):
"""Scheduled sampling."""
target_modality = problem_hparams.target_modality
def sample(x):
"""Multinomial sampling from a n-dimensional tensor."""
vocab_size = target_modality.top_dimensionality
samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]), 1)
reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
return tf.to_int32(reshaped_samples)
def mix_gold_sampled(gold_targets, sampled_targets):
return tf.where(
tf.less(
tf.random_uniform(common_layers.shape_list(sampled_targets)),
hparams.scheduled_sampling_gold_mixin_prob), gold_targets,
sampled_targets)
def sampled_results():
"""Generate scheduled sampling results."""
sampled_targets = dp(sample, sharded_logits)
new_targets = dp(mix_gold_sampled, sharded_features["targets"],
sampled_targets)
new_features = transformed_features
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
with tf.variable_scope(target_modality.name):
new_features["targets"] = target_modality.targets_bottom_sharded(
new_targets, dp)
with tf.variable_scope("body"):
body_outputs, losses = model.model_fn_sharded(new_features)
if not isinstance(losses, dict): # If it's a single extra loss.
losses = {"extra": losses}
with tf.variable_scope(target_modality.name):
new_sharded_logits = target_modality.top_sharded(
body_outputs, sharded_features["targets"], dp)
if "training" not in losses:
training_loss = target_modality.loss_sharded(
sharded_logits, sharded_features["targets"], dp)
training_loss *= problem_hparams.loss_multiplier
losses["training"] = training_loss
return new_sharded_logits, losses
# Run the above conditionally.
prob = hparams.scheduled_sampling_prob
prob *= common_layers.inverse_exp_decay(
hparams.scheduled_sampling_warmup_steps, min_value=0.001)
sharded_logits, losses = tf.cond(
tf.less(tf.random_uniform([]), prob), sampled_results,
lambda: (sharded_logits, losses))
return sharded_logits, losses
def average_sharded_losses(sharded_losses):
"""Average losses across datashards.
Args:
sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
can be a single Tensor or a 2-tuple (numerator and denominator).
Returns:
losses: dict<str loss_name, Tensor avg_loss>
"""
losses = {}
for loss_name in sharded_losses[0]:
all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
if isinstance(all_shards[0], tuple):
sharded_num, sharded_den = zip(*all_shards)
mean_loss = (
tf.add_n(sharded_num) / tf.maximum(1.0, tf.add_n(sharded_den)))
else:
mean_loss = tf.reduce_mean(all_shards)
losses[loss_name] = mean_loss
return losses
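# Worked example with assumed numbers (for illustration only): if two shards
# report the "training" loss as (numerator, denominator) pairs (6.0, 2.0) and
# (4.0, 3.0), the function returns
#   {"training": (6.0 + 4.0) / max(1.0, 2.0 + 3.0)} == {"training": 2.0}.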
def summarize_features(features, num_shards=1):
with tf.name_scope("input_stats"):
for (k, v) in six.iteritems(features):
if isinstance(v, tf.Tensor) and v.get_shape().ndims > 1:
tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards)
tf.summary.scalar("%s_length" % k, tf.shape(v)[1])
nonpadding = tf.to_float(tf.not_equal(v, 0))
nonpadding_tokens = tf.reduce_sum(nonpadding)
tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens)
tf.summary.scalar("%s_nonpadding_fraction" % k,
tf.reduce_mean(nonpadding))
_already_logged = set()
def _eager_log(level, *args):
if context.in_eager_mode() and args in _already_logged:
return
_already_logged.add(args)
getattr(tf.logging, level)(*args)
def log_info(*args):
_eager_log("info", *args)
def log_warn(*args):
_eager_log("warn", *args)
| [
"tensorflow.convert_to_tensor",
"tensorflow.get_variable",
"tensorflow.logging.warning",
"tensorflow.concat",
"tensorflow.python.ops.variable_scope.EagerVariableStore",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.global_variables",
"tensorflow.equal",
"tensorflow.python.eager.context.in_eager_mode",
"tensorflow.pad",
"tensorflow.get_default_graph",
"tensorflow.to_int32",
"tensorflow.summary.scalar",
"tensorflow.add_n",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.to_int64",
"tensorflow.estimator.export.PredictOutput",
"tensorflow.squeeze",
"tensorflow.name_scope",
"tensorflow.argmax",
"tensorflow.tile",
"tensorflow.metrics.mean",
"tensorflow.TensorShape",
"tensorflow.shape",
"tensorflow.multinomial",
"tensorflow.split",
"tensorflow.not_equal",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.slice",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope",
"tensorflow.random_uniform",
"tensorflow.logical_and"
] | tensor2tensor/utils/t2t_model.py | [(1024, 'tensor2tensor.utils.registry.parse_modality_name', 'registry.parse_modality_name', (['new_name'], {}), False, 'from tensor2tensor.utils import registry\n'), (1025, 'tensor2tensor.utils.registry.parse_modality_name', 'registry.parse_modality_name', (['old_name'], {}), False, 'from tensor2tensor.utils import registry\n'), (1112, 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (1132, 'tensorflow.python.eager.context.in_eager_mode', 'context.in_eager_mode', ([], {}), False, 'from tensorflow.python.eager import context\n'), (1183, 'tensor2tensor.layers.common_layers.inverse_exp_decay', 'common_layers.inverse_exp_decay', (['hparams.scheduled_sampling_warmup_steps'], {'min_value': '(0.001)'}), False, 'from tensor2tensor.layers import common_layers\n'), (99, 'copy.copy', 'copy.copy', (['hparams'], {}), False, 'import copy\n'), (240, 'six.iteritems', 'six.iteritems', (['self._problem_hparams.input_modality'], {}), False, 'import six\n'), (343, 'tensor2tensor.utils.learning_rate.learning_rate_schedule', 'learning_rate.learning_rate_schedule', (['self.hparams'], {}), False, 'from tensor2tensor.utils import learning_rate\n'), (355, 'copy.copy', 'copy.copy', (['self._original_hparams'], {}), False, 'import copy\n'), (381, 'six.iteritems', 'six.iteritems', (['problem_hparams.input_modality'], {}), False, 'import six\n'), (394, 'tensor2tensor.utils.registry.create_modality', 'registry.create_modality', (['target_modality_spec', 'hparams'], {}), False, 'from tensor2tensor.utils import registry\n'), (544, 'tensorflow.zeros', 'tf.zeros', (['[batch_size]'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (560, 'tensorflow.constant', 'tf.constant', (['decode_length'], {}), True, 'import tensorflow as tf\n'), (563, 'tensor2tensor.utils.beam_search.beam_search', 'beam_search.beam_search', (['symbols_to_logits_fn', 'initial_ids', 'beam_size', 'decode_length', 'vocab_size', 'alpha'], {'stop_early': '(top_beams == 1)'}), False, 'from tensor2tensor.utils import beam_search\n'), (689, 'tensorflow.zeros', 'tf.zeros', (['(batch_size, 0, 1, 1, target_modality.top_dimensionality)'], {}), True, 'import tensorflow as tf\n'), (781, 'six.iteritems', 'six.iteritems', (['features'], {}), False, 'import six\n'), (805, 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), False, 'import collections\n'), (816, 'tensor2tensor.utils.registry.model', 'registry.model', (['model_name'], {}), False, 'from tensor2tensor.utils import registry\n'), (857, 'copy.deepcopy', 'copy.deepcopy', (['hparams'], {}), False, 'import copy\n'), (927, 'tensor2tensor.layers.common_layers.is_on_tpu', 'common_layers.is_on_tpu', ([], {}), False, 'from tensor2tensor.layers import common_layers\n'), (943, 'tensor2tensor.layers.common_layers.is_on_tpu', 'common_layers.is_on_tpu', ([], {}), False, 'from tensor2tensor.layers import common_layers\n'), (1035, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (1049, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""losses_avg"""'], {}), True, 'import tensorflow as tf\n'), (1054, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""train_stats"""'], {}), True, 'import tensorflow as tf\n'), (1055, 'tensorflow.get_variable', 'tf.get_variable', (['"""problem_0_steps"""'], {'initializer': '(0)', 'trainable': '(False)'}), True, 'import tensorflow as tf\n'), (1071, 'tensor2tensor.utils.registry.create_modality', 'registry.create_modality', (['tm', 'hparams'], {}), False, 
'from tensor2tensor.utils import registry\n'), (1133, 'tensorflow.python.ops.variable_scope.EagerVariableStore', 'variable_scope.EagerVariableStore', ([], {}), False, 'from tensorflow.python.ops import variable_scope\n'), (1148, 'tensorflow.to_int32', 'tf.to_int32', (['reshaped_samples'], {}), True, 'import tensorflow as tf\n'), (1216, 'tensorflow.name_scope', 'tf.name_scope', (['"""input_stats"""'], {}), True, 'import tensorflow as tf\n'), (1217, 'six.iteritems', 'six.iteritems', (['features'], {}), False, 'import six\n'), (1232, 'tensorflow.python.eager.context.in_eager_mode', 'context.in_eager_mode', ([], {}), False, 'from tensorflow.python.eager import context\n'), (114, 'tensor2tensor.utils.expert_utils.Parallelism', 'eu.Parallelism', (["['']"], {}), True, 'from tensor2tensor.utils import expert_utils as eu\n'), (134, 'tensor2tensor.utils.optimize.get_variable_initializer', 'optimize.get_variable_initializer', (['self.hparams'], {}), False, 'from tensor2tensor.utils import optimize\n'), (216, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""body"""'], {}), True, 'import tensorflow as tf\n'), (254, 'tensorflow.variable_scope', 'tf.variable_scope', (['target_modality.name'], {}), True, 'import tensorflow as tf\n'), (293, 'tensorflow.variable_scope', 'tf.variable_scope', (['target_modality.name'], {}), True, 'import tensorflow as tf\n'), (386, 'tensor2tensor.utils.registry.create_modality', 'registry.create_modality', (['modality_spec', 'hparams'], {}), False, 'from tensor2tensor.utils import registry\n'), (516, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (["features['inputs']"], {}), False, 'from tensor2tensor.layers import common_layers\n'), (521, 'tensorflow.pad', 'tf.pad', (['ids[:, 1:]', '[[0, 0], [0, 1], [0, 0], [0, 0]]'], {}), True, 'import tensorflow as tf\n'), (542, 'tensorflow.squeeze', 'tf.squeeze', (['logits'], {'axis': '[1, 2]'}), True, 'import tensorflow as tf\n'), (548, 'tensorflow.expand_dims', 'tf.expand_dims', (["features['inputs']", '(1)'], {}), True, 'import tensorflow as tf\n'), (552, 'tensorflow.tile', 'tf.tile', (["features['inputs']", '[1, beam_size, 1, 1, 1]'], {}), True, 'import tensorflow as tf\n'), (553, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (["features['inputs']"], {}), False, 'from tensor2tensor.layers import common_layers\n'), (554, 'tensorflow.reshape', 'tf.reshape', (["features['inputs']", '[s[0] * s[1], s[2], s[3], s[4]]'], {}), True, 'import tensorflow as tf\n'), (630, 'tensorflow.expand_dims', 'tf.expand_dims', (["features['inputs']", '(2)'], {}), True, 'import tensorflow as tf\n'), (632, 'tensorflow.to_int64', 'tf.to_int64', (["features['inputs']"], {}), True, 'import tensorflow as tf\n'), (644, 'tensorflow.pad', 'tf.pad', (['recent_output', '[[0, 0], [0, 1], [0, 0], [0, 0]]'], {}), True, 'import tensorflow as tf\n'), (657, 'tensorflow.concat', 'tf.concat', (['[recent_output, cur_sample]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (662, 'tensorflow.concat', 'tf.concat', (['[recent_logits, logits[:, -1:]]', '(1)'], {}), True, 'import tensorflow as tf\n'), (669, 'tensorflow.to_int64', 'tf.to_int64', (["features['partial_targets']"], {}), True, 'import tensorflow as tf\n'), (675, 'tensorflow.zeros', 'tf.zeros', (['(batch_size, 0, 1, 1)'], {'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (679, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['initial_output'], {}), False, 'from tensor2tensor.layers import common_layers\n'), 
(690, 'tensorflow.python.eager.context.in_eager_mode', 'context.in_eager_mode', ([], {}), False, 'from tensorflow.python.eager import context\n'), (741, 'tensorflow.slice', 'tf.slice', (['result', '[0, partial_target_length, 0, 0]', '[-1, -1, -1, -1]'], {}), True, 'import tensorflow as tf\n'), (763, 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (782, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['v'], {}), True, 'import tensorflow as tf\n'), (783, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['v'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (807, 'six.iteritems', 'six.iteritems', (['feats'], {}), False, 'import six\n'), (904, 'tensorflow.name_scope', 'tf.name_scope', (['"""losses"""'], {}), True, 'import tensorflow as tf\n'), (929, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', (['tf.estimator.ModeKeys.TRAIN'], {'loss': 'loss', 'train_op': 'train_op'}), True, 'import tensorflow as tf\n'), (932, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', (['tf.estimator.ModeKeys.TRAIN'], {'loss': 'loss', 'train_op': 'train_op'}), True, 'import tensorflow as tf\n'), (960, 'tensor2tensor.utils.metrics.create_evaluation_metrics', 'metrics.create_evaluation_metrics', (['[problem]', 'hparams'], {}), False, 'from tensor2tensor.utils import metrics\n'), (962, 'six.iteritems', 'six.iteritems', (['eval_metrics_fns'], {}), False, 'import six\n'), (965, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', (['tf.estimator.ModeKeys.EVAL'], {'predictions': "{'predictions': logits}", 'eval_metric_ops': 'eval_metrics', 'loss': 'loss'}), True, 'import tensorflow as tf\n'), (1050, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""problem_0"""'], {}), True, 'import tensorflow as tf\n'), (1078, 'tensorflow.metrics.mean', 'tf.metrics.mean', (['num', 'den'], {}), True, 'import tensorflow as tf\n'), (1146, 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, vocab_size]'], {}), True, 'import tensorflow as tf\n'), (1186, 'tensorflow.random_uniform', 'tf.random_uniform', (['[]'], {}), True, 'import tensorflow as tf\n'), (1209, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['all_shards'], {}), True, 'import tensorflow as tf\n'), (113, 'tensor2tensor.utils.decoding.decode_hparams', 'decoding.decode_hparams', ([], {}), False, 'from tensor2tensor.utils import decoding\n'), (133, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.logging.warning', 'tf.logging.warning', (["('Missing feature %s - ignoring.' 
% key)"], {}), True, 'import tensorflow as tf\n'), (246, 'tensorflow.variable_scope', 'tf.variable_scope', (['input_modality.name'], {'reuse': 'do_reuse'}), True, 'import tensorflow as tf\n'), (303, 'tensorflow.expand_dims', 'tf.expand_dims', (['body_output[:, (-1), :, :]'], {'axis': '[1]'}), True, 'import tensorflow as tf\n'), (305, 'tensorflow.expand_dims', 'tf.expand_dims', (["features['targets'][:, -1:, :, :]"], {'axis': '[1]'}), True, 'import tensorflow as tf\n'), (323, 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (324, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (349, 'tensor2tensor.layers.common_layers.is_on_tpu', 'common_layers.is_on_tpu', ([], {}), False, 'from tensor2tensor.layers import common_layers\n'), (421, 'tensor2tensor.data_generators.problem.problem_hparams_to_features', 'problem_hparams_to_features', (['self._problem_hparams'], {}), False, 'from tensor2tensor.data_generators.problem import problem_hparams_to_features\n'), (520, 'tensorflow.expand_dims', 'tf.expand_dims', (['ids'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (525, 'tensorflow.tile', 'tf.tile', (['pt', '[1, beam_size]'], {}), True, 'import tensorflow as tf\n'), (526, 'tensorflow.reshape', 'tf.reshape', (['pt', '[batch_size * beam_size, pt_length, 1, 1]'], {}), True, 'import tensorflow as tf\n'), (527, 'tensorflow.concat', 'tf.concat', (['[pt, ids]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (550, 'tensorflow.expand_dims', 'tf.expand_dims', (["features['inputs']", '(4)'], {}), True, 'import tensorflow as tf\n'), (562, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (["features['inputs']"], {}), False, 'from tensor2tensor.layers import common_layers\n'), (642, 'tensorflow.python.eager.context.in_eager_mode', 'context.in_eager_mode', ([], {}), False, 'from tensorflow.python.eager import context\n'), (656, 'tensorflow.expand_dims', 'tf.expand_dims', (['cur_sample'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (658, 'tensorflow.python.eager.context.in_eager_mode', 'context.in_eager_mode', ([], {}), False, 'from tensorflow.python.eager import context\n'), (671, 'tensorflow.expand_dims', 'tf.expand_dims', (['initial_output', '(2)'], {}), True, 'import tensorflow as tf\n'), (672, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['initial_output'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (674, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (["features['inputs']"], {}), False, 'from tensor2tensor.layers import common_layers\n'), (696, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['result'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (739, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (["features['partial_targets']"], {}), False, 'from tensor2tensor.layers import common_layers\n'), (768, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['logits'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (771, 'tensorflow.multinomial', 'tf.multinomial', (['reshaped_logits', '(1)'], {}), True, 'import tensorflow as tf\n'), (772, 'tensorflow.reshape', 'tf.reshape', (['choices', 'logits_shape[:-1]'], {}), True, 'import tensorflow as tf\n'), (785, 'tensorflow.expand_dims', 'tf.expand_dims', (['v'], {'axis': '(-1)'}), True, 
'import tensorflow as tf\n'), (788, 'tensorflow.tile', 'tf.tile', (['v', '[self._num_datashards]'], {}), True, 'import tensorflow as tf\n'), (790, 'tensorflow.split', 'tf.split', (['v', 'self._num_datashards', '(0)'], {}), True, 'import tensorflow as tf\n'), (906, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['loss_name', 'loss_val'], {}), True, 'import tensorflow as tf\n'), (950, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', (['tf.estimator.ModeKeys.EVAL'], {'eval_metrics': '(eval_metrics_fn, logits)', 'loss': 'loss'}), True, 'import tensorflow as tf\n'), (955, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', (['tf.estimator.ModeKeys.EVAL'], {'eval_metrics': '(eval_metrics_fn, [logits, labels])', 'loss': 'loss'}), True, 'import tensorflow as tf\n'), (1046, 'tensorflow.global_variables', 'tf.global_variables', ([], {}), True, 'import tensorflow as tf\n'), (1052, 'tensorflow.get_variable', 'tf.get_variable', (["('%s_loss' % var_name)"], {'initializer': '(100.0)', 'trainable': '(False)'}), True, 'import tensorflow as tf\n'), (1147, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['x'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (1163, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (1164, 'tensorflow.variable_scope', 'tf.variable_scope', (['target_modality.name'], {}), True, 'import tensorflow as tf\n'), (1167, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""body"""'], {}), True, 'import tensorflow as tf\n'), (1171, 'tensorflow.variable_scope', 'tf.variable_scope', (['target_modality.name'], {}), True, 'import tensorflow as tf\n'), (1207, 'tensorflow.add_n', 'tf.add_n', (['sharded_num'], {}), True, 'import tensorflow as tf\n'), (1222, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['nonpadding'], {}), True, 'import tensorflow as tf\n'), (1223, 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('%s_nonpadding_tokens' % k)", 'nonpadding_tokens'], {}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.concat', 'tf.concat', (['v', '(0)'], {}), True, 'import tensorflow as tf\n'), (145, 'tensorflow.concat', 'tf.concat', (['sharded_logits', '(0)'], {}), True, 'import tensorflow as tf\n'), (423, 'tensorflow.constant', 'tf.constant', (['v'], {'name': 'k'}), True, 'import tensorflow as tf\n'), (524, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['pt'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (538, 'tensorflow.squeeze', 'tf.squeeze', (['logits'], {'axis': '[1, 2, 3]'}), True, 'import tensorflow as tf\n'), (540, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['ids'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (684, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (["features['inputs']"], {}), False, 'from tensor2tensor.layers import common_layers\n'), (709, 'tensorflow.not_equal', 'tf.not_equal', (['length', '(0)'], {}), True, 'import tensorflow as tf\n'), (715, 'tensorflow.equal', 'tf.equal', (['batch_size', '(1)'], {}), True, 'import tensorflow as tf\n'), (726, 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, None, None, None]'], {}), True, 'import tensorflow as tf\n'), (727, 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, None, None, None, None]'], {}), True, 'import tensorflow as tf\n'), (728, 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), True, 'import 
tensorflow as tf\n'), (770, 'tensorflow.reshape', 'tf.reshape', (['logits', '[-1, logits_shape[-1]]'], {}), True, 'import tensorflow as tf\n'), (799, 'six.iteritems', 'six.iteritems', (['features'], {}), False, 'import six\n'), (1008, 'tensorflow.estimator.export.PredictOutput', 'tf.estimator.export.PredictOutput', (['export_out'], {}), True, 'import tensorflow as tf\n'), (1015, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['losses'], {}), True, 'import tensorflow as tf\n'), (1153, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['sampled_targets'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (1207, 'tensorflow.add_n', 'tf.add_n', (['sharded_den'], {}), True, 'import tensorflow as tf\n'), (1221, 'tensorflow.not_equal', 'tf.not_equal', (['v', '(0)'], {}), True, 'import tensorflow as tf\n'), (1225, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['nonpadding'], {}), True, 'import tensorflow as tf\n'), (704, 'tensorflow.squeeze', 'tf.squeeze', (['result[:, (-1), :, :]'], {}), True, 'import tensorflow as tf\n'), (717, 'tensorflow.logical_and', 'tf.logical_and', (['not_overflow', 'not_eos'], {}), True, 'import tensorflow as tf\n'), (990, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (["features['inputs']"], {}), False, 'from tensor2tensor.layers import common_layers\n'), (1220, 'tensorflow.shape', 'tf.shape', (['v'], {}), True, 'import tensorflow as tf\n'), (1038, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (1219, 'tensorflow.shape', 'tf.shape', (['v'], {}), True, 'import tensorflow as tf\n'), (655, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['recent_output'], {}), False, 'from tensor2tensor.layers import common_layers\n')] |
devhliu/TFDeepSurv | 1847244b68abe987c1d7cb468856c06a912727f0 | from __future__ import print_function
import numpy as np
import tensorflow as tf
from lifelines.utils import concordance_index
from supersmoother import SuperSmoother
from tfdeepsurv import vision, utils
class L2DeepSurv(object):
def __init__(self, X, label,
input_node, hidden_layers_node, output_node,
learning_rate=0.001, learning_rate_decay=1.0,
activation='tanh',
L2_reg=0.0, L1_reg=0.0, optimizer='sgd',
dropout_keep_prob=1.0,
seed=1):
"""
L2DeepSurv Class Constructor.
Parameters:
X: np.array, covariate variables.
            label: dict, like {'e': event, 't': time}, Observation and Time in survival analysis.
input_node: int, number of covariate variables.
hidden_layers_node: list, hidden layers in network.
output_node: int, number of output.
learning_rate: float, learning rate.
learning_rate_decay: float, decay of learning rate.
activation: string, type of activation function.
            L1_reg: float, coefficient of the L1 regularization term.
            L2_reg: float, coefficient of the L2 regularization term.
optimizer: string, type of optimize algorithm.
dropout_keep_prob: float, probability of dropout.
seed: set random state.
Returns:
L2DeepSurv Class.
"""
# Prepare data
self.train_data = {}
self.train_data['X'], self.train_data['E'], \
self.train_data['T'], self.train_data['failures'], \
self.train_data['atrisk'], self.train_data['ties'] = utils.parse_data(X, label)
# New Graph
G = tf.Graph()
with G.as_default():
# Data input
X = tf.placeholder(tf.float32, [None, input_node], name = 'x-Input')
y_ = tf.placeholder(tf.float32, [None, output_node], name = 'label-Input')
# hidden layers
self.nnweights = [] # collect weights of network
prev_node = input_node
prev_x = X
for i in range(len(hidden_layers_node)):
layer_name = 'layer' + str(i+1)
with tf.variable_scope(layer_name, reuse=tf.AUTO_REUSE):
weights = tf.get_variable('weights', [prev_node, hidden_layers_node[i]],
initializer=tf.truncated_normal_initializer(stddev=0.1))
self.nnweights.append(weights)
biases = tf.get_variable('biases', [hidden_layers_node[i]],
initializer=tf.constant_initializer(0.0))
layer_out = tf.nn.dropout(tf.matmul(prev_x, weights) + biases, dropout_keep_prob)
if activation == 'relu':
layer_out = tf.nn.relu(layer_out)
elif activation == 'sigmoid':
layer_out = tf.nn.sigmoid(layer_out)
elif activation == 'tanh':
layer_out = tf.nn.tanh(layer_out)
else:
raise NotImplementedError('activation not recognized')
prev_node = hidden_layers_node[i]
prev_x = layer_out
# output layers
layer_name = 'layer_last'
with tf.variable_scope(layer_name, reuse=tf.AUTO_REUSE):
weights = tf.get_variable('weights', [prev_node, output_node],
initializer=tf.truncated_normal_initializer(stddev=0.1))
self.nnweights.append(weights)
biases = tf.get_variable('biases', [output_node],
initializer=tf.constant_initializer(0.0))
layer_out = tf.matmul(prev_x, weights) + biases
# Output of Network
y = layer_out
# Global step
with tf.variable_scope('training_step', reuse=tf.AUTO_REUSE):
global_step = tf.get_variable("global_step", [],
dtype=tf.int32,
initializer=tf.constant_initializer(0),
trainable=False)
# Loss value
reg_item = tf.contrib.layers.l1_l2_regularizer(L1_reg,
L2_reg)
reg_term = tf.contrib.layers.apply_regularization(reg_item, self.nnweights)
loss_fun = self._negative_log_likelihood(y_, y)
loss = loss_fun + reg_term
# SGD Optimizer
if optimizer == 'sgd':
lr = tf.train.exponential_decay(
learning_rate,
global_step,
1,
learning_rate_decay
)
train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss, global_step=global_step)
            elif optimizer == 'adam':
                train_step = tf.train.AdamOptimizer(learning_rate).\
                             minimize(loss, global_step=global_step)
            else:
                raise NotImplementedError('optimizer not recognized')
# init op
init_op = tf.global_variables_initializer()
# Save into class members
self.X = X
self.y_ = y_
self.y = y
self.global_step = global_step
self.loss = loss
self.train_step = train_step
self.configuration = {
'input_node': input_node,
'hidden_layers_node': hidden_layers_node,
'output_node': output_node,
'learning_rate': learning_rate,
'learning_rate_decay': learning_rate_decay,
'activation': activation,
'L1_reg': L1_reg,
'L2_reg': L2_reg,
'optimizer': optimizer,
'dropout': dropout_keep_prob
}
# Set random state
tf.set_random_seed(seed)
# create new Session for the DeepSurv Class
self.sess = tf.Session(graph=G)
# Initialize all global variables
self.sess.run(init_op)
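    # Example usage (a minimal sketch; `load_data` and the concrete hyper-parameters
    # below are hypothetical, not part of this module):
    #
    #   X, label = load_data()                    # X: np.array, label: {'t': ..., 'e': ...}
    #   model = L2DeepSurv(X, label,
    #                      input_node=X.shape[1], hidden_layers_node=[64, 32],
    #                      output_node=1, learning_rate=0.01, activation='tanh')
    #   model.train(num_epoch=2000, iteration=100, plot_train_loss=True)
    #   print(model.eval(X, label))               # concordance index on the data
    #   model.close()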
def train(self, num_epoch=5000, iteration=-1,
plot_train_loss=False, plot_train_CI=False):
"""
Training DeepSurv network.
Parameters:
num_epoch: times of iterating whole train set.
            iteration: print information on the train set every `iteration` training steps.
                       default -1, means keep silent.
plot_train_loss: plot curve of loss value during training.
plot_train_CI: plot curve of CI on train set during training.
Returns:
"""
# Record training steps
loss_list = []
CI_list = []
N = self.train_data['E'].shape[0]
# Train steps
for i in range(num_epoch):
_, output_y, loss_value, step = self.sess.run([self.train_step, self.y, self.loss, self.global_step],
feed_dict = {self.X: self.train_data['X'],
self.y_: self.train_data['E'].reshape((N, 1))})
# Record information
loss_list.append(loss_value)
label = {'t': self.train_data['T'],
'e': self.train_data['E']}
CI = self._Metrics_CI(label, output_y)
CI_list.append(CI)
# Print evaluation on test set
if (iteration != -1) and (i % iteration == 0):
print("-------------------------------------------------")
print("training steps %d:\nloss = %g.\n" % (step, loss_value))
print("CI = %g.\n" % CI)
# Plot curve
if plot_train_loss:
vision.plot_train_curve(loss_list, title="Loss(train)")
if plot_train_CI:
vision.plot_train_curve(CI_list, title="CI(train)")
def ties_type(self):
"""
return the type of ties in train data.
"""
return self.train_data['ties']
def predict(self, X):
"""
Predict risk of X using trained network.
Parameters:
X: np.array, covariate variables.
Returns:
np.array, shape(n,), Proportional risk of X.
"""
risk = self.sess.run([self.y], feed_dict = {self.X: X})
return np.squeeze(risk)
def eval(self, X, label):
"""
Evaluate test set using CI metrics.
Parameters:
X: np.array, covariate variables.
            label: dict, like {'e': event, 't': time}, Observation and Time in survival analysis.
        Returns:
            float, concordance index (CI) of the predictions on (X, label).
"""
pred_risk = self.predict(X)
CI = self._Metrics_CI(label, pred_risk)
return CI
def close(self):
"""
close session of tensorflow.
"""
self.sess.close()
print("Current session closed!")
def _negative_log_likelihood(self, y_true, y_pred):
"""
Callable loss function for DeepSurv network.
the negative average log-likelihood of the prediction
of this model under a given target distribution.
Parameters:
y_true: tensor, observations.
y_pred: tensor, output of network.
Returns:
loss value, means negative log-likelihood.
"""
logL = 0
# pre-calculate cumsum
cumsum_y_pred = tf.cumsum(y_pred)
hazard_ratio = tf.exp(y_pred)
cumsum_hazard_ratio = tf.cumsum(hazard_ratio)
if self.train_data['ties'] == 'noties':
log_risk = tf.log(cumsum_hazard_ratio)
likelihood = y_pred - log_risk
# dimension for E: np.array -> [None, 1]
uncensored_likelihood = likelihood * y_true
logL = -tf.reduce_sum(uncensored_likelihood)
else:
# Loop for death times
for t in self.train_data['failures']:
tfail = self.train_data['failures'][t]
trisk = self.train_data['atrisk'][t]
d = len(tfail)
dr = len(trisk)
logL += -cumsum_y_pred[tfail[-1]] + (0 if tfail[0] == 0 else cumsum_y_pred[tfail[0]-1])
if self.train_data['ties'] == 'breslow':
s = cumsum_hazard_ratio[trisk[-1]]
logL += tf.log(s) * d
elif self.train_data['ties'] == 'efron':
s = cumsum_hazard_ratio[trisk[-1]]
r = cumsum_hazard_ratio[tfail[-1]] - (0 if tfail[0] == 0 else cumsum_hazard_ratio[tfail[0]-1])
for j in range(d):
logL += tf.log(s - j * r / d)
else:
raise NotImplementedError('tie breaking method not recognized')
# negative average log-likelihood
observations = tf.reduce_sum(y_true)
return logL / observations
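    # Reference note: in the no-ties case the value returned above equals the negative
    # Cox partial log-likelihood, averaged over the number of observed events:
    #   - (1 / N_events) * sum_i E_i * ( h(x_i) - log sum_{j in R(t_i)} exp(h(x_j)) )
    # where h(x) is the network output (y_pred), E_i the event indicator and R(t_i)
    # the risk set at time t_i. The prefix cumulative sums above stand in for the
    # risk-set sums (relying on the ordering produced by utils.parse_data), and the
    # 'breslow' / 'efron' branches adjust the denominator for tied event times.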
def _Metrics_CI(self, label_true, y_pred):
"""
Compute the concordance-index value.
Parameters:
            label_true: dict, like {'e': event, 't': time}, Observation and Time in survival analysis.
y_pred: np.array, predictive proportional risk of network.
Returns:
concordance index.
"""
hr_pred = -y_pred
ci = concordance_index(label_true['t'],
hr_pred,
label_true['e'])
return ci
def evaluate_var_byWeights(self):
"""
evaluate feature importance by weights of NN.
"""
# fetch weights of network
W = [self.sess.run(w) for w in self.nnweights]
n_w = len(W)
# matrix multiplication for all hidden layers except last output layer
        hiddenMM = W[-2].T
for i in range(n_w - 3, -1, -1):
hiddenMM = np.dot(hiddenMM, W[i].T)
# multiply last layer matrix and compute the sum of each variable for VIP
last_layer = W[-1]
s = np.dot(np.diag(last_layer[:, 0]), hiddenMM)
        sumr = s / s.sum(axis=1).reshape(s.shape[0], 1)
score = sumr.sum(axis=0)
VIP = score / score.max()
for i, v in enumerate(VIP):
print("%dth feature score : %g." % (i, v))
return VIP
def survivalRate(self, X, algo="wwe", base_X=None, base_label=None, smoothed=False):
"""
Estimator of survival function for data X.
Parameters:
X: np.array, covariate variables of patients.
algo: algorithm for estimating survival function.
base_X: X of patients for estimating survival function.
base_label: label of patients for estimating survival function.
smoothed: smooth survival function or not.
Returns:
T0: time points of survival function.
ST: survival rate of survival function.
"""
risk = self.predict(X)
hazard_ratio = np.exp(risk.reshape((risk.shape[0], 1)))
# Estimate S0(t) using data(base_X, base_label)
T0, S0 = self.basesurv(algo=algo, X=base_X, label=base_label, smoothed=smoothed)
ST = S0**(hazard_ratio)
vision.plt_surLines(T0, ST)
return T0, ST
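    # Note: the line `ST = S0 ** hazard_ratio` above is the proportional-hazards
    # relation S(t | x) = S0(t) ** exp(h(x)), with the baseline S0(t) estimated by
    # `basesurv` below.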
def basesurv(self, algo="wwe", X=None, label=None, smoothed=False):
"""
Estimate base survival function S0(t) based on data(X, label).
Parameters:
algo: algorithm for estimating survival function.
X: X of patients for estimating survival function.
label: label of patients for estimating survival function.
smoothed: smooth survival function or not.
Returns:
T0: time points of base survival function.
ST: survival rate of base survival function.
See:
            Algorithms for estimating the baseline survival function:
(1). wwe: WWE(with ties)
(2). kp: Kalbfleisch & Prentice Estimator(without ties)
(3). bsl: breslow(with ties, but exists negative value)
"""
# Get data for estimating S0(t)
if X is None or label is None:
X = self.train_data['X']
label = {'t': self.train_data['T'],
'e': self.train_data['E']}
X, E, T, failures, atrisk, ties = utils.parse_data(X, label)
s0 = [1]
risk = self.predict(X)
hz_ratio = np.exp(risk)
if algo == 'wwe':
for t in T[::-1]:
if t in atrisk:
# R(t_i) - D_i
trisk = [j for j in atrisk[t] if j not in failures[t]]
dt = len(failures[t]) * 1.0
s = np.sum(hz_ratio[trisk])
cj = 1 - dt / (dt + s)
s0.append(cj)
else:
s0.append(1)
elif algo == 'kp':
for t in T[::-1]:
if t in atrisk:
# R(t_i)
trisk = atrisk[t]
s = np.sum(hz_ratio[trisk])
si = hz_ratio[failures[t][0]]
cj = (1 - si / s) ** (1 / si)
s0.append(cj)
else:
s0.append(1)
elif algo == 'bsl':
for t in T[::-1]:
if t in atrisk:
# R(t_i)
trisk = atrisk[t]
dt = len(failures[t]) * 1.0
s = np.sum(hz_ratio[trisk])
cj = 1 - dt / s
s0.append(cj)
else:
s0.append(1)
else:
            raise NotImplementedError('baseline survival algorithm not recognized')
# base survival function
S0 = np.cumprod(s0, axis=0)
T0 = np.insert(T[::-1], 0, 0, axis=0)
if smoothed:
# smooth the baseline hazard
ss = SuperSmoother()
#Check duplication points
ss.fit(T0, S0, dy=100)
S0 = ss.predict(T0)
return T0, S0 | [
"numpy.diag",
"numpy.dot",
"tensorflow.contrib.layers.apply_regularization",
"tensorflow.contrib.layers.l1_l2_regularizer",
"tensorflow.reduce_sum",
"numpy.squeeze",
"numpy.exp",
"tensorflow.cumsum",
"tensorflow.Graph",
"tensorflow.truncated_normal_initializer",
"tensorflow.train.exponential_decay",
"numpy.insert",
"tensorflow.Session",
"tensorflow.matmul",
"tensorflow.nn.sigmoid",
"tensorflow.exp",
"tensorflow.placeholder",
"tensorflow.nn.tanh",
"tensorflow.global_variables_initializer",
"numpy.cumprod",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.set_random_seed",
"numpy.sum",
"tensorflow.nn.relu",
"tensorflow.constant_initializer",
"tensorflow.log",
"tensorflow.variable_scope"
] | tfdeepsurv/L2DeepSurv.py | [(41, 'tfdeepsurv.utils.parse_data', 'utils.parse_data', (['X', 'label'], {}), False, 'from tfdeepsurv import vision, utils\n'), (43, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (137, 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), True, 'import tensorflow as tf\n'), (139, 'tensorflow.Session', 'tf.Session', ([], {'graph': 'G'}), True, 'import tensorflow as tf\n'), (202, 'numpy.squeeze', 'np.squeeze', (['risk'], {}), True, 'import numpy as np\n'), (241, 'tensorflow.cumsum', 'tf.cumsum', (['y_pred'], {}), True, 'import tensorflow as tf\n'), (242, 'tensorflow.exp', 'tf.exp', (['y_pred'], {}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.cumsum', 'tf.cumsum', (['hazard_ratio'], {}), True, 'import tensorflow as tf\n'), (271, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['y_true'], {}), True, 'import tensorflow as tf\n'), (286, 'lifelines.utils.concordance_index', 'concordance_index', (["label_true['t']", 'hr_pred', "label_true['e']"], {}), False, 'from lifelines.utils import concordance_index\n'), (334, 'tfdeepsurv.vision.plt_surLines', 'vision.plt_surLines', (['T0', 'ST'], {}), False, 'from tfdeepsurv import vision, utils\n'), (362, 'tfdeepsurv.utils.parse_data', 'utils.parse_data', (['X', 'label'], {}), False, 'from tfdeepsurv import vision, utils\n'), (366, 'numpy.exp', 'np.exp', (['risk'], {}), True, 'import numpy as np\n'), (403, 'numpy.cumprod', 'np.cumprod', (['s0'], {'axis': '(0)'}), True, 'import numpy as np\n'), (404, 'numpy.insert', 'np.insert', (['T[::-1]', '(0)', '(0)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (46, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, input_node]'], {'name': '"""x-Input"""'}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, output_node]'], {'name': '"""label-Input"""'}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.contrib.layers.l1_l2_regularizer', 'tf.contrib.layers.l1_l2_regularizer', (['L1_reg', 'L2_reg'], {}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.contrib.layers.apply_regularization', 'tf.contrib.layers.apply_regularization', (['reg_item', 'self.nnweights'], {}), True, 'import tensorflow as tf\n'), (115, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (180, 'tfdeepsurv.vision.plot_train_curve', 'vision.plot_train_curve', (['loss_list'], {'title': '"""Loss(train)"""'}), False, 'from tfdeepsurv import vision, utils\n'), (183, 'tfdeepsurv.vision.plot_train_curve', 'vision.plot_train_curve', (['CI_list'], {'title': '"""CI(train)"""'}), False, 'from tfdeepsurv import vision, utils\n'), (245, 'tensorflow.log', 'tf.log', (['cumsum_hazard_ratio'], {}), True, 'import tensorflow as tf\n'), (301, 'numpy.dot', 'np.dot', (['hiddenMM', 'W[i].T'], {}), True, 'import numpy as np\n'), (304, 'numpy.diag', 'np.diag', (['last_layer[:, (0)]'], {}), True, 'import numpy as np\n'), (408, 'supersmoother.SuperSmoother', 'SuperSmoother', ([], {}), False, 'from supersmoother import SuperSmoother\n'), (77, 'tensorflow.variable_scope', 'tf.variable_scope', (['layer_name'], {'reuse': 'tf.AUTO_REUSE'}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""training_step"""'], {'reuse': 'tf.AUTO_REUSE'}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['learning_rate', 'global_step', '(1)', 
'learning_rate_decay'], {}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['uncensored_likelihood'], {}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.variable_scope', 'tf.variable_scope', (['layer_name'], {'reuse': 'tf.AUTO_REUSE'}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.matmul', 'tf.matmul', (['prev_x', 'weights'], {}), True, 'import tensorflow as tf\n'), (373, 'numpy.sum', 'np.sum', (['hz_ratio[trisk]'], {}), True, 'import numpy as np\n'), (65, 'tensorflow.nn.relu', 'tf.nn.relu', (['layer_out'], {}), True, 'import tensorflow as tf\n'), (79, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.1)'}), True, 'import tensorflow as tf\n'), (83, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['lr'], {}), True, 'import tensorflow as tf\n'), (262, 'tensorflow.log', 'tf.log', (['s'], {}), True, 'import tensorflow as tf\n'), (383, 'numpy.sum', 'np.sum', (['hz_ratio[trisk]'], {}), True, 'import numpy as np\n'), (56, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.1)'}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.matmul', 'tf.matmul', (['prev_x', 'weights'], {}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['layer_out'], {}), True, 'import tensorflow as tf\n'), (110, 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), True, 'import tensorflow as tf\n'), (267, 'tensorflow.log', 'tf.log', (['(s - j * r / d)'], {}), True, 'import tensorflow as tf\n'), (395, 'numpy.sum', 'np.sum', (['hz_ratio[trisk]'], {}), True, 'import numpy as np\n'), (69, 'tensorflow.nn.tanh', 'tf.nn.tanh', (['layer_out'], {}), True, 'import tensorflow as tf\n')] |
zuoanqh/trfl | eee6c84bc565517c56e74828e26f7e7e401b33a0 | # Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow ops for multistep return evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import tensorflow as tf
def _reverse_seq(sequence, sequence_lengths=None):
"""Reverse sequence along dim 0.
Args:
sequence: Tensor of shape [T, B, ...].
sequence_lengths: (optional) tensor of shape [B]. If `None`, only reverse
along dim 0.
Returns:
Tensor of same shape as sequence with dim 0 reversed up to sequence_lengths.
"""
if sequence_lengths is None:
return tf.reverse(sequence, [0])
sequence_lengths = tf.convert_to_tensor(sequence_lengths)
with tf.control_dependencies(
[tf.assert_equal(sequence.shape[1], sequence_lengths.shape[0])]):
return tf.reverse_sequence(
sequence, sequence_lengths, seq_axis=0, batch_axis=1)
def scan_discounted_sum(sequence, decay, initial_value, reverse=False,
sequence_lengths=None, back_prop=True,
name="scan_discounted_sum"):
"""Evaluates a cumulative discounted sum along dimension 0.
```python
if reverse = False:
result[1] = sequence[1] + decay[1] * initial_value
result[k] = sequence[k] + decay[k] * result[k - 1]
if reverse = True:
result[last] = sequence[last] + decay[last] * initial_value
result[k] = sequence[k] + decay[k] * result[k + 1]
```
Respective dimensions T, B and ... have to be the same for all input tensors.
T: temporal dimension of the sequence; B: batch dimension of the sequence.
if sequence_lengths is set then x1 and x2 below are equivalent:
```python
x1 = zero_pad_to_length(
scan_discounted_sum(
sequence[:length], decays[:length], **kwargs), length=T)
x2 = scan_discounted_sum(sequence, decays,
sequence_lengths=[length], **kwargs)
```
Args:
sequence: Tensor of shape `[T, B, ...]` containing values to be summed.
decay: Tensor of shape `[T, B, ...]` containing decays/discounts.
initial_value: Tensor of shape `[B, ...]` containing initial value.
reverse: Whether to process the sum in a reverse order.
sequence_lengths: Tensor of shape `[B]` containing sequence lengths to be
(reversed and then) summed.
back_prop: Whether to backpropagate.
name: Sets the name_scope for this op.
Returns:
Cumulative sum with discount. Same shape and type as `sequence`.
"""
# Note this can be implemented in terms of cumprod and cumsum,
# approximately as (ignoring boundary issues and initial_value):
#
# cumsum(decay_prods * sequence) / decay_prods
# where decay_prods = reverse_cumprod(decay)
#
# One reason this hasn't been done is that multiplying then dividing again by
# products of decays isn't ideal numerically, in particular if any of the
# decays are zero it results in NaNs.
with tf.name_scope(name, values=[sequence, decay, initial_value]):
if sequence_lengths is not None:
# Zero out sequence and decay beyond sequence_lengths.
with tf.control_dependencies(
[tf.assert_equal(sequence.shape[0], decay.shape[0])]):
mask = tf.sequence_mask(sequence_lengths, maxlen=sequence.shape[0],
dtype=sequence.dtype)
mask = tf.transpose(mask)
# Adding trailing dimensions to mask to allow for broadcasting.
to_seq = mask.shape.dims + [1] * (sequence.shape.ndims - mask.shape.ndims)
sequence *= tf.reshape(mask, to_seq)
to_decay = mask.shape.dims + [1] * (decay.shape.ndims - mask.shape.ndims)
decay *= tf.reshape(mask, to_decay)
sequences = [sequence, decay]
if reverse:
sequences = [_reverse_seq(s, sequence_lengths) for s in sequences]
summed = tf.scan(lambda a, x: x[0] + x[1] * a,
sequences,
initializer=tf.convert_to_tensor(initial_value),
parallel_iterations=1,
back_prop=back_prop)
if reverse:
summed = _reverse_seq(summed, sequence_lengths)
return summed
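# Example usage of scan_discounted_sum (a minimal sketch with made-up numbers):
#
#   rewards = tf.constant([[1.], [2.], [3.]])         # shape [T=3, B=1]
#   pcontinues = tf.constant([[0.9], [0.9], [0.9]])
#   bootstrap = tf.constant([10.])                    # shape [B=1]
#   returns = scan_discounted_sum(rewards, pcontinues, bootstrap, reverse=True)
#   # returns[2] = 3 + 0.9 * 10    = 12.0
#   # returns[1] = 2 + 0.9 * 12.0  = 12.8
#   # returns[0] = 1 + 0.9 * 12.8  = 12.52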
def multistep_forward_view(rewards, pcontinues, state_values, lambda_,
back_prop=True, sequence_lengths=None,
name="multistep_forward_view_op"):
"""Evaluates complex backups (forward view of eligibility traces).
```python
result[t] = rewards[t] +
pcontinues[t]*(lambda_[t]*result[t+1] + (1-lambda_[t])*state_values[t])
result[last] = rewards[last] + pcontinues[last]*state_values[last]
```
  This operation evaluates multistep returns where the lambda_ parameter
  controls mixing between full returns and bootstrapping. It is the user's
  responsibility to provide state_values. Depending on how state_values are
  evaluated, this function can evaluate targets for Q(lambda), Sarsa(lambda)
  or some other multistep bootstrapping algorithm.
More information about a forward view is given here:
http://incompleteideas.net/sutton/book/ebook/node74.html
Please note that instead of evaluating traces and then explicitly summing
them we instead evaluate mixed returns in the reverse temporal order
by using the recurrent relationship given above.
  The parameter lambda_ can either be a constant value (e.g. for Peng's
  Q(lambda) and Sarsa(lambda)) or alternatively it can be a tensor containing
  arbitrary values (Watkins' Q(lambda), Munos' Retrace, etc).
The result of evaluating this recurrence relation is a weighted sum of
n-step returns, as depicted in the diagram below. One strategy to prove this
equivalence notes that many of the terms in adjacent n-step returns
"telescope", or cancel out, when the returns are summed.
Below L3 is lambda at time step 3 (important: this diagram is 1-indexed, not
0-indexed like Python). If lambda is scalar then L1=L2=...=Ln.
g1,...,gn are discounts.
```
Weights: (1-L1) (1-L2)*l1 (1-L3)*l1*l2 ... L1*L2*...*L{n-1}
Returns: |r1*(g1)+ |r1*(g1)+ |r1*(g1)+ |r1*(g1)+
v1*(g1) |r2*(g1*g2)+ |r2*(g1*g2)+ |r2*(g1*g2)+
v2*(g1*g2) |r3*(g1*g2*g3)+ |r3*(g1*g2*g3)+
v3*(g1*g2*g3) ...
|rn*(g1*...*gn)+
vn*(g1*...*gn)
```
Args:
rewards: Tensor of shape `[T, B]` containing rewards.
pcontinues: Tensor of shape `[T, B]` containing discounts.
state_values: Tensor of shape `[T, B]` containing state values.
lambda_: Mixing parameter lambda.
The parameter can either be a scalar or a Tensor of shape `[T, B]`
if mixing is a function of state.
back_prop: Whether to backpropagate.
sequence_lengths: Tensor of shape `[B]` containing sequence lengths to be
(reversed and then) summed, same as in `scan_discounted_sum`.
name: Sets the name_scope for this op.
Returns:
Tensor of shape `[T, B]` containing multistep returns.
"""
with tf.name_scope(name, values=[rewards, pcontinues, state_values]):
# Regroup:
# result[t] = (rewards[t] + pcontinues[t]*(1-lambda_)*state_values[t]) +
# pcontinues[t]*lambda_*result[t + 1]
# Define:
# sequence[t] = rewards[t] + pcontinues[t]*(1-lambda_)*state_values[t]
# discount[t] = pcontinues[t]*lambda_
# Substitute:
# result[t] = sequence[t] + discount[t]*result[t + 1]
# Boundary condition:
# result[last] = rewards[last] + pcontinues[last]*state_values[last]
# Add and subtract the same quantity at BC:
# state_values[last] =
# lambda_*state_values[last] + (1-lambda_)*state_values[last]
# This makes:
# result[last] =
# (rewards[last] + pcontinues[last]*(1-lambda_)*state_values[last]) +
# pcontinues[last]*lambda_*state_values[last]
# Substitute in definitions for sequence and discount:
# result[last] = sequence[last] + discount[last]*state_values[last]
# Define:
# initial_value=state_values[last]
# We get the following recurrent relationship:
# result[last] = sequence[last] + decay[last]*initial_value
# result[k] = sequence[k] + decay[k] * result[k + 1]
# This matches the form of scan_discounted_sum:
# result = scan_sum_with_discount(sequence, discount,
# initial_value = state_values[last])
sequence = rewards + pcontinues * state_values * (1 - lambda_)
discount = pcontinues * lambda_
return scan_discounted_sum(sequence, discount, state_values[-1],
reverse=True, sequence_lengths=sequence_lengths,
back_prop=back_prop)
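# Example (a minimal sketch): with scalar lambda_=0.0 the result above reduces to
# the one-step TD target rewards[t] + pcontinues[t] * state_values[t]; with
# lambda_=1.0 it becomes the full discounted return bootstrapped from
# state_values[last]. A typical call:
#
#   targets = multistep_forward_view(rewards, pcontinues, state_values,
#                                    lambda_=0.9)    # shape [T, B]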
| [
"tensorflow.convert_to_tensor",
"tensorflow.reverse",
"tensorflow.transpose",
"tensorflow.reshape",
"tensorflow.assert_equal",
"tensorflow.reverse_sequence",
"tensorflow.name_scope",
"tensorflow.sequence_mask"
] | trfl/sequence_ops.py | [(39, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['sequence_lengths'], {}), True, 'import tensorflow as tf\n'), (37, 'tensorflow.reverse', 'tf.reverse', (['sequence', '[0]'], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.reverse_sequence', 'tf.reverse_sequence', (['sequence', 'sequence_lengths'], {'seq_axis': '(0)', 'batch_axis': '(1)'}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.name_scope', 'tf.name_scope', (['name'], {'values': '[sequence, decay, initial_value]'}), True, 'import tensorflow as tf\n'), (183, 'tensorflow.name_scope', 'tf.name_scope', (['name'], {'values': '[rewards, pcontinues, state_values]'}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.reshape', 'tf.reshape', (['mask', 'to_seq'], {}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.reshape', 'tf.reshape', (['mask', 'to_decay'], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.assert_equal', 'tf.assert_equal', (['sequence.shape[1]', 'sequence_lengths.shape[0]'], {}), True, 'import tensorflow as tf\n'), (98, 'tensorflow.sequence_mask', 'tf.sequence_mask', (['sequence_lengths'], {'maxlen': 'sequence.shape[0]', 'dtype': 'sequence.dtype'}), True, 'import tensorflow as tf\n'), (100, 'tensorflow.transpose', 'tf.transpose', (['mask'], {}), True, 'import tensorflow as tf\n'), (114, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['initial_value'], {}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.assert_equal', 'tf.assert_equal', (['sequence.shape[0]', 'decay.shape[0]'], {}), True, 'import tensorflow as tf\n')] |
ADALabUCSD/DeepPostures | f51acc8fea2aa76fe0150f87284f624840016095 | # Copyright 2021 Supun Nakandala. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import h5py
import numpy as np
import tensorflow
if int(tensorflow.__version__.split(".")[0]) == 2:
import tensorflow.compat.v1 as tf
else:
import tensorflow as tf
from datetime import datetime, timedelta
def input_iterator(data_root, subject_id, train=False):
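    """Yield contiguous (data, timestamps, labels) segments for one subject.

    Daily pre-processed H5 files are read in date order; a new segment is started
    whenever a sleeping window, a non-wear window, or (when train=True) an
    unlabeled window (label == -1) is encountered.
    """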
fnames = [name.split('.')[0] for name in os.listdir(os.path.join(data_root, subject_id)) if not name.startswith('.')]
fnames.sort()
for i in range(len(fnames) - 1):
assert datetime.strptime(fnames[i+1], "%Y-%m-%d").date() - datetime.strptime(fnames[i], "%Y-%m-%d").date() == timedelta(days=1)
data_batch = []
timestamps_batch = []
label_batch = []
for fname in fnames:
h5f = h5py.File(os.path.join(data_root, subject_id, '{}.h5'.format(fname)), 'r')
timestamps = h5f.get('time')[:]
data = h5f.get('data')[:]
sleeping = h5f.get('sleeping')[:]
non_wear = h5f.get('non_wear')[:]
label = h5f.get('label')[:]
for d, t, s, nw, l in zip(data, timestamps, sleeping, non_wear, label):
# if train and l == -1:
# raise Exception('Missing ground truth label information in pre-processed data')
if s == 1 or nw == 1 or (train and l == -1):
if len(timestamps_batch) > 0:
yield np.array(data_batch), np.array(timestamps_batch), np.array(label_batch)
data_batch = []
timestamps_batch = []
label_batch = []
continue
data_batch.append(d)
timestamps_batch.append(t)
label_batch.append(l)
h5f.close()
if len(timestamps_batch) > 0:
yield np.array(data_batch), np.array(timestamps_batch), np.array(label_batch)
def cnn_bi_lstm_model(x, amp_factor, bil_lstm_win_size, num_classes):
logits = cnn_model(x, amp_factor=amp_factor)
logits = tf.reshape(logits, [-1, bil_lstm_win_size, 256*amp_factor])
forward_cell = tf.nn.rnn_cell.LSTMCell(128)
backward_cell = tf.nn.rnn_cell.LSTMCell(128)
encoder_outputs,_ = tf.nn.bidirectional_dynamic_rnn(
forward_cell,
backward_cell,
logits,
dtype=tf.float32
)
encoder_outputs = tf.concat(encoder_outputs, axis=2)
logits = tf.reshape(tf.layers.dense(encoder_outputs, units=num_classes), [-1, bil_lstm_win_size, num_classes])
return logits
def cnn_model(x, amp_factor=1):
with tf.variable_scope('model'):
conv1 = tf.layers.conv2d(x, filters=32*amp_factor, kernel_size=[5, 3],
data_format='channels_last', padding= "same",
strides=(2, 1),
activation=tf.nn.relu)
pool1 = conv1
conv2 = tf.layers.conv2d(pool1, filters=64*amp_factor, kernel_size=[5, 1],
data_format='channels_last', padding= "same",
strides=(2, 1),
activation=tf.nn.relu)
pool2 = conv2
conv3 = tf.layers.conv2d(pool2, filters=128*amp_factor, kernel_size=[5, 1],
data_format='channels_last', padding= "same",
strides=(2, 1),
activation=tf.nn.relu)
pool3 = conv3
conv4 = tf.layers.conv2d(pool3, filters=256*amp_factor, kernel_size=[5, 1],
data_format='channels_last', padding= "same",
strides=(2, 1),
activation=tf.nn.relu)
pool4 = conv4
conv5 = tf.layers.conv2d(pool4, filters=256*amp_factor, kernel_size=[5, 1],
data_format='channels_last', padding= "same",
strides=(2, 1),
activation=tf.nn.relu)
pool5 = conv5
pool5 = tf.transpose(pool5, [0, 3, 1, 2])
size = pool5.shape[-1] * pool5.shape[-2] * pool5.shape[-3]
logits = tf.layers.dense(tf.reshape(pool5,(-1, size)), units=256*amp_factor)
return logits
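# Example usage (a minimal sketch; the window length, sequence length and class count
# below are assumptions, not values fixed by this file):
#
#   win_len, lstm_win = 100, 42    # accelerometer samples per window, windows per sequence
#   x = tf.placeholder(tf.float32, [None, win_len, 3, 1])    # batch dim must be N * lstm_win
#   logits = cnn_bi_lstm_model(x, amp_factor=2, bil_lstm_win_size=lstm_win,
#                              num_classes=2)                # -> [N, lstm_win, num_classes]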
| [
"tensorflow.layers.conv2d",
"tensorflow.__version__.split",
"tensorflow.concat",
"tensorflow.transpose",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.reshape",
"tensorflow.layers.dense",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.variable_scope",
"numpy.array"
] | MSSE-2021/commons.py | [(68, 'tensorflow.reshape', 'tf.reshape', (['logits', '[-1, bil_lstm_win_size, 256 * amp_factor]'], {}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', (['(128)'], {}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', (['(128)'], {}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', (['forward_cell', 'backward_cell', 'logits'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (78, 'tensorflow.concat', 'tf.concat', (['encoder_outputs'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (79, 'tensorflow.layers.dense', 'tf.layers.dense', (['encoder_outputs'], {'units': 'num_classes'}), True, 'import tensorflow as tf\n'), (84, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x'], {'filters': '(32 * amp_factor)', 'kernel_size': '[5, 3]', 'data_format': '"""channels_last"""', 'padding': '"""same"""', 'strides': '(2, 1)', 'activation': 'tf.nn.relu'}), True, 'import tensorflow as tf\n'), (91, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['pool1'], {'filters': '(64 * amp_factor)', 'kernel_size': '[5, 1]', 'data_format': '"""channels_last"""', 'padding': '"""same"""', 'strides': '(2, 1)', 'activation': 'tf.nn.relu'}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['pool2'], {'filters': '(128 * amp_factor)', 'kernel_size': '[5, 1]', 'data_format': '"""channels_last"""', 'padding': '"""same"""', 'strides': '(2, 1)', 'activation': 'tf.nn.relu'}), True, 'import tensorflow as tf\n'), (103, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['pool3'], {'filters': '(256 * amp_factor)', 'kernel_size': '[5, 1]', 'data_format': '"""channels_last"""', 'padding': '"""same"""', 'strides': '(2, 1)', 'activation': 'tf.nn.relu'}), True, 'import tensorflow as tf\n'), (109, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['pool4'], {'filters': '(256 * amp_factor)', 'kernel_size': '[5, 1]', 'data_format': '"""channels_last"""', 'padding': '"""same"""', 'strides': '(2, 1)', 'activation': 'tf.nn.relu'}), True, 'import tensorflow as tf\n'), (114, 'tensorflow.transpose', 'tf.transpose', (['pool5', '[0, 3, 1, 2]'], {}), True, 'import tensorflow as tf\n'), (20, 'tensorflow.__version__.split', 'tensorflow.__version__.split', (['"""."""'], {}), False, 'import tensorflow\n'), (31, 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), False, 'from datetime import datetime, timedelta\n'), (117, 'tensorflow.reshape', 'tf.reshape', (['pool5', '(-1, size)'], {}), True, 'import tensorflow as tf\n'), (28, 'os.path.join', 'os.path.join', (['data_root', 'subject_id'], {}), False, 'import os\n'), (63, 'numpy.array', 'np.array', (['data_batch'], {}), True, 'import numpy as np\n'), (63, 'numpy.array', 'np.array', (['timestamps_batch'], {}), True, 'import numpy as np\n'), (63, 'numpy.array', 'np.array', (['label_batch'], {}), True, 'import numpy as np\n'), (31, 'datetime.datetime.strptime', 'datetime.strptime', (['fnames[i + 1]', '"""%Y-%m-%d"""'], {}), False, 'from datetime import datetime, timedelta\n'), (31, 'datetime.datetime.strptime', 'datetime.strptime', (['fnames[i]', '"""%Y-%m-%d"""'], {}), False, 'from datetime import datetime, timedelta\n'), (50, 'numpy.array', 'np.array', (['data_batch'], {}), True, 'import numpy as np\n'), (50, 'numpy.array', 
'np.array', (['timestamps_batch'], {}), True, 'import numpy as np\n'), (50, 'numpy.array', 'np.array', (['label_batch'], {}), True, 'import numpy as np\n')] |
knarfamlap/tensor2tensor | 92ebc7152e0f4f42871251f17dbe6db8409d4fae | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for common image attention utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensor2tensor.layers import common_image_attention
import tensorflow as tf
class CommonImageAttentionTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(common_image_attention.DistributionType.DMOL, 5, 50),
(common_image_attention.DistributionType.CAT, None, 256),
)
def testPostProcessImageTrainMode(self, likelihood, num_mixtures, depth):
batch = 1
rows = 8
cols = 24
hparams = tf.contrib.training.HParams(
hidden_size=2,
likelihood=likelihood,
mode=tf.estimator.ModeKeys.TRAIN,
num_mixtures=num_mixtures,
)
inputs = tf.random_uniform([batch, rows, cols, hparams.hidden_size],
minval=-1., maxval=1.)
outputs = common_image_attention.postprocess_image(
inputs, rows, cols, hparams)
self.assertEqual(outputs.shape, (batch, rows, cols, depth))
@parameterized.parameters(
(common_image_attention.DistributionType.DMOL, 5, 50),
(common_image_attention.DistributionType.CAT, None, 256),
)
def testPostProcessImageInferMode(self, likelihood, num_mixtures, depth):
batch = 1
rows = 8
cols = 24
block_length = 4
block_width = 2
hparams = tf.contrib.training.HParams(
block_raster_scan=True,
hidden_size=2,
likelihood=likelihood,
mode=tf.estimator.ModeKeys.PREDICT,
num_mixtures=num_mixtures,
query_shape=[block_length, block_width],
)
inputs = tf.random_uniform([batch, rows, cols, hparams.hidden_size],
minval=-1., maxval=1.)
outputs = common_image_attention.postprocess_image(
inputs, rows, cols, hparams)
num_blocks_rows = rows // block_length
num_blocks_cols = cols // block_width
self.assertEqual(outputs.shape,
(batch, num_blocks_rows, num_blocks_cols,
block_length, block_width, depth))
@parameterized.parameters(
(common_image_attention.DistributionType.DMOL, 5, 50),
(common_image_attention.DistributionType.CAT, None, 256),
)
def testCreateOutputTrainMode(self, likelihood, num_mixtures, depth):
batch = 1
height = 8
width = 8
channels = 3
rows = height
if likelihood == common_image_attention.DistributionType.CAT:
cols = channels * width
else:
cols = width
hparams = tf.contrib.training.HParams(
hidden_size=2,
likelihood=likelihood,
mode=tf.estimator.ModeKeys.TRAIN,
num_mixtures=num_mixtures,
)
decoder_output = tf.random_normal([batch, rows, cols, hparams.hidden_size])
targets = tf.random_uniform([batch, height, width, channels],
minval=-1., maxval=1.)
output = common_image_attention.create_output(
decoder_output, rows, cols, targets, hparams)
if hparams.likelihood == common_image_attention.DistributionType.CAT:
self.assertEqual(output.shape, (batch, height, width, channels, depth))
else:
self.assertEqual(output.shape, (batch, height, width, depth))
if __name__ == "__main__":
tf.test.main()
| [
"tensorflow.random_uniform",
"tensorflow.contrib.training.HParams",
"tensorflow.test.main",
"tensorflow.random_normal"
] | tensor2tensor/layers/common_image_attention_test.py | [(30, 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(common_image_attention.DistributionType.DMOL, 5, 50)', '(common_image_attention.DistributionType.CAT, None, 256)'], {}), False, 'from absl.testing import parameterized\n'), (50, 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(common_image_attention.DistributionType.DMOL, 5, 50)', '(common_image_attention.DistributionType.CAT, None, 256)'], {}), False, 'from absl.testing import parameterized\n'), (78, 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(common_image_attention.DistributionType.DMOL, 5, 50)', '(common_image_attention.DistributionType.CAT, None, 256)'], {}), False, 'from absl.testing import parameterized\n'), (109, 'tensorflow.test.main', 'tf.test.main', ([], {}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'hidden_size': '(2)', 'likelihood': 'likelihood', 'mode': 'tf.estimator.ModeKeys.TRAIN', 'num_mixtures': 'num_mixtures'}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.random_uniform', 'tf.random_uniform', (['[batch, rows, cols, hparams.hidden_size]'], {'minval': '(-1.0)', 'maxval': '(1.0)'}), True, 'import tensorflow as tf\n'), (46, 'tensor2tensor.layers.common_image_attention.postprocess_image', 'common_image_attention.postprocess_image', (['inputs', 'rows', 'cols', 'hparams'], {}), False, 'from tensor2tensor.layers import common_image_attention\n'), (60, 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'block_raster_scan': '(True)', 'hidden_size': '(2)', 'likelihood': 'likelihood', 'mode': 'tf.estimator.ModeKeys.PREDICT', 'num_mixtures': 'num_mixtures', 'query_shape': '[block_length, block_width]'}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.random_uniform', 'tf.random_uniform', (['[batch, rows, cols, hparams.hidden_size]'], {'minval': '(-1.0)', 'maxval': '(1.0)'}), True, 'import tensorflow as tf\n'), (70, 'tensor2tensor.layers.common_image_attention.postprocess_image', 'common_image_attention.postprocess_image', (['inputs', 'rows', 'cols', 'hparams'], {}), False, 'from tensor2tensor.layers import common_image_attention\n'), (92, 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'hidden_size': '(2)', 'likelihood': 'likelihood', 'mode': 'tf.estimator.ModeKeys.TRAIN', 'num_mixtures': 'num_mixtures'}), True, 'import tensorflow as tf\n'), (98, 'tensorflow.random_normal', 'tf.random_normal', (['[batch, rows, cols, hparams.hidden_size]'], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.random_uniform', 'tf.random_uniform', (['[batch, height, width, channels]'], {'minval': '(-1.0)', 'maxval': '(1.0)'}), True, 'import tensorflow as tf\n'), (101, 'tensor2tensor.layers.common_image_attention.create_output', 'common_image_attention.create_output', (['decoder_output', 'rows', 'cols', 'targets', 'hparams'], {}), False, 'from tensor2tensor.layers import common_image_attention\n')] |
chenynCV/SENet | 08b22d9961e3b2a6eb1b8cd25d33287d10eaddd5 | import tensorflow as tf
import numpy as np
import os
from tensorpack import imgaug, dataset, ModelDesc, InputDesc
from tensorpack.dataflow import (PrefetchDataZMQ, BatchData)
from dataflow_input import MyDataFlow
import resnet_model
from IPython import embed
os.environ['CUDA_VISIBLE_DEVICES']= '0'
init_learning_rate = 0.1
batch_size = 128
image_size = 224
img_channels = 3
class_num = 365
weight_decay = 1e-4
momentum = 0.9
total_epochs = 30
iteration = 14089 // 1
# 128 * 14089 ~ 1,803,460
test_iteration = 10
def center_loss(features, label, alfa, nrof_classes):
"""Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
(http://ydwen.github.io/papers/WenECCV16.pdf)
"""
nrof_features = features.get_shape()[1]
centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
initializer=tf.constant_initializer(0), trainable=False)
label = tf.reshape(label, [-1])
centers_batch = tf.gather(centers, label)
diff = (1 - alfa) * (centers_batch - features)
centers = tf.scatter_sub(centers, label, diff)
# centers = tf.nn.l2_normalize(centers, 1, 1e-10, name='centers_norm')
loss = tf.reduce_mean(tf.square(features - centers_batch))
return loss, centers
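# Reference note: the code above keeps one center per class and, for each batch,
# moves the selected centers toward the features,
#   c_y <- c_y - (1 - alfa) * (c_y - f),
# while the returned loss is the mean squared difference between the features and
# their gathered class centers, following the cited center-loss paper.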
def focal_loss(onehot_labels, cls_preds,
alpha=0.25, gamma=2.0, name=None, scope=None):
"""Compute softmax focal loss between logits and onehot labels
logits and onehot_labels must have same shape [batchsize, num_classes] and
the same data type (float16, 32, 64)
Args:
onehot_labels: Each row labels[i] must be a valid probability distribution
cls_preds: Unscaled log probabilities
alpha: The hyperparameter for adjusting biased samples, default is 0.25
gamma: The hyperparameter for penalizing the easy labeled samples
name: A name for the operation (optional)
Returns:
        A 1-D tensor of length batch_size of same type as logits with sigmoid focal loss
"""
with tf.name_scope(scope, 'focal_loss', [cls_preds, onehot_labels]) as sc:
logits = tf.convert_to_tensor(cls_preds)
onehot_labels = tf.convert_to_tensor(onehot_labels)
precise_logits = tf.cast(logits, tf.float32) if (
logits.dtype == tf.float16) else logits
onehot_labels = tf.cast(onehot_labels, precise_logits.dtype)
predictions = tf.nn.sigmoid(logits)
predictions_pt = tf.where(tf.equal(onehot_labels, 1), predictions, 1.-predictions)
# add small value to avoid 0
epsilon = 1e-8
alpha_t = tf.scalar_mul(alpha, tf.ones_like(onehot_labels, dtype=tf.float32))
alpha_t = tf.where(tf.equal(onehot_labels, 1.0), alpha_t, 1-alpha_t)
losses = tf.reduce_sum(-alpha_t * tf.pow(1. - predictions_pt, gamma) * tf.log(predictions_pt+epsilon),
name=name, axis=1)
return losses
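# Reference note: per example, the code above computes the sigmoid focal loss
#   FL = - sum_c alpha_t * (1 - p_t)^gamma * log(p_t + epsilon)
# where p_c = sigmoid(logit_c), p_t = p_c for positive labels and 1 - p_c otherwise,
# and alpha_t = alpha for positive labels and 1 - alpha otherwise.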
def Evaluate(sess):
test_acc = 0.0
test_loss = 0.0
for it in range(test_iteration):
batch_data = next(scene_data_val)
test_batch_x = batch_data['data']
test_batch_y = batch_data['label']
test_feed_dict = {
x: test_batch_x,
label: test_batch_y,
learning_rate: epoch_learning_rate,
training_flag: False
}
loss_, acc_ = sess.run([Total_loss, accuracy], feed_dict=test_feed_dict)
test_loss += loss_
test_acc += acc_
test_loss /= test_iteration # average loss
test_acc /= test_iteration # average accuracy
summary = tf.Summary(value=[tf.Summary.Value(tag='test_loss', simple_value=test_loss),
tf.Summary.Value(tag='test_accuracy', simple_value=test_acc)])
return test_acc, test_loss, summary
def resnet_model_fn(inputs, training):
"""Our model_fn for ResNet to be used with our Estimator."""
network = resnet_model.imagenet_resnet_v2(
resnet_size=18, num_classes=class_num, mode='se', data_format=None)
inputs= network(inputs=inputs, is_training=training)
feat = tf.nn.l2_normalize(inputs, 1, 1e-10, name='feat')
inputs = tf.layers.dense(inputs=inputs, units=class_num)
# inputs = tf.layers.dense(inputs=feat, units=class_num)
inputs = tf.identity(inputs, 'final_dense')
return inputs, feat
# image_size = 32, img_channels = 3, class_num = 10 in cifar10
x = tf.placeholder(tf.float32, shape=[None, image_size, image_size, img_channels])
label = tf.placeholder(tf.float32, shape=[None,])
one_hot_labels = tf.one_hot(indices=tf.cast(label, tf.int32), depth=class_num)
training_flag = tf.placeholder(tf.bool)
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
logits, feat = resnet_model_fn(x, training=training_flag)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_labels, logits=logits))
Focal_loss = tf.reduce_mean(focal_loss(one_hot_labels, logits, alpha=0.5))
l2_loss = weight_decay * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
Center_loss, Centers = center_loss(feat, tf.cast(label, dtype=tf.int32), 0.95, class_num)
Total_loss = cost + l2_loss
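# Note: Focal_loss and Center_loss are computed above but are not added to Total_loss,
# so only the softmax cross-entropy plus the L2 weight-decay term is optimized below.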
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True)
# Batch norm requires update_ops to be added as a train_op dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(Total_loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# val_dir = '/data0/AIChallenger/ai_challenger_scene_validation_20170908/scene_validation_images_20170908/'
# annotations = '/data0/AIChallenger/ai_challenger_scene_validation_20170908/scene_validation_annotations_20170908.json'
# # a DataFlow you implement to produce [tensor1, tensor2, ..] lists from whatever sources:
# df = MyDataFlow(val_dir, annotations, is_training=False, batch_size=batch_size, img_size=image_size)
# # start 3 processes to run the dataflow in parallel
# df = PrefetchDataZMQ(df, nr_proc=10)
# df.reset_state()
# scene_data_val = df.get_data()
train_dir = '/data0/AIChallenger/data_256'
annotations = '/data0/AIChallenger/data_256.json'
# a DataFlow you implement to produce [tensor1, tensor2, ..] lists from whatever sources:
df = MyDataFlow(train_dir, annotations, is_training=True, batch_size=batch_size, img_size=image_size)
# start 3 processes to run the dataflow in parallel
df = PrefetchDataZMQ(df, nr_proc=10)
df.reset_state()
scene_data = df.get_data()
saver = tf.train.Saver(tf.global_variables())
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state('./model_pretrain')
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
print("loading checkpoint...")
saver.restore(sess, ckpt.model_checkpoint_path)
else:
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter('./logs_pretrain', sess.graph)
_x = x[:, :, :, ::-1]
tf.summary.image('x', _x, 4)
summary_op = tf.summary.merge_all()
epoch_learning_rate = init_learning_rate
for epoch in range(1, total_epochs + 1):
if epoch % 10 == 0 :
epoch_learning_rate = epoch_learning_rate / 10
train_acc = 0.0
train_loss = 0.0
for step in range(1, iteration + 1):
batch_data = next(scene_data)
batch_x = batch_data['data']
batch_y = batch_data['label']
train_feed_dict = {
x: batch_x,
label: batch_y,
learning_rate: epoch_learning_rate,
training_flag: True
}
_, batch_loss = sess.run([train_op, Total_loss], feed_dict=train_feed_dict)
batch_acc = accuracy.eval(feed_dict=train_feed_dict)
print("epoch: %d/%d, iter: %d/%d, batch_loss: %.4f, batch_acc: %.4f \n" % (
epoch, total_epochs, step, iteration, batch_loss, batch_acc))
train_loss += batch_loss
train_acc += batch_acc
if step % 30 == 0 :
summary_str = sess.run(summary_op, feed_dict=train_feed_dict)
summary_writer.add_summary(summary=summary_str, global_step=epoch)
summary_writer.flush()
train_loss /= iteration # average loss
train_acc /= iteration # average accuracy
train_summary = tf.Summary(value=[tf.Summary.Value(tag='train_loss', simple_value=train_loss),
tf.Summary.Value(tag='train_accuracy', simple_value=train_acc)])
# test_acc, test_loss, test_summary = Evaluate(sess)
summary_writer.add_summary(summary=train_summary, global_step=epoch)
# summary_writer.add_summary(summary=test_summary, global_step=epoch)
summary_writer.flush()
# line = "epoch: %d/%d, train_loss: %.4f, train_acc: %.4f, test_loss: %.4f, test_acc: %.4f \n" % (
# epoch, total_epochs, train_loss, train_acc, test_loss, test_acc)
line = "epoch: %d/%d, train_loss: %.4f, train_acc: %.4f \n" % (
epoch, total_epochs, train_loss, train_acc)
print(line)
with open('./logs_pretrain/logs.txt', 'a') as f:
f.write(line)
saver.save(sess=sess, save_path='./model_pretrain/model.ckpt')
| [
"tensorflow.convert_to_tensor",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.control_dependencies",
"tensorflow.cast",
"tensorflow.global_variables",
"tensorflow.equal",
"tensorflow.nn.l2_loss",
"tensorflow.get_collection",
"tensorflow.summary.image",
"tensorflow.layers.dense",
"tensorflow.gather",
"tensorflow.train.MomentumOptimizer",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.square",
"tensorflow.trainable_variables",
"tensorflow.argmax",
"tensorflow.nn.l2_normalize",
"tensorflow.nn.sigmoid",
"tensorflow.pow",
"tensorflow.identity",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.train.checkpoint_exists",
"tensorflow.train.get_checkpoint_state",
"tensorflow.summary.FileWriter",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.constant_initializer",
"tensorflow.Summary.Value",
"tensorflow.log",
"tensorflow.scatter_sub"
] | pre_train.py | [(115, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, image_size, image_size, img_channels]'}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]'}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {}), True, 'import tensorflow as tf\n'), (120, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""learning_rate"""'}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', ([], {'learning_rate': 'learning_rate', 'momentum': 'momentum', 'use_nesterov': '(True)'}), True, 'import tensorflow as tf\n'), (132, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), True, 'import tensorflow as tf\n'), (151, 'dataflow_input.MyDataFlow', 'MyDataFlow', (['train_dir', 'annotations'], {'is_training': '(True)', 'batch_size': 'batch_size', 'img_size': 'image_size'}), False, 'from dataflow_input import MyDataFlow\n'), (153, 'tensorpack.dataflow.PrefetchDataZMQ', 'PrefetchDataZMQ', (['df'], {'nr_proc': '(10)'}), False, 'from tensorpack.dataflow import PrefetchDataZMQ, BatchData\n'), (33, 'tensorflow.reshape', 'tf.reshape', (['label', '[-1]'], {}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.gather', 'tf.gather', (['centers', 'label'], {}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.scatter_sub', 'tf.scatter_sub', (['centers', 'label', 'diff'], {}), True, 'import tensorflow as tf\n'), (104, 'resnet_model.imagenet_resnet_v2', 'resnet_model.imagenet_resnet_v2', ([], {'resnet_size': '(18)', 'num_classes': 'class_num', 'mode': '"""se"""', 'data_format': 'None'}), False, 'import resnet_model\n'), (107, 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['inputs', '(1)', '(1e-10)'], {'name': '"""feat"""'}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'inputs', 'units': 'class_num'}), True, 'import tensorflow as tf\n'), (110, 'tensorflow.identity', 'tf.identity', (['inputs', '"""final_dense"""'], {}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'one_hot_labels', 'logits': 'logits'}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.cast', 'tf.cast', (['label'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), True, 'import tensorflow as tf\n'), (136, 'tensorflow.argmax', 'tf.argmax', (['logits', '(1)'], {}), True, 'import tensorflow as tf\n'), (136, 'tensorflow.argmax', 'tf.argmax', (['one_hot_labels', '(1)'], {}), True, 'import tensorflow as tf\n'), (137, 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.global_variables', 'tf.global_variables', ([], {}), True, 'import tensorflow as tf\n'), (159, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['"""./model_pretrain"""'], {}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""./logs_pretrain"""', 'sess.graph'], {}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.summary.image', 'tf.summary.image', (['"""x"""', '_x', '(4)'], {}), True, 'import 
tensorflow as tf\n'), (172, 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.square', 'tf.square', (['(features - centers_batch)'], {}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.name_scope', 'tf.name_scope', (['scope', '"""focal_loss"""', '[cls_preds, onehot_labels]'], {}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['cls_preds'], {}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['onehot_labels'], {}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.cast', 'tf.cast', (['onehot_labels', 'precise_logits.dtype'], {}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['logits'], {}), True, 'import tensorflow as tf\n'), (117, 'tensorflow.cast', 'tf.cast', (['label', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.train.checkpoint_exists', 'tf.train.checkpoint_exists', (['ckpt.model_checkpoint_path'], {}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.cast', 'tf.cast', (['logits', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.equal', 'tf.equal', (['onehot_labels', '(1)'], {}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.ones_like', 'tf.ones_like', (['onehot_labels'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.equal', 'tf.equal', (['onehot_labels', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), True, 'import tensorflow as tf\n'), (165, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.log', 'tf.log', (['(predictions_pt + epsilon)'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""test_loss"""', 'simple_value': 'test_loss'}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""test_accuracy"""', 'simple_value': 'test_acc'}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.pow', 'tf.pow', (['(1.0 - predictions_pt)', 'gamma'], {}), True, 'import tensorflow as tf\n'), (212, 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""train_loss"""', 'simple_value': 'train_loss'}), True, 'import tensorflow as tf\n'), (213, 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""train_accuracy"""', 'simple_value': 'train_acc'}), True, 'import tensorflow as tf\n')] |
covernal/mask-rcnn-tensorflow | 8d5e6c8adcf1ea5208f361ec29287696ff80cc98 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
# File: box_ops.py
import tensorflow as tf
from tensorpack.tfutils.scope_utils import under_name_scope
"""
This file is modified from
https://github.com/tensorflow/models/blob/master/object_detection/core/box_list_ops.py
"""
@under_name_scope()
def area(boxes):
"""
Args:
boxes: nx4 floatbox
Returns:
n
"""
x_min, y_min, x_max, y_max = tf.split(boxes, 4, axis=1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
@under_name_scope()
def pairwise_intersection(boxlist1, boxlist2):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: Nx4 floatbox
boxlist2: Mx4
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
x_min1, y_min1, x_max1, y_max1 = tf.split(boxlist1, 4, axis=1)
x_min2, y_min2, x_max2, y_max2 = tf.split(boxlist2, 4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
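# Illustrative note (added; not in the original file): along one axis the overlap of two
# intervals is max(0, min(high1, high2) - max(low1, low2)). For y-ranges [0, 4] and [2, 6]
# that is min(4, 6) - max(0, 2) = 2, while disjoint ranges clamp to 0 through the
# tf.maximum(0.0, ...) calls above, so empty intersections contribute zero area.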
@under_name_scope()
def pairwise_iou(boxlist1, boxlist2):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: Nx4 floatbox
boxlist2: Mx4
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
intersections = pairwise_intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = (
tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
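# Minimal usage sketch (added for illustration; not part of the original file). Boxes use
# the (x1, y1, x2, y2) convention of the functions above; the result is an N x M tensor
# to be evaluated inside a TF1-style session.
def _example_pairwise_iou():
    boxes_a = tf.constant([[0., 0., 2., 2.], [0., 0., 1., 1.]])  # N = 2
    boxes_b = tf.constant([[1., 1., 3., 3.]])                      # M = 1
    # Expected values: the first box overlaps 1x1 over a union of 4 + 4 - 1 = 7 (~0.143);
    # the second box only touches a corner, so its IoU is exactly 0.
    return pairwise_iou(boxes_a, boxes_b)  # shape [2, 1]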
@under_name_scope()
def pairwise_iou_batch(proposal_boxes, gt_boxes, orig_gt_counts, batch_size):
"""Computes pairwise intersection-over-union between box collections.
Args:
proposal_boxes: K x 5 (batch_index, x1, y1, x2, y2)
gt_boxes: BS x MaxNumGTs x 4
orig_gt_counts: BS
Returns:
list of length BS, each element is output of pairwise_iou: N x M
(where N is number of boxes for image and M is number of GTs for image)
"""
prefix = "pairwise_iou_batch"
# For each image index, extract a ?x4 boxlist and gt_boxlist
per_images_iou = []
for batch_idx in range(batch_size):
box_mask_for_image = tf.equal(proposal_boxes[:, 0], batch_idx)
single_image_boxes = tf.boolean_mask(proposal_boxes, box_mask_for_image)
single_image_boxes = single_image_boxes[:, 1:]
single_image_gt_boxes = gt_boxes[batch_idx, 0:orig_gt_counts[batch_idx], :]
single_image_iou = pairwise_iou(single_image_boxes, single_image_gt_boxes)
per_images_iou.append(single_image_iou)
return per_images_iou
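# Illustrative note (added; not in the original file): column 0 of proposal_boxes is the
# image index within the batch, e.g. rows [0, x1, y1, x2, y2] and [1, x1, y1, x2, y2]
# belong to the first and second image respectively. pairwise_iou_batch slices proposals
# and ground truths per image and returns one N_i x M_i IoU matrix for each of the BS images.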
| [
"tensorflow.boolean_mask",
"tensorflow.truediv",
"tensorflow.transpose",
"tensorflow.maximum",
"tensorflow.equal",
"tensorflow.squeeze",
"tensorflow.expand_dims",
"tensorflow.zeros_like",
"tensorflow.split"
] | MaskRCNN/utils/box_ops.py | [(17, 'tensorpack.tfutils.scope_utils.under_name_scope', 'under_name_scope', ([], {}), False, 'from tensorpack.tfutils.scope_utils import under_name_scope\n'), (30, 'tensorpack.tfutils.scope_utils.under_name_scope', 'under_name_scope', ([], {}), False, 'from tensorpack.tfutils.scope_utils import under_name_scope\n'), (52, 'tensorpack.tfutils.scope_utils.under_name_scope', 'under_name_scope', ([], {}), False, 'from tensorpack.tfutils.scope_utils import under_name_scope\n'), (74, 'tensorpack.tfutils.scope_utils.under_name_scope', 'under_name_scope', ([], {}), False, 'from tensorpack.tfutils.scope_utils import under_name_scope\n'), (26, 'tensorflow.split', 'tf.split', (['boxes', '(4)'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.squeeze', 'tf.squeeze', (['((y_max - y_min) * (x_max - x_min))', '[1]'], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.split', 'tf.split', (['boxlist1', '(4)'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.split', 'tf.split', (['boxlist2', '(4)'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(all_pairs_min_ymax - all_pairs_max_ymin)'], {}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(all_pairs_min_xmax - all_pairs_max_xmin)'], {}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.transpose', 'tf.transpose', (['y_max2'], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.transpose', 'tf.transpose', (['y_min2'], {}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.transpose', 'tf.transpose', (['x_max2'], {}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.transpose', 'tf.transpose', (['x_min2'], {}), True, 'import tensorflow as tf\n'), (69, 'tensorflow.equal', 'tf.equal', (['intersections', '(0.0)'], {}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.zeros_like', 'tf.zeros_like', (['intersections'], {}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.truediv', 'tf.truediv', (['intersections', 'unions'], {}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.equal', 'tf.equal', (['proposal_boxes[:, (0)]', 'batch_idx'], {}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.boolean_mask', 'tf.boolean_mask', (['proposal_boxes', 'box_mask_for_image'], {}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.expand_dims', 'tf.expand_dims', (['areas1', '(1)'], {}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.expand_dims', 'tf.expand_dims', (['areas2', '(0)'], {}), True, 'import tensorflow as tf\n')] |
RobRomijnders/bbvi | 613c4c9ba79f0b40488fe1d18a0b7f3c023b639f | import numpy as np
import tensorflow as tf
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from scipy.stats import multivariate_normal
class DataLoader:
"""
Small wrapper to abstract all code relating to loading data
"""
def __init__(self, batch_size=16):
# Wine data set
dataset = load_wine()
# For now, just create a binary classification problem
selection = dataset.target < 2
X, y = dataset.data[selection], dataset.target[selection]
y = self.random_flip(y, 0.1)
# Dummy data set
# Uncomment these lines to create a dummy data sets of two highly separable point clouds
# num_feat = 8
# num_half = 500
# X1 = multivariate_normal(5 * np.ones((8,)), np.eye(num_feat)).rvs(num_half)
# X2 = multivariate_normal(-5 * np.ones((8,)), np.eye(num_feat)).rvs(num_half)
#
# X = np.concatenate((X1, X2), axis=0)
# y = np.concatenate((np.zeros((num_half)), np.ones((num_half))), axis=0)
self.data = dict()
self.data['X_train'], self.data['X_test'], self.data['y_train'], self.data['y_test'] = train_test_split(X, y)
self.mean, self.std = None, None
# self._normalize_data()
self.batch_size = batch_size
@property
def num_features(self):
return self.data['X_train'].shape[1]
@staticmethod
def random_flip(data, portion):
"""
Randomly flip a portion of the binary labels. To spice up the problem a bit :)
:param data:
:param portion:
:return:
"""
# Establish the sizes
num_samples = len(data)
num_flip = int(num_samples * portion)
# Select random indices to flip
idx = np.random.choice(num_samples, num_flip, replace=False)
# Do the flipping
data[idx] = (data[idx] - 1/2) * -1 + 1/2
return data
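    # Added note: the expression above maps 0 -> 1 and 1 -> 0, since
    # (0 - 1/2) * -1 + 1/2 = 1 and (1 - 1/2) * -1 + 1/2 = 0, i.e. it flips exactly the
    # randomly selected binary labels and leaves the rest untouched.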
def _normalize_data(self):
# Calculate the first and second moment from the train data
self.mean = np.mean(self.data['X_train'], axis=0)
self.std = np.std(self.data['X_train'], axis=0)
# Standardize the training data
self.data['X_train'] -= self.mean
self.data['X_train'] /= self.std
# Standardize the test data
self.data['X_test'] -= self.mean
self.data['X_test'] /= self.std
def sample_batch(self, data_split='train'):
# Sample from batch
datasplit_size = len(self.data['y_' + data_split])
idx = np.random.choice(datasplit_size, self.batch_size, replace=False)
return self.data['X_' + data_split][idx], self.data['y_' + data_split][idx]
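# Minimal usage sketch (added for illustration; not part of the original file).
def _example_dataloader_usage():
    loader = DataLoader(batch_size=16)
    x_batch, y_batch = loader.sample_batch('train')
    # x_batch: (16, num_features) features, y_batch: (16,) binary labels
    return x_batch.shape, y_batch.shape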
def get_random_normal_variable(name, shape, dtype=tf.float32, num_samples=13):
"""
Create weight tensors with factorized Gaussian approximation of each element.
Define the standard deviation behind a softplus to enforce positivity
Credits for code inspiration: https://github.com/DeNeutoy/bayesian-rnn/
:param name: Name for the corresponding tf variables
:param shape: shape for the variable. Note that weights are sampled and thus have +1 dimension
:param dtype: dtype for the variables involved
:param num_samples: number of samples from the variational distro over W
:return:
"""
# Inverse of a softplus function, so that the value of the standard deviation
# will be equal to what the user specifies, but we can still enforce positivity
# by wrapping the standard deviation in the softplus function.
# standard_dev = tf.log(tf.exp(standard_dev) - 1.0) * tf.ones(shape)
# it's important to initialize variances with care, otherwise the model takes too long to converge
sigma_min = 1-1/10
sigma_max = 1+1/10
rho_max_init = tf.log(tf.exp(sigma_max) - 1.0)
rho_min_init = tf.log(tf.exp(sigma_min) - 1.0)
std_init = tf.random_uniform_initializer(rho_min_init, rho_max_init)
# Initialize the mean
mean = tf.get_variable(name + "_mean", shape, dtype=dtype)
# Initialize the standard deviation
pre_sigma = tf.get_variable(name + "_standard_deviation",
shape,
initializer=std_init,
dtype=dtype)
standard_deviation = tf.nn.softplus(pre_sigma) + 1e-5
# The famous reparametrization formula for the factorized Gaussian
noise = tf.random_normal([num_samples] + shape, 0.0, 1.0, dtype)
weights = mean + standard_deviation * noise
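    # Added note: for shape [8, 1] and the default 13 samples, `weights` has shape
    # [13, 8, 1]; each sample equals mean + softplus(pre_sigma) * eps with eps ~ N(0, 1),
    # which is the reparametrization trick described in the docstring above.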
return weights, mean, standard_deviation, pre_sigma, noise | [
"tensorflow.get_variable",
"tensorflow.random_uniform_initializer",
"numpy.random.choice",
"sklearn.datasets.load_wine",
"sklearn.model_selection.train_test_split",
"tensorflow.exp",
"numpy.std",
"numpy.mean",
"tensorflow.nn.softplus",
"tensorflow.random_normal"
] | bbvi/util.py | [(110, 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['rho_min_init', 'rho_max_init'], {}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.get_variable', 'tf.get_variable', (["(name + '_mean')", 'shape'], {'dtype': 'dtype'}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.get_variable', 'tf.get_variable', (["(name + '_standard_deviation')", 'shape'], {'initializer': 'std_init', 'dtype': 'dtype'}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.random_normal', 'tf.random_normal', (['([num_samples] + shape)', '(0.0)', '(1.0)', 'dtype'], {}), True, 'import tensorflow as tf\n'), (14, 'sklearn.datasets.load_wine', 'load_wine', ([], {}), False, 'from sklearn.datasets import load_wine\n'), (33, 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {}), False, 'from sklearn.model_selection import train_test_split\n'), (57, 'numpy.random.choice', 'np.random.choice', (['num_samples', 'num_flip'], {'replace': '(False)'}), True, 'import numpy as np\n'), (65, 'numpy.mean', 'np.mean', (["self.data['X_train']"], {'axis': '(0)'}), True, 'import numpy as np\n'), (66, 'numpy.std', 'np.std', (["self.data['X_train']"], {'axis': '(0)'}), True, 'import numpy as np\n'), (79, 'numpy.random.choice', 'np.random.choice', (['datasplit_size', 'self.batch_size'], {'replace': '(False)'}), True, 'import numpy as np\n'), (121, 'tensorflow.nn.softplus', 'tf.nn.softplus', (['pre_sigma'], {}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.exp', 'tf.exp', (['sigma_max'], {}), True, 'import tensorflow as tf\n'), (109, 'tensorflow.exp', 'tf.exp', (['sigma_min'], {}), True, 'import tensorflow as tf\n')] |
tongni1975/TensorFlow-Machine-Learning-Cookbook-Second-Edition | 4f57ea4ad79c8111fb29bad3da5d151858c6a050 | # Data gathering
#----------------------------------
#
# This script shows how to access
# the various data sets we will need
# Data Gathering
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Iris Data
from sklearn import datasets
iris = datasets.load_iris()
print(len(iris.data))
print(len(iris.target))
print(iris.data[0])
print(set(iris.target))
# Low Birthrate Data
import requests
birthdata_url = 'https://github.com/nfmcclure/tensorflow_cookbook/raw/master/01_Introduction/07_Working_with_Data_Sources/birthweight_data/birthweight.dat'
birth_file = requests.get(birthdata_url)
birth_data = birth_file.text.split('\r\n')
birth_header = birth_data[0].split('\t')
birth_data = [[float(x) for x in y.split('\t') if len(x)>=1] for y in birth_data[1:] if len(y)>=1]
print(len(birth_data))
print(len(birth_data[0]))
# Housing Price Data
import requests
housing_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data'
housing_header = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
housing_file = requests.get(housing_url)
housing_data = [[float(x) for x in y.split(' ') if len(x)>=1] for y in housing_file.text.split('\n') if len(y)>=1]
print(len(housing_data))
print(len(housing_data[0]))
# MNIST Handwriting Data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print(len(mnist.train.images))
print(len(mnist.test.images))
print(len(mnist.validation.images))
print(mnist.train.labels[1,:])
# CIFAR-10 Image Category Dataset
# The CIFAR-10 data ( https://www.cs.toronto.edu/~kriz/cifar.html ) contains 60,000 32x32 color images of 10 classes.
# It was collected by Alex Krizhevsky, Vinod Nair, and Geoffrey Hinton.
# Alex Krizhevsky maintains the page referenced here.
# This is such a common dataset that there are built-in functions in TensorFlow to access this data.
# Running this command requires an internet connection and a few minutes to download all the images.
(X_train, y_train), (X_test, y_test) = tf.contrib.keras.datasets.cifar10.load_data()
print(X_train.shape)
print(y_train.shape)
print(y_train[0,]) # this is a frog
# Plot the 0-th image (a frog)
from PIL import Image
img = Image.fromarray(X_train[0,:,:,:])
plt.imshow(img)
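# Added for illustration: map the integer label above to its CIFAR-10 class name.
cifar10_classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
                   'dog', 'frog', 'horse', 'ship', 'truck']
print(cifar10_classes[y_train[0][0]])  # prints 'frog'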
# Ham/Spam Text Data
import requests
import io
from zipfile import ZipFile
# Get/read zip file
zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'
r = requests.get(zip_url)
z = ZipFile(io.BytesIO(r.content))
file = z.read('SMSSpamCollection')
# Format Data
text_data = file.decode()
text_data = text_data.encode('ascii',errors='ignore')
text_data = text_data.decode().split('\n')
text_data = [x.split('\t') for x in text_data if len(x)>=1]
[text_data_target, text_data_train] = [list(x) for x in zip(*text_data)]
print(len(text_data_train))
print(set(text_data_target))
print(text_data_train[1])
# Movie Review Data
import requests
import io
import tarfile
movie_data_url = 'http://www.cs.cornell.edu/people/pabo/movie-review-data/rt-polaritydata.tar.gz'
r = requests.get(movie_data_url)
# Stream data into temp object
stream_data = io.BytesIO(r.content)
tmp = io.BytesIO()
while True:
s = stream_data.read(16384)
if not s:
break
tmp.write(s)
stream_data.close()
tmp.seek(0)
# Extract tar file
tar_file = tarfile.open(fileobj=tmp, mode="r:gz")
pos = tar_file.extractfile('rt-polaritydata/rt-polarity.pos')
neg = tar_file.extractfile('rt-polaritydata/rt-polarity.neg')
# Save pos/neg reviews
pos_data = []
for line in pos:
pos_data.append(line.decode('ISO-8859-1').encode('ascii',errors='ignore').decode())
neg_data = []
for line in neg:
neg_data.append(line.decode('ISO-8859-1').encode('ascii',errors='ignore').decode())
tar_file.close()
print(len(pos_data))
print(len(neg_data))
print(neg_data[0])
# The Works of Shakespeare Data
import requests
shakespeare_url = 'http://www.gutenberg.org/cache/epub/100/pg100.txt'
# Get Shakespeare text
response = requests.get(shakespeare_url)
shakespeare_file = response.content
# Decode binary into string
shakespeare_text = shakespeare_file.decode('utf-8')
# Drop first few descriptive paragraphs.
shakespeare_text = shakespeare_text[7675:]
print(len(shakespeare_text))
# English-German Sentence Translation Data
import requests
import io
from zipfile import ZipFile
sentence_url = 'http://www.manythings.org/anki/deu-eng.zip'
r = requests.get(sentence_url)
z = ZipFile(io.BytesIO(r.content))
file = z.read('deu.txt')
# Format Data
eng_ger_data = file.decode()
eng_ger_data = eng_ger_data.encode('ascii',errors='ignore')
eng_ger_data = eng_ger_data.decode().split('\n')
eng_ger_data = [x.split('\t') for x in eng_ger_data if len(x)>=1]
[english_sentence, german_sentence] = [list(x) for x in zip(*eng_ger_data)]
print(len(english_sentence))
print(len(german_sentence))
print(eng_ger_data[10])
| [
"matplotlib.pyplot.imshow",
"sklearn.datasets.load_iris",
"tensorflow.contrib.keras.datasets.cifar10.load_data",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.python.framework.ops.reset_default_graph"
] | Chapter01/01_Introduction/07_Working_with_Data_Sources/07_data_gathering.py | [(11, 'tensorflow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (17, 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), False, 'from sklearn import datasets\n'), (27, 'requests.get', 'requests.get', (['birthdata_url'], {}), False, 'import requests\n'), (40, 'requests.get', 'requests.get', (['housing_url'], {}), False, 'import requests\n'), (49, 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data/"""'], {'one_hot': '(True)'}), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), (62, 'tensorflow.contrib.keras.datasets.cifar10.load_data', 'tf.contrib.keras.datasets.cifar10.load_data', ([], {}), True, 'import tensorflow as tf\n'), (70, 'PIL.Image.fromarray', 'Image.fromarray', (['X_train[(0), :, :, :]'], {}), False, 'from PIL import Image\n'), (71, 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), True, 'import matplotlib.pyplot as plt\n'), (81, 'requests.get', 'requests.get', (['zip_url'], {}), False, 'import requests\n'), (101, 'requests.get', 'requests.get', (['movie_data_url'], {}), False, 'import requests\n'), (103, 'io.BytesIO', 'io.BytesIO', (['r.content'], {}), False, 'import io\n'), (104, 'io.BytesIO', 'io.BytesIO', ([], {}), False, 'import io\n'), (113, 'tarfile.open', 'tarfile.open', ([], {'fileobj': 'tmp', 'mode': '"""r:gz"""'}), False, 'import tarfile\n'), (135, 'requests.get', 'requests.get', (['shakespeare_url'], {}), False, 'import requests\n'), (149, 'requests.get', 'requests.get', (['sentence_url'], {}), False, 'import requests\n'), (82, 'io.BytesIO', 'io.BytesIO', (['r.content'], {}), False, 'import io\n'), (150, 'io.BytesIO', 'io.BytesIO', (['r.content'], {}), False, 'import io\n')] |
P2333/Reverse-Cross-Entropy | 2514af4a7fbd52423e4cac0da3ea58ac92b841c0 | from __future__ import division
from __future__ import absolute_import
import six
import cifar_input
import mnist_input
import resnet_model_cifar
import resnet_model_mnist
import t_sne
import numpy as np
import tensorflow as tf
import attacks
import sys
sys.path.append('..')
sys.path.append('./../attacks')
import time
import copy
from scipy.io import loadmat
from scipy.misc import imsave
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset', '', 'cifar10 or mnist.')
tf.app.flags.DEFINE_string('mode', 'train', 'train or eval.')
tf.app.flags.DEFINE_string('train_data_path', '',
'Filepattern for training data.')
tf.app.flags.DEFINE_string('eval_data_path', '',
'Filepattern for eval data')
tf.app.flags.DEFINE_string('train_dir', '',
'Directory to keep training outputs.')
tf.app.flags.DEFINE_string('eval_dir', '',
'Directory to keep eval outputs.')
tf.app.flags.DEFINE_integer('eval_batch_count', 10,
'Number of batches to eval.')
tf.app.flags.DEFINE_bool('eval_once', False,
'Whether evaluate the model only once.')
tf.app.flags.DEFINE_string('log_root', '',
'Directory to keep the checkpoints. Should be a '
'parent directory of FLAGS.train_dir/eval_dir.')
tf.app.flags.DEFINE_integer('num_gpus', 0,
'Number of gpus used for training. (0 or 1)')
tf.app.flags.DEFINE_integer('num_residual_units', 5,
'num of residual units')
tf.app.flags.DEFINE_string('Optimizer', 'mom',
'The optimizer used to train the model.')
tf.app.flags.DEFINE_bool('RCE_train', False,
'Whether use RCE to train the model.')
tf.app.flags.DEFINE_string('attack_method', 'fgsm',
'The attacking method used')
tf.app.flags.DEFINE_float('eps', 0.01,
'The eps in attacking methods.')
tf.app.flags.DEFINE_string('save_pwd', None,
'')
epoch_jsma = 100
num_classes = 10
if FLAGS.dataset == 'cifar10':
image_size = 32
num_channel = 3
model_name = resnet_model_cifar
input_name = cifar_input
elif FLAGS.dataset == 'mnist':
image_size = 28
num_channel = 1
model_name = resnet_model_mnist
input_name = mnist_input
else:
print('Unrecognized dataset')
image_size = None
num_channel = None
model_name = None
input_name = None
if FLAGS.RCE_train == True:
sigma2 = 0.1 / 0.26
f1 = 'RCE'
else:
sigma2 = 1.0 / 0.26
f1 = 'CE'
def models(hps, images, RCE_train, logits=False, tsne_logits=False):
model = model_name.ResNet(hps, images, FLAGS.mode, Reuse=True)
model.build_graph()
op = model.predictions.op
logit, = op.inputs
if RCE_train==True:
logit = -logit
if tsne_logits==True:
return tf.nn.softmax(logit), model.t_SNE_logits
if logits==True:
return tf.nn.softmax(logit), logit
return tf.nn.softmax(logit)
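# Added note: when the network was trained with RCE, the code above negates the raw
# logits before the softmax, so argmax over the returned probabilities corresponds to
# argmin over the original logits; the same convention is used by models_carlini below,
# which keeps predictions and confidences comparable between CE- and RCE-trained checkpoints.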
class models_carlini:
def __init__(self,hps):
self.image_size = image_size
        self.num_channels = num_channel############MNIST and CIFAR10 are different here
self.num_labels = num_classes
self.hps = hps
def predict(self,images,tsne_logits=False):
model = model_name.ResNet(self.hps, images, FLAGS.mode, Reuse=True)
model.build_graph()
op = model.predictions.op
logit, = op.inputs
if FLAGS.RCE_train==True:
logit = -logit
if tsne_logits==True:
return logit,model.t_SNE_logits
return logit
def adv_craft_func(hps, images, method, eps=0.01,RCE_train=False, target_labels=None):
if method=='fgsm':
print('Attacking method is fgsm')
adversarial_sample = attacks.fgsm.fgsm(models, images, hps, RCE_train,
eps=eps, epochs=1, clip_min=-0.5, clip_max=0.5)
elif method=='random':
print('Attacking method is random')
adversarial_sample = tf.clip_by_value(images + tf.random_uniform((hps.batch_size,image_size,image_size,num_channel),
minval=-eps, maxval=eps), clip_value_min=-0.5, clip_value_max=0.5)
elif method=='bim':
print('Attacking method is bim')
adversarial_sample = attacks.fgsm.fgsm(models, images, hps, RCE_train,
eps=eps/10, epochs=10, clip_min=-0.5, clip_max=0.5)
elif method=='tgsm':
print('Attacking method is tgsm')
adversarial_sample = attacks.tgsm.tgsm(models, images, hps, RCE_train, y=None,
eps=eps/10, epochs=10, clip_min=-0.5, clip_max=0.5)
elif method=='jsma':
print('Attacking method is jsma')
if target_labels==None:
print('Target label is the argmin label')
model_target_y = models(hps, images, FLAGS.RCE_train, logits=False)
target_y64 = tf.argmin(model_target_y,axis=1)
else:
target_y64=target_labels
target_y = tf.cast(target_y64, tf.int32)
adversarial_sample = attacks.jsma.jsma(models, images, hps, RCE_train, target_y,epochs=epoch_jsma, eps=eps,
clip_min=-0.5, clip_max=0.5, pair=False, min_proba=0.0)
elif method=='smda':
print('Attacking method is smda')
if target_labels==None:
print('Target label is the argmin label')
model_target_y = models(hps, images, FLAGS.RCE_train, logits=False)
target_y64 = tf.argmin(model_target_y,axis=1)
else:
target_y64=target_labels
target_y = tf.cast(target_y64, tf.int32)
adversarial_sample = attacks.smda.smda(models, images, hps, RCE_train, target_y, epochs=epoch_jsma, eps=eps,
clip_min=-0.5, clip_max=0.5, min_proba=0.0)
else:
print('Not recognized method')
adversarial_sample = None
return adversarial_sample
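# Minimal usage sketch (added for illustration), reusing tensors built elsewhere in this file:
#   adv_images = adv_craft_func(hps, images, 'bim', eps=0.04, RCE_train=FLAGS.RCE_train)
# 'fgsm' takes a single step of size eps, 'bim'/'tgsm' take 10 steps of eps/10, 'random'
# adds clipped uniform noise, and 'jsma'/'smda' run epoch_jsma iterations towards the
# argmin (least-likely) target label when no target_labels are given.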
def tSNE_visual(hps,num_batch):
# Construct graph
images, labels = input_name.build_input(
FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode) # FLAGS.mode='attack', batch_size=200
Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False)
Res.build_graph()
saver = tf.train.Saver()
adv_images = adv_craft_func(hps, images, FLAGS.attack_method, eps=FLAGS.eps, RCE_train=FLAGS.RCE_train)
model_nor = model_name.ResNet(hps, images, FLAGS.mode, Reuse=True)
model_nor.build_graph()
model_adv = model_name.ResNet(hps, adv_images, FLAGS.mode, Reuse=True)
model_adv.build_graph()
# Open session and restore checkpoint
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
tf.train.start_queue_runners(sess)
sess.run(tf.global_variables_initializer())
ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) # Choose dir according to rt
tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
saver.restore(sess, ckpt_state.model_checkpoint_path)
logits_nor = model_nor.t_SNE_logits
logits_adv = model_adv.t_SNE_logits
dim_logits = logits_nor.shape[1]
if hps.batch_size!=logits_nor.shape[0]:
print('Error!!!!!')
return
logits_all = np.reshape(np.array([]),(0,dim_logits))
labels_all = np.array([])
is_adv_all = np.array([])
    #make the number of adv examples match the per-class count
if FLAGS.attack_method == 'fgsm' or FLAGS.attack_method == 'tgsm':
num_adv = int(hps.batch_size/10)
print('num_adv is %d'%(num_adv))
else:
num_adv = hps.batch_size
for i in six.moves.range(num_batch):
print(i)
(logits_part_nor, logits_part_adv, labels_part) = sess.run([logits_nor, logits_adv, tf.argmax(labels, 1)])
logits_all = np.concatenate((logits_all, logits_part_nor), axis=0)
labels_all = np.concatenate((labels_all, labels_part), axis=0)
is_adv_all = np.concatenate((is_adv_all, np.zeros(hps.batch_size)), axis=0)
logits_all = np.concatenate((logits_all, logits_part_adv[:num_adv]), axis=0)
labels_all = np.concatenate((labels_all, labels_part[:num_adv]), axis=0)
is_adv_all = np.concatenate((is_adv_all, np.ones(num_adv)), axis=0)
tsne_return = t_sne.tsne(logits_all, no_dims=2, initial_dims=60, perplexity=30.0)
# Save results
if FLAGS.RCE_train == True:
f1 = 'RCE'
else:
f1 = 'CE'
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNE_' + f1, tsne_return)
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNElabels_' + f1, labels_all)
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNEisadv_' + f1, is_adv_all)
return None
def tSNE_visual_carliniLi(hps, num_batch):
# Construct graph
images, labels = input_name.build_input(
FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode) # FLAGS.mode='attack', batch_size=200
Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False)
Res.build_graph()
saver = tf.train.Saver()
# Open session and restore checkpoint
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
tf.train.start_queue_runners(sess)
sess.run(tf.global_variables_initializer())
ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) # Choose dir according to rt
tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
saver.restore(sess, ckpt_state.model_checkpoint_path)
model_carlini = models_carlini(hps)
if FLAGS.attack_method == 'carliniLi':
attack_carlini = attacks.carliniLi.CarliniLi(sess, model_carlini, largest_const=10 ** -3)
elif FLAGS.attack_method == 'carliniL2':
attack_carlini = attacks.carliniL2.CarliniL2(sess, model_carlini, batch_size=10, max_iterations=1000, confidence=0,binary_search_steps=3)
adv_image = tf.placeholder(tf.float32, shape=[hps.batch_size, image_size, image_size, num_channel])
_, logits_nor = model_carlini.predict(images, tsne_logits=True)
_, logits_adv = model_carlini.predict(adv_image, tsne_logits=True)
dim_logits = logits_nor.shape[1]
if hps.batch_size != logits_nor.shape[0]:
print('Error!!!!!')
return
logits_all = np.reshape(np.array([]), (0, dim_logits))
labels_all = np.array([])
is_adv_all = np.array([])
# make the num of adv the same as per class
# if FLAGS.attack_method == 'fgsm' or FLAGS.attack_method == 'tgsm':
# num_adv = int(hps.batch_size/10)
# print('num_adv is %d'%(num_adv))
# else:
# num_adv = hps.batch_size
num_adv = hps.batch_size
for i in six.moves.range(num_batch):
print(i)
input_data = sess.run(images)
target_label = sess.run(labels)
adv = attack_carlini.attack(input_data, target_label)
(logits_part_nor, logits_part_adv, labels_part) = sess.run([logits_nor, logits_adv, tf.argmax(labels, 1)],
feed_dict={adv_image: adv})
logits_all = np.concatenate((logits_all, logits_part_nor), axis=0)
labels_all = np.concatenate((labels_all, labels_part), axis=0)
is_adv_all = np.concatenate((is_adv_all, np.zeros(hps.batch_size)), axis=0)
logits_all = np.concatenate((logits_all, logits_part_adv[:num_adv]), axis=0)
labels_all = np.concatenate((labels_all, labels_part[:num_adv]), axis=0)
is_adv_all = np.concatenate((is_adv_all, np.ones(num_adv)), axis=0)
tsne_return = t_sne.tsne(logits_all, no_dims=2, initial_dims=60, perplexity=30.0)
# Save results
if FLAGS.RCE_train == True:
f1 = 'RCE'
else:
f1 = 'CE'
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNE_' + f1, tsne_return)
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNElabels_' + f1, labels_all)
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNEisadv_' + f1, is_adv_all)
return None
def apply_attack_carlini(hps):
# Construct graph
images, labels = input_name.build_input(
FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode) # FLAGS.mode='attack', batch_size=200
Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False)
Res.build_graph()
saver = tf.train.Saver()
# Open session and restore checkpoint
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
tf.train.start_queue_runners(sess)
sess.run(tf.global_variables_initializer())
ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) # Choose dir according to rt
tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
saver.restore(sess, ckpt_state.model_checkpoint_path)
num_sample = hps.batch_size * FLAGS.eval_batch_count
# Initialize results to save
entropy_test_adv_all = np.array([])
confidence_test_adv_all = np.array([])
entropy_test_nor_all = np.array([])
confidence_test_nor_all = np.array([])
logits_adv_all = np.reshape(np.array([]), (0, 64))
logits_nor_all = np.reshape(np.array([]), (0, 64))
labels_adv_all = np.array([])
labels_true_all = np.array([])
labels_nor_all = np.array([])
L2_distance = np.array([])
nor_img_all = np.reshape(np.array([]), (0, image_size,image_size,num_channel))
adv_img_all = np.reshape(np.array([]), (0, image_size,image_size,num_channel))
print('Num of sample per eps is %d' % (num_sample))
#Construct carlini adversarial samples
model_carlini_adv = models_carlini(hps)
#Construct predictions
image = tf.placeholder(tf.float32,shape=[hps.batch_size, image_size, image_size,
                                           num_channel])############MNIST and CIFAR10 are different here
adv_image = tf.placeholder(tf.float32,shape=[hps.batch_size, image_size, image_size,
                                           num_channel])############MNIST and CIFAR10 are different here
predict = tf.placeholder(tf.float32,shape=[hps.batch_size, 10])
logit_nor,tsne_logit_nor = model_carlini_adv.predict(image,tsne_logits=True)
logit_adv,tsne_logit_adv = model_carlini_adv.predict(adv_image,tsne_logits=True)
predict_nor = tf.nn.softmax(logit_nor)
predict_adv = tf.nn.softmax(logit_adv)
# Calculate entropy
argmax_y_onehot = tf.one_hot(tf.argmax(predict, 1), 10, on_value=0.0, off_value=1.0, axis=-1)
normalized_y_nonmaximal = tf.reduce_sum(predict * argmax_y_onehot, 1)
entropy = tf.reduce_sum(-tf.log(predict) * predict * argmax_y_onehot,1) / normalized_y_nonmaximal + tf.log(normalized_y_nonmaximal)
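    # Added note: the two lines above compute the non-maximal entropy (non-ME): the entropy
    # of the predicted distribution restricted to the non-predicted classes and renormalized,
    # i.e. -sum_{i != argmax} (p_i / p_nm) * log(p_i / p_nm) with p_nm the total non-maximal
    # probability. For example, p = [0.8, 0.1, 0.1] gives log(2) ~= 0.693 nats.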
for k in range(1):
result_dict = loadmat('kernel_para_'+FLAGS.dataset+'/kernel1000_for_attack_' + f1 + '.mat')
result_dict_median = loadmat('kernel_para_'+FLAGS.dataset+'/kernel1000_median_for_attack_' + f1 + '.mat')
# e_mean = result_dict['mean_logits_' + f1] # 10X64
# e_invcovar = result_dict['inv_covar_' + f1] # 64X64X10
e_kernel_train = result_dict['kernel_'+f1+'_for_attack'] #100X64X10
e_median = result_dict_median['median_out'] # 10X1
if FLAGS.attack_method == 'carliniL2':
attack1 = attacks.carliniL2.CarliniL2(sess, model_carlini_adv, batch_size=10, max_iterations=10,targeted=True,
confidence=0, initial_const=1.0,binary_search_steps=9)
attack2 = None
elif FLAGS.attack_method == 'carliniL2_highcon':
attack1 = attacks.carliniL2.CarliniL2(sess, model_carlini_adv, batch_size=10, max_iterations=10000,targeted=True,
confidence=10, initial_const=1.0,binary_search_steps=9)
attack2 = None
elif FLAGS.attack_method == 'carliniL2_highden':
attack1 = attacks.carliniL2.CarliniL2(sess, model_carlini_adv, batch_size=1,
max_iterations=5000,
targeted=True,
initial_const=1.0,
confidence=0, binary_search_steps=3)
attack2 = attacks.carliniL2_specific.CarliniL2_specific(sess, model_carlini_adv, batch_size=1,
max_iterations=10000,
targeted=True,
initial_const=1.0,
confidence=0, binary_search_steps=8,
extra_loss=True
, e_kernel_train=e_kernel_train,
e_median=e_median,
sigma2=sigma2)
elif FLAGS.attack_method == 'carliniL2_specific':
attack1 = attacks.carliniL2.CarliniL2(sess, model_carlini_adv, batch_size=1,
max_iterations=5000,
targeted=True,
initial_const=10.0,
confidence=5, binary_search_steps=3)
attack2 = attacks.carliniL2_specific.CarliniL2_specific(sess, model_carlini_adv, batch_size=1,
max_iterations=10000,
targeted=True,
initial_const=100.0,
confidence=5, binary_search_steps=9, extra_loss=True
, e_kernel_train=e_kernel_train ,
e_median = e_median,
sigma2 = sigma2)
else:
print('Error!!!!')
attack1 = None
attack2 = None
success = 0
efficient = 0
L2_distance_print = 0
for i in six.moves.range(FLAGS.eval_batch_count):
time_start = time.time()
(nor_img,true_label) = sess.run([images,labels])
#Crafting target labels
target_lab = np.zeros((hps.batch_size, 10))
for j in range(hps.batch_size):
r = np.random.random_integers(0, 9)
while r == np.argmax(true_label[j]):
r = np.random.random_integers(0, 9)
target_lab[j, r] = 1
(predict_NOR, logits_part_nor) = sess.run(
[predict_nor, tsne_logit_nor],
feed_dict={image: nor_img}
)
#Attack1, craft adversarial samples in oblivious attack
adv_img,succ = attack1.attack(nor_img, target_lab,predict_NOR)
        #Attack2, craft adversarial samples in white-box attack
if FLAGS.attack_method == 'carliniL2_specific' or FLAGS.attack_method == 'carliniL2_highden':
if succ[0] == 1:
is_succ = 'Success'
else:
is_succ = 'Fail'
print('Finish attack 1. The %d batch in total %d(%f sec) %s' % (
i, FLAGS.eval_batch_count, time.time() - time_start,is_succ))
time_start = time.time()
adv_img, succ, log_density_ratio = attack2.attack(nor_img, adv_img, target_lab,predict_NOR)
if succ[0] == 1:
is_succ = 'Success'
else:
is_succ = 'Fail'
print('Finish attack 2. The %d batch in total %d(%f sec) %s' % (
i, FLAGS.eval_batch_count, time.time() - time_start, is_succ))
else:
print('The %d batch in total %d, the eps = %f (%f sec)' % (
i, FLAGS.eval_batch_count, 0.05 * k, time.time() - time_start))
#Local logits
(predict_ADV,logits_part_adv) = sess.run(
[predict_adv, tsne_logit_adv],feed_dict={adv_image:adv_img}
)
#Local entropy and confidence for nor_img
(entropy_test_nor_help,labels_nor_help,confidence_test_nor_help) = sess.run(
[entropy,tf.argmax(predict,axis=1),tf.reduce_max(predict, axis=1)],feed_dict={predict:predict_NOR}
)
# Local entropy and confidence for adv_img
(entropy_test_adv_help, labels_adv_help, confidence_test_adv_help) = sess.run(
[entropy, tf.argmax(predict, axis=1), tf.reduce_max(predict, axis=1)], feed_dict={predict: predict_ADV}
)
if FLAGS.attack_method == 'carliniL2_specific' or FLAGS.attack_method == 'carliniL2_highden':
print('Log-density-ratio in attacking function of nor/adv is %f'%np.sum(log_density_ratio))
m_tsne_logits_adv = (copy.copy(logits_part_adv)).reshape((1, 64))
m_tsne_logits_adv = np.repeat(m_tsne_logits_adv,100,axis=0)
kernel_train = (copy.copy(e_kernel_train[:,:,np.argmax(target_lab)])).reshape((100,64))
log_density_ratio2 = -np.log(1e-30+np.mean(np.exp(-np.sum(np.square(m_tsne_logits_adv
- kernel_train), axis=1) / sigma2),
axis=0)) + np.log(e_median[np.argmax(target_lab)])
# m_tsne_logits_adv = (copy.copy(logits_part_adv-e_mean[np.argmax(target_lab)])).reshape((64,1))
# inter_mat_adv = np.matmul(e_invcovar[:,:,np.argmax(target_lab)].reshape((64,64)), m_tsne_logits_adv)
# m_tsne_logits_nor = (copy.copy(logits_part_nor-e_mean[labels_nor_help])).reshape((64,1))
# inter_mat_nor = np.matmul(e_invcovar[:,:,labels_nor_help].reshape((64,64)), m_tsne_logits_nor)
# log_density_ratio2 = np.matmul(m_tsne_logits_adv.reshape((1,64)), inter_mat_adv) \
# - np.matmul(m_tsne_logits_nor.reshape((1,64)), inter_mat_nor)
#log_density_ratio2 = np.matmul(m_tsne_logits_adv.reshape((1, 64)), inter_mat_adv)+e_median[np.argmax(target_lab)]
print('Log-density-ratio in saving results of nor/adv is %f'%np.sum(log_density_ratio2))
entropy_test_adv_all = np.concatenate((entropy_test_adv_all,entropy_test_adv_help),axis=0)
confidence_test_adv_all = np.concatenate((confidence_test_adv_all,confidence_test_adv_help),axis=0)
entropy_test_nor_all = np.concatenate((entropy_test_nor_all, entropy_test_nor_help), axis=0)
confidence_test_nor_all = np.concatenate((confidence_test_nor_all, confidence_test_nor_help), axis=0)
logits_nor_all = np.concatenate((logits_nor_all, logits_part_nor), axis=0)
labels_nor_all = np.concatenate((labels_nor_all, labels_nor_help), axis=0)
logits_adv_all = np.concatenate((logits_adv_all,logits_part_adv),axis=0)
labels_adv_all = np.concatenate((labels_adv_all, labels_adv_help), axis=0)
labels_true_all = np.concatenate((labels_true_all, np.argmax(true_label,axis=1)), axis=0)
L2_distance = np.concatenate((L2_distance,np.sqrt(np.mean(np.square(nor_img-adv_img),axis=(1,2,3)))), axis=0)
nor_img_all = np.concatenate((nor_img_all,nor_img),axis=0)
adv_img_all = np.concatenate((adv_img_all,adv_img),axis=0)
        #Efficient indexes mark samples that are correctly classified when clean and misclassified after a successful attack
efficient_index = succ*np.equal(np.argmax(true_label, axis=1),labels_nor_help)
if FLAGS.attack_method != 'carliniL2_specific'or FLAGS.attack_method == 'carliniL2_highden':
print('Num of attacking success is %d'%(np.sum(succ)))
efficient += np.sum(efficient_index)
L2_distance_print += np.sum(efficient_index*np.sqrt(np.mean(np.square(nor_img - adv_img), axis=(1, 2, 3))), axis=0)
L2_distance_print = L2_distance_print/efficient
k_index_begin = k*num_sample
k_index_end = (k+1)*num_sample
# Show local results
precision_nor = np.mean(np.equal(labels_nor_all[k_index_begin:k_index_end],labels_true_all[k_index_begin:k_index_end]))
precision_adv = np.mean(np.equal(labels_adv_all[k_index_begin:k_index_end],labels_true_all[k_index_begin:k_index_end]))
mean_confidence_nor = np.mean(confidence_test_nor_all[k_index_begin:k_index_end])
mean_confidence_adv = np.mean(confidence_test_adv_all[k_index_begin:k_index_end])
mean_entropy_nor = np.mean(entropy_test_nor_all[k_index_begin:k_index_end])
mean_entropy_adv = np.mean(entropy_test_adv_all[k_index_begin:k_index_end])
print('Precision on nor images is %f, on adv images is %f' % (precision_nor, precision_adv))
print('Confidence on nor images is %f, on adv images is %f' % (mean_confidence_nor, mean_confidence_adv))
print('non-ME on nor images is %f, on adv images is %f' % (mean_entropy_nor, mean_entropy_adv))
print('Average L2-distance between nor and adv imgs is %f'%(L2_distance_print))
print('Total success num of attack 1 is %d'%(success))
print('Total efficient num of attack 1 is %d' % (efficient))
# # Save results
# np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+'/entropy_nor', entropy_test_nor_all)
# np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+ '/confidence_nor', confidence_test_nor_all)
# np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+'/entropy_adv', entropy_test_adv_all)
# np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+ '/confidence_adv', confidence_test_adv_all)
# np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+ '/logits_nor', logits_nor_all)
# np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+ '/logits_adv', logits_adv_all)
# np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+'/labels_nor', labels_nor_all)
# np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+'/labels_adv', labels_adv_all)
# np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+'/labels_true', labels_true_all)
# np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/L2_distance', L2_distance)
# np.save(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/nor_img.npy', nor_img_all)
# np.save(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/adv_img.npy', adv_img_all)
# #Save img
# nor_img_all = nor_img_all + 0.5
# adv_img_all = adv_img_all + 0.5
# noise_img_all = 0.5 * (adv_img_all - nor_img_all + 1.0)
# if FLAGS.dataset=='cifar10':
# for i in range(nor_img_all.shape[0]):
# imsave(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/nor_img/nor_img_' + str(i) + '.png', nor_img_all[i])
# imsave(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/adv_img/adv_img_' + str(i) + '.png', adv_img_all[i])
# imsave(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/noise_img/noise_img_' + str(i) + '.png', noise_img_all[i])
# elif FLAGS.dataset=='mnist':
# for i in range(nor_img_all.shape[0]):
# imsave(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/nor_img/nor_img_' + str(i) + '.png', nor_img_all[i,:,:,0])
# imsave(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/adv_img/adv_img_' + str(i) + '.png', adv_img_all[i,:,:,0])
# imsave(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/noise_img/noise_img_' + str(i) + '.png', noise_img_all[i, :, :, 0])
return None
def apply_attack_loop(hps):
#Construct graph
images, labels = input_name.build_input(
FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode)#FLAGS.mode='attack', batch_size=200
Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False)
Res.build_graph()
saver = tf.train.Saver()
#Open session and restore checkpoint
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
tf.train.start_queue_runners(sess)
ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) # Choose dir according to rt
tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
num_sample = hps.batch_size*FLAGS.eval_batch_count
# Initialize results to save
entropy_test_adv_all = np.array([])
confidence_test_adv_all = np.array([])
entropy_test_nor_all = np.array([])
confidence_test_nor_all = np.array([])
logits_adv_all = np.reshape(np.array([]), (0, 64))
logits_nor_all = np.reshape(np.array([]), (0, 64))
labels_adv_all = np.array([])
labels_true_all = np.array([])
labels_nor_all = np.array([])
L2_distance = np.array([])
nor_img_all = np.reshape(np.array([]), (0, image_size, image_size, num_channel))
adv_img_all = np.reshape(np.array([]), (0, image_size, image_size, num_channel))
print('Num of sample per eps is %d' % (num_sample))
# Construct predictions
image = tf.placeholder(tf.float32, shape=[hps.batch_size, image_size, image_size,
                                              num_channel]) ############MNIST and CIFAR10 are different here
adv_image = tf.placeholder(tf.float32, shape=[hps.batch_size, image_size, image_size,
                                              num_channel]) ############MNIST and CIFAR10 are different here
predict = tf.placeholder(tf.float32, shape=[hps.batch_size, 10])
predict_nor, tsne_logit_nor = models(hps, image, FLAGS.RCE_train, logits=False, tsne_logits=True)
predict_adv, tsne_logit_adv = models(hps, adv_image, FLAGS.RCE_train, logits=False, tsne_logits=True)
# Calculate entropy
argmax_y_onehot = tf.one_hot(tf.argmax(predict, 1), 10, on_value=0.0, off_value=1.0, axis=-1)
normalized_y_nonmaximal = tf.reduce_sum(predict * argmax_y_onehot, 1)
entropy = tf.reduce_sum(-tf.log(predict) * predict * argmax_y_onehot, 1) / normalized_y_nonmaximal + tf.log(
normalized_y_nonmaximal)
for k in range(10):
adv_image_craft = adv_craft_func(hps, image, FLAGS.attack_method, eps=0.02 * k + 0.02, RCE_train=FLAGS.RCE_train)
#adv_image_craft = adv_craft_func(hps, image, FLAGS.attack_method, eps=0.04,RCE_train=FLAGS.RCE_train)
sess.run(tf.global_variables_initializer())
saver.restore(sess, ckpt_state.model_checkpoint_path)
for i in six.moves.range(FLAGS.eval_batch_count):
time_start = time.time()
(nor_img,true_label) = sess.run([images,labels])
adv_img = sess.run(adv_image_craft,feed_dict={image:nor_img})
# Local logits
(predict_NOR, predict_ADV, logits_part_nor, logits_part_adv) = sess.run(
[predict_nor, predict_adv, tsne_logit_nor, tsne_logit_adv],
feed_dict={image: nor_img, adv_image: adv_img}
)
# Local entropy and confidence for nor_img
(entropy_test_nor_help, labels_nor_help, confidence_test_nor_help) = sess.run(
[entropy, tf.argmax(predict, axis=1), tf.reduce_max(predict, axis=1)], feed_dict={predict: predict_NOR}
)
# Local entropy and confidence for adv_img
(entropy_test_adv_help, labels_adv_help, confidence_test_adv_help) = sess.run(
[entropy, tf.argmax(predict, axis=1), tf.reduce_max(predict, axis=1)], feed_dict={predict: predict_ADV}
)
entropy_test_adv_all = np.concatenate((entropy_test_adv_all, entropy_test_adv_help), axis=0)
confidence_test_adv_all = np.concatenate((confidence_test_adv_all, confidence_test_adv_help), axis=0)
entropy_test_nor_all = np.concatenate((entropy_test_nor_all, entropy_test_nor_help), axis=0)
confidence_test_nor_all = np.concatenate((confidence_test_nor_all, confidence_test_nor_help), axis=0)
logits_nor_all = np.concatenate((logits_nor_all, logits_part_nor), axis=0)
labels_nor_all = np.concatenate((labels_nor_all, labels_nor_help), axis=0)
logits_adv_all = np.concatenate((logits_adv_all, logits_part_adv), axis=0)
labels_adv_all = np.concatenate((labels_adv_all, labels_adv_help), axis=0)
labels_true_all = np.concatenate((labels_true_all, np.argmax(true_label, axis=1)), axis=0)
L2_distance = np.concatenate((L2_distance,np.sqrt(np.mean(np.square(nor_img-adv_img),axis=(1,2,3)))), axis=0)
nor_img_all = np.concatenate((nor_img_all, nor_img), axis=0)
adv_img_all = np.concatenate((adv_img_all, adv_img), axis=0)
print('The %d batch in total %d, the eps = %f (%f sec)' % (
i, FLAGS.eval_batch_count, 0.02 * k + 0.02, time.time() - time_start))
k_index_begin = k * num_sample
k_index_end = (k + 1) * num_sample
# Show local results
precision_nor = np.mean(
np.equal(labels_nor_all[k_index_begin:k_index_end], labels_true_all[k_index_begin:k_index_end]))
precision_adv = np.mean(
np.equal(labels_adv_all[k_index_begin:k_index_end], labels_true_all[k_index_begin:k_index_end]))
mean_confidence_nor = np.mean(confidence_test_nor_all[k_index_begin:k_index_end])
mean_confidence_adv = np.mean(confidence_test_adv_all[k_index_begin:k_index_end])
mean_entropy_nor = np.mean(entropy_test_nor_all[k_index_begin:k_index_end])
mean_entropy_adv = np.mean(entropy_test_adv_all[k_index_begin:k_index_end])
print('Precision on nor images is %f, on adv images is %f' % (precision_nor, precision_adv))
print('Confidence on nor images is %f, on adv images is %f' % (mean_confidence_nor, mean_confidence_adv))
print('non-ME on nor images is %f, on adv images is %f' % (mean_entropy_nor, mean_entropy_adv))
print('Average L2-distance between nor and adv imgs is %f'%(np.mean(L2_distance)))
# Save results
if FLAGS.save_pwd ==None:
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/entropy_nor', entropy_test_nor_all)
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/confidence_nor', confidence_test_nor_all)
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/entropy_adv', entropy_test_adv_all)
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/confidence_adv', confidence_test_adv_all)
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/logits_nor', logits_nor_all)
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/logits_adv', logits_adv_all)
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/labels_nor', labels_nor_all)
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/labels_adv', labels_adv_all)
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/labels_true', labels_true_all)
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/L2_distance', L2_distance)
np.save(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/nor_img.npy', nor_img_all)
np.save(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/adv_img.npy', adv_img_all)
else:
np.savetxt(FLAGS.save_pwd + '/entropy_nor', entropy_test_nor_all)
np.savetxt(FLAGS.save_pwd + '/confidence_nor', confidence_test_nor_all)
np.savetxt(FLAGS.save_pwd + '/entropy_adv', entropy_test_adv_all)
np.savetxt(FLAGS.save_pwd + '/confidence_adv', confidence_test_adv_all)
np.savetxt(FLAGS.save_pwd + '/logits_nor', logits_nor_all)
np.savetxt(FLAGS.save_pwd + '/logits_adv', logits_adv_all)
np.savetxt(FLAGS.save_pwd + '/labels_nor', labels_nor_all)
np.savetxt(FLAGS.save_pwd + '/labels_adv', labels_adv_all)
np.savetxt(FLAGS.save_pwd + '/labels_true', labels_true_all)
np.savetxt(FLAGS.save_pwd + '/L2_distance', L2_distance)
np.save(FLAGS.save_pwd + '/nor_img.npy', nor_img_all)
np.save(FLAGS.save_pwd + '/adv_img.npy', adv_img_all)
return None
def main(_):
print('attacking method is %s' % (FLAGS.attack_method))
print('mode is %s'%(FLAGS.mode))
if FLAGS.attack_method == 'carliniL2' or FLAGS.attack_method == 'carliniL2_highcon' \
or FLAGS.attack_method == 'carliniL2_specific' or FLAGS.attack_method == 'carliniL2_highden':
is_carliniL2 = True
else:
is_carliniL2 = False
if FLAGS.attack_method == 'jsma' or FLAGS.attack_method == 'smda'\
or FLAGS.attack_method == 'carliniL2_specific' or FLAGS.attack_method == 'carliniL2_highden':
batch_size = 1
num_batch = 1000
elif FLAGS.attack_method == 'fgsm' or FLAGS.attack_method == 'tgsm' or FLAGS.attack_method == 'bim' or FLAGS.attack_method == 'random':
batch_size = 200
num_batch = 5
elif FLAGS.attack_method == 'carliniL2'or FLAGS.attack_method == 'carliniL2_highcon':
batch_size = 10
num_batch = 100
else:
print('Undefined attacking method')
batch_size = None
num_batch = None
hps = model_name.HParams(batch_size=batch_size,
num_classes=num_classes,
min_lrn_rate=0.0001,
lrn_rate=0.1,
num_residual_units=FLAGS.num_residual_units,
use_bottleneck=False,
weight_decay_rate=0.0002,
relu_leakiness=0.1,
optimizer=FLAGS.Optimizer,
RCE_train=FLAGS.RCE_train)
if FLAGS.mode == 'attack':
if is_carliniL2 == True:
apply_attack_carlini(hps)
else:
apply_attack_loop(hps)
elif FLAGS.mode == 'tSNE_logits':
if is_carliniL2 == True:
tSNE_visual_carliniLi(hps,num_batch)
else:
tSNE_visual(hps,num_batch)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run() | [
"tensorflow.reduce_sum",
"tensorflow.cast",
"numpy.concatenate",
"tensorflow.app.flags.DEFINE_string",
"numpy.mean",
"tensorflow.argmin",
"numpy.square",
"scipy.io.loadmat",
"tensorflow.app.flags.DEFINE_integer",
"numpy.save",
"tensorflow.ConfigProto",
"numpy.argmax",
"tensorflow.logging.set_verbosity",
"tensorflow.train.Saver",
"tensorflow.argmax",
"numpy.repeat",
"numpy.zeros",
"tensorflow.app.run",
"tensorflow.app.flags.DEFINE_bool",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.logging.info",
"numpy.equal",
"numpy.random.random_integers",
"numpy.savetxt",
"numpy.array",
"numpy.sum",
"tensorflow.train.get_checkpoint_state",
"tensorflow.reduce_max",
"tensorflow.nn.softmax",
"tensorflow.train.start_queue_runners",
"numpy.ones",
"tensorflow.log",
"tensorflow.app.flags.DEFINE_float",
"tensorflow.random_uniform"
] | test_adv.py | [(14, 'sys.path.append', 'sys.path.append', (['""".."""'], {}), False, 'import sys\n'), (15, 'sys.path.append', 'sys.path.append', (['"""./../attacks"""'], {}), False, 'import sys\n'), (22, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset"""', '""""""', '"""cifar10 or cifar100."""'], {}), True, 'import tensorflow as tf\n'), (23, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""mode"""', '"""train"""', '"""train or eval."""'], {}), True, 'import tensorflow as tf\n'), (24, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""train_data_path"""', '""""""', '"""Filepattern for training data."""'], {}), True, 'import tensorflow as tf\n'), (26, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""eval_data_path"""', '""""""', '"""Filepattern for eval data"""'], {}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""train_dir"""', '""""""', '"""Directory to keep training outputs."""'], {}), True, 'import tensorflow as tf\n'), (30, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""eval_dir"""', '""""""', '"""Directory to keep eval outputs."""'], {}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""eval_batch_count"""', '(10)', '"""Number of batches to eval."""'], {}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.app.flags.DEFINE_bool', 'tf.app.flags.DEFINE_bool', (['"""eval_once"""', '(False)', '"""Whether evaluate the model only once."""'], {}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""log_root"""', '""""""', '"""Directory to keep the checkpoints. Should be a parent directory of FLAGS.train_dir/eval_dir."""'], {}), True, 'import tensorflow as tf\n'), (39, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_gpus"""', '(0)', '"""Number of gpus used for training. 
(0 or 1)"""'], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_residual_units"""', '(5)', '"""num of residual units"""'], {}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""Optimizer"""', '"""mom"""', '"""The optimizer used to train the model."""'], {}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.app.flags.DEFINE_bool', 'tf.app.flags.DEFINE_bool', (['"""RCE_train"""', '(False)', '"""Whether use RCE to train the model."""'], {}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""attack_method"""', '"""fgsm"""', '"""The attacking method used"""'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""eps"""', '(0.01)', '"""The eps in attacking methods."""'], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""save_pwd"""', 'None', '""""""'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logit'], {}), True, 'import tensorflow as tf\n'), (169, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', (['sess'], {}), True, 'import tensorflow as tf\n'), (181, 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['FLAGS.log_root'], {}), True, 'import tensorflow as tf\n'), (182, 'tensorflow.logging.info', 'tf.logging.info', (['"""Loading checkpoint %s"""', 'ckpt_state.model_checkpoint_path'], {}), True, 'import tensorflow as tf\n'), (193, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (194, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (204, 'six.moves.range', 'six.moves.range', (['num_batch'], {}), False, 'import six\n'), (214, 't_sne.tsne', 't_sne.tsne', (['logits_all'], {'no_dims': '(2)', 'initial_dims': '(60)', 'perplexity': '(30.0)'}), False, 'import t_sne\n'), (221, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNE_' + f1)", 'tsne_return'], {}), True, 'import numpy as np\n'), (222, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNElabels_' + f1)", 'labels_all'], {}), True, 'import numpy as np\n'), (223, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNEisadv_' + f1)", 'is_adv_all'], {}), True, 'import numpy as np\n'), (232, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (235, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', (['sess'], {}), True, 'import tensorflow as tf\n'), (238, 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['FLAGS.log_root'], {}), True, 'import tensorflow as tf\n'), (239, 'tensorflow.logging.info', 'tf.logging.info', (['"""Loading checkpoint %s"""', 'ckpt_state.model_checkpoint_path'], {}), True, 'import tensorflow as tf\n'), (247, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[hps.batch_size, image_size, image_size, num_channel]'}), True, 'import tensorflow as tf\n'), (257, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (258, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (269, 'six.moves.range', 
'six.moves.range', (['num_batch'], {}), False, 'import six\n'), (284, 't_sne.tsne', 't_sne.tsne', (['logits_all'], {'no_dims': '(2)', 'initial_dims': '(60)', 'perplexity': '(30.0)'}), False, 'import t_sne\n'), (291, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNE_' + f1)", 'tsne_return'], {}), True, 'import numpy as np\n'), (292, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNElabels_' + f1)", 'labels_all'], {}), True, 'import numpy as np\n'), (293, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNEisadv_' + f1)", 'is_adv_all'], {}), True, 'import numpy as np\n'), (302, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (306, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', (['sess'], {}), True, 'import tensorflow as tf\n'), (309, 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['FLAGS.log_root'], {}), True, 'import tensorflow as tf\n'), (310, 'tensorflow.logging.info', 'tf.logging.info', (['"""Loading checkpoint %s"""', 'ckpt_state.model_checkpoint_path'], {}), True, 'import tensorflow as tf\n'), (316, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (317, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (318, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (319, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (322, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (323, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (324, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (325, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (335, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[hps.batch_size, image_size, image_size, num_channel]'}), True, 'import tensorflow as tf\n'), (337, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[hps.batch_size, image_size, image_size, num_channel]'}), True, 'import tensorflow as tf\n'), (339, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[hps.batch_size, 10]'}), True, 'import tensorflow as tf\n'), (342, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logit_nor'], {}), True, 'import tensorflow as tf\n'), (343, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logit_adv'], {}), True, 'import tensorflow as tf\n'), (347, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(predict * argmax_y_onehot)', '(1)'], {}), True, 'import tensorflow as tf\n'), (555, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (559, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', (['sess'], {}), True, 'import tensorflow as tf\n'), (561, 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['FLAGS.log_root'], {}), True, 'import tensorflow as tf\n'), (562, 'tensorflow.logging.info', 'tf.logging.info', (['"""Loading checkpoint %s"""', 'ckpt_state.model_checkpoint_path'], {}), True, 'import tensorflow as tf\n'), (567, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (568, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (569, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (570, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (573, 'numpy.array', 'np.array', 
(['[]'], {}), True, 'import numpy as np\n'), (574, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (575, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (576, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (582, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[hps.batch_size, image_size, image_size, num_channel]'}), True, 'import tensorflow as tf\n'), (584, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[hps.batch_size, image_size, image_size, num_channel]'}), True, 'import tensorflow as tf\n'), (586, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[hps.batch_size, 10]'}), True, 'import tensorflow as tf\n'), (592, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(predict * argmax_y_onehot)', '(1)'], {}), True, 'import tensorflow as tf\n'), (735, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), True, 'import tensorflow as tf\n'), (736, 'tensorflow.app.run', 'tf.app.run', ([], {}), True, 'import tensorflow as tf\n'), (119, 'attacks.fgsm.fgsm', 'attacks.fgsm.fgsm', (['models', 'images', 'hps', 'RCE_train'], {'eps': 'eps', 'epochs': '(1)', 'clip_min': '(-0.5)', 'clip_max': '(0.5)'}), False, 'import attacks\n'), (179, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (192, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (207, 'numpy.concatenate', 'np.concatenate', (['(logits_all, logits_part_nor)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (208, 'numpy.concatenate', 'np.concatenate', (['(labels_all, labels_part)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (210, 'numpy.concatenate', 'np.concatenate', (['(logits_all, logits_part_adv[:num_adv])'], {'axis': '(0)'}), True, 'import numpy as np\n'), (211, 'numpy.concatenate', 'np.concatenate', (['(labels_all, labels_part[:num_adv])'], {'axis': '(0)'}), True, 'import numpy as np\n'), (236, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (244, 'attacks.carliniLi.CarliniLi', 'attacks.carliniLi.CarliniLi', (['sess', 'model_carlini'], {'largest_const': '(10 ** -3)'}), False, 'import attacks\n'), (256, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (277, 'numpy.concatenate', 'np.concatenate', (['(logits_all, logits_part_nor)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (278, 'numpy.concatenate', 'np.concatenate', (['(labels_all, labels_part)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (280, 'numpy.concatenate', 'np.concatenate', (['(logits_all, logits_part_adv[:num_adv])'], {'axis': '(0)'}), True, 'import numpy as np\n'), (281, 'numpy.concatenate', 'np.concatenate', (['(labels_all, labels_part[:num_adv])'], {'axis': '(0)'}), True, 'import numpy as np\n'), (307, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (320, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (321, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (326, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (327, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (346, 'tensorflow.argmax', 'tf.argmax', (['predict', '(1)'], {}), True, 'import tensorflow as tf\n'), (348, 'tensorflow.log', 'tf.log', (['normalized_y_nonmaximal'], 
{}), True, 'import tensorflow as tf\n'), (351, 'scipy.io.loadmat', 'loadmat', (["('kernel_para_' + FLAGS.dataset + '/kernel1000_for_attack_' + f1 + '.mat')"], {}), False, 'from scipy.io import loadmat\n'), (352, 'scipy.io.loadmat', 'loadmat', (["('kernel_para_' + FLAGS.dataset + '/kernel1000_median_for_attack_' + f1 +\n '.mat')"], {}), False, 'from scipy.io import loadmat\n'), (407, 'six.moves.range', 'six.moves.range', (['FLAGS.eval_batch_count'], {}), False, 'import six\n'), (506, 'numpy.mean', 'np.mean', (['confidence_test_nor_all[k_index_begin:k_index_end]'], {}), True, 'import numpy as np\n'), (507, 'numpy.mean', 'np.mean', (['confidence_test_adv_all[k_index_begin:k_index_end]'], {}), True, 'import numpy as np\n'), (508, 'numpy.mean', 'np.mean', (['entropy_test_nor_all[k_index_begin:k_index_end]'], {}), True, 'import numpy as np\n'), (509, 'numpy.mean', 'np.mean', (['entropy_test_adv_all[k_index_begin:k_index_end]'], {}), True, 'import numpy as np\n'), (571, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (572, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (577, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (578, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (591, 'tensorflow.argmax', 'tf.argmax', (['predict', '(1)'], {}), True, 'import tensorflow as tf\n'), (593, 'tensorflow.log', 'tf.log', (['normalized_y_nonmaximal'], {}), True, 'import tensorflow as tf\n'), (602, 'six.moves.range', 'six.moves.range', (['FLAGS.eval_batch_count'], {}), False, 'import six\n'), (645, 'numpy.mean', 'np.mean', (['confidence_test_nor_all[k_index_begin:k_index_end]'], {}), True, 'import numpy as np\n'), (646, 'numpy.mean', 'np.mean', (['confidence_test_adv_all[k_index_begin:k_index_end]'], {}), True, 'import numpy as np\n'), (647, 'numpy.mean', 'np.mean', (['entropy_test_nor_all[k_index_begin:k_index_end]'], {}), True, 'import numpy as np\n'), (648, 'numpy.mean', 'np.mean', (['entropy_test_adv_all[k_index_begin:k_index_end]'], {}), True, 'import numpy as np\n'), (657, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/entropy_nor')", 'entropy_test_nor_all'], {}), True, 'import numpy as np\n'), (658, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/confidence_nor')", 'confidence_test_nor_all'], {}), True, 'import numpy as np\n'), (659, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/entropy_adv')", 'entropy_test_adv_all'], {}), True, 'import numpy as np\n'), (660, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/confidence_adv')", 'confidence_test_adv_all'], {}), True, 'import numpy as np\n'), (661, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/logits_nor')", 'logits_nor_all'], {}), True, 'import numpy as np\n'), (662, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/logits_adv')", 'logits_adv_all'], {}), True, 'import numpy as np\n'), (663, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/labels_nor')", 'labels_nor_all'], {}), True, 'import numpy as np\n'), (664, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/labels_adv')", 'labels_adv_all'], {}), True, 'import numpy as np\n'), (665, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + 
'/' + f1 + '/labels_true')", 'labels_true_all'], {}), True, 'import numpy as np\n'), (666, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/L2_distance')", 'L2_distance'], {}), True, 'import numpy as np\n'), (667, 'numpy.save', 'np.save', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/nor_img.npy')", 'nor_img_all'], {}), True, 'import numpy as np\n'), (668, 'numpy.save', 'np.save', (["(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/adv_img.npy')", 'adv_img_all'], {}), True, 'import numpy as np\n'), (670, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.save_pwd + '/entropy_nor')", 'entropy_test_nor_all'], {}), True, 'import numpy as np\n'), (671, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.save_pwd + '/confidence_nor')", 'confidence_test_nor_all'], {}), True, 'import numpy as np\n'), (672, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.save_pwd + '/entropy_adv')", 'entropy_test_adv_all'], {}), True, 'import numpy as np\n'), (673, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.save_pwd + '/confidence_adv')", 'confidence_test_adv_all'], {}), True, 'import numpy as np\n'), (674, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.save_pwd + '/logits_nor')", 'logits_nor_all'], {}), True, 'import numpy as np\n'), (675, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.save_pwd + '/logits_adv')", 'logits_adv_all'], {}), True, 'import numpy as np\n'), (676, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.save_pwd + '/labels_nor')", 'labels_nor_all'], {}), True, 'import numpy as np\n'), (677, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.save_pwd + '/labels_adv')", 'labels_adv_all'], {}), True, 'import numpy as np\n'), (678, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.save_pwd + '/labels_true')", 'labels_true_all'], {}), True, 'import numpy as np\n'), (679, 'numpy.savetxt', 'np.savetxt', (["(FLAGS.save_pwd + '/L2_distance')", 'L2_distance'], {}), True, 'import numpy as np\n'), (680, 'numpy.save', 'np.save', (["(FLAGS.save_pwd + '/nor_img.npy')", 'nor_img_all'], {}), True, 'import numpy as np\n'), (681, 'numpy.save', 'np.save', (["(FLAGS.save_pwd + '/adv_img.npy')", 'adv_img_all'], {}), True, 'import numpy as np\n'), (93, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logit'], {}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logit'], {}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), True, 'import tensorflow as tf\n'), (246, 'attacks.carliniL2.CarliniL2', 'attacks.carliniL2.CarliniL2', (['sess', 'model_carlini'], {'batch_size': '(10)', 'max_iterations': '(1000)', 'confidence': '(0)', 'binary_search_steps': '(3)'}), False, 'import attacks\n'), (305, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), True, 'import tensorflow as tf\n'), (359, 'attacks.carliniL2.CarliniL2', 'attacks.carliniL2.CarliniL2', (['sess', 'model_carlini_adv'], {'batch_size': '(10)', 'max_iterations': '(10)', 'targeted': '(True)', 'confidence': '(0)', 'initial_const': '(1.0)', 'binary_search_steps': '(9)'}), False, 'import attacks\n'), (408, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (412, 'numpy.zeros', 'np.zeros', (['(hps.batch_size, 10)'], {}), True, 'import numpy as np\n'), (479, 'numpy.concatenate', 'np.concatenate', (['(entropy_test_adv_all, entropy_test_adv_help)'], {'axis': '(0)'}), True, 'import numpy as np\n'), 
(480, 'numpy.concatenate', 'np.concatenate', (['(confidence_test_adv_all, confidence_test_adv_help)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (481, 'numpy.concatenate', 'np.concatenate', (['(entropy_test_nor_all, entropy_test_nor_help)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (482, 'numpy.concatenate', 'np.concatenate', (['(confidence_test_nor_all, confidence_test_nor_help)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (483, 'numpy.concatenate', 'np.concatenate', (['(logits_nor_all, logits_part_nor)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (484, 'numpy.concatenate', 'np.concatenate', (['(labels_nor_all, labels_nor_help)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (485, 'numpy.concatenate', 'np.concatenate', (['(logits_adv_all, logits_part_adv)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (486, 'numpy.concatenate', 'np.concatenate', (['(labels_adv_all, labels_adv_help)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (489, 'numpy.concatenate', 'np.concatenate', (['(nor_img_all, nor_img)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (490, 'numpy.concatenate', 'np.concatenate', (['(adv_img_all, adv_img)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (496, 'numpy.sum', 'np.sum', (['efficient_index'], {}), True, 'import numpy as np\n'), (504, 'numpy.equal', 'np.equal', (['labels_nor_all[k_index_begin:k_index_end]', 'labels_true_all[k_index_begin:k_index_end]'], {}), True, 'import numpy as np\n'), (505, 'numpy.equal', 'np.equal', (['labels_adv_all[k_index_begin:k_index_end]', 'labels_true_all[k_index_begin:k_index_end]'], {}), True, 'import numpy as np\n'), (558, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), True, 'import tensorflow as tf\n'), (599, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (603, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (623, 'numpy.concatenate', 'np.concatenate', (['(entropy_test_adv_all, entropy_test_adv_help)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (624, 'numpy.concatenate', 'np.concatenate', (['(confidence_test_adv_all, confidence_test_adv_help)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (625, 'numpy.concatenate', 'np.concatenate', (['(entropy_test_nor_all, entropy_test_nor_help)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (626, 'numpy.concatenate', 'np.concatenate', (['(confidence_test_nor_all, confidence_test_nor_help)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (627, 'numpy.concatenate', 'np.concatenate', (['(logits_nor_all, logits_part_nor)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (628, 'numpy.concatenate', 'np.concatenate', (['(labels_nor_all, labels_nor_help)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (629, 'numpy.concatenate', 'np.concatenate', (['(logits_adv_all, logits_part_adv)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (630, 'numpy.concatenate', 'np.concatenate', (['(labels_adv_all, labels_adv_help)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (633, 'numpy.concatenate', 'np.concatenate', (['(nor_img_all, nor_img)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (634, 'numpy.concatenate', 'np.concatenate', (['(adv_img_all, adv_img)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (642, 'numpy.equal', 'np.equal', (['labels_nor_all[k_index_begin:k_index_end]', 'labels_true_all[k_index_begin:k_index_end]'], {}), True, 'import numpy as np\n'), (644, 'numpy.equal', 'np.equal', 
(['labels_adv_all[k_index_begin:k_index_end]', 'labels_true_all[k_index_begin:k_index_end]'], {}), True, 'import numpy as np\n'), (127, 'attacks.fgsm.fgsm', 'attacks.fgsm.fgsm', (['models', 'images', 'hps', 'RCE_train'], {'eps': '(eps / 10)', 'epochs': '(10)', 'clip_min': '(-0.5)', 'clip_max': '(0.5)'}), False, 'import attacks\n'), (206, 'tensorflow.argmax', 'tf.argmax', (['labels', '(1)'], {}), True, 'import tensorflow as tf\n'), (209, 'numpy.zeros', 'np.zeros', (['hps.batch_size'], {}), True, 'import numpy as np\n'), (212, 'numpy.ones', 'np.ones', (['num_adv'], {}), True, 'import numpy as np\n'), (275, 'tensorflow.argmax', 'tf.argmax', (['labels', '(1)'], {}), True, 'import tensorflow as tf\n'), (279, 'numpy.zeros', 'np.zeros', (['hps.batch_size'], {}), True, 'import numpy as np\n'), (282, 'numpy.ones', 'np.ones', (['num_adv'], {}), True, 'import numpy as np\n'), (364, 'attacks.carliniL2.CarliniL2', 'attacks.carliniL2.CarliniL2', (['sess', 'model_carlini_adv'], {'batch_size': '(10)', 'max_iterations': '(10000)', 'targeted': '(True)', 'confidence': '(10)', 'initial_const': '(1.0)', 'binary_search_steps': '(9)'}), False, 'import attacks\n'), (414, 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', '(9)'], {}), True, 'import numpy as np\n'), (435, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (465, 'numpy.repeat', 'np.repeat', (['m_tsne_logits_adv', '(100)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (653, 'numpy.mean', 'np.mean', (['L2_distance'], {}), True, 'import numpy as np\n'), (123, 'tensorflow.random_uniform', 'tf.random_uniform', (['(hps.batch_size, image_size, image_size, num_channel)'], {'minval': '(-eps)', 'maxval': 'eps'}), True, 'import tensorflow as tf\n'), (131, 'attacks.tgsm.tgsm', 'attacks.tgsm.tgsm', (['models', 'images', 'hps', 'RCE_train'], {'y': 'None', 'eps': '(eps / 10)', 'epochs': '(10)', 'clip_min': '(-0.5)', 'clip_max': '(0.5)'}), False, 'import attacks\n'), (369, 'attacks.carliniL2.CarliniL2', 'attacks.carliniL2.CarliniL2', (['sess', 'model_carlini_adv'], {'batch_size': '(1)', 'max_iterations': '(5000)', 'targeted': '(True)', 'initial_const': '(1.0)', 'confidence': '(0)', 'binary_search_steps': '(3)'}), False, 'import attacks\n'), (374, 'attacks.carliniL2_specific.CarliniL2_specific', 'attacks.carliniL2_specific.CarliniL2_specific', (['sess', 'model_carlini_adv'], {'batch_size': '(1)', 'max_iterations': '(10000)', 'targeted': '(True)', 'initial_const': '(1.0)', 'confidence': '(0)', 'binary_search_steps': '(8)', 'extra_loss': '(True)', 'e_kernel_train': 'e_kernel_train', 'e_median': 'e_median', 'sigma2': 'sigma2'}), False, 'import attacks\n'), (415, 'numpy.argmax', 'np.argmax', (['true_label[j]'], {}), True, 'import numpy as np\n'), (416, 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', '(9)'], {}), True, 'import numpy as np\n'), (454, 'tensorflow.argmax', 'tf.argmax', (['predict'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (454, 'tensorflow.reduce_max', 'tf.reduce_max', (['predict'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (459, 'tensorflow.argmax', 'tf.argmax', (['predict'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (459, 'tensorflow.reduce_max', 'tf.reduce_max', (['predict'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (487, 'numpy.argmax', 'np.argmax', (['true_label'], {'axis': '(1)'}), True, 'import numpy as np\n'), (493, 'numpy.argmax', 'np.argmax', (['true_label'], {'axis': '(1)'}), True, 'import numpy as np\n'), (615, 'tensorflow.argmax', 
'tf.argmax', (['predict'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (615, 'tensorflow.reduce_max', 'tf.reduce_max', (['predict'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (620, 'tensorflow.argmax', 'tf.argmax', (['predict'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (620, 'tensorflow.reduce_max', 'tf.reduce_max', (['predict'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (631, 'numpy.argmax', 'np.argmax', (['true_label'], {'axis': '(1)'}), True, 'import numpy as np\n'), (142, 'tensorflow.cast', 'tf.cast', (['target_y64', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (143, 'attacks.jsma.jsma', 'attacks.jsma.jsma', (['models', 'images', 'hps', 'RCE_train', 'target_y'], {'epochs': 'epoch_jsma', 'eps': 'eps', 'clip_min': '(-0.5)', 'clip_max': '(0.5)', 'pair': '(False)', 'min_proba': '(0.0)'}), False, 'import attacks\n'), (385, 'attacks.carliniL2.CarliniL2', 'attacks.carliniL2.CarliniL2', (['sess', 'model_carlini_adv'], {'batch_size': '(1)', 'max_iterations': '(5000)', 'targeted': '(True)', 'initial_const': '(10.0)', 'confidence': '(5)', 'binary_search_steps': '(3)'}), False, 'import attacks\n'), (390, 'attacks.carliniL2_specific.CarliniL2_specific', 'attacks.carliniL2_specific.CarliniL2_specific', (['sess', 'model_carlini_adv'], {'batch_size': '(1)', 'max_iterations': '(10000)', 'targeted': '(True)', 'initial_const': '(100.0)', 'confidence': '(5)', 'binary_search_steps': '(9)', 'extra_loss': '(True)', 'e_kernel_train': 'e_kernel_train', 'e_median': 'e_median', 'sigma2': 'sigma2'}), False, 'import attacks\n'), (463, 'numpy.sum', 'np.sum', (['log_density_ratio'], {}), True, 'import numpy as np\n'), (464, 'copy.copy', 'copy.copy', (['logits_part_adv'], {}), False, 'import copy\n'), (477, 'numpy.sum', 'np.sum', (['log_density_ratio2'], {}), True, 'import numpy as np\n'), (495, 'numpy.sum', 'np.sum', (['succ'], {}), True, 'import numpy as np\n'), (139, 'tensorflow.argmin', 'tf.argmin', (['model_target_y'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.cast', 'tf.cast', (['target_y64', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (155, 'attacks.smda.smda', 'attacks.smda.smda', (['models', 'images', 'hps', 'RCE_train', 'target_y'], {'epochs': 'epoch_jsma', 'eps': 'eps', 'clip_min': '(-0.5)', 'clip_max': '(0.5)', 'min_proba': '(0.0)'}), False, 'import attacks\n'), (348, 'tensorflow.log', 'tf.log', (['predict'], {}), True, 'import tensorflow as tf\n'), (488, 'numpy.square', 'np.square', (['(nor_img - adv_img)'], {}), True, 'import numpy as np\n'), (497, 'numpy.square', 'np.square', (['(nor_img - adv_img)'], {}), True, 'import numpy as np\n'), (593, 'tensorflow.log', 'tf.log', (['predict'], {}), True, 'import tensorflow as tf\n'), (632, 'numpy.square', 'np.square', (['(nor_img - adv_img)'], {}), True, 'import numpy as np\n'), (636, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (151, 'tensorflow.argmin', 'tf.argmin', (['model_target_y'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (434, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (442, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (445, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (469, 'numpy.argmax', 'np.argmax', (['target_lab'], {}), True, 'import numpy as np\n'), (466, 'numpy.argmax', 'np.argmax', (['target_lab'], {}), True, 'import numpy as np\n'), (467, 'numpy.square', 'np.square', (['(m_tsne_logits_adv - kernel_train)'], {}), True, 'import numpy as np\n')] |
hongliangduan/Reproducing-the-invention-of-a-named-reaction-Zero-shot-prediction-of-unseen-chemical-reactions | 2d688bff2202e37321dedba7cdac67cd3c1e1fad | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for translation data-sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import bleu_hook
import tensorflow as tf
FLAGS = tf.flags.FLAGS
class TranslateProblem(text_problems.Text2TextProblem):
"""Base class for translation problems."""
def is_generate_per_split(self):
return True
@property
def approx_vocab_size(self):
return 2**15
def source_data_files(self, dataset_split):
"""Files to be passed to compile_data."""
raise NotImplementedError()
def vocab_data_files(self):
"""Files to be passed to get_or_generate_vocab."""
return self.source_data_files(problem.DatasetSplit.TRAIN)
def generate_samples(self, data_dir, tmp_dir, dataset_split):
datasets = self.source_data_files(dataset_split)
tag = "train" if dataset_split == problem.DatasetSplit.TRAIN else "dev"
data_path = compile_data(tmp_dir, datasets, "%s-compiled-%s" % (self.name,
tag))
return text_problems.text2text_txt_iterator(data_path + ".lang1",
data_path + ".lang2")
def generate_text_for_vocab(self, data_dir, tmp_dir):
return generator_utils.generate_lines_for_vocab(tmp_dir,
self.vocab_data_files())
@property
def decode_hooks(self):
return [compute_bleu_summaries]
def compute_bleu_summaries(hook_args):
"""Compute BLEU core summaries using the decoder output.
Args:
hook_args: DecodeHookArgs namedtuple
Returns:
    A list of tf.Summary values if hook_args.decode_hparams contains both the
    reference file and the translated output file.
"""
decode_hparams = hook_args.decode_hparams
if (decode_hparams.decode_reference is None or
decode_hparams.decode_to_file is None):
return None
values = []
bleu = 100 * bleu_hook.bleu_wrapper(
decode_hparams.decode_reference, decode_hparams.decode_to_file)
values.append(tf.Summary.Value(tag="BLEU", simple_value=bleu))
tf.logging.info("%s: BLEU = %6.2f" % (decode_hparams.decode_to_file, bleu))
return values
def _preprocess_sgm(line, is_sgm):
"""Preprocessing to strip tags in SGM files."""
if not is_sgm:
return line
# In SGM files, remove <srcset ...>, <p>, <doc ...> lines.
if line.startswith("<srcset") or line.startswith("</srcset"):
return ""
if line.startswith("<doc") or line.startswith("</doc"):
return ""
if line.startswith("<p>") or line.startswith("</p>"):
return ""
# Strip <seg> tags.
line = line.strip()
if line.startswith("<seg") and line.endswith("</seg>"):
i = line.index(">")
return line[i + 1:-6] # Strip first <seg ...> and last </seg>.
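# Illustrative sketch only (not part of the original file): a minimal check of
# the SGM stripping above, using hypothetical sample lines.
def _demo_preprocess_sgm():
  assert _preprocess_sgm("Guten Tag .", False) == "Guten Tag ."
  assert _preprocess_sgm("<p>", True) == ""
  assert _preprocess_sgm('<seg id="1">Guten Tag .</seg>', True) == "Guten Tag ."
  # Markup-only lines (e.g. "<doc ...>") yield "" or None and are later dropped
  # by compile_data, which only writes pairs where both sides are non-empty.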
def compile_data(tmp_dir, datasets, filename):
"""Concatenate all `datasets` and save to `filename`."""
filename = os.path.join(tmp_dir, filename)
# lang1_fname = filename + ".lang1"
# lang2_fname = filename + ".lang2"
lang1_fname = filename + ".source"
lang2_fname = filename + ".target"
if tf.gfile.Exists(lang1_fname) and tf.gfile.Exists(lang2_fname):
tf.logging.info("Skipping compile data, found files:\n%s\n%s", lang1_fname,
lang2_fname)
return filename
with tf.gfile.GFile(lang1_fname, mode="w") as lang1_resfile:
with tf.gfile.GFile(lang2_fname, mode="w") as lang2_resfile:
for dataset in datasets:
url = dataset[0]
compressed_filename = os.path.basename(url)
compressed_filepath = os.path.join(tmp_dir, compressed_filename)
if url.startswith("http"):
generator_utils.maybe_download(tmp_dir, compressed_filename, url)
if dataset[1][0] == "tsv":
_, src_column, trg_column, glob_pattern = dataset[1]
filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern))
if not filenames:
# Capture *.tgz and *.tar.gz too.
mode = "r:gz" if compressed_filepath.endswith("gz") else "r"
with tarfile.open(compressed_filepath, mode) as corpus_tar:
corpus_tar.extractall(tmp_dir)
filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern))
for tsv_filename in filenames:
if tsv_filename.endswith(".gz"):
new_filename = tsv_filename.strip(".gz")
generator_utils.gunzip_file(tsv_filename, new_filename)
tsv_filename = new_filename
with tf.gfile.Open(tsv_filename) as tsv_file:
for line in tsv_file:
if line and "\t" in line:
parts = line.split("\t")
source, target = parts[src_column], parts[trg_column]
source, target = source.strip(), target.strip()
if source and target:
lang1_resfile.write(source)
lang1_resfile.write("\n")
lang2_resfile.write(target)
lang2_resfile.write("\n")
else:
lang1_filename, lang2_filename = dataset[1]
lang1_filepath = os.path.join(tmp_dir, lang1_filename)
lang2_filepath = os.path.join(tmp_dir, lang2_filename)
is_sgm = (
lang1_filename.endswith("sgm") and lang2_filename.endswith("sgm"))
if not (tf.gfile.Exists(lang1_filepath) and
tf.gfile.Exists(lang2_filepath)):
# For .tar.gz and .tgz files, we read compressed.
mode = "r:gz" if compressed_filepath.endswith("gz") else "r"
with tarfile.open(compressed_filepath, mode) as corpus_tar:
corpus_tar.extractall(tmp_dir)
if lang1_filepath.endswith(".gz"):
new_filepath = lang1_filepath.strip(".gz")
generator_utils.gunzip_file(lang1_filepath, new_filepath)
lang1_filepath = new_filepath
if lang2_filepath.endswith(".gz"):
new_filepath = lang2_filepath.strip(".gz")
generator_utils.gunzip_file(lang2_filepath, new_filepath)
lang2_filepath = new_filepath
for example in text_problems.text2text_txt_iterator(
lang1_filepath, lang2_filepath):
line1res = _preprocess_sgm(example["inputs"], is_sgm)
line2res = _preprocess_sgm(example["targets"], is_sgm)
if line1res and line2res:
lang1_resfile.write(line1res)
lang1_resfile.write("\n")
lang2_resfile.write(line2res)
lang2_resfile.write("\n")
return filename
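# Illustrative sketch only (not part of the original file): the `datasets`
# argument of compile_data is a list of (url, spec) pairs, where spec is either
# a (lang1_filename, lang2_filename) pair inside the downloaded archive or
# ("tsv", source_column, target_column, glob_pattern). All URLs and file names
# below are hypothetical placeholders.
_EXAMPLE_DATASETS = [
    ("http://example.com/parallel-corpus.tgz",
     ("corpus/train.lang1", "corpus/train.lang2")),
    ("http://example.com/tab-separated.tar.gz",
     ("tsv", 0, 1, "pairs/*.tsv")),
]
# compile_data(tmp_dir, _EXAMPLE_DATASETS, "example-compiled-train")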
class TranslateDistillProblem(TranslateProblem):
"""Base class for translation problems."""
def is_generate_per_split(self):
return True
def example_reading_spec(self):
data_fields = {"dist_targets": tf.VarLenFeature(tf.int64)}
if self.has_inputs:
data_fields["inputs"] = tf.VarLenFeature(tf.int64)
# hack: ignoring true targets and putting dist_targets in targets
data_items_to_decoders = {
"inputs": tf.contrib.slim.tfexample_decoder.Tensor("inputs"),
"targets": tf.contrib.slim.tfexample_decoder.Tensor("dist_targets"),
}
return (data_fields, data_items_to_decoders)
def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False):
"""Get vocab for distill problems."""
# We assume that vocab file is present in data_dir directory where the
# data generated will be stored.
vocab_filepath = os.path.join(data_dir, self.vocab_filename)
encoder = text_encoder.SubwordTextEncoder(vocab_filepath)
return encoder
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
generator = self.generate_samples(data_dir, tmp_dir, dataset_split)
vocab = self.get_or_create_vocab(data_dir, tmp_dir)
# For each example, encode the text and append EOS ID.
for sample in generator:
if self.has_inputs:
sample["inputs"] = vocab.encode(sample["inputs"])
sample["inputs"].append(text_encoder.EOS_ID)
sample["targets"] = vocab.encode(sample["targets"])
sample["targets"].append(text_encoder.EOS_ID)
sample["dist_targets"] = vocab.encode(sample["dist_targets"])
sample["dist_targets"].append(text_encoder.EOS_ID)
yield sample
def generate_samples(self, data_dir, tmp_dir, dataset_split):
data_path = self.source_data_files(dataset_split)
assert tf.gfile.Exists(data_path)
return text_problems.text2text_distill_iterator(data_path + "inputs",
data_path + "gold",
data_path + "prediction")
| [
"tensorflow.gfile.Open",
"tensorflow.gfile.Exists",
"tensorflow.gfile.GFile",
"tensorflow.Summary.Value",
"tensorflow.logging.info",
"tensorflow.contrib.slim.tfexample_decoder.Tensor",
"tensorflow.VarLenFeature"
] | data_generators/translate.py | [(88, 'tensorflow.logging.info', 'tf.logging.info', (["('%s: BLEU = %6.2f' % (decode_hparams.decode_to_file, bleu))"], {}), True, 'import tensorflow as tf\n'), (112, 'os.path.join', 'os.path.join', (['tmp_dir', 'filename'], {}), False, 'import os\n'), (57, 'tensor2tensor.data_generators.text_problems.text2text_txt_iterator', 'text_problems.text2text_txt_iterator', (["(data_path + '.lang1')", "(data_path + '.lang2')"], {}), False, 'from tensor2tensor.data_generators import text_problems\n'), (85, 'tensor2tensor.utils.bleu_hook.bleu_wrapper', 'bleu_hook.bleu_wrapper', (['decode_hparams.decode_reference', 'decode_hparams.decode_to_file'], {}), False, 'from tensor2tensor.utils import bleu_hook\n'), (87, 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""BLEU"""', 'simple_value': 'bleu'}), True, 'import tensorflow as tf\n'), (117, 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['lang1_fname'], {}), True, 'import tensorflow as tf\n'), (117, 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['lang2_fname'], {}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.logging.info', 'tf.logging.info', (['"""Skipping compile data, found files:\n%s\n%s"""', 'lang1_fname', 'lang2_fname'], {}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['lang1_fname'], {'mode': '"""w"""'}), True, 'import tensorflow as tf\n'), (214, 'os.path.join', 'os.path.join', (['data_dir', 'self.vocab_filename'], {}), False, 'import os\n'), (215, 'tensor2tensor.data_generators.text_encoder.SubwordTextEncoder', 'text_encoder.SubwordTextEncoder', (['vocab_filepath'], {}), False, 'from tensor2tensor.data_generators import text_encoder\n'), (234, 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['data_path'], {}), True, 'import tensorflow as tf\n'), (235, 'tensor2tensor.data_generators.text_problems.text2text_distill_iterator', 'text_problems.text2text_distill_iterator', (["(data_path + 'inputs')", "(data_path + 'gold')", "(data_path + 'prediction')"], {}), False, 'from tensor2tensor.data_generators import text_problems\n'), (122, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['lang2_fname'], {'mode': '"""w"""'}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.int64'], {}), True, 'import tensorflow as tf\n'), (200, 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.int64'], {}), True, 'import tensorflow as tf\n'), (204, 'tensorflow.contrib.slim.tfexample_decoder.Tensor', 'tf.contrib.slim.tfexample_decoder.Tensor', (['"""inputs"""'], {}), True, 'import tensorflow as tf\n'), (205, 'tensorflow.contrib.slim.tfexample_decoder.Tensor', 'tf.contrib.slim.tfexample_decoder.Tensor', (['"""dist_targets"""'], {}), True, 'import tensorflow as tf\n'), (125, 'os.path.basename', 'os.path.basename', (['url'], {}), False, 'import os\n'), (126, 'os.path.join', 'os.path.join', (['tmp_dir', 'compressed_filename'], {}), False, 'import os\n'), (128, 'tensor2tensor.data_generators.generator_utils.maybe_download', 'generator_utils.maybe_download', (['tmp_dir', 'compressed_filename', 'url'], {}), False, 'from tensor2tensor.data_generators import generator_utils\n'), (157, 'os.path.join', 'os.path.join', (['tmp_dir', 'lang1_filename'], {}), False, 'import os\n'), (158, 'os.path.join', 'os.path.join', (['tmp_dir', 'lang2_filename'], {}), False, 'import os\n'), (177, 'tensor2tensor.data_generators.text_problems.text2text_txt_iterator', 'text_problems.text2text_txt_iterator', (['lang1_filepath', 'lang2_filepath'], {}), False, 
'from tensor2tensor.data_generators import text_problems\n'), (132, 'os.path.join', 'os.path.join', (['tmp_dir', 'glob_pattern'], {}), False, 'import os\n'), (170, 'tensor2tensor.data_generators.generator_utils.gunzip_file', 'generator_utils.gunzip_file', (['lang1_filepath', 'new_filepath'], {}), False, 'from tensor2tensor.data_generators import generator_utils\n'), (174, 'tensor2tensor.data_generators.generator_utils.gunzip_file', 'generator_utils.gunzip_file', (['lang2_filepath', 'new_filepath'], {}), False, 'from tensor2tensor.data_generators import generator_utils\n'), (136, 'tarfile.open', 'tarfile.open', (['compressed_filepath', 'mode'], {}), False, 'import tarfile\n'), (138, 'os.path.join', 'os.path.join', (['tmp_dir', 'glob_pattern'], {}), False, 'import os\n'), (142, 'tensor2tensor.data_generators.generator_utils.gunzip_file', 'generator_utils.gunzip_file', (['tsv_filename', 'new_filename'], {}), False, 'from tensor2tensor.data_generators import generator_utils\n'), (144, 'tensorflow.gfile.Open', 'tf.gfile.Open', (['tsv_filename'], {}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['lang1_filepath'], {}), True, 'import tensorflow as tf\n'), (163, 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['lang2_filepath'], {}), True, 'import tensorflow as tf\n'), (166, 'tarfile.open', 'tarfile.open', (['compressed_filepath', 'mode'], {}), False, 'import tarfile\n')] |
KonduitAI/ImportTests | 1b05adac04d1b04fe4492d3fd35f3c4573774ceb | import numpy as np
import tensorflow as tf
from tfoptests.persistor import TensorFlowPersistor
from tfoptests.test_graph import TestGraph
'''
No training.
Tensor transforms that rearrange values, plus some random ops.
'''
class TensorRearrange(TestGraph):
def __init__(self, *args, **kwargs):
super(TensorRearrange, self).__init__(*args, **kwargs)
self.a = np.random.uniform(size=(2, 5, 4))
self.b = np.random.uniform(size=(2, 3, 5, 4))
self.c = np.random.uniform(size=(3, 1, 5, 4))
def list_inputs(self):
return ["input_0", "input_1", "input_2"]
def get_placeholder_input(self, name):
if name == "input_0":
return self.a
if name == "input_1":
return self.b
if name == "input_2":
return self.c
def _get_placeholder_shape(self, name):
if name == "input_0":
return self.a.shape
if name == "input_1":
return self.b.shape
if name == "input_2":
return self.c.shape
def test_tensor_rearrange():
tensor_rearrange = TensorRearrange(seed=713)
in_node_a = tensor_rearrange.get_placeholder("input_0")
in_node_b = tensor_rearrange.get_placeholder("input_1")
in_node_c = tensor_rearrange.get_placeholder("input_2")
stitched = tf.dynamic_stitch([[1, 10], [[0, 7, 9], [5, 8, 3]], [[6], [4], [2]]],
[in_node_a, in_node_b, in_node_c]) # should be 11,5,4
list_of_parts = tf.dynamic_partition(tf.transpose(stitched, perm=[1, 2, 0]),
[[0, 1, 2, 3], [1, 0, 2, 3], [2, 3, 1, 0], [2, 1, 0, 3], [0, 1, 2, 3]],
num_partitions=4) # after permute becomes 5,4,11, return all partitions 5,11
node_a = tf.div(list_of_parts[0], list_of_parts[1])
node_b = tf.divide(list_of_parts[2], list_of_parts[3])
trace_node = tf.trace(node_a) + node_b # there is a broadcast here
out_node = tf.cast(tf.count_nonzero(trace_node), dtype=tf.float32) + tf.Variable(tf.random_normal(shape=(2, 3)))
placeholders = [in_node_a, in_node_b, in_node_c]
predictions = [out_node]
# Run and persist
tfp = TensorFlowPersistor(save_dir="partition_stitch_misc")
tfp.set_placeholders(placeholders) \
.set_output_tensors(predictions) \
.set_test_data(tensor_rearrange.get_test_data()) \
.build_save_frozen_graph()
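# Illustrative sketch only (not part of the original test): a NumPy check of
# the "should be 11,5,4" comment above. dynamic_stitch scatters (5, 4) slices
# of a, b and c into one tensor indexed by the flattened index lists, so the
# leading dimension is max(index) + 1. The helper name is hypothetical.
def _demo_stitched_shape():
    indices = [np.array([1, 10]),
               np.array([[0, 7, 9], [5, 8, 3]]),
               np.array([[6], [4], [2]])]
    leading = max(int(idx.max()) for idx in indices) + 1  # -> 11
    return (leading, 5, 4)  # matches the expected stitched shape (11, 5, 4)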
if __name__ == '__main__':
test_tensor_rearrange() | [
"tensorflow.transpose",
"tensorflow.count_nonzero",
"tensorflow.divide",
"tensorflow.div",
"tensorflow.dynamic_stitch",
"tensorflow.random_normal",
"numpy.random.uniform",
"tensorflow.trace"
] | tests/OLD/mathops/test_partition_stitch_misc.py | [(44, 'tensorflow.dynamic_stitch', 'tf.dynamic_stitch', (['[[1, 10], [[0, 7, 9], [5, 8, 3]], [[6], [4], [2]]]', '[in_node_a, in_node_b, in_node_c]'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.div', 'tf.div', (['list_of_parts[0]', 'list_of_parts[1]'], {}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.divide', 'tf.divide', (['list_of_parts[2]', 'list_of_parts[3]'], {}), True, 'import tensorflow as tf\n'), (58, 'tfoptests.persistor.TensorFlowPersistor', 'TensorFlowPersistor', ([], {'save_dir': '"""partition_stitch_misc"""'}), False, 'from tfoptests.persistor import TensorFlowPersistor\n'), (15, 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(2, 5, 4)'}), True, 'import numpy as np\n'), (16, 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(2, 3, 5, 4)'}), True, 'import numpy as np\n'), (17, 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(3, 1, 5, 4)'}), True, 'import numpy as np\n'), (46, 'tensorflow.transpose', 'tf.transpose', (['stitched'], {'perm': '[1, 2, 0]'}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.trace', 'tf.trace', (['node_a'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.count_nonzero', 'tf.count_nonzero', (['trace_node'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '(2, 3)'}), True, 'import tensorflow as tf\n')] |
taipahuchu/language-Identification- | 68660bc110d374f0d8802b942792b15f8782e647 | import tensorflow as tf
import numpy as np
class BaseModel(object):
"""Holds code shared between all the different model variants."""
def __init__(self, batch_size, max_sequence_len, out_vocab_size, c2v,
dropout_keep_prob=0.0):
self._batch_size = batch_size
self._dropout_keep_prob = dropout_keep_prob
self._out_vocab_size = out_vocab_size
self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len],
name='x')
self.y = tf.placeholder(tf.float32, [batch_size, out_vocab_size],
name='y')
# The bidirectional rnn code requires seq_lens as int64
self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
self.example_weights = tf.placeholder(tf.float32, [batch_size],
name='example_weights')
embeddings = c2v.GetEmbeddings(self.x)
self._inputs = [tf.squeeze(input_, [1]) for input_ in
tf.split(1, max_sequence_len, embeddings)]
# Need to prepare a mask to zero out the padding symbols.
# Make a batch_size x max_sequence_len matrix where each
# row contains the length repeated max_sequence_len times.
lengths_transposed = tf.expand_dims(tf.to_int32(self.seq_lens), 1)
lengths_tiled = tf.tile(lengths_transposed, [1, max_sequence_len])
# Make a matrix where each row contains [0, 1, ..., max_sequence_len]
r = tf.range(0, max_sequence_len, 1)
range_row = tf.expand_dims(r, 0)
range_tiled = tf.tile(range_row, [batch_size, 1])
# Use the logical operations to create a mask
indicator = tf.less(range_tiled, lengths_tiled)
sz = [batch_size, max_sequence_len]
self._mask = tf.select(indicator, tf.ones(sz), tf.zeros(sz))
def _DoPredictions(self, in_size, mats, class_weights=None):
"""Takes in an array of states and calculates predictions.
    The cross-entropy for each example is stored in the vector self._xent.
Args:
in_size: size of the hidden state vectors
mats: list of hidden state vectors
"""
pred_mat = tf.get_variable('pred_mat',
[in_size, self._out_vocab_size])
pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])
# Make a prediction on every word.
def GetWordPred(o_):
logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
return tf.nn.softmax(logits)
self.preds_by_word = tf.pack([GetWordPred(o_) for o_ in mats])
self.cs = self._mask / tf.reduce_sum(self._mask, 1, keep_dims=True)
# The final prediction is the average of the predictions for each word
# weighted by the individual confidence/utility scores.
preds_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
tf.reshape(self.preds_by_word,
[-1, self._out_vocab_size]))
preds_weighted_reshaped = tf.reshape(preds_weighted,
self.preds_by_word.get_shape())
self.probs = tf.reduce_sum(preds_weighted_reshaped, 0)
self._xent = _SafeXEnt(self.y, self.probs, class_weights=class_weights)
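# Illustrative sketch only (not part of the original file): the padding mask
# and the confidence-weighted averaging used by BaseModel above, written in
# NumPy for a tiny hypothetical batch so the shapes are easy to follow.
def _demo_mask_and_average():
  batch_size, max_len, n_lang = 2, 4, 3
  seq_lens = np.array([2, 4])
  # Same construction as the tile/range/less code above: mask[i, t] = 1 iff t < len_i.
  mask = (np.arange(max_len)[None, :] < seq_lens[:, None]).astype(np.float32)
  # Each real word of a sequence gets weight 1 / sequence_length.
  cs = mask / mask.sum(axis=1, keepdims=True)
  # Fake per-word language predictions, shape [max_len, batch_size, n_lang].
  preds_by_word = np.random.dirichlet(np.ones(n_lang), size=(max_len, batch_size))
  # Weighted average over word positions -> one distribution per example.
  probs = (cs.T[:, :, None] * preds_by_word).sum(axis=0)
  return mask, cs, probs  # probs has shape [batch_size, n_lang]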
class WordAvgModel(BaseModel): #formerly SimpleModel
"""A bag of word /predictions/."""
def __init__(self, out_vocab_size=None,
batch_size=10,
model_params=None,
c2v=None,
max_sequence_len=None,
dropout_keep_prob=None,
weights=None):
super(WordAvgModel, self).__init__(batch_size, max_sequence_len,
out_vocab_size, c2v)
super(WordAvgModel, self)._DoPredictions(c2v.embedding_dims,
self._inputs)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
class WordSeqModel(BaseModel):
"""A bag of word embeddings."""
def __init__(self, out_vocab_size=None,
batch_size=10,
model_params=None,
c2v=None,
max_sequence_len=None,
dropout_keep_prob=None,
weights=None):
super(WordSeqModel, self).__init__(batch_size, max_sequence_len,
out_vocab_size, c2v)
in_size = self._inputs[0].get_shape()[1].value
# Also, output confidence scores at every word.
confidence_mat = tf.get_variable('confidence_mat', [in_size, 1])
confidence_scores = tf.concat(1, [tf.matmul(o_, confidence_mat)
for o_ in self._inputs])
# dropout on confidence_scores
random_tensor = (1.0 - self._dropout_keep_prob +
tf.random_uniform(tf.shape(confidence_scores)))
    binary_tensor = -50.0 * tf.floor(random_tensor)
    csshape = confidence_scores.get_shape()
    # Note: binary_tensor is not used below; the softmax of a constant tensor
    # makes self.cs a uniform weighting over word positions.
    self.cs = tf.nn.softmax(tf.constant(1.0, shape=csshape))
# The final prediction is the average of the predictions for each word
# weighted by the individual confidence/utility scores.
wvs = tf.pack(self._inputs)
wvs_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
tf.reshape(wvs, [-1, in_size]))
wvs_weighted_reshaped = tf.reshape(wvs_weighted, wvs.get_shape())
wvsum = tf.reduce_sum(wvs_weighted_reshaped,0)
pred_mat = tf.get_variable('pred_mat', [in_size, self._out_vocab_size])
pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])
# Make a prediction for each tweet.
def GetWordPred(o_):
logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
return tf.nn.softmax(logits)
preds = GetWordPred(wvsum)
z = tf.tile(tf.reshape(tf.reduce_sum(preds,1),[-1,1]), [1, out_vocab_size])
self.preds, self.z = preds, z
self.probs = tf.div(preds, z) #normalize
self.unweighted_xent = _SafeXEnt(self.y, self.probs)
self._xent = _SafeXEnt(self.y, self.probs, class_weights=weights)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
class TweetSeqModel(BaseModel): #formerly SeqModel
"""Single layer LSTM on top of the word embeddings.
Lang id predictions are done on each word and then combined via
a weighted average.
"""
def __init__(self, out_vocab_size=None,
batch_size=10, model_params=None,
c2v=None,
max_sequence_len=None,
dropout_keep_prob=None,
weights=None):
"""Initialize the TweetSeqModel
Args:
out_vocab_size: how many languages we are predicting
batch_size: minibatch size
model_params: dictionary of other model parameters
c2v: char2vec class instance
max_sequence_len: length of all the input sequences
dropout_keep_prob: dropout probability indicator
weights: class weights
"""
hidden_size = model_params['model_hidden_size']
proj_size = model_params['model_proj_size'] # optional, can be None
super(TweetSeqModel, self).__init__(batch_size, max_sequence_len,
out_vocab_size, c2v,
dropout_keep_prob)
weights = tf.constant(weights, dtype=tf.float32, name='class_weights')
def GetCell():
"""Creates an LSTM cell with dropout."""
c = tf.nn.rnn_cell.LSTMCell(hidden_size,
use_peepholes=model_params['peepholes'],
num_proj=proj_size)
if dropout_keep_prob is not None:
c = tf.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob)
return c
# Create the bi-directional LSTM
with tf.variable_scope('wordrnn'):
with tf.variable_scope('fw'):
cell_fw = GetCell()
with tf.variable_scope('bw'):
cell_bw = GetCell()
rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs,
dtype=tf.float32,
sequence_length=self.seq_lens)
if proj_size:
out_size = 2 * proj_size
else:
out_size = 2 * hidden_size
super(TweetSeqModel, self)._DoPredictions(out_size, rnnout, class_weights=weights)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
class CharSeqModel(object): #formerly TweetSeqModel
"""
  Treats each document (tweet) as a single "word" that is fed through c2v;
  the output "embedding" is then sized to be a vector of language predictions.
"""
def __init__(self, out_vocab_size=None,
batch_size=10, model_params=None, c2v=None,
max_sequence_len=None,
dropout_keep_prob=None,
weights=None):
self.params = model_params
self._out_vocab_size = out_vocab_size # num. of languages
self.weights = tf.constant(weights, dtype=tf.float32, name='class_weights')
with tf.variable_scope("tweetff"):
hidden = tf.get_variable("ff_hidden",
[c2v.embedding_dims, out_vocab_size])
bias = tf.get_variable('ff_bias', [out_vocab_size])
    # seq_lens is not used by this model; it is defined only to match the
    # inputs of the other models.
self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len],
name='x')
self.y = tf.placeholder(tf.float32, [batch_size, out_vocab_size],
name='y')
self.example_weights = tf.placeholder(tf.float32, [batch_size],
name='example_weights')
# get one 'word' embedding for the full tweet
tweet_embedding = c2v.GetEmbeddings(self.x)[:,1,:]
logits = tf.nn.xw_plus_b(tweet_embedding, hidden, bias)
self.probs = tf.nn.softmax(logits)
self._xent = tf.nn.softmax_cross_entropy_with_logits(logits, self.y)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
class WordLevelModel(object):
"""
Model to evaluate on word-level predictions
Args:
batch_size: minibatch size
model_params: dictionary of other model parameters
c2v: char2vec class instance
max_sequence_len: length of all the input/output sequences
out_vocab_size: how many languages we are predicting
dropout_keep_prob: dropout probability indicator
weights: class weights
"""
def __init__(self, batch_size, model_params, c2v, max_sequence_len,
out_vocab_size, dropout_keep_prob=0.0, weights=None):
self._batch_size = batch_size
self._dropout_keep_prob = dropout_keep_prob
self._out_vocab_size = out_vocab_size
self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len],
name='x')
self.y = tf.placeholder(tf.float32,
[batch_size, max_sequence_len, out_vocab_size],
name='y')
# The bidirectional rnn code requires seq_lens as int64
self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
self.example_weights = tf.placeholder(tf.float32, [batch_size],
name='example_weights')
embeddings = c2v.GetEmbeddings(self.x)
self._inputs = [tf.squeeze(input_, [1]) for input_ in
tf.split(1, max_sequence_len, embeddings)]
# Need to prepare a mask to zero out the padding symbols.
# Make a batch_size x max_sequence_len matrix where each
# row contains the length repeated max_sequence_len times.
lengths_transposed = tf.expand_dims(tf.to_int32(self.seq_lens), 1)
lengths_tiled = tf.tile(lengths_transposed, [1, max_sequence_len])
# Make a matrix where each row contains [0, 1, ..., max_sequence_len]
r = tf.range(0, max_sequence_len, 1)
range_row = tf.expand_dims(r, 0)
range_tiled = tf.tile(range_row, [batch_size, 1])
self.lengths_transposed = lengths_transposed
self.lengths_tiled = lengths_tiled
self.range_row = range_row
self.range_tiled = range_tiled
# Use the logical operations to create a mask
    indicator = tf.less(range_tiled, lengths_tiled+1)  # i.e. True where the position index is <= the sequence length
trim = np.ones(indicator.get_shape())
trim[:,0] = 0 #ignore start symbol
indicator = tf.logical_and(indicator, trim.astype(bool))
self.indicator = indicator
sz = [batch_size, max_sequence_len]
self._mask = tf.select(indicator, tf.ones(sz), tf.zeros(sz))
#-------------------------------#
self.weights = tf.constant(weights, dtype=tf.float32, name='class_weights')
hidden_size = model_params['model_hidden_size']
proj_size = model_params['model_proj_size'] # optional, can be None
def GetCell():
"""Creates an LSTM cell with dropout."""
c = tf.nn.rnn_cell.LSTMCell(hidden_size,
use_peepholes=model_params['peepholes'],
num_proj=proj_size)
if dropout_keep_prob is not None:
c = tf.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob)
return c
# Create the bi-directional LSTM
with tf.variable_scope('wordrnn'):
with tf.variable_scope('fw'):
cell_fw = GetCell()
with tf.variable_scope('bw'):
cell_bw = GetCell()
rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs,
dtype=tf.float32,
sequence_length=self.seq_lens)
if proj_size:
out_size = 2 * proj_size
else:
out_size = 2 * hidden_size
self._DoPredictions(out_size, rnnout, self.weights)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
def _DoPredictions(self, in_size, mats, class_weights=None):
"""Takes in an array of states and calculates predictions.
    The cross-entropy for each example is stored in the vector self._xent.
Args:
in_size: size of the hidden state vectors
mats: list of hidden state vectors
"""
pred_mat = tf.get_variable('pred_mat',
[in_size, self._out_vocab_size])
pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])
# Make a prediction on every word.
def GetWordPred(o_):
logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
return tf.nn.softmax(logits)
#self.preds_by_word1 = tf.pack([GetWordPred(o_) for o_ in mats])
#self.preds_by_word = tf.reshape(self.preds_by_word1, self.y.get_shape())
#self.probs = tf.mul(tf.expand_dims(self._mask,2), self.preds_by_word)
self.preds_by_word = tf.pack([GetWordPred(o_) for o_ in mats])
self.preds_by_instance = tf.pack([self.preds_by_word[:,i,:] for i in range(self.preds_by_word.get_shape()[1])])
self.probs = tf.mul(tf.expand_dims(self._mask,2), self.preds_by_instance)
self._xent = _SafeXEnt(self.y, self.probs, class_weights=class_weights, sumd=[1,2])
def _SafeXEnt(y, probs, eps=0.0001, class_weights=None, sumd=[1]):
"""Version of cross entropy loss that should not produce NaNs.
  If the predicted probability for the true class is near zero then when
taking the log it can produce a NaN, which ruins everything. This
function ensures each probability is at least eps and no more than one
before taking the log.
Args:
y: matrix of true probabilities same size as probs
probs: matrix of probabilities for the minibatch
eps: value to clip the probabilities at
class_weights: vector of relative weights to be assigned to each class
sumd: dimensions along which to sum the x-ent matrix
Returns:
cross entropy loss for each example in the minibatch
"""
adjusted_probs = tf.clip_by_value(probs, eps, 1.0 - eps)
xent_mat = -y * tf.log(adjusted_probs)
if class_weights is not None:
xent_mat *= class_weights
return tf.reduce_sum(xent_mat, sumd)
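# Illustrative sketch only (not part of the original file): a tiny numeric
# check of why the clipping above matters; the helper name is hypothetical.
def _demo_safe_xent_clipping(eps=0.0001):
  y = np.array([[0., 1.]])      # true class is index 1
  probs = np.array([[1., 0.]])  # model assigns zero mass to the true class
  naive = -(y * np.log(probs)).sum()  # log(0) -> -inf, so the loss blows up
  clipped = -(y * np.log(np.clip(probs, eps, 1.0 - eps))).sum()  # ~9.21, finite
  return naive, clipped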
def _SafeNegEntropy(probs, batch_size, eps=0.0001):
"""Computes negative entropy in a way that will not overflow."""
adjusted_probs = tf.clip_by_value(probs, eps, 1.0 - eps)
entropy = tf.mul(probs, tf.log(adjusted_probs))
return tf.reduce_sum(entropy) / batch_size
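# Illustrative usage sketch only (not part of the original file). It assumes a
# TF 0.x runtime (the tf.pack/tf.select/tf.mul era used above) and a
# hypothetical char2vec stand-in exposing the two members the models rely on:
# GetEmbeddings(x) and embedding_dims.
class _DummyChar2Vec(object):
  embedding_dims = 16

  def GetEmbeddings(self, x):
    # Random "embeddings" of shape [batch_size, max_sequence_len, embedding_dims].
    shape = x.get_shape().as_list() + [self.embedding_dims]
    return tf.constant(np.random.uniform(size=shape), dtype=tf.float32)

def _demo_build_tweet_seq_model():
  """Builds a TweetSeqModel graph with small, hypothetical settings."""
  params = {'model_hidden_size': 8, 'model_proj_size': None, 'peepholes': False}
  return TweetSeqModel(out_vocab_size=3, batch_size=2, model_params=params,
                       c2v=_DummyChar2Vec(), max_sequence_len=5,
                       dropout_keep_prob=0.75, weights=np.ones(3, np.float32))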
| [
"tensorflow.get_variable",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.to_int32",
"tensorflow.pack",
"tensorflow.floor",
"tensorflow.squeeze",
"tensorflow.div",
"tensorflow.tile",
"tensorflow.nn.xw_plus_b",
"tensorflow.matmul",
"tensorflow.less",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.split",
"tensorflow.clip_by_value",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.reduce_mean",
"tensorflow.nn.rnn_cell.DropoutWrapper",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.ones",
"tensorflow.log",
"tensorflow.nn.bidirectional_rnn",
"tensorflow.variable_scope"
] | code/models.py | [(392, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['probs', 'eps', '(1.0 - eps)'], {}), True, 'import tensorflow as tf\n'), (397, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['xent_mat', 'sumd'], {}), True, 'import tensorflow as tf\n'), (402, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['probs', 'eps', '(1.0 - eps)'], {}), True, 'import tensorflow as tf\n'), (14, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, max_sequence_len]'], {'name': '"""x"""'}), True, 'import tensorflow as tf\n'), (16, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, out_vocab_size]'], {'name': '"""y"""'}), True, 'import tensorflow as tf\n'), (19, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[batch_size]'], {'name': '"""seq_lens"""'}), True, 'import tensorflow as tf\n'), (20, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size]'], {'name': '"""example_weights"""'}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.tile', 'tf.tile', (['lengths_transposed', '[1, max_sequence_len]'], {}), True, 'import tensorflow as tf\n'), (35, 'tensorflow.range', 'tf.range', (['(0)', 'max_sequence_len', '(1)'], {}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.expand_dims', 'tf.expand_dims', (['r', '(0)'], {}), True, 'import tensorflow as tf\n'), (37, 'tensorflow.tile', 'tf.tile', (['range_row', '[batch_size, 1]'], {}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.less', 'tf.less', (['range_tiled', 'lengths_tiled'], {}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.get_variable', 'tf.get_variable', (['"""pred_mat"""', '[in_size, self._out_vocab_size]'], {}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.get_variable', 'tf.get_variable', (['"""pred_bias"""', '[self._out_vocab_size]'], {}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['preds_weighted_reshaped', '(0)'], {}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(self.example_weights * self._xent)'], {}), True, 'import tensorflow as tf\n'), (110, 'tensorflow.get_variable', 'tf.get_variable', (['"""confidence_mat"""', '[in_size, 1]'], {}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.pack', 'tf.pack', (['self._inputs'], {}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['wvs_weighted_reshaped', '(0)'], {}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.get_variable', 'tf.get_variable', (['"""pred_mat"""', '[in_size, self._out_vocab_size]'], {}), True, 'import tensorflow as tf\n'), (132, 'tensorflow.get_variable', 'tf.get_variable', (['"""pred_bias"""', '[self._out_vocab_size]'], {}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.div', 'tf.div', (['preds', 'z'], {}), True, 'import tensorflow as tf\n'), (147, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(self.example_weights * self._xent)'], {}), True, 'import tensorflow as tf\n'), (181, 'tensorflow.constant', 'tf.constant', (['weights'], {'dtype': 'tf.float32', 'name': '"""class_weights"""'}), True, 'import tensorflow as tf\n'), (223, 'tensorflow.constant', 'tf.constant', (['weights'], {'dtype': 'tf.float32', 'name': '"""class_weights"""'}), True, 'import tensorflow as tf\n'), (231, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[batch_size]'], {'name': '"""seq_lens"""'}), True, 'import tensorflow as tf\n'), (233, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, max_sequence_len]'], {'name': 
'"""x"""'}), True, 'import tensorflow as tf\n'), (235, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, out_vocab_size]'], {'name': '"""y"""'}), True, 'import tensorflow as tf\n'), (237, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size]'], {'name': '"""example_weights"""'}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['tweet_embedding', 'hidden', 'bias'], {}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), True, 'import tensorflow as tf\n'), (246, 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', (['logits', 'self.y'], {}), True, 'import tensorflow as tf\n'), (247, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(self.example_weights * self._xent)'], {}), True, 'import tensorflow as tf\n'), (270, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, max_sequence_len]'], {'name': '"""x"""'}), True, 'import tensorflow as tf\n'), (272, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, max_sequence_len, out_vocab_size]'], {'name': '"""y"""'}), True, 'import tensorflow as tf\n'), (276, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[batch_size]'], {'name': '"""seq_lens"""'}), True, 'import tensorflow as tf\n'), (277, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size]'], {'name': '"""example_weights"""'}), True, 'import tensorflow as tf\n'), (289, 'tensorflow.tile', 'tf.tile', (['lengths_transposed', '[1, max_sequence_len]'], {}), True, 'import tensorflow as tf\n'), (292, 'tensorflow.range', 'tf.range', (['(0)', 'max_sequence_len', '(1)'], {}), True, 'import tensorflow as tf\n'), (293, 'tensorflow.expand_dims', 'tf.expand_dims', (['r', '(0)'], {}), True, 'import tensorflow as tf\n'), (294, 'tensorflow.tile', 'tf.tile', (['range_row', '[batch_size, 1]'], {}), True, 'import tensorflow as tf\n'), (302, 'tensorflow.less', 'tf.less', (['range_tiled', '(lengths_tiled + 1)'], {}), True, 'import tensorflow as tf\n'), (313, 'tensorflow.constant', 'tf.constant', (['weights'], {'dtype': 'tf.float32', 'name': '"""class_weights"""'}), True, 'import tensorflow as tf\n'), (354, 'tensorflow.get_variable', 'tf.get_variable', (['"""pred_mat"""', '[in_size, self._out_vocab_size]'], {}), True, 'import tensorflow as tf\n'), (356, 'tensorflow.get_variable', 'tf.get_variable', (['"""pred_bias"""', '[self._out_vocab_size]'], {}), True, 'import tensorflow as tf\n'), (393, 'tensorflow.log', 'tf.log', (['adjusted_probs'], {}), True, 'import tensorflow as tf\n'), (403, 'tensorflow.log', 'tf.log', (['adjusted_probs'], {}), True, 'import tensorflow as tf\n'), (404, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['entropy'], {}), True, 'import tensorflow as tf\n'), (24, 'tensorflow.squeeze', 'tf.squeeze', (['input_', '[1]'], {}), True, 'import tensorflow as tf\n'), (31, 'tensorflow.to_int32', 'tf.to_int32', (['self.seq_lens'], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.ones', 'tf.ones', (['sz'], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.zeros', 'tf.zeros', (['sz'], {}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['o_', 'pred_mat', 'pred_bias'], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self._mask', '(1)'], {'keep_dims': '(True)'}), True, 
'import tensorflow as tf\n'), (68, 'tensorflow.reshape', 'tf.reshape', (['self.preds_by_word', '[-1, self._out_vocab_size]'], {}), True, 'import tensorflow as tf\n'), (117, 'tensorflow.floor', 'tf.floor', (['random_tensor'], {}), True, 'import tensorflow as tf\n'), (120, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': 'csshape'}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.reshape', 'tf.reshape', (['wvs', '[-1, in_size]'], {}), True, 'import tensorflow as tf\n'), (136, 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['o_', 'pred_mat', 'pred_bias'], {}), True, 'import tensorflow as tf\n'), (137, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), True, 'import tensorflow as tf\n'), (185, 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', (['hidden_size'], {'use_peepholes': "model_params['peepholes']", 'num_proj': 'proj_size'}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""wordrnn"""'], {}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.nn.bidirectional_rnn', 'tf.nn.bidirectional_rnn', (['cell_fw', 'cell_bw', 'self._inputs'], {'dtype': 'tf.float32', 'sequence_length': 'self.seq_lens'}), True, 'import tensorflow as tf\n'), (208, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(self.example_weights * self._xent)'], {}), True, 'import tensorflow as tf\n'), (225, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""tweetff"""'], {}), True, 'import tensorflow as tf\n'), (226, 'tensorflow.get_variable', 'tf.get_variable', (['"""ff_hidden"""', '[c2v.embedding_dims, out_vocab_size]'], {}), True, 'import tensorflow as tf\n'), (228, 'tensorflow.get_variable', 'tf.get_variable', (['"""ff_bias"""', '[out_vocab_size]'], {}), True, 'import tensorflow as tf\n'), (281, 'tensorflow.squeeze', 'tf.squeeze', (['input_', '[1]'], {}), True, 'import tensorflow as tf\n'), (288, 'tensorflow.to_int32', 'tf.to_int32', (['self.seq_lens'], {}), True, 'import tensorflow as tf\n'), (309, 'tensorflow.ones', 'tf.ones', (['sz'], {}), True, 'import tensorflow as tf\n'), (309, 'tensorflow.zeros', 'tf.zeros', (['sz'], {}), True, 'import tensorflow as tf\n'), (320, 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', (['hidden_size'], {'use_peepholes': "model_params['peepholes']", 'num_proj': 'proj_size'}), True, 'import tensorflow as tf\n'), (328, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""wordrnn"""'], {}), True, 'import tensorflow as tf\n'), (334, 'tensorflow.nn.bidirectional_rnn', 'tf.nn.bidirectional_rnn', (['cell_fw', 'cell_bw', 'self._inputs'], {'dtype': 'tf.float32', 'sequence_length': 'self.seq_lens'}), True, 'import tensorflow as tf\n'), (343, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(self.example_weights * self._xent)'], {}), True, 'import tensorflow as tf\n'), (360, 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['o_', 'pred_mat', 'pred_bias'], {}), True, 'import tensorflow as tf\n'), (361, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), True, 'import tensorflow as tf\n'), (369, 'tensorflow.expand_dims', 'tf.expand_dims', (['self._mask', '(2)'], {}), True, 'import tensorflow as tf\n'), (25, 'tensorflow.split', 'tf.split', (['(1)', 'max_sequence_len', 'embeddings'], {}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.transpose', 'tf.transpose', (['self.cs'], {}), True, 'import tensorflow as tf\n'), (111, 'tensorflow.matmul', 'tf.matmul', (['o_', 'confidence_mat'], {}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.shape', 'tf.shape', (['confidence_scores'], {}), 
True, 'import tensorflow as tf\n'), (126, 'tensorflow.transpose', 'tf.transpose', (['self.cs'], {}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['preds', '(1)'], {}), True, 'import tensorflow as tf\n'), (189, 'tensorflow.nn.rnn_cell.DropoutWrapper', 'tf.nn.rnn_cell.DropoutWrapper', (['c'], {'input_keep_prob': 'dropout_keep_prob'}), True, 'import tensorflow as tf\n'), (194, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fw"""'], {}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""bw"""'], {}), True, 'import tensorflow as tf\n'), (282, 'tensorflow.split', 'tf.split', (['(1)', 'max_sequence_len', 'embeddings'], {}), True, 'import tensorflow as tf\n'), (324, 'tensorflow.nn.rnn_cell.DropoutWrapper', 'tf.nn.rnn_cell.DropoutWrapper', (['c'], {'input_keep_prob': 'dropout_keep_prob'}), True, 'import tensorflow as tf\n'), (329, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fw"""'], {}), True, 'import tensorflow as tf\n'), (331, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""bw"""'], {}), True, 'import tensorflow as tf\n')] |
S4NdeeP/sat-tensorflow | cdc237b2bed24afc655af06b6e9570c557311af7 | # =========================================================================================
# Implementation of "Show, Attend and Tell: Neural Image Caption Generation with Visual Attention".
# Notation used throughout this file:
# N is the batch size.
# L is the spatial size of the feature map (196 locations).
# D is the dimension of each image feature vector (512).
# T is the number of time steps, equal to the caption length minus 1 (16).
# V is the vocabulary size (about 10000).
# M is the dimension of the word vectors, i.e. the embedding size (default is 512).
# H is the dimension of the LSTM hidden state (default is 1024).
# =========================================================================================
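# For example, with these defaults a batch of N=64 images gives `features` of
# shape (64, 196, 512), `captions` of shape (64, 17), word embeddings of shape
# (64, 16, 512), and LSTM hidden states of shape (64, 1024).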
from __future__ import division
import tensorflow as tf
class CaptionGenerator(object):
def __init__(self, word_to_idx, dim_feature=[196, 512], dim_embed=512, dim_hidden=1024, n_time_step=16,
prev2out=True, ctx2out=True, alpha_c=0.0, selector=True, dropout=True):
"""
Args:
word_to_idx: word-to-index mapping dictionary.
dim_feature: (optional) Dimension of vggnet19 conv5_3 feature vectors.
dim_embed: (optional) Dimension of word embedding.
dim_hidden: (optional) Dimension of all hidden state.
n_time_step: (optional) Time step size of LSTM.
            prev2out: (optional) If true, feed the previously generated word into the output layer. (see Eq (7) for explanation)
            ctx2out: (optional) If true, feed the context vector into the output layer. (see Eq (7) for explanation)
alpha_c: (optional) Doubly stochastic regularization coefficient. (see Section (4.2.1) for explanation)
selector: (optional) gating scalar for context vector. (see Section (4.2.1) for explanation)
dropout: (optional) If true then dropout layer is added.
"""
self.word_to_idx = word_to_idx
self.idx_to_word = {i: w for w, i in word_to_idx.iteritems()}
self.prev2out = prev2out
self.ctx2out = ctx2out
self.alpha_c = alpha_c
self.selector = selector
self.dropout = dropout
self.V = len(word_to_idx)
self.L = dim_feature[0]
self.D = dim_feature[1]
self.M = dim_embed
self.H = dim_hidden
self.T = n_time_step
self._start = word_to_idx['<START>']
self._null = word_to_idx['<NULL>']
self.weight_initializer = tf.contrib.layers.xavier_initializer()
self.const_initializer = tf.constant_initializer(0.0)
self.emb_initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0)
        # Placeholders for features and captions
self.features = tf.placeholder(tf.float32, [None, self.L, self.D])
self.captions = tf.placeholder(tf.int32, [None, self.T + 1])
def _get_initial_lstm(self, features):
with tf.variable_scope('initial_lstm'):
features_mean = tf.reduce_mean(features, 1)
w_h = tf.get_variable('w_h', [self.D, self.H], initializer=self.weight_initializer)
b_h = tf.get_variable('b_h', [self.H], initializer=self.const_initializer)
h = tf.nn.tanh(tf.matmul(features_mean, w_h) + b_h)
w_c = tf.get_variable('w_c', [self.D, self.H], initializer=self.weight_initializer)
b_c = tf.get_variable('b_c', [self.H], initializer=self.const_initializer)
c = tf.nn.tanh(tf.matmul(features_mean, w_c) + b_c)
return c, h
def _word_embedding(self, inputs, reuse=False):
with tf.variable_scope('word_embedding', reuse=reuse):
w = tf.get_variable('w', [self.V, self.M], initializer=self.emb_initializer)
x = tf.nn.embedding_lookup(w, inputs, name='word_vector') # (N, T, M) or (N, M)
return x
def _project_features(self, features):
with tf.variable_scope('project_features'):
w = tf.get_variable('w', [self.D, self.D], initializer=self.weight_initializer)
features_flat = tf.reshape(features, [-1, self.D])
features_proj = tf.matmul(features_flat, w)
features_proj = tf.reshape(features_proj, [-1, self.L, self.D])
return features_proj
def _attention_layer(self, features, features_proj, h, reuse=False):
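        # Soft attention: score each of the L annotation vectors against the
        # current hidden state h, softmax the scores into alpha of shape (N, L),
        # and return the alpha-weighted sum of the features as the context (N, D).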
with tf.variable_scope('attention_layer', reuse=reuse):
w = tf.get_variable('w', [self.H, self.D], initializer=self.weight_initializer)
b = tf.get_variable('b', [self.D], initializer=self.const_initializer)
w_att = tf.get_variable('w_att', [self.D, 1], initializer=self.weight_initializer)
h_att = tf.nn.relu(features_proj + tf.expand_dims(tf.matmul(h, w), 1) + b) # (N, L, D)
out_att = tf.reshape(tf.matmul(tf.reshape(h_att, [-1, self.D]), w_att), [-1, self.L]) # (N, L)
alpha = tf.nn.softmax(out_att)
context = tf.reduce_sum(features * tf.expand_dims(alpha, 2), 1, name='context') #(N, D)
return context, alpha
def _selector(self, context, h, reuse=False):
with tf.variable_scope('selector', reuse=reuse):
w = tf.get_variable('w', [self.H, 1], initializer=self.weight_initializer)
b = tf.get_variable('b', [1], initializer=self.const_initializer)
beta = tf.nn.sigmoid(tf.matmul(h, w) + b, 'beta') # (N, 1)
context = tf.multiply(beta, context, name='selected_context')
return context, beta
def _decode_lstm(self, x, h, context, dropout=False, reuse=False):
with tf.variable_scope('logits', reuse=reuse):
w_h = tf.get_variable('w_h', [self.H, self.M], initializer=self.weight_initializer)
b_h = tf.get_variable('b_h', [self.M], initializer=self.const_initializer)
w_out = tf.get_variable('w_out', [self.M, self.V], initializer=self.weight_initializer)
b_out = tf.get_variable('b_out', [self.V], initializer=self.const_initializer)
if dropout:
h = tf.nn.dropout(h, 0.5)
h_logits = tf.matmul(h, w_h) + b_h
if self.ctx2out:
w_ctx2out = tf.get_variable('w_ctx2out', [self.D, self.M], initializer=self.weight_initializer)
h_logits += tf.matmul(context, w_ctx2out)
if self.prev2out:
h_logits += x
h_logits = tf.nn.tanh(h_logits)
if dropout:
h_logits = tf.nn.dropout(h_logits, 0.5)
out_logits = tf.matmul(h_logits, w_out) + b_out
return out_logits
def _batch_norm(self, x, mode='train', name=None):
return tf.contrib.layers.batch_norm(inputs=x,
decay=0.95,
center=True,
scale=True,
is_training=(mode=='train'),
updates_collections=None,
scope=(name+'batch_norm'))
def build_model(self):
features = self.features
captions = self.captions
batch_size = tf.shape(features)[0]
captions_in = captions[:, :self.T]
captions_out = captions[:, 1:]
mask = tf.to_float(tf.not_equal(captions_out, self._null))
# batch normalize feature vectors
features = self._batch_norm(features, mode='train', name='conv_features')
c, h = self._get_initial_lstm(features=features)
x = self._word_embedding(inputs=captions_in)
features_proj = self._project_features(features=features)
loss = 0.0
alpha_list = []
lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=self.H)
for t in range(self.T):
context, alpha = self._attention_layer(features, features_proj, h, reuse=(t!=0))
alpha_list.append(alpha)
if self.selector:
context, beta = self._selector(context, h, reuse=(t!=0))
with tf.variable_scope('lstm', reuse=(t!=0)):
_, (c, h) = lstm_cell(inputs=tf.concat(axis=1, values=[x[:,t,:], context]), state=[c, h])
logits = self._decode_lstm(x[:,t,:], h, context, dropout=self.dropout, reuse=(t!=0))
loss += tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=captions_out[:, t]) * mask[:, t])
if self.alpha_c > 0:
alphas = tf.transpose(tf.stack(alpha_list), (1, 0, 2)) # (N, T, L)
alphas_all = tf.reduce_sum(alphas, 1) # (N, L)
alpha_reg = self.alpha_c * tf.reduce_sum((16./196 - alphas_all) ** 2)
loss += alpha_reg
return loss / tf.to_float(batch_size)
def build_sampler(self, max_len=20):
features = self.features
# batch normalize feature vectors
features = self._batch_norm(features, mode='test', name='conv_features')
c, h = self._get_initial_lstm(features=features)
features_proj = self._project_features(features=features)
sampled_word_list = []
alpha_list = []
beta_list = []
lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=self.H, reuse=tf.get_variable_scope().reuse)
for t in range(max_len):
if t == 0:
x = self._word_embedding(inputs=tf.fill([tf.shape(features)[0]], self._start))
else:
x = self._word_embedding(inputs=sampled_word, reuse=True)
context, alpha = self._attention_layer(features, features_proj, h, reuse=(t!=0))
alpha_list.append(alpha)
if self.selector:
context, beta = self._selector(context, h, reuse=(t!=0))
beta_list.append(beta)
with tf.variable_scope('lstm', reuse=(t!=0)):
_, (c, h) = lstm_cell(inputs=tf.concat(axis=1, values=[x, context]), state=[c, h])
logits = self._decode_lstm(x, h, context, reuse=(t!=0))
sampled_word = tf.argmax(logits, 1)
sampled_word_list.append(sampled_word)
alphas = tf.transpose(tf.stack(alpha_list), (1, 0, 2)) # (N, T, L)
betas = tf.transpose(tf.squeeze(beta_list), (1, 0)) # (N, T)
sampled_captions = tf.transpose(tf.stack(sampled_word_list), (1, 0)) # (N, max_len)
return alphas, betas, sampled_captions
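# Usage sketch (illustrative, not part of the original file): assumes TF1 graph
# mode and a `word_to_idx` dict containing '<START>' and '<NULL>'; the function
# name and the variable-reuse pattern below are only one possible way to wire
# the training and sampling graphs together.
def _example_training_graph(word_to_idx):
    model = CaptionGenerator(word_to_idx, dim_feature=[196, 512], dim_embed=512,
                             dim_hidden=1024, n_time_step=16)
    loss = model.build_model()  # masked cross-entropy over the caption tokens
    # Reuse the same variables for the greedy sampler used at inference time.
    tf.get_variable_scope().reuse_variables()
    alphas, betas, sampled_captions = model.build_sampler(max_len=20)
    return loss, sampled_captions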
| [
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.reduce_sum",
"tensorflow.stack",
"tensorflow.random_uniform_initializer",
"tensorflow.squeeze",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.to_float",
"tensorflow.argmax",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.nn.tanh",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.contrib.layers.batch_norm",
"tensorflow.nn.embedding_lookup",
"tensorflow.not_equal",
"tensorflow.nn.softmax",
"tensorflow.multiply",
"tensorflow.reduce_mean",
"tensorflow.contrib.rnn.BasicLSTMCell",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.constant_initializer",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope"
] | core/model.py | [(51, 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-1.0)', 'maxval': '(1.0)'}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.L, self.D]'], {}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.T + 1]'], {}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', ([], {'inputs': 'x', 'decay': '(0.95)', 'center': '(True)', 'scale': '(True)', 'is_training': "(mode == 'train')", 'updates_collections': 'None', 'scope': "(name + 'batch_norm')"}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.contrib.rnn.BasicLSTMCell', 'tf.contrib.rnn.BasicLSTMCell', ([], {'num_units': 'self.H'}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""initial_lstm"""'], {}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['features', '(1)'], {}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.get_variable', 'tf.get_variable', (['"""w_h"""', '[self.D, self.H]'], {'initializer': 'self.weight_initializer'}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.get_variable', 'tf.get_variable', (['"""b_h"""', '[self.H]'], {'initializer': 'self.const_initializer'}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.get_variable', 'tf.get_variable', (['"""w_c"""', '[self.D, self.H]'], {'initializer': 'self.weight_initializer'}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.get_variable', 'tf.get_variable', (['"""b_c"""', '[self.H]'], {'initializer': 'self.const_initializer'}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""word_embedding"""'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.get_variable', 'tf.get_variable', (['"""w"""', '[self.V, self.M]'], {'initializer': 'self.emb_initializer'}), True, 'import tensorflow as tf\n'), (75, 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['w', 'inputs'], {'name': '"""word_vector"""'}), True, 'import tensorflow as tf\n'), (79, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""project_features"""'], {}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.get_variable', 'tf.get_variable', (['"""w"""', '[self.D, self.D]'], {'initializer': 'self.weight_initializer'}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.reshape', 'tf.reshape', (['features', '[-1, self.D]'], {}), True, 'import tensorflow as tf\n'), (82, 'tensorflow.matmul', 'tf.matmul', (['features_flat', 'w'], {}), True, 'import tensorflow as tf\n'), (83, 'tensorflow.reshape', 'tf.reshape', (['features_proj', '[-1, self.L, self.D]'], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""attention_layer"""'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.get_variable', 'tf.get_variable', (['"""w"""', '[self.H, self.D]'], {'initializer': 'self.weight_initializer'}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""', '[self.D]'], {'initializer': 'self.const_initializer'}), True, 
'import tensorflow as tf\n'), (90, 'tensorflow.get_variable', 'tf.get_variable', (['"""w_att"""', '[self.D, 1]'], {'initializer': 'self.weight_initializer'}), True, 'import tensorflow as tf\n'), (94, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['out_att'], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""selector"""'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (100, 'tensorflow.get_variable', 'tf.get_variable', (['"""w"""', '[self.H, 1]'], {'initializer': 'self.weight_initializer'}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""', '[1]'], {'initializer': 'self.const_initializer'}), True, 'import tensorflow as tf\n'), (103, 'tensorflow.multiply', 'tf.multiply', (['beta', 'context'], {'name': '"""selected_context"""'}), True, 'import tensorflow as tf\n'), (107, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""logits"""'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.get_variable', 'tf.get_variable', (['"""w_h"""', '[self.H, self.M]'], {'initializer': 'self.weight_initializer'}), True, 'import tensorflow as tf\n'), (109, 'tensorflow.get_variable', 'tf.get_variable', (['"""b_h"""', '[self.M]'], {'initializer': 'self.const_initializer'}), True, 'import tensorflow as tf\n'), (110, 'tensorflow.get_variable', 'tf.get_variable', (['"""w_out"""', '[self.M, self.V]'], {'initializer': 'self.weight_initializer'}), True, 'import tensorflow as tf\n'), (111, 'tensorflow.get_variable', 'tf.get_variable', (['"""b_out"""', '[self.V]'], {'initializer': 'self.const_initializer'}), True, 'import tensorflow as tf\n'), (123, 'tensorflow.nn.tanh', 'tf.nn.tanh', (['h_logits'], {}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.shape', 'tf.shape', (['features'], {}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.not_equal', 'tf.not_equal', (['captions_out', 'self._null'], {}), True, 'import tensorflow as tf\n'), (175, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['alphas', '(1)'], {}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.to_float', 'tf.to_float', (['batch_size'], {}), True, 'import tensorflow as tf\n'), (212, 'tensorflow.argmax', 'tf.argmax', (['logits', '(1)'], {}), True, 'import tensorflow as tf\n'), (215, 'tensorflow.stack', 'tf.stack', (['alpha_list'], {}), True, 'import tensorflow as tf\n'), (216, 'tensorflow.squeeze', 'tf.squeeze', (['beta_list'], {}), True, 'import tensorflow as tf\n'), (217, 'tensorflow.stack', 'tf.stack', (['sampled_word_list'], {}), True, 'import tensorflow as tf\n'), (114, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h', '(0.5)'], {}), True, 'import tensorflow as tf\n'), (115, 'tensorflow.matmul', 'tf.matmul', (['h', 'w_h'], {}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.get_variable', 'tf.get_variable', (['"""w_ctx2out"""', '[self.D, self.M]'], {'initializer': 'self.weight_initializer'}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.matmul', 'tf.matmul', (['context', 'w_ctx2out'], {}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h_logits', '(0.5)'], {}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.matmul', 'tf.matmul', (['h_logits', 'w_out'], {}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""lstm"""'], {'reuse': '(t != 0)'}), True, 'import tensorflow as tf\n'), (174, 'tensorflow.stack', 'tf.stack', (['alpha_list'], {}), True, 'import tensorflow as tf\n'), (176, 
'tensorflow.reduce_sum', 'tf.reduce_sum', (['((16.0 / 196 - alphas_all) ** 2)'], {}), True, 'import tensorflow as tf\n'), (208, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""lstm"""'], {'reuse': '(t != 0)'}), True, 'import tensorflow as tf\n'), (65, 'tensorflow.matmul', 'tf.matmul', (['features_mean', 'w_h'], {}), True, 'import tensorflow as tf\n'), (69, 'tensorflow.matmul', 'tf.matmul', (['features_mean', 'w_c'], {}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.reshape', 'tf.reshape', (['h_att', '[-1, self.D]'], {}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.expand_dims', 'tf.expand_dims', (['alpha', '(2)'], {}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.matmul', 'tf.matmul', (['h', 'w'], {}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'captions_out[:, (t)]'}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.concat', 'tf.concat', ([], {'axis': '(1)', 'values': '[x[:, (t), :], context]'}), True, 'import tensorflow as tf\n'), (209, 'tensorflow.concat', 'tf.concat', ([], {'axis': '(1)', 'values': '[x, context]'}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.matmul', 'tf.matmul', (['h', 'w'], {}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.shape', 'tf.shape', (['features'], {}), True, 'import tensorflow as tf\n')] |
whigy/chair-gan | 8144b34919a7c61487edc559738801b341a70331 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import tensorflow as tf
import numpy as np
import tfimage as im
import threading
import time
import cv2
from skimage.morphology import thin
edge_pool = None
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", required=True, help="path to folder containing images")
parser.add_argument("--output_dir", required=True, help="output path")
parser.add_argument("--operation", required=True, choices=["grayscale", "resize", "blank", "combine", "edges", "skeletonize"])
parser.add_argument("--workers", type=int, default=1, help="number of workers")
# resize
parser.add_argument("--pad", action="store_true", help="pad instead of crop for resize operation")
parser.add_argument("--size", type=int, default=256, help="size to use for resize operation")
# combine
parser.add_argument("--b_dir", type=str, help="path to folder containing B images for combine operation")
# edges
parser.add_argument("--crop", action="store_true", help="crop the image before edge detection. Only works when background is white.")
parser.add_argument("--crop_dir", help="path for cropped original images")
a = parser.parse_args()
def resize(src):
height, width, _ = src.shape
dst = src
if height != width:
if a.pad:
size = max(height, width)
# pad to correct ratio
oh = (size - height) // 2
ow = (size - width) // 2
dst = im.pad(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
else:
# crop to correct ratio
size = min(height, width)
oh = (height - size) // 2
ow = (width - size) // 2
dst = im.crop(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
assert(dst.shape[0] == dst.shape[1])
size, _, _ = dst.shape
if size > a.size:
dst = im.downscale(images=dst, size=[a.size, a.size])
elif size < a.size:
dst = im.upscale(images=dst, size=[a.size, a.size])
return dst
def blank(src):
height, width, _ = src.shape
if height != width:
raise Exception("non-square image")
image_size = width
size = int(image_size * 0.3)
offset = int(image_size / 2 - size / 2)
dst = src
dst[offset:offset + size,offset:offset + size,:] = np.ones([size, size, 3])
return dst
def combine(src, src_path):
if a.b_dir is None:
raise Exception("missing b_dir")
# find corresponding file in b_dir, could have a different extension
basename, _ = os.path.splitext(os.path.basename(src_path))
for ext in [".png", ".jpg"]:
sibling_path = os.path.join(a.b_dir, basename + ext)
if tf.io.gfile.exists(sibling_path):
sibling = im.load(sibling_path)
break
else:
raise Exception("could not find sibling image for " + src_path)
# make sure that dimensions are correct
height, width, _ = src.shape
if height != sibling.shape[0] or width != sibling.shape[1]:
raise Exception("differing sizes")
# convert both images to RGB if necessary
if src.shape[2] == 1:
src = im.grayscale_to_rgb(images=src)
if sibling.shape[2] == 1:
sibling = im.grayscale_to_rgb(images=sibling)
# remove alpha channel
if src.shape[2] == 4:
src = src[:,:,:3]
if sibling.shape[2] == 4:
sibling = sibling[:,:,:3]
return np.concatenate([src, sibling], axis=1)
def grayscale(src):
return im.grayscale_to_rgb(images=im.rgb_to_grayscale(images=src))
def crop_and_resize(src, return_gray = False):
"""
    Crop the image to discard the white padding, then resize it to the training size.
    Based on: https://stackoverflow.com/questions/48395434/how-to-crop-or-remove-white-background-from-an-image
    [OBS!] only works on images with a white background
"""
height, width, _ = src.shape
# (1) Convert to gray, and threshold
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
th, threshed = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV)
# (2) Morph-op to remove noise
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
morphed = cv2.morphologyEx(threshed, cv2.MORPH_CLOSE, kernel)
# (3) Find the max-area contour
cnts = cv2.findContours(morphed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
cnt = sorted(cnts, key=cv2.contourArea)[-1]
# (4) Crop
x, y, w, h = cv2.boundingRect(cnt)
    x_1 = max(0, x - 10)
    y_1 = max(0, y - 10)
x_2 = min(x+w, width)
y_2 = min(y+h, height)
if return_gray:
dst = gray[y_1:y_2, x_1:x_2]
else:
dst = src[y_1:y_2, x_1:x_2]
    # pad with white so the crop becomes square before resizing
height = int(max(0, w - h) / 2.0)
width = int(max(0, h - w) / 2.0)
padded = cv2.copyMakeBorder(dst, height, height, width, width, cv2.BORDER_CONSTANT, value=[255, 255, 255])
return cv2.resize(padded, (a.size, a.size), interpolation=cv2.INTER_NEAREST)
def edges(src):
src = np.asarray(src * 255, np.uint8)
if a.crop:
src = crop_and_resize(src)
    # detect edges using Canny edge detection
edge = cv2.bitwise_not(cv2.Canny(src, 80, 130))
dst = cv2.cvtColor(edge, cv2.COLOR_GRAY2RGB)
if a.crop:
return np.asarray(src/255., np.float32), dst
else:
return dst
def skeletonize_edge(src):
# Process sketch to fit input. Only used for test input
src = np.asarray(src * 255, np.uint8)
# Crop the sketch and minimize white padding.
cropped = crop_and_resize(src, return_gray=True)
# Skeletonize the lines
skeleton = thin(cv2.bitwise_not(cropped))
final = np.asarray(1 - np.float32(skeleton))
return cv2.cvtColor(final, cv2.COLOR_GRAY2BGR)
def process(src_path, dst_path):
src = im.load(src_path)
if a.operation == "edges":
if a.crop:
name = dst_path.split("/")[-1]
src, dst = edges(src)
im.save(src, os.path.join(a.crop_dir, name))
else:
dst = edges(src)
elif a.operation == "grayscale":
dst = grayscale(src)
elif a.operation == "resize":
dst = resize(src)
elif a.operation == "blank":
dst = blank(src)
elif a.operation == "combine":
dst = combine(src, src_path)
elif a.operation == "skeletonize":
dst = skeletonize_edge(src)
else:
raise Exception("invalid operation")
im.save(dst, dst_path)
complete_lock = threading.Lock()
start = None
num_complete = 0
total = 0
def complete():
global num_complete, rate, last_complete
with complete_lock:
num_complete += 1
now = time.time()
elapsed = now - start
rate = num_complete / elapsed
if rate > 0:
remaining = (total - num_complete) / rate
else:
remaining = 0
print("%d/%d complete %0.2f images/sec %dm%ds elapsed %dm%ds remaining" % (num_complete, total, rate, elapsed // 60, elapsed % 60, remaining // 60, remaining % 60))
last_complete = now
def main():
if not tf.io.gfile.exists(a.output_dir):
tf.io.gfile.makedirs(a.output_dir)
if a.operation == "edges" and a.crop:
try:
if not tf.io.gfile.exists(a.crop_dir):
tf.io.gfile.makedirs(a.crop_dir)
except Exception as e:
raise Exception("invalid crop_dir: {:s}".format(e))
src_paths = []
dst_paths = []
skipped = 0
for src_path in im.find(a.input_dir):
name, _ = os.path.splitext(os.path.basename(src_path))
dst_path = os.path.join(a.output_dir, name + ".png")
if tf.io.gfile.exists(dst_path):
skipped += 1
else:
src_paths.append(src_path)
dst_paths.append(dst_path)
print("skipping %d files that already exist" % skipped)
global total
total = len(src_paths)
print("processing %d files" % total)
global start
start = time.time()
if a.workers == 1:
with tf.Session() as sess:
for src_path, dst_path in zip(src_paths, dst_paths):
process(src_path, dst_path)
complete()
else:
queue = tf.train.input_producer(zip(src_paths, dst_paths), shuffle=False, num_epochs=1)
dequeue_op = queue.dequeue()
def worker(coord):
with sess.as_default():
while not coord.should_stop():
try:
src_path, dst_path = sess.run(dequeue_op)
except tf.errors.OutOfRangeError:
coord.request_stop()
break
process(src_path, dst_path)
complete()
# init epoch counter for the queue
local_init_op = tf.local_variables_initializer()
with tf.Session() as sess:
sess.run(local_init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(a.workers):
t = threading.Thread(target=worker, args=(coord,))
t.start()
threads.append(t)
try:
coord.join(threads)
except KeyboardInterrupt:
coord.request_stop()
coord.join(threads)
main()
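# Example invocations (illustrative only; the paths below are hypothetical):
#   python tools/process.py --input_dir data/chairs --output_dir data/edges \
#       --operation edges --crop --crop_dir data/cropped
#   python tools/process.py --input_dir data/edges --b_dir data/cropped \
#       --output_dir data/combined --operation combine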
| [
"tensorflow.local_variables_initializer",
"tensorflow.io.gfile.exists",
"numpy.asarray",
"tensorflow.train.start_queue_runners",
"tensorflow.train.Coordinator",
"tensorflow.io.gfile.makedirs",
"numpy.ones",
"numpy.concatenate",
"numpy.float32",
"tensorflow.Session"
] | tools/process.py | [(19, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), False, 'import argparse\n'), (202, 'threading.Lock', 'threading.Lock', ([], {}), False, 'import threading\n'), (72, 'numpy.ones', 'np.ones', (['[size, size, 3]'], {}), True, 'import numpy as np\n'), (109, 'numpy.concatenate', 'np.concatenate', (['[src, sibling]'], {'axis': '(1)'}), True, 'import numpy as np\n'), (125, 'cv2.cvtColor', 'cv2.cvtColor', (['src', 'cv2.COLOR_BGR2GRAY'], {}), False, 'import cv2\n'), (126, 'cv2.threshold', 'cv2.threshold', (['gray', '(240)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), False, 'import cv2\n'), (129, 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(11, 11)'], {}), False, 'import cv2\n'), (130, 'cv2.morphologyEx', 'cv2.morphologyEx', (['threshed', 'cv2.MORPH_CLOSE', 'kernel'], {}), False, 'import cv2\n'), (137, 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), False, 'import cv2\n'), (149, 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['dst', 'height', 'height', 'width', 'width', 'cv2.BORDER_CONSTANT'], {'value': '[255, 255, 255]'}), False, 'import cv2\n'), (151, 'cv2.resize', 'cv2.resize', (['padded', '(a.size, a.size)'], {'interpolation': 'cv2.INTER_NEAREST'}), False, 'import cv2\n'), (155, 'numpy.asarray', 'np.asarray', (['(src * 255)', 'np.uint8'], {}), True, 'import numpy as np\n'), (160, 'cv2.cvtColor', 'cv2.cvtColor', (['edge', 'cv2.COLOR_GRAY2RGB'], {}), False, 'import cv2\n'), (169, 'numpy.asarray', 'np.asarray', (['(src * 255)', 'np.uint8'], {}), True, 'import numpy as np\n'), (175, 'cv2.cvtColor', 'cv2.cvtColor', (['final', 'cv2.COLOR_GRAY2BGR'], {}), False, 'import cv2\n'), (178, 'tfimage.load', 'im.load', (['src_path'], {}), True, 'import tfimage as im\n'), (199, 'tfimage.save', 'im.save', (['dst', 'dst_path'], {}), True, 'import tfimage as im\n'), (239, 'tfimage.find', 'im.find', (['a.input_dir'], {}), True, 'import tfimage as im\n'), (256, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (56, 'tfimage.downscale', 'im.downscale', ([], {'images': 'dst', 'size': '[a.size, a.size]'}), True, 'import tfimage as im\n'), (81, 'os.path.basename', 'os.path.basename', (['src_path'], {}), False, 'import os\n'), (83, 'os.path.join', 'os.path.join', (['a.b_dir', '(basename + ext)'], {}), False, 'import os\n'), (84, 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['sibling_path'], {}), True, 'import tensorflow as tf\n'), (97, 'tfimage.grayscale_to_rgb', 'im.grayscale_to_rgb', ([], {'images': 'src'}), True, 'import tfimage as im\n'), (100, 'tfimage.grayscale_to_rgb', 'im.grayscale_to_rgb', ([], {'images': 'sibling'}), True, 'import tfimage as im\n'), (133, 'cv2.findContours', 'cv2.findContours', (['morphed', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), False, 'import cv2\n'), (159, 'cv2.Canny', 'cv2.Canny', (['src', '(80)', '(130)'], {}), False, 'import cv2\n'), (173, 'cv2.bitwise_not', 'cv2.bitwise_not', (['cropped'], {}), False, 'import cv2\n'), (212, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (226, 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['a.output_dir'], {}), True, 'import tensorflow as tf\n'), (227, 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['a.output_dir'], {}), True, 'import tensorflow as tf\n'), (241, 'os.path.join', 'os.path.join', (['a.output_dir', "(name + '.png')"], {}), False, 'import os\n'), (242, 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['dst_path'], {}), True, 'import tensorflow as tf\n'), (282, 
'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (44, 'tfimage.pad', 'im.pad', ([], {'image': 'dst', 'offset_height': 'oh', 'offset_width': 'ow', 'target_height': 'size', 'target_width': 'size'}), True, 'import tfimage as im\n'), (50, 'tfimage.crop', 'im.crop', ([], {'image': 'dst', 'offset_height': 'oh', 'offset_width': 'ow', 'target_height': 'size', 'target_width': 'size'}), True, 'import tfimage as im\n'), (58, 'tfimage.upscale', 'im.upscale', ([], {'images': 'dst', 'size': '[a.size, a.size]'}), True, 'import tfimage as im\n'), (85, 'tfimage.load', 'im.load', (['sibling_path'], {}), True, 'import tfimage as im\n'), (113, 'tfimage.rgb_to_grayscale', 'im.rgb_to_grayscale', ([], {'images': 'src'}), True, 'import tfimage as im\n'), (162, 'numpy.asarray', 'np.asarray', (['(src / 255.0)', 'np.float32'], {}), True, 'import numpy as np\n'), (174, 'numpy.float32', 'np.float32', (['skeleton'], {}), True, 'import numpy as np\n'), (240, 'os.path.basename', 'os.path.basename', (['src_path'], {}), False, 'import os\n'), (261, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (283, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), True, 'import tensorflow as tf\n'), (287, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord'}), True, 'import tensorflow as tf\n'), (183, 'os.path.join', 'os.path.join', (['a.crop_dir', 'name'], {}), False, 'import os\n'), (230, 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['a.crop_dir'], {}), True, 'import tensorflow as tf\n'), (231, 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['a.crop_dir'], {}), True, 'import tensorflow as tf\n'), (289, 'threading.Thread', 'threading.Thread', ([], {'target': 'worker', 'args': '(coord,)'}), False, 'import threading\n')] |
shfshf/ner_s2s | a04311310bddf396b551969fd1e63fdb3fc2ca0b | from pathlib import Path
import tensorflow as tf
import numpy as np
from ner_s2s.metrics import precision, recall, f1, correct_rate
class Model(object):
@classmethod
def default_params(cls):
return {}
@classmethod
def get_model_name(cls):
return cls.__name__
@classmethod
def model_fn(cls, features, labels, mode, params):
instance = cls(features, labels, mode, params)
return instance()
def __init__(self, features, labels, mode, params):
self.features = features
self.labels = labels
self.mode = mode
self.params = params
def input_layer(self):
# data = np.loadtxt(self.params['vocab'], dtype=np.unicode, encoding=None)
data = self.params["vocab_data"]
mapping_strings = tf.Variable(data)
vocab_words = tf.contrib.lookup.index_table_from_tensor(
mapping_strings, num_oov_buckets=1
)
# Word Embeddings
words = tf.identity(self.features["words"], name="input_words")
word_ids = vocab_words.lookup(words)
#
# raw_nwords = tf.identity(features['words_len'], name='input_words_len')
# nwords = tf.feature_column.input_layer({'words_len': raw_nwords}, params['words_len_feature_columns'])
# nwords = tf.reshape(nwords, [-1])
# nwords = tf.to_int32(nwords)
# words = features['words']
# words = tf.convert_to_tensor(words)
#
# nwords = features['words_len']
# nwords = tf.convert_to_tensor(nwords)
nwords = tf.identity(self.features["words_len"], name="input_words_len")
# get tag info
# with Path(self.params['tags']).open() as f:
indices = [
idx
for idx, tag in enumerate(self.params["tags_data"])
if tag.strip() != "O"
]
num_tags = len(indices) + 1
# # true tags to ids
# if self.mode == tf.estimator.ModeKeys.PREDICT:
# true_tag_ids = 0
# else:
# true_tag_ids = self.tag2id(self.labels)
return indices, num_tags, word_ids, nwords
def embedding_layer(self, word_ids):
        # Alternatively, load pre-trained embeddings from file:
        # glove = np.load(params['glove'])['embeddings'] # np.array
        # Here the embedding matrix is initialized to zeros and trained from scratch.
glove = np.zeros(
(self.params["embedding_vocabulary_size"], self.params["embedding_dim"]),
dtype=np.float32,
)
# Add OOV word embedding
embedding_array = np.vstack([glove, [[0.0] * self.params["embedding_dim"]]])
embedding_variable = tf.Variable(
embedding_array, dtype=tf.float32, trainable=True
)
# embedding_variable = tf.get_variable(
# 'embedding_variable',
# shape=(self.params["embedding_vocabulary_size"] + 1, self.params["embedding_dim"]),
# dtype=tf.float32,
# initializer=tf.contrib.layers.xavier_initializer(),
# regularizer=tf.contrib.layers.l2_regularizer(self.params["regularizer_rate"]),
# trainable=True
# )
embeddings = tf.nn.embedding_lookup(embedding_variable, word_ids)
return embeddings
def dropout_layer(self, data):
training = self.mode == tf.estimator.ModeKeys.TRAIN
output = tf.layers.dropout(data, rate=self.params["dropout"], training=training)
return output
def layer_normalization_layer(self, data):
output = tf.contrib.layers.layer_norm(data)
return output
def dense_layer(self, data, num_tags):
logits = tf.layers.dense(data, num_tags)
return logits
def load_tag_data(self):
# data = np.loadtxt(self.params['tags'], dtype=np.unicode, encoding=None)
data = self.params["tags_data"]
mapping_strings = tf.Variable(data)
return mapping_strings
def load_word_data(self):
data = np.loadtxt(self.params["words"], dtype=np.unicode, encoding=None)
mapping_strings = tf.Variable(data.reshape((-1,)))
return mapping_strings
def tag2id(self, labels, name=None):
mapping_strings = self.load_tag_data()
vocab_tags = tf.contrib.lookup.index_table_from_tensor(
mapping_strings, name=name
)
tags = vocab_tags.lookup(labels)
return tags
def id2tag(self, pred_ids, name=None):
mapping_strings = self.load_tag_data()
reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping_strings, name=name
)
pred_strings = reverse_vocab_tags.lookup(tf.to_int64(pred_ids))
return pred_strings
def id2word(self, word_ids, name=None):
mapping_strings = self.load_word_data()
reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping_strings, name=name
)
word_strings = reverse_vocab_tags.lookup(tf.to_int64(word_ids))
return word_strings
def loss_layer(self, preds, ground_true, nwords, crf_params):
with tf.name_scope("CRF_log_likelihood"):
log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
preds, ground_true, nwords, crf_params
)
loss = tf.reduce_mean(-log_likelihood)
# regularizer = tf.contrib.layers.l2_regularizer(0.001)
# reg = regularizer(embedding_variable)
# loss += reg
return loss
def crf_decode_layer(self, logits, crf_params, nwords):
with tf.name_scope("CRF_decode"):
pred_ids, _ = tf.contrib.crf.crf_decode(logits, crf_params, nwords)
return pred_ids
def compute_metrics(self, tags, pred_ids, num_tags, indices, nwords):
weights = tf.sequence_mask(nwords)
# metrics_correct_rate, golden, predict = correct_rate(tags, pred_ids)
# metrics_correct_rate = correct_rate(tags, pred_ids, weights)
metrics = {
"acc": tf.metrics.accuracy(tags, pred_ids, weights),
"precision": precision(tags, pred_ids, num_tags, indices, weights),
"recall": recall(tags, pred_ids, num_tags, indices, weights),
"f1": f1(tags, pred_ids, num_tags, indices, weights),
"correct_rate": correct_rate(tags, pred_ids, weights),
# 'golden': (golden, tf.zeros([], tf.int32)),
# 'predict': (predict, tf.zeros([], tf.int32))
}
for metric_name, op in metrics.items():
tf.summary.scalar(metric_name, op[1])
return metrics
def call(self, embeddings, nwords):
raise NotImplementedError
def __call__(self):
with tf.variable_scope("task_independent"):
indices, num_tags, word_ids, nwords = self.input_layer()
embeddings = self.embedding_layer(word_ids)
data = self.call(embeddings, nwords)
data = self.dropout_layer(data)
data = self.layer_normalization_layer(data)
with tf.variable_scope("task_dependent"):
logits = self.dense_layer(data, num_tags)
crf_params = tf.get_variable("crf", [num_tags, num_tags], dtype=tf.float32)
pred_ids = self.crf_decode_layer(logits, crf_params, nwords)
pred_strings = self.id2tag(pred_ids, name="predict")
# word_strings = self.id2word(word_ids, name='word_strings')
# print(word_strings)
if self.mode == tf.estimator.ModeKeys.PREDICT:
predictions = {"pred_ids": pred_ids, "tags": pred_strings}
return tf.estimator.EstimatorSpec(self.mode, predictions=predictions)
else:
# true_tag_ids = self.labels
true_tag_ids = self.tag2id(self.labels, "labels")
# print(pred_strings)
# print(self.labels)
loss = self.loss_layer(logits, true_tag_ids, nwords, crf_params)
metrics = self.compute_metrics(
true_tag_ids, pred_ids, num_tags, indices, nwords
)
if self.mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(
self.mode, loss=loss, eval_metric_ops=metrics
)
elif self.mode == tf.estimator.ModeKeys.TRAIN:
optimizer_params = self.params.get("optimizer_params", {})
global_step = tf.train.get_or_create_global_step()
                # apply learning rate decay if it is already set up.
lr_decay_params = optimizer_params.pop("learning_rate_exp_decay", {})
# learning_rate = tf.train.exponential_decay(
# self.params["learning_rate"],
# global_step,
# decay_steps=self.params["lr_decay_steps"],
# decay_rate=self.params["lr_decay_rate"],
# staircase=True
# )
if lr_decay_params:
learning_rate = tf.train.exponential_decay(
lr_decay_params["learning_rate"],
global_step,
decay_steps=lr_decay_params["lr_decay_steps"],
decay_rate=lr_decay_params["lr_decay_rate"],
staircase=lr_decay_params.get("staircase", True),
)
optimizer_params["learning_rate"] = learning_rate
var_list = None
if self.params["warm_start_dir"]:
output_vars1 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="task_dependent")
output_vars2 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="task_independent/Variable_1")
var_list = [output_vars1, output_vars2]
train_op = tf.train.AdamOptimizer(
# learning_rate=self.params["learning_rate"]
# **self.params.get("optimizer_params", {})
# learning_rate=learning_rate
**optimizer_params
).minimize(loss, global_step=global_step, var_list=var_list)
return tf.estimator.EstimatorSpec(
self.mode, loss=loss, train_op=train_op
)
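# Subclass sketch (illustrative, not part of the original file): `Model.call`
# is abstract, so a concrete model only has to map the embedded token sequence
# to per-token features; dropout, the dense projection and the CRF are handled
# by Model.__call__. The class name and the "lstm_size" parameter are hypothetical.
class ExampleBiLstmModel(Model):
    def call(self, embeddings, nwords):
        lstm_size = self.params.get("lstm_size", 100)
        cell_fw = tf.contrib.rnn.LSTMCell(lstm_size)
        cell_bw = tf.contrib.rnn.LSTMCell(lstm_size)
        (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
            cell_fw, cell_bw, embeddings, sequence_length=nwords, dtype=tf.float32
        )
        # Concatenated forward/backward outputs, shape (batch, time, 2 * lstm_size).
        return tf.concat([output_fw, output_bw], axis=-1)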
| [
"tensorflow.get_variable",
"tensorflow.contrib.lookup.index_table_from_tensor",
"tensorflow.metrics.accuracy",
"tensorflow.layers.dropout",
"numpy.vstack",
"tensorflow.train.AdamOptimizer",
"tensorflow.contrib.crf.crf_decode",
"tensorflow.contrib.crf.crf_log_likelihood",
"tensorflow.summary.scalar",
"tensorflow.to_int64",
"tensorflow.Variable",
"tensorflow.get_collection",
"tensorflow.layers.dense",
"tensorflow.train.get_or_create_global_step",
"tensorflow.name_scope",
"numpy.zeros",
"tensorflow.identity",
"tensorflow.sequence_mask",
"tensorflow.nn.embedding_lookup",
"tensorflow.contrib.lookup.index_to_string_table_from_tensor",
"tensorflow.reduce_mean",
"tensorflow.contrib.layers.layer_norm",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.variable_scope",
"numpy.loadtxt"
] | ner_s2s/ner_estimator/algorithms/model.py | [(31, 'tensorflow.Variable', 'tf.Variable', (['data'], {}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.contrib.lookup.index_table_from_tensor', 'tf.contrib.lookup.index_table_from_tensor', (['mapping_strings'], {'num_oov_buckets': '(1)'}), True, 'import tensorflow as tf\n'), (37, 'tensorflow.identity', 'tf.identity', (["self.features['words']"], {'name': '"""input_words"""'}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.identity', 'tf.identity', (["self.features['words_len']"], {'name': '"""input_words_len"""'}), True, 'import tensorflow as tf\n'), (76, 'numpy.zeros', 'np.zeros', (["(self.params['embedding_vocabulary_size'], self.params['embedding_dim'])"], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (82, 'numpy.vstack', 'np.vstack', (["[glove, [[0.0] * self.params['embedding_dim']]]"], {}), True, 'import numpy as np\n'), (84, 'tensorflow.Variable', 'tf.Variable', (['embedding_array'], {'dtype': 'tf.float32', 'trainable': '(True)'}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding_variable', 'word_ids'], {}), True, 'import tensorflow as tf\n'), (103, 'tensorflow.layers.dropout', 'tf.layers.dropout', (['data'], {'rate': "self.params['dropout']", 'training': 'training'}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['data'], {}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.layers.dense', 'tf.layers.dense', (['data', 'num_tags'], {}), True, 'import tensorflow as tf\n'), (120, 'tensorflow.Variable', 'tf.Variable', (['data'], {}), True, 'import tensorflow as tf\n'), (125, 'numpy.loadtxt', 'np.loadtxt', (["self.params['words']"], {'dtype': 'np.unicode', 'encoding': 'None'}), True, 'import numpy as np\n'), (132, 'tensorflow.contrib.lookup.index_table_from_tensor', 'tf.contrib.lookup.index_table_from_tensor', (['mapping_strings'], {'name': 'name'}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.contrib.lookup.index_to_string_table_from_tensor', 'tf.contrib.lookup.index_to_string_table_from_tensor', (['mapping_strings'], {'name': 'name'}), True, 'import tensorflow as tf\n'), (152, 'tensorflow.contrib.lookup.index_to_string_table_from_tensor', 'tf.contrib.lookup.index_to_string_table_from_tensor', (['mapping_strings'], {'name': 'name'}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(-log_likelihood)'], {}), True, 'import tensorflow as tf\n'), (181, 'tensorflow.sequence_mask', 'tf.sequence_mask', (['nwords'], {}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.to_int64', 'tf.to_int64', (['pred_ids'], {}), True, 'import tensorflow as tf\n'), (156, 'tensorflow.to_int64', 'tf.to_int64', (['word_ids'], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.name_scope', 'tf.name_scope', (['"""CRF_log_likelihood"""'], {}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.contrib.crf.crf_log_likelihood', 'tf.contrib.crf.crf_log_likelihood', (['preds', 'ground_true', 'nwords', 'crf_params'], {}), True, 'import tensorflow as tf\n'), (175, 'tensorflow.name_scope', 'tf.name_scope', (['"""CRF_decode"""'], {}), True, 'import tensorflow as tf\n'), (176, 'tensorflow.contrib.crf.crf_decode', 'tf.contrib.crf.crf_decode', (['logits', 'crf_params', 'nwords'], {}), True, 'import tensorflow as tf\n'), (187, 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', (['tags', 'pred_ids', 'weights'], {}), True, 'import tensorflow as 
tf\n'), (188, 'ner_s2s.metrics.precision', 'precision', (['tags', 'pred_ids', 'num_tags', 'indices', 'weights'], {}), False, 'from ner_s2s.metrics import precision, recall, f1, correct_rate\n'), (189, 'ner_s2s.metrics.recall', 'recall', (['tags', 'pred_ids', 'num_tags', 'indices', 'weights'], {}), False, 'from ner_s2s.metrics import precision, recall, f1, correct_rate\n'), (190, 'ner_s2s.metrics.f1', 'f1', (['tags', 'pred_ids', 'num_tags', 'indices', 'weights'], {}), False, 'from ner_s2s.metrics import precision, recall, f1, correct_rate\n'), (191, 'ner_s2s.metrics.correct_rate', 'correct_rate', (['tags', 'pred_ids', 'weights'], {}), False, 'from ner_s2s.metrics import precision, recall, f1, correct_rate\n'), (197, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['metric_name', 'op[1]'], {}), True, 'import tensorflow as tf\n'), (205, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""task_independent"""'], {}), True, 'import tensorflow as tf\n'), (215, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""task_dependent"""'], {}), True, 'import tensorflow as tf\n'), (218, 'tensorflow.get_variable', 'tf.get_variable', (['"""crf"""', '[num_tags, num_tags]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (231, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', (['self.mode'], {'predictions': 'predictions'}), True, 'import tensorflow as tf\n'), (247, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', (['self.mode'], {'loss': 'loss', 'eval_metric_ops': 'metrics'}), True, 'import tensorflow as tf\n'), (254, 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), True, 'import tensorflow as tf\n'), (289, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', (['self.mode'], {'loss': 'loss', 'train_op': 'train_op'}), True, 'import tensorflow as tf\n'), (278, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""task_dependent"""'}), True, 'import tensorflow as tf\n'), (279, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""task_independent/Variable_1"""'}), True, 'import tensorflow as tf\n'), (282, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), True, 'import tensorflow as tf\n')] |
CrazyAlan/nextAI | e871b4078e9d591121f9093f2ba022e1c9115f7b | """Functions for building the face recognition network.
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
def conv(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name, phase_train=True, use_batch_norm=True, weight_decay=0.0):
with tf.variable_scope(name):
l2_regularizer = lambda t: l2_loss(t, weight=weight_decay)
kernel = tf.get_variable("weights", [kH, kW, nIn, nOut],
initializer=tf.truncated_normal_initializer(stddev=1e-1),
regularizer=l2_regularizer, dtype=inpOp.dtype)
cnv = tf.nn.conv2d(inpOp, kernel, [1, dH, dW, 1], padding=padType)
if use_batch_norm:
conv_bn = batch_norm(cnv, phase_train)
else:
conv_bn = cnv
biases = tf.get_variable("biases", [nOut], initializer=tf.constant_initializer(), dtype=inpOp.dtype)
bias = tf.nn.bias_add(conv_bn, biases)
conv1 = tf.nn.relu(bias)
return conv1
def convLinear(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name, phase_train=True, use_batch_norm=True, weight_decay=0.0):
with tf.variable_scope(name):
l2_regularizer = lambda t: l2_loss(t, weight=weight_decay)
kernel = tf.get_variable("weights", [kH, kW, nIn, nOut],
initializer=tf.truncated_normal_initializer(stddev=1e-1),
regularizer=l2_regularizer, dtype=inpOp.dtype)
cnv = tf.nn.conv2d(inpOp, kernel, [1, dH, dW, 1], padding=padType)
if use_batch_norm:
conv_bn = batch_norm(cnv, phase_train)
else:
conv_bn = cnv
# biases = tf.get_variable("biases", [nOut], initializer=tf.constant_initializer(), dtype=inpOp.dtype)
# bias = tf.nn.bias_add(conv_bn, biases)
# conv1 = tf.nn.relu(bias)
return conv_bn
def convMfm(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name, phase_train=True, use_batch_norm=True, weight_decay=0.0):
net_1 = convLinear(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name+'_1', phase_train, use_batch_norm, weight_decay)
net_2 = convLinear(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name+'_2', phase_train, use_batch_norm, weight_decay)
out = tf.maximum(net_1, net_2)
return out
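# Illustrative sketch (not part of the original file): convMfm merges two
# parallel linear convolution branches with an element-wise maximum, i.e. a
# Max-Feature-Map style activation. The helper below shows the same
# element-wise-max idea on small constant tensors; the values are assumptions
# chosen only for illustration.
def _mfm_activation_example():
    branch_1 = tf.constant([[1.0, -2.0], [0.5, 3.0]])
    branch_2 = tf.constant([[0.0, 4.0], [2.0, -1.0]])
    # Same op used above to merge the two conv branches.
    return tf.maximum(branch_1, branch_2)  # -> [[1.0, 4.0], [2.0, 3.0]]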
def affine(inpOp, nIn, nOut, name, weight_decay=0.0):
with tf.variable_scope(name):
l2_regularizer = lambda t: l2_loss(t, weight=weight_decay)
weights = tf.get_variable("weights", [nIn, nOut],
initializer=tf.truncated_normal_initializer(stddev=1e-1),
regularizer=l2_regularizer, dtype=inpOp.dtype)
biases = tf.get_variable("biases", [nOut], initializer=tf.constant_initializer(), dtype=inpOp.dtype)
affine1 = tf.nn.relu_layer(inpOp, weights, biases)
return affine1
def l2_loss(tensor, weight=1.0, scope=None):
"""Define a L2Loss, useful for regularize, i.e. weight decay.
Args:
tensor: tensor to regularize.
weight: an optional weight to modulate the loss.
scope: Optional scope for op_scope.
Returns:
the L2 loss op.
"""
with tf.name_scope(scope):
weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='loss_weight')
loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value')
return loss
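# Illustrative sketch (not part of the original file): l2_loss is normally
# attached to a variable through the `regularizer` argument of tf.get_variable,
# as the conv/affine helpers above do. The variable name, shape and weight
# value below are assumptions for illustration only.
def _l2_regularizer_example(weight_decay=1e-4):
    regularizer = lambda t: l2_loss(t, weight=weight_decay)
    tf.get_variable("example_weights", [3, 3],
                    initializer=tf.truncated_normal_initializer(stddev=1e-1),
                    regularizer=regularizer)
    # The per-variable penalties are collected here and can be added to the
    # training loss.
    return tf.losses.get_regularization_losses()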
def lppool(inpOp, pnorm, kH, kW, dH, dW, padding, name):
with tf.variable_scope(name):
if pnorm == 2:
pwr = tf.square(inpOp)
else:
pwr = tf.pow(inpOp, pnorm)
subsamp = tf.nn.avg_pool(pwr,
ksize=[1, kH, kW, 1],
strides=[1, dH, dW, 1],
padding=padding)
subsamp_sum = tf.multiply(subsamp, kH*kW)
if pnorm == 2:
out = tf.sqrt(subsamp_sum)
else:
out = tf.pow(subsamp_sum, 1/pnorm)
return out
def mpool(inpOp, kH, kW, dH, dW, padding, name):
with tf.variable_scope(name):
maxpool = tf.nn.max_pool(inpOp,
ksize=[1, kH, kW, 1],
strides=[1, dH, dW, 1],
padding=padding)
return maxpool
def apool(inpOp, kH, kW, dH, dW, padding, name):
with tf.variable_scope(name):
avgpool = tf.nn.avg_pool(inpOp,
ksize=[1, kH, kW, 1],
strides=[1, dH, dW, 1],
padding=padding)
return avgpool
# def mfmpool(input1, input2, name):
# with tf.variable_scope(name):
# res = tf.maximum(input1, input2)
# return res
def batch_norm(x, phase_train):
"""
Batch normalization on convolutional maps.
Args:
x: Tensor, 4D BHWD input maps
        phase_train: boolean tf.Variable, true indicates training phase
                     (the channel depth is inferred from x, so no separate
                     n_out argument is needed)
    Returns:
normed: batch-normalized maps
Ref: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow/33950177
"""
name = 'batch_norm'
with tf.variable_scope(name):
phase_train = tf.convert_to_tensor(phase_train, dtype=tf.bool)
n_out = int(x.get_shape()[3])
beta = tf.Variable(tf.constant(0.0, shape=[n_out], dtype=x.dtype),
name=name+'/beta', trainable=True, dtype=x.dtype)
gamma = tf.Variable(tf.constant(1.0, shape=[n_out], dtype=x.dtype),
name=name+'/gamma', trainable=True, dtype=x.dtype)
batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=0.9)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = control_flow_ops.cond(phase_train,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
return normed
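# Illustrative sketch (not part of the original file): batch_norm switches
# between batch statistics (training) and their exponential moving averages
# (inference) based on the boolean `phase_train`. The placeholder names and
# shapes below are assumptions for illustration only.
def _batch_norm_usage_example():
    feature_maps = tf.placeholder(tf.float32, [None, 28, 28, 16])
    phase_train = tf.placeholder(tf.bool, name="phase_train")
    normed = batch_norm(feature_maps, phase_train)
    # Feed phase_train=True on training steps (updates the moving averages)
    # and phase_train=False for evaluation/inference.
    return normed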
def inception(inp, inSize, ks, o1s, o2s1, o2s2, o3s1, o3s2, o4s1, o4s2, o4s3, poolType, name,
phase_train=True, use_batch_norm=True, weight_decay=0.0):
print('name = ', name)
print('inputSize = ', inSize)
print('kernelSize = {3,5}')
print('kernelStride = {%d,%d}' % (ks,ks))
print('outputSize = {%d,%d}' % (o2s2,o3s2))
print('reduceSize = {%d,%d,%d,%d}' % (o2s1,o3s1,o4s2,o1s))
print('pooling = {%s, %d, %d, %d, %d}' % (poolType, o4s1, o4s1, o4s3, o4s3))
if (o4s2>0):
o4 = o4s2
else:
o4 = inSize
print('outputSize = ', o1s+o2s2+o3s2+o4)
print()
net = []
with tf.variable_scope(name):
with tf.variable_scope('branch1_1x1'):
if o1s>0:
conv1 = conv(inp, inSize, o1s, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)
net.append(conv1)
with tf.variable_scope('branch2_3x3'):
if o2s1>0:
conv3a = conv(inp, inSize, o2s1, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)
conv3 = conv(conv3a, o2s1, o2s2, 3, 3, ks, ks, 'SAME', 'conv3x3', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)
net.append(conv3)
with tf.variable_scope('branch3_5x5'):
if o3s1>0:
conv5a = conv(inp, inSize, o3s1, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)
conv5 = conv(conv5a, o3s1, o3s2, 5, 5, ks, ks, 'SAME', 'conv5x5', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)
net.append(conv5)
with tf.variable_scope('branch4_pool'):
if poolType=='MAX':
pool = mpool(inp, o4s1, o4s1, o4s3, o4s3, 'SAME', 'pool')
elif poolType=='L2':
pool = lppool(inp, 2, o4s1, o4s1, o4s3, o4s3, 'SAME', 'pool')
else:
raise ValueError('Invalid pooling type "%s"' % poolType)
if o4s2>0:
pool_conv = conv(pool, inSize, o4s2, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)
else:
pool_conv = pool
net.append(pool_conv)
incept = array_ops.concat(net, 3, name=name)
return incept
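# Illustrative sketch (not part of the original file): one possible call for a
# GoogLeNet-style module with a 192-channel input, 1x1/3x3/5x5 branches and a
# 3x3 max-pool branch with a 1x1 projection. The sizes are illustrative
# assumptions, not values taken from a specific FaceNet configuration.
def _inception_usage_example(net, phase_train):
    # `net` is assumed to be a 4D feature map with 192 channels.
    return inception(net, 192, 1, 64, 96, 128, 16, 32, 3, 32, 1, 'MAX',
                     'incept_example', phase_train=phase_train)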
| [
"tensorflow.convert_to_tensor",
"tensorflow.control_dependencies",
"tensorflow.nn.max_pool",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.nn.l2_loss",
"tensorflow.nn.conv2d",
"tensorflow.nn.moments",
"tensorflow.truncated_normal_initializer",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.nn.batch_normalization",
"tensorflow.pow",
"tensorflow.identity",
"tensorflow.nn.avg_pool",
"tensorflow.nn.bias_add",
"tensorflow.nn.relu",
"tensorflow.multiply",
"tensorflow.nn.relu_layer",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.constant",
"tensorflow.maximum",
"tensorflow.constant_initializer",
"tensorflow.variable_scope",
"tensorflow.sqrt"
] | src/models/network.py | [(72, 'tensorflow.maximum', 'tf.maximum', (['net_1', 'net_2'], {}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['inpOp', 'kernel', '[1, dH, dW, 1]'], {'padding': 'padType'}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv_bn', 'biases'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.nn.relu', 'tf.nn.relu', (['bias'], {}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['inpOp', 'kernel', '[1, dH, dW, 1]'], {'padding': 'padType'}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (83, 'tensorflow.nn.relu_layer', 'tf.nn.relu_layer', (['inpOp', 'weights', 'biases'], {}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.name_scope', 'tf.name_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['weight'], {'dtype': 'tensor.dtype.base_dtype', 'name': '"""loss_weight"""'}), True, 'import tensorflow as tf\n'), (103, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (109, 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['pwr'], {'ksize': '[1, kH, kW, 1]', 'strides': '[1, dH, dW, 1]', 'padding': 'padding'}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.multiply', 'tf.multiply', (['subsamp', '(kH * kW)'], {}), True, 'import tensorflow as tf\n'), (123, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['inpOp'], {'ksize': '[1, kH, kW, 1]', 'strides': '[1, dH, dW, 1]', 'padding': 'padding'}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (132, 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['inpOp'], {'ksize': '[1, kH, kW, 1]', 'strides': '[1, dH, dW, 1]', 'padding': 'padding'}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['phase_train'], {'dtype': 'tf.bool'}), True, 'import tensorflow as tf\n'), (165, 'tensorflow.nn.moments', 'tf.nn.moments', (['x', '[0, 1, 2]'], {'name': '"""moments"""'}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', ([], {'decay': '(0.9)'}), True, 'import tensorflow as tf\n'), (174, 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['x', 'mean', 'var', 'beta', 'gamma', '(0.001)'], {}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (228, 'tensorflow.python.ops.array_ops.concat', 'array_ops.concat', (['net', '(3)'], {'name': 'name'}), False, 'from tensorflow.python.ops import array_ops\n'), (99, 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['tensor'], {}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.square', 'tf.square', (['inpOp'], {}), True, 'import tensorflow as tf\n'), (107, 'tensorflow.pow', 'tf.pow', (['inpOp', 'pnorm'], {}), True, 
'import tensorflow as tf\n'), (116, 'tensorflow.sqrt', 'tf.sqrt', (['subsamp_sum'], {}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.pow', 'tf.pow', (['subsamp_sum', '(1 / pnorm)'], {}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'shape': '[n_out]', 'dtype': 'x.dtype'}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[n_out]', 'dtype': 'x.dtype'}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""branch1_1x1"""'], {}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""branch2_3x3"""'], {}), True, 'import tensorflow as tf\n'), (208, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""branch3_5x5"""'], {}), True, 'import tensorflow as tf\n'), (214, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""branch4_pool"""'], {}), True, 'import tensorflow as tf\n'), (39, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.1)'}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.constant_initializer', 'tf.constant_initializer', ([], {}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.1)'}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.1)'}), True, 'import tensorflow as tf\n'), (82, 'tensorflow.constant_initializer', 'tf.constant_initializer', ([], {}), True, 'import tensorflow as tf\n'), (169, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[ema_apply_op]'], {}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.identity', 'tf.identity', (['batch_mean'], {}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.identity', 'tf.identity', (['batch_var'], {}), True, 'import tensorflow as tf\n')] |
criteo-dexter/deepr | 4de9cb8afc09cb3d2f7c42da248a966bfea5fc83 | # pylint: disable=no-value-for-parameter,unexpected-keyword-arg
"""LSTM layers."""
import tensorflow as tf
from deepr.layers import base
@base.layer(n_in=2, n_out=3)
def LSTM(tensors, num_units: int, bidirectional: bool = False, **kwargs):
"""LSTM layer."""
words, nwords = tensors
t = tf.transpose(words, perm=[1, 0, 2])
lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(num_units=num_units, **kwargs)
outputs_fw, (hidden_fw, output_fw) = lstm_cell_fw(t, dtype=tf.float32, sequence_length=nwords)
if bidirectional:
lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(num_units=num_units, **kwargs)
lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
outputs_bw, (hidden_bw, output_bw) = lstm_cell_bw(t, dtype=tf.float32, sequence_length=nwords)
outputs = tf.concat([outputs_fw, outputs_bw], axis=-1)
hidden = tf.concat([hidden_fw, hidden_bw], axis=-1)
output = tf.concat([output_fw, output_bw], axis=-1)
else:
outputs = outputs_fw
hidden = hidden_fw
output = output_fw
outputs = tf.transpose(outputs, perm=[1, 0, 2])
return (outputs, hidden, output)
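# Illustrative sketch (not part of the original file): assuming deepr's
# base.layer decorator turns this function into a layer factory whose
# instances are applied to a (words, nwords) tuple, usage could look like
#
#     lstm = LSTM(num_units=128, bidirectional=True)
#     outputs, hidden, output = lstm((words, nwords))
#
# where `words` is a [batch, time, dim] float tensor and `nwords` holds the
# per-example sequence lengths. The exact calling convention depends on
# deepr's Layer API, so treat this as an assumption rather than documentation.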
| [
"tensorflow.concat",
"tensorflow.contrib.rnn.TimeReversedFusedRNN",
"tensorflow.contrib.rnn.LSTMBlockFusedCell",
"tensorflow.transpose"
] | deepr/layers/lstm.py | [(9, 'deepr.layers.base.layer', 'base.layer', ([], {'n_in': '(2)', 'n_out': '(3)'}), False, 'from deepr.layers import base\n'), (13, 'tensorflow.transpose', 'tf.transpose', (['words'], {'perm': '[1, 0, 2]'}), True, 'import tensorflow as tf\n'), (14, 'tensorflow.contrib.rnn.LSTMBlockFusedCell', 'tf.contrib.rnn.LSTMBlockFusedCell', ([], {'num_units': 'num_units'}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.transpose', 'tf.transpose', (['outputs'], {'perm': '[1, 0, 2]'}), True, 'import tensorflow as tf\n'), (18, 'tensorflow.contrib.rnn.LSTMBlockFusedCell', 'tf.contrib.rnn.LSTMBlockFusedCell', ([], {'num_units': 'num_units'}), True, 'import tensorflow as tf\n'), (19, 'tensorflow.contrib.rnn.TimeReversedFusedRNN', 'tf.contrib.rnn.TimeReversedFusedRNN', (['lstm_cell_bw'], {}), True, 'import tensorflow as tf\n'), (21, 'tensorflow.concat', 'tf.concat', (['[outputs_fw, outputs_bw]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (22, 'tensorflow.concat', 'tf.concat', (['[hidden_fw, hidden_bw]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (23, 'tensorflow.concat', 'tf.concat', (['[output_fw, output_bw]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n')] |
flyliu2017/bert | cc6e676ff8693a6cc31ade9d7a6cbb0789d7877c | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
from tensorflow.python.ops.losses.losses_impl import Reduction
import modeling
import optimization_multigpus
import tokenization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_bool(
"data_converted", True,
"Whether data had been converted to tfrecord.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_integer(
"num_gpus", 1,
"number of GPU to use.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
battches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class CommentsTagsProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
data_dir, "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
data_dir, "eval")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
data_dir, "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, data_dir, set_type):
"""Creates examples for the training and dev sets."""
examples = []
with open(os.path.join(data_dir, '{}_xs_converted_tag.txt'.format(set_type)), 'r', encoding='utf8') as f:
txts = f.read().splitlines()
with open(os.path.join(data_dir, '{}_ys_converted_tag.txt'.format(set_type)), 'r', encoding='utf8') as f:
labels = f.read().splitlines()
for (i, n) in enumerate(zip(txts, labels)):
txt, label = n
guid = "%s-%s" % (set_type, i)
      text_a, text_b = txt.split(' | ')
label = label.split(' | ')[0]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class SegmentedCommentsTagsProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
data_dir, "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
data_dir, "eval")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
data_dir, "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, data_dir, set_type):
"""Creates examples for the training and dev sets."""
examples = []
with open(os.path.join(data_dir, '{}_xs_converted_tag.txt'.format(set_type)), 'r', encoding='utf8') as f:
txts = f.read().splitlines()
with open(os.path.join(data_dir, '{}_ys_converted_tag.txt'.format(set_type)), 'r', encoding='utf8') as f:
labels = f.read().splitlines()
for (i, n) in enumerate(zip(txts, labels)):
txt, label = n
guid = "%s-%s" % (set_type, i)
      text_a, text_b = txt.split(' | ')
      text_a = ' '.join(list(text_a))
      label = label.split(' | ')[0]
      label = ' '.join(list(label))
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=[0]*max_seq_length,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
  label = example.label
  words = tokenizer.tokenize(label)
  length = len(words)
  start = end = None
for i in range(len(tokens)):
if tokens[i:i+length]==words:
start=i
end=i+length
break
if start is None:
# print(tokens)
# print(words)
# raise ValueError('can not find mark text in comment.')
return None
# start,end=label.split()
# start=int(start)
# end=int(end)
  label_id = [0] * max_seq_length
  label_id[start:end] = [1] * (end - start)
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: {}" .format(label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
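# Illustrative sketch (not part of the original file): the label text is
# located as a token span inside the full input sequence. For example, with
#
#   tokens = ["[CLS]", "great", "battery", "life", "[SEP]", ...]
#   example.label tokenized to ["battery", "life"]
#
# the matching loop above finds start=2, end=4, and label_id gets 1s at those
# positions and 0s elsewhere, turning the task into per-token binary
# classification. The example tokens are assumptions for illustration only.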
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
if feature:
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature(feature.label_id)
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([seq_length], tf.int64),
"is_real_example": tf.FixedLenFeature([1], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
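# Illustrative sketch (not part of the original file): a small worked example
# of the truncation heuristic above; the token strings are assumptions chosen
# only for illustration.
def _truncate_seq_pair_example():
  tokens_a = ["t1", "t2", "t3", "t4"]
  tokens_b = ["u1", "u2", "u3"]
  # 4 + 3 = 7 > 6 and tokens_a is longer, so one token is popped from tokens_a;
  # then 3 + 3 = 6 <= 6 and the loop stops.
  _truncate_seq_pair(tokens_a, tokens_b, 6)
  return tokens_a, tokens_b  # (["t1", "t2", "t3"], ["u1", "u2", "u3"])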
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
  # This task needs per-token predictions, so we use the token-level sequence
  # output (one hidden vector per position) rather than the pooled [CLS]
  # vector used by the sentence-level classifier demo.
output_layer = model.get_sequence_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias",[], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.reduce_sum(tf.multiply(output_layer,output_weights),-1)
logits = tf.add(logits, output_bias)
probabilities=tf.sigmoid(logits)
# labels=tf.constant(labels,dtype=tf.int32)
per_example_loss=tf.losses.sigmoid_cross_entropy(multi_class_labels=labels, logits=logits,reduction=Reduction.NONE)
per_example_loss=tf.reduce_sum(per_example_loss,axis=-1)
loss = tf.reduce_mean(per_example_loss,name='train_loss')
return (loss, per_example_loss, logits, probabilities)
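# Shape sketch (not part of the original file): with batch size B and sequence
# length T, model.get_sequence_output() is [B, T, hidden_size]; multiplying by
# the [hidden_size] weight vector and summing over the last axis yields
# per-token logits of shape [B, T], and the element-wise sigmoid gives one
# "inside the marked span" probability per token position.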
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(label_ids.shape[0], dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
init_op=tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
scaffold_fn=tf.train.Scaffold(init_op=init_op)
# def train_scafflod():
# tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
#
# scaffold_fn=tf.train.Scaffold(init_fn=train_scafflod)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization_multigpus.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold=scaffold_fn
)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
# predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
# ones=tf.get_variable('ones',shape=logits.shape,initializer=tf.ones_initializer)
# zeros=tf.get_variable('zeros',shape=logits.shape,initializer=tf.zeros_initializer)
predictions=tf.where(logits>=0,tf.ones(tf.shape(logits)),tf.zeros(tf.shape(logits)))
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
      # tf.estimator.EstimatorSpec expects a dict of (value, update_op) metric
      # pairs, so call metric_fn directly; the (fn, args) tuple form is only
      # understood by TPUEstimatorSpec.
      eval_metrics = metric_fn(per_example_loss, label_ids, logits,
                               is_real_example)
      output_spec = tf.estimator.EstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metric_ops=eval_metrics,
          scaffold=scaffold_fn)
else:
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold=scaffold_fn
)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples,seq_length], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
"tag": CommentsTagsProcessor,
"segtag":SegmentedCommentsTagsProcessor
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# run_config = tf.contrib.tpu.RunConfig(
# cluster=tpu_cluster_resolver,
# master=FLAGS.master,
# model_dir=FLAGS.output_dir,
# save_checkpoints_steps=FLAGS.save_checkpoints_steps,
# tpu_config=tf.contrib.tpu.TPUConfig(
# iterations_per_loop=FLAGS.iterations_per_loop,
# num_shards=FLAGS.num_tpu_cores,
# per_host_input_for_training=is_per_host))
# # If TPU is not available, this will fall back to normal Estimator on CPU
# # or GPU.
# estimator = tf.contrib.tpu.TPUEstimator(
# use_tpu=FLAGS.use_tpu,
# model_fn=model_fn,
# config=run_config,
# train_batch_size=FLAGS.train_batch_size,
# eval_batch_size=FLAGS.eval_batch_size,
# predict_batch_size=FLAGS.predict_batch_size)
strategy=tf.contrib.distribute.MirroredStrategy(num_gpus=FLAGS.num_gpus,
cross_tower_ops=tf.contrib.distribute.AllReduceCrossTowerOps(
'nccl', num_packs=int(FLAGS.num_gpus))
)
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
train_distribute=strategy
)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config,
params={'batch_size':FLAGS.train_batch_size}
)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
if not tf.gfile.Exists(train_file) or not FLAGS.data_converted:
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
    train_hook = tf.train.LoggingTensorHook(['loss/train_loss'], every_n_iter=100)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps,hooks=[train_hook])
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
if not tf.gfile.Exists(eval_file) or not FLAGS.data_converted:
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
if not tf.gfile.Exists(predict_file) or not FLAGS.data_converted:
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
if i >= num_actual_predict_examples:
break
probabilities = prediction["probabilities"]
        texta = predict_examples[i].text_a
        texta = tokenizer.tokenize(texta)
        # The probabilities are aligned with the full input sequence, which
        # starts with [CLS], so token j of text_a corresponds to
        # probabilities[j + 1].
        phrase = [texta[j] if probabilities[j + 1] >= 0.5 else ' '
                  for j in range(min(len(texta), FLAGS.max_seq_length - 2))]
        phrase = ''.join(phrase).strip()
# output_line = "\t".join(
# str(class_probability)
# for class_probability in probabilities) + "\n"
writer.write(phrase+'\n')
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| [
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.train.LoggingTensorHook",
"tensorflow.metrics.accuracy",
"tensorflow.FixedLenFeature",
"tensorflow.gfile.Exists",
"tensorflow.reduce_sum",
"tensorflow.gfile.GFile",
"tensorflow.cast",
"tensorflow.train.init_from_checkpoint",
"tensorflow.gfile.MakeDirs",
"tensorflow.estimator.RunConfig",
"tensorflow.to_int32",
"tensorflow.data.TFRecordDataset",
"tensorflow.truncated_normal_initializer",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.add",
"tensorflow.logging.set_verbosity",
"tensorflow.trainable_variables",
"tensorflow.parse_single_example",
"tensorflow.app.run",
"tensorflow.nn.dropout",
"tensorflow.metrics.mean",
"tensorflow.estimator.Estimator",
"tensorflow.gfile.Open",
"tensorflow.shape",
"tensorflow.zeros_initializer",
"tensorflow.logging.info",
"tensorflow.train.Features",
"tensorflow.multiply",
"tensorflow.train.Scaffold",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.flags.DEFINE_string",
"tensorflow.losses.sigmoid_cross_entropy",
"tensorflow.sigmoid",
"tensorflow.ones",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.variable_scope"
] | run_token_level_classifier_multigpus.py | [(109, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_name"""', 'None', '"""The Cloud TPU to use for training. This should be either the name used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url."""'], {}), True, 'import tensorflow as tf\n'), (115, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_zone"""', 'None', '"""[Optional] GCE zone where the Cloud TPU is located in. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""gcp_project"""', 'None', '"""[Optional] Project name for the Cloud TPU-enabled project. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""master"""', 'None', '"""[Optional] TensorFlow master URL."""'], {}), True, 'import tensorflow as tf\n'), (599, 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['output_file'], {}), True, 'import tensorflow as tf\n'), (694, 'modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'is_training': 'is_training', 'input_ids': 'input_ids', 'input_mask': 'input_mask', 'token_type_ids': 'segment_ids', 'use_one_hot_embeddings': 'use_one_hot_embeddings'}), False, 'import modeling\n'), (913, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), True, 'import tensorflow as tf\n'), (924, 'tokenization.validate_case_matches_checkpoint', 'tokenization.validate_case_matches_checkpoint', (['FLAGS.do_lower_case', 'FLAGS.init_checkpoint'], {}), False, 'import tokenization\n'), (931, 'modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['FLAGS.bert_config_file'], {}), False, 'import modeling\n'), (939, 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.output_dir'], {}), True, 'import tensorflow as tf\n'), (950, 'tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'FLAGS.vocab_file', 'do_lower_case': 'FLAGS.do_lower_case'}), False, 'import tokenization\n'), (1003, 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'model_dir': 'FLAGS.output_dir', 'save_checkpoints_steps': 'FLAGS.save_checkpoints_steps', 'train_distribute': 'strategy'}), True, 'import tensorflow as tf\n'), (1008, 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', ([], {'model_fn': 'model_fn', 'config': 'run_config', 'params': "{'batch_size': FLAGS.train_batch_size}"}), True, 'import tensorflow as tf\n'), (1137, 'tensorflow.app.run', 'tf.app.run', ([], {}), True, 'import tensorflow as tf\n'), (577, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Example ***"""'], {}), True, 'import tensorflow as tf\n'), (578, 'tensorflow.logging.info', 'tf.logging.info', (["('guid: %s' % example.guid)"], {}), True, 'import tensorflow as tf\n'), (631, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (632, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (633, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (634, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 
'import tensorflow as tf\n'), (635, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[1]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (640, 'tensorflow.parse_single_example', 'tf.parse_single_example', (['record', 'name_to_features'], {}), True, 'import tensorflow as tf\n'), (658, 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_file'], {}), True, 'import tensorflow as tf\n'), (718, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), True, 'import tensorflow as tf\n'), (724, 'tensorflow.add', 'tf.add', (['logits', 'output_bias'], {}), True, 'import tensorflow as tf\n'), (726, 'tensorflow.sigmoid', 'tf.sigmoid', (['logits'], {}), True, 'import tensorflow as tf\n'), (729, 'tensorflow.losses.sigmoid_cross_entropy', 'tf.losses.sigmoid_cross_entropy', ([], {'multi_class_labels': 'labels', 'logits': 'logits', 'reduction': 'Reduction.NONE'}), True, 'import tensorflow as tf\n'), (730, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['per_example_loss'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (731, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_example_loss'], {'name': '"""train_loss"""'}), True, 'import tensorflow as tf\n'), (744, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Features ***"""'], {}), True, 'import tensorflow as tf\n'), (764, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (787, 'tensorflow.logging.info', 'tf.logging.info', (['"""**** Trainable Variables ****"""'], {}), True, 'import tensorflow as tf\n'), (955, 'tensorflow.contrib.cluster_resolver.TPUClusterResolver', 'tf.contrib.cluster_resolver.TPUClusterResolver', (['FLAGS.tpu_name'], {'zone': 'FLAGS.tpu_zone', 'project': 'FLAGS.gcp_project'}), True, 'import tensorflow as tf\n'), (1015, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""train.tf_record"""'], {}), False, 'import os\n'), (1019, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running training *****"""'], {}), True, 'import tensorflow as tf\n'), (1021, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.train_batch_size'], {}), True, 'import tensorflow as tf\n'), (1022, 'tensorflow.logging.info', 'tf.logging.info', (['""" Num steps = %d"""', 'num_train_steps'], {}), True, 'import tensorflow as tf\n'), (1028, 'tensorflow.train.LoggingTensorHook', 'tf.train.LoggingTensorHook', (["['loss/train_loss']"], {'every_n_iter': '(100)'}), True, 'import tensorflow as tf\n'), (1043, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval.tf_record"""'], {}), False, 'import os\n'), (1048, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running evaluation *****"""'], {}), True, 'import tensorflow as tf\n'), (1052, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.eval_batch_size'], {}), True, 'import tensorflow as tf\n'), (1071, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval_results.txt"""'], {}), False, 'import os\n'), (1089, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""predict.tf_record"""'], {}), False, 'import os\n'), (1095, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running prediction*****"""'], {}), True, 'import tensorflow as tf\n'), (1099, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.predict_batch_size'], {}), True, 'import tensorflow as tf\n'), (1110, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""test_results.tsv"""'], {}), False, 'import os\n'), 
(209, 'tensorflow.gfile.Open', 'tf.gfile.Open', (['input_file', '"""r"""'], {}), True, 'import tensorflow as tf\n'), (210, 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""', 'quotechar': 'quotechar'}), False, 'import csv\n'), (226, 'os.path.join', 'os.path.join', (['data_dir', '"""multinli"""', "('multinli.train.%s.tsv' % self.language)"], {}), False, 'import os\n'), (233, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'), (234, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'), (235, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[2]'], {}), False, 'import tokenization\n'), (244, 'os.path.join', 'os.path.join', (['data_dir', '"""xnli.dev.tsv"""'], {}), False, 'import os\n'), (250, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'), (253, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[6]'], {}), False, 'import tokenization\n'), (254, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[7]'], {}), False, 'import tokenization\n'), (255, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'), (295, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[8]'], {}), False, 'import tokenization\n'), (296, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[9]'], {}), False, 'import tokenization\n'), (335, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[3]'], {}), False, 'import tokenization\n'), (336, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[4]'], {}), False, 'import tokenization\n'), (613, 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), False, 'import collections\n'), (713, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.02)'}), True, 'import tensorflow as tf\n'), (716, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (721, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['output_layer'], {'keep_prob': '(0.9)'}), True, 'import tensorflow as tf\n'), (723, 'tensorflow.multiply', 'tf.multiply', (['output_layer', 'output_weights'], {}), True, 'import tensorflow as tf\n'), (746, 'tensorflow.logging.info', 'tf.logging.info', (["(' name = %s, shape = %s' % (name, features[name].shape))"], {}), True, 'import tensorflow as tf\n'), (754, 'tensorflow.cast', 'tf.cast', (["features['is_real_example']"], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (756, 'tensorflow.ones', 'tf.ones', (['label_ids.shape[0]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (769, 'modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), False, 'import modeling\n'), (792, 'tensorflow.logging.info', 'tf.logging.info', (['""" name = %s, shape = %s%s"""', 'var.name', 'var.shape', 'init_string'], {}), True, 'import tensorflow as tf\n'), (798, 'optimization_multigpus.create_optimizer', 'optimization_multigpus.create_optimizer', (['total_loss', 'learning_rate', 'num_train_steps', 'num_warmup_steps', 'use_tpu'], {}), False, 'import optimization_multigpus\n'), (801, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], 
{'mode': 'mode', 'loss': 'total_loss', 'train_op': 'train_op', 'scaffold': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (1072, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['output_eval_file', '"""w"""'], {}), True, 'import tensorflow as tf\n'), (1073, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Eval results *****"""'], {}), True, 'import tensorflow as tf\n'), (1112, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['output_predict_file', '"""w"""'], {}), True, 'import tensorflow as tf\n'), (1114, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Predict results *****"""'], {}), True, 'import tensorflow as tf\n'), (236, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['"""contradictory"""'], {}), False, 'import tokenization\n'), (237, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['"""contradiction"""'], {}), False, 'import tokenization\n'), (251, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['self.language'], {}), False, 'import tokenization\n'), (271, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'), (276, 'os.path.join', 'os.path.join', (['data_dir', '"""dev_matched.tsv"""'], {}), False, 'import os\n'), (282, 'os.path.join', 'os.path.join', (['data_dir', '"""test_matched.tsv"""'], {}), False, 'import os\n'), (300, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[-1]'], {}), False, 'import tokenization\n'), (312, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'), (317, 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), False, 'import os\n'), (322, 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), False, 'import os\n'), (340, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'), (435, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'), (440, 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), False, 'import os\n'), (445, 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), False, 'import os\n'), (460, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'), (463, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[3]'], {}), False, 'import tokenization\n'), (464, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'), (647, 'tensorflow.to_int32', 'tf.to_int32', (['t'], {}), True, 'import tensorflow as tf\n'), (778, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (779, 'tensorflow.train.Scaffold', 'tf.train.Scaffold', ([], {'init_op': 'init_op'}), True, 'import tensorflow as tf\n'), (824, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'eval_metric_ops': 'eval_metrics', 'scaffold': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (830, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': "{'probabilities': probabilities}", 'scaffold': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (867, 'tensorflow.constant', 'tf.constant', (['all_input_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 
'import tensorflow as tf\n'), (871, 'tensorflow.constant', 'tf.constant', (['all_input_mask'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (876, 'tensorflow.constant', 'tf.constant', (['all_segment_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (881, 'tensorflow.constant', 'tf.constant', (['all_label_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (1016, 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['train_file'], {}), True, 'import tensorflow as tf\n'), (1044, 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['eval_file'], {}), True, 'import tensorflow as tf\n'), (1090, 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['predict_file'], {}), True, 'import tensorflow as tf\n'), (294, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'), (621, 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'features'}), True, 'import tensorflow as tf\n'), (773, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (774, 'tensorflow.train.Scaffold', 'tf.train.Scaffold', ([], {}), True, 'import tensorflow as tf\n'), (814, 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'label_ids', 'predictions': 'predictions', 'weights': 'is_real_example'}), True, 'import tensorflow as tf\n'), (816, 'tensorflow.metrics.mean', 'tf.metrics.mean', ([], {'values': 'per_example_loss', 'weights': 'is_real_example'}), True, 'import tensorflow as tf\n'), (580, 'tokenization.printable_text', 'tokenization.printable_text', (['x'], {}), False, 'import tokenization\n'), (813, 'tensorflow.shape', 'tf.shape', (['logits'], {}), True, 'import tensorflow as tf\n'), (813, 'tensorflow.shape', 'tf.shape', (['logits'], {}), True, 'import tensorflow as tf\n')] |
rohitgirdhar/ActionVLAD | 08d3d65301940991e0a0cdca2c0534edf6749f41 | # ------------------------------------------------------------------------------
# ActionVLAD: Learning spatio-temporal aggregation for action classification
# Copyright (c) 2017 Carnegie Mellon University and Adobe Systems Incorporated
# Please see LICENSE on https://github.com/rohitgirdhar/ActionVLAD/ for details
# ------------------------------------------------------------------------------
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images.
The preprocessing steps for VGG were introduced in the following technical
report:
Very Deep Convolutional Networks For Large-Scale Image Recognition
Karen Simonyan and Andrew Zisserman
arXiv technical report, 2015
PDF: http://arxiv.org/pdf/1409.1556.pdf
ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf
CC-BY-4.0
More information can be obtained from the VGG website:
www.robots.ox.ac.uk/~vgg/research/very_deep/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from preprocessing.utils import _mean_image_subtraction
slim = tf.contrib.slim
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
_RESIZE_SIDE_MIN = 256
_RESIZE_SIDE_MAX = 512
def _crop(image, offset_height, offset_width, crop_height, crop_width):
"""Crops the given image using the provided offsets and sizes.
Note that the method doesn't assume we know the input image size but it does
assume we know the input image rank.
Args:
image: an image of shape [height, width, channels].
offset_height: a scalar tensor indicating the height offset.
offset_width: a scalar tensor indicating the width offset.
crop_height: the height of the cropped image.
crop_width: the width of the cropped image.
Returns:
    the cropped image.
Raises:
InvalidArgumentError: if the rank is not 3 or if the image dimensions are
less than the crop size.
"""
original_shape = tf.shape(image)
rank_assertion = tf.Assert(
tf.equal(tf.rank(image), 3),
['Rank of image must be equal to 3.'])
cropped_shape = control_flow_ops.with_dependencies(
[rank_assertion],
tf.pack([crop_height, crop_width, original_shape[2]]))
size_assertion = tf.Assert(
tf.logical_and(
tf.greater_equal(original_shape[0], crop_height),
tf.greater_equal(original_shape[1], crop_width)),
['Crop size greater than the image size.'])
offsets = tf.to_int32(tf.pack([offset_height, offset_width, 0]))
# Use tf.slice instead of crop_to_bounding box as it accepts tensors to
# define the crop size.
image = control_flow_ops.with_dependencies(
[size_assertion],
tf.slice(image, offsets, cropped_shape))
return tf.reshape(image, cropped_shape)
def _random_crop(image_list, crop_height, crop_width):
"""Crops the given list of images.
The function applies the same crop to each image in the list. This can be
effectively applied when there are multiple image inputs of the same
dimension such as:
image, depths, normals = _random_crop([image, depths, normals], 120, 150)
Args:
image_list: a list of image tensors of the same dimension but possibly
varying channel.
crop_height: the new height.
crop_width: the new width.
Returns:
the image_list with cropped images.
Raises:
ValueError: if there are multiple image inputs provided with different size
or the images are smaller than the crop dimensions.
"""
if not image_list:
raise ValueError('Empty image_list.')
# Compute the rank assertions.
rank_assertions = []
for i in range(len(image_list)):
image_rank = tf.rank(image_list[i])
rank_assert = tf.Assert(
tf.equal(image_rank, 3),
['Wrong rank for tensor %s [expected] [actual]',
image_list[i].name, 3, image_rank])
rank_assertions.append(rank_assert)
image_shape = control_flow_ops.with_dependencies(
[rank_assertions[0]],
tf.shape(image_list[0]))
image_height = image_shape[0]
image_width = image_shape[1]
crop_size_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(image_height, crop_height),
tf.greater_equal(image_width, crop_width)),
['Crop size greater than the image size.'])
asserts = [rank_assertions[0], crop_size_assert]
for i in range(1, len(image_list)):
image = image_list[i]
asserts.append(rank_assertions[i])
shape = control_flow_ops.with_dependencies([rank_assertions[i]],
tf.shape(image))
height = shape[0]
width = shape[1]
height_assert = tf.Assert(
tf.equal(height, image_height),
['Wrong height for tensor %s [expected][actual]',
image.name, height, image_height])
width_assert = tf.Assert(
tf.equal(width, image_width),
['Wrong width for tensor %s [expected][actual]',
image.name, width, image_width])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
# Use tf.random_uniform and not numpy.random.rand as doing the former would
# generate random numbers at graph eval time, unlike the latter which
# generates random numbers at graph definition time.
max_offset_height = control_flow_ops.with_dependencies(
asserts, tf.reshape(image_height - crop_height + 1, []))
max_offset_width = control_flow_ops.with_dependencies(
asserts, tf.reshape(image_width - crop_width + 1, []))
offset_height = tf.random_uniform(
[], maxval=max_offset_height, dtype=tf.int32)
offset_width = tf.random_uniform(
[], maxval=max_offset_width, dtype=tf.int32)
return [_crop(image, offset_height, offset_width,
crop_height, crop_width) for image in image_list]
def _central_crop(image_list, crop_height, crop_width):
"""Performs central crops of the given image list.
Args:
image_list: a list of image tensors of the same dimension but possibly
varying channel.
crop_height: the height of the image following the crop.
crop_width: the width of the image following the crop.
Returns:
the list of cropped images.
"""
outputs = []
for image in image_list:
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
offset_height = (image_height - crop_height) / 2
offset_width = (image_width - crop_width) / 2
outputs.append(_crop(image, offset_height, offset_width,
crop_height, crop_width))
return outputs
def _smallest_size_at_least(height, width, smallest_side):
"""Computes new shape with the smallest side equal to `smallest_side`.
Computes new shape with the smallest side equal to `smallest_side` while
preserving the original aspect ratio.
Args:
height: an int32 scalar tensor indicating the current height.
width: an int32 scalar tensor indicating the current width.
smallest_side: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
"""
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
height = tf.to_float(height)
width = tf.to_float(width)
smallest_side = tf.to_float(smallest_side)
scale = tf.cond(tf.greater(height, width),
lambda: smallest_side / width,
lambda: smallest_side / height)
new_height = tf.to_int32(height * scale)
new_width = tf.to_int32(width * scale)
return new_height, new_width
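# Worked example (comment added for illustration; not part of the original
# file): for a 400x600 (height x width) image and smallest_side=256 we have
# height < width, so scale = 256 / 400 = 0.64 and the new shape becomes
# roughly 256x384 (up to the integer cast in tf.to_int32); the aspect ratio is
# preserved and the smallest side ends up equal to `smallest_side`.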
def _aspect_preserving_resize(image, smallest_side):
"""Resize images preserving the original aspect ratio.
Args:
image: A 3-D image `Tensor`.
smallest_side: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
resized_image: A 3-D tensor containing the resized image.
"""
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
shape = tf.shape(image)
height = shape[0]
width = shape[1]
new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
image = tf.expand_dims(image, 0)
resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
align_corners=False)
resized_image = tf.squeeze(resized_image)
resized_image.set_shape([None, None, 3])
return resized_image
def preprocess_for_train(image,
output_height,
output_width,
resize_side_min=_RESIZE_SIDE_MIN,
resize_side_max=_RESIZE_SIDE_MAX):
"""Preprocesses the given image for training.
Note that the actual resizing scale is sampled from
    [`resize_side_min`, `resize_side_max`].
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
resize_side_min: The lower bound for the smallest side of the image for
aspect-preserving resizing.
resize_side_max: The upper bound for the smallest side of the image for
aspect-preserving resizing.
Returns:
A preprocessed image.
"""
resize_side = tf.random_uniform(
[], minval=resize_side_min, maxval=resize_side_max+1, dtype=tf.int32)
image = _aspect_preserving_resize(image, resize_side)
image = _random_crop([image], output_height, output_width)[0]
image.set_shape([output_height, output_width, 3])
image = tf.to_float(image)
image = tf.image.random_flip_left_right(image)
return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
def preprocess_for_eval(image, output_height, output_width, resize_side):
"""Preprocesses the given image for evaluation.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
resize_side: The smallest side of the image for aspect-preserving resizing.
Returns:
A preprocessed image.
"""
image = _aspect_preserving_resize(image, resize_side)
image = _central_crop([image], output_height, output_width)[0]
image.set_shape([output_height, output_width, 3])
image = tf.to_float(image)
return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
def preprocess_image(image, output_height, output_width, is_training=False,
resize_side_min=_RESIZE_SIDE_MIN,
resize_side_max=_RESIZE_SIDE_MAX):
"""Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
resize_side_min: The lower bound for the smallest side of the image for
aspect-preserving resizing. If `is_training` is `False`, then this value
is used for rescaling.
resize_side_max: The upper bound for the smallest side of the image for
aspect-preserving resizing. If `is_training` is `False`, this value is
ignored. Otherwise, the resize side is sampled from
      [resize_side_min, resize_side_max].
Returns:
A preprocessed image.
"""
if is_training:
return preprocess_for_train(image, output_height, output_width,
resize_side_min, resize_side_max)
else:
return preprocess_for_eval(image, output_height, output_width,
resize_side_min)
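def _example_usage():
  """Hedged usage sketch added for illustration; not part of the original file.
  The file name 'example.jpg' and the 224x224 output size are assumptions; it
  shows how `preprocess_image` could be wired into a TF 1.x input pipeline.
  """
  raw = tf.read_file('example.jpg')
  image = tf.image.decode_jpeg(raw, channels=3)
  # Training path: random resize of the smallest side, random crop and flip,
  # then per-channel mean subtraction.
  train_image = preprocess_image(image, 224, 224, is_training=True)
  # Evaluation path: deterministic resize to _RESIZE_SIDE_MIN and central crop.
  eval_image = preprocess_image(image, 224, 224, is_training=False)
  return train_image, eval_image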
| [
"tensorflow.convert_to_tensor",
"tensorflow.image.resize_bilinear",
"tensorflow.image.random_flip_left_right",
"tensorflow.shape",
"tensorflow.slice",
"tensorflow.greater",
"tensorflow.reshape",
"tensorflow.equal",
"tensorflow.expand_dims",
"tensorflow.squeeze",
"tensorflow.pack",
"tensorflow.to_float",
"tensorflow.rank",
"tensorflow.to_int32",
"tensorflow.greater_equal",
"tensorflow.random_uniform"
] | preprocessing/vgg_preprocessing.py | [(75, 'tensorflow.shape', 'tf.shape', (['image'], {}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.reshape', 'tf.reshape', (['image', 'cropped_shape'], {}), True, 'import tensorflow as tf\n'), (175, 'tensorflow.random_uniform', 'tf.random_uniform', (['[]'], {'maxval': 'max_offset_height', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.random_uniform', 'tf.random_uniform', (['[]'], {'maxval': 'max_offset_width', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (225, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['smallest_side'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (227, 'tensorflow.to_float', 'tf.to_float', (['height'], {}), True, 'import tensorflow as tf\n'), (228, 'tensorflow.to_float', 'tf.to_float', (['width'], {}), True, 'import tensorflow as tf\n'), (229, 'tensorflow.to_float', 'tf.to_float', (['smallest_side'], {}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.to_int32', 'tf.to_int32', (['(height * scale)'], {}), True, 'import tensorflow as tf\n'), (235, 'tensorflow.to_int32', 'tf.to_int32', (['(width * scale)'], {}), True, 'import tensorflow as tf\n'), (250, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['smallest_side'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (252, 'tensorflow.shape', 'tf.shape', (['image'], {}), True, 'import tensorflow as tf\n'), (256, 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), True, 'import tensorflow as tf\n'), (257, 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['image', '[new_height, new_width]'], {'align_corners': '(False)'}), True, 'import tensorflow as tf\n'), (259, 'tensorflow.squeeze', 'tf.squeeze', (['resized_image'], {}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.random_uniform', 'tf.random_uniform', (['[]'], {'minval': 'resize_side_min', 'maxval': '(resize_side_max + 1)', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (292, 'tensorflow.to_float', 'tf.to_float', (['image'], {}), True, 'import tensorflow as tf\n'), (293, 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), True, 'import tensorflow as tf\n'), (294, 'preprocessing.utils._mean_image_subtraction', '_mean_image_subtraction', (['image', '[_R_MEAN, _G_MEAN, _B_MEAN]'], {}), False, 'from preprocessing.utils import _mean_image_subtraction\n'), (312, 'tensorflow.to_float', 'tf.to_float', (['image'], {}), True, 'import tensorflow as tf\n'), (313, 'preprocessing.utils._mean_image_subtraction', '_mean_image_subtraction', (['image', '[_R_MEAN, _G_MEAN, _B_MEAN]'], {}), False, 'from preprocessing.utils import _mean_image_subtraction\n'), (82, 'tensorflow.pack', 'tf.pack', (['[crop_height, crop_width, original_shape[2]]'], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.pack', 'tf.pack', (['[offset_height, offset_width, 0]'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.slice', 'tf.slice', (['image', 'offsets', 'cropped_shape'], {}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.rank', 'tf.rank', (['image_list[i]'], {}), True, 'import tensorflow as tf\n'), (137, 'tensorflow.shape', 'tf.shape', (['image_list[0]'], {}), True, 'import tensorflow as tf\n'), (172, 'tensorflow.reshape', 'tf.reshape', (['(image_height - crop_height + 1)', '[]'], {}), True, 'import tensorflow as tf\n'), (174, 'tensorflow.reshape', 'tf.reshape', (['(image_width - crop_width + 1)', '[]'], {}), True, 'import tensorflow as 
tf\n'), (231, 'tensorflow.greater', 'tf.greater', (['height', 'width'], {}), True, 'import tensorflow as tf\n'), (78, 'tensorflow.rank', 'tf.rank', (['image'], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.greater_equal', 'tf.greater_equal', (['original_shape[0]', 'crop_height'], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.greater_equal', 'tf.greater_equal', (['original_shape[1]', 'crop_width'], {}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.equal', 'tf.equal', (['image_rank', '(3)'], {}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.greater_equal', 'tf.greater_equal', (['image_height', 'crop_height'], {}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.greater_equal', 'tf.greater_equal', (['image_width', 'crop_width'], {}), True, 'import tensorflow as tf\n'), (152, 'tensorflow.shape', 'tf.shape', (['image'], {}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.equal', 'tf.equal', (['height', 'image_height'], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.equal', 'tf.equal', (['width', 'image_width'], {}), True, 'import tensorflow as tf\n'), (198, 'tensorflow.shape', 'tf.shape', (['image'], {}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.shape', 'tf.shape', (['image'], {}), True, 'import tensorflow as tf\n')] |
Rahul-chunduru/meanfield-theory-of-activation-functions | 97abc52b25d7a57dc75ce21dcccc419f58a393d4 | """
Helper functions for FFN with ESP
=================================================================
Author: Mirco Milletari <[email protected]> (2018)
Tensorflow implementation of a Feed Forward Deep network with ESP
activation, as defined in
"Expectation propagation: a probabilistic view of Deep Feed Forward Networks"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
#Math Libraries
import numpy as np
#Visualization libraries
import matplotlib.pyplot as plt
#Tensor Flow
import tensorflow as tf
from tensorflow.python.framework import ops
# ======================================
# Initialize the Computational Graph
# ======================================
#One hot encoding for multiclass classification
def one_hot_econding(vect, N_classes, N_ch):
"""
One hot encoding:
    For multiclass classification we need to convert the ground truth input vector to a matrix using one hot encoding.
    Labels: Each class appearing in the ground truth vector is encoded in a column vector using: I_i = \delta_{i, Y_j} for j in [0, len(Y)],
    where \delta is the Kronecker delta. As a result, the number of columns in the matrix is equal to N_classes, each column being a binary
    truth table: 1 if the example is classified as belonging to class Y_i, 0 if it does not.
Arguments:
Y_labels -- ground truth vector
N_classes -- the number of classes in the ground truth vector
N_ch -- number of channels, if any (for the feature vector only)
Returns:
one_hot -- one hot matrix encoding
"""
    # Create a TensorFlow constant equal to the number of classes
C = tf.constant(N_classes, name="C")
one_hot_matrix = tf.one_hot(vect-1, C, axis=0) #axis=0 means it is mapping to column vectors
if N_ch != 0:
one_hot_matrix= tf.expand_dims(one_hot_matrix, 1)
    # Create a TensorFlow session
sess = tf.Session()
vect_hot = sess.run(one_hot_matrix)
sess.close()
return vect_hot
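# Worked example (comment added for illustration; not part of the original file):
# with vect = np.array([1, 2, 3]), N_classes = 3 and N_ch = 0, the function
# returns the 3x3 identity matrix
#   [[1., 0., 0.],
#    [0., 1., 0.],
#    [0., 0., 1.]]
# i.e. column j is the Kronecker-delta encoding of label vect[j].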
#Place Holders for the input/output data
def create_placeholders(Nfeat, Nlab):
"""
Creates the placeholders for the tensorflow session.
Arguments:
Nfeat -- scalar, size of the feature vector (number of features)
Nlab -- scalar, size of the label vector (number of labels)
Returns:
X -- placeholder for the data input, of shape [n_x, None] and dtype "float"
Y -- placeholder for the input labels, of shape [n_y, None] and dtype "float"
"""
X = tf.placeholder(shape= [Nfeat, None], dtype= "float64" )
Y = tf.placeholder(shape= [Nlab, None], dtype= "float64" )
return X, Y
#parameters initialization
def initialize_parameters(layers, activation, stbeta):
'''
Initialise the parameters of the model:
Arguments:
layers: Topology of the network. Array contaning number of layers and number of units in each layer.
activation: list of activation functions, for each layer in the network.
Evaluate:
L-- number of layers in the network (excluding the ouput)
first-- activation of the first layer
    w-- weight matrix, dim: (l, l-1), initialized to small values with the Xavier initializer
        (see the initialization note below).
b-- bias vector, dim: (l,1)
beta-- inverse "temperature". initialized by sampling from a normal distribution. We Initialise beta small, i.e.
high temperature. Note that each unit has its own beta as it attains only local equilibrium.
Another possible initialization of beta is to 1 for each unit.
Note: If one uses relu as an activation, beta shold be initialized to one and be non trainable.
initialization:
Orthogonal weights: tf.initializers.orthogonal()
Xavier : tf.contrib.layers.xavier_initializer(seed=1)
'''
tf.set_random_seed(1) # defines the seed of the random number generator
parameters={}
L = len(layers) # number of layers in the network
first = activation[0] #Activation of the first layer
if first == 'esp':
train = True
init = tf.random_normal_initializer(stddev= stbeta)
#init = tf.ones_initializer()
else:
train= False
init = tf.ones_initializer()
for l in range(1, L):
parameters['w' + str(l)] = tf.get_variable('w' + str(l), [layers[l], layers[l-1]],dtype= 'float64' , initializer= tf.contrib.layers.xavier_initializer(seed=1) )
parameters['b' + str(l)] = tf.get_variable('b' + str(l), [layers[l], 1],dtype= 'float64', initializer = tf.zeros_initializer())
parameters['beta' + str(l)] = tf.get_variable('beta'+ str(l), [layers[l], 1], dtype= 'float64', initializer = init, trainable= train )
assert(parameters['w' + str(l)].shape == (layers[l], layers[l-1]))
assert(parameters['b' + str(l)].shape == (layers[l], 1))
assert(parameters['beta'+ str(l)].shape == (layers[l], 1))
return parameters
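# Shape example (comment added for illustration; not part of the original file):
# for layers = [4, 3, 1] (input dim 4, one hidden layer of 3 units, 1 output)
# and activation = ['esp', 'esp'], the returned dictionary holds
# w1 (3, 4), b1 (3, 1), beta1 (3, 1) and w2 (1, 3), b2 (1, 1), beta2 (1, 1);
# with a non-'esp' first activation the beta variables are still created but
# left non-trainable and initialized to one.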
#Activation functions
def act(h,beta, activation):
"""
Activation functions:
esp -- finite temperature message passing
relu -- zero noise limit of esp
sigma -- Fermi-Dirac distribution
"""
if activation == "esp" or activation == "softmax":
A = tf.multiply(h, tf.nn.sigmoid(tf.multiply(beta,h)) )
elif activation == "sigmoid":
A = tf.nn.sigmoid(tf.multiply(beta,h))
elif activation == "relu":
A = tf.nn.relu(h)
return A
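# Note added for illustration (not part of the original file): for 'esp' the
# activation is f(h) = h * sigmoid(beta * h); since beta is an inverse
# temperature, the beta -> infinity (zero noise) limit gives
# sigmoid(beta * h) -> step(h) and hence f(h) -> relu(h), which is why relu is
# described above as the zero noise limit of esp.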
#--------Forward propagation----------------------------------------------------------------
def FW_prop(X,parameters, activation):
"""
Arguments:
X-- placeholder of the input data.
parameters-- dictionary of parameters, layer by layer, in the network.
activations-- list of activation functions to apply to the pre-activation outputs
Evaluates:
A_prev --activation of the previous layer, used in the fwd pass
cache_linear["Z"+str(l)]-- dictionary of pre-activation outputs
cache_act["A"+str(l)]-- dictionary of post-activation outputs
Returns:
caches-- array containing all the post and pre- activation values, layer by layer
"""
cache_linear={} #dictionary, cache of the linear outputs
cache_act={} #dictionary, cache of activations
L= len(activation)+1 # number of layers
a_prev= X
for l in range(1,L):
cache_linear["h"+str(l)] = tf.matmul(parameters["w"+str(l)], a_prev)+ parameters["b"+str(l)]
cache_act["a"+str(l)] = act(cache_linear["h"+str(l)], parameters['beta'+str(l)], activation[l-1])
a_prev= cache_act["a"+str(l)]
an = cache_act["a"+str(L-1)]
hn = cache_linear['h'+str(L-1)]
return an, hn, cache_linear, cache_act
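# Cache layout example (comment added for illustration; not part of the
# original file): for a 2-layer network (len(activation) == 2) the caches are
# cache_linear = {'h1': ..., 'h2': ...} and cache_act = {'a1': ..., 'a2': ...},
# with an == cache_act['a2'] and hn == cache_linear['h2'].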
#---------------cost function-----------------------------------------------------------
def obj(zn, betan, Y, activation):
"""
Arguments:
zn -- value of the output layer. This can either be equal to the last post activation value for esp and relu
          or the last pre-activation output for sigmoid. This is so because TF automatically includes the sigmoid
function in the definition of the cross entropy.
Y -- ground truth. This needs to be transposed
Returns:
cost -- cost function
"""
L= len(activation) #number of layers
m = Y.shape[1] #number of training examples
last = activation[L-1]
labels= tf.transpose(Y)
if last == 'sigmoid' or last == 'softmax': #use cross entropy loss function
logits= tf.transpose(betan*zn[1])
cost = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(logits = logits, multi_class_labels=labels))
elif last == 'esp' or last == 'relu': #use minimum squared error (L2 loss)
out = tf.transpose(zn[0])
cost = tf.reduce_mean(tf.squared_difference(out, labels))/2
return cost
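# Loss note (comment added for illustration; not part of the original file):
# for 'esp'/'relu' the cost is half the mean squared difference between the
# post-activation output a_n and the labels (an L2 loss); for
# 'sigmoid'/'softmax' it is the sigmoid cross entropy evaluated on the
# beta-scaled pre-activation output h_n.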
#------------Hessian-------------------
def flatten(tensor):
'''
Flattening function:
input: a tensor list
returns: a rank one tensor
'''
s= len(tensor) #number of tensors in the list
for i in range(s):
        dl = tensor[i]  #take the i-th tensor in the list
d1, d2 = dl.get_shape() #Obtain tensor dimensions
fl = tf.reshape(dl,[-1, d1*d2]) #reshape the tensor to a (1, d1*d2) tensor
#concatenate over all the elemets in the list
if i==0: flattened = fl # the first time
else: flattened = tf.concat([flattened, fl], axis=1)
return flattened
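# Worked example (comment added for illustration; not part of the original
# file): flattening a list holding a (3, 2) weight-gradient tensor and a
# (3, 1) bias-gradient tensor yields a single (1, 9) tensor: each element is
# reshaped to (1, d1*d2) and the pieces are concatenated along axis 1.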
#Hessian
def hessian(grads, par):
'''
Evaluates the exact Hessian matrix.
This function uses the same convention of the Autograd package.
Inputs:
    grads --- the evaluated gradients of the cost function
Returns:
hessian matrix: a (dim,dim) matrix of second derivatives, where 'dim' is the dimension of
the flattened gradient tensor.
'''
flat_grads = flatten(grads)[0] #flat gradients
dim = flat_grads.get_shape()[0] #get the dimensions of the flattened tensor
hess = [] #list
for i in range (dim):
dg_i = tf.gradients(flat_grads[i], par) #for each element of grads evaluate the gradients
dg_i_flat = flatten(dg_i) #flatten the resulting hessian onto a 1 d array
hess.append(dg_i_flat) #store row by row
return tf.reshape(hess,[dim, dim]) #returns the reshaped matrix
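# Cost note (comment added for illustration; not part of the original file):
# building the exact Hessian this way calls tf.gradients once per parameter,
# so for `dim` trainable parameters it adds dim gradient ops to the graph and
# returns a dim x dim matrix; this is only practical for small networks.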
#=======================
# Main
#=======================
def Run_DNN(X_train, Y_train, X_test, Y_test, layers, activation, epoch_sample, stdbeta, starter_learning, num_iterations, with_hessian, save_model, Plot):
"""
Run the DNN to find the optimal set of paramters
Arguments:
    X -- data, input matrix
Y -- true "label" vector
layers -- list containing the input size and each layer size
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
with_hessian -- if true evaluates the exact Hessian matrix at predefinite training intervals
stdbeta -- standard deviation of the noise paramters for initialization
Returns:
costs -- list contaning the value of the cost funciton (energy) at predefinite training intervals
Training metrics:
acc_train -- list containing the value of the task specific, training set accuracy at predefinite training intervals
acc_test -- list containing the value of the task specific, test set accuracy at predefinite training intervals
task and metrics:
1) Regression: Returns the R2 score
2) Binary Classification: Accuracy score
3) Multiclass Classification: Accuracy score
Other metrics can be easily implemented, but this is not important for this work.
gradients_and_par -- list containing the value of the gradients and the training parameters at predefinite training intervals
1) The format is: gradients_and_par[a][b][c]; [a] runs over the epochs, [c] in (0,1) selects the
                        gradients and the parameters respectively. e.g. gradients_and_par[5][2][0] returns the value of the gradient
of b1 at the 5th entry epoch. The epoch value is predetermined, e.g. one may want to store the results every
100 epochs, then [5] -- > 500 epochs.
2) [b] runs over the training parameters for each layer. e.g. for a 2 layer network with esp:
[0] --> w1, [1] --> b1, [2] --> beta1
[3] --> w2, [4] --> b2, [5] --> beta2
for Relu, there is no trainable beta, and the indexing [b] is adjusted accordingly.
    Npar -- Total number of trainable unit-parameters in the network. This is printed out during training.
hessians -- list containing the value of the hessian matrix at predefinite training intervals. The format is
hessians[a][b][c], where [a] runs over the epoch. For fixed [a], hessians stores the value of the hessian matrix
evaluated at the critical points; this is a nxn matrix indexed by [b][c]. The size of the matrix is predetermined
by the number of parameters in the network.
residuals -- list containing the value of the residuals at predefinite training intervals. As we are only interested in the
sign of the residuals, we define it as the difference between the predicted output \hat{y} (an in the code)
and the training labels y (Y in the code).
"""
ops.reset_default_graph() # reset the computational graph
tf.set_random_seed(1) # to keep consistent results
#----------training/test set features-------------------------
    X_tr = np.transpose(X_train)  # the transpose is taken to adapt to TF convention. This is also
f , m = X_tr.shape # f: number of features, m: number of training examples
    X_tst = np.transpose(X_test)  # the transpose is taken to adapt to TF convention. This is also
_ , mt = X_tst.shape
#------------Initialise network-------------------------------
network = np.append(f, layers) # add the input layer to the list
L= len(activation)
actL = activation[L-1] # activation of the last layer. It determines the task
#-----------training/test set labels-------------------------------
if actL == 'softmax':
l= len(np.unique(Y_train))
Y_tr = one_hot_econding(Y_train, l,0 )
Y_tst = one_hot_econding(Y_test, l,0 )
else:
Y_tr = np.transpose(Y_train) # how we defined the placeholders.
Y_tst = np.transpose(Y_test)
l = Y_tr.shape[0]
#-----------------initialize parameters of the model--------------------------------------------------------
X, Y= create_placeholders(f, l) # Create Placeholders
parameters = initialize_parameters(network, activation, stdbeta)
betan = tf.identity(parameters['beta'+str(L)], name="betan") #add the output noise to the graph for later retrieval
an, hn, _ , _ = FW_prop(X, parameters, activation) #post and pre-activation output of the last layer
an = tf.identity(an, name= "an") #add the output post-activation value to the graph for later retrieval
hn = tf.identity(hn, name='hn') #add the output pre-activation value to the graph for later retrieval
#Create a saver for the Model
if save_model == True:
saver = tf.train.Saver()
#-----------------Initialize the cost and gradients---------------------------------------------------------
costs = [] #store the cost for different opochs
cost = obj([an,hn], betan, Y, activation)
#-----------------Initialize the optimizer-----------------------------------------------------------------
    # Implement a dynamical (exponentially decaying) learning rate
global_step = tf.Variable(0., trainable=False)
rate = tf.train.exponential_decay(starter_learning, global_step, 500, 0.9) #exponential learning rate decay
#rate = starter_learning
tvars = tf.trainable_variables() #list of trainable variables
    Npar= flatten(tvars).get_shape()[1]  #total number of parameters in the network
print('there are:', Npar,'parameters in the network')
optimizer = tf.train.AdamOptimizer(learning_rate = rate) #Initialize Adam optimizer
grads_var = optimizer.compute_gradients(cost, tvars ) #Get gradients layer by layer. Note that this function returns the pair (grads, var)
grads = [grads_var[i][0] for i in range(len(grads_var))] #extract the gradients
min = optimizer.apply_gradients(grads_and_vars= grads_var, global_step= global_step) #Apply the gradients to look for critical points
gradients_and_par = [] #store gradients and training paramters for different epochs
hessians = [] #store the hessian for different epochs
residuals= [] #store the value of the residuals for different epochs
#gs = [] #store the value of the phase space factor for different epochs
    if with_hessian == True:  #if True, evaluate the Hessian matrix and the residual errors
hess = hessian(grads, tvars) #Hessian matrix
res = tf.subtract(an, Y) #residual error
#---------------------------Initialize evaluation metrics----------------------------------------------------
e_len = len(epoch_sample)
acc_train = [] #store train accuracy for each epoch
acc_test = [] #store test accuracy for each epoch
if actL == 'sigmoid': #accuracy score for binary class classification
Yp = tf.greater(an , 0.5)
accuracy = tf.reduce_mean(tf.cast(tf.equal(Yp, tf.equal(Y,1.0)), "float"))
elif actL == 'esp' or actL == 'relu': #r2 score
norm= tf.reduce_mean( tf.squared_difference(Y,tf.reduce_mean(Y)) )
accuracy = 1 - tf.divide( tf.reduce_mean(tf.squared_difference(an, Y)), norm)
elif actL == 'softmax': #accuracy score for multiclass classification
Yp = tf.sigmoid(betan*hn)
correct = tf.equal(tf.argmax(Yp), tf.argmax(Y))
accuracy= tf.reduce_mean(tf.cast(correct, "float"))
#-----------------Initialize the graph and start the session-------------------------------------------------
init = tf.global_variables_initializer()
with tf.Session() as sess:
# Run the initialization
sess.run(init)
jj=0
for epoch in range(num_iterations):
_ , epoch_cost, epoch_grad, epoch_acc_train = sess.run([min, cost, grads_var, accuracy], feed_dict={X: X_tr, Y: Y_tr})
# Print the cost every interval epoch (here uses the inhomogenous interval but you can change it)
if jj< e_len and epoch % epoch_sample[jj] == 0:
#if epoch % 50 == 0:
print("Epoch %i, Cost: %f, Train accuracy: %f" % (epoch, epoch_cost,epoch_acc_train))
costs.append(epoch_cost) #store the costs
gradients_and_par.append(epoch_grad) #store grads and trainable parameters
#--------------Store the evaluation metrics------------------------------------
epoch_acc_test = sess.run(accuracy, feed_dict={X: X_tst, Y: Y_tst})
acc_test.append(epoch_acc_test)
acc_train.append(epoch_acc_train)
#------------------------------------------------------------------------------
jj+=1 #increase counter
#---------------------Evaluate and store the Hessian---------------------------
if with_hessian == True:
epoch_hess, epoch_res = sess.run([hess,res], feed_dict={X: X_tr, Y: Y_tr})
assert(epoch_hess.shape[1] == Npar) #check the dimensions of the hessian matrix
hessians.append(epoch_hess) #store the hessian
residuals.append(epoch_res) #store the residuals
#gs.append(epoch_g) #store the gs
else:
hessians.append(1) #returns just ones
residuals.append(1)
#gs.append(1)
# plot the cost at the end of training
if Plot== True:
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations')
plt.title("Learning rate =" + str(starter_learning))
plt.show()
print('Train accuracy', acc_train[jj-1])
print('Test accuracy', acc_test[jj-1])
accuracy = (acc_train, acc_test)
if save_model == True:
saver.save(sess, "saver/esp_model.ckpt")
sess.close()
return costs, accuracy, gradients_and_par, hessians, residuals
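# Hedged usage sketch (comment added for illustration; not part of the original
# file). The toy data, layer sizes and hyperparameters below are assumptions:
#
#   X_train = np.random.randn(200, 4); Y_train = np.random.randn(200, 1)
#   X_test = np.random.randn(50, 4);   Y_test = np.random.randn(50, 1)
#   epoch_sample = [1, 10, 50, 100]    # epochs at which metrics are stored
#   costs, accuracy, grads, hessians, res = Run_DNN(
#       X_train, Y_train, X_test, Y_test,
#       layers=[3, 1], activation=['esp', 'esp'],
#       epoch_sample=epoch_sample, stdbeta=0.1, starter_learning=0.01,
#       num_iterations=500, with_hessian=True, save_model=False, Plot=False)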
| [
"tensorflow.concat",
"numpy.squeeze",
"tensorflow.equal",
"tensorflow.cast",
"tensorflow.train.AdamOptimizer",
"tensorflow.Variable",
"tensorflow.greater",
"numpy.unique",
"tensorflow.gradients",
"tensorflow.train.exponential_decay",
"tensorflow.subtract",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.Session",
"tensorflow.trainable_variables",
"tensorflow.train.Saver",
"tensorflow.argmax",
"tensorflow.random_normal_initializer",
"tensorflow.python.framework.ops.reset_default_graph",
"tensorflow.zeros_initializer",
"tensorflow.identity",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"numpy.append",
"tensorflow.one_hot",
"numpy.transpose",
"tensorflow.set_random_seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"tensorflow.nn.relu",
"tensorflow.multiply",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.losses.sigmoid_cross_entropy",
"tensorflow.expand_dims",
"tensorflow.sigmoid",
"tensorflow.ones_initializer",
"matplotlib.pyplot.xlabel",
"tensorflow.squared_difference"
] | TF/esp_tf_utils.py | [(61, 'tensorflow.constant', 'tf.constant', (['N_classes'], {'name': '"""C"""'}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.one_hot', 'tf.one_hot', (['(vect - 1)', 'C'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (91, 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[Nfeat, None]', 'dtype': '"""float64"""'}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[Nlab, None]', 'dtype': '"""float64"""'}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), True, 'import tensorflow as tf\n'), (236, 'tensorflow.transpose', 'tf.transpose', (['Y'], {}), True, 'import tensorflow as tf\n'), (301, 'tensorflow.reshape', 'tf.reshape', (['hess', '[dim, dim]'], {}), True, 'import tensorflow as tf\n'), (360, 'tensorflow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (361, 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), True, 'import tensorflow as tf\n'), (365, 'numpy.transpose', 'np.transpose', (['X_train'], {}), True, 'import numpy as np\n'), (368, 'numpy.transpose', 'np.transpose', (['X_test'], {}), True, 'import numpy as np\n'), (373, 'numpy.append', 'np.append', (['f', 'layers'], {}), True, 'import numpy as np\n'), (399, 'tensorflow.identity', 'tf.identity', (['an'], {'name': '"""an"""'}), True, 'import tensorflow as tf\n'), (400, 'tensorflow.identity', 'tf.identity', (['hn'], {'name': '"""hn"""'}), True, 'import tensorflow as tf\n'), (415, 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), True, 'import tensorflow as tf\n'), (416, 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['starter_learning', 'global_step', '(500)', '(0.9)'], {}), True, 'import tensorflow as tf\n'), (419, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (424, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'rate'}), True, 'import tensorflow as tf\n'), (464, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (65, 'tensorflow.expand_dims', 'tf.expand_dims', (['one_hot_matrix', '(1)'], {}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stbeta'}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.ones_initializer', 'tf.ones_initializer', ([], {}), True, 'import tensorflow as tf\n'), (239, 'tensorflow.transpose', 'tf.transpose', (['(betan * zn[1])'], {}), True, 'import tensorflow as tf\n'), (266, 'tensorflow.reshape', 'tf.reshape', (['dl', '[-1, d1 * d2]'], {}), True, 'import tensorflow as tf\n'), (297, 'tensorflow.gradients', 'tf.gradients', (['flat_grads[i]', 'par'], {}), True, 'import tensorflow as tf\n'), (386, 'numpy.transpose', 'np.transpose', (['Y_train'], {}), True, 'import numpy as np\n'), (387, 'numpy.transpose', 'np.transpose', (['Y_test'], {}), True, 'import numpy as np\n'), (404, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (438, 'tensorflow.subtract', 'tf.subtract', (['an', 'Y'], {}), True, 'import tensorflow as tf\n'), (448, 'tensorflow.greater', 'tf.greater', (['an', '(0.5)'], {}), True, 'import tensorflow as tf\n'), (466, 
'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (240, 'tensorflow.losses.sigmoid_cross_entropy', 'tf.losses.sigmoid_cross_entropy', ([], {'logits': 'logits', 'multi_class_labels': 'labels'}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.transpose', 'tf.transpose', (['zn[0]'], {}), True, 'import tensorflow as tf\n'), (270, 'tensorflow.concat', 'tf.concat', (['[flattened, fl]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (381, 'numpy.unique', 'np.unique', (['Y_train'], {}), True, 'import numpy as np\n'), (512, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cost"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (513, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (515, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (142, 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'seed': '(1)'}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.multiply', 'tf.multiply', (['beta', 'h'], {}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.multiply', 'tf.multiply', (['beta', 'h'], {}), True, 'import tensorflow as tf\n'), (173, 'tensorflow.nn.relu', 'tf.nn.relu', (['h'], {}), True, 'import tensorflow as tf\n'), (458, 'tensorflow.sigmoid', 'tf.sigmoid', (['(betan * hn)'], {}), True, 'import tensorflow as tf\n'), (511, 'numpy.squeeze', 'np.squeeze', (['costs'], {}), True, 'import numpy as np\n'), (244, 'tensorflow.squared_difference', 'tf.squared_difference', (['out', 'labels'], {}), True, 'import tensorflow as tf\n'), (449, 'tensorflow.equal', 'tf.equal', (['Y', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (453, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['Y'], {}), True, 'import tensorflow as tf\n'), (459, 'tensorflow.argmax', 'tf.argmax', (['Yp'], {}), True, 'import tensorflow as tf\n'), (459, 'tensorflow.argmax', 'tf.argmax', (['Y'], {}), True, 'import tensorflow as tf\n'), (460, 'tensorflow.cast', 'tf.cast', (['correct', '"""float"""'], {}), True, 'import tensorflow as tf\n'), (454, 'tensorflow.squared_difference', 'tf.squared_difference', (['an', 'Y'], {}), True, 'import tensorflow as tf\n')] |
mehrdad-shokri/tensornets | e36eff73e5fc984977c5ceadefc1adb089e7bab5 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer ops for use in layers and tf.learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from .. import contrib_framework
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as vars_
from tensorflow.python.summary import summary
from tensorflow.python.training import moving_averages
from tensorflow.python.training import optimizer as optimizer_
from tensorflow.python.training import training as train
OPTIMIZER_CLS_NAMES = {
"Adagrad": train.AdagradOptimizer,
"Adam": train.AdamOptimizer,
"Ftrl": train.FtrlOptimizer,
"Momentum": lambda learning_rate: train.MomentumOptimizer(learning_rate, momentum=0.9), # pylint: disable=line-too-long
"RMSProp": train.RMSPropOptimizer,
"SGD": train.GradientDescentOptimizer,
}
OPTIMIZER_SUMMARIES = [
"learning_rate",
"loss",
"gradients",
"gradient_norm",
"global_gradient_norm",
]
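# Hedged usage sketch (comment added for illustration; not part of the original
# file): given a scalar `loss` tensor and a global step variable, a training op
# with global gradient clipping could be built roughly as
#   global_step = tf.train.get_or_create_global_step()
#   train_op = optimize_loss(loss, global_step, learning_rate=0.1,
#                            optimizer="SGD", clip_gradients=5.0)
# where the learning rate and clipping threshold are arbitrary example values.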
def optimize_loss(loss,
global_step,
learning_rate,
optimizer,
gradient_noise_scale=None,
gradient_multipliers=None,
clip_gradients=None,
learning_rate_decay_fn=None,
update_ops=None,
variables=None,
name=None,
summaries=None,
colocate_gradients_with_ops=False,
increment_global_step=True):
"""Given loss and parameters for optimizer, returns a training op.
Various ways of passing optimizers include:
- by string specifying the name of the optimizer. See OPTIMIZER_CLS_NAMES
for full list. E.g. `optimize_loss(..., optimizer='Adam')`.
- by function taking learning rate `Tensor` as argument and returning an
`Optimizer` instance. E.g. `optimize_loss(...,
optimizer=lambda lr: tf.compat.v1.train.MomentumOptimizer(lr,
momentum=0.5))`.
Alternatively, if `learning_rate` is `None`, the function takes no
arguments. E.g. `optimize_loss(..., learning_rate=None,
optimizer=lambda: tf.compat.v1.train.MomentumOptimizer(0.5,
momentum=0.5))`.
- by a subclass of `Optimizer` having a single-argument constructor
(the argument is the learning rate), such as AdamOptimizer or
AdagradOptimizer. E.g. `optimize_loss(...,
optimizer=tf.compat.v1.train.AdagradOptimizer)`.
- by an instance of a subclass of `Optimizer`.
E.g., `optimize_loss(...,
optimizer=tf.compat.v1.train.AdagradOptimizer(0.5))`.
Args:
loss: Scalar `Tensor`.
global_step: Scalar int `Tensor`, step counter to update on each step unless
`increment_global_step` is `False`. If not supplied, it will be fetched
from the default graph (see `tf.compat.v1.train.get_global_step` for
details). If it has not been created, no step will be incremented with
each weight update. `learning_rate_decay_fn` requires `global_step`.
learning_rate: float or `Tensor`, magnitude of update per each training
step. Can be `None`.
optimizer: string, class or optimizer instance, used as trainer. string
should be name of optimizer, like 'SGD', 'Adam', 'Adagrad'. Full list in
OPTIMIZER_CLS_NAMES constant. class should be sub-class of `tf.Optimizer`
that implements `compute_gradients` and `apply_gradients` functions.
optimizer instance should be instantiation of `tf.Optimizer` sub-class and
have `compute_gradients` and `apply_gradients` functions.
gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this
value.
gradient_multipliers: dict of variables or variable names to floats. If
present, gradients for specified variables will be multiplied by given
constant.
clip_gradients: float, callable or `None`. If a float is provided, a global
clipping is applied to prevent the norm of the gradient from exceeding
this value. Alternatively, a callable can be provided, e.g.,
`adaptive_clipping_fn()`. This callable takes a list of `(gradients,
variables)` tuples and returns the same thing with the gradients modified.
learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
`Tensor`s, returns `Tensor`. Can be used to implement any learning rate
decay functions.
For example: `tf.compat.v1.train.exponential_decay`.
Ignored if `learning_rate` is not supplied.
update_ops: list of update `Operation`s to execute at each step. If `None`,
uses elements of UPDATE_OPS collection. The order of execution between
`update_ops` and `loss` is non-deterministic.
variables: list of variables to optimize or `None` to use all trainable
variables.
name: The name for this operation is used to scope operations and summaries.
summaries: List of internal quantities to visualize on tensorboard. If not
set, the loss, the learning rate, and the global norm of the gradients
will be reported. The complete list of possible values is in
OPTIMIZER_SUMMARIES.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
increment_global_step: Whether to increment `global_step`. If your model
calls `optimize_loss` multiple times per training step (e.g. to optimize
different parts of the model), use this arg to avoid incrementing
`global_step` more times than necessary.
Returns:
Training op.
Raises:
ValueError: if:
* `loss` is an invalid type or shape.
* `global_step` is an invalid type or shape.
* `learning_rate` is an invalid type or value.
* `optimizer` has the wrong type.
* `clip_gradients` is neither float nor callable.
* `learning_rate` and `learning_rate_decay_fn` are supplied, but no
`global_step` is available.
* `gradients` is empty.
"""
loss = ops.convert_to_tensor(loss)
contrib_framework.assert_scalar(loss)
if global_step is None:
global_step = train.get_global_step()
else:
train.assert_global_step(global_step)
with vs.variable_scope(name, "OptimizeLoss", [loss, global_step]):
# Update ops take UPDATE_OPS collection if not provided.
if update_ops is None:
update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
    # Make sure update ops are run before computing loss.
if update_ops:
loss = control_flow_ops.with_dependencies(list(update_ops), loss)
# Learning rate variable, with possible decay.
lr = None
if learning_rate is not None:
if (isinstance(learning_rate, ops.Tensor) and
learning_rate.get_shape().ndims == 0):
lr = learning_rate
elif isinstance(learning_rate, float):
if learning_rate < 0.0:
raise ValueError("Invalid learning_rate %s.", learning_rate)
lr = vs.get_variable(
"learning_rate", [],
trainable=False,
initializer=init_ops.constant_initializer(learning_rate))
else:
raise ValueError("Learning rate should be 0d Tensor or float. "
"Got %s of type %s" %
(str(learning_rate), str(type(learning_rate))))
if summaries is None:
summaries = ["loss", "learning_rate", "global_gradient_norm"]
else:
for summ in summaries:
if summ not in OPTIMIZER_SUMMARIES:
raise ValueError("Summaries should be one of [%s], you provided %s." %
(", ".join(OPTIMIZER_SUMMARIES), summ))
if learning_rate is not None and learning_rate_decay_fn is not None:
if global_step is None:
raise ValueError("global_step is required for learning_rate_decay_fn.")
lr = learning_rate_decay_fn(lr, global_step)
if "learning_rate" in summaries:
summary.scalar("learning_rate", lr)
# Create optimizer, given specified parameters.
if isinstance(optimizer, six.string_types):
if lr is None:
raise ValueError("Learning rate is None, but should be specified if "
"optimizer is string (%s)." % optimizer)
if optimizer not in OPTIMIZER_CLS_NAMES:
raise ValueError(
"Optimizer name should be one of [%s], you provided %s." %
(", ".join(OPTIMIZER_CLS_NAMES), optimizer))
opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr)
elif (isinstance(optimizer, type) and
issubclass(optimizer, optimizer_.Optimizer)):
if lr is None:
raise ValueError("Learning rate is None, but should be specified if "
"optimizer is class (%s)." % optimizer)
opt = optimizer(learning_rate=lr)
elif isinstance(optimizer, optimizer_.Optimizer):
opt = optimizer
elif callable(optimizer):
if learning_rate is not None:
opt = optimizer(lr)
else:
opt = optimizer()
if not isinstance(opt, optimizer_.Optimizer):
raise ValueError("Unrecognized optimizer: function should return "
"subclass of Optimizer. Got %s." % str(opt))
else:
raise ValueError("Unrecognized optimizer: should be string, "
"subclass of Optimizer, instance of "
"subclass of Optimizer or function with one argument. "
"Got %s." % str(optimizer))
# All trainable variables, if specific variables are not specified.
if variables is None:
variables = vars_.trainable_variables()
# Compute gradients.
gradients = opt.compute_gradients(
loss,
variables,
colocate_gradients_with_ops=colocate_gradients_with_ops)
# Optionally add gradient noise.
if gradient_noise_scale is not None:
gradients = _add_scaled_noise_to_gradients(gradients,
gradient_noise_scale)
# Multiply some gradients.
if gradient_multipliers is not None:
gradients = _multiply_gradients(gradients, gradient_multipliers)
if not gradients:
raise ValueError(
"Empty list of (gradient, var) pairs encountered. This is most "
"likely to be caused by an improper value of gradient_multipliers.")
if "global_gradient_norm" in summaries or "gradient_norm" in summaries:
summary.scalar("global_norm/gradient_norm",
clip_ops.global_norm(list(zip(*gradients))[0]))
# Optionally clip gradients by global norm.
if isinstance(clip_gradients, float):
gradients = _clip_gradients_by_norm(gradients, clip_gradients)
elif callable(clip_gradients):
gradients = clip_gradients(gradients)
elif clip_gradients is not None:
raise ValueError("Unknown type %s for clip_gradients" %
type(clip_gradients))
# Add scalar summary for loss.
if "loss" in summaries:
summary.scalar("loss", loss)
# Add histograms for variables, gradients and gradient norms.
for gradient, variable in gradients:
if isinstance(gradient, ops.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
if grad_values is not None:
var_name = variable.name.replace(":", "_")
if "gradients" in summaries:
summary.histogram("gradients/%s" % var_name, grad_values)
if "gradient_norm" in summaries:
summary.scalar("gradient_norm/%s" % var_name,
clip_ops.global_norm([grad_values]))
if clip_gradients is not None and ("global_gradient_norm" in summaries or
"gradient_norm" in summaries):
summary.scalar("global_norm/clipped_gradient_norm",
clip_ops.global_norm(list(zip(*gradients))[0]))
# Create gradient updates.
grad_updates = opt.apply_gradients(
gradients,
global_step=global_step if increment_global_step else None,
name="train")
# Ensure the train_tensor computes grad_updates.
train_tensor = control_flow_ops.with_dependencies([grad_updates], loss)
return train_tensor
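# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor addition, not part of the original file):
# shows how `optimize_loss` above is typically wired up for a scalar loss.
# `loss` and `global_step` are assumed to already exist in the caller's graph;
# the string optimizer form and float global-norm clipping are exercised here.
def _example_optimize_loss_usage(loss, global_step):
  """Builds a training op for `loss`; illustrative only."""
  return optimize_loss(
      loss,
      global_step=global_step,
      learning_rate=0.1,
      optimizer="SGD",      # or an Optimizer subclass/instance, see docstring
      clip_gradients=5.0)   # float => clip gradients by global norm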
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)
return list(zip(clipped_gradients, variables))
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
"""Find max_norm given norm and previous average."""
with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
log_norm = math_ops.log(norm + epsilon)
def moving_average(name, value, decay):
moving_average_variable = vs.get_variable(
name,
shape=value.get_shape(),
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False)
return moving_averages.assign_moving_average(
moving_average_variable, value, decay, zero_debias=False)
# quicker adaptation at the beginning
if global_step is not None:
n = math_ops.cast(global_step, dtypes.float32)
decay = math_ops.minimum(decay, n / (n + 1.))
# update averages
mean = moving_average("mean", log_norm, decay)
sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)
variance = sq_mean - math_ops.square(mean)
std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
max_norms = math_ops.exp(mean + std_factor * std)
return max_norms, mean
def adaptive_clipping_fn(std_factor=2.,
decay=0.95,
static_max_norm=None,
global_step=None,
report_summary=False,
epsilon=1e-8,
name=None):
"""Adapt the clipping value using statistics on the norms.
  Implements adaptive gradient clipping as presented in section 3.2.1 of
https://arxiv.org/abs/1412.1602.
Keeps a moving average of the mean and std of the log(norm) of the gradient.
If the norm exceeds `exp(mean + std_factor*std)` then all gradients will be
rescaled such that the global norm becomes `exp(mean)`.
Args:
    std_factor: Python scalar (or tensor). `max_norm = exp(mean +
std_factor*std)`
decay: The smoothing factor of the moving averages.
static_max_norm: If provided, will threshold the norm to this value as an
extra safety.
global_step: Optional global_step. If provided, `decay = decay*n/(n+1)`.
This provides a quicker adaptation of the mean for the first steps.
report_summary: If `True`, will add histogram summaries of the `max_norm`.
epsilon: Small value chosen to avoid zero variance.
    name: The name for this operation, used to scope operations and summaries.
Returns:
A function for applying gradient clipping.
"""
def gradient_clipping(grads_and_vars):
"""Internal function for adaptive clipping."""
grads, variables = zip(*grads_and_vars)
norm = clip_ops.global_norm(grads)
max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay,
global_step, epsilon, name)
# reports the max gradient norm for debugging
if report_summary:
summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm)
# factor will be 1. if norm is smaller than max_norm
factor = array_ops.where(norm < max_norm, array_ops.ones_like(norm),
math_ops.exp(log_mean) / norm)
if static_max_norm is not None:
factor = math_ops.minimum(static_max_norm / norm, factor)
# apply factor
clipped_grads = []
for grad in grads:
if grad is None:
clipped_grads.append(None)
elif isinstance(grad, ops.IndexedSlices):
clipped_grads.append(
ops.IndexedSlices(grad.values * factor, grad.indices,
grad.dense_shape))
else:
clipped_grads.append(grad * factor)
return list(zip(clipped_grads, variables))
return gradient_clipping
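# Illustrative sketch (editor addition): the callable returned by
# `adaptive_clipping_fn` can be passed directly as `clip_gradients`, so the
# clipping threshold tracks exp(mean + std_factor * std) of recent log-norms.
# `loss` and `global_step` are assumed to be supplied by the caller.
def _example_adaptive_clipping_usage(loss, global_step):
  """Builds a training op whose gradients use adaptive norm clipping."""
  return optimize_loss(
      loss,
      global_step=global_step,
      learning_rate=0.1,
      optimizer="Adam",
      clip_gradients=adaptive_clipping_fn(
          std_factor=2.,
          decay=0.95,
          global_step=global_step,  # quicker adaptation during early steps
          report_summary=True))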
def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):
"""Adds scaled noise from a 0-mean normal distribution to gradients."""
gradients, variables = zip(*grads_and_vars)
noisy_gradients = []
for gradient in gradients:
if gradient is None:
noisy_gradients.append(None)
continue
if isinstance(gradient, ops.IndexedSlices):
gradient_shape = gradient.dense_shape
else:
gradient_shape = gradient.get_shape()
noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale
noisy_gradients.append(gradient + noise)
return list(zip(noisy_gradients, variables))
def _multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients."""
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if (grad is not None and
(var in gradient_multipliers or var.name in gradient_multipliers)):
key = var if var in gradient_multipliers else var.name
multiplier = gradient_multipliers[key]
if isinstance(grad, ops.IndexedSlices):
grad_values = grad.values * multiplier
grad = ops.IndexedSlices(grad_values, grad.indices, grad.dense_shape)
else:
grad *= math_ops.cast(multiplier, grad.dtype)
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
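# Illustrative sketch (editor addition): `gradient_multipliers` (handled by
# `_multiply_gradients` above) scales the gradients of selected variables,
# e.g. to let an embedding matrix learn faster than the rest of the model.
# The variable passed in is a placeholder assumption for the example.
def _example_gradient_multipliers_usage(loss, global_step, embedding_var):
  """Builds a training op where `embedding_var` gets 10x larger updates."""
  return optimize_loss(
      loss,
      global_step=global_step,
      learning_rate=0.1,
      optimizer="Adagrad",
      gradient_multipliers={embedding_var: 10.0})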
| [
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.training.training.assert_global_step",
"tensorflow.python.summary.summary.scalar",
"tensorflow.python.ops.math_ops.exp",
"tensorflow.python.summary.summary.histogram",
"tensorflow.python.ops.variables.trainable_variables",
"tensorflow.python.ops.init_ops.constant_initializer",
"tensorflow.python.training.training.get_global_step",
"tensorflow.python.training.moving_averages.assign_moving_average",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.framework.ops.IndexedSlices",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.ops.random_ops.truncated_normal",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.training.training.MomentumOptimizer",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.clip_ops.clip_by_global_norm",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.clip_ops.global_norm",
"tensorflow.python.ops.init_ops.zeros_initializer",
"tensorflow.python.ops.math_ops.maximum"
] | tensornets/contrib_layers/optimizers.py | [(154, 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['loss'], {}), False, 'from tensorflow.python.framework import ops\n'), (306, 'tensorflow.python.ops.clip_ops.clip_by_global_norm', 'clip_ops.clip_by_global_norm', (['gradients', 'clip_gradients'], {}), False, 'from tensorflow.python.ops import clip_ops\n'), (43, 'tensorflow.python.training.training.MomentumOptimizer', 'train.MomentumOptimizer', (['learning_rate'], {'momentum': '(0.9)'}), True, 'from tensorflow.python.training import training as train\n'), (157, 'tensorflow.python.training.training.get_global_step', 'train.get_global_step', ([], {}), True, 'from tensorflow.python.training import training as train\n'), (159, 'tensorflow.python.training.training.assert_global_step', 'train.assert_global_step', (['global_step'], {}), True, 'from tensorflow.python.training import training as train\n'), (160, 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['name', '"""OptimizeLoss"""', '[loss, global_step]'], {}), True, 'from tensorflow.python.ops import variable_scope as vs\n'), (298, 'tensorflow.python.ops.control_flow_ops.with_dependencies', 'control_flow_ops.with_dependencies', (['[grad_updates]', 'loss'], {}), False, 'from tensorflow.python.ops import control_flow_ops\n'), (312, 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['name', '"""AdaptiveMaxNorm"""', '[norm]'], {}), True, 'from tensorflow.python.ops import variable_scope as vs\n'), (313, 'tensorflow.python.ops.math_ops.log', 'math_ops.log', (['(norm + epsilon)'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (336, 'tensorflow.python.ops.math_ops.exp', 'math_ops.exp', (['(mean + std_factor * std)'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (376, 'tensorflow.python.ops.clip_ops.global_norm', 'clip_ops.global_norm', (['grads'], {}), False, 'from tensorflow.python.ops import clip_ops\n'), (233, 'tensorflow.python.ops.variables.trainable_variables', 'vars_.trainable_variables', ([], {}), True, 'from tensorflow.python.ops import variables as vars_\n'), (269, 'tensorflow.python.summary.summary.scalar', 'summary.scalar', (['"""loss"""', 'loss'], {}), False, 'from tensorflow.python.summary import summary\n'), (322, 'tensorflow.python.training.moving_averages.assign_moving_average', 'moving_averages.assign_moving_average', (['moving_average_variable', 'value', 'decay'], {'zero_debias': '(False)'}), False, 'from tensorflow.python.training import moving_averages\n'), (327, 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['global_step', 'dtypes.float32'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (328, 'tensorflow.python.ops.math_ops.minimum', 'math_ops.minimum', (['decay', '(n / (n + 1.0))'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (332, 'tensorflow.python.ops.math_ops.square', 'math_ops.square', (['log_norm'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (334, 'tensorflow.python.ops.math_ops.square', 'math_ops.square', (['mean'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (335, 'tensorflow.python.ops.math_ops.maximum', 'math_ops.maximum', (['epsilon', 'variance'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (383, 'tensorflow.python.summary.summary.scalar', 'summary.scalar', (['"""global_norm/adaptive_max_gradient_norm"""', 'max_norm'], {}), False, 'from tensorflow.python.summary import summary\n'), (386, 
'tensorflow.python.ops.array_ops.ones_like', 'array_ops.ones_like', (['norm'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (390, 'tensorflow.python.ops.math_ops.minimum', 'math_ops.minimum', (['(static_max_norm / norm)', 'factor'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (421, 'tensorflow.python.ops.random_ops.truncated_normal', 'random_ops.truncated_normal', (['gradient_shape'], {}), False, 'from tensorflow.python.ops import random_ops\n'), (163, 'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', (['ops.GraphKeys.UPDATE_OPS'], {}), False, 'from tensorflow.python.framework import ops\n'), (197, 'tensorflow.python.summary.summary.scalar', 'summary.scalar', (['"""learning_rate"""', 'lr'], {}), False, 'from tensorflow.python.summary import summary\n'), (387, 'tensorflow.python.ops.math_ops.exp', 'math_ops.exp', (['log_mean'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (436, 'tensorflow.python.framework.ops.IndexedSlices', 'ops.IndexedSlices', (['grad_values', 'grad.indices', 'grad.dense_shape'], {}), False, 'from tensorflow.python.framework import ops\n'), (438, 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['multiplier', 'grad.dtype'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (281, 'tensorflow.python.summary.summary.histogram', 'summary.histogram', (["('gradients/%s' % var_name)", 'grad_values'], {}), False, 'from tensorflow.python.summary import summary\n'), (320, 'tensorflow.python.ops.init_ops.zeros_initializer', 'init_ops.zeros_initializer', ([], {}), False, 'from tensorflow.python.ops import init_ops\n'), (284, 'tensorflow.python.ops.clip_ops.global_norm', 'clip_ops.global_norm', (['[grad_values]'], {}), False, 'from tensorflow.python.ops import clip_ops\n'), (399, 'tensorflow.python.framework.ops.IndexedSlices', 'ops.IndexedSlices', (['(grad.values * factor)', 'grad.indices', 'grad.dense_shape'], {}), False, 'from tensorflow.python.framework import ops\n'), (180, 'tensorflow.python.ops.init_ops.constant_initializer', 'init_ops.constant_initializer', (['learning_rate'], {}), False, 'from tensorflow.python.ops import init_ops\n')] |
Archer-pro666/BAAF-Net | 663d1681d4d05ad3caaacd98e6dedfdc9caa4930 | """ Wrapper functions for TensorFlow layers.
Author: Charles R. Qi
Date: November 2016
"""
import numpy as np
import tensorflow as tf
def _variable_on_cpu(name, shape, initializer, use_fp16=False):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
use_xavier: bool, whether to use xavier initializer
Returns:
Variable Tensor
"""
if use_xavier:
initializer = tf.contrib.layers.xavier_initializer()
var = _variable_on_cpu(name, shape, initializer)
else:
# initializer = tf.truncated_normal_initializer(stddev=stddev)
with tf.device('/cpu:0'):
var = tf.truncated_normal(shape, stddev=np.sqrt(2 / shape[-1]))
var = tf.round(var * tf.constant(1000, dtype=tf.float32)) / tf.constant(1000, dtype=tf.float32)
var = tf.Variable(var, name='weights')
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def conv1d(inputs,
num_output_channels,
kernel_size,
scope,
stride=1,
padding='SAME',
use_xavier=True,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None):
""" 1D convolution with non-linear operation.
Args:
inputs: 3-D tensor variable BxLxC
num_output_channels: int
kernel_size: int
scope: string
stride: int
padding: 'SAME' or 'VALID'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_size,
num_in_channels, num_output_channels]
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
outputs = tf.nn.conv1d(inputs, kernel,
stride=stride,
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_conv1d(outputs, is_training,
bn_decay=bn_decay, scope='bn')
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
def conv2d(inputs,
num_output_channels,
kernel_size,
scope,
stride=[1, 1],
padding='SAME',
bn=False,
is_training=None,
use_xavier=False,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn_decay=None):
""" 2D convolution with non-linear operation.
Args:
inputs: 4-D tensor variable BxHxWxC
num_output_channels: int
kernel_size: a list of 2 ints
scope: string
stride: a list of 2 ints
padding: 'SAME' or 'VALID'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_h, kernel_w,
num_in_channels, num_output_channels]
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
stride_h, stride_w = stride
outputs = tf.nn.conv2d(inputs, kernel,
[1, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training)
if activation_fn is not None:
outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
return outputs
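# Illustrative sketch (editor addition, not part of the original utilities):
# a typical call to `conv2d` above -- a 1x1 convolution over per-point
# features with batch normalization, as commonly used in point-cloud
# networks. The shapes and scope name are assumptions for the example.
def _example_conv2d_usage(point_features, is_training):
    """point_features: B x N x 1 x C tensor; returns a B x N x 1 x 64 tensor."""
    return conv2d(point_features, 64, [1, 1], 'example_conv',
                  stride=[1, 1], padding='VALID',
                  bn=True, is_training=is_training)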
def conv2d_transpose(inputs,
num_output_channels,
kernel_size,
scope,
stride=[1, 1],
padding='SAME',
use_xavier=False,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None):
""" 2D convolution transpose with non-linear operation.
Args:
inputs: 4-D tensor variable BxHxWxC
num_output_channels: int
kernel_size: a list of 2 ints
scope: string
stride: a list of 2 ints
padding: 'SAME' or 'VALID'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
Note: conv2d(conv2d_transpose(a, num_out, ksize, stride), a.shape[-1], ksize, stride) == a
"""
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_h, kernel_w,
                        num_output_channels, num_in_channels]  # reversed compared to conv2d
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
stride_h, stride_w = stride
# from slim.convolution2d_transpose
def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
dim_size *= stride_size
if padding == 'VALID' and dim_size is not None:
dim_size += max(kernel_size - stride_size, 0)
return dim_size
    # calculate output shape
batch_size = tf.shape(inputs)[0]
height = tf.shape(inputs)[1]
width = tf.shape(inputs)[2]
out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
output_shape = tf.stack([batch_size, out_height, out_width, num_output_channels], axis=0)
outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape,
[1, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
# outputs = batch_norm_for_conv2d(outputs, is_training,
# bn_decay=bn_decay, scope='bn')
outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training)
if activation_fn is not None:
# outputs = activation_fn(outputs)
outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
return outputs
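# Illustrative sketch (editor addition): an upsampling block built from
# `conv2d_transpose` followed by a refining `conv2d`. Channel counts, kernel
# sizes and scope names are assumptions for the example.
def _example_upsample_usage(feature_map, is_training):
    """feature_map: B x H x W x C tensor; returns a B x 2H x 2W x 128 tensor."""
    net = conv2d_transpose(feature_map, 128, [3, 3], 'example_up',
                           stride=[2, 2], bn=True, is_training=is_training)
    return conv2d(net, 128, [3, 3], 'example_refine', stride=[1, 1],
                  bn=True, is_training=is_training)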
def conv3d(inputs,
num_output_channels,
kernel_size,
scope,
stride=[1, 1, 1],
padding='SAME',
use_xavier=True,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None):
""" 3D convolution with non-linear operation.
Args:
inputs: 5-D tensor variable BxDxHxWxC
num_output_channels: int
kernel_size: a list of 3 ints
scope: string
stride: a list of 3 ints
padding: 'SAME' or 'VALID'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_d, kernel_h, kernel_w,
num_in_channels, num_output_channels]
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
stride_d, stride_h, stride_w = stride
outputs = tf.nn.conv3d(inputs, kernel,
[1, stride_d, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_conv3d(outputs, is_training,
bn_decay=bn_decay, scope='bn')
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
def fully_connected(inputs,
num_outputs,
scope,
use_xavier=True,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None):
""" Fully connected layer with non-linear operation.
Args:
inputs: 2-D tensor BxN
num_outputs: int
Returns:
Variable tensor of size B x num_outputs.
"""
with tf.variable_scope(scope) as sc:
num_input_units = inputs.get_shape()[-1].value
weights = _variable_with_weight_decay('weights',
shape=[num_input_units, num_outputs],
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
outputs = tf.matmul(inputs, weights)
biases = _variable_on_cpu('biases', [num_outputs],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn')
if activation_fn is not None:
# outputs = activation_fn(outputs)
outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
return outputs
def max_pool2d(inputs,
kernel_size,
scope,
stride=[2, 2],
padding='VALID'):
""" 2D max pooling.
Args:
inputs: 4-D tensor BxHxWxC
kernel_size: a list of 2 ints
stride: a list of 2 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
stride_h, stride_w = stride
outputs = tf.nn.max_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def avg_pool2d(inputs,
kernel_size,
scope,
stride=[2, 2],
padding='VALID'):
""" 2D avg pooling.
Args:
inputs: 4-D tensor BxHxWxC
kernel_size: a list of 2 ints
stride: a list of 2 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
stride_h, stride_w = stride
outputs = tf.nn.avg_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def max_pool3d(inputs,
kernel_size,
scope,
stride=[2, 2, 2],
padding='VALID'):
""" 3D max pooling.
Args:
inputs: 5-D tensor BxDxHxWxC
kernel_size: a list of 3 ints
stride: a list of 3 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
stride_d, stride_h, stride_w = stride
outputs = tf.nn.max_pool3d(inputs,
ksize=[1, kernel_d, kernel_h, kernel_w, 1],
strides=[1, stride_d, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def avg_pool3d(inputs,
kernel_size,
scope,
stride=[2, 2, 2],
padding='VALID'):
""" 3D avg pooling.
Args:
inputs: 5-D tensor BxDxHxWxC
kernel_size: a list of 3 ints
stride: a list of 3 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
stride_d, stride_h, stride_w = stride
outputs = tf.nn.avg_pool3d(inputs,
ksize=[1, kernel_d, kernel_h, kernel_w, 1],
strides=[1, stride_d, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay):
""" Batch normalization on convolutional maps and beyond...
Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
Args:
inputs: Tensor, k-D input ... x C could be BC or BHWC or BDHWC
        is_training: boolean tf.Variable, true indicates training phase
scope: string, variable scope
moments_dims: a list of ints, indicating dimensions for moments calculation
        bn_decay: float or float tensor variable, controlling moving average weight
Return:
normed: batch-normalized maps
"""
with tf.variable_scope(scope) as sc:
num_channels = inputs.get_shape()[-1].value
beta = tf.Variable(tf.constant(0.0, shape=[num_channels]),
name='beta', trainable=True)
gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]),
name='gamma', trainable=True)
batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
decay = bn_decay if bn_decay is not None else 0.9
ema = tf.train.ExponentialMovingAverage(decay=decay)
# Operator that maintains moving averages of variables.
ema_apply_op = tf.cond(is_training,
lambda: ema.apply([batch_mean, batch_var]),
lambda: tf.no_op())
# Update moving average and return current batch's avg and var.
def mean_var_with_update():
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
# ema.average returns the Variable holding the average of var.
mean, var = tf.cond(is_training,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)
return normed
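# Illustrative sketch (editor addition): `bn_decay` above is usually a scalar
# tensor that ramps towards ~0.99 during training, so the moving averages
# react quickly at first and stabilize later. The schedule below (exponential
# ramp clipped at 0.99) is an assumption for the example, not part of this
# file's API.
def _example_bn_decay_schedule(global_step, batch_size=32):
    """Returns a bn_decay tensor that grows from 0.5 towards 0.99."""
    bn_momentum = tf.train.exponential_decay(0.5, global_step * batch_size,
                                             200000, 0.5, staircase=True)
    return tf.minimum(0.99, 1 - bn_momentum)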
def batch_norm_for_fc(inputs, is_training, bn_decay, scope):
""" Batch normalization on FC data.
Args:
inputs: Tensor, 2D BxC input
        is_training: boolean tf.Variable, true indicates training phase
        bn_decay: float or float tensor variable, controlling moving average weight
scope: string, variable scope
Return:
normed: batch-normalized maps
"""
return batch_norm_template(inputs, is_training, scope, [0, ], bn_decay)
def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope):
""" Batch normalization on 1D convolutional maps.
Args:
inputs: Tensor, 3D BLC input maps
        is_training: boolean tf.Variable, true indicates training phase
        bn_decay: float or float tensor variable, controlling moving average weight
scope: string, variable scope
Return:
normed: batch-normalized maps
"""
return batch_norm_template(inputs, is_training, scope, [0, 1], bn_decay)
def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope):
""" Batch normalization on 2D convolutional maps.
Args:
inputs: Tensor, 4D BHWC input maps
        is_training: boolean tf.Variable, true indicates training phase
        bn_decay: float or float tensor variable, controlling moving average weight
scope: string, variable scope
Return:
normed: batch-normalized maps
"""
return batch_norm_template(inputs, is_training, scope, [0, 1, 2], bn_decay)
def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope):
""" Batch normalization on 3D convolutional maps.
Args:
inputs: Tensor, 5D BDHWC input maps
        is_training: boolean tf.Variable, true indicates training phase
        bn_decay: float or float tensor variable, controlling moving average weight
scope: string, variable scope
Return:
normed: batch-normalized maps
"""
return batch_norm_template(inputs, is_training, scope, [0, 1, 2, 3], bn_decay)
def dropout(inputs,
is_training,
scope,
keep_prob=0.5,
noise_shape=None):
""" Dropout layer.
Args:
inputs: tensor
is_training: boolean tf.Variable
scope: string
keep_prob: float in [0,1]
noise_shape: list of ints
Returns:
tensor variable
"""
with tf.variable_scope(scope) as sc:
outputs = tf.cond(is_training,
lambda: tf.nn.dropout(inputs, keep_prob, noise_shape),
lambda: inputs)
return outputs
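# Illustrative sketch (editor addition): chaining the wrappers above into a
# small classification head. Layer sizes, scope names and the keep
# probability are assumptions for the example.
def _example_mlp_head(features, num_classes, is_training, bn_decay=None):
    """features: B x C tensor; returns B x num_classes logits."""
    net = fully_connected(features, 128, 'example_fc1', bn=True,
                          is_training=is_training, bn_decay=bn_decay)
    net = dropout(net, is_training, 'example_dp1', keep_prob=0.5)
    return fully_connected(net, num_classes, 'example_logits',
                           activation_fn=None)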
| [
"tensorflow.device",
"tensorflow.get_variable",
"numpy.sqrt",
"tensorflow.control_dependencies",
"tensorflow.nn.max_pool",
"tensorflow.stack",
"tensorflow.nn.conv2d_transpose",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.nn.l2_loss",
"tensorflow.nn.conv1d",
"tensorflow.nn.conv2d",
"tensorflow.layers.batch_normalization",
"tensorflow.Variable",
"tensorflow.nn.moments",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.nn.batch_normalization",
"tensorflow.shape",
"tensorflow.identity",
"tensorflow.nn.conv3d",
"tensorflow.nn.avg_pool",
"tensorflow.no_op",
"tensorflow.nn.avg_pool3d",
"tensorflow.add_to_collection",
"tensorflow.nn.leaky_relu",
"tensorflow.nn.bias_add",
"tensorflow.nn.max_pool3d",
"tensorflow.constant",
"tensorflow.constant_initializer",
"tensorflow.variable_scope"
] | helper_tf_util.py | [(20, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (22, 'tensorflow.get_variable', 'tf.get_variable', (['name', 'shape'], {'initializer': 'initializer', 'dtype': 'dtype'}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""losses"""', 'weight_decay'], {}), True, 'import tensorflow as tf\n'), (91, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (100, 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (['inputs', 'kernel'], {'stride': 'stride', 'padding': 'padding'}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['outputs', 'biases'], {}), True, 'import tensorflow as tf\n'), (148, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (159, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['inputs', 'kernel', '[1, stride_h, stride_w, 1]'], {'padding': 'padding'}), True, 'import tensorflow as tf\n'), (164, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['outputs', 'biases'], {}), True, 'import tensorflow as tf\n'), (208, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.stack', 'tf.stack', (['[batch_size, out_height, out_width, num_output_channels]'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (236, 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['inputs', 'kernel', 'output_shape', '[1, stride_h, stride_w, 1]'], {'padding': 'padding'}), True, 'import tensorflow as tf\n'), (241, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['outputs', 'biases'], {}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (297, 'tensorflow.nn.conv3d', 'tf.nn.conv3d', (['inputs', 'kernel', '[1, stride_d, stride_h, stride_w, 1]'], {'padding': 'padding'}), True, 'import tensorflow as tf\n'), (302, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['outputs', 'biases'], {}), True, 'import tensorflow as tf\n'), (332, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (339, 'tensorflow.matmul', 'tf.matmul', (['inputs', 'weights'], {}), True, 'import tensorflow as tf\n'), (342, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['outputs', 'biases'], {}), True, 'import tensorflow as tf\n'), (368, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (371, 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['inputs'], {'ksize': '[1, kernel_h, kernel_w, 1]', 'strides': '[1, stride_h, stride_w, 1]', 'padding': 'padding', 'name': 'sc.name'}), True, 'import tensorflow as tf\n'), (394, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (397, 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['inputs'], {'ksize': '[1, kernel_h, kernel_w, 1]', 'strides': '[1, stride_h, stride_w, 1]', 'padding': 'padding', 'name': 'sc.name'}), True, 'import tensorflow as tf\n'), (420, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (423, 'tensorflow.nn.max_pool3d', 'tf.nn.max_pool3d', (['inputs'], {'ksize': '[1, kernel_d, kernel_h, kernel_w, 1]', 'strides': '[1, stride_d, stride_h, stride_w, 1]', 
'padding': 'padding', 'name': 'sc.name'}), True, 'import tensorflow as tf\n'), (446, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (449, 'tensorflow.nn.avg_pool3d', 'tf.nn.avg_pool3d', (['inputs'], {'ksize': '[1, kernel_d, kernel_h, kernel_w, 1]', 'strides': '[1, stride_d, stride_h, stride_w, 1]', 'padding': 'padding', 'name': 'sc.name'}), True, 'import tensorflow as tf\n'), (470, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (476, 'tensorflow.nn.moments', 'tf.nn.moments', (['inputs', 'moments_dims'], {'name': '"""moments"""'}), True, 'import tensorflow as tf\n'), (478, 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', ([], {'decay': 'decay'}), True, 'import tensorflow as tf\n'), (493, 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['inputs', 'mean', 'var', 'beta', 'gamma', '(0.001)'], {}), True, 'import tensorflow as tf\n'), (570, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.Variable', 'tf.Variable', (['var'], {'name': '"""weights"""'}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['var'], {}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (163, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['outputs'], {'momentum': '(0.99)', 'epsilon': '(1e-06)', 'training': 'is_training'}), True, 'import tensorflow as tf\n'), (169, 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['outputs'], {'alpha': '(0.2)'}), True, 'import tensorflow as tf\n'), (229, 'tensorflow.shape', 'tf.shape', (['inputs'], {}), True, 'import tensorflow as tf\n'), (230, 'tensorflow.shape', 'tf.shape', (['inputs'], {}), True, 'import tensorflow as tf\n'), (231, 'tensorflow.shape', 'tf.shape', (['inputs'], {}), True, 'import tensorflow as tf\n'), (240, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (246, 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['outputs'], {'momentum': '(0.99)', 'epsilon': '(1e-06)', 'training': 'is_training'}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['outputs'], {'alpha': '(0.2)'}), True, 'import tensorflow as tf\n'), (301, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (341, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (349, 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['outputs'], {'alpha': '(0.2)'}), True, 'import tensorflow as tf\n'), (472, 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'shape': '[num_channels]'}), True, 'import tensorflow as tf\n'), (474, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[num_channels]'}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.constant', 'tf.constant', (['(1000)'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (482, 'tensorflow.no_op', 'tf.no_op', ([], {}), True, 'import tensorflow as tf\n'), (486, 
'tensorflow.control_dependencies', 'tf.control_dependencies', (['[ema_apply_op]'], {}), True, 'import tensorflow as tf\n'), (572, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['inputs', 'keep_prob', 'noise_shape'], {}), True, 'import tensorflow as tf\n'), (49, 'numpy.sqrt', 'np.sqrt', (['(2 / shape[-1])'], {}), True, 'import numpy as np\n'), (487, 'tensorflow.identity', 'tf.identity', (['batch_mean'], {}), True, 'import tensorflow as tf\n'), (487, 'tensorflow.identity', 'tf.identity', (['batch_var'], {}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.constant', 'tf.constant', (['(1000)'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n')] |
renatoviolin/GAN-image-inpainting | 6ba7ccd4ae55b185ee89844e846d4c469f4fa65f | import cv2
import numpy as np
import tensorflow as tf
import neuralgym as ng
from .inpaint_model import InpaintCAModel
checkpoint_dir = 'generative_inpainting/models'
FLAGS = ng.Config('generative_inpainting/inpaint.yml')
def run_fill(file_test, file_mask):
model = InpaintCAModel()
image = cv2.imread(file_test)
mask = cv2.imread(file_mask)
h, w, _ = image.shape
grid = 8
image = image[:h // grid * grid, :w // grid * grid, :]
mask = mask[:h // grid * grid, :w // grid * grid, :]
image = np.expand_dims(image, 0)
mask = np.expand_dims(mask, 0)
input_image = np.concatenate([image, mask], axis=2)
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
input_image = tf.constant(input_image, dtype=tf.float32)
output = model.build_server_graph(FLAGS, input_image)
output = (output + 1.) * 127.5
output = tf.reverse(output, [-1])
output = tf.saturate_cast(output, tf.uint8)
# load pretrained model
vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
assign_ops = []
for var in vars_list:
vname = var.name
from_name = vname
var_value = tf.contrib.framework.load_variable(checkpoint_dir, from_name)
assign_ops.append(tf.assign(var, var_value))
sess.run(assign_ops)
result = sess.run(output)
tf.reset_default_graph()
return result[0][:, :, ::-1]
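# Illustrative usage sketch (editor addition): running the inpainting model on
# an image and a mask of the same size (the mask marks the region to fill)
# and writing the result to disk. The file names below are placeholders.
if __name__ == '__main__':
    filled = run_fill('examples/input.png', 'examples/mask.png')
    cv2.imwrite('examples/output.png', filled)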
| [
"tensorflow.reverse",
"numpy.expand_dims",
"tensorflow.constant",
"tensorflow.get_collection",
"tensorflow.assign",
"numpy.concatenate",
"tensorflow.ConfigProto",
"tensorflow.reset_default_graph",
"tensorflow.contrib.framework.load_variable",
"tensorflow.Session",
"tensorflow.saturate_cast"
] | generative_inpainting/predict.py | [(10, 'neuralgym.Config', 'ng.Config', (['"""generative_inpainting/inpaint.yml"""'], {}), True, 'import neuralgym as ng\n'), (15, 'cv2.imread', 'cv2.imread', (['file_test'], {}), False, 'import cv2\n'), (16, 'cv2.imread', 'cv2.imread', (['file_mask'], {}), False, 'import cv2\n'), (23, 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), True, 'import numpy as np\n'), (24, 'numpy.expand_dims', 'np.expand_dims', (['mask', '(0)'], {}), True, 'import numpy as np\n'), (25, 'numpy.concatenate', 'np.concatenate', (['[image, mask]'], {'axis': '(2)'}), True, 'import numpy as np\n'), (27, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.Session', 'tf.Session', ([], {'config': 'sess_config'}), True, 'import tensorflow as tf\n'), (30, 'tensorflow.constant', 'tf.constant', (['input_image'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.reverse', 'tf.reverse', (['output', '[-1]'], {}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.saturate_cast', 'tf.saturate_cast', (['output', 'tf.uint8'], {}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.contrib.framework.load_variable', 'tf.contrib.framework.load_variable', (['checkpoint_dir', 'from_name'], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.assign', 'tf.assign', (['var', 'var_value'], {}), True, 'import tensorflow as tf\n')] |
sachinpro/sachinpro.github.io | c3bbd8d89818f5d8bb7296c851ed5e52c19728e3 | # pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for Linear and DNN joined training models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import math
import numpy as np
import six
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework.python.ops import variables as variables
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
# TODO(ispir): Increase test coverage
class _DNNLinearCombinedBaseEstimator(estimator.BaseEstimator):
"""An estimator for TensorFlow Linear and DNN joined training models.
Input of `fit`, `train`, and `evaluate` should have following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
Parameters:
model_dir: Directory to save model parameters, graph and etc.
n_classes: number of target classes. Default is binary classification.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
linear_feature_columns: An iterable containing all the feature columns used
by linear part of the model. All items in the set should be instances of
classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. If `None`, will use a FTRL optimizer.
dnn_feature_columns: An iterable containing all the feature columns used by
deep part of the model. All items in the set should be instances of
classes derived from `FeatureColumn`.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the
deep part of the model. If `None`, will use an Adagrad optimizer.
dnn_activation_fn: Activation function applied to each layer. If `None`,
will use `tf.nn.relu`.
config: RunConfig object to configure the runtime settings.
Raises:
    ValueError: If both linear_feature_columns and dnn_feature_columns are
empty at the same time.
"""
def __init__(self,
model_dir=None,
n_classes=2,
weight_column_name=None,
linear_feature_columns=None,
linear_optimizer=None,
dnn_feature_columns=None,
dnn_optimizer=None,
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
config=None):
super(_DNNLinearCombinedBaseEstimator, self).__init__(model_dir=model_dir,
config=config)
self._n_classes = n_classes
self._weight_column_name = weight_column_name
self._linear_feature_columns = linear_feature_columns
self._linear_optimizer = linear_optimizer
self._dnn_feature_columns = dnn_feature_columns
self._dnn_optimizer = dnn_optimizer
self._dnn_hidden_units = dnn_hidden_units
self._dnn_activation_fn = dnn_activation_fn
if self._dnn_activation_fn is None:
self._dnn_activation_fn = nn.relu
self._dnn_weight_collection = "DNNLinearCombined_dnn"
self._linear_weight_collection = "DNNLinearCombined_linear"
def predict(self, x=None, input_fn=None, batch_size=None):
"""Returns predictions for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
Returns:
Numpy array of predicted classes or regression values.
"""
predictions = self._infer_model(x=x,
input_fn=input_fn,
batch_size=batch_size)
if self._n_classes > 1:
predictions = np.argmax(predictions, axis=1)
return predictions
def predict_proba(self, x=None, input_fn=None, batch_size=None):
"""Returns prediction probabilities for given features (classification).
Args:
x: features.
input_fn: Input function. If set, x and y must be None.
batch_size: Override default batch size.
Returns:
Numpy array of predicted probabilities.
"""
return self._infer_model(x=x, input_fn=input_fn, batch_size=batch_size)
def _get_train_ops(self, features, targets):
"""See base class."""
global_step = variables.get_global_step()
assert global_step
loss = self._loss(
self._logits(features), targets, self._get_weight_tensor(features))
logging_ops.scalar_summary("loss", loss)
linear_vars = self._get_linear_vars()
dnn_vars = self._get_dnn_vars()
grads = gradients.gradients(loss, dnn_vars + linear_vars)
dnn_grads = grads[0:len(dnn_vars)]
linear_grads = grads[len(dnn_vars):]
train_ops = self._get_linear_training_ops(
linear_grads, linear_vars) + self._get_dnn_training_ops(dnn_grads,
dnn_vars)
train_step = control_flow_ops.group(*train_ops, name="combined_training_op")
with ops.control_dependencies([train_step]):
with ops.get_default_graph().colocate_with(global_step):
return state_ops.assign_add(global_step, 1).op, loss
def _run_metrics(self, predictions, targets, metrics, weights):
result = {}
targets = math_ops.cast(targets, predictions.dtype)
for name, metric in six.iteritems(metrics or {}):
if "weights" in inspect.getargspec(metric)[0]:
result[name] = metric(predictions, targets, weights=weights)
else:
result[name] = metric(predictions, targets)
return result
def _get_eval_ops(self, features, targets, metrics=None):
"""See base class."""
logits = self._logits(features)
result = {"loss": metrics_lib.streaming_mean(self._loss(
logits, targets,
weight_tensor=self._get_weight_tensor(features)))}
# Adding default metrics
if metrics is None and self._n_classes > 1:
metrics = {"accuracy": metrics_lib.streaming_accuracy}
if self._n_classes == 2:
predictions = math_ops.sigmoid(logits)
result["eval_auc"] = metrics_lib.streaming_auc(predictions, targets)
if metrics:
predictions = self._logits_to_predictions(logits, proba=False)
result.update(self._run_metrics(predictions, targets, metrics,
self._get_weight_tensor(features)))
return result
def _get_predict_ops(self, features):
"""See base class."""
logits = self._logits(features)
return self._logits_to_predictions(logits, proba=True)
def _logits_to_predictions(self, logits, proba=False):
if self._n_classes < 2:
return array_ops.reshape(logits, [-1])
if self._n_classes == 2:
logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
if proba:
return nn.softmax(logits)
else:
return math_ops.argmax(logits, 1)
def _get_feature_ops_from_example(self, examples_batch):
column_types = layers.create_dict_for_parse_example(
(self._get_linear_feature_columns() or []) +
(self._get_dnn_feature_columns() or []))
features = parsing_ops.parse_example(examples_batch, column_types)
return features
def _num_label_columns(self):
return 1 if self._n_classes <= 2 else self._n_classes
def _get_linear_feature_columns(self):
return sorted(
set(self._linear_feature_columns),
key=lambda x: x.key) if self._linear_feature_columns else None
def _get_dnn_feature_columns(self):
return sorted(set(
self._dnn_feature_columns)) if self._dnn_feature_columns else None
def _dnn_logits(self, features):
net = layers.input_from_feature_columns(
features,
self._get_dnn_feature_columns(),
weight_collections=[self._dnn_weight_collection])
for layer_id, num_hidden_units in enumerate(self._dnn_hidden_units):
net = layers.legacy_fully_connected(
net,
num_hidden_units,
activation_fn=self._dnn_activation_fn,
weight_collections=[self._dnn_weight_collection],
bias_collections=[self._dnn_weight_collection],
name="hiddenlayer_%d" % layer_id)
self._add_hidden_layer_summary(net, "hiddenlayer_%d" % layer_id)
logit = layers.legacy_fully_connected(
net,
self._num_label_columns(),
weight_collections=[self._dnn_weight_collection],
bias_collections=[self._dnn_weight_collection],
name="dnn_logit")
self._add_hidden_layer_summary(logit, "dnn_logit")
return logit
def _add_hidden_layer_summary(self, value, tag):
# TODO(zakaria): Move this code to tf.learn and add test.
logging_ops.scalar_summary("%s:fraction_of_zero_values" % tag,
nn.zero_fraction(value))
logging_ops.histogram_summary("%s:activation" % tag, value)
def _linear_logits(self, features):
logits, _, _ = layers.weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=self._get_linear_feature_columns(),
num_outputs=self._num_label_columns(),
weight_collections=[self._linear_weight_collection],
name="linear")
return logits
def _get_feature_dict(self, features):
if isinstance(features, dict):
return features
return {"": features}
def _logits(self, features):
if not (self._get_linear_feature_columns() or
self._get_dnn_feature_columns()):
raise ValueError("Either linear_feature_columns or dnn_feature_columns "
"should be defined.")
features = self._get_feature_dict(features)
if self._get_linear_feature_columns() and self._get_dnn_feature_columns():
return self._linear_logits(features) + self._dnn_logits(features)
elif self._get_dnn_feature_columns():
return self._dnn_logits(features)
else:
return self._linear_logits(features)
def _get_weight_tensor(self, features):
if not self._weight_column_name:
return None
else:
return array_ops.reshape(
math_ops.to_float(features[self._weight_column_name]),
shape=(-1,))
def _loss(self, logits, target, weight_tensor):
if self._n_classes < 2:
loss_vec = math_ops.square(logits - math_ops.to_float(target))
elif self._n_classes == 2:
loss_vec = nn.sigmoid_cross_entropy_with_logits(logits,
math_ops.to_float(target))
else:
loss_vec = nn.sparse_softmax_cross_entropy_with_logits(
logits, array_ops.reshape(target, [-1]))
if weight_tensor is None:
return math_ops.reduce_mean(loss_vec, name="loss")
else:
loss_vec = array_ops.reshape(loss_vec, shape=(-1,))
loss_vec = math_ops.mul(
loss_vec, array_ops.reshape(weight_tensor, shape=(-1,)))
return math_ops.div(
math_ops.reduce_sum(loss_vec),
math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
name="loss")
def _get_linear_vars(self):
if self._get_linear_feature_columns():
return ops.get_collection(self._linear_weight_collection)
return []
def _get_linear_training_ops(self, linear_grads, linear_vars):
if self._get_linear_feature_columns():
self._linear_optimizer = self._get_optimizer(
self._linear_optimizer,
default_optimizer="Ftrl",
default_learning_rate=1. / math.sqrt(len(
self._get_linear_feature_columns())))
return [
self._linear_optimizer.apply_gradients(zip(linear_grads, linear_vars))
]
return []
def _get_dnn_vars(self):
if self._get_dnn_feature_columns():
return ops.get_collection(self._dnn_weight_collection)
return []
def _get_dnn_training_ops(self, dnn_grads, dnn_vars):
if self._get_dnn_feature_columns():
self._dnn_optimizer = self._get_optimizer(self._dnn_optimizer,
default_optimizer="Adagrad",
default_learning_rate=0.05)
return [self._dnn_optimizer.apply_gradients(zip(dnn_grads, dnn_vars))]
return []
def _get_optimizer(self, optimizer, default_optimizer, default_learning_rate):
if optimizer is None:
optimizer = default_optimizer
if isinstance(optimizer, six.string_types):
optimizer = layers.OPTIMIZER_CLS_NAMES[optimizer](
learning_rate=default_learning_rate)
return optimizer
class DNNLinearCombinedClassifier(_DNNLinearCombinedBaseEstimator):
"""A classifier for TensorFlow Linear and DNN joined training models.
Example:
```
installed_app_id = sparse_column_with_hash_bucket("installed_id", 1e6)
impression_app_id = sparse_column_with_hash_bucket("impression_id", 1e6)
installed_x_impression = crossed_column(
[installed_app_id, impression_app_id])
installed_emb = embedding_column(installed_app_id, dimension=16,
combiner="sum")
impression_emb = embedding_column(impression_app_id, dimension=16,
combiner="sum")
estimator = DNNLinearCombinedClassifier(
# common settings
n_classes, weight_column_name,
# wide settings
linear_feature_columns=[installed_x_impression],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[installed_emb, impression_emb],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
# Input builders
  def input_fn_train(): # returns X, Y
    ...
  def input_fn_eval(): # returns X, Y
...
estimator.train(input_fn_train)
estimator.evaluate(input_fn_eval)
estimator.predict(x)
```
Input of `fit`, `train`, and `evaluate` should have following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
      - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
Parameters:
model_dir: Directory to save model parameters, graph and etc.
n_classes: number of target classes. Default is binary classification.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
linear_feature_columns: An iterable containing all the feature columns used
by linear part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. If `None`, will use a FTRL optimizer.
dnn_feature_columns: An iterable containing all the feature columns used by
deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the
deep part of the model. If `None`, will use an Adagrad optimizer.
dnn_activation_fn: Activation function applied to each layer. If `None`,
will use `tf.nn.relu`.
config: RunConfig object to configure the runtime settings.
Raises:
    ValueError: If both linear_feature_columns and dnn_feature_columns are
      empty at the same time.
    ValueError: If n_classes < 2.
"""
def __init__(self,
model_dir=None,
n_classes=2,
weight_column_name=None,
linear_feature_columns=None,
linear_optimizer=None,
dnn_feature_columns=None,
dnn_optimizer=None,
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
config=None):
if n_classes < 2:
raise ValueError("n_classes should be greater than 1. Given: {}".format(
n_classes))
super(DNNLinearCombinedClassifier, self).__init__(
model_dir=model_dir,
n_classes=n_classes,
weight_column_name=weight_column_name,
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
config=config)
class DNNLinearCombinedRegressor(_DNNLinearCombinedBaseEstimator):
"""A regressor for TensorFlow Linear and DNN joined training models.
Example:
```
installed_app_id = sparse_column_with_hash_bucket("installed_id", 1e6)
impression_app_id = sparse_column_with_hash_bucket("impression_id", 1e6)
installed_x_impression = crossed_column(
[installed_app_id, impression_app_id])
installed_emb = embedding_column(installed_app_id, dimension=16,
combiner="sum")
impression_emb = embedding_column(impression_app_id, dimension=16,
combiner="sum")
  estimator = DNNLinearCombinedRegressor(
      # common settings
      weight_column_name=weight_column_name,
# wide settings
linear_feature_columns=[installed_x_impression],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[installed_emb, impression_emb],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
# Input builders
  def input_fn_train():  # returns X, Y
    ...
  def input_fn_eval():  # returns X, Y
    ...
estimator.train(input_fn_train)
estimator.evaluate(input_fn_eval)
estimator.predict(x)
```
  Input of `fit`, `train`, and `evaluate` should have the following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
      - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
Parameters:
model_dir: Directory to save model parameters, graph and etc.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
linear_feature_columns: An iterable containing all the feature columns used
by linear part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. If `None`, will use a FTRL optimizer.
dnn_feature_columns: An iterable containing all the feature columns used by
deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the
deep part of the model. If `None`, will use an Adagrad optimizer.
dnn_activation_fn: Activation function applied to each layer. If None, will
use `tf.nn.relu`.
config: RunConfig object to configure the runtime settings.
Raises:
    ValueError: If both linear_feature_columns and dnn_feature_columns are
      empty at the same time.
"""
def __init__(self,
model_dir=None,
weight_column_name=None,
linear_feature_columns=None,
linear_optimizer=None,
dnn_feature_columns=None,
dnn_optimizer=None,
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
config=None):
super(DNNLinearCombinedRegressor, self).__init__(
model_dir=model_dir,
n_classes=0,
weight_column_name=weight_column_name,
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
config=config)
| [
"tensorflow.python.ops.nn.softmax",
"tensorflow.contrib.metrics.streaming_auc",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.ops.logging_ops.scalar_summary",
"tensorflow.python.ops.logging_ops.histogram_summary",
"tensorflow.python.ops.nn.zero_fraction",
"tensorflow.python.ops.math_ops.to_float",
"tensorflow.python.ops.math_ops.argmax",
"tensorflow.python.framework.ops.get_collection",
"numpy.argmax",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.contrib.framework.python.ops.variables.get_global_step",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.ops.math_ops.sigmoid",
"tensorflow.python.ops.gradients.gradients",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.contrib.layers.legacy_fully_connected",
"tensorflow.python.ops.parsing_ops.parse_example",
"tensorflow.python.ops.math_ops.reduce_sum"
] | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py | [(144, 'tensorflow.contrib.framework.python.ops.variables.get_global_step', 'variables.get_global_step', ([], {}), True, 'from tensorflow.contrib.framework.python.ops import variables as variables\n'), (148, 'tensorflow.python.ops.logging_ops.scalar_summary', 'logging_ops.scalar_summary', (['"""loss"""', 'loss'], {}), False, 'from tensorflow.python.ops import logging_ops\n'), (152, 'tensorflow.python.ops.gradients.gradients', 'gradients.gradients', (['loss', '(dnn_vars + linear_vars)'], {}), False, 'from tensorflow.python.ops import gradients\n'), (160, 'tensorflow.python.ops.control_flow_ops.group', 'control_flow_ops.group', (['*train_ops'], {'name': '"""combined_training_op"""'}), False, 'from tensorflow.python.ops import control_flow_ops\n'), (167, 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['targets', 'predictions.dtype'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (168, 'six.iteritems', 'six.iteritems', (['(metrics or {})'], {}), False, 'import six\n'), (219, 'tensorflow.python.ops.parsing_ops.parse_example', 'parsing_ops.parse_example', (['examples_batch', 'column_types'], {}), False, 'from tensorflow.python.ops import parsing_ops\n'), (261, 'tensorflow.python.ops.logging_ops.histogram_summary', 'logging_ops.histogram_summary', (["('%s:activation' % tag)", 'value'], {}), False, 'from tensorflow.python.ops import logging_ops\n'), (126, 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), True, 'import numpy as np\n'), (161, 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['[train_step]'], {}), False, 'from tensorflow.python.framework import ops\n'), (188, 'tensorflow.python.ops.math_ops.sigmoid', 'math_ops.sigmoid', (['logits'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (189, 'tensorflow.contrib.metrics.streaming_auc', 'metrics_lib.streaming_auc', (['predictions', 'targets'], {}), True, 'from tensorflow.contrib import metrics as metrics_lib\n'), (205, 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['logits', '[-1]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (211, 'tensorflow.python.ops.nn.softmax', 'nn.softmax', (['logits'], {}), False, 'from tensorflow.python.ops import nn\n'), (213, 'tensorflow.python.ops.math_ops.argmax', 'math_ops.argmax', (['logits', '(1)'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (240, 'tensorflow.contrib.layers.legacy_fully_connected', 'layers.legacy_fully_connected', (['net', 'num_hidden_units'], {'activation_fn': 'self._dnn_activation_fn', 'weight_collections': '[self._dnn_weight_collection]', 'bias_collections': '[self._dnn_weight_collection]', 'name': "('hiddenlayer_%d' % layer_id)"}), False, 'from tensorflow.contrib import layers\n'), (260, 'tensorflow.python.ops.nn.zero_fraction', 'nn.zero_fraction', (['value'], {}), False, 'from tensorflow.python.ops import nn\n'), (310, 'tensorflow.python.ops.math_ops.reduce_mean', 'math_ops.reduce_mean', (['loss_vec'], {'name': '"""loss"""'}), False, 'from tensorflow.python.ops import math_ops\n'), (312, 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['loss_vec'], {'shape': '(-1,)'}), False, 'from tensorflow.python.ops import array_ops\n'), (322, 'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', (['self._linear_weight_collection'], {}), False, 'from tensorflow.python.framework import ops\n'), (339, 
'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', (['self._dnn_weight_collection'], {}), False, 'from tensorflow.python.framework import ops\n'), (296, 'tensorflow.python.ops.math_ops.to_float', 'math_ops.to_float', (['features[self._weight_column_name]'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (314, 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['weight_tensor'], {'shape': '(-1,)'}), False, 'from tensorflow.python.ops import array_ops\n'), (316, 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['loss_vec'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (169, 'inspect.getargspec', 'inspect.getargspec', (['metric'], {}), False, 'import inspect\n'), (208, 'tensorflow.python.ops.array_ops.zeros_like', 'array_ops.zeros_like', (['logits'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (301, 'tensorflow.python.ops.math_ops.to_float', 'math_ops.to_float', (['target'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (304, 'tensorflow.python.ops.math_ops.to_float', 'math_ops.to_float', (['target'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (307, 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['target', '[-1]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (317, 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['weight_tensor'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (162, 'tensorflow.python.framework.ops.get_default_graph', 'ops.get_default_graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (163, 'tensorflow.python.ops.state_ops.assign_add', 'state_ops.assign_add', (['global_step', '(1)'], {}), False, 'from tensorflow.python.ops import state_ops\n')] |
lucidrains/compare_gan | 2a685ab94129c398620da67d999487fa63b7f741 | # coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Self-Supervised GAN with contrastive loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl import logging
from compare_gan.architectures.arch_ops import linear
from compare_gan.gans import loss_lib
from compare_gan.gans import modular_gan
from compare_gan.gans import penalty_lib
from compare_gan.gans import utils
import gin
import numpy as np
import random
import tensorflow as tf
FLAGS = flags.FLAGS
# augmentation functions
# augment
def random_crop_and_resize(images, ratio=0.8):
b, h, w, c = images.get_shape().as_list()
ch, cw = map(lambda x: int(x * ratio), (h, w))
crop = tf.random_crop(images, size=[b, ch, cw, 3])
crop = tf.image.resize(crop, [h, w])
return crop
def random_apply(fn, image, prob=1.):
b, *_ = image.get_shape().as_list()
chance = tf.less(tf.random_uniform([b], 0, 1.0), prob)
return tf.where(chance, fn(image), tf.identity(image))
def color_distortion(image, s=1.0):
lower, upper, x = (1 - 0.8 * s), (1 + 0.8 * s), image
x = tf.image.random_brightness(x, max_delta=0.8*s)
x = tf.image.random_contrast(x, lower=lower, upper=upper)
x = tf.image.random_saturation(x, lower=lower, upper=upper)
x = tf.image.random_hue(x, max_delta=0.2*s)
x = tf.clip_by_value(x, 0, 1)
return x
def color_drop(image):
image = tf.image.rgb_to_grayscale(image)
image = tf.tile(image, [1, 1, 1, 3])
return image
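# Editorial sketch (not part of the original file): how the helpers above are
# composed in practice; this mirrors the `augment` closure defined later in
# `CLGAN.create_loss`. The argument defaults here are assumptions.
def _augment_sketch(images, color_jitter_prob=0.8, color_drop_prob=0.0):
  """Hypothetical standalone version of the SimCLR-style augmentation chain."""
  images = random_crop_and_resize(images)
  images = random_apply(color_distortion, images, color_jitter_prob)
  images = random_apply(color_drop, images, color_drop_prob)
  # Augmentations only shape the contrastive targets; block their gradients.
  return tf.stop_gradient(images)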
# pylint: disable=not-callable
@gin.configurable(blacklist=["kwargs"])
class CLGAN(modular_gan.ModularGAN):
"""Self-Supervised GAN with Contrastive Loss"""
def __init__(self,
aug_color_jitter_prob=0.8,
aug_color_drop_prob=0.0,
weight_contrastive_loss_d=2.0,
**kwargs):
"""Creates a new Self-Supervised GAN using Contrastive Loss.
    Args:
      aug_color_jitter_prob: Probability of applying the color jitter
        augmentation to each image.
      aug_color_drop_prob: Probability of applying the color drop (grayscale)
        augmentation to each image.
      weight_contrastive_loss_d: Weight of the contrastive loss term added to
        the discriminator loss.
      **kwargs: Additional arguments passed to `ModularGAN` constructor.
    """
super(CLGAN, self).__init__(**kwargs)
self._weight_contrastive_loss_d = weight_contrastive_loss_d
self._aug_color_jitter_prob = aug_color_jitter_prob
self._aug_color_drop_prob = aug_color_drop_prob
    # To save memory, ModularGAN supports feeding real and fake samples
    # separately through the discriminator. CLGAN does not support this, to
    # avoid additional complexity in create_loss().
assert not self._deprecated_split_disc_calls, \
"Splitting discriminator calls is not supported in CLGAN."
def _latent_projections(self, latents):
bs, dim = latents.get_shape().as_list()
with tf.variable_scope("discriminator_z_projection", reuse=tf.AUTO_REUSE) as scope:
k1 = tf.get_variable("kernel1", [dim, dim * 4])
k2 = tf.get_variable("kernel2", [dim * 4, dim])
z_proj = tf.matmul(tf.nn.leaky_relu(tf.matmul(latents, k1), name=scope.name), k2)
z_proj = z_proj / tf.reshape(tf.norm(z_proj, ord=2, axis=-1), [bs, 1])
return z_proj
def create_loss(self, features, labels, params, is_training=True):
"""Build the loss tensors for discriminator and generator.
This method will set self.d_loss and self.g_loss.
Args:
features: Optional dictionary with inputs to the model ("images" should
contain the real images and "z" the noise for the generator).
      labels: Tensor with labels. These are class indices. Use
self._get_one_hot_labels(labels) to get a one hot encoded tensor.
params: Dictionary with hyperparameters passed to TPUEstimator.
        Additionally, TPUEstimator will set 3 keys: `batch_size`, `use_tpu`,
`tpu_context`. `batch_size` is the batch size for this core.
is_training: If True build the model in training mode. If False build the
model for inference mode (e.g. use trained averages for batch norm).
Raises:
ValueError: If set of meta/hyper parameters is not supported.
"""
images = features["images"] # Input images.
generated = features["generated"] # Fake images.
if self.conditional:
y = self._get_one_hot_labels(labels)
sampled_y = self._get_one_hot_labels(features["sampled_labels"])
else:
y = None
sampled_y = None
all_y = None
# Batch size per core.
bs = images.shape[0].value
def augment(imgs):
imgs = random_crop_and_resize(imgs)
imgs = random_apply(color_distortion, imgs, self._aug_color_jitter_prob)
imgs = random_apply(color_drop, imgs, self._aug_color_drop_prob)
return tf.stop_gradient(imgs)
aug_images, aug_generated = augment(images), augment(generated)
# concat all images
all_images = tf.concat([images, generated, aug_images, aug_generated], 0)
if self.conditional:
all_y = tf.concat([y, sampled_y, y, sampled_y], axis=0)
# Compute discriminator output for real and fake images in one batch.
d_all, d_all_logits, d_latents = self.discriminator(
x=all_images, y=all_y, is_training=is_training)
z_projs = self._latent_projections(d_latents)
d_real, d_fake, _, _ = tf.split(d_all, 4)
d_real_logits, d_fake_logits, _, _ = tf.split(d_all_logits, 4)
z_projs_real, z_projs_fake, z_aug_projs_real, z_aug_projs_fake = tf.split(z_projs, 4)
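    # The split order follows the concat above:
    # [real, fake, augmented real, augmented fake].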
self.d_loss, _, _, self.g_loss = loss_lib.get_losses(
d_real=d_real, d_fake=d_fake, d_real_logits=d_real_logits,
d_fake_logits=d_fake_logits)
penalty_loss = penalty_lib.get_penalty_loss(
x=images, x_fake=generated, y=y, is_training=is_training,
discriminator=self.discriminator, architecture=self._architecture)
self.d_loss += self._lambda * penalty_loss
z_projs = tf.concat([z_projs_real, z_projs_fake], 0)
z_aug_projs = tf.concat([z_aug_projs_real, z_aug_projs_fake], 0)
sims_logits = tf.matmul(z_projs, z_aug_projs, transpose_b=True)
logits_max = tf.reduce_max(sims_logits,1)
sims_logits = sims_logits - tf.reshape(logits_max, [-1, 1])
sims_probs = tf.nn.softmax(sims_logits)
sim_labels = tf.constant(np.arange(bs * 2, dtype=np.int32))
sims_onehot = tf.one_hot(sim_labels, bs * 2)
c_real_loss = - tf.reduce_mean(
tf.reduce_sum(sims_onehot * tf.log(sims_probs + 1e-10), 1))
self.d_loss += c_real_loss * self._weight_contrastive_loss_d
self._tpu_summary.scalar("loss/c_real_loss", c_real_loss)
self._tpu_summary.scalar("loss/penalty", penalty_loss)
| [
"tensorflow.get_variable",
"tensorflow.image.random_contrast",
"tensorflow.concat",
"tensorflow.image.random_saturation",
"tensorflow.image.random_hue",
"numpy.arange",
"tensorflow.stop_gradient",
"tensorflow.image.rgb_to_grayscale",
"tensorflow.tile",
"tensorflow.matmul",
"tensorflow.image.random_brightness",
"tensorflow.norm",
"tensorflow.identity",
"tensorflow.one_hot",
"tensorflow.split",
"tensorflow.clip_by_value",
"tensorflow.reduce_max",
"tensorflow.nn.softmax",
"tensorflow.reshape",
"tensorflow.random_crop",
"tensorflow.image.resize",
"tensorflow.log",
"tensorflow.variable_scope",
"tensorflow.random_uniform"
] | compare_gan/gans/clgan.py | [(68, 'gin.configurable', 'gin.configurable', ([], {'blacklist': "['kwargs']"}), False, 'import gin\n'), (44, 'tensorflow.random_crop', 'tf.random_crop', (['images'], {'size': '[b, ch, cw, 3]'}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.image.resize', 'tf.image.resize', (['crop', '[h, w]'], {}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['x'], {'max_delta': '(0.8 * s)'}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['x'], {'lower': 'lower', 'upper': 'upper'}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['x'], {'lower': 'lower', 'upper': 'upper'}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.image.random_hue', 'tf.image.random_hue', (['x'], {'max_delta': '(0.2 * s)'}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x', '(0)', '(1)'], {}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.image.rgb_to_grayscale', 'tf.image.rgb_to_grayscale', (['image'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.tile', 'tf.tile', (['image', '[1, 1, 1, 3]'], {}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.random_uniform', 'tf.random_uniform', (['[b]', '(0)', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.identity', 'tf.identity', (['image'], {}), True, 'import tensorflow as tf\n'), (147, 'tensorflow.concat', 'tf.concat', (['[images, generated, aug_images, aug_generated]', '(0)'], {}), True, 'import tensorflow as tf\n'), (159, 'tensorflow.split', 'tf.split', (['d_all', '(4)'], {}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.split', 'tf.split', (['d_all_logits', '(4)'], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.split', 'tf.split', (['z_projs', '(4)'], {}), True, 'import tensorflow as tf\n'), (163, 'compare_gan.gans.loss_lib.get_losses', 'loss_lib.get_losses', ([], {'d_real': 'd_real', 'd_fake': 'd_fake', 'd_real_logits': 'd_real_logits', 'd_fake_logits': 'd_fake_logits'}), False, 'from compare_gan.gans import loss_lib\n'), (167, 'compare_gan.gans.penalty_lib.get_penalty_loss', 'penalty_lib.get_penalty_loss', ([], {'x': 'images', 'x_fake': 'generated', 'y': 'y', 'is_training': 'is_training', 'discriminator': 'self.discriminator', 'architecture': 'self._architecture'}), False, 'from compare_gan.gans import penalty_lib\n'), (172, 'tensorflow.concat', 'tf.concat', (['[z_projs_real, z_projs_fake]', '(0)'], {}), True, 'import tensorflow as tf\n'), (173, 'tensorflow.concat', 'tf.concat', (['[z_aug_projs_real, z_aug_projs_fake]', '(0)'], {}), True, 'import tensorflow as tf\n'), (175, 'tensorflow.matmul', 'tf.matmul', (['z_projs', 'z_aug_projs'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (176, 'tensorflow.reduce_max', 'tf.reduce_max', (['sims_logits', '(1)'], {}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['sims_logits'], {}), True, 'import tensorflow as tf\n'), (181, 'tensorflow.one_hot', 'tf.one_hot', (['sim_labels', '(bs * 2)'], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""discriminator_z_projection"""'], {'reuse': 'tf.AUTO_REUSE'}), True, 'import tensorflow as tf\n'), (100, 'tensorflow.get_variable', 'tf.get_variable', (['"""kernel1"""', '[dim, dim * 4]'], {}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.get_variable', 
'tf.get_variable', (['"""kernel2"""', '[dim * 4, dim]'], {}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['imgs'], {}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.concat', 'tf.concat', (['[y, sampled_y, y, sampled_y]'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.reshape', 'tf.reshape', (['logits_max', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (180, 'numpy.arange', 'np.arange', (['(bs * 2)'], {'dtype': 'np.int32'}), True, 'import numpy as np\n'), (102, 'tensorflow.matmul', 'tf.matmul', (['latents', 'k1'], {}), True, 'import tensorflow as tf\n'), (103, 'tensorflow.norm', 'tf.norm', (['z_proj'], {'ord': '(2)', 'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.log', 'tf.log', (['(sims_probs + 1e-10)'], {}), True, 'import tensorflow as tf\n')] |
ffmpbgrnn/google-research | eb924d158768e0ca91fd382f02818d1440fb5e75 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains an L2TL model jointly on the source and target datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from absl import app
from absl import flags
import model
import model_utils
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_string(
'model_dir',
None,
help=('The directory where the model and training/evaluation summaries are'
' stored.'))
flags.DEFINE_integer(
'log_step_count_steps', 64, 'The number of steps at '
'which the global step information is logged.')
flags.DEFINE_string(
'warm_start_ckpt_path', None, 'The path to the checkpoint '
'that will be used before training.')
flags.DEFINE_integer('train_steps', 120000, 'Number of total training steps.')
flags.DEFINE_integer('num_choices', 100,
'Number of actions for the scaling variable.')
flags.DEFINE_float('base_learning_rate_scale', 0.001,
'The value of the learning rate')
flags.DEFINE_float('dst_weight_decay', 0.0005,
'Weight decay for the target dataset.')
flags.DEFINE_integer('save_checkpoints_steps', 100,
'Number of steps for each checkpoint saving.')
flags.DEFINE_float('rl_learning_rate', 0.001, 'Learning rate for RL updates.')
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for l2tl.')
flags.DEFINE_integer('target_num_classes', 10,
'The number of classes in the target dataset.')
flags.DEFINE_integer('train_batch_size', 128, 'The batch size during training.')
flags.DEFINE_integer(
'source_train_batch_multiplier', 5,
'The multiplier will be used to increase the batch size '
'to sample more examples.')
flags.DEFINE_float('loss_weight_scale', 1000.0, 'Scaling of the loss weight.')
flags.DEFINE_integer('first_pretrain_steps', 0,
'Number of steps for pretraining.')
flags.DEFINE_integer('target_val_batch_multiplier', 4,
'Multiplier for the target evaluation batch size.')
flags.DEFINE_integer('target_train_batch_multiplier', 1,
'Multiplier for the target evaluation train batch size.')
flags.DEFINE_integer('uniform_weight', 0,
'Use of uniform weight in the ablation studies.')
def get_global_step(name):
"""Returns the global step variable."""
global_step = tf.get_variable(
name,
shape=[],
dtype=tf.int64,
initializer=tf.initializers.zeros(),
trainable=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES])
return global_step
def get_src_train_op(loss): # pylint: disable=unused-argument
"""Returns the source training op."""
global_step = tf.train.get_global_step()
src_learning_rate = FLAGS.learning_rate
src_learning_rate = tf.train.piecewise_constant(
global_step, [800,],
[FLAGS.learning_rate, FLAGS.learning_rate * 0.1])
optimizer = tf.train.MomentumOptimizer(
learning_rate=src_learning_rate,
momentum=0.9,
use_nesterov=True
)
with tf.variable_scope('src'):
return optimizer.minimize(loss, global_step), src_learning_rate
def meta_train_op(acc, rl_entropy, log_prob, rl_scope, params): # pylint: disable=unused-argument
"""Returns the target training op.
Update the control variables using policy gradient.
Args:
acc: reward on validation set. In our case, the reward is the top-1 acc;
rl_entropy: entropy of action logits;
log_prob: log prob of the action;
rl_scope: variable scope;
params: other params;
Returns:
target_train_op: train op;
rl_learning_rate: lr;
out_metric: metric dict;
"""
target_global_step = get_global_step('train_rl_global_step')
rl_reward = acc
rl_step_baseline = rl_reward
rl_baseline_momentum = 0.9
rl_entropy_regularization = 0.001
def update_rl_baseline():
return model_utils.update_exponential_moving_average(
rl_step_baseline, momentum=rl_baseline_momentum)
rl_baseline = update_rl_baseline()
rl_advantage = rl_reward - rl_baseline
rl_empirical_loss = -tf.stop_gradient(rl_advantage) * log_prob
rl_entropy_loss = -rl_entropy_regularization * rl_entropy
enable_rl_optimizer = tf.cast(
tf.greater_equal(target_global_step, FLAGS.first_pretrain_steps),
tf.float32)
rl_learning_rate = FLAGS.rl_learning_rate * enable_rl_optimizer
rl_learning_rate = tf.train.piecewise_constant(
target_global_step, [800,],
[rl_learning_rate, rl_learning_rate * 0.1])
optimizer = tf.train.AdamOptimizer(rl_learning_rate)
target_train_op = optimizer.minimize(
rl_empirical_loss,
target_global_step,
var_list=tf.trainable_variables(rl_scope.name))
out_metric = {
'rl_empirical_loss': rl_empirical_loss,
'rl_entropy_loss': rl_entropy_loss,
'rl_reward': rl_reward,
'rl_step_baseline': rl_step_baseline,
'rl_baseline': rl_baseline,
'rl_advantage': rl_advantage,
'log_prob': log_prob,
}
return target_train_op, rl_learning_rate, out_metric
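# Editorial sketch (not part of the original file): the RL update above is
# REINFORCE with an exponential-moving-average baseline; the surrogate loss is
# -(reward - baseline) * log_prob, with the advantage treated as a constant
# (stop_gradient), and an entropy term is built alongside it. Hypothetical
# standalone version:
def _policy_gradient_losses_sketch(reward, baseline, log_prob, entropy,
                                   entropy_regularization=0.001):
  """Hypothetical rendition of the surrogate losses built in meta_train_op."""
  advantage = reward - baseline
  rl_empirical_loss = -advantage * log_prob
  rl_entropy_loss = -entropy_regularization * entropy
  return rl_empirical_loss, rl_entropy_loss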
def get_logits(feature, mode, dataset_name, reuse=None):
"""Returns the network logits."""
avg_pool = model.conv_model(feature, mode,
target_dataset=FLAGS.target_dataset,
src_hw=FLAGS.src_hw,
target_hw=FLAGS.target_hw,
dataset_name=dataset_name,
reuse=reuse)
return avg_pool
def do_cls(avg_pool, num_classes, name='dense'):
"""Applies classification."""
with tf.variable_scope('target_CLS', reuse=tf.AUTO_REUSE):
logits = tf.layers.dense(
inputs=avg_pool,
units=num_classes,
kernel_initializer=tf.random_normal_initializer(stddev=.05),
name=name)
return logits
def get_model_logits(src_features, finetune_features, mode, num_classes,
target_num_classes):
"""Gets the logits from different models."""
src_avg_pool = get_logits(
src_features, mode, FLAGS.source_dataset, reuse=None)
dst_avg_pool = get_logits(
finetune_features, mode, FLAGS.target_dataset, reuse=True)
src_logits = do_cls(src_avg_pool, num_classes, name='final_dense_dst')
dst_logits = do_cls(
dst_avg_pool, target_num_classes, name='final_target_dense')
return src_logits, dst_logits
def get_final_loss(src_logits, src_one_hot_labels, dst_logits,
finetune_one_hot_labels, global_step, loss_weights,
inst_weights):
"""Gets the final loss for l2tl."""
if FLAGS.uniform_weight:
inst_weights = 1.0
def get_loss(logits, inst_weights, one_hot_labels):
"""Returns the loss function."""
loss = tf.losses.softmax_cross_entropy(
logits=logits, weights=inst_weights, onehot_labels=one_hot_labels)
return loss
src_loss = get_loss(src_logits, inst_weights, src_one_hot_labels)
dst_loss = get_loss(dst_logits, 1., finetune_one_hot_labels)
l2_loss = []
for v in tf.trainable_variables():
if 'batch_normalization' not in v.name and 'rl_controller' not in v.name:
l2_loss.append(tf.nn.l2_loss(v))
l2_loss = FLAGS.dst_weight_decay * tf.add_n(l2_loss)
enable_pretrain = tf.cast(
tf.greater_equal(global_step, FLAGS.first_pretrain_steps), tf.float32)
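  # The weighted source loss only kicks in after `first_pretrain_steps`, and
  # gradients are blocked from flowing into the learned loss weight here.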
loss = src_loss * tf.stop_gradient(loss_weights) * enable_pretrain
loss += dst_loss + l2_loss
return tf.identity(loss), src_loss, dst_loss
def train_model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""Defines the model function."""
target_num_classes = FLAGS.target_num_classes
global_step = tf.train.get_global_step()
src_features, src_labels = features['src'], tf.cast(labels['src'], tf.int64)
finetune_features = features['finetune']
target_features = features['target']
num_classes = FLAGS.src_num_classes
finetune_one_hot_labels = tf.one_hot(
tf.cast(labels['finetune'], tf.int64), target_num_classes)
target_one_hot_labels = tf.one_hot(
tf.cast(labels['target'], tf.int64), target_num_classes)
with tf.variable_scope('rl_controller') as rl_scope:
# It creates a `rl_scope` which will be used for ops.
pass
rl_entropy, label_weights, log_prob = rl_label_weights(rl_scope)
loss_entropy, loss_weights, loss_log_prob = get_loss_weights(rl_scope)
def gather_init_weights():
inst_weights = tf.stop_gradient(tf.gather(label_weights, src_labels))
return inst_weights
inst_weights = gather_init_weights()
bs = FLAGS.train_batch_size
hw = FLAGS.src_hw
inst_weights, indices = tf.nn.top_k(
inst_weights,
k=bs,
sorted=True,
)
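  # Keep the `bs` source examples with the largest learned per-class weights
  # out of the oversampled candidate batch; `indices` is reused below to
  # select the matching features and labels.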
src_features = tf.reshape(src_features, [
bs * FLAGS.source_train_batch_multiplier,
hw,
hw,
1,
])
src_features = tf.gather(src_features, indices, axis=0)
src_features = tf.stop_gradient(src_features)
src_labels = tf.gather(src_labels, indices)
inst_weights = bs * inst_weights / tf.reduce_sum(inst_weights)
src_one_hot_labels = tf.one_hot(tf.cast(src_labels, tf.int64), num_classes)
src_logits, dst_logits = get_model_logits(src_features, finetune_features,
mode, num_classes,
target_num_classes)
loss, _, _ = get_final_loss(src_logits, src_one_hot_labels, dst_logits,
finetune_one_hot_labels, global_step,
loss_weights, inst_weights)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
src_train_op, _ = get_src_train_op(loss)
with tf.control_dependencies([src_train_op]):
target_avg_pool = get_logits(
target_features, mode, FLAGS.target_dataset, reuse=True)
target_logits = do_cls(
target_avg_pool, target_num_classes, name='final_target_dense')
is_prediction_correct = tf.equal(
tf.argmax(tf.identity(target_logits), axis=1),
tf.argmax(target_one_hot_labels, axis=1))
acc = tf.reduce_mean(tf.cast(is_prediction_correct, tf.float32))
entropy = loss_entropy + rl_entropy
log_prob = loss_log_prob + log_prob
train_op, _, _ = meta_train_op(acc, entropy, log_prob, rl_scope, params)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
def rl_label_weights(name=None):
"""Returns the weight for importance."""
with tf.variable_scope(name, 'rl_op_selection'):
num_classes = FLAGS.src_num_classes
num_choices = FLAGS.num_choices
logits = tf.get_variable(
name='logits_rl_w',
initializer=tf.initializers.zeros(),
shape=[num_classes, num_choices],
dtype=tf.float32)
dist = tfp.distributions.Categorical(logits=logits)
dist_entropy = tf.reduce_sum(dist.entropy())
sample = dist.sample()
sample_masks = 1. * tf.cast(sample, tf.float32) / num_choices
sample_log_prob = tf.reduce_mean(dist.log_prob(sample))
return (dist_entropy, sample_masks, sample_log_prob)
def get_loss_weights(name=None):
"""Returns the weight for loss."""
with tf.variable_scope(name, 'rl_op_selection'):
logits = tf.get_variable(
name='loss_logits_rl_w',
initializer=tf.initializers.zeros(),
shape=[
FLAGS.num_choices,
],
dtype=tf.float32)
dist = tfp.distributions.Categorical(logits=logits)
dist_entropy = tf.reduce_sum(dist.entropy())
sample = dist.sample()
sample_masks = 1. * tf.cast(sample, tf.float32) / FLAGS.loss_weight_scale
sample_log_prob = tf.reduce_mean(dist.log_prob(sample))
return (dist_entropy, sample_masks, sample_log_prob)
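# Editorial sketch (not part of the original file): both weight heads above
# sample a discrete action from a learned Categorical distribution and rescale
# it into a small positive weight (per class, or a single scalar for the loss
# weight). Hypothetical standalone version:
def _sample_scaled_weight_sketch(logits, scale):
  """Hypothetical sampler mirroring rl_label_weights/get_loss_weights."""
  dist = tfp.distributions.Categorical(logits=logits)
  sample = dist.sample()
  weight = tf.cast(sample, tf.float32) / scale
  return dist.entropy(), weight, dist.log_prob(sample)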
def main(unused_argv):
tf.set_random_seed(FLAGS.random_seed)
run_config_args = {
'model_dir': FLAGS.model_dir,
'save_checkpoints_steps': FLAGS.save_checkpoints_steps,
'log_step_count_steps': FLAGS.log_step_count_steps,
'keep_checkpoint_max': 100,
}
config = tf.contrib.tpu.RunConfig(**run_config_args)
if FLAGS.warm_start_ckpt_path:
var_names = []
checkpoint_path = FLAGS.warm_start_ckpt_path
reader = tf.train.NewCheckpointReader(checkpoint_path)
for key in reader.get_variable_to_shape_map():
keep_str = 'Momentum|global_step|finetune_global_step'
if not re.findall('({})'.format(keep_str,), key):
var_names.append(key)
tf.logging.info('Warm-starting tensors: %s', sorted(var_names))
vars_to_warm_start = var_names
warm_start_settings = tf.estimator.WarmStartSettings(
ckpt_to_initialize_from=checkpoint_path,
vars_to_warm_start=vars_to_warm_start)
else:
warm_start_settings = None
l2tl_classifier = tf.estimator.Estimator(
train_model_fn, config=config, warm_start_from=warm_start_settings)
def make_input_dataset():
"""Return input dataset."""
def _merge_datasets(train_batch, finetune_batch, target_batch):
"""Merge different splits."""
train_features, train_labels = train_batch['image'], train_batch['label']
finetune_features, finetune_labels = finetune_batch[
'image'], finetune_batch['label']
target_features, target_labels = target_batch['image'], target_batch[
'label']
features = {
'src': train_features,
'finetune': finetune_features,
'target': target_features
}
labels = {
'src': train_labels,
'finetune': finetune_labels,
'target': target_labels
}
return (features, labels)
source_train_batch_size = int(
round(FLAGS.train_batch_size * FLAGS.source_train_batch_multiplier))
train_data = tfds.load(name=FLAGS.source_dataset, split='train')
train_data = train_data.shuffle(512).repeat().batch(source_train_batch_size)
target_train_batch_size = int(
round(FLAGS.train_batch_size * FLAGS.target_train_batch_multiplier))
finetune_data = tfds.load(name=FLAGS.target_dataset, split='train')
finetune_data = finetune_data.shuffle(512).repeat().batch(
target_train_batch_size)
target_val_batch_size = int(
round(FLAGS.train_batch_size * FLAGS.target_val_batch_multiplier))
target_data = tfds.load(name=FLAGS.target_dataset, split='validation')
target_data = target_data.shuffle(512).repeat().batch(target_val_batch_size)
dataset = tf.data.Dataset.zip((train_data, finetune_data, target_data))
dataset = dataset.map(_merge_datasets)
dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
return dataset
max_train_steps = FLAGS.train_steps
l2tl_classifier.train(make_input_dataset, max_steps=max_train_steps)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
| [
"tensorflow.initializers.zeros",
"tensorflow.control_dependencies",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.nn.l2_loss",
"tensorflow.train.AdamOptimizer",
"tensorflow.add_n",
"tensorflow.get_collection",
"tensorflow.train.get_global_step",
"tensorflow.stop_gradient",
"tensorflow.losses.softmax_cross_entropy",
"tensorflow.train.piecewise_constant",
"tensorflow.nn.top_k",
"tensorflow.gather",
"tensorflow.train.MomentumOptimizer",
"tensorflow.logging.set_verbosity",
"tensorflow.data.Dataset.zip",
"tensorflow.trainable_variables",
"tensorflow.argmax",
"tensorflow.random_normal_initializer",
"tensorflow.estimator.WarmStartSettings",
"tensorflow.estimator.Estimator",
"tensorflow.identity",
"tensorflow.train.NewCheckpointReader",
"tensorflow.set_random_seed",
"tensorflow.reshape",
"tensorflow.contrib.tpu.RunConfig",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.variable_scope",
"tensorflow.greater_equal"
] | l2tl/train_l2tl.py | [(35, 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""model_dir"""', 'None'], {'help': '"""The directory where the model and training/evaluation summaries are stored."""'}), False, 'from absl import flags\n'), (40, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""log_step_count_steps"""', '(64)', '"""The number of steps at which the global step information is logged."""'], {}), False, 'from absl import flags\n'), (43, 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""warm_start_ckpt_path"""', 'None', '"""The path to the checkpoint that will be used before training."""'], {}), False, 'from absl import flags\n'), (46, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""train_steps"""', '(120000)', '"""Number of total training steps."""'], {}), False, 'from absl import flags\n'), (47, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_choices"""', '(100)', '"""Number of actions for the scaling variable."""'], {}), False, 'from absl import flags\n'), (49, 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""base_learning_rate_scale"""', '(0.001)', '"""The value of the learning rate"""'], {}), False, 'from absl import flags\n'), (51, 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""dst_weight_decay"""', '(0.0005)', '"""Weight decay for the target dataset."""'], {}), False, 'from absl import flags\n'), (53, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""save_checkpoints_steps"""', '(100)', '"""Number of steps for each checkpoint saving."""'], {}), False, 'from absl import flags\n'), (55, 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""rl_learning_rate"""', '(0.001)', '"""Learning rate for RL updates."""'], {}), False, 'from absl import flags\n'), (56, 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""learning_rate"""', '(0.001)', '"""Learning rate for l2tl."""'], {}), False, 'from absl import flags\n'), (57, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""target_num_classes"""', '(10)', '"""The number of classes in the target dataset."""'], {}), False, 'from absl import flags\n'), (59, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""train_batch_size"""', '(128)', '"""The batch size during training."""'], {}), False, 'from absl import flags\n'), (60, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""source_train_batch_multiplier"""', '(5)', '"""The multiplier will be used to increase the batch size to sample more examples."""'], {}), False, 'from absl import flags\n'), (64, 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""loss_weight_scale"""', '(1000.0)', '"""Scaling of the loss weight."""'], {}), False, 'from absl import flags\n'), (65, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""first_pretrain_steps"""', '(0)', '"""Number of steps for pretraining."""'], {}), False, 'from absl import flags\n'), (67, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""target_val_batch_multiplier"""', '(4)', '"""Multiplier for the target evaluation batch size."""'], {}), False, 'from absl import flags\n'), (69, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""target_train_batch_multiplier"""', '(1)', '"""Multiplier for the target evaluation train batch size."""'], {}), False, 'from absl import flags\n'), (71, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""uniform_weight"""', '(0)', '"""Use of uniform weight in the ablation studies."""'], {}), False, 'from absl import flags\n'), (89, 'tensorflow.train.get_global_step', 
'tf.train.get_global_step', ([], {}), True, 'import tensorflow as tf\n'), (91, 'tensorflow.train.piecewise_constant', 'tf.train.piecewise_constant', (['global_step', '[800]', '[FLAGS.learning_rate, FLAGS.learning_rate * 0.1]'], {}), True, 'import tensorflow as tf\n'), (94, 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', ([], {'learning_rate': 'src_learning_rate', 'momentum': '(0.9)', 'use_nesterov': '(True)'}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.train.piecewise_constant', 'tf.train.piecewise_constant', (['target_global_step', '[800]', '[rl_learning_rate, rl_learning_rate * 0.1]'], {}), True, 'import tensorflow as tf\n'), (144, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['rl_learning_rate'], {}), True, 'import tensorflow as tf\n'), (164, 'model.conv_model', 'model.conv_model', (['feature', 'mode'], {'target_dataset': 'FLAGS.target_dataset', 'src_hw': 'FLAGS.src_hw', 'target_hw': 'FLAGS.target_hw', 'dataset_name': 'dataset_name', 'reuse': 'reuse'}), False, 'import model\n'), (214, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (231, 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), True, 'import tensorflow as tf\n'), (257, 'tensorflow.nn.top_k', 'tf.nn.top_k', (['inst_weights'], {'k': 'bs', 'sorted': '(True)'}), True, 'import tensorflow as tf\n'), (262, 'tensorflow.reshape', 'tf.reshape', (['src_features', '[bs * FLAGS.source_train_batch_multiplier, hw, hw, 1]'], {}), True, 'import tensorflow as tf\n'), (268, 'tensorflow.gather', 'tf.gather', (['src_features', 'indices'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (269, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['src_features'], {}), True, 'import tensorflow as tf\n'), (271, 'tensorflow.gather', 'tf.gather', (['src_labels', 'indices'], {}), True, 'import tensorflow as tf\n'), (285, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), True, 'import tensorflow as tf\n'), (303, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'loss', 'train_op': 'train_op'}), True, 'import tensorflow as tf\n'), (349, 'tensorflow.set_random_seed', 'tf.set_random_seed', (['FLAGS.random_seed'], {}), True, 'import tensorflow as tf\n'), (357, 'tensorflow.contrib.tpu.RunConfig', 'tf.contrib.tpu.RunConfig', ([], {}), True, 'import tensorflow as tf\n'), (377, 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', (['train_model_fn'], {'config': 'config', 'warm_start_from': 'warm_start_settings'}), True, 'import tensorflow as tf\n'), (430, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), True, 'import tensorflow as tf\n'), (431, 'absl.app.run', 'app.run', (['main'], {}), False, 'from absl import app\n'), (99, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""src"""'], {}), True, 'import tensorflow as tf\n'), (126, 'model_utils.update_exponential_moving_average', 'model_utils.update_exponential_moving_average', (['rl_step_baseline'], {'momentum': 'rl_baseline_momentum'}), False, 'import model_utils\n'), (137, 'tensorflow.greater_equal', 'tf.greater_equal', (['target_global_step', 'FLAGS.first_pretrain_steps'], {}), True, 'import tensorflow as tf\n'), (175, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""target_CLS"""'], {'reuse': 'tf.AUTO_REUSE'}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', ([], 
{'logits': 'logits', 'weights': 'inst_weights', 'onehot_labels': 'one_hot_labels'}), True, 'import tensorflow as tf\n'), (217, 'tensorflow.add_n', 'tf.add_n', (['l2_loss'], {}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.greater_equal', 'tf.greater_equal', (['global_step', 'FLAGS.first_pretrain_steps'], {}), True, 'import tensorflow as tf\n'), (225, 'tensorflow.identity', 'tf.identity', (['loss'], {}), True, 'import tensorflow as tf\n'), (233, 'tensorflow.cast', 'tf.cast', (["labels['src']", 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (240, 'tensorflow.cast', 'tf.cast', (["labels['finetune']", 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (242, 'tensorflow.cast', 'tf.cast', (["labels['target']", 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rl_controller"""'], {}), True, 'import tensorflow as tf\n'), (273, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['inst_weights'], {}), True, 'import tensorflow as tf\n'), (275, 'tensorflow.cast', 'tf.cast', (['src_labels', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (287, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), True, 'import tensorflow as tf\n'), (308, 'tensorflow.variable_scope', 'tf.variable_scope', (['name', '"""rl_op_selection"""'], {}), True, 'import tensorflow as tf\n'), (317, 'tensorflow_probability.distributions.Categorical', 'tfp.distributions.Categorical', ([], {'logits': 'logits'}), True, 'import tensorflow_probability as tfp\n'), (329, 'tensorflow.variable_scope', 'tf.variable_scope', (['name', '"""rl_op_selection"""'], {}), True, 'import tensorflow as tf\n'), (338, 'tensorflow_probability.distributions.Categorical', 'tfp.distributions.Categorical', ([], {'logits': 'logits'}), True, 'import tensorflow_probability as tfp\n'), (362, 'tensorflow.train.NewCheckpointReader', 'tf.train.NewCheckpointReader', (['checkpoint_path'], {}), True, 'import tensorflow as tf\n'), (371, 'tensorflow.estimator.WarmStartSettings', 'tf.estimator.WarmStartSettings', ([], {'ckpt_to_initialize_from': 'checkpoint_path', 'vars_to_warm_start': 'vars_to_warm_start'}), True, 'import tensorflow as tf\n'), (405, 'tensorflow_datasets.load', 'tfds.load', ([], {'name': 'FLAGS.source_dataset', 'split': '"""train"""'}), True, 'import tensorflow_datasets as tfds\n'), (410, 'tensorflow_datasets.load', 'tfds.load', ([], {'name': 'FLAGS.target_dataset', 'split': '"""train"""'}), True, 'import tensorflow_datasets as tfds\n'), (417, 'tensorflow_datasets.load', 'tfds.load', ([], {'name': 'FLAGS.target_dataset', 'split': '"""validation"""'}), True, 'import tensorflow_datasets as tfds\n'), (420, 'tensorflow.data.Dataset.zip', 'tf.data.Dataset.zip', (['(train_data, finetune_data, target_data)'], {}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.initializers.zeros', 'tf.initializers.zeros', ([], {}), True, 'import tensorflow as tf\n'), (132, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['rl_advantage'], {}), True, 'import tensorflow as tf\n'), (148, 'tensorflow.trainable_variables', 'tf.trainable_variables', (['rl_scope.name'], {}), True, 'import tensorflow as tf\n'), (222, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['loss_weights'], {}), True, 'import tensorflow as tf\n'), (251, 'tensorflow.gather', 'tf.gather', (['label_weights', 'src_labels'], {}), True, 'import tensorflow as tf\n'), (289, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[src_train_op]'], {}), True, 'import tensorflow as tf\n'), (179, 
'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.05)'}), True, 'import tensorflow as tf\n'), (216, 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), True, 'import tensorflow as tf\n'), (296, 'tensorflow.argmax', 'tf.argmax', (['target_one_hot_labels'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (297, 'tensorflow.cast', 'tf.cast', (['is_prediction_correct', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (314, 'tensorflow.initializers.zeros', 'tf.initializers.zeros', ([], {}), True, 'import tensorflow as tf\n'), (321, 'tensorflow.cast', 'tf.cast', (['sample', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (333, 'tensorflow.initializers.zeros', 'tf.initializers.zeros', ([], {}), True, 'import tensorflow as tf\n'), (342, 'tensorflow.cast', 'tf.cast', (['sample', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (295, 'tensorflow.identity', 'tf.identity', (['target_logits'], {}), True, 'import tensorflow as tf\n')] |
AathmanT/cv-tricks.com | 7367c42d3e2d398b31ebf1b058bdbb5dc2a56253 | import math
import numpy as np
import tensorflow as tf
from enum import Enum, unique
@unique
class InputType(Enum):
TENSOR = 1
BASE64_JPEG = 2
class OpenNsfwModel:
"""Tensorflow implementation of Yahoo's Open NSFW Model
Original implementation:
https://github.com/yahoo/open_nsfw
Weights have been converted using caffe-tensorflow:
https://github.com/ethereon/caffe-tensorflow
"""
def __init__(self):
self.weights = {}
self.bn_epsilon = 1e-5 # Default used by Caffe
def build(self, weights_path="open_nsfw-weights.npy",
input_type=InputType.TENSOR):
self.weights = np.load(weights_path, encoding="latin1").item()
self.input_tensor = None
if input_type == InputType.TENSOR:
self.input = tf.placeholder(tf.float32,
shape=[None, 224, 224, 3],
name="input")
self.input_tensor = self.input
elif input_type == InputType.BASE64_JPEG:
from image_utils import load_base64_tensor
self.input = tf.placeholder(tf.string, shape=(None,), name="input")
self.input_tensor = load_base64_tensor(self.input)
else:
raise ValueError("invalid input type '{}'".format(input_type))
x = self.input_tensor
x = tf.pad(x, [[0, 0], [3, 3], [3, 3], [0, 0]], 'CONSTANT')
x = self.__conv2d("conv_1", x, filter_depth=64,
kernel_size=7, stride=2, padding='valid')
x = self.__batch_norm("bn_1", x)
x = tf.nn.relu(x)
x = tf.keras.layers.MaxPool2D(pool_size = 3, strides = 2, padding = 'same')(x)
x = self.__conv_block(stage=0, block=0, inputs=x,
filter_depths=[32, 32, 128],
kernel_size=3, stride=1)
x = self.__identity_block(stage=0, block=1, inputs=x,
filter_depths=[32, 32, 128], kernel_size=3)
x = self.__identity_block(stage=0, block=2, inputs=x,
filter_depths=[32, 32, 128], kernel_size=3)
x = self.__conv_block(stage=1, block=0, inputs=x,
filter_depths=[64, 64, 256],
kernel_size=3, stride=2)
x = self.__identity_block(stage=1, block=1, inputs=x,
filter_depths=[64, 64, 256], kernel_size=3)
x = self.__identity_block(stage=1, block=2, inputs=x,
filter_depths=[64, 64, 256], kernel_size=3)
x = self.__identity_block(stage=1, block=3, inputs=x,
filter_depths=[64, 64, 256], kernel_size=3)
x = self.__conv_block(stage=2, block=0, inputs=x,
filter_depths=[128, 128, 512],
kernel_size=3, stride=2)
x = self.__identity_block(stage=2, block=1, inputs=x,
filter_depths=[128, 128, 512], kernel_size=3)
x = self.__identity_block(stage=2, block=2, inputs=x,
filter_depths=[128, 128, 512], kernel_size=3)
x = self.__identity_block(stage=2, block=3, inputs=x,
filter_depths=[128, 128, 512], kernel_size=3)
x = self.__identity_block(stage=2, block=4, inputs=x,
filter_depths=[128, 128, 512], kernel_size=3)
x = self.__identity_block(stage=2, block=5, inputs=x,
filter_depths=[128, 128, 512], kernel_size=3)
x = self.__conv_block(stage=3, block=0, inputs=x,
filter_depths=[256, 256, 1024], kernel_size=3,
stride=2)
x = self.__identity_block(stage=3, block=1, inputs=x,
filter_depths=[256, 256, 1024],
kernel_size=3)
x = self.__identity_block(stage=3, block=2, inputs=x,
filter_depths=[256, 256, 1024],
kernel_size=3)
x = tf.keras.layers.AveragePooling2D(pool_size=7, strides=1,
padding="valid", name="pool")(x)
x = tf.reshape(x, shape=(-1, 1024))
self.logits = self.__fully_connected(name="fc_nsfw",
inputs=x, num_outputs=2)
self.predictions = tf.nn.softmax(self.logits, name="predictions")
"""Get weights for layer with given name
"""
def __get_weights(self, layer_name, field_name):
if not layer_name in self.weights:
raise ValueError("No weights for layer named '{}' found"
.format(layer_name))
w = self.weights[layer_name]
if not field_name in w:
raise (ValueError("No entry for field '{}' in layer named '{}'"
.format(field_name, layer_name)))
return w[field_name]
"""Layer creation and weight initialization
"""
def __fully_connected(self, name, inputs, num_outputs):
return tf.keras.layers.Dense(
units=num_outputs, name=name,
kernel_initializer=tf.constant_initializer(
self.__get_weights(name, "weights"), dtype=tf.float32),
bias_initializer=tf.constant_initializer(
self.__get_weights(name, "biases"), dtype=tf.float32))(inputs)
def __conv2d(self, name, inputs, filter_depth, kernel_size, stride=1, padding="same", trainable=False):
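        # Emulate "same" padding explicitly: pad the input symmetrically,
        # then run the convolution below with 'valid' padding so the spatial
        # dimensions line up with the converted Caffe weights.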
if padding.lower() == 'same' and kernel_size > 1:
#print("INPUT SHAPE: ", inputs.get_shape().as_list())
#print("KERNEL SIZE: ", kernel_size)
if kernel_size > 1:
oh = inputs.get_shape().as_list()[1]
h = inputs.get_shape().as_list()[1]
p = int(math.floor(((oh - 1) * stride + kernel_size - h)//2))
inputs = tf.pad(inputs,
[[0, 0], [p, p], [p, p], [0, 0]],
'CONSTANT')
#print("PADDED INPUT SIZE: ", inputs.get_shape().as_list())
else:
raise Exception('unsupported kernel size for padding: "{}"'
.format(kernel_size))
return tf.keras.layers.Conv2D(
filters = filter_depth,
kernel_size=(kernel_size, kernel_size),
strides=(stride, stride), padding='valid',
activation=None, trainable=trainable, name=name,
kernel_initializer=tf.constant_initializer(
self.__get_weights(name, "weights"), dtype=tf.float32),
bias_initializer=tf.constant_initializer(
self.__get_weights(name, "biases"), dtype=tf.float32))(inputs)
def __batch_norm(self, name, inputs, training=False):
return tf.keras.layers.BatchNormalization(
trainable=training, epsilon=self.bn_epsilon,
gamma_initializer=tf.constant_initializer(
self.__get_weights(name, "scale"), dtype=tf.float32),
beta_initializer=tf.constant_initializer(
self.__get_weights(name, "offset"), dtype=tf.float32),
moving_mean_initializer=tf.constant_initializer(
self.__get_weights(name, "mean"), dtype=tf.float32),
moving_variance_initializer=tf.constant_initializer(
self.__get_weights(name, "variance"), dtype=tf.float32),
name=name)(inputs)
"""ResNet blocks
"""
def __conv_block(self, stage, block, inputs, filter_depths,
kernel_size=3, stride=2):
filter_depth1, filter_depth2, filter_depth3 = filter_depths
conv_name_base = "conv_stage{}_block{}_branch".format(stage, block)
bn_name_base = "bn_stage{}_block{}_branch".format(stage, block)
shortcut_name_post = "_stage{}_block{}_proj_shortcut" \
.format(stage, block)
shortcut = self.__conv2d(
name="conv{}".format(shortcut_name_post), stride=stride,
inputs=inputs, filter_depth=filter_depth3, kernel_size=1,
padding="same"
)
shortcut = self.__batch_norm("bn{}".format(shortcut_name_post),
shortcut)
x = self.__conv2d(
name="{}2a".format(conv_name_base),
inputs=inputs, filter_depth=filter_depth1, kernel_size=1,
stride=stride, padding="same",
)
x = self.__batch_norm("{}2a".format(bn_name_base), x)
x = tf.nn.relu(x)
x = self.__conv2d(
name="{}2b".format(conv_name_base),
inputs=x, filter_depth=filter_depth2, kernel_size=kernel_size,
padding="same", stride=1
)
x = self.__batch_norm("{}2b".format(bn_name_base), x)
x = tf.nn.relu(x)
x = self.__conv2d(
name="{}2c".format(conv_name_base),
inputs=x, filter_depth=filter_depth3, kernel_size=1,
padding="same", stride=1
)
x = self.__batch_norm("{}2c".format(bn_name_base), x)
x = tf.add(x, shortcut)
return tf.nn.relu(x)
def __identity_block(self, stage, block, inputs,
filter_depths, kernel_size):
filter_depth1, filter_depth2, filter_depth3 = filter_depths
conv_name_base = "conv_stage{}_block{}_branch".format(stage, block)
bn_name_base = "bn_stage{}_block{}_branch".format(stage, block)
x = self.__conv2d(
name="{}2a".format(conv_name_base),
inputs=inputs, filter_depth=filter_depth1, kernel_size=1,
stride=1, padding="same",
)
x = self.__batch_norm("{}2a".format(bn_name_base), x)
x = tf.nn.relu(x)
x = self.__conv2d(
name="{}2b".format(conv_name_base),
inputs=x, filter_depth=filter_depth2, kernel_size=kernel_size,
padding="same", stride=1
)
x = self.__batch_norm("{}2b".format(bn_name_base), x)
x = tf.nn.relu(x)
x = self.__conv2d(
name="{}2c".format(conv_name_base),
inputs=x, filter_depth=filter_depth3, kernel_size=1,
padding="same", stride=1
)
x = self.__batch_norm("{}2c".format(bn_name_base), x)
x = tf.add(x, inputs)
return tf.nn.relu(x)
| [
"tensorflow.nn.relu",
"tensorflow.keras.layers.AveragePooling2D",
"tensorflow.nn.softmax",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.add",
"tensorflow.pad",
"numpy.load"
] | Tensorflow-tutorials/Not-Safe-For-Work-Detection/model.py | [(48, 'tensorflow.pad', 'tf.pad', (['x', '[[0, 0], [3, 3], [3, 3], [0, 0]]', '"""CONSTANT"""'], {}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 1024)'}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.logits'], {'name': '"""predictions"""'}), True, 'import tensorflow as tf\n'), (201, 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), True, 'import tensorflow as tf\n'), (209, 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), True, 'import tensorflow as tf\n'), (218, 'tensorflow.add', 'tf.add', (['x', 'shortcut'], {}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), True, 'import tensorflow as tf\n'), (235, 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), True, 'import tensorflow as tf\n'), (252, 'tensorflow.add', 'tf.add', (['x', 'inputs'], {}), True, 'import tensorflow as tf\n'), (254, 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 224, 224, 3]', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {'pool_size': '(3)', 'strides': '(2)', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.keras.layers.AveragePooling2D', 'tf.keras.layers.AveragePooling2D', ([], {'pool_size': '(7)', 'strides': '(1)', 'padding': '"""valid"""', 'name': '"""pool"""'}), True, 'import tensorflow as tf\n'), (30, 'numpy.load', 'np.load', (['weights_path'], {'encoding': '"""latin1"""'}), True, 'import numpy as np\n'), (41, 'tensorflow.placeholder', 'tf.placeholder', (['tf.string'], {'shape': '(None,)', 'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (42, 'image_utils.load_base64_tensor', 'load_base64_tensor', (['self.input'], {}), False, 'from image_utils import load_base64_tensor\n'), (144, 'tensorflow.pad', 'tf.pad', (['inputs', '[[0, 0], [p, p], [p, p], [0, 0]]', '"""CONSTANT"""'], {}), True, 'import tensorflow as tf\n'), (142, 'math.floor', 'math.floor', (['(((oh - 1) * stride + kernel_size - h) // 2)'], {}), False, 'import math\n')] |
augustoolucas/iCaRL | 90ac1be39c9e055d9dd2fa1b679c0cfb8cf7335a | import tensorflow as tf
import numpy as np
try:
import cPickle
except:
import _pickle as cPickle
def relu(x, name, alpha):
if alpha > 0:
return tf.maximum(alpha * x, x, name=name)
else:
return tf.nn.relu(x, name=name)
def get_variable(name, shape, dtype, initializer, trainable=True, regularizer=None):
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, trainable=trainable,
collections=[tf.GraphKeys.WEIGHTS, tf.GraphKeys.GLOBAL_VARIABLES])
return var
def conv(inp, name, size, out_channels, strides=[1, 1, 1, 1],
dilation=None, padding='SAME', apply_relu=True, alpha=0.0,bias=True,
initializer=tf.contrib.layers.xavier_initializer_conv2d()):
batch_size = inp.get_shape().as_list()[0]
res1 = inp.get_shape().as_list()[1]
    res2 = inp.get_shape().as_list()[2]
in_channels = inp.get_shape().as_list()[3]
with tf.variable_scope(name):
W = get_variable("W", shape=[size, size, in_channels, out_channels], dtype=tf.float32,
initializer=initializer, regularizer=tf.nn.l2_loss)
b = get_variable("b", shape=[1, 1, 1, out_channels], dtype=tf.float32,
initializer=tf.zeros_initializer(),trainable=bias)
if dilation:
assert(strides == [1, 1, 1, 1])
out = tf.add(tf.nn.atrous_conv2d(inp, W, rate=dilation, padding=padding), b, name='convolution')
out.set_shape([batch_size, res1, res2, out_channels])
else:
out = tf.add(tf.nn.conv2d(inp, W, strides=strides, padding=padding), b, name='convolution')
if apply_relu:
out = relu(out, alpha=alpha, name='relu')
return out
def softmax(target, axis, name=None):
max_axis = tf.reduce_max(target, axis, keep_dims=True)
target_exp = tf.exp(target - max_axis)
normalize = tf.reduce_sum(target_exp, axis, keep_dims=True)
softmax = target_exp / normalize
return softmax
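# Subtracting the per-axis maximum before exponentiating is the numerically
# stable softmax, softmax(x) = exp(x - max(x)) / sum(exp(x - max(x))), and does
# not change the result; e.g. softmax([1., 2., 3.]) evaluates to roughly
# [0.090, 0.245, 0.665].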
def batch_norm(inp, name, phase, decay=0.9):
channels = inp.get_shape().as_list()[3]
with tf.variable_scope(name):
moving_mean = get_variable("mean", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(0.0), trainable=False)
moving_variance = get_variable("var", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(1.0), trainable=False)
offset = get_variable("offset", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(0.0))
scale = get_variable("scale", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(1.0), regularizer=tf.nn.l2_loss)
mean, variance = tf.nn.moments(inp, axes=[0, 1, 2], shift=moving_mean)
mean_op = moving_mean.assign(decay * moving_mean + (1 - decay) * mean)
var_op = moving_variance.assign(decay * moving_variance + (1 - decay) * variance)
assert(phase in ['train', 'test'])
if phase == 'train':
with tf.control_dependencies([mean_op, var_op]):
return tf.nn.batch_normalization(inp, mean, variance, offset, scale, 0.01, name='norm')
else:
return tf.nn.batch_normalization(inp, moving_mean, moving_variance, offset, scale, 0.01, name='norm')
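# In the 'train' phase the batch statistics from tf.nn.moments normalize the
# input while the control dependency also updates the moving mean/variance with
# exponential decay; in the 'test' phase the stored moving statistics are used
# instead.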
def pool(inp, name, kind, size, stride, padding='SAME'):
assert kind in ['max', 'avg']
strides = [1, stride, stride, 1]
sizes = [1, size, size, 1]
with tf.variable_scope(name):
if kind == 'max':
out = tf.nn.max_pool(inp, sizes, strides=strides, padding=padding, name=kind)
else:
out = tf.nn.avg_pool(inp, sizes, strides=strides, padding=padding, name=kind)
return out
def ResNet18(inp, phase, num_outputs=1000, alpha=0.0):
def residual_block(inp, phase, alpha=0.0,nom='a',increase_dim=False,last=False):
input_num_filters = inp.get_shape().as_list()[3]
if increase_dim:
first_stride = [1, 2, 2, 1]
out_num_filters = input_num_filters*2
else:
first_stride = [1, 1, 1, 1]
out_num_filters = input_num_filters
layer = conv(inp, 'resconv1'+nom, size=3, strides=first_stride, out_channels=out_num_filters, alpha=alpha, padding='SAME')
layer = batch_norm(layer, 'batch_norm_resconv1'+nom, phase=phase)
layer = conv(layer, 'resconv2'+nom, size=3, strides=[1, 1, 1, 1], out_channels=out_num_filters, apply_relu=False,alpha=alpha, padding='SAME')
layer = batch_norm(layer, 'batch_norm_resconv2'+nom, phase=phase)
if increase_dim:
projection = conv(inp, 'projconv'+nom, size=1, strides=[1, 2, 2, 1], out_channels=out_num_filters, alpha=alpha, apply_relu=False,padding='SAME',bias=False)
projection = batch_norm(projection, 'batch_norm_projconv'+nom, phase=phase)
            block = layer + projection
            if not last:
                block = tf.nn.relu(block, name='relu')
else:
            block = layer + inp
            if not last:
                block = tf.nn.relu(block, name='relu')
return block
# First conv
#layer = batch_norm(inp, 'batch_norm_0', phase=phase)
layer = conv(inp,"conv1",size=7,strides=[1, 2, 2, 1], out_channels=64, alpha=alpha, padding='SAME')
layer = batch_norm(layer, 'batch_norm_1', phase=phase)
layer = pool(layer, 'pool1', 'max', size=3, stride=2)
# First stack of residual blocks
for letter in 'ab':
layer = residual_block(layer, phase, alpha=0.0,nom=letter)
# Second stack of residual blocks
layer = residual_block(layer, phase, alpha=0.0,nom='c',increase_dim=True)
for letter in 'd':
layer = residual_block(layer, phase, alpha=0.0,nom=letter)
# Third stack of residual blocks
layer = residual_block(layer, phase, alpha=0.0,nom='e',increase_dim=True)
for letter in 'f':
layer = residual_block(layer, phase, alpha=0.0,nom=letter)
# Fourth stack of residual blocks
layer = residual_block(layer, phase, alpha=0.0,nom='g',increase_dim=True)
layer = residual_block(layer, phase, alpha=0.0,nom='h',increase_dim=False,last=True)
layer = pool(layer, 'pool_last', 'avg', size=7, stride=1,padding='VALID')
layer = conv(layer, name='fc', size=1, out_channels=num_outputs, padding='VALID', apply_relu=False, alpha=alpha)[:, 0, 0, :]
return layer
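# Minimal usage sketch: the 224x224 input resolution and the 100 output classes
# chosen below are illustrative assumptions, not values required by ResNet18.
def _resnet18_usage_sketch():
    images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name='images')
    logits = ResNet18(images, phase='train', num_outputs=100)
    # With 224x224 inputs the final 7x7 'VALID' average pooling leaves a 1x1
    # map, so `logits` has static shape [None, 100].
    return logits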
def get_weight_initializer(params):
initializer = []
scope = tf.get_variable_scope()
scope.reuse_variables()
for layer, value in params.items():
op = tf.get_variable('%s' % layer).assign(value)
initializer.append(op)
return initializer
def save_model(name, scope, sess):
variables = tf.get_collection(tf.GraphKeys.WEIGHTS, scope=scope)
d = [(v.name.split(':')[0], sess.run(v)) for v in variables]
cPickle.dump(d, open(name, 'wb'))
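# Typical round trip (sketch, file name for illustration only): save_model
# dumps a list of (variable name, value) pairs for the WEIGHTS collection under
# the given scope; to restore, load it back into a dict, e.g.
# params = dict(cPickle.load(open('net.pkl', 'rb'))), rebuild the graph, and run
# the assign ops returned by get_weight_initializer(params) in a session.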
| [
"tensorflow.device",
"tensorflow.get_variable",
"tensorflow.control_dependencies",
"tensorflow.nn.max_pool",
"tensorflow.reduce_sum",
"tensorflow.nn.atrous_conv2d",
"tensorflow.nn.conv2d",
"tensorflow.get_collection",
"tensorflow.nn.moments",
"tensorflow.nn.batch_normalization",
"tensorflow.zeros_initializer",
"tensorflow.exp",
"tensorflow.nn.avg_pool",
"tensorflow.contrib.layers.xavier_initializer_conv2d",
"tensorflow.nn.relu",
"tensorflow.reduce_max",
"tensorflow.maximum",
"tensorflow.constant_initializer",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope"
] | iCaRL-Tensorflow/utils_resnet.py | [(26, 'tensorflow.contrib.layers.xavier_initializer_conv2d', 'tf.contrib.layers.xavier_initializer_conv2d', ([], {}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.reduce_max', 'tf.reduce_max', (['target', 'axis'], {'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.exp', 'tf.exp', (['(target - max_axis)'], {}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['target_exp', 'axis'], {'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (175, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.WEIGHTS'], {'scope': 'scope'}), True, 'import tensorflow as tf\n'), (11, 'tensorflow.maximum', 'tf.maximum', (['(alpha * x)', 'x'], {'name': 'name'}), True, 'import tensorflow as tf\n'), (13, 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {'name': 'name'}), True, 'import tensorflow as tf\n'), (17, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (18, 'tensorflow.get_variable', 'tf.get_variable', (['name'], {'shape': 'shape', 'dtype': 'dtype', 'initializer': 'initializer', 'regularizer': 'regularizer', 'trainable': 'trainable', 'collections': '[tf.GraphKeys.WEIGHTS, tf.GraphKeys.GLOBAL_VARIABLES]'}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.nn.moments', 'tf.nn.moments', (['inp'], {'axes': '[0, 1, 2]', 'shift': 'moving_mean'}), True, 'import tensorflow as tf\n'), (91, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['inp', 'moving_mean', 'moving_variance', 'offset', 'scale', '(0.01)'], {'name': '"""norm"""'}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['inp', 'sizes'], {'strides': 'strides', 'padding': 'padding', 'name': 'kind'}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['inp', 'sizes'], {'strides': 'strides', 'padding': 'padding', 'name': 'kind'}), True, 'import tensorflow as tf\n'), (37, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.nn.atrous_conv2d', 'tf.nn.atrous_conv2d', (['inp', 'W'], {'rate': 'dilation', 'padding': 'padding'}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['inp', 'W'], {'strides': 'strides', 'padding': 'padding'}), True, 'import tensorflow as tf\n'), (65, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (69, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (78, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[mean_op, var_op]'], {}), True, 'import tensorflow as tf\n'), (79, 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['inp', 'mean', 'variance', 'offset', 
'scale', '(0.01)'], {'name': '"""norm"""'}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.nn.relu', 'tf.nn.relu', (['block'], {'name': '"""relu"""'}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.nn.relu', 'tf.nn.relu', (['block'], {'name': '"""relu"""'}), True, 'import tensorflow as tf\n'), (169, 'tensorflow.get_variable', 'tf.get_variable', (["('%s' % layer)"], {}), True, 'import tensorflow as tf\n')] |
soulsheng/lanenet-lane-detection | f7bc580a73e686a77a5506dbfc57ed424f0715b5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 17-9-18 3:59 PM
# @Author : MaybeShewill-CV
# @Site : https://github.com/MaybeShewill-CV/lanenet-lane-detection
# @File : cnn_basenet.py
# @IDE: PyCharm Community Edition
"""
The base convolutional neural network class mainly implements some useful cnn functions
"""
import tensorflow as tf
import numpy as np
class CNNBaseModel(object):
"""
    Base model for other specific cnn models
"""
def __init__(self):
pass
@staticmethod
def conv2d(inputdata, out_channel, kernel_size, padding='SAME',
stride=1, w_init=None, b_init=None,
split=1, use_bias=True, data_format='NHWC', name=None):
"""
Packing the tensorflow conv2d function.
:param name: op name
        :param inputdata: A 4D tensorflow tensor which must have a known number of channels, but can have
        other unknown dimensions.
        :param out_channel: number of output channels.
        :param kernel_size: int, so only square convolution kernels are supported
        :param padding: 'VALID' or 'SAME'
        :param stride: int, so only equal strides along both spatial dimensions are supported
        :param w_init: initializer for convolution weights
        :param b_init: initializer for bias
        :param split: split channels into groups as in AlexNet, mainly to save GPU memory.
        :param use_bias: whether to use bias.
        :param data_format: default set to NHWC following the tensorflow convention
:return: tf.Tensor named ``output``
"""
with tf.variable_scope(name):
in_shape = inputdata.get_shape().as_list()
channel_axis = 3 if data_format == 'NHWC' else 1
in_channel = in_shape[channel_axis]
assert in_channel is not None, "[Conv2D] Input cannot have unknown channel!"
assert in_channel % split == 0
assert out_channel % split == 0
padding = padding.upper()
if isinstance(kernel_size, list):
filter_shape = [kernel_size[0], kernel_size[1]] + [in_channel / split, out_channel]
else:
filter_shape = [kernel_size, kernel_size] + [in_channel / split, out_channel]
if isinstance(stride, list):
strides = [1, stride[0], stride[1], 1] if data_format == 'NHWC' \
else [1, 1, stride[0], stride[1]]
else:
strides = [1, stride, stride, 1] if data_format == 'NHWC' \
else [1, 1, stride, stride]
if w_init is None:
w_init = tf.contrib.layers.variance_scaling_initializer()
if b_init is None:
b_init = tf.constant_initializer()
w = tf.get_variable('W', filter_shape, initializer=w_init)
b = None
if use_bias:
b = tf.get_variable('b', [out_channel], initializer=b_init)
if split == 1:
conv = tf.nn.conv2d(inputdata, w, strides, padding, data_format=data_format)
else:
inputs = tf.split(inputdata, split, channel_axis)
kernels = tf.split(w, split, 3)
outputs = [tf.nn.conv2d(i, k, strides, padding, data_format=data_format)
for i, k in zip(inputs, kernels)]
conv = tf.concat(outputs, channel_axis)
ret = tf.identity(tf.nn.bias_add(conv, b, data_format=data_format)
if use_bias else conv, name=name)
return ret
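    # Shape example (values are illustrative): for an NHWC input `x` of shape
    # [N, 256, 512, 3], conv2d(x, out_channel=64, kernel_size=3, stride=2,
    # name='conv1') returns a [N, 128, 256, 64] tensor, since 'SAME' padding is
    # the default.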
@staticmethod
def depthwise_conv(input_tensor, kernel_size, name, depth_multiplier=1,
padding='SAME', stride=1):
"""
:param input_tensor:
:param kernel_size:
:param name:
:param depth_multiplier:
:param padding:
:param stride:
:return:
"""
with tf.variable_scope(name_or_scope=name):
in_shape = input_tensor.get_shape().as_list()
in_channel = in_shape[3]
padding = padding.upper()
depthwise_filter_shape = [kernel_size, kernel_size] + [in_channel, depth_multiplier]
w_init = tf.contrib.layers.variance_scaling_initializer()
depthwise_filter = tf.get_variable(
name='depthwise_filter_w', shape=depthwise_filter_shape,
initializer=w_init
)
result = tf.nn.depthwise_conv2d(
input=input_tensor,
filter=depthwise_filter,
strides=[1, stride, stride, 1],
padding=padding,
name='depthwise_conv_output'
)
return result
@staticmethod
def relu(inputdata, name=None):
"""
:param name:
:param inputdata:
:return:
"""
return tf.nn.relu(features=inputdata, name=name)
@staticmethod
def sigmoid(inputdata, name=None):
"""
:param name:
:param inputdata:
:return:
"""
return tf.nn.sigmoid(x=inputdata, name=name)
@staticmethod
def maxpooling(inputdata, kernel_size, stride=None, padding='VALID',
data_format='NHWC', name=None):
"""
:param name:
:param inputdata:
:param kernel_size:
:param stride:
:param padding:
:param data_format:
:return:
"""
padding = padding.upper()
if stride is None:
stride = kernel_size
if isinstance(kernel_size, list):
kernel = [1, kernel_size[0], kernel_size[1], 1] if data_format == 'NHWC' else \
[1, 1, kernel_size[0], kernel_size[1]]
else:
kernel = [1, kernel_size, kernel_size, 1] if data_format == 'NHWC' \
else [1, 1, kernel_size, kernel_size]
if isinstance(stride, list):
strides = [1, stride[0], stride[1], 1] if data_format == 'NHWC' \
else [1, 1, stride[0], stride[1]]
else:
strides = [1, stride, stride, 1] if data_format == 'NHWC' \
else [1, 1, stride, stride]
return tf.nn.max_pool(value=inputdata, ksize=kernel, strides=strides, padding=padding,
data_format=data_format, name=name)
@staticmethod
def avgpooling(inputdata, kernel_size, stride=None, padding='VALID',
data_format='NHWC', name=None):
"""
:param name:
:param inputdata:
:param kernel_size:
:param stride:
:param padding:
:param data_format:
:return:
"""
if stride is None:
stride = kernel_size
kernel = [1, kernel_size, kernel_size, 1] if data_format == 'NHWC' \
else [1, 1, kernel_size, kernel_size]
strides = [1, stride, stride, 1] if data_format == 'NHWC' else [1, 1, stride, stride]
return tf.nn.avg_pool(value=inputdata, ksize=kernel, strides=strides, padding=padding,
data_format=data_format, name=name)
@staticmethod
def globalavgpooling(inputdata, data_format='NHWC', name=None):
"""
:param name:
:param inputdata:
:param data_format:
:return:
"""
assert inputdata.shape.ndims == 4
assert data_format in ['NHWC', 'NCHW']
axis = [1, 2] if data_format == 'NHWC' else [2, 3]
return tf.reduce_mean(input_tensor=inputdata, axis=axis, name=name)
@staticmethod
def layernorm(inputdata, epsilon=1e-5, use_bias=True, use_scale=True,
data_format='NHWC', name=None):
"""
:param name:
:param inputdata:
:param epsilon: epsilon to avoid divide-by-zero.
:param use_bias: whether to use the extra affine transformation or not.
:param use_scale: whether to use the extra affine transformation or not.
:param data_format:
:return:
"""
shape = inputdata.get_shape().as_list()
ndims = len(shape)
assert ndims in [2, 4]
mean, var = tf.nn.moments(inputdata, list(range(1, len(shape))), keep_dims=True)
if data_format == 'NCHW':
channnel = shape[1]
new_shape = [1, channnel, 1, 1]
else:
channnel = shape[-1]
new_shape = [1, 1, 1, channnel]
if ndims == 2:
new_shape = [1, channnel]
if use_bias:
beta = tf.get_variable('beta', [channnel], initializer=tf.constant_initializer())
beta = tf.reshape(beta, new_shape)
else:
beta = tf.zeros([1] * ndims, name='beta')
if use_scale:
gamma = tf.get_variable('gamma', [channnel], initializer=tf.constant_initializer(1.0))
gamma = tf.reshape(gamma, new_shape)
else:
gamma = tf.ones([1] * ndims, name='gamma')
return tf.nn.batch_normalization(inputdata, mean, var, beta, gamma, epsilon, name=name)
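    # layernorm above and instancenorm below both apply the affine-normalized
    # form y = gamma * (x - mean) / sqrt(var + epsilon) + beta; they differ only
    # in the axes used for mean/var (all non-batch axes here, spatial axes only
    # for instance normalization).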
@staticmethod
def instancenorm(inputdata, epsilon=1e-5, data_format='NHWC', use_affine=True, name=None):
"""
:param name:
:param inputdata:
:param epsilon:
:param data_format:
:param use_affine:
:return:
"""
shape = inputdata.get_shape().as_list()
if len(shape) != 4:
raise ValueError("Input data of instancebn layer has to be 4D tensor")
if data_format == 'NHWC':
axis = [1, 2]
ch = shape[3]
new_shape = [1, 1, 1, ch]
else:
axis = [2, 3]
ch = shape[1]
new_shape = [1, ch, 1, 1]
if ch is None:
raise ValueError("Input of instancebn require known channel!")
mean, var = tf.nn.moments(inputdata, axis, keep_dims=True)
if not use_affine:
return tf.divide(inputdata - mean, tf.sqrt(var + epsilon), name='output')
beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer())
beta = tf.reshape(beta, new_shape)
gamma = tf.get_variable('gamma', [ch], initializer=tf.constant_initializer(1.0))
gamma = tf.reshape(gamma, new_shape)
return tf.nn.batch_normalization(inputdata, mean, var, beta, gamma, epsilon, name=name)
@staticmethod
def dropout(inputdata, keep_prob, noise_shape=None, name=None):
"""
:param name:
:param inputdata:
:param keep_prob:
:param noise_shape:
:return:
"""
return tf.nn.dropout(inputdata, keep_prob=keep_prob, noise_shape=noise_shape, name=name)
@staticmethod
def fullyconnect(inputdata, out_dim, w_init=None, b_init=None,
use_bias=True, name=None):
"""
Fully-Connected layer, takes a N>1D tensor and returns a 2D tensor.
It is an equivalent of `tf.layers.dense` except for naming conventions.
:param inputdata: a tensor to be flattened except for the first dimension.
:param out_dim: output dimension
:param w_init: initializer for w. Defaults to `variance_scaling_initializer`.
:param b_init: initializer for b. Defaults to zero
:param use_bias: whether to use bias.
:param name:
:return: tf.Tensor: a NC tensor named ``output`` with attribute `variables`.
"""
shape = inputdata.get_shape().as_list()[1:]
if None not in shape:
inputdata = tf.reshape(inputdata, [-1, int(np.prod(shape))])
else:
inputdata = tf.reshape(inputdata, tf.stack([tf.shape(inputdata)[0], -1]))
if w_init is None:
w_init = tf.contrib.layers.variance_scaling_initializer()
if b_init is None:
b_init = tf.constant_initializer()
ret = tf.layers.dense(inputs=inputdata, activation=lambda x: tf.identity(x, name='output'),
use_bias=use_bias, name=name,
kernel_initializer=w_init, bias_initializer=b_init,
trainable=True, units=out_dim)
return ret
@staticmethod
def layerbn(inputdata, is_training, name, scale=True):
"""
:param inputdata:
:param is_training:
:param name:
:param scale:
:return:
"""
return tf.layers.batch_normalization(inputs=inputdata, training=is_training, name=name, scale=scale)
@staticmethod
def layergn(inputdata, name, group_size=32, esp=1e-5):
"""
:param inputdata:
:param name:
:param group_size:
:param esp:
:return:
"""
with tf.variable_scope(name):
inputdata = tf.transpose(inputdata, [0, 3, 1, 2])
n, c, h, w = inputdata.get_shape().as_list()
group_size = min(group_size, c)
inputdata = tf.reshape(inputdata, [-1, group_size, c // group_size, h, w])
mean, var = tf.nn.moments(inputdata, [2, 3, 4], keep_dims=True)
inputdata = (inputdata - mean) / tf.sqrt(var + esp)
            # per-channel scale (gamma) and shift (beta) parameters
gamma = tf.Variable(tf.constant(1.0, shape=[c]), dtype=tf.float32, name='gamma')
beta = tf.Variable(tf.constant(0.0, shape=[c]), dtype=tf.float32, name='beta')
gamma = tf.reshape(gamma, [1, c, 1, 1])
beta = tf.reshape(beta, [1, c, 1, 1])
            # reshape back to [n, c, h, w] and transpose to [n, h, w, c], as in the paper
output = tf.reshape(inputdata, [-1, c, h, w])
output = output * gamma + beta
output = tf.transpose(output, [0, 2, 3, 1])
return output
@staticmethod
def squeeze(inputdata, axis=None, name=None):
"""
:param inputdata:
:param axis:
:param name:
:return:
"""
return tf.squeeze(input=inputdata, axis=axis, name=name)
@staticmethod
def deconv2d(inputdata, out_channel, kernel_size, padding='SAME',
stride=1, w_init=None, b_init=None,
use_bias=True, activation=None, data_format='channels_last',
trainable=True, name=None):
"""
Packing the tensorflow conv2d function.
:param name: op name
        :param inputdata: A 4D tensorflow tensor which must have a known number of channels, but can have
        other unknown dimensions.
        :param out_channel: number of output channels.
        :param kernel_size: int, so only square convolution kernels are supported
        :param padding: 'VALID' or 'SAME'
        :param stride: int, so only equal strides along both spatial dimensions are supported
        :param w_init: initializer for convolution weights
        :param b_init: initializer for bias
        :param activation: whether to apply an activation function to the deconv result
        :param use_bias: whether to use bias.
        :param data_format: default set to NHWC following the tensorflow convention
:return: tf.Tensor named ``output``
"""
with tf.variable_scope(name):
in_shape = inputdata.get_shape().as_list()
channel_axis = 3 if data_format == 'channels_last' else 1
in_channel = in_shape[channel_axis]
assert in_channel is not None, "[Deconv2D] Input cannot have unknown channel!"
padding = padding.upper()
if w_init is None:
w_init = tf.contrib.layers.variance_scaling_initializer()
if b_init is None:
b_init = tf.constant_initializer()
ret = tf.layers.conv2d_transpose(inputs=inputdata, filters=out_channel,
kernel_size=kernel_size,
strides=stride, padding=padding,
data_format=data_format,
activation=activation, use_bias=use_bias,
kernel_initializer=w_init,
bias_initializer=b_init, trainable=trainable,
name=name)
return ret
@staticmethod
def dilation_conv(input_tensor, k_size, out_dims, rate, padding='SAME',
w_init=None, b_init=None, use_bias=False, name=None):
"""
:param input_tensor:
:param k_size:
:param out_dims:
:param rate:
:param padding:
:param w_init:
:param b_init:
:param use_bias:
:param name:
:return:
"""
with tf.variable_scope(name):
in_shape = input_tensor.get_shape().as_list()
in_channel = in_shape[3]
assert in_channel is not None, "[Conv2D] Input cannot have unknown channel!"
padding = padding.upper()
if isinstance(k_size, list):
filter_shape = [k_size[0], k_size[1]] + [in_channel, out_dims]
else:
filter_shape = [k_size, k_size] + [in_channel, out_dims]
if w_init is None:
w_init = tf.contrib.layers.variance_scaling_initializer()
if b_init is None:
b_init = tf.constant_initializer()
w = tf.get_variable('W', filter_shape, initializer=w_init)
b = None
if use_bias:
b = tf.get_variable('b', [out_dims], initializer=b_init)
conv = tf.nn.atrous_conv2d(value=input_tensor, filters=w, rate=rate,
padding=padding, name='dilation_conv')
if use_bias:
ret = tf.add(conv, b)
else:
ret = conv
return ret
@staticmethod
def spatial_dropout(input_tensor, keep_prob, is_training, name, seed=1234):
"""
        Spatial dropout implementation
:param input_tensor:
:param keep_prob:
:param is_training:
:param name:
:param seed:
:return:
"""
def f1():
input_shape = input_tensor.get_shape().as_list()
noise_shape = tf.constant(value=[input_shape[0], 1, 1, input_shape[3]])
return tf.nn.dropout(input_tensor, keep_prob, noise_shape, seed=seed, name="spatial_dropout")
def f2():
return input_tensor
with tf.variable_scope(name_or_scope=name):
output = tf.cond(is_training, f1, f2)
return output
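    # The noise_shape of [batch, 1, 1, channels] used above makes tf.nn.dropout
    # draw one keep/drop decision per feature map, so entire channels are
    # dropped rather than individual activations; when is_training is False the
    # input passes through unchanged.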
@staticmethod
def lrelu(inputdata, name, alpha=0.2):
"""
:param inputdata:
:param alpha:
:param name:
:return:
"""
with tf.variable_scope(name):
return tf.nn.relu(inputdata) - alpha * tf.nn.relu(-inputdata)
| [
"tensorflow.cond",
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.nn.max_pool",
"tensorflow.layers.conv2d_transpose",
"tensorflow.nn.depthwise_conv2d",
"tensorflow.nn.atrous_conv2d",
"tensorflow.nn.conv2d",
"tensorflow.layers.batch_normalization",
"tensorflow.contrib.layers.variance_scaling_initializer",
"tensorflow.nn.moments",
"tensorflow.squeeze",
"tensorflow.add",
"tensorflow.nn.dropout",
"tensorflow.nn.batch_normalization",
"tensorflow.nn.sigmoid",
"tensorflow.shape",
"tensorflow.identity",
"tensorflow.nn.avg_pool",
"tensorflow.split",
"tensorflow.nn.bias_add",
"tensorflow.nn.relu",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.constant_initializer",
"numpy.prod",
"tensorflow.variable_scope",
"tensorflow.sqrt"
] | semantic_segmentation_zoo/cnn_basenet.py | [(133, 'tensorflow.nn.relu', 'tf.nn.relu', ([], {'features': 'inputdata', 'name': 'name'}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', ([], {'x': 'inputdata', 'name': 'name'}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.nn.max_pool', 'tf.nn.max_pool', ([], {'value': 'inputdata', 'ksize': 'kernel', 'strides': 'strides', 'padding': 'padding', 'data_format': 'data_format', 'name': 'name'}), True, 'import tensorflow as tf\n'), (201, 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', ([], {'value': 'inputdata', 'ksize': 'kernel', 'strides': 'strides', 'padding': 'padding', 'data_format': 'data_format', 'name': 'name'}), True, 'import tensorflow as tf\n'), (218, 'tensorflow.reduce_mean', 'tf.reduce_mean', ([], {'input_tensor': 'inputdata', 'axis': 'axis', 'name': 'name'}), True, 'import tensorflow as tf\n'), (258, 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['inputdata', 'mean', 'var', 'beta', 'gamma', 'epsilon'], {'name': 'name'}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.nn.moments', 'tf.nn.moments', (['inputdata', 'axis'], {'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (292, 'tensorflow.reshape', 'tf.reshape', (['beta', 'new_shape'], {}), True, 'import tensorflow as tf\n'), (294, 'tensorflow.reshape', 'tf.reshape', (['gamma', 'new_shape'], {}), True, 'import tensorflow as tf\n'), (295, 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['inputdata', 'mean', 'var', 'beta', 'gamma', 'epsilon'], {'name': 'name'}), True, 'import tensorflow as tf\n'), (307, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['inputdata'], {'keep_prob': 'keep_prob', 'noise_shape': 'noise_shape', 'name': 'name'}), True, 'import tensorflow as tf\n'), (352, 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', ([], {'inputs': 'inputdata', 'training': 'is_training', 'name': 'name', 'scale': 'scale'}), True, 'import tensorflow as tf\n'), (394, 'tensorflow.squeeze', 'tf.squeeze', ([], {'input': 'inputdata', 'axis': 'axis', 'name': 'name'}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.get_variable', 'tf.get_variable', (['"""W"""', 'filter_shape'], {'initializer': 'w_init'}), True, 'import tensorflow as tf\n'), (103, 'tensorflow.variable_scope', 'tf.variable_scope', ([], {'name_or_scope': 'name'}), True, 'import tensorflow as tf\n'), (109, 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {}), True, 'import tensorflow as tf\n'), (111, 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""depthwise_filter_w"""', 'shape': 'depthwise_filter_shape', 'initializer': 'w_init'}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.nn.depthwise_conv2d', 'tf.nn.depthwise_conv2d', ([], {'input': 'input_tensor', 'filter': 'depthwise_filter', 'strides': '[1, stride, stride, 1]', 'padding': 'padding', 'name': '"""depthwise_conv_output"""'}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.reshape', 'tf.reshape', (['beta', 'new_shape'], {}), True, 'import tensorflow as tf\n'), (251, 'tensorflow.zeros', 'tf.zeros', (['([1] * ndims)'], {'name': '"""beta"""'}), True, 'import tensorflow as tf\n'), (254, 'tensorflow.reshape', 'tf.reshape', (['gamma', 'new_shape'], {}), True, 'import tensorflow as tf\n'), (256, 'tensorflow.ones', 'tf.ones', (['([1] * ndims)'], {'name': 
'"""gamma"""'}), True, 'import tensorflow as tf\n'), (331, 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {}), True, 'import tensorflow as tf\n'), (333, 'tensorflow.constant_initializer', 'tf.constant_initializer', ([], {}), True, 'import tensorflow as tf\n'), (364, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (365, 'tensorflow.transpose', 'tf.transpose', (['inputdata', '[0, 3, 1, 2]'], {}), True, 'import tensorflow as tf\n'), (368, 'tensorflow.reshape', 'tf.reshape', (['inputdata', '[-1, group_size, c // group_size, h, w]'], {}), True, 'import tensorflow as tf\n'), (369, 'tensorflow.nn.moments', 'tf.nn.moments', (['inputdata', '[2, 3, 4]'], {'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (375, 'tensorflow.reshape', 'tf.reshape', (['gamma', '[1, c, 1, 1]'], {}), True, 'import tensorflow as tf\n'), (376, 'tensorflow.reshape', 'tf.reshape', (['beta', '[1, c, 1, 1]'], {}), True, 'import tensorflow as tf\n'), (379, 'tensorflow.reshape', 'tf.reshape', (['inputdata', '[-1, c, h, w]'], {}), True, 'import tensorflow as tf\n'), (381, 'tensorflow.transpose', 'tf.transpose', (['output', '[0, 2, 3, 1]'], {}), True, 'import tensorflow as tf\n'), (417, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (430, 'tensorflow.layers.conv2d_transpose', 'tf.layers.conv2d_transpose', ([], {'inputs': 'inputdata', 'filters': 'out_channel', 'kernel_size': 'kernel_size', 'strides': 'stride', 'padding': 'padding', 'data_format': 'data_format', 'activation': 'activation', 'use_bias': 'use_bias', 'kernel_initializer': 'w_init', 'bias_initializer': 'b_init', 'trainable': 'trainable', 'name': 'name'}), True, 'import tensorflow as tf\n'), (456, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (473, 'tensorflow.get_variable', 'tf.get_variable', (['"""W"""', 'filter_shape'], {'initializer': 'w_init'}), True, 'import tensorflow as tf\n'), (479, 'tensorflow.nn.atrous_conv2d', 'tf.nn.atrous_conv2d', ([], {'value': 'input_tensor', 'filters': 'w', 'rate': 'rate', 'padding': 'padding', 'name': '"""dilation_conv"""'}), True, 'import tensorflow as tf\n'), (503, 'tensorflow.constant', 'tf.constant', ([], {'value': '[input_shape[0], 1, 1, input_shape[3]]'}), True, 'import tensorflow as tf\n'), (504, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['input_tensor', 'keep_prob', 'noise_shape'], {'seed': 'seed', 'name': '"""spatial_dropout"""'}), True, 'import tensorflow as tf\n'), (509, 'tensorflow.variable_scope', 'tf.variable_scope', ([], {'name_or_scope': 'name'}), True, 'import tensorflow as tf\n'), (511, 'tensorflow.cond', 'tf.cond', (['is_training', 'f1', 'f2'], {}), True, 'import tensorflow as tf\n'), (524, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.constant_initializer', 'tf.constant_initializer', ([], {}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""', '[out_channel]'], {'initializer': 'b_init'}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['inputdata', 'w', 'strides', 'padding'], {'data_format': 'data_format'}), True, 'import tensorflow as tf\n'), (79, 'tensorflow.split', 
'tf.split', (['inputdata', 'split', 'channel_axis'], {}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.split', 'tf.split', (['w', 'split', '(3)'], {}), True, 'import tensorflow as tf\n'), (83, 'tensorflow.concat', 'tf.concat', (['outputs', 'channel_axis'], {}), True, 'import tensorflow as tf\n'), (289, 'tensorflow.sqrt', 'tf.sqrt', (['(var + epsilon)'], {}), True, 'import tensorflow as tf\n'), (291, 'tensorflow.constant_initializer', 'tf.constant_initializer', ([], {}), True, 'import tensorflow as tf\n'), (293, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (370, 'tensorflow.sqrt', 'tf.sqrt', (['(var + esp)'], {}), True, 'import tensorflow as tf\n'), (373, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[c]'}), True, 'import tensorflow as tf\n'), (374, 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'shape': '[c]'}), True, 'import tensorflow as tf\n'), (426, 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {}), True, 'import tensorflow as tf\n'), (428, 'tensorflow.constant_initializer', 'tf.constant_initializer', ([], {}), True, 'import tensorflow as tf\n'), (469, 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {}), True, 'import tensorflow as tf\n'), (471, 'tensorflow.constant_initializer', 'tf.constant_initializer', ([], {}), True, 'import tensorflow as tf\n'), (477, 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""', '[out_dims]'], {'initializer': 'b_init'}), True, 'import tensorflow as tf\n'), (483, 'tensorflow.add', 'tf.add', (['conv', 'b'], {}), True, 'import tensorflow as tf\n'), (525, 'tensorflow.nn.relu', 'tf.nn.relu', (['inputdata'], {}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['i', 'k', 'strides', 'padding'], {'data_format': 'data_format'}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv', 'b'], {'data_format': 'data_format'}), True, 'import tensorflow as tf\n'), (248, 'tensorflow.constant_initializer', 'tf.constant_initializer', ([], {}), True, 'import tensorflow as tf\n'), (253, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (335, 'tensorflow.identity', 'tf.identity', (['x'], {'name': '"""output"""'}), True, 'import tensorflow as tf\n'), (525, 'tensorflow.nn.relu', 'tf.nn.relu', (['(-inputdata)'], {}), True, 'import tensorflow as tf\n'), (326, 'numpy.prod', 'np.prod', (['shape'], {}), True, 'import numpy as np\n'), (328, 'tensorflow.shape', 'tf.shape', (['inputdata'], {}), True, 'import tensorflow as tf\n')] |
luozhouyang/smile_datasets | c614314b2e2d83896b252670c6e3d8bd158f055b | import logging
import tensorflow as tf
from . import utils
from .dataset import TFDataset
class TFDatasetForTokenClassification(TFDataset):
"""Dataset for token classification in TensorFlow"""
def __init__(self, examples=None, **kwargs) -> None:
super().__init__(examples, **kwargs)
self.input_ids = kwargs.pop("input_ids", "input_ids")
self.token_type_ids = kwargs.pop("token_type_ids", "token_type_ids")
self.attention_mask = kwargs.pop("attention_mask", "attention_mask")
self.labels = kwargs.pop("labels", "labels")
@classmethod
def from_tfrecord_files(cls, input_files, **kwargs) -> tf.data.Dataset:
dataset = utils.read_tfrecord_files(input_files, **kwargs)
d = cls(examples=None, **kwargs)
# parse example
features = {
d.input_ids: tf.io.VarLenFeature(tf.int64),
d.token_type_ids: tf.io.VarLenFeature(tf.int64),
d.attention_mask: tf.io.VarLenFeature(tf.int64),
d.labels: tf.io.VarLenFeature(tf.int64),
}
dataset = dataset.map(
lambda x: tf.io.parse_example(x, features),
num_parallel_calls=utils.AUTOTUNE,
).prefetch(utils.AUTOTUNE)
dataset = dataset.map(
lambda x: (
tf.cast(tf.sparse.to_dense(x[d.input_ids]), tf.int32),
tf.cast(tf.sparse.to_dense(x[d.token_type_ids]), tf.int32),
tf.cast(tf.sparse.to_dense(x[d.attention_mask]), tf.int32),
tf.cast(tf.sparse.to_dense(x[d.labels]), tf.int32),
),
num_parallel_calls=utils.AUTOTUNE,
).prefetch(utils.AUTOTUNE)
# do transformation
return d(dataset, **kwargs)
def parse_examples_to_dataset(self):
if not self.examples:
logging.info("self.examples is empty or None, skipped.")
return None
input_ids, token_type_ids, attention_mask, labels = [], [], [], []
for e in self.examples:
input_ids.append(e.input_ids)
token_type_ids.append(e.token_type_ids)
attention_mask.append(e.attention_mask)
labels.append(e.label_ids)
# parse examples to dataset
def _to_dataset(x, dtype=tf.int32):
x = tf.ragged.constant(x, dtype=dtype)
d = tf.data.Dataset.from_tensor_slices(x)
d = d.map(lambda x: x)
return d
dataset = tf.data.Dataset.zip(
(
_to_dataset(input_ids),
_to_dataset(token_type_ids),
_to_dataset(attention_mask),
_to_dataset(labels),
)
)
return dataset
def _filter(self, dataset: tf.data.Dataset, do_filer=True, max_sequence_length=512, **kwargs) -> tf.data.Dataset:
if not do_filer:
return dataset
dataset = dataset.filter(lambda a, b, c, y: tf.size(a) <= max_sequence_length)
return dataset
def _to_dict(self, dataset: tf.data.Dataset, to_dict=True, **kwargs) -> tf.data.Dataset:
num_parallel_calls = kwargs.get("num_parallel_calls", utils.AUTOTUNE)
if not to_dict:
dataset = dataset.map(
lambda a, b, c, y: ((a, b, c), y),
num_parallel_calls=num_parallel_calls,
)
return dataset
dataset = dataset.map(
lambda a, b, c, y: ({self.input_ids: a, self.token_type_ids: b, self.attention_mask: c}, {self.labels: y}),
num_parallel_calls=num_parallel_calls,
).prefetch(kwargs.get("buffer_size", utils.AUTOTUNE))
return dataset
def _fixed_padding(self, dataset: tf.data.Dataset, pad_id=0, max_sequence_length=512, **kwargs) -> tf.data.Dataset:
maxlen = tf.constant(max_sequence_length, dtype=tf.int32)
pad_id = tf.constant(pad_id, dtype=tf.int32)
# fmt: off
padded_shapes = kwargs.get("padded_shapes", ([maxlen, ], [maxlen, ], [maxlen, ], [maxlen, ]))
padding_values = kwargs.get("padding_values", (pad_id, pad_id, pad_id, pad_id))
# fmt: on
dataset = utils.batching_and_padding(dataset, padded_shapes, padding_values, **kwargs)
return dataset
def _batch_padding(self, dataset: tf.data.Dataset, pad_id=0, **kwargs) -> tf.data.Dataset:
pad_id = tf.constant(pad_id, dtype=tf.int32)
# fmt: off
padded_shapes = kwargs.get("padded_shapes", ([None, ], [None, ], [None, ], [None, ]))
padding_values = kwargs.get("padding_values", (pad_id, pad_id, pad_id, pad_id))
# fmt: on
dataset = utils.batching_and_padding(dataset, padded_shapes, padding_values, **kwargs)
return dataset
def _bucket_padding(self, dataset: tf.data.Dataset, pad_id=0, **kwargs) -> tf.data.Dataset:
pad_id = tf.constant(pad_id, dtype=tf.int32)
# fmt: off
padded_shapes = kwargs.get("padded_shapes", ([None, ], [None, ], [None, ], [None, ]))
padding_values = kwargs.get("padding_values", (pad_id, pad_id, pad_id, pad_id))
# fmt: on
dataset = utils.bucketing_and_padding(
dataset,
bucket_fn=lambda a, b, c, y: tf.size(a),
padded_shapes=padded_shapes,
padding_values=padding_values,
**kwargs,
)
return dataset
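    # Summary of the padding strategies above: _fixed_padding pads every field to
    # max_sequence_length, _batch_padding pads to the longest sequence in each
    # batch (padded shape [None]), and _bucket_padding additionally groups
    # examples of similar tf.size(input_ids) into buckets before padding so that
    # batches carry little padding.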
| [
"tensorflow.sparse.to_dense",
"tensorflow.constant",
"tensorflow.ragged.constant",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.io.VarLenFeature",
"tensorflow.io.parse_example",
"tensorflow.size"
] | rapidnlp_datasets/tf/token_classification_dataset.py | [(95, 'tensorflow.constant', 'tf.constant', (['max_sequence_length'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.constant', 'tf.constant', (['pad_id'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.constant', 'tf.constant', (['pad_id'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (114, 'tensorflow.constant', 'tf.constant', (['pad_id'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (25, 'tensorflow.io.VarLenFeature', 'tf.io.VarLenFeature', (['tf.int64'], {}), True, 'import tensorflow as tf\n'), (26, 'tensorflow.io.VarLenFeature', 'tf.io.VarLenFeature', (['tf.int64'], {}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.io.VarLenFeature', 'tf.io.VarLenFeature', (['tf.int64'], {}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.io.VarLenFeature', 'tf.io.VarLenFeature', (['tf.int64'], {}), True, 'import tensorflow as tf\n'), (48, 'logging.info', 'logging.info', (['"""self.examples is empty or None, skipped."""'], {}), False, 'import logging\n'), (59, 'tensorflow.ragged.constant', 'tf.ragged.constant', (['x'], {'dtype': 'dtype'}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['x'], {}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.size', 'tf.size', (['a'], {}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.size', 'tf.size', (['a'], {}), True, 'import tensorflow as tf\n'), (31, 'tensorflow.io.parse_example', 'tf.io.parse_example', (['x', 'features'], {}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['x[d.input_ids]'], {}), True, 'import tensorflow as tf\n'), (37, 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['x[d.token_type_ids]'], {}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['x[d.attention_mask]'], {}), True, 'import tensorflow as tf\n'), (39, 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['x[d.labels]'], {}), True, 'import tensorflow as tf\n')] |
lianyfei/bert-utils | 5de95a459146482a27deae36464e95a24dfe2bcf | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_string(
"export_dir", None,
"The dir where the exported model will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_bool(
"do_export", False,
"Whether to export the model.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
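# For example (illustrative values), a single-sentence classification example
# produced by the processors below looks like
# InputExample(guid="train-1", text_a="the service was great", text_b=None, label="1").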
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class ChineseDataProcessor(DataProcessor):
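  # Generic processor for a single-sentence Chinese classification TSV data set:
  # the text is read from the last column, the binary label ("0"/"1") from
  # column 1, and test examples get a placeholder label of "0".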
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')
def get_labels(self):
return ['0', '1']
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = '%s-%s' % (set_type, i)
if set_type == 'test':
text_a = tokenization.convert_to_unicode(line[-1])
label = '0'
else:
text_a = tokenization.convert_to_unicode(line[-1])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
  # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id)
return feature
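# A worked illustration of the feature layout produced above (the wordpiece ids
# are hypothetical). For text_a = "is this jacksonville ?",
# text_b = "no it is not ." and max_seq_length = 16, the features look like:
#   tokens:      [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
#   segment_ids:   0   0   0    0    0     0      0   0   1  1  1   1  1   1   0 0
#   input_mask:    1   1   1    1    1     1      1   1   1  1  1   1  1   1   0 0
# The 14 real positions are followed by 2 zero-padded positions, so that
# len(input_ids) == len(input_mask) == len(segment_ids) == max_seq_length.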
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
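  # Close the writer so that the final records are flushed to disk.
  writer.close()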
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
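# Note on `drop_remainder`: TPUs require static shapes, so dropping the final
# short batch keeps every batch exactly `batch_size` examples during training.
# For CPU/GPU eval and prediction, main() below passes False so that no
# examples are silently discarded.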
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
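# A worked illustration of the truncation heuristic above: with
# len(tokens_a) == 6, len(tokens_b) == 5 and max_length == 9, one token is
# popped from the longer list at a time (tokens_a -> 5, then tokens_b -> 4),
# leaving 5 + 4 == 9 tokens in total.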
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
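# Shape summary for the tensors returned above: `logits` and `probabilities`
# are [batch_size, num_labels], `per_example_loss` is [batch_size], and `loss`
# is a scalar (the mean cross-entropy over the batch).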
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(label_ids, predictions)
loss = tf.metrics.mean(per_example_loss)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, predictions=probabilities, scaffold_fn=scaffold_fn)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def serving_input_fn():
label_ids = tf.placeholder(tf.int32, [None], name='label_ids')
input_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_ids')
input_mask = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_mask')
segment_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='segment_ids')
input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
'label_ids': label_ids,
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids,
})()
return input_fn
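# Illustrative sketch (assumption: TF 1.x with tf.contrib available) of one way
# to exercise a SavedModel exported by `estimator.export_savedmodel` in main()
# below. The directory and feature values are placeholders; `features` must use
# the same keys and shapes declared by serving_input_fn above. This helper is
# not called anywhere in this script.
def example_query_exported_model(saved_model_dir, features):
  """Loads an exported SavedModel and runs a single prediction (illustrative)."""
  from tensorflow.contrib import predictor  # TF 1.x only
  predict_fn = predictor.from_saved_model(saved_model_dir)
  # e.g. features = {"input_ids": [[101, 102] + [0] * (FLAGS.max_seq_length - 2)],
  #                  "input_mask": [[1, 1] + [0] * (FLAGS.max_seq_length - 2)],
  #                  "segment_ids": [[0] * FLAGS.max_seq_length],
  #                  "label_ids": [0]}
  return predict_fn(features)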
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
"chinese": ChineseDataProcessor,
}
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d", len(eval_examples))
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
# Eval will be slightly WRONG on the TPU because it will truncate
# the last batch.
eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d", len(predict_examples))
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
if FLAGS.use_tpu:
      # Warning: according to tpu_estimator.py, prediction on TPU is an
      # experimental feature and hence is not supported here.
raise ValueError("Prediction in TPU not supported")
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
tf.logging.info("***** Predict results *****")
for prediction in result:
output_line = "\t".join(
str(class_probability) for class_probability in prediction) + "\n"
writer.write(output_line)
if FLAGS.do_export:
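    # Disabling the TPUEstimator's private `_export_to_tpu` attribute is a
    # common workaround so that `export_savedmodel` writes a CPU/GPU-servable
    # graph instead of attempting a TPU rewrite of the serving graph.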
estimator._export_to_tpu = False
estimator.export_savedmodel(FLAGS.export_dir, serving_input_fn)
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run() | [
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.metrics.accuracy",
"tensorflow.FixedLenFeature",
"tensorflow.nn.log_softmax",
"tensorflow.estimator.export.build_raw_serving_input_receiver_fn",
"tensorflow.reduce_sum",
"tensorflow.gfile.GFile",
"tensorflow.train.init_from_checkpoint",
"tensorflow.gfile.MakeDirs",
"tensorflow.to_int32",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.data.TFRecordDataset",
"tensorflow.truncated_normal_initializer",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.logging.set_verbosity",
"tensorflow.trainable_variables",
"tensorflow.parse_single_example",
"tensorflow.argmax",
"tensorflow.app.run",
"tensorflow.nn.dropout",
"tensorflow.metrics.mean",
"tensorflow.matmul",
"tensorflow.gfile.Open",
"tensorflow.zeros_initializer",
"tensorflow.placeholder",
"tensorflow.logging.info",
"tensorflow.one_hot",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.train.Features",
"tensorflow.nn.bias_add",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.train.Scaffold",
"tensorflow.reduce_mean",
"tensorflow.flags.DEFINE_string",
"tensorflow.variable_scope"
] | run_classifier_exporter.py | [(110, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_name"""', 'None', '"""The Cloud TPU to use for training. This should be either the name used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url."""'], {}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_zone"""', 'None', '"""[Optional] GCE zone where the Cloud TPU is located in. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""gcp_project"""', 'None', '"""[Optional] Project name for the Cloud TPU-enabled project. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""master"""', 'None', '"""[Optional] TensorFlow master URL."""'], {}), True, 'import tensorflow as tf\n'), (495, 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['output_file'], {}), True, 'import tensorflow as tf\n'), (585, 'modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'is_training': 'is_training', 'input_ids': 'input_ids', 'input_mask': 'input_mask', 'token_type_ids': 'segment_ids', 'use_one_hot_embeddings': 'use_one_hot_embeddings'}), False, 'import modeling\n'), (783, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""label_ids"""'}), True, 'import tensorflow as tf\n'), (784, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, FLAGS.max_seq_length]'], {'name': '"""input_ids"""'}), True, 'import tensorflow as tf\n'), (785, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, FLAGS.max_seq_length]'], {'name': '"""input_mask"""'}), True, 'import tensorflow as tf\n'), (786, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, FLAGS.max_seq_length]'], {'name': '"""segment_ids"""'}), True, 'import tensorflow as tf\n'), (797, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), True, 'import tensorflow as tf\n'), (811, 'modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['FLAGS.bert_config_file'], {}), False, 'import modeling\n'), (819, 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.output_dir'], {}), True, 'import tensorflow as tf\n'), (830, 'tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'FLAGS.vocab_file', 'do_lower_case': 'FLAGS.do_lower_case'}), False, 'import tokenization\n'), (870, 'tensorflow.contrib.tpu.TPUEstimator', 'tf.contrib.tpu.TPUEstimator', ([], {'use_tpu': 'FLAGS.use_tpu', 'model_fn': 'model_fn', 'config': 'run_config', 'train_batch_size': 'FLAGS.train_batch_size', 'eval_batch_size': 'FLAGS.eval_batch_size', 'predict_batch_size': 'FLAGS.predict_batch_size'}), True, 'import tensorflow as tf\n'), (971, 'tensorflow.app.run', 'tf.app.run', ([], {}), True, 'import tensorflow as tf\n'), (474, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Example ***"""'], {}), True, 'import tensorflow as tf\n'), (475, 'tensorflow.logging.info', 'tf.logging.info', (["('guid: %s' % example.guid)"], {}), True, 'import tensorflow as tf\n'), (481, 'tensorflow.logging.info', 'tf.logging.info', (["('label: %s (id = %d)' % (example.label, label_id))"], {}), True, 'import tensorflow as tf\n'), (508, 
'collections.OrderedDict', 'collections.OrderedDict', ([], {}), False, 'import collections\n'), (523, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (524, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (525, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (526, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (531, 'tensorflow.parse_single_example', 'tf.parse_single_example', (['record', 'name_to_features'], {}), True, 'import tensorflow as tf\n'), (549, 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_file'], {}), True, 'import tensorflow as tf\n'), (609, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), True, 'import tensorflow as tf\n'), (614, 'tensorflow.matmul', 'tf.matmul', (['output_layer', 'output_weights'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (615, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {}), True, 'import tensorflow as tf\n'), (616, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (617, 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (619, 'tensorflow.one_hot', 'tf.one_hot', (['labels'], {'depth': 'num_labels', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (622, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_example_loss'], {}), True, 'import tensorflow as tf\n'), (635, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Features ***"""'], {}), True, 'import tensorflow as tf\n'), (650, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (666, 'tensorflow.logging.info', 'tf.logging.info', (['"""**** Trainable Variables ****"""'], {}), True, 'import tensorflow as tf\n'), (787, 'tensorflow.estimator.export.build_raw_serving_input_receiver_fn', 'tf.estimator.export.build_raw_serving_input_receiver_fn', (["{'label_ids': label_ids, 'input_ids': input_ids, 'input_mask': input_mask,\n 'segment_ids': segment_ids}"], {}), True, 'import tensorflow as tf\n'), (835, 'tensorflow.contrib.cluster_resolver.TPUClusterResolver', 'tf.contrib.cluster_resolver.TPUClusterResolver', (['FLAGS.tpu_name'], {'zone': 'FLAGS.tpu_zone', 'project': 'FLAGS.gcp_project'}), True, 'import tensorflow as tf\n'), (879, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""train.tf_record"""'], {}), False, 'import os\n'), (882, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running training *****"""'], {}), True, 'import tensorflow as tf\n'), (884, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.train_batch_size'], {}), True, 'import tensorflow as tf\n'), (885, 'tensorflow.logging.info', 'tf.logging.info', (['""" Num steps = %d"""', 'num_train_steps'], {}), True, 'import tensorflow as tf\n'), (895, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval.tf_record"""'], {}), False, 'import os\n'), (899, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running evaluation *****"""'], {}), True, 'import tensorflow as tf\n'), (901, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.eval_batch_size'], {}), True, 'import tensorflow as 
tf\n'), (921, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval_results.txt"""'], {}), False, 'import os\n'), (930, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""predict.tf_record"""'], {}), False, 'import os\n'), (935, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running prediction*****"""'], {}), True, 'import tensorflow as tf\n'), (937, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.predict_batch_size'], {}), True, 'import tensorflow as tf\n'), (953, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""test_results.tsv"""'], {}), False, 'import os\n'), (188, 'tensorflow.gfile.Open', 'tf.gfile.Open', (['input_file', '"""r"""'], {}), True, 'import tensorflow as tf\n'), (189, 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""', 'quotechar': 'quotechar'}), False, 'import csv\n'), (205, 'os.path.join', 'os.path.join', (['data_dir', '"""multinli"""', "('multinli.train.%s.tsv' % self.language)"], {}), False, 'import os\n'), (212, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'), (213, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'), (214, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[2]'], {}), False, 'import tokenization\n'), (223, 'os.path.join', 'os.path.join', (['data_dir', '"""xnli.dev.tsv"""'], {}), False, 'import os\n'), (229, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'), (232, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[6]'], {}), False, 'import tokenization\n'), (233, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[7]'], {}), False, 'import tokenization\n'), (234, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'), (274, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[8]'], {}), False, 'import tokenization\n'), (275, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[9]'], {}), False, 'import tokenization\n'), (314, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[3]'], {}), False, 'import tokenization\n'), (315, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[4]'], {}), False, 'import tokenization\n'), (604, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.02)'}), True, 'import tensorflow as tf\n'), (607, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (612, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['output_layer'], {'keep_prob': '(0.9)'}), True, 'import tensorflow as tf\n'), (621, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(one_hot_labels * log_probs)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (637, 'tensorflow.logging.info', 'tf.logging.info', (["(' name = %s, shape = %s' % (name, features[name].shape))"], {}), True, 'import tensorflow as tf\n'), (655, 'modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), False, 'import modeling\n'), (671, 'tensorflow.logging.info', 'tf.logging.info', (['""" name = %s, shape = %s%s"""', 'var.name', 'var.shape', 'init_string'], {}), True, 'import tensorflow as tf\n'), (677, 
'optimization.create_optimizer', 'optimization.create_optimizer', (['total_loss', 'learning_rate', 'num_train_steps', 'num_warmup_steps', 'use_tpu'], {}), False, 'import optimization\n'), (680, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'train_op': 'train_op', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (844, 'tensorflow.contrib.tpu.TPUConfig', 'tf.contrib.tpu.TPUConfig', ([], {'iterations_per_loop': 'FLAGS.iterations_per_loop', 'num_shards': 'FLAGS.num_tpu_cores', 'per_host_input_for_training': 'is_per_host'}), True, 'import tensorflow as tf\n'), (922, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['output_eval_file', '"""w"""'], {}), True, 'import tensorflow as tf\n'), (923, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Eval results *****"""'], {}), True, 'import tensorflow as tf\n'), (954, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['output_predict_file', '"""w"""'], {}), True, 'import tensorflow as tf\n'), (955, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Predict results *****"""'], {}), True, 'import tensorflow as tf\n'), (215, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['"""contradictory"""'], {}), False, 'import tokenization\n'), (216, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['"""contradiction"""'], {}), False, 'import tokenization\n'), (230, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['self.language'], {}), False, 'import tokenization\n'), (250, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'), (255, 'os.path.join', 'os.path.join', (['data_dir', '"""dev_matched.tsv"""'], {}), False, 'import os\n'), (261, 'os.path.join', 'os.path.join', (['data_dir', '"""test_matched.tsv"""'], {}), False, 'import os\n'), (279, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[-1]'], {}), False, 'import tokenization\n'), (291, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'), (296, 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), False, 'import os\n'), (301, 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), False, 'import os\n'), (319, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'), (331, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'), (336, 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), False, 'import os\n'), (341, 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), False, 'import os\n'), (356, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'), (359, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[3]'], {}), False, 'import tokenization\n'), (360, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'), (369, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'), (373, 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), False, 'import os\n'), (377, 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), False, 'import os\n'), (389, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[-1]'], {}), False, 'import 
tokenization\n'), (392, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[-1]'], {}), False, 'import tokenization\n'), (393, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'), (514, 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'features'}), True, 'import tensorflow as tf\n'), (538, 'tensorflow.to_int32', 'tf.to_int32', (['t'], {}), True, 'import tensorflow as tf\n'), (664, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (697, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'eval_metrics': 'eval_metrics', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (703, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'predictions': 'probabilities', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (737, 'tensorflow.constant', 'tf.constant', (['all_input_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (741, 'tensorflow.constant', 'tf.constant', (['all_input_mask'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (746, 'tensorflow.constant', 'tf.constant', (['all_segment_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (751, 'tensorflow.constant', 'tf.constant', (['all_label_ids'], {'shape': '[num_examples]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (273, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'), (659, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (660, 'tensorflow.train.Scaffold', 'tf.train.Scaffold', ([], {}), True, 'import tensorflow as tf\n'), (688, 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(-1)', 'output_type': 'tf.int32'}), True, 'import tensorflow as tf\n'), (689, 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', (['label_ids', 'predictions'], {}), True, 'import tensorflow as tf\n'), (690, 'tensorflow.metrics.mean', 'tf.metrics.mean', (['per_example_loss'], {}), True, 'import tensorflow as tf\n'), (477, 'tokenization.printable_text', 'tokenization.printable_text', (['x'], {}), False, 'import tokenization\n')] |
sanket-kamthe/gptf | 7db86b8a608f9ca45548c4e2c9fcb5f48daf9187 | # -*- encoding: utf-8 -*-
"""Provides base classes for models of all kinds."""
from builtins import super, range
from future.utils import with_metaclass
from abc import ABCMeta, abstractmethod
import numpy as np
import tensorflow as tf
from tensorflow.contrib.opt import ScipyOptimizerInterface
from scipy.optimize import OptimizeResult
from . import tfhacks, utils
from .params import Parameterized, ParamAttributes, DataHolder, autoflow
from .wrappedtf import tf_method
class Model(with_metaclass(ABCMeta, Parameterized)):
"""Base class for models.
Inheriting classes must define `.build_log_likelihood(self)`.
`Param` and `Parameterized` objects that are children of the model
    can be used in the TensorFlow expression. Children of the model are
defined like so:
>>> from overrides import overrides
>>> from gptf import Param, ParamAttributes
>>> class Example(Model, ParamAttributes):
... def __init__(self):
... super().__init__()
... self.x = Param(1.) # create new Param child
...
... @tf_method()
... @overrides
... def build_log_likelihood(self, X, Y):
... return 3 - self.x.tensor # use Param in expression
The `.optimize` method can be used to optimize the parameters of the
model to minimise the likelihood. The loss function (the negative of
the sum of the likelihood and any priors) is cached in the WrappedTF
cache, and lazily recompiled when the cache is cleared, e.g. on
recompile.
"""
@abstractmethod
def build_log_likelihood(self, X, Y):
"""Builds the log likelihood of the model w.r.t. the data.
Args:
X (tf.Tensor): The training inputs.
Y (tf.Tensor): The training outputs.
Returns:
(tf.Tensor): A tensor that, when run, calculates the log
likelihood of the model.
"""
NotImplemented
@tf_method()
def build_log_prior(self):
NotImplemented
@autoflow((tf.float64, [None, None]), (tf.float64, [None, None]))
def compute_log_likelihood(self, X, Y):
"""Computes the likelihood of the model w.r.t. the data.
Returns:
(np.ndarray): The log likelihood of the model.
"""
return self.build_log_likelihood(X, Y)
@autoflow()
def compute_log_prior(self):
NotImplemented
@tf_method(cache=False)
def optimize(self, X, Y, method='L-BFGS-B', callback=None,
maxiter=1000, **kw):
"""Optimize the model by maximising the log likelihood.
Maximises the sum of the log likelihood given X & Y and any
priors with respect to any free variables.
Args:
X (np.ndarray | tf.Tensor): The training inputs.
Y (np.ndarray | tf.Tensor): The training outputs.
method (tf.train.Optimizer | str): The means by which to
optimise. If `method` is a string, it will be passed as
the `method` argument to the initialiser of
`tf.contrib.opt.ScipyOptimizerInterface`. Else, it
will be treated as an instance of `tf.train.Optimizer`
and its `.minimize()` method will be used as the training
step.
callback (Callable[[np.ndarray], ...]): A function that will
be called at each optimization step with the current value
of the variable vector (a vector constructed by flattening
the free state of each free `Param` and then concatenating
                them in the order the `Param`\ s are returned by `.params`).
maxiter (int): The maximum number of iterations of the optimizer.
**kw: Additional keyword arguments are passed through to the
optimizer.
Returns:
(scipy.OptimizeResult) The result of the optimisation.
Examples:
Let's construct a very simple model for demonstration
purposes. It has two (scalar) parameters, `.a` and `.b`,
which are constrained to be positive, and its likelihood is
`10 - a - b`, regardless of X and Y.
>>> import numbers
>>> import numpy as np
>>> from overrides import overrides
>>> from gptf import Param, ParamAttributes, transforms
>>> class Example(Model, ParamAttributes):
... def __init__(self, a, b):
... assert isinstance(a, numbers.Number)
... assert isinstance(b, numbers.Number)
... super().__init__()
... self.a = Param(a, transform=transforms.Exp(0.))
... self.b = Param(b, transform=transforms.Exp(0.))
... @tf_method()
... @overrides
... def build_log_likelihood(self, X, Y):
... return 10. - self.a.tensor - self.b.tensor
We won't care about the values of X and Y.
>>> X = np.array(0.)
>>> Y = np.array(0.)
.. rubric:: TensorFlow optimizers
We can optimise the parameters of the model using a TensorFlow
optimizer like so:
>>> m = Example(3., 4.)
>>> opt = tf.train.GradientDescentOptimizer(learning_rate=1)
>>> m.optimize(X, Y, opt) # use None for X, Y
message: 'Finished iterations.'
success: True
x: array([..., ...])
After the optimisation, both parameters are optimised
towards 0, but are still positive. The constraints on the
parameters have been respected.
>>> print("m.a: {:.3f}".format(np.asscalar(m.a.value)))
m.a: 0.001
>>> print("m.b: {:.3f}".format(np.asscalar(m.b.value)))
m.b: 0.001
If we fix a parameter, it is not optimized:
>>> m.a = 5.
>>> m.b = 1.
>>> m.b.fixed = True
>>> m.optimize(X, Y, opt)
message: 'Finished iterations.'
success: True
x: array([...])
>>> print("m.a: {:.3f}".format(np.asscalar(m.a.value)))
m.a: 0.001
>>> print("m.b: {:.3f}".format(np.asscalar(m.b.value)))
m.b: 1.000
.. rubric:: SciPy optimizers
We can optimise the parameters of the model using a SciPy
optimizer by provided a string value for `method`:
>>> m = Example(3., 4.)
>>> m.optimize(X, Y, 'L-BFGS-B', disp=False, ftol=.0001)
message: 'SciPy optimizer completed successfully.'
success: True
x: array([..., ...])
As for TensorFlow optimizers, after the optimisation both
parameters are optimised towards 0, but are still positive.
The constraints on the parameters have been respected.
>>> print("m.a: {:.3f}".format(np.asscalar(m.a.value)))
m.a: 0.000
>>> print("m.b: {:.3f}".format(np.asscalar(m.b.value)))
m.b: 0.000
If we fix a parameter, it is not optimized:
>>> m.a = 5.
>>> m.b = 1.
>>> m.b.fixed = True
>>> m.optimize(X, Y, 'L-BFGS-B', disp=False, ftol=.0001)
message: 'SciPy optimizer completed successfully.'
success: True
x: array([...])
>>> print("m.a: {:.3f}".format(np.asscalar(m.a.value)))
m.a: 0.000
>>> print("m.b: {:.3f}".format(np.asscalar(m.b.value)))
m.b: 1.000
.. rubric:: Miscellaneous
Optimisation still works, even with weird device contexts and
session targets.
>>> # set up a distributed execution environment
>>> clusterdict = \\
... { 'worker': ['localhost:2226']
... , 'master': ['localhost:2227']
... }
>>> spec = tf.train.ClusterSpec(clusterdict)
>>> worker = tf.train.Server(spec, job_name='worker', task_index=0)
>>> worker.start()
>>> master = tf.train.Server(spec, job_name='master', task_index=0)
>>> # change m's device context
>>> # we're about to do weird things with op placement, and we
>>> # don't want it in the default graph where it can mess with
>>> # other doctests, so change m's tf_graph as well.
>>> m.tf_graph = tf.Graph()
>>> m.tf_device = '/job:worker/task:0'
>>> m.tf_session_target = master.target
TensorFlow:
>>> m.a = 4.5
>>> m.optimize(X, Y, opt)
message: 'Finished iterations.'
success: True
x: array([...])
>>> print("m.a: {:.3f}".format(np.asscalar(m.a.value)))
m.a: 0.001
>>> print("m.b: {:.3f}".format(np.asscalar(m.b.value)))
m.b: 1.000
SciPy:
>>> m.a = 4.5
>>> m.optimize(X, Y, 'L-BFGS-B', disp=False, ftol=.0001)
message: 'SciPy optimizer completed successfully.'
success: True
x: array([...])
>>> print("m.a: {:.3f}".format(np.asscalar(m.a.value)))
m.a: 0.001
>>> print("m.b: {:.3f}".format(np.asscalar(m.b.value)))
m.b: 1.000
"""
X_key = X if isinstance(X, tf.Tensor) else None
Y_key = Y if isinstance(Y, tf.Tensor) else None
key = ("_Model__loss", X_key, Y_key)
if key not in self.cache:
X_tensor = (X if isinstance(X, tf.Tensor) else
tf.placeholder(tf.as_dtype(X.dtype)))
Y_tensor = (Y if isinstance(Y, tf.Tensor) else
tf.placeholder(tf.as_dtype(Y.dtype)))
self.cache[key] = (self._compile_loss(X_tensor, Y_tensor),
X_tensor, Y_tensor)
loss, X_tensor, Y_tensor = self.cache[key]
feed_dict = self.feed_dict
if not isinstance(X, tf.Tensor): feed_dict[X_tensor] = X
if not isinstance(Y, tf.Tensor): feed_dict[Y_tensor] = Y
variables = [p.free_state for p in self.params if not p.fixed]
variables = utils.unique(variables)
free_state = tf.concat(0, [tf.reshape(v, [-1]) for v in variables])
with self.get_session() as sess:
try:
if type(method) is str:
success_msg = "SciPy optimizer completed successfully."
options = {'maxiter': maxiter, 'disp': True}
options.update(kw)
optimizer = ScipyOptimizerInterface(
loss, var_list=variables, method=method,
options=options
)
optimizer.minimize(self.get_session(), feed_dict,
step_callback=callback)
else:
# treat method as TensorFlow optimizer.
success_msg = "Finished iterations."
opt_step = method.minimize(loss, var_list=variables, **kw)
for _ in range(maxiter):
sess.run(opt_step, feed_dict=feed_dict)
if callback is not None:
callback(sess.run(free_state))
except KeyboardInterrupt:
return OptimizeResult\
( x=sess.run(free_state)
, success=False
, message="Keyboard interrupt."
)
return OptimizeResult\
( x=sess.run(free_state)
, success=True
, message=success_msg
)
def _compile_loss(self, X, Y):
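        # Note: only the negative log likelihood is used as the loss here;
        # priors (cf. `build_log_prior`) are not currently added, although the
        # class docstring describes the loss as the negative sum of the
        # likelihood and any priors.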
return -self.build_log_likelihood(X, Y)
class GPModel(Model):
"""A base class for Guassian Process models.
A Gaussian process model is a model of the form
.. math::
θ ~ p(θ)
f ~ GP(m(x), k(x, x'; θ))
F = f(X)
Y|F ~ p(Y|F)
    Adds functionality to compile various predictions. Inheriting
    classes must define `.build_prior_mean_var()` and
    `.build_posterior_mean_var()`, which are then used by this
    class's methods to provide various predictions. The mean and
variance are pushed through the likelihood to obtain the means and
variances of held out data.
"""
@abstractmethod
def build_prior_mean_var(self, test_points, num_latent, full_cov=False):
"""Builds an op for the mean and variance of the prior(s).
In the returned tensors, the last index should always be the
latent function index.
Args:
test_points (tf.Tensor): The points from the sample
space for which to predict means and variances
of the prior distribution(s). The shape should be
`[m, point_dims]`.
num_latent (tf.int32): The number of latent functions of
the GP.
full_cov (bool): If `False`, return an array of variances
at the test points. If `True`, return the full
covariance matrix of the posterior distribution.
Returns:
(tf.Tensor, tf.Tensor): A tensor that calculates the mean
at the test points with shape `[m, num_latent]`, a tensor
that calculates either the variances at the test points
(shape `[m, num_latent]`) or the full covariance matrix
(shape `[m, m, num_latent]`).
Both tensors have the same dtype.
"""
NotImplemented
@abstractmethod
def build_posterior_mean_var(self, X, Y, test_points, full_cov=False):
"""Builds an op for the mean and variance of the posterior(s).
In the returned tensors, the last index should always be the
latent function index.
Args:
X (tf.Tensor): The training inputs, shape `[n, point_dims]`
Y (tf.Tensor): The training outputs, shape `[n, num_latent]`
test_points (tf.Tensor): The points from the sample
space for which to predict means and variances
of the posterior distribution(s), shape
`[m, point_dims]`.
full_cov (bool): If `False`, return an array of variances
at the test points. If `True`, return the full
covariance matrix of the posterior distribution.
Returns:
(tf.Tensor, tf.Tensor): A tensor that calculates the mean
at the test points with shape `[m, num_latent]`, a tensor
that calculates either the variances at the test points
(shape `[m, num_latent]`) or the full covariance matrix
(shape `[m, m, num_latent]`).
Both tensors have the same dtype.
"""
NotImplemented
@autoflow((tf.float64, [None, None]), (tf.int32, []))
def compute_prior_mean_var(self, test_points, num_latent):
"""Computes the means and variances of the prior(s).
This is just an autoflowed version of
`.build_prior_mean_var(test_points, num_latent)`.
Args:
test_points (np.ndarray): The points from the sample
space for which to predict means and variances
of the prior distribution(s). The shape should be
`[m, point_dims]`.
num_latent (int): The number of latent functions of the GP.
Returns:
(np.ndarray, np.ndarray): the mean at the test points
(shape `[m, num_latent]`), the variances at the test
points (shape `[m, num_latent]`).
"""
return self.build_prior_mean_var(test_points, num_latent, False)
@autoflow((tf.float64, [None, None]), (tf.int32, []))
def compute_prior_mean_cov(self, test_points, num_latent):
"""Computes the means and full covariance matrices.
This is just an autoflowed version of
`.build_prior_mean_var(test_points, num_latent, True)`.
Args:
test_points (np.ndarray): The points from the sample
space for which to predict means and variances
of the prior distribution(s). The shape should be
`[m, point_dims]`.
num_latent (int): The number of latent functions of the GP.
Returns:
(np.ndarray, np.ndarray): The means at the test points
(shape `[m, num_latent]`), the full covariance
matri(x|ces) for the prior distribution(s) (shape
            `[m, m, num_latent]`).
"""
return self.build_prior_mean_var(test_points, num_latent, True)
@autoflow((tf.float64, [None, None]), (tf.int32, []), (tf.int32, []))
def compute_prior_samples(self, test_points, num_latent, num_samples):
"""Computes samples from the prior distribution(s).
Args:
test_points (np.ndarray): The points from the sample
space for which to predict means and variances
                of the prior distribution(s), shape
`[m, point_dims]`.
num_latent (int): The number of latent functions of the GP.
num_samples (int): The number of samples to take.
Returns:
(np.ndarray): An array of samples from the prior
distributions, with shape `[num_samples, m, num_latent]`
Examples:
            For testing purposes, we create an example model whose
            likelihood is left unimplemented and whose `.build_prior_mean_var()`
            returns mean `0` and variance `1` for every test point,
            or an identity covariance matrix.
>>> from overrides import overrides
>>> from gptf import ParamAttributes, tfhacks
>>> class Example(GPModel, ParamAttributes):
... def __init__(self, dtype):
... super().__init__()
... self.dtype = dtype
... @property
... def dtype(self):
... return self._dtype
... @dtype.setter
... def dtype(self, value):
... self.clear_cache()
... self._dtype = value
... @tf_method()
... @overrides
... def build_log_likelihood(self):
... NotImplemented
... @tf_method()
... @overrides
... def build_prior_mean_var\\
... (self, test_points, num_latent, full_cov=False):
... n = tf.shape(test_points)[0]
... mu = tf.zeros([n, 1], self.dtype)
... mu = tf.tile(mu, (1, num_latent))
... if full_cov:
... var = tf.expand_dims(tfhacks.eye(n, self.dtype), 2)
... var = tf.tile(var, (1, 1, num_latent))
... else:
... var = tf.ones([n, 1], self.dtype)
... var = tf.tile(var, (1, num_latent))
... return mu, var
... @tf_method()
... @overrides
... def build_posterior_mean_var\\
... (self, X, Y, test_points, full_cov=False):
... NotImplemented
>>> m = Example(tf.float64) # ignore the likelihood
>>> test_points = np.array([[0.], [1.], [2.], [3.]])
The shape of the returned array is `(a, b, c)`, where `a`
is the number of samples, `b` is the number of test points
and `c` is the number of latent functions.
>>> samples = m.compute_prior_samples(test_points, 1, 2)
>>> samples.shape
(2, 4, 1)
`.compute_prior_samples()` respects the dtype of the tensors
            returned by `.build_prior_mean_var()`.
>>> samples.dtype
dtype('float64')
>>> m.dtype = tf.float32
>>> samples = m.compute_prior_samples(test_points, 1, 2)
>>> samples.dtype
dtype('float32')
"""
mu, var = self.build_prior_mean_var(test_points, num_latent, True)
jitter = tfhacks.eye(tf.shape(mu)[0], var.dtype) * 1e-06
L = tf.batch_cholesky(tf.transpose(var, (2, 0, 1)) + jitter)
V_shape = [tf.shape(L)[0], tf.shape(L)[1], num_samples]
V = tf.random_normal(V_shape, dtype=L.dtype)
samples = tf.expand_dims(tf.transpose(mu), -1) + tf.batch_matmul(L, V)
return tf.transpose(samples)
@autoflow((tf.float64, [None, None]), (tf.float64, [None, None]),
(tf.float64, [None, None]))
def compute_posterior_mean_var(self, X, Y, test_points):
"""Computes the means and variances of the posterior(s).
This is just an autoflowed version of
`.build_posterior_mean_var(X, Y, test_points)`.
Args:
X (np.ndarray): The training inputs, shape `[n, point_dims]`
Y (np.ndarray): The training outputs, shape `[n, num_latent]`
test_points (np.ndarray): The points from the sample
space for which to predict means and variances
of the posterior distribution(s), shape
`[m, point_dims]`.
Returns:
(np.ndarray, np.ndarray): The means at the test points
(shape `[m, num_latent]`), the variances at the test points
(shape `[m, num_latent]`).
"""
return self.build_posterior_mean_var(X, Y, test_points, full_cov=False)
@autoflow((tf.float64, [None, None]), (tf.float64, [None, None]),
(tf.float64, [None, None]))
def compute_posterior_mean_cov(self, X, Y, test_points):
"""Computes the means and full covariance matrices.
This is just an autoflowed version of
        `.build_posterior_mean_var(X, Y, test_points, full_cov=True)`.
Args:
X (np.ndarray): The training inputs, shape `[n, point_dims]`
Y (np.ndarray): The training outputs, shape `[n, num_latent]`
test_points (np.ndarray): The points from the sample
space for which to predict means and variances
of the posterior distribution(s), shape
`[m, point_dims]`.
Returns:
(np.ndarray, np.ndarray): The means at the test points
            (shape `[m, num_latent]`), the full covariance
matri(x|ces) for the posterior distribution(s)
(shape `[m, m, num_latent]`).
"""
return self.build_posterior_mean_var(X, Y, test_points, full_cov=True)
@autoflow((tf.float64, [None, None]), (tf.float64, [None, None]),
(tf.float64, [None, None]), (tf.int32, []))
def compute_posterior_samples(self, X, Y, test_points, num_samples):
"""Computes samples from the posterior distribution(s).
Args:
X (np.ndarray): The training inputs, shape `[n, point_dims]`
Y (np.ndarray): The training outputs, shape `[n, num_latent]`
test_points (np.ndarray): The points from the sample
space for which to predict means and variances
of the posterior distribution(s), shape
`[m, point_dims]`.
num_samples (int): The number of samples to take.
Returns:
(np.ndarray): An array of samples from the posterior
distributions, with shape `[num_samples, m, num_latent]`
Examples:
            For testing purposes, we create an example model whose
            likelihood is left unimplemented and whose
            `.build_posterior_mean_var()` returns mean `0` and variance `1`
            for every test point, or an identity covariance matrix.
>>> from overrides import overrides
>>> from gptf import ParamAttributes, tfhacks
>>> class Example(GPModel, ParamAttributes):
... def __init__(self, dtype):
... super().__init__()
... self.dtype = dtype
... @property
... def dtype(self):
... return self._dtype
... @dtype.setter
... def dtype(self, value):
... self.clear_cache()
... self._dtype = value
... @tf_method()
... @overrides
... def build_log_likelihood(self):
... NotImplemented
... @tf_method()
... @overrides
... def build_prior_mean_var\\
... (self, test_points, num_latent, full_cov=False):
... NotImplemented
... @tf_method()
... @overrides
... def build_posterior_mean_var\\
... (self, X, Y, test_points, full_cov=False):
... n = tf.shape(test_points)[0]
... num_latent = tf.shape(Y)[1]
... mu = tf.zeros([n, 1], self.dtype)
... mu = tf.tile(mu, (1, num_latent))
... if full_cov:
... var = tf.expand_dims(tfhacks.eye(n, self.dtype), 2)
... var = tf.tile(var, (1, 1, num_latent))
... else:
... var = tf.ones([n, 1], self.dtype)
... var = tf.tile(var, (1, num_latent))
... return mu, var
>>> m = Example(tf.float64)
>>> X = np.array([[.5]])
>>> Y = np.array([[.3]])
>>> test_points = np.array([[0.], [1.], [2.], [3.]])
The shape of the returned array is `(a, b, c)`, where `a`
is the number of samples, `b` is the number of test points
and `c` is the number of latent functions.
>>> samples = m.compute_posterior_samples(X, Y, test_points, 2)
>>> samples.shape
(2, 4, 1)
`.compute_posterior_samples()` respects the dtype of the tensors
returned by `.build_posterior_mean_var()`.
>>> samples.dtype
dtype('float64')
>>> m.dtype = tf.float32
>>> samples = m.compute_posterior_samples(X, Y, test_points, 2)
>>> samples.dtype
dtype('float32')
"""
mu, var = self.build_posterior_mean_var(X, Y, test_points, True)
jitter = tfhacks.eye(tf.shape(mu)[0], var.dtype) * 1e-06
L = tf.batch_cholesky(tf.transpose(var, (2, 0, 1)) + jitter)
V_shape = [tf.shape(L)[0], tf.shape(L)[1], num_samples]
V = tf.random_normal(V_shape, dtype=L.dtype)
samples = tf.expand_dims(tf.transpose(mu), -1) + tf.batch_matmul(L, V)
return tf.transpose(samples)
#samples = []
#for i in range(self.num_latent_functions):
# L = tf.cholesky(var[:, :, i] + jitter)
# V = tf.random_normal([tf.shape(L)[0], num_samples], dtype=L.dtype)
# samples.append(mu[:, i:i + 1] + tf.matmul(L, V)) # broadcast
#return tf.transpose(tf.pack(samples))
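# NOTE: the sampling above is the standard "colouring" transform -- with L the
# Cholesky factor of the (jittered) covariance and V a [m, num_samples] matrix of
# standard normals per latent function, mu + L @ V is distributed as N(mu, L L^T);
# the final tf.transpose rearranges the result to [num_samples, m, num_latent].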
@autoflow((tf.float64, [None, None]))
def predict_y(self, test_points):
"""Computes the mean and variance of held-out data."""
NotImplemented
@autoflow((tf.float64, [None, None]), (tf.float64, [None, None]))
def predict_density(self, test_points, test_values):
"""Computes the (log) density of the test values at the test points."""
NotImplemented
| [
"tensorflow.batch_matmul",
"tensorflow.transpose",
"tensorflow.contrib.opt.ScipyOptimizerInterface",
"tensorflow.as_dtype",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.random_normal"
] | gptf/core/models.py | [(16, 'future.utils.with_metaclass', 'with_metaclass', (['ABCMeta', 'Parameterized'], {}), False, 'from future.utils import with_metaclass\n'), (515, 'tensorflow.random_normal', 'tf.random_normal', (['V_shape'], {'dtype': 'L.dtype'}), True, 'import tensorflow as tf\n'), (517, 'tensorflow.transpose', 'tf.transpose', (['samples'], {}), True, 'import tensorflow as tf\n'), (657, 'tensorflow.random_normal', 'tf.random_normal', (['V_shape'], {'dtype': 'L.dtype'}), True, 'import tensorflow as tf\n'), (659, 'tensorflow.transpose', 'tf.transpose', (['samples'], {}), True, 'import tensorflow as tf\n'), (516, 'tensorflow.batch_matmul', 'tf.batch_matmul', (['L', 'V'], {}), True, 'import tensorflow as tf\n'), (658, 'tensorflow.batch_matmul', 'tf.batch_matmul', (['L', 'V'], {}), True, 'import tensorflow as tf\n'), (268, 'tensorflow.reshape', 'tf.reshape', (['v', '[-1]'], {}), True, 'import tensorflow as tf\n'), (513, 'tensorflow.transpose', 'tf.transpose', (['var', '(2, 0, 1)'], {}), True, 'import tensorflow as tf\n'), (514, 'tensorflow.shape', 'tf.shape', (['L'], {}), True, 'import tensorflow as tf\n'), (514, 'tensorflow.shape', 'tf.shape', (['L'], {}), True, 'import tensorflow as tf\n'), (516, 'tensorflow.transpose', 'tf.transpose', (['mu'], {}), True, 'import tensorflow as tf\n'), (655, 'tensorflow.transpose', 'tf.transpose', (['var', '(2, 0, 1)'], {}), True, 'import tensorflow as tf\n'), (656, 'tensorflow.shape', 'tf.shape', (['L'], {}), True, 'import tensorflow as tf\n'), (656, 'tensorflow.shape', 'tf.shape', (['L'], {}), True, 'import tensorflow as tf\n'), (658, 'tensorflow.transpose', 'tf.transpose', (['mu'], {}), True, 'import tensorflow as tf\n'), (255, 'tensorflow.as_dtype', 'tf.as_dtype', (['X.dtype'], {}), True, 'import tensorflow as tf\n'), (257, 'tensorflow.as_dtype', 'tf.as_dtype', (['Y.dtype'], {}), True, 'import tensorflow as tf\n'), (276, 'tensorflow.contrib.opt.ScipyOptimizerInterface', 'ScipyOptimizerInterface', (['loss'], {'var_list': 'variables', 'method': 'method', 'options': 'options'}), False, 'from tensorflow.contrib.opt import ScipyOptimizerInterface\n'), (286, 'builtins.range', 'range', (['maxiter'], {}), False, 'from builtins import super, range\n'), (512, 'tensorflow.shape', 'tf.shape', (['mu'], {}), True, 'import tensorflow as tf\n'), (654, 'tensorflow.shape', 'tf.shape', (['mu'], {}), True, 'import tensorflow as tf\n')] |
p328188467/edenas | 82fc62528cb25a228d011f2e30f984969d012882 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import time
import numpy as np
import tensorflow as tf
from src.controller import Controller
from src.utils import get_train_ops
from src.common_ops import stack_lstm
from tensorflow.python.training import moving_averages
class MicroController(Controller):
def __init__(self,
search_for="both",
search_whole_channels=False,
num_branches=6,
num_cells=6,
lstm_size=28,
lstm_num_layers=2,
lstm_keep_prob=1.0,
tanh_constant=None,
op_tanh_reduce=1.0,
temperature=None,
lr_init=1e-3,
lr_dec_start=0,
lr_dec_every=100,
lr_dec_rate=0.9,
l2_reg=0,
entropy_weight=None,
clip_mode=None,
grad_bound=None,
use_critic=False,
bl_dec=0.999,
optim_algo="adam",
sync_replicas=False,
num_aggregate=None,
num_replicas=None,
name="controller",
**kwargs):
print("-" * 80)
print("Building ConvController")
self.search_for = search_for
self.search_whole_channels = search_whole_channels
self.num_cells = num_cells
self.num_branches = num_branches
self.lstm_size = lstm_size
self.lstm_num_layers = lstm_num_layers
self.lstm_keep_prob = lstm_keep_prob
self.tanh_constant = tanh_constant
self.op_tanh_reduce = op_tanh_reduce
self.temperature = temperature
self.lr_init = lr_init
self.lr_dec_start = lr_dec_start
self.lr_dec_every = lr_dec_every
self.lr_dec_rate = lr_dec_rate
self.l2_reg = l2_reg
self.entropy_weight = entropy_weight
self.clip_mode = clip_mode
self.grad_bound = grad_bound
self.use_critic = use_critic
self.bl_dec = bl_dec
self.optim_algo = optim_algo
self.sync_replicas = sync_replicas
self.num_aggregate = num_aggregate
self.num_replicas = num_replicas
self.name = name
self._create_params()
arc_seq_1, entropy_1, log_prob_1, c, h = self._build_sampler(use_bias=True)
arc_seq_2, entropy_2, log_prob_2, _, _ = self._build_sampler(prev_c=c, prev_h=h)
self.sample_arc = (arc_seq_1, arc_seq_2)
self.sample_entropy = entropy_1 + entropy_2
self.sample_log_prob = log_prob_1 + log_prob_2
def _create_params(self):
initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
with tf.variable_scope(self.name, initializer=initializer):
with tf.variable_scope("lstm"):
self.w_lstm = []
for layer_id in range(self.lstm_num_layers):
with tf.variable_scope("layer_{}".format(layer_id)):
w = tf.get_variable("w", [2 * self.lstm_size, 4 * self.lstm_size])
self.w_lstm.append(w)
self.g_emb = tf.get_variable("g_emb", [1, self.lstm_size])
with tf.variable_scope("emb"):
self.w_emb = tf.get_variable("w", [self.num_branches, self.lstm_size])
with tf.variable_scope("softmax"):
self.w_soft = tf.get_variable("w", [self.lstm_size, self.num_branches])
b_init = np.array([10.0, 10.0] + [0] * (self.num_branches - 2),
dtype=np.float32)
self.b_soft = tf.get_variable(
"b", [1, self.num_branches],
initializer=tf.constant_initializer(b_init))
b_soft_no_learn = np.array(
[0.25, 0.25] + [-0.25] * (self.num_branches - 2), dtype=np.float32)
b_soft_no_learn = np.reshape(b_soft_no_learn, [1, self.num_branches])
self.b_soft_no_learn = tf.constant(b_soft_no_learn, dtype=tf.float32)
with tf.variable_scope("attention"):
self.w_attn_1 = tf.get_variable("w_1", [self.lstm_size, self.lstm_size])
self.w_attn_2 = tf.get_variable("w_2", [self.lstm_size, self.lstm_size])
self.v_attn = tf.get_variable("v", [self.lstm_size, 1])
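# NOTE: the constant 10.0 bias on the first two entries of b_soft skews the op
# softmax towards branches 0 and 1 early in training, while b_soft_no_learn is a
# fixed, non-trainable +/-0.25 nudge that is only added when the sampler is built
# with use_bias=True.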
def _build_sampler(self, prev_c=None, prev_h=None, use_bias=False):
"""Build the sampler ops and the log_prob ops."""
print ("-" * 80)
print ("Build controller sampler")
anchors = tf.TensorArray(
tf.float32, size=self.num_cells + 2, clear_after_read=False)
anchors_w_1 = tf.TensorArray(
tf.float32, size=self.num_cells + 2, clear_after_read=False)
arc_seq = tf.TensorArray(tf.int32, size=self.num_cells * 4)
if prev_c is None:
assert prev_h is None, "prev_c and prev_h must both be None"
prev_c = [tf.zeros([1, self.lstm_size], tf.float32)
for _ in range(self.lstm_num_layers)]
prev_h = [tf.zeros([1, self.lstm_size], tf.float32)
for _ in range(self.lstm_num_layers)]
inputs = self.g_emb
for layer_id in range(2):
next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
prev_c, prev_h = next_c, next_h
anchors = anchors.write(layer_id, tf.zeros_like(next_h[-1]))
anchors_w_1 = anchors_w_1.write(
layer_id, tf.matmul(next_h[-1], self.w_attn_1))
def _condition(layer_id, *args):
return tf.less(layer_id, self.num_cells + 2)
def _body(layer_id, inputs, prev_c, prev_h, anchors, anchors_w_1, arc_seq,
entropy, log_prob):
indices = tf.range(0, layer_id, dtype=tf.int32)
start_id = 4 * (layer_id - 2)
prev_layers = []
for i in range(2): # index_1, index_2
next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
prev_c, prev_h = next_c, next_h
query = anchors_w_1.gather(indices)
query = tf.reshape(query, [layer_id, self.lstm_size])
query = tf.tanh(query + tf.matmul(next_h[-1], self.w_attn_2))
query = tf.matmul(query, self.v_attn)
logits = tf.reshape(query, [1, layer_id])
if self.temperature is not None:
logits /= self.temperature
if self.tanh_constant is not None:
logits = self.tanh_constant * tf.tanh(logits)
index = tf.multinomial(logits, 1)
index = tf.to_int32(index)
index = tf.reshape(index, [1])
arc_seq = arc_seq.write(start_id + 2 * i, index)
curr_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=index)
log_prob += curr_log_prob
curr_ent = tf.stop_gradient(tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=tf.nn.softmax(logits)))
entropy += curr_ent
prev_layers.append(anchors.read(tf.reduce_sum(index)))
inputs = prev_layers[-1]
for i in range(2): # op_1, op_2
next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
prev_c, prev_h = next_c, next_h
logits = tf.matmul(next_h[-1], self.w_soft) + self.b_soft
if self.temperature is not None:
logits /= self.temperature
if self.tanh_constant is not None:
op_tanh = self.tanh_constant / self.op_tanh_reduce
logits = op_tanh * tf.tanh(logits)
if use_bias:
logits += self.b_soft_no_learn
op_id = tf.multinomial(logits, 1)
op_id = tf.to_int32(op_id)
op_id = tf.reshape(op_id, [1])
arc_seq = arc_seq.write(start_id + 2 * i + 1, op_id)
curr_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=op_id)
log_prob += curr_log_prob
curr_ent = tf.stop_gradient(tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=tf.nn.softmax(logits)))
entropy += curr_ent
inputs = tf.nn.embedding_lookup(self.w_emb, op_id)
next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
anchors = anchors.write(layer_id, next_h[-1])
anchors_w_1 = anchors_w_1.write(layer_id, tf.matmul(next_h[-1], self.w_attn_1))
inputs = self.g_emb
return (layer_id + 1, inputs, next_c, next_h, anchors, anchors_w_1,
arc_seq, entropy, log_prob)
loop_vars = [
tf.constant(2, dtype=tf.int32, name="layer_id"),
inputs,
prev_c,
prev_h,
anchors,
anchors_w_1,
arc_seq,
tf.constant([0.0], dtype=tf.float32, name="entropy"),
tf.constant([0.0], dtype=tf.float32, name="log_prob"),
]
loop_outputs = tf.while_loop(_condition, _body, loop_vars,
parallel_iterations=1)
arc_seq = loop_outputs[-3].stack()
arc_seq = tf.reshape(arc_seq, [-1])
entropy = tf.reduce_sum(loop_outputs[-2])
log_prob = tf.reduce_sum(loop_outputs[-1])
last_c = loop_outputs[-7]
last_h = loop_outputs[-6]
return arc_seq, entropy, log_prob, last_c, last_h
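# Each cell contributes four integers to the flat architecture sequence --
# (index_1, op_1, index_2, op_2) -- so a sampled arc can be unpacked as in the
# hypothetical helper below (a minimal sketch, not part of the original module):
def _example_decode_arc(arc_seq, num_cells):
  """Hypothetical helper: split a flat arc of length 4 * num_cells into per-cell (index, op) pairs."""
  cells = []
  for cell_id in range(num_cells):
    index_1, op_1, index_2, op_2 = arc_seq[4 * cell_id:4 * cell_id + 4]
    cells.append(((index_1, op_1), (index_2, op_2)))
  return cells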
def build_trainer(self, child_model):
child_model.build_valid_rl()
self.valid_acc = (tf.to_float(child_model.valid_shuffle_acc) /
tf.to_float(child_model.batch_size))
self.reward = self.valid_acc
if self.entropy_weight is not None:
self.reward += self.entropy_weight * self.sample_entropy
self.sample_log_prob = tf.reduce_sum(self.sample_log_prob)
self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False)
baseline_update = tf.assign_sub(
self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward))
with tf.control_dependencies([baseline_update]):
self.reward = tf.identity(self.reward)
self.loss = self.sample_log_prob * (self.reward - self.baseline)
self.train_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="train_step")
tf_variables = [var for var in tf.trainable_variables() if var.name.startswith(self.name)]
print("-" * 80)
for var in tf_variables:
print(var)
self.train_op, self.lr, self.grad_norm, self.optimizer = get_train_ops(
self.loss,
tf_variables,
self.train_step,
clip_mode=self.clip_mode,
grad_bound=self.grad_bound,
l2_reg=self.l2_reg,
lr_init=self.lr_init,
lr_dec_start=self.lr_dec_start,
lr_dec_every=self.lr_dec_every,
lr_dec_rate=self.lr_dec_rate,
optim_algo=self.optim_algo,
sync_replicas=self.sync_replicas,
num_aggregate=self.num_aggregate,
num_replicas=self.num_replicas)
self.skip_rate = tf.constant(0.0, dtype=tf.float32)
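# The controller is trained with REINFORCE against an exponential-moving-average
# baseline. A minimal, hypothetical pure-Python mirror of that update rule (it
# ignores the TF control dependencies used above):
def _example_reinforce_step(baseline, reward, log_prob, bl_dec=0.999):
  """Hypothetical sketch: return the updated baseline and the policy-gradient loss."""
  baseline -= (1 - bl_dec) * (baseline - reward)
  loss = log_prob * (reward - baseline)
  return baseline, loss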
| [
"tensorflow.get_variable",
"tensorflow.control_dependencies",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.tanh",
"tensorflow.to_int32",
"tensorflow.while_loop",
"tensorflow.Variable",
"tensorflow.random_uniform_initializer",
"numpy.reshape",
"tensorflow.to_float",
"tensorflow.trainable_variables",
"tensorflow.matmul",
"tensorflow.less",
"tensorflow.TensorArray",
"tensorflow.identity",
"tensorflow.zeros_like",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.array",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.reshape",
"tensorflow.constant_initializer",
"tensorflow.assign_sub",
"tensorflow.variable_scope",
"tensorflow.multinomial"
] | src/fashion_minst/micro_controller.py | [(86, 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-0.1)', 'maxval': '(0.1)'}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32'], {'size': '(self.num_cells + 2)', 'clear_after_read': '(False)'}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32'], {'size': '(self.num_cells + 2)', 'clear_after_read': '(False)'}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.int32'], {'size': '(self.num_cells * 4)'}), True, 'import tensorflow as tf\n'), (218, 'tensorflow.while_loop', 'tf.while_loop', (['_condition', '_body', 'loop_vars'], {'parallel_iterations': '(1)'}), True, 'import tensorflow as tf\n'), (222, 'tensorflow.reshape', 'tf.reshape', (['arc_seq', '[-1]'], {}), True, 'import tensorflow as tf\n'), (223, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loop_outputs[-2]'], {}), True, 'import tensorflow as tf\n'), (224, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loop_outputs[-1]'], {}), True, 'import tensorflow as tf\n'), (240, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.sample_log_prob'], {}), True, 'import tensorflow as tf\n'), (241, 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'dtype': 'tf.float32', 'trainable': '(False)'}), True, 'import tensorflow as tf\n'), (242, 'tensorflow.assign_sub', 'tf.assign_sub', (['self.baseline', '((1 - self.bl_dec) * (self.baseline - self.reward))'], {}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'dtype': 'tf.int32', 'trainable': '(False)', 'name': '"""train_step"""'}), True, 'import tensorflow as tf\n'), (256, 'src.utils.get_train_ops', 'get_train_ops', (['self.loss', 'tf_variables', 'self.train_step'], {'clip_mode': 'self.clip_mode', 'grad_bound': 'self.grad_bound', 'l2_reg': 'self.l2_reg', 'lr_init': 'self.lr_init', 'lr_dec_start': 'self.lr_dec_start', 'lr_dec_every': 'self.lr_dec_every', 'lr_dec_rate': 'self.lr_dec_rate', 'optim_algo': 'self.optim_algo', 'sync_replicas': 'self.sync_replicas', 'num_aggregate': 'self.num_aggregate', 'num_replicas': 'self.num_replicas'}), False, 'from src.utils import get_train_ops\n'), (272, 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.variable_scope', 'tf.variable_scope', (['self.name'], {'initializer': 'initializer'}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.get_variable', 'tf.get_variable', (['"""g_emb"""', '[1, self.lstm_size]'], {}), True, 'import tensorflow as tf\n'), (136, 'src.common_ops.stack_lstm', 'stack_lstm', (['inputs', 'prev_c', 'prev_h', 'self.w_lstm'], {}), False, 'from src.common_ops import stack_lstm\n'), (143, 'tensorflow.less', 'tf.less', (['layer_id', '(self.num_cells + 2)'], {}), True, 'import tensorflow as tf\n'), (147, 'tensorflow.range', 'tf.range', (['(0)', 'layer_id'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (198, 'src.common_ops.stack_lstm', 'stack_lstm', (['inputs', 'prev_c', 'prev_h', 'self.w_lstm'], {}), False, 'from src.common_ops import stack_lstm\n'), (207, 'tensorflow.constant', 'tf.constant', (['(2)'], {'dtype': 'tf.int32', 'name': '"""layer_id"""'}), True, 'import tensorflow as tf\n'), (214, 'tensorflow.constant', 'tf.constant', (['[0.0]'], {'dtype': 'tf.float32', 'name': '"""entropy"""'}), True, 'import tensorflow as tf\n'), (215, 'tensorflow.constant', 'tf.constant', (['[0.0]'], 
{'dtype': 'tf.float32', 'name': '"""log_prob"""'}), True, 'import tensorflow as tf\n'), (233, 'tensorflow.to_float', 'tf.to_float', (['child_model.valid_shuffle_acc'], {}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.to_float', 'tf.to_float', (['child_model.batch_size'], {}), True, 'import tensorflow as tf\n'), (245, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[baseline_update]'], {}), True, 'import tensorflow as tf\n'), (246, 'tensorflow.identity', 'tf.identity', (['self.reward'], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""lstm"""'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""emb"""'], {}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.get_variable', 'tf.get_variable', (['"""w"""', '[self.num_branches, self.lstm_size]'], {}), True, 'import tensorflow as tf\n'), (98, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""softmax"""'], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.get_variable', 'tf.get_variable', (['"""w"""', '[self.lstm_size, self.num_branches]'], {}), True, 'import tensorflow as tf\n'), (100, 'numpy.array', 'np.array', (['([10.0, 10.0] + [0] * (self.num_branches - 2))'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (106, 'numpy.array', 'np.array', (['([0.25, 0.25] + [-0.25] * (self.num_branches - 2))'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (108, 'numpy.reshape', 'np.reshape', (['b_soft_no_learn', '[1, self.num_branches]'], {}), True, 'import numpy as np\n'), (109, 'tensorflow.constant', 'tf.constant', (['b_soft_no_learn'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (111, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""attention"""'], {}), True, 'import tensorflow as tf\n'), (112, 'tensorflow.get_variable', 'tf.get_variable', (['"""w_1"""', '[self.lstm_size, self.lstm_size]'], {}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.get_variable', 'tf.get_variable', (['"""w_2"""', '[self.lstm_size, self.lstm_size]'], {}), True, 'import tensorflow as tf\n'), (114, 'tensorflow.get_variable', 'tf.get_variable', (['"""v"""', '[self.lstm_size, 1]'], {}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.zeros', 'tf.zeros', (['[1, self.lstm_size]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.zeros', 'tf.zeros', (['[1, self.lstm_size]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.zeros_like', 'tf.zeros_like', (['next_h[-1]'], {}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.matmul', 'tf.matmul', (['next_h[-1]', 'self.w_attn_1'], {}), True, 'import tensorflow as tf\n'), (151, 'src.common_ops.stack_lstm', 'stack_lstm', (['inputs', 'prev_c', 'prev_h', 'self.w_lstm'], {}), False, 'from src.common_ops import stack_lstm\n'), (154, 'tensorflow.reshape', 'tf.reshape', (['query', '[layer_id, self.lstm_size]'], {}), True, 'import tensorflow as tf\n'), (156, 'tensorflow.matmul', 'tf.matmul', (['query', 'self.v_attn'], {}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.reshape', 'tf.reshape', (['query', '[1, layer_id]'], {}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.multinomial', 'tf.multinomial', (['logits', '(1)'], {}), True, 'import tensorflow as tf\n'), (163, 'tensorflow.to_int32', 'tf.to_int32', (['index'], {}), True, 'import tensorflow as tf\n'), (164, 'tensorflow.reshape', 'tf.reshape', (['index', '[1]'], {}), True, 'import tensorflow as tf\n'), (166, 
'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'index'}), True, 'import tensorflow as tf\n'), (176, 'src.common_ops.stack_lstm', 'stack_lstm', (['inputs', 'prev_c', 'prev_h', 'self.w_lstm'], {}), False, 'from src.common_ops import stack_lstm\n'), (186, 'tensorflow.multinomial', 'tf.multinomial', (['logits', '(1)'], {}), True, 'import tensorflow as tf\n'), (187, 'tensorflow.to_int32', 'tf.to_int32', (['op_id'], {}), True, 'import tensorflow as tf\n'), (188, 'tensorflow.reshape', 'tf.reshape', (['op_id', '[1]'], {}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'op_id'}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.w_emb', 'op_id'], {}), True, 'import tensorflow as tf\n'), (200, 'tensorflow.matmul', 'tf.matmul', (['next_h[-1]', 'self.w_attn_1'], {}), True, 'import tensorflow as tf\n'), (251, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.matmul', 'tf.matmul', (['next_h[-1]', 'self.w_soft'], {}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.get_variable', 'tf.get_variable', (['"""w"""', '[2 * self.lstm_size, 4 * self.lstm_size]'], {}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['b_init'], {}), True, 'import tensorflow as tf\n'), (155, 'tensorflow.matmul', 'tf.matmul', (['next_h[-1]', 'self.w_attn_2'], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.tanh', 'tf.tanh', (['logits'], {}), True, 'import tensorflow as tf\n'), (172, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['index'], {}), True, 'import tensorflow as tf\n'), (183, 'tensorflow.tanh', 'tf.tanh', (['logits'], {}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), True, 'import tensorflow as tf\n'), (194, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), True, 'import tensorflow as tf\n')] |
yselivonchyk/DCIGN_tensorflow | ff8d85f3a7b7ca1e5c3f50ff003a1c09a70067cd | """MNIST Autoencoder. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import tensorflow as tf
import utils as ut
import input as inp
import visualization as vis
import matplotlib.pyplot as plt
import time
import sys
import getch
import model_interpreter as interpreter
import network_utils as nut
import math
from tensorflow.contrib.tensorboard.plugins import projector
from Bunch import Bunch
tf.app.flags.DEFINE_string('input_path', '../data/tmp/grid03.14.c.tar.gz', 'input folder')
tf.app.flags.DEFINE_string('input_name', '', 'input folder')
tf.app.flags.DEFINE_string('test_path', '', 'test set folder')
tf.app.flags.DEFINE_string('net', 'f100-f3', 'model configuration')
tf.app.flags.DEFINE_string('model', 'noise', 'Type of the model to use: Autoencoder (ae) '
'WhatWhereAe (ww) U-netAe (u)')
tf.app.flags.DEFINE_string('postfix', '', 'Postfix for the training folder')
tf.app.flags.DEFINE_float('alpha', 10, 'Predictive reconstruction loss weight')
tf.app.flags.DEFINE_float('beta', 0.0005, 'Reconstruction from noisy data loss weight')
tf.app.flags.DEFINE_float('epsilon', 0.000001,
'Diameter of epsilon sphere comparing to distance to a neighbour. <= 0.5')
tf.app.flags.DEFINE_float('gamma', 50., 'Loss weight for large distances')
tf.app.flags.DEFINE_float('distance', 0.01, 'Maximum allowed interpoint distance')
tf.app.flags.DEFINE_float('delta', 1., 'Loss weight for stacked objective')
tf.app.flags.DEFINE_string('comment', '', 'Comment to leave by the model')
tf.app.flags.DEFINE_float('test_max', 10000, 'max number of examples in the test set')
tf.app.flags.DEFINE_integer('max_epochs', 0, 'Train for at most this number of epochs')
tf.app.flags.DEFINE_integer('save_every', 250, 'Save model state every INT epochs')
tf.app.flags.DEFINE_integer('eval_every', 25, 'Save encoding and visualizations every')
tf.app.flags.DEFINE_integer('visualiza_max', 10, 'Max pairs to show on visualization')
tf.app.flags.DEFINE_boolean('load_state', True, 'Load state if possible ')
tf.app.flags.DEFINE_boolean('kill_depth', False, 'Ignore depth information')
tf.app.flags.DEFINE_boolean('dev', False, 'Indicate development mode')
tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, 'Learning rate of the optimizer')
tf.app.flags.DEFINE_float('blur', 5.0, 'Max sigma value for Gaussian blur applied to training set')
tf.app.flags.DEFINE_boolean('new_blur', False, 'Use data augmentation as blur info')
tf.app.flags.DEFINE_integer('blur_decrease', 10000, 'Decrease image blur every X steps')
FLAGS = tf.app.flags.FLAGS
slim = tf.contrib.slim
AUTOENCODER = 'ae'
PREDICTIVE = 'pred'
DENOISING = 'noise'
CHECKPOINT_NAME = '-9999.chpt'
EMB_SUFFIX = '_embedding'
def is_stopping_point(current_epoch, epochs_to_train, stop_every=None, stop_x_times=None,
stop_on_last=True):
if stop_on_last and current_epoch + 1 == epochs_to_train:
return True
if stop_x_times is not None:
return current_epoch % np.ceil(epochs_to_train / float(stop_x_times)) == 0
if stop_every is not None:
return (current_epoch + 1) % stop_every == 0
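# A quick illustration of the cadence (hypothetical helper, not part of the original
# script): with 10 epochs and stop_every=4 the stopping points are epochs 3, 7 and
# the final epoch 9.
def _example_stopping_points(epochs_to_train=10, stop_every=4):
  """Hypothetical helper: list the epochs for which is_stopping_point returns True."""
  return [e for e in range(epochs_to_train)
          if is_stopping_point(e, epochs_to_train, stop_every=stop_every)]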
def _fetch_dataset(path, take=None):
dataset = inp.read_ds_zip(path) # read
take = len(dataset) if take is None else take
dataset = dataset[:take]
# print(dataset.dtype, dataset.shape, np.min(dataset), np.max(dataset))
# dataset = inp.rescale_ds(dataset, 0, 1)
if FLAGS.kill_depth:
dataset[..., -1] = 0
ut.print_info('DS fetch: %8d (%s)' % (len(dataset), path))
return dataset
def l2(x):
l = x.get_shape().as_list()[0]
return tf.reshape(tf.sqrt(tf.reduce_sum(x ** 2, axis=1)), (l, 1))
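# l2 keeps the row-wise Euclidean norms as a column vector; a minimal, hypothetical
# NumPy mirror for reference:
def _example_l2_numpy(x):
  """Hypothetical sketch: shape [n, d] -> [n, 1] of row-wise Euclidean norms."""
  return np.sqrt(np.sum(np.square(x), axis=1, keepdims=True))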
def get_stats_template():
return Bunch(
batch=[],
input=[],
encoding=[],
reconstruction=[],
total_loss=0.,
start=time.time())
def guard_nan(x):
return x if not math.isnan(x) else -1.
def _blur_expand(input):
k_size = 9
kernels = [2, 4, 6]
channels = [input] + [nut.blur_gaussian(input, k, k_size)[0] for k in kernels]
res = tf.concat(channels, axis=3)
return res
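# _blur_expand stacks the raw input with progressively blurred copies along the
# channel axis. A hypothetical NumPy/SciPy sketch of the same idea (the real op uses
# nut.blur_gaussian inside the TF graph, and its kernel parameters need not map
# one-to-one onto these sigmas):
def _example_blur_expand_numpy(batch, sigmas=(2, 4, 6)):
  """Hypothetical sketch: shape [b, h, w, c] -> [b, h, w, c * (1 + len(sigmas))]."""
  from scipy.ndimage import gaussian_filter
  channels = [batch] + [gaussian_filter(batch, sigma=(0, s, s, 0)) for s in sigmas]
  return np.concatenate(channels, axis=3)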
class Autoencoder:
train_set, test_set = None, None
permutation = None
batch_shape = None
epoch_size = None
input, target = None, None # AE placeholders
encode, decode = None, None # AE operations
model = None # interpreted model
encoding = None # AE predictive evaluation placeholder
eval_decode, eval_loss = None, None # AE evaluation
inputs, targets = None, None # Noise/Predictive placeholders
raw_inputs, raw_targets = None, None # inputs in network-friendly representation
models = None # Noise/Predictive interpreted models
optimizer, _train = None, None
loss_ae, loss_reco, loss_pred, loss_dn = None, None, None, None # Objectives
loss_total = None
losses = []
step = None # operation
step_var = None # variable
vis_summary, vis_placeholder = None, None
image_summaries = None
visualization_batch_perm = None
def __init__(self, optimizer=tf.train.AdamOptimizer, need_forlders=True):
self.optimizer_constructor = optimizer
FLAGS.input_name = inp.get_input_name(FLAGS.input_path)
if need_forlders:
ut.configure_folders(FLAGS)
ut.print_flags(FLAGS)
# MISC
def get_past_epochs(self):
return int(self.step.eval() / self.epoch_size)
@staticmethod
def get_checkpoint_path():
# print(os.path.join(FLAGS.save_path, CHECKPOINT_NAME), len(CHECKPOINT_NAME))
return os.path.join(FLAGS.save_path, CHECKPOINT_NAME)
def get_latest_checkpoint(self):
return tf.train.latest_checkpoint(
self.get_checkpoint_path()[:-len(EMB_SUFFIX)],
latest_filename='checkpoint'
)
# DATA
def fetch_datasets(self):
if FLAGS.max_epochs == 0:
FLAGS.input_path = FLAGS.test_path
self.train_set = _fetch_dataset(FLAGS.input_path)
self.epoch_size = int(self.train_set.shape[0] / FLAGS.batch_size)
self.batch_shape = [FLAGS.batch_size] + list(self.train_set.shape[1:])
reuse_train = FLAGS.test_path == FLAGS.input_path or FLAGS.test_path == ''
self.test_set = self.train_set.copy() if reuse_train else _fetch_dataset(FLAGS.test_path)
take_test = int(FLAGS.test_max) if FLAGS.test_max > 1 else int(FLAGS.test_max * len(self.test_set))
ut.print_info('take %d from test' % take_test)
self.test_set = self.test_set[:take_test]
def _batch_generator(self, x=None, y=None, shuffle=True, batches=None):
"""Returns BATCH_SIZE of couples of subsequent images"""
x = x if x is not None else self._get_blurred_dataset()
y = y if y is not None else x
batches = batches if batches is not None else int(np.floor(len(x) / FLAGS.batch_size))
self.permutation = np.arange(len(x))
self.permutation = self.permutation if not shuffle else np.random.permutation(self.permutation)
for i in range(batches):
batch_indexes = self.permutation[i * FLAGS.batch_size:(i + 1) * FLAGS.batch_size]
# batch = np.stack((dataset[batch_indexes], dataset[batch_indexes + 1], dataset[batch_indexes + 2]), axis=1)
yield x[batch_indexes], y[batch_indexes]
def _batch_permutation_generator(self, length, start=0, shuffle=True, batches=None):
self.permutation = np.arange(length) + start
self.permutation = self.permutation if not shuffle else np.random.permutation(self.permutation)
for i in range(int(length/FLAGS.batch_size)):
if batches is not None and i >= batches:
break
yield self.permutation[i * FLAGS.batch_size:(i + 1) * FLAGS.batch_size]
_blurred_dataset, _last_blur = None, 0
def _get_blur_sigma(self):
calculated_sigma = FLAGS.blur - int(10 * self.step.eval() / FLAGS.blur_decrease) / 10.0
return max(0, calculated_sigma)
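# NOTE: with the default flags (blur=5.0, blur_decrease=10000) the sigma drops by
# 0.1 every 1000 steps, e.g. step 0 -> 5.0, step 25000 -> 2.5, step >= 50000 -> 0.0.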
# @ut.timeit
def _get_blurred_dataset(self):
if FLAGS.blur != 0:
current_sigma = self._get_blur_sigma()
if current_sigma != self._last_blur:
# print(self._last_blur, current_sigma)
self._last_blur = current_sigma
self._blurred_dataset = inp.apply_gaussian(self.train_set, sigma=current_sigma)
ut.print_info('blur s:%.1f[%.1f>%.1f]' % (current_sigma, self.train_set[2, 10, 10, 0], self._blurred_dataset[2, 10, 10, 0]))
return self._blurred_dataset if self._blurred_dataset is not None else self.train_set
return self.train_set
# TRAIN
def build_ae_model(self):
self.input = tf.placeholder(tf.uint8, self.batch_shape, name='input')
self.target = tf.placeholder(tf.uint8, self.batch_shape, name='target')
self.step = tf.Variable(0, trainable=False, name='global_step')
root = self._image_to_tensor(self.input)
target = self._image_to_tensor(self.target)
model = interpreter.build_autoencoder(root, FLAGS.net)
self.encode = model.encode
self.model = model
self.encoding = tf.placeholder(self.encode.dtype, self.encode.get_shape(), name='encoding')
eval_decode = interpreter.build_decoder(self.encoding, model.config, reuse=True)
print(target, eval_decode)
self.eval_loss = interpreter.l2_loss(target, eval_decode, name='predictive_reconstruction')
self.eval_decode = self._tensor_to_image(eval_decode)
self.loss_ae = interpreter.l2_loss(target, model.decode, name='reconstruction')
self.decode = self._tensor_to_image(model.decode)
self.losses = [self.loss_ae]
def build_predictive_model(self):
self.build_ae_model() # builds on top of the AE model (needed due to auxiliary operation init)
self.inputs = tf.placeholder(tf.uint8, [3] + self.batch_shape, name='inputs')
self.targets = tf.placeholder(tf.uint8, [3] + self.batch_shape, name='targets')
# transform inputs
self.raw_inputs = [self._image_to_tensor(self.inputs[i]) for i in range(3)]
self.raw_targets = [self._image_to_tensor(self.targets[i]) for i in range(3)]
# build AE objective for triplet
config = self.model.config
models = [interpreter.build_autoencoder(x, config) for x in self.raw_inputs]
reco_losses = [1./3 * interpreter.l2_loss(models[i].decode, self.raw_targets[i]) for i in range(3)] # business as usual
self.models = models
# build predictive objective
pred_loss_2 = self._prediction_decode(models[1].encode*2 - models[0].encode, self.raw_targets[2], models[2])
pred_loss_0 = self._prediction_decode(models[1].encode*2 - models[2].encode, self.raw_targets[0], models[0])
# build regularized distance objective
dist_loss1 = self._distance_loss(models[1].encode - models[0].encode)
dist_loss2 = self._distance_loss(models[1].encode - models[2].encode)
# Stitch it all together and train
self.loss_reco = tf.add_n(reco_losses)
self.loss_pred = pred_loss_0 + pred_loss_2
self.loss_dist = dist_loss1 + dist_loss2
self.losses = [self.loss_reco, self.loss_pred]
def _distance_loss(self, distances):
error = tf.nn.relu(l2(distances) - FLAGS.distance ** 2)
return tf.reduce_sum(error)
def _prediction_decode(self, prediction, target, model):
"""Predict encoding t3 by encoding (t2 and t1) and expect a good reconstruction"""
predict_decode = interpreter.build_decoder(prediction, self.model.config, reuse=True, masks=model.mask_list)
predict_loss = 1./2 * interpreter.l2_loss(predict_decode, target, alpha=FLAGS.alpha)
self.models += [predict_decode]
return predict_loss * FLAGS.gamma
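# NOTE: the predictions fed into this decoder are linear extrapolations in latent
# space: with encodings e1 and e2 the third frame is predicted as 2*e2 - e1 (e.g.
# e1=[0, 0], e2=[1, 2] predicts [2, 4]), and symmetrically the first frame as
# 2*e2 - e3; the decoder weights are shared via reuse=True.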
def build_denoising_model(self):
self.build_predictive_model() # builds on top of predictive model. Reuses triplet encoding
# build denoising objective
models = self.models
self.loss_dn = self._noisy_decode(models[1])
self.losses = [self.loss_reco, self.loss_pred, self.loss_dist, self.loss_dn]
def _noisy_decode(self, model):
"""Distort middle encoding with [<= 1/3*dist(neigbour)] and demand good reconstruction"""
# dist = l2(x1 - x2)
# noise = dist * self.epsilon_sphere_noise()
# tf.stop_gradient(noise)
noise = tf.random_normal(self.model.encode.get_shape().as_list()) * FLAGS.epsilon
noisy_encoding = noise + self.models[1].encode
tf.stop_gradient(noisy_encoding) # or maybe here, who knows
noisy_decode = interpreter.build_decoder(noisy_encoding, model.config, reuse=True, masks=model.mask_list)
loss = interpreter.l2_loss(noisy_decode, self.raw_targets[1], alpha=FLAGS.beta)
self.models += [noisy_decode]
return loss
def _tensor_to_image(self, net):
with tf.name_scope('to_image'):
if FLAGS.new_blur:
net = net[..., :self.batch_shape[-1]]
net = tf.nn.relu(net)
net = tf.cast(net <= 1, net.dtype) * net * 255
net = tf.cast(net, tf.uint8)
return net
def _image_to_tensor(self, image):
with tf.name_scope('args_transform'):
net = tf.cast(image, tf.float32) / 255.
if FLAGS.new_blur:
net = _blur_expand(net)
FLAGS.blur = 0.
return net
def _init_optimizer(self):
self.loss_total = tf.add_n(self.losses, 'loss_total')
self.optimizer = self.optimizer_constructor(learning_rate=FLAGS.learning_rate)
self._train = self.optimizer.minimize(self.loss_total, global_step=self.step)
# MAIN
def train(self):
self.fetch_datasets()
if FLAGS.model == AUTOENCODER:
self.build_ae_model()
elif FLAGS.model == PREDICTIVE:
self.build_predictive_model()
else:
self.build_denoising_model()
self._init_optimizer()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
self._on_training_start(sess)
try:
for current_epoch in range(FLAGS.max_epochs):
start = time.time()
full_set_blur = len(self.train_set) < 50000
ds = self._get_blurred_dataset() if full_set_blur else self.train_set
if FLAGS.model == AUTOENCODER:
# Autoencoder Training
for batch in self._batch_generator():
summs, encoding, reconstruction, loss, _, step = sess.run(
[self.summs_train, self.encode, self.decode, self.loss_ae, self.train_ae, self.step],
feed_dict={self.input: batch[0], self.target: batch[1]}
)
self._on_batch_finish(summs, loss, batch, encoding, reconstruction)
else:
# Predictive and Denoising training
for batch_indexes in self._batch_permutation_generator(len(ds)-2):
batch = np.stack((ds[batch_indexes], ds[batch_indexes + 1], ds[batch_indexes + 2]))
if not full_set_blur:
batch = np.stack((
inp.apply_gaussian(ds[batch_indexes], sigma=self._get_blur_sigma()),
inp.apply_gaussian(ds[batch_indexes+1], sigma=self._get_blur_sigma()),
inp.apply_gaussian(ds[batch_indexes+2], sigma=self._get_blur_sigma())
))
summs, loss, _ = sess.run(
[self.summs_train, self.loss_total, self._train],
feed_dict={self.inputs: batch, self.targets: batch})
self._on_batch_finish(summs, loss)
self._on_epoch_finish(current_epoch, start, sess)
self._on_training_finish(sess)
except KeyboardInterrupt:
self._on_training_abort(sess)
def inference(self, max=10**6):
self.fetch_datasets()
self.build_ae_model()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# nut.print_model_info()
# nut.list_checkpoint_vars(self.get_latest_checkpoint().replace(EMB_SUFFIX, ''))
self.saver = tf.train.Saver()
self._restore_model(sess)
# nut.print_model_info()
encoding, decoding = None, None
for i in range(len(self.train_set)):
batch = np.expand_dims(self.train_set[i], axis=0)
enc, dec = sess.run(
[self.encode, self.decode],
feed_dict={self.input: batch}
)
# print(enc.shape, dec.shape)
encoding = enc if i == 0 else np.vstack((encoding, enc))
decoding = dec if i == 0 else np.vstack((decoding, dec))
print('\r%5d/%d' % (i, len(self.train_set)), end='')
if i >= max:
break
return encoding, decoding
# @ut.timeit
def evaluate(self, sess, take):
digest = Bunch(encoded=None, reconstructed=None, source=None,
loss=.0, eval_loss=.0, dumb_loss=.0)
blurred = inp.apply_gaussian(self.test_set, self._get_blur_sigma())
# Encode
for i, batch in enumerate(self._batch_generator(blurred, shuffle=False)):
encoding = self.encode.eval(feed_dict={self.input: batch[0]})
digest.encoded = ut.concatenate(digest.encoded, encoding)
# Save encoding for visualization
encoded_no_nan = np.nan_to_num(digest.encoded)
self.embedding_assign.eval(feed_dict={self.embedding_test_ph: encoded_no_nan})
try:
self.embedding_saver.save(sess, self.get_checkpoint_path() + EMB_SUFFIX)
except:
ut.print_info("Unexpected error: %s" % str(sys.exc_info()[0]), color=33)
# Calculate expected evaluation
expected = digest.encoded[1:-1]*2 - digest.encoded[:-2]
average = 0.5 * (digest.encoded[1:-1] + digest.encoded[:-2])
digest.size = len(expected)
# evaluation summaries
self.summary_writer.add_summary(self.eval_summs.eval(
feed_dict={self.blur_ph: self._get_blur_sigma()}),
global_step=self.get_past_epochs())
# evaluation losses
for p in self._batch_permutation_generator(digest.size, shuffle=False):
digest.loss += self.eval_loss.eval(feed_dict={self.encoding: digest.encoded[p + 2], self.target: blurred[p + 2]})
digest.eval_loss += self.eval_loss.eval(feed_dict={self.encoding: expected[p], self.target: blurred[p + 2]})
digest.dumb_loss += self.loss_ae.eval( feed_dict={self.input: blurred[p], self.target: blurred[p + 2]})
# for batch in self._batch_generator(blurred, batches=1):
# digest.source = batch[1][:take]
# digest.reconstructed = self.decode.eval(feed_dict={self.input: batch[0]})[:take]
# Reconstruction visualizations
for p in self._batch_permutation_generator(digest.size, shuffle=True, batches=1):
self.visualization_batch_perm = self.visualization_batch_perm if self.visualization_batch_perm is not None else p
p = self.visualization_batch_perm
digest.source = self.eval_decode.eval(feed_dict={self.encoding: expected[p]})[:take]
digest.source = blurred[(p+2)[:take]]
digest.reconstructed = self.eval_decode.eval(feed_dict={self.encoding: average[p]})[:take]
self._eval_image_summaries(blurred[p], digest.encoded[p], average[p], expected[p])
digest.dumb_loss = guard_nan(digest.dumb_loss)
digest.eval_loss = guard_nan(digest.eval_loss)
digest.loss = guard_nan(digest.loss)
return digest
def _eval_image_summaries(self, blurred_batch, actual, average, expected):
"""Create Tensorboard summaries with image reconstructions"""
noisy = expected + np.random.randn(*expected.shape) * FLAGS.epsilon
summary = self.image_summaries['orig'].eval(feed_dict={self.input: blurred_batch})
self.summary_writer.add_summary(summary, global_step=self.get_past_epochs())
self._eval_image_summary('midd', average)
# self._eval_image_summary('reco', actual)
self._eval_image_summary('pred', expected)
self._eval_image_summary('nois', noisy)
def _eval_image_summary(self, name, encoding_batch):
summary = self.image_summaries[name].eval(feed_dict={self.encoding: encoding_batch})
self.summary_writer.add_summary(summary, global_step=self.get_past_epochs())
def _add_decoding_summary(self, name, var, collection='train'):
var = var[:FLAGS.visualiza_max]
var = tf.concat(tf.unstack(var), axis=0)
var = tf.expand_dims(var, dim=0)
color_s = tf.summary.image(name, var[..., :3], max_outputs=FLAGS.visualiza_max)
var = tf.expand_dims(var[..., 3], dim=3)
bw_s = tf.summary.image('depth_' + name, var, max_outputs=FLAGS.visualiza_max)
return tf.summary.merge([color_s, bw_s])
# TRAINING PROGRESS EVENTS
def _on_training_start(self, sess):
# Writers and savers
self.summary_writer = tf.summary.FileWriter(FLAGS.logdir, sess.graph)
self.saver = tf.train.Saver()
self._build_embedding_saver(sess)
self._restore_model(sess)
# Loss summaries
self._build_summaries()
self.epoch_stats = get_stats_template()
self.stats = Bunch(
epoch_accuracy=[],
epoch_reconstructions=[],
permutation=None
)
# if FLAGS.dev:
# plt.ion()
# plt.show()
def _build_summaries(self):
# losses
with tf.name_scope('losses'):
loss_names = ['loss_autoencoder', 'loss_predictive', 'loss_distance', 'loss_denoising']
for i, loss in enumerate(self.losses):
self._add_loss_summary(loss_names[i], loss)
self._add_loss_summary('loss_total', self.loss_total)
self.summs_train = tf.summary.merge_all('train')
# reconstructions
with tf.name_scope('decodings'):
self.image_summaries = {
'orig': self._add_decoding_summary('0_original_input', self.input),
'reco': self._add_decoding_summary('1_reconstruction', self.eval_decode),
'pred': self._add_decoding_summary('2_prediction', self.eval_decode),
'midd': self._add_decoding_summary('3_averaged', self.eval_decode),
'nois': self._add_decoding_summary('4_noisy', self.eval_decode)
}
# visualization
fig = vis.get_figure()
fig.canvas.draw()
self.vis_placeholder = tf.placeholder(tf.uint8, ut.fig2rgb_array(fig).shape)
self.vis_summary = tf.summary.image('visualization', self.vis_placeholder)
# embedding
dists = l2(self.embedding_test[:-1] - self.embedding_test[1:])
self.dist = dists
metrics = []
metrics.append(tf.summary.histogram('point_distance', dists))
metrics.append(tf.summary.scalar('training/trajectory_length', tf.reduce_sum(dists)))
self.blur_ph = tf.placeholder(dtype=tf.float32)
metrics.append(tf.summary.scalar('training/blur_sigma', self.blur_ph))
pred = self.embedding_test[1:-1]*2 - self.embedding_test[0:-2]
pred_error = l2(pred - self.embedding_test[2:])
mean_dist, mean_pred_error = tf.reduce_mean(dists), tf.reduce_mean(pred_error)
improvement = (mean_dist-mean_pred_error)/mean_dist
pairwise_improvement = tf.nn.relu(dists[1:] - pred_error)
pairwise_improvement_bool = tf.cast(pairwise_improvement > 0, pairwise_improvement.dtype)
self.pairwise_improvement_bool = pairwise_improvement_bool
metrics.append(tf.summary.scalar('training/avg_dist', mean_dist))
metrics.append(tf.summary.scalar('training/pred_dist', mean_pred_error))
metrics.append(tf.summary.scalar('training/improvement', improvement))
metrics.append(tf.summary.scalar('training/improvement_abs', tf.nn.relu(improvement)))
metrics.append(tf.summary.histogram('training/improvement_abs_hist', nut.nan_to_zero(improvement)))
metrics.append(tf.summary.scalar('training/improvement_pairwise', tf.reduce_mean(pairwise_improvement_bool)))
metrics.append(tf.summary.histogram('training/improvement_pairwise_hist', pairwise_improvement_bool))
self.eval_summs = tf.summary.merge(metrics)
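# NOTE: 'improvement' compares the mean linear-extrapolation error against the mean
# step size between consecutive test embeddings, improvement = (mean_dist -
# mean_pred_error) / mean_dist, and 'improvement_pairwise' is the fraction of points
# whose extrapolation error is smaller than the corresponding step.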
def _build_embedding_saver(self, sess):
"""To use embedding visualizer data has to be stored in variable
since we would like to visualize TEST_SET, this variable should not affect
common checkpoint of the model.
Hence, we build a separate variable with a separate saver."""
embedding_shape = [int(len(self.test_set) / FLAGS.batch_size) * FLAGS.batch_size,
self.encode.get_shape().as_list()[1]]
tsv_path = os.path.join(FLAGS.logdir, 'metadata.tsv')
self.embedding_test_ph = tf.placeholder(tf.float32, embedding_shape, name='embedding')
self.embedding_test = tf.Variable(tf.random_normal(embedding_shape), name='test_embedding', trainable=False)
self.embedding_assign = self.embedding_test.assign(self.embedding_test_ph)
self.embedding_saver = tf.train.Saver(var_list=[self.embedding_test])
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = self.embedding_test.name
embedding.sprite.image_path = './sprite.png'
embedding.sprite.single_image_dim.extend([80, 80])
embedding.metadata_path = './metadata.tsv'
projector.visualize_embeddings(self.summary_writer, config)
sess.run(tf.variables_initializer([self.embedding_test], name='init_embeddings'))
# build sprite image
ut.images_to_sprite(self.test_set, path=os.path.join(FLAGS.logdir, 'sprite.png'))
ut.generate_tsv(len(self.test_set), tsv_path)
def _add_loss_summary(self, name, var, collection='train'):
if var is not None:
tf.summary.scalar(name, var, [collection])
tf.summary.scalar('log_' + name, tf.log(var), [collection])
def _restore_model(self, session):
latest_checkpoint = self.get_latest_checkpoint()
print(latest_checkpoint)
if latest_checkpoint is not None:
latest_checkpoint = latest_checkpoint.replace(EMB_SUFFIX, '')
ut.print_info("latest checkpoint: %s" % latest_checkpoint)
if FLAGS.load_state and latest_checkpoint is not None:
self.saver.restore(session, latest_checkpoint)
ut.print_info('Restored requested. Previous epoch: %d' % self.get_past_epochs(), color=31)
def _on_batch_finish(self, summs, loss, batch=None, encoding=None, reconstruction=None):
self.summary_writer.add_summary(summs, global_step=self.step.eval())
self.epoch_stats.total_loss += loss
if False:
assert batch is not None and reconstruction is not None
original = batch[0]
vis.plot_reconstruction(original, reconstruction, interactive=True)
# @ut.timeit
def _on_epoch_finish(self, epoch, start_time, sess):
elapsed = time.time() - start_time
self.epoch_stats.total_loss = guard_nan(self.epoch_stats.total_loss)
accuracy = np.nan_to_num(100000 * np.sqrt(self.epoch_stats.total_loss / np.prod(self.batch_shape) / self.epoch_size))
# SAVE
if is_stopping_point(epoch, FLAGS.max_epochs, FLAGS.save_every):
self.saver.save(sess, self.get_checkpoint_path())
# VISUALIZE
if is_stopping_point(epoch, FLAGS.max_epochs, FLAGS.eval_every):
evaluation = self.evaluate(sess, take=FLAGS.visualiza_max)
data = {
'enc': np.asarray(evaluation.encoded),
'rec': np.asarray(evaluation.reconstructed),
'blu': np.asarray(evaluation.source)
}
error_info = '%d(%d.%d.%d)' % (np.nan_to_num(accuracy),
np.nan_to_num(evaluation.loss)/evaluation.size,
np.nan_to_num(evaluation.eval_loss)/evaluation.size,
np.nan_to_num(evaluation.dumb_loss)/evaluation.size)
meta = Bunch(suf='encodings', e='%06d' % int(self.get_past_epochs()), er=error_info)
# print(data, meta.to_file_name(folder=FLAGS.save_path))
np.save(meta.to_file_name(folder=FLAGS.save_path), data)
vis.plot_encoding_crosssection(
evaluation.encoded,
meta.to_file_name(FLAGS.save_path, 'jpg'),
evaluation.source,
evaluation.reconstructed,
interactive=FLAGS.dev)
self._save_visualization_to_summary()
self.stats.epoch_accuracy.append(accuracy)
self._print_epoch_info(accuracy, epoch, FLAGS.max_epochs, elapsed)
if epoch + 1 != FLAGS.max_epochs:
self.epoch_stats = get_stats_template()
def _save_visualization_to_summary(self):
image = ut.fig2rgb_array(plt.figure(num=0))
self.summary_writer.add_summary(self.vis_summary.eval(feed_dict={self.vis_placeholder: image}))
def _print_epoch_info(self, accuracy, current_epoch, epochs, elapsed):
epochs_past = self.get_past_epochs() - current_epoch
accuracy_info = '' if accuracy is None else '| accuracy %d' % int(accuracy)
epoch_past_info = '' if epochs_past is None else '+%d' % (epochs_past - 1)
epoch_count = 'Epochs %2d/%d%s' % (current_epoch + 1, epochs, epoch_past_info)
time_info = '%2dms/bt' % (elapsed / self.epoch_size * 1000)
examples = int(np.floor(len(self.train_set) / FLAGS.batch_size))
loss_info = 't.loss:%d' % (self.epoch_stats.total_loss * 100 / (examples * np.prod(self.batch_shape[1:])))
info_string = ' '.join([epoch_count, accuracy_info, time_info, loss_info])
ut.print_time(info_string, same_line=True)
def _on_training_finish(self, sess):
if FLAGS.max_epochs == 0:
self._on_epoch_finish(self.get_past_epochs(), time.time(), sess)
best_acc = np.min(self.stats.epoch_accuracy)
ut.print_time('Best Quality: %f for %s' % (best_acc, FLAGS.net))
self.summary_writer.close()
def _on_training_abort(self, sess):
print('Press ENTER to save the model')
if getch.getch() == '\n':
print('saving')
self.saver.save(sess, self.get_checkpoint_path())
if __name__ == '__main__':
args = dict([arg.split('=', maxsplit=1) for arg in sys.argv[1:]])
if len(args) <= 1:
FLAGS.input_path = '../data/tmp/romb8.5.6.tar.gz'
FLAGS.test_path = '../data/tmp/romb8.5.6.tar.gz'
FLAGS.test_max = 2178
FLAGS.max_epochs = 5
FLAGS.eval_every = 1
FLAGS.save_every = 1
FLAGS.batch_size = 32
FLAGS.blur = 0.0
# FLAGS.model = 'noise'
# FLAGS.beta = 1.0
# FLAGS.epsilon = .000001
model = Autoencoder()
if FLAGS.model == 'ae':
FLAGS.model = AUTOENCODER
elif 'pred' in FLAGS.model:
print('PREDICTIVE')
FLAGS.model = PREDICTIVE
elif 'noi' in FLAGS.model:
print('DENOISING')
FLAGS.model = DENOISING
else:
print('Do-di-li-doo doo-di-li-don')
model.train()
| [
"numpy.expand_dims",
"tensorflow.concat",
"numpy.asarray",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.variables_initializer",
"numpy.nan_to_num",
"tensorflow.app.flags.DEFINE_string",
"numpy.random.randn",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.add_n",
"tensorflow.summary.scalar",
"tensorflow.Variable",
"tensorflow.summary.image",
"numpy.arange",
"tensorflow.app.flags.DEFINE_integer",
"numpy.stack",
"tensorflow.stop_gradient",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.train.Saver",
"matplotlib.pyplot.figure",
"tensorflow.unstack",
"numpy.min",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig",
"tensorflow.summary.merge",
"tensorflow.summary.histogram",
"tensorflow.nn.relu",
"tensorflow.summary.FileWriter",
"tensorflow.reduce_mean",
"tensorflow.expand_dims",
"tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings",
"numpy.random.permutation",
"tensorflow.log",
"tensorflow.app.flags.DEFINE_float",
"numpy.prod",
"numpy.vstack",
"tensorflow.random_normal"
] | autoencoder.py | [(25, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""input_path"""', '"""../data/tmp/grid03.14.c.tar.gz"""', '"""input folder"""'], {}), True, 'import tensorflow as tf\n'), (26, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""input_name"""', '""""""', '"""input folder"""'], {}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""test_path"""', '""""""', '"""test set folder"""'], {}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""net"""', '"""f100-f3"""', '"""model configuration"""'], {}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""model"""', '"""noise"""', '"""Type of the model to use: Autoencoder (ae)WhatWhereAe (ww) U-netAe (u)"""'], {}), True, 'import tensorflow as tf\n'), (31, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""postfix"""', '""""""', '"""Postfix for the training folder"""'], {}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""alpha"""', '(10)', '"""Predictive reconstruction loss weight"""'], {}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""beta"""', '(0.0005)', '"""Reconstruction from noisy data loss weight"""'], {}), True, 'import tensorflow as tf\n'), (35, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""epsilon"""', '(1e-06)', '"""Diameter of epsilon sphere comparing to distance to a neighbour. <= 0.5"""'], {}), True, 'import tensorflow as tf\n'), (37, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""gamma"""', '(50.0)', '"""Loss weight for large distances"""'], {}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""distance"""', '(0.01)', '"""Maximum allowed interpoint distance"""'], {}), True, 'import tensorflow as tf\n'), (39, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""delta"""', '(1.0)', '"""Loss weight for stacked objective"""'], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""comment"""', '""""""', '"""Comment to leave by the model"""'], {}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""test_max"""', '(10000)', '"""max number of examples in the test set"""'], {}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max_epochs"""', '(0)', '"""Train for at most this number of epochs"""'], {}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""save_every"""', '(250)', '"""Save model state every INT epochs"""'], {}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""eval_every"""', '(25)', '"""Save encoding and visualizations every"""'], {}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""visualiza_max"""', '(10)', '"""Max pairs to show on visualization"""'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""load_state"""', 
'(True)', '"""Load state if possible """'], {}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""kill_depth"""', '(False)', '"""Ignore depth information"""'], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""dev"""', '(False)', '"""Indicate development mode"""'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(128)', '"""Batch size"""'], {}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate"""', '(0.0001)', '"""Create visualization of """'], {}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""blur"""', '(5.0)', '"""Max sigma value for Gaussian blur applied to training set"""'], {}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""new_blur"""', '(False)', '"""Use data augmentation as blur info"""'], {}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""blur_decrease"""', '(10000)', '"""Decrease image blur every X steps"""'], {}), True, 'import tensorflow as tf\n'), (82, 'input.read_ds_zip', 'inp.read_ds_zip', (['path'], {}), True, 'import input as inp\n'), (116, 'tensorflow.concat', 'tf.concat', (['channels'], {'axis': '(3)'}), True, 'import tensorflow as tf\n'), (152, 'input.get_input_name', 'inp.get_input_name', (['FLAGS.input_path'], {}), True, 'import input as inp\n'), (155, 'utils.print_flags', 'ut.print_flags', (['FLAGS'], {}), True, 'import utils as ut\n'), (166, 'os.path.join', 'os.path.join', (['FLAGS.save_path', 'CHECKPOINT_NAME'], {}), False, 'import os\n'), (188, 'utils.print_info', 'ut.print_info', (["('take %d from test' % take_test)"], {}), True, 'import utils as ut\n'), (235, 'tensorflow.placeholder', 'tf.placeholder', (['tf.uint8', 'self.batch_shape'], {'name': '"""input"""'}), True, 'import tensorflow as tf\n'), (236, 'tensorflow.placeholder', 'tf.placeholder', (['tf.uint8', 'self.batch_shape'], {'name': '"""target"""'}), True, 'import tensorflow as tf\n'), (237, 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'name': '"""global_step"""'}), True, 'import tensorflow as tf\n'), (241, 'model_interpreter.build_autoencoder', 'interpreter.build_autoencoder', (['root', 'FLAGS.net'], {}), True, 'import model_interpreter as interpreter\n'), (247, 'model_interpreter.build_decoder', 'interpreter.build_decoder', (['self.encoding', 'model.config'], {'reuse': '(True)'}), True, 'import model_interpreter as interpreter\n'), (249, 'model_interpreter.l2_loss', 'interpreter.l2_loss', (['target', 'eval_decode'], {'name': '"""predictive_reconstruction"""'}), True, 'import model_interpreter as interpreter\n'), (252, 'model_interpreter.l2_loss', 'interpreter.l2_loss', (['target', 'model.decode'], {'name': '"""reconstruction"""'}), True, 'import model_interpreter as interpreter\n'), (258, 'tensorflow.placeholder', 'tf.placeholder', (['tf.uint8', '([3] + self.batch_shape)'], {'name': '"""inputs"""'}), True, 'import tensorflow as tf\n'), (259, 'tensorflow.placeholder', 'tf.placeholder', (['tf.uint8', '([3] + self.batch_shape)'], {'name': '"""targets"""'}), True, 'import tensorflow as tf\n'), (280, 'tensorflow.add_n', 'tf.add_n', (['reco_losses'], {}), True, 'import 
tensorflow as tf\n'), (287, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['error'], {}), True, 'import tensorflow as tf\n'), (291, 'model_interpreter.build_decoder', 'interpreter.build_decoder', (['prediction', 'self.model.config'], {'reuse': '(True)', 'masks': 'model.mask_list'}), True, 'import model_interpreter as interpreter\n'), (312, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['noisy_encoding'], {}), True, 'import tensorflow as tf\n'), (313, 'model_interpreter.build_decoder', 'interpreter.build_decoder', (['noisy_encoding', 'model.config'], {'reuse': '(True)', 'masks': 'model.mask_list'}), True, 'import model_interpreter as interpreter\n'), (314, 'model_interpreter.l2_loss', 'interpreter.l2_loss', (['noisy_decode', 'self.raw_targets[1]'], {'alpha': 'FLAGS.beta'}), True, 'import model_interpreter as interpreter\n'), (336, 'tensorflow.add_n', 'tf.add_n', (['self.losses', '"""loss_total"""'], {}), True, 'import tensorflow as tf\n'), (426, 'Bunch.Bunch', 'Bunch', ([], {'encoded': 'None', 'reconstructed': 'None', 'source': 'None', 'loss': '(0.0)', 'eval_loss': '(0.0)', 'dumb_loss': '(0.0)'}), False, 'from Bunch import Bunch\n'), (434, 'numpy.nan_to_num', 'np.nan_to_num', (['digest.encoded'], {}), True, 'import numpy as np\n'), (492, 'tensorflow.expand_dims', 'tf.expand_dims', (['var'], {'dim': '(0)'}), True, 'import tensorflow as tf\n'), (493, 'tensorflow.summary.image', 'tf.summary.image', (['name', 'var[(...), :3]'], {'max_outputs': 'FLAGS.visualiza_max'}), True, 'import tensorflow as tf\n'), (494, 'tensorflow.expand_dims', 'tf.expand_dims', (['var[..., 3]'], {'dim': '(3)'}), True, 'import tensorflow as tf\n'), (495, 'tensorflow.summary.image', 'tf.summary.image', (["('depth_' + name)", 'var'], {'max_outputs': 'FLAGS.visualiza_max'}), True, 'import tensorflow as tf\n'), (496, 'tensorflow.summary.merge', 'tf.summary.merge', (['[color_s, bw_s]'], {}), True, 'import tensorflow as tf\n'), (504, 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['FLAGS.logdir', 'sess.graph'], {}), True, 'import tensorflow as tf\n'), (505, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (512, 'Bunch.Bunch', 'Bunch', ([], {'epoch_accuracy': '[]', 'epoch_reconstructions': '[]', 'permutation': 'None'}), False, 'from Bunch import Bunch\n'), (528, 'tensorflow.summary.merge_all', 'tf.summary.merge_all', (['"""train"""'], {}), True, 'import tensorflow as tf\n'), (539, 'visualization.get_figure', 'vis.get_figure', ([], {}), True, 'import visualization as vis\n'), (542, 'tensorflow.summary.image', 'tf.summary.image', (['"""visualization"""', 'self.vis_placeholder'], {}), True, 'import tensorflow as tf\n'), (550, 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (559, 'tensorflow.nn.relu', 'tf.nn.relu', (['(dists[1:] - pred_error)'], {}), True, 'import tensorflow as tf\n'), (560, 'tensorflow.cast', 'tf.cast', (['(pairwise_improvement > 0)', 'pairwise_improvement.dtype'], {}), True, 'import tensorflow as tf\n'), (570, 'tensorflow.summary.merge', 'tf.summary.merge', (['metrics'], {}), True, 'import tensorflow as tf\n'), (580, 'os.path.join', 'os.path.join', (['FLAGS.logdir', '"""metadata.tsv"""'], {}), False, 'import os\n'), (582, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'embedding_shape'], {'name': '"""embedding"""'}), True, 'import tensorflow as tf\n'), (585, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': '[self.embedding_test]'}), True, 'import tensorflow as tf\n'), (587, 
'tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig', 'projector.ProjectorConfig', ([], {}), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), (593, 'tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings', 'projector.visualize_embeddings', (['self.summary_writer', 'config'], {}), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), (610, 'utils.print_info', 'ut.print_info', (["('latest checkpoint: %s' % latest_checkpoint)"], {}), True, 'import utils as ut\n'), (674, 'utils.print_time', 'ut.print_time', (['info_string'], {'same_line': '(True)'}), True, 'import utils as ut\n'), (679, 'numpy.min', 'np.min', (['self.stats.epoch_accuracy'], {}), True, 'import numpy as np\n'), (680, 'utils.print_time', 'ut.print_time', (["('Best Quality: %f for %s' % (best_acc, FLAGS.net))"], {}), True, 'import utils as ut\n'), (95, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(x ** 2)'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (105, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (109, 'math.isnan', 'math.isnan', (['x'], {}), False, 'import math\n'), (154, 'utils.configure_folders', 'ut.configure_folders', (['FLAGS'], {}), True, 'import utils as ut\n'), (197, 'numpy.random.permutation', 'np.random.permutation', (['self.permutation'], {}), True, 'import numpy as np\n'), (205, 'numpy.arange', 'np.arange', (['length'], {}), True, 'import numpy as np\n'), (206, 'numpy.random.permutation', 'np.random.permutation', (['self.permutation'], {}), True, 'import numpy as np\n'), (267, 'model_interpreter.build_autoencoder', 'interpreter.build_autoencoder', (['x', 'config'], {}), True, 'import model_interpreter as interpreter\n'), (292, 'model_interpreter.l2_loss', 'interpreter.l2_loss', (['predict_decode', 'target'], {'alpha': 'FLAGS.alpha'}), True, 'import model_interpreter as interpreter\n'), (319, 'tensorflow.name_scope', 'tf.name_scope', (['"""to_image"""'], {}), True, 'import tensorflow as tf\n'), (322, 'tensorflow.nn.relu', 'tf.nn.relu', (['net'], {}), True, 'import tensorflow as tf\n'), (324, 'tensorflow.cast', 'tf.cast', (['net', 'tf.uint8'], {}), True, 'import tensorflow as tf\n'), (328, 'tensorflow.name_scope', 'tf.name_scope', (['"""args_transform"""'], {}), True, 'import tensorflow as tf\n'), (354, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (399, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (404, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (432, 'utils.concatenate', 'ut.concatenate', (['digest.encoded', 'encoding'], {}), True, 'import utils as ut\n'), (491, 'tensorflow.unstack', 'tf.unstack', (['var'], {}), True, 'import tensorflow as tf\n'), (523, 'tensorflow.name_scope', 'tf.name_scope', (['"""losses"""'], {}), True, 'import tensorflow as tf\n'), (530, 'tensorflow.name_scope', 'tf.name_scope', (['"""decodings"""'], {}), True, 'import tensorflow as tf\n'), (548, 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""point_distance"""', 'dists'], {}), True, 'import tensorflow as tf\n'), (551, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""training/blur_sigma"""', 'self.blur_ph'], {}), True, 'import tensorflow as tf\n'), (556, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['dists'], {}), True, 'import tensorflow as tf\n'), (556, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['pred_error'], {}), True, 'import tensorflow as tf\n'), (563, 'tensorflow.summary.scalar', 
'tf.summary.scalar', (['"""training/avg_dist"""', 'mean_dist'], {}), True, 'import tensorflow as tf\n'), (564, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""training/pred_dist"""', 'mean_pred_error'], {}), True, 'import tensorflow as tf\n'), (565, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""training/improvement"""', 'improvement'], {}), True, 'import tensorflow as tf\n'), (569, 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""training/improvement_pairwise_hist"""', 'pairwise_improvement_bool'], {}), True, 'import tensorflow as tf\n'), (583, 'tensorflow.random_normal', 'tf.random_normal', (['embedding_shape'], {}), True, 'import tensorflow as tf\n'), (594, 'tensorflow.variables_initializer', 'tf.variables_initializer', (['[self.embedding_test]'], {'name': '"""init_embeddings"""'}), True, 'import tensorflow as tf\n'), (602, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['name', 'var', '[collection]'], {}), True, 'import tensorflow as tf\n'), (622, 'visualization.plot_reconstruction', 'vis.plot_reconstruction', (['original', 'reconstruction'], {'interactive': '(True)'}), True, 'import visualization as vis\n'), (626, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (660, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(0)'}), True, 'import matplotlib.pyplot as plt\n'), (685, 'getch.getch', 'getch.getch', ([], {}), False, 'import getch\n'), (115, 'network_utils.blur_gaussian', 'nut.blur_gaussian', (['input', 'k', 'k_size'], {}), True, 'import network_utils as nut\n'), (225, 'input.apply_gaussian', 'inp.apply_gaussian', (['self.train_set'], {'sigma': 'current_sigma'}), True, 'import input as inp\n'), (226, 'utils.print_info', 'ut.print_info', (["('blur s:%.1f[%.1f>%.1f]' % (current_sigma, self.train_set[2, 10, 10, 0],\n self._blurred_dataset[2, 10, 10, 0]))"], {}), True, 'import utils as ut\n'), (268, 'model_interpreter.l2_loss', 'interpreter.l2_loss', (['models[i].decode', 'self.raw_targets[i]'], {}), True, 'import model_interpreter as interpreter\n'), (329, 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (355, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (400, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (410, 'numpy.expand_dims', 'np.expand_dims', (['self.train_set[i]'], {'axis': '(0)'}), True, 'import numpy as np\n'), (475, 'numpy.random.randn', 'np.random.randn', (['*expected.shape'], {}), True, 'import numpy as np\n'), (541, 'utils.fig2rgb_array', 'ut.fig2rgb_array', (['fig'], {}), True, 'import utils as ut\n'), (549, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['dists'], {}), True, 'import tensorflow as tf\n'), (566, 'tensorflow.nn.relu', 'tf.nn.relu', (['improvement'], {}), True, 'import tensorflow as tf\n'), (567, 'network_utils.nan_to_zero', 'nut.nan_to_zero', (['improvement'], {}), True, 'import network_utils as nut\n'), (568, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['pairwise_improvement_bool'], {}), True, 'import tensorflow as tf\n'), (597, 'os.path.join', 'os.path.join', (['FLAGS.logdir', '"""sprite.png"""'], {}), False, 'import os\n'), (603, 'tensorflow.log', 'tf.log', (['var'], {}), True, 'import tensorflow as tf\n'), (636, 'numpy.asarray', 'np.asarray', (['evaluation.encoded'], {}), True, 'import numpy as np\n'), (637, 'numpy.asarray', 'np.asarray', (['evaluation.reconstructed'], {}), True, 'import numpy as 
np\n'), (638, 'numpy.asarray', 'np.asarray', (['evaluation.source'], {}), True, 'import numpy as np\n'), (678, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (323, 'tensorflow.cast', 'tf.cast', (['(net <= 1)', 'net.dtype'], {}), True, 'import tensorflow as tf\n'), (360, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (417, 'numpy.vstack', 'np.vstack', (['(encoding, enc)'], {}), True, 'import numpy as np\n'), (418, 'numpy.vstack', 'np.vstack', (['(decoding, dec)'], {}), True, 'import numpy as np\n'), (640, 'numpy.nan_to_num', 'np.nan_to_num', (['accuracy'], {}), True, 'import numpy as np\n'), (671, 'numpy.prod', 'np.prod', (['self.batch_shape[1:]'], {}), True, 'import numpy as np\n'), (641, 'numpy.nan_to_num', 'np.nan_to_num', (['evaluation.loss'], {}), True, 'import numpy as np\n'), (642, 'numpy.nan_to_num', 'np.nan_to_num', (['evaluation.eval_loss'], {}), True, 'import numpy as np\n'), (643, 'numpy.nan_to_num', 'np.nan_to_num', (['evaluation.dumb_loss'], {}), True, 'import numpy as np\n'), (377, 'numpy.stack', 'np.stack', (['(ds[batch_indexes], ds[batch_indexes + 1], ds[batch_indexes + 2])'], {}), True, 'import numpy as np\n'), (628, 'numpy.prod', 'np.prod', (['self.batch_shape'], {}), True, 'import numpy as np\n'), (439, 'sys.exc_info', 'sys.exc_info', ([], {}), False, 'import sys\n')] |
ethanm88/lingvo | 46314590ca80a557b6b95c8acdf5956f9e045eb7 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for base_model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from six.moves import range
import tensorflow as tf
from lingvo.core import base_decoder
from lingvo.core import base_encoder
from lingvo.core import base_input_generator
from lingvo.core import base_layer
from lingvo.core import base_model
from lingvo.core import base_model_params
from lingvo.core import hyperparams
from lingvo.core import layers
from lingvo.core import py_utils
from lingvo.core import task_scheduler
FLAGS = tf.flags.FLAGS
_NUMPY_RANDOM_SEED = 9885784
class BaseTaskTest(tf.test.TestCase):
def testStatsCounter(self):
with self.session() as sess:
foo = base_model.StatsCounter('foo')
val = foo.Value()
params = base_layer.BaseLayer.Params()
inc = foo.IncBy(params, 100)
tf.global_variables_initializer().run()
self.assertAllEqual(0, val.eval())
self.assertAllEqual(100, sess.run(inc))
self.assertAllEqual(100, val.eval())
self.assertAllEqual([100, 200], sess.run([val, inc]))
self.assertAllEqual([200, 300], sess.run([val, inc]))
@classmethod
def TestParams(cls):
p = base_model.BaseTask.Params()
p.name = 'base_mdl'
p.encoder = base_encoder.BaseEncoder.Params()
p.encoder.name = 'encoder'
p.decoder = base_decoder.BaseDecoder.Params()
p.decoder.name = 'decoder'
return p
def testInit(self):
p = self.TestParams()
p.input = base_input_generator.BaseSequenceInputGenerator.Params()
_ = p.cls(p)
def testScaleGradients(self):
p = self.TestParams()
p.input = base_input_generator.BaseSequenceInputGenerator.Params()
task = p.cls(p)
task.CreateVariable(
'a',
py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))
var_a = task.theta.a
var_grads = py_utils.NestedMap(a=(var_a, tf.ones_like(var_a)))
has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads)
FLAGS.enable_check_numerics = False
with self.session():
tf.global_variables_initializer().run()
self.assertFalse(has_nan_or_inf.eval())
self.assertEqual(1.0, grad_scale.eval())
# The final gradient must be finite.
self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())
self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())
def testScaleGradientsInf(self):
FLAGS.enable_check_numerics = False
p = self.TestParams()
p.input = base_input_generator.BaseSequenceInputGenerator.Params()
task = p.cls(p)
task.CreateVariable(
'a',
py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))
var_a = task.theta.a
# Infinite gradient.
var_grads = py_utils.NestedMap(a=(var_a, tf.log(0.)))
has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads)
with self.session():
tf.global_variables_initializer().run()
self.assertTrue(has_nan_or_inf.eval())
self.assertEqual(0., grad_scale.eval())
# The final gradient must be finite.
self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())
self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())
def testScaleGradientsNaN(self):
FLAGS.enable_check_numerics = False
p = self.TestParams()
p.input = base_input_generator.BaseSequenceInputGenerator.Params()
task = p.cls(p)
task.CreateVariable(
'a',
py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))
var_a = task.theta.a
# Make a NaN gradient.
var_grads = py_utils.NestedMap(a=(var_a, 0. * tf.log(0.)))
has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads)
with self.session():
tf.global_variables_initializer().run()
self.assertTrue(has_nan_or_inf.eval())
self.assertEqual(0., grad_scale.eval())
# The final gradient must be finite.
self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())
self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())
def testScaleGradientsCheckNumerics(self):
"""ScaleGradients when enable_check_numerics=True."""
FLAGS.enable_check_numerics = True
p = self.TestParams()
p.input = base_input_generator.BaseSequenceInputGenerator.Params()
task = p.cls(p)
task.CreateVariable(
'a',
py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))
var_a = task.theta.a
# Make a NaN gradient.
var_grads = py_utils.NestedMap(a=(var_a, 0. * tf.log(0.)))
has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads)
with self.session():
tf.global_variables_initializer().run()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
'is not finite'):
self.assertTrue(has_nan_or_inf.eval())
self.assertEqual(0., grad_scale.eval())
# The final gradient must be finite.
self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())
self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())
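  # Note on the ScaleGradients tests above: whenever a gradient contains Inf or NaN,
  # has_nan_or_inf evaluates to True and grad_scale to 0, while the returned final
  # gradients stay finite; with enable_check_numerics=True the same condition instead
  # surfaces as an InvalidArgumentError ('is not finite') when the check is evaluated.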
class TeacherTask(base_model.BaseTask):
@base_layer.initializer
def __init__(self, params):
super(TeacherTask, self).__init__(params)
p = self.params
with tf.variable_scope(p.name):
self.CreateVariable('x',
py_utils.WeightParams(
shape=[], init=py_utils.WeightInit.Constant(0)))
def ComputePredictions(self, theta, input_batch):
return theta.x
class StudentTask(base_model.BaseTask):
@base_layer.initializer
def __init__(self, params):
super(StudentTask, self).__init__(params)
p = self.params
with tf.variable_scope(p.name):
self.CreateVariable('x',
py_utils.WeightParams(
shape=[], init=py_utils.WeightInit.Uniform()))
def ComputePredictions(self, theta, input_batch):
return theta.x
class TestInputGenerator(base_input_generator.BaseSequenceInputGenerator):
def __init__(self, params):
super(TestInputGenerator, self).__init__(params)
self._input_batch_size = tf.constant(1)
def InputBatch(self):
return 0
class DistillationTestTask(base_model.DistillationTask):
@classmethod
def Params(cls):
p = super(DistillationTestTask, cls).Params()
p.name = 'distillation_test'
p.teacher = TeacherTask.Params()
p.student = StudentTask.Params()
p.input = TestInputGenerator.Params()
p.train.learning_rate = 1e3
p.teacher.train = None
p.teacher.eval = None
p.student.train = None
p.student.eval = None
return p
@base_layer.initializer
def __init__(self, params):
super(DistillationTestTask, self).__init__(params)
def ComputeLoss(self, theta, input_batch, predictions):
return {'loss': (predictions.teacher - predictions.student, 1)}
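    # The toy loss above is simply the gap between the teacher's and the student's
    # scalar predictions (with a weight of 1); it exists so DistillationTaskTest can
    # verify that BProp() updates the student's variables while the frozen teacher's
    # variables remain unchanged.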
class DistillationTaskTest(tf.test.TestCase):
def testFProp(self):
p = DistillationTestTask.Params()
task = p.cls(p)
self.assertFalse(task.params.is_eval)
self.assertFalse(task.teacher.params.is_eval)
self.assertIsNotNone(task.teacher.params.input)
self.assertFalse(task.student.params.is_eval)
self.assertIsNotNone(task.student.params.input)
metrics = task.FPropDefaultTheta()
self.assertItemsEqual(['loss', 'num_samples_in_batch'],
list(metrics.keys()))
task.BProp()
# Expected side effects of BProp().
self.assertIsNotNone(task.train_op)
self.assertIsNotNone(task.total_examples)
with self.session() as sess:
tf.global_variables_initializer().run()
variables = {}
values_before_training = {}
values_after_training = {}
for child in ('teacher', 'student'):
variables[child] = {
k: v
for k, v in getattr(task, child).vars.FlattenItems()
}
values_before_training[child] = sess.run(variables[child])
# Train for a few steps.
for _ in range(10):
sess.run(task.train_op)
for child in ('teacher', 'student'):
values_after_training[child] = sess.run(variables[child])
for k, v in six.iteritems(values_after_training[child]):
print('Comparing variable %s' % k)
if child == 'teacher':
# Teacher vars should not change after training.
self.assertAllEqual(values_before_training[child][k], v)
else:
# Student vars should change after training.
self.assertNotAlmostEqual(values_before_training[child][k], v)
class SingleTaskModelTest(tf.test.TestCase):
def testInit(self):
p = base_model.SingleTaskModel.Params()
p.task = BaseTaskTest.TestParams()
p.task.input = base_input_generator.BaseSequenceInputGenerator.Params()
model = p.cls(p)
self.assertEqual(model.params.name, model.GetTask().params.name)
self.assertEqual(model.params.task, model.GetTask().params)
self.assertEqual(len(model.tasks), 1)
self.assertEqual(model.tasks[0], model.GetTask())
self.assertEqual(model.tasks[0], model.SampleTask(None))
def testExponentialMovingAverage(self):
p = base_model.SingleTaskModel.Params()
p.task = BaseTaskTest.TestParams()
p.task.input = base_input_generator.BaseSequenceInputGenerator.Params()
p.train.ema_decay = 0.9
model = p.cls(p)
model._task.CreateChild('a',
layers.BatchNormLayer.Params().Set(name='a', dim=1))
model._task._train_op = tf.no_op()
model._task.ApplyExponentialMovingAverage(model.ema)
with tf.variable_scope('', reuse=True):
beta = tf.get_variable('a/beta/var')
mean = tf.get_variable('a/moving_mean/var')
self.assertIsNotNone(model.ema.average(beta))
self.assertIsNone(model.ema.average(mean))
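    # With train.ema_decay set, ApplyExponentialMovingAverage is expected to shadow
    # the trainable BatchNorm parameter 'a/beta/var' but not the moving statistic
    # 'a/moving_mean/var' (which is not a trainable parameter), which is exactly what
    # the two assertions above check.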
class MultiTaskModelTest(tf.test.TestCase):
def testInit(self):
p = base_model.MultiTaskModel.Params()
p.name = 'MultiTaskModel'
p0 = BaseTaskTest.TestParams()
p1 = BaseTaskTest.TestParams()
p.input = base_model_params.MultiTaskModelParams().Train()
p.input.Define('a',
base_input_generator.BaseSequenceInputGenerator.Params(), '')
p.input.Define('b',
base_input_generator.BaseSequenceInputGenerator.Params(), '')
p.task_params = hyperparams.Params()
p.task_params.Define('a', p0, '')
p.task_params.Define('b', p1, '')
p.task_probs = hyperparams.Params()
p.task_probs.Define('a', 0.5, '')
p.task_probs.Define('b', 0.5, '')
model = p.cls(p)
self.assertEqual(len(model.tasks), 2)
self.assertEqual(set(model.task_names), {'a', 'b'})
self.assertEqual(set(model.tasks), {model.GetTask('a'), model.GetTask('b')})
self.assertEqual(model.params.task_params.a, model.GetTask('a').params)
self.assertEqual(model.params.task_params.b, model.GetTask('b').params)
def _setUpTestSampleTask(self):
np.random.seed(_NUMPY_RANDOM_SEED)
    # define and initialize tasks, model and params
p = base_model.MultiTaskModel.Params()
p.name = 'MultiTaskModel'
p0 = BaseTaskTest.TestParams()
p1 = BaseTaskTest.TestParams()
p.input = base_model_params.MultiTaskModelParams().Train()
p.input.Define('a',
base_input_generator.BaseSequenceInputGenerator.Params(), '')
p.input.Define('b',
base_input_generator.BaseSequenceInputGenerator.Params(), '')
p.task_params = hyperparams.Params()
p.task_params.Define('a', p0, '')
p.task_params.Define('b', p1, '')
return p
def _testSampleTaskHelper(self, p):
model = p.cls(p)
task_to_id = {model.children['a']: 'a', model.children['b']: 'b'}
task_counts = {'a': 0, 'b': 0}
# initialize tensorflow graph and global step
with self.session() as sess:
tf.global_variables_initializer().run()
global_step = sess.run(model.global_step)
for _ in range(100):
task = model.SampleTask(global_step)
task_counts[task_to_id[task]] += 1
self.assertEqual(task_counts['a'], 83)
self.assertEqual(task_counts['b'], 17)
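    # With the fixed numpy seed set in _setUpTestSampleTask, drawing 100 samples from
    # an 80/20 task distribution yields exactly 83 picks of 'a' and 17 of 'b', so the
    # same helper serves both the task_probs and the ConstantScheduler variants below.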
def testSampleTaskSpecifiedWithoutScheduler(self):
"""Expected distribution: 'a': 0.8 , 'b': 0.2."""
p = self._setUpTestSampleTask()
p.task_probs = hyperparams.Params()
p.task_probs.Define('a', 0.8, '')
p.task_probs.Define('b', 0.2, '')
self._testSampleTaskHelper(p)
def testSampleTask(self):
"""Expected distribution: 'a': 0.8 , 'b': 0.2."""
p = self._setUpTestSampleTask()
p.task_schedule = task_scheduler.ConstantScheduler.Params()
p.task_schedule.task_probs = [('a', 0.8), ('b', 0.2)]
self._testSampleTaskHelper(p)
if __name__ == '__main__':
tf.test.main()
| [
"tensorflow.is_nan",
"tensorflow.get_variable",
"tensorflow.constant",
"numpy.random.seed",
"tensorflow.is_finite",
"tensorflow.ones_like",
"tensorflow.test.main",
"tensorflow.global_variables_initializer",
"tensorflow.no_op",
"tensorflow.log",
"tensorflow.variable_scope"
] | lingvo/core/base_model_test.py | [(390, 'tensorflow.test.main', 'tf.test.main', ([], {}), True, 'import tensorflow as tf\n'), (61, 'lingvo.core.base_model.BaseTask.Params', 'base_model.BaseTask.Params', ([], {}), False, 'from lingvo.core import base_model\n'), (63, 'lingvo.core.base_encoder.BaseEncoder.Params', 'base_encoder.BaseEncoder.Params', ([], {}), False, 'from lingvo.core import base_encoder\n'), (65, 'lingvo.core.base_decoder.BaseDecoder.Params', 'base_decoder.BaseDecoder.Params', ([], {}), False, 'from lingvo.core import base_decoder\n'), (71, 'lingvo.core.base_input_generator.BaseSequenceInputGenerator.Params', 'base_input_generator.BaseSequenceInputGenerator.Params', ([], {}), False, 'from lingvo.core import base_input_generator\n'), (76, 'lingvo.core.base_input_generator.BaseSequenceInputGenerator.Params', 'base_input_generator.BaseSequenceInputGenerator.Params', ([], {}), False, 'from lingvo.core import base_input_generator\n'), (97, 'lingvo.core.base_input_generator.BaseSequenceInputGenerator.Params', 'base_input_generator.BaseSequenceInputGenerator.Params', ([], {}), False, 'from lingvo.core import base_input_generator\n'), (118, 'lingvo.core.base_input_generator.BaseSequenceInputGenerator.Params', 'base_input_generator.BaseSequenceInputGenerator.Params', ([], {}), False, 'from lingvo.core import base_input_generator\n'), (140, 'lingvo.core.base_input_generator.BaseSequenceInputGenerator.Params', 'base_input_generator.BaseSequenceInputGenerator.Params', ([], {}), False, 'from lingvo.core import base_input_generator\n'), (195, 'tensorflow.constant', 'tf.constant', (['(1)'], {}), True, 'import tensorflow as tf\n'), (275, 'lingvo.core.base_model.SingleTaskModel.Params', 'base_model.SingleTaskModel.Params', ([], {}), False, 'from lingvo.core import base_model\n'), (277, 'lingvo.core.base_input_generator.BaseSequenceInputGenerator.Params', 'base_input_generator.BaseSequenceInputGenerator.Params', ([], {}), False, 'from lingvo.core import base_input_generator\n'), (286, 'lingvo.core.base_model.SingleTaskModel.Params', 'base_model.SingleTaskModel.Params', ([], {}), False, 'from lingvo.core import base_model\n'), (288, 'lingvo.core.base_input_generator.BaseSequenceInputGenerator.Params', 'base_input_generator.BaseSequenceInputGenerator.Params', ([], {}), False, 'from lingvo.core import base_input_generator\n'), (293, 'tensorflow.no_op', 'tf.no_op', ([], {}), True, 'import tensorflow as tf\n'), (305, 'lingvo.core.base_model.MultiTaskModel.Params', 'base_model.MultiTaskModel.Params', ([], {}), False, 'from lingvo.core import base_model\n'), (316, 'lingvo.core.hyperparams.Params', 'hyperparams.Params', ([], {}), False, 'from lingvo.core import hyperparams\n'), (320, 'lingvo.core.hyperparams.Params', 'hyperparams.Params', ([], {}), False, 'from lingvo.core import hyperparams\n'), (332, 'numpy.random.seed', 'np.random.seed', (['_NUMPY_RANDOM_SEED'], {}), True, 'import numpy as np\n'), (335, 'lingvo.core.base_model.MultiTaskModel.Params', 'base_model.MultiTaskModel.Params', ([], {}), False, 'from lingvo.core import base_model\n'), (346, 'lingvo.core.hyperparams.Params', 'hyperparams.Params', ([], {}), False, 'from lingvo.core import hyperparams\n'), (373, 'lingvo.core.hyperparams.Params', 'hyperparams.Params', ([], {}), False, 'from lingvo.core import hyperparams\n'), (383, 'lingvo.core.task_scheduler.ConstantScheduler.Params', 'task_scheduler.ConstantScheduler.Params', ([], {}), False, 'from lingvo.core import task_scheduler\n'), (47, 'lingvo.core.base_model.StatsCounter', 
'base_model.StatsCounter', (['"""foo"""'], {}), False, 'from lingvo.core import base_model\n'), (49, 'lingvo.core.base_layer.BaseLayer.Params', 'base_layer.BaseLayer.Params', ([], {}), False, 'from lingvo.core import base_layer\n'), (167, 'tensorflow.variable_scope', 'tf.variable_scope', (['p.name'], {}), True, 'import tensorflow as tf\n'), (182, 'tensorflow.variable_scope', 'tf.variable_scope', (['p.name'], {}), True, 'import tensorflow as tf\n'), (257, 'six.moves.range', 'range', (['(10)'], {}), False, 'from six.moves import range\n'), (295, 'tensorflow.variable_scope', 'tf.variable_scope', (['""""""'], {'reuse': '(True)'}), True, 'import tensorflow as tf\n'), (296, 'tensorflow.get_variable', 'tf.get_variable', (['"""a/beta/var"""'], {}), True, 'import tensorflow as tf\n'), (297, 'tensorflow.get_variable', 'tf.get_variable', (['"""a/moving_mean/var"""'], {}), True, 'import tensorflow as tf\n'), (312, 'lingvo.core.base_input_generator.BaseSequenceInputGenerator.Params', 'base_input_generator.BaseSequenceInputGenerator.Params', ([], {}), False, 'from lingvo.core import base_input_generator\n'), (314, 'lingvo.core.base_input_generator.BaseSequenceInputGenerator.Params', 'base_input_generator.BaseSequenceInputGenerator.Params', ([], {}), False, 'from lingvo.core import base_input_generator\n'), (342, 'lingvo.core.base_input_generator.BaseSequenceInputGenerator.Params', 'base_input_generator.BaseSequenceInputGenerator.Params', ([], {}), False, 'from lingvo.core import base_input_generator\n'), (344, 'lingvo.core.base_input_generator.BaseSequenceInputGenerator.Params', 'base_input_generator.BaseSequenceInputGenerator.Params', ([], {}), False, 'from lingvo.core import base_input_generator\n'), (362, 'six.moves.range', 'range', (['(100)'], {}), False, 'from six.moves import range\n'), (262, 'six.iteritems', 'six.iteritems', (['values_after_training[child]'], {}), False, 'import six\n'), (310, 'lingvo.core.base_model_params.MultiTaskModelParams', 'base_model_params.MultiTaskModelParams', ([], {}), False, 'from lingvo.core import base_model_params\n'), (340, 'lingvo.core.base_model_params.MultiTaskModelParams', 'base_model_params.MultiTaskModelParams', ([], {}), False, 'from lingvo.core import base_model_params\n'), (52, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (80, 'lingvo.core.py_utils.WeightInit.Constant', 'py_utils.WeightInit.Constant', (['(0)'], {}), False, 'from lingvo.core import py_utils\n'), (82, 'tensorflow.ones_like', 'tf.ones_like', (['var_a'], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (101, 'lingvo.core.py_utils.WeightInit.Constant', 'py_utils.WeightInit.Constant', (['(0)'], {}), False, 'from lingvo.core import py_utils\n'), (104, 'tensorflow.log', 'tf.log', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (122, 'lingvo.core.py_utils.WeightInit.Constant', 'py_utils.WeightInit.Constant', (['(0)'], {}), False, 'from lingvo.core import py_utils\n'), (129, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (144, 'lingvo.core.py_utils.WeightInit.Constant', 'py_utils.WeightInit.Constant', (['(0)'], {}), False, 'from lingvo.core import py_utils\n'), (151, 
'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (292, 'lingvo.core.layers.BatchNormLayer.Params', 'layers.BatchNormLayer.Params', ([], {}), False, 'from lingvo.core import layers\n'), (360, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (91, 'tensorflow.is_nan', 'tf.is_nan', (['final_var_grads.a[1]'], {}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.is_finite', 'tf.is_finite', (['final_var_grads.a[1]'], {}), True, 'import tensorflow as tf\n'), (112, 'tensorflow.is_nan', 'tf.is_nan', (['final_var_grads.a[1]'], {}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.is_finite', 'tf.is_finite', (['final_var_grads.a[1]'], {}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.log', 'tf.log', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.is_nan', 'tf.is_nan', (['final_var_grads.a[1]'], {}), True, 'import tensorflow as tf\n'), (134, 'tensorflow.is_finite', 'tf.is_finite', (['final_var_grads.a[1]'], {}), True, 'import tensorflow as tf\n'), (147, 'tensorflow.log', 'tf.log', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (170, 'lingvo.core.py_utils.WeightInit.Constant', 'py_utils.WeightInit.Constant', (['(0)'], {}), False, 'from lingvo.core import py_utils\n'), (185, 'lingvo.core.py_utils.WeightInit.Uniform', 'py_utils.WeightInit.Uniform', ([], {}), False, 'from lingvo.core import py_utils\n'), (157, 'tensorflow.is_nan', 'tf.is_nan', (['final_var_grads.a[1]'], {}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.is_finite', 'tf.is_finite', (['final_var_grads.a[1]'], {}), True, 'import tensorflow as tf\n')] |
Taosheng-ty/ULTRA | 2541982cb21e0acccbe66cd4437194e40e0828ef | """Training and testing the dual learning algorithm for unbiased learning to rank.
See the following paper for more information on the dual learning algorithm.
* Qingyao Ai, Keping Bi, Cheng Luo, Jiafeng Guo, W. Bruce Croft. 2018. Unbiased Learning to Rank with Unbiased Propensity Estimation. In Proceedings of SIGIR '18
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import numpy as np
import tensorflow as tf
import tensorflow_ranking as tfr
import copy
import itertools
from six.moves import zip
from tensorflow import dtypes
from ultra.learning_algorithm.base_algorithm import BaseAlgorithm
import ultra.utils as utils
def sigmoid_prob(logits):
return tf.sigmoid(logits - tf.reduce_mean(logits, -1, keep_dims=True))
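# Illustrative usage sketch for sigmoid_prob() (not part of the original training
# pipeline): the logits of each ranked list are centred by their mean before the
# sigmoid, so the resulting probabilities are comparable across lists. The toy
# shape [1, 3] below is an assumption made purely for illustration; the snippet
# relies on the module-level `import tensorflow as tf` above.
def _example_sigmoid_prob_usage():
    example_logits = tf.constant([[2.0, 0.5, -1.0]])  # assumed [batch_size, list_size] toy scores
    return sigmoid_prob(example_logits)  # same shape, each value in (0, 1)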
class DLA_atten(BaseAlgorithm):
"""The Dual Learning Algorithm for unbiased learning to rank.
This class implements the Dual Learning Algorithm (DLA) based on the input layer
    feed. See the following paper for more information on the dual learning algorithm.
* Qingyao Ai, Keping Bi, Cheng Luo, Jiafeng Guo, W. Bruce Croft. 2018. Unbiased Learning to Rank with Unbiased Propensity Estimation. In Proceedings of SIGIR '18
"""
def __init__(self, data_set, exp_settings, forward_only=False):
"""Create the model.
Args:
data_set: (Raw_data) The dataset used to build the input layer.
exp_settings: (dictionary) The dictionary containing the model settings.
forward_only: Set true to conduct prediction only, false to conduct training.
"""
print('Build DLA atten')
self.hparams = tf.contrib.training.HParams(
learning_rate=0.05, # Learning rate.
max_gradient_norm=5.0, # Clip gradients to this norm.
loss_func='click_weighted_softmax_cross_entropy', # Select Loss function
logits_to_prob='softmax', # the function used to convert logits to probability distributions
ranker_learning_rate=-1.0, # The learning rate for ranker (-1 means same with learning_rate).
ranker_loss_weight=1.0, # Set the weight of unbiased ranking loss
l2_loss=0.0, # Set strength for L2 regularization.
l1_loss=0.0,
max_propensity_weight = -1, # Set maximum value for propensity weights
constant_propensity_initialization = False, # Set true to initialize propensity with constants.
grad_strategy='ada', # Select gradient strategy
)
print(exp_settings['learning_algorithm_hparams'])
self.model=None
self.hparams.parse(exp_settings['learning_algorithm_hparams'])
self.exp_settings = exp_settings
self.max_candidate_num = exp_settings['max_candidate_num']
self.feature_size = data_set.feature_size
if self.hparams.ranker_learning_rate < 0:
self.ranker_learning_rate = tf.Variable(float(self.hparams.learning_rate), trainable=False)
else:
self.ranker_learning_rate = tf.Variable(float(self.hparams.ranker_learning_rate), trainable=False)
self.learning_rate = self.ranker_learning_rate
# self.weighs_propen=
# Feeds for inputs.
self.is_training = tf.placeholder(tf.bool, name="is_train")
self.docid_inputs = [] # a list of top documents
self.letor_features = tf.placeholder(tf.float32, shape=[None, self.feature_size],
name="letor_features") # the letor features for the documents
self.labels = [] # the labels for the documents (e.g., clicks)
self.types=[]
for i in range(self.max_candidate_num):
self.docid_inputs.append(tf.placeholder(tf.int64, shape=[None],
name="docid_input{0}".format(i)))
self.labels.append(tf.placeholder(tf.float32, shape=[None],
name="label{0}".format(i)))
self.types.append(tf.placeholder(tf.float32, shape=[None],
name="type{0}".format(i)))
self.global_step = tf.Variable(0, trainable=False)
# Select logits to prob function
self.logits_to_prob = tf.nn.softmax
if self.hparams.logits_to_prob == 'sigmoid':
self.logits_to_prob = sigmoid_prob
self.output = self.ranking_model(self.max_candidate_num, scope='ranking_model')
pad_removed_output = self.remove_padding_for_metric_eval(self.docid_inputs, self.output)
reshaped_labels = tf.transpose(tf.convert_to_tensor(self.labels)) # reshape from [max_candidate_num, ?] to [?, max_candidate_num]
for metric in self.exp_settings['metrics']:
for topn in self.exp_settings['metrics_topn']:
metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_labels, pad_removed_output, None)
tf.summary.scalar('%s_%d' % (metric, topn), metric_value, collections=['eval'])
if not forward_only:
# Build model
self.rank_list_size = exp_settings['train_list_cutoff']
train_output = self.ranking_model(self.rank_list_size, scope='ranking_model')
self.propensity = self.DenoisingNet(self.rank_list_size, forward_only)
train_labels = self.labels[:self.rank_list_size]
print('Loss Function is ' + self.hparams.loss_func)
# Select loss function
self.loss_func = None
if self.hparams.loss_func == 'click_weighted_softmax_cross_entropy':
self.loss_func = self.click_weighted_softmax_cross_entropy_loss
elif self.hparams.loss_func == 'click_weighted_log_loss':
self.loss_func = self.click_weighted_log_loss
elif self.hparams.loss_func == 'click_weighted_pairwise_loss':
self.loss_func = self.click_weighted_pairwise_loss
else: # softmax loss without weighting
self.loss_func = self.softmax_loss
# Compute rank loss
reshaped_train_labels = tf.transpose(tf.convert_to_tensor(train_labels)) # reshape from [rank_list_size, ?] to [?, rank_list_size]
self.propensity_weights = self.get_normalized_weights(self.logits_to_prob(self.propensity))
self.rank_loss = self.loss_func(train_output, reshaped_train_labels, self.propensity_weights)
pw_list = tf.unstack(self.propensity_weights, axis=1) # Compute propensity weights
self.click_metrics=self.click_loglikelihood(reshaped_train_labels,\
self.propensity,train_output)
tf.summary.scalar('click_metrics',self.click_metrics,collections=['train'])
for i in range(len(pw_list)):
tf.summary.scalar('Inverse Propensity weights %d' % i, tf.reduce_mean(pw_list[i]), collections=['train'])
tf.summary.scalar('Rank Loss', tf.reduce_mean(self.rank_loss), collections=['train'])
# Compute examination loss
self.relevance_weights = self.get_normalized_weights(self.logits_to_prob(train_output))
self.exam_loss = self.loss_func(self.propensity, reshaped_train_labels, self.relevance_weights)
            rw_list = tf.unstack(self.relevance_weights, axis=1) # Compute relevance weights
for i in range(len(rw_list)):
tf.summary.scalar('Relevance weights %d' % i, tf.reduce_mean(rw_list[i]), collections=['train'])
tf.summary.scalar('Exam Loss', tf.reduce_mean(self.exam_loss), collections=['train'])
# Gradients and SGD update operation for training the model.
self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss
# Select optimizer
self.optimizer_func = tf.train.AdagradOptimizer
if self.hparams.grad_strategy == 'sgd':
self.optimizer_func = tf.train.GradientDescentOptimizer
self.separate_gradient_update()
tf.summary.scalar('Gradient Norm', self.norm, collections=['train'])
tf.summary.scalar('Learning Rate', self.ranker_learning_rate, collections=['train'])
tf.summary.scalar('Final Loss', tf.reduce_mean(self.loss), collections=['train'])
clipped_labels = tf.clip_by_value(reshaped_train_labels, clip_value_min=0, clip_value_max=1)
pad_removed_train_output = self.remove_padding_for_metric_eval(self.docid_inputs, train_output)
for metric in self.exp_settings['metrics']:
for topn in self.exp_settings['metrics_topn']:
list_weights = tf.reduce_mean(self.propensity_weights * clipped_labels, axis=1, keep_dims=True)
metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_train_labels, pad_removed_train_output, None)
tf.summary.scalar('%s_%d' % (metric, topn), metric_value, collections=['train'])
weighted_metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_train_labels, pad_removed_train_output, list_weights)
tf.summary.scalar('Weighted_%s_%d' % (metric, topn), weighted_metric_value, collections=['train'])
self.train_summary = tf.summary.merge_all(key='train')
self.eval_summary = tf.summary.merge_all(key='eval')
self.saver = tf.train.Saver(tf.global_variables())
def separate_gradient_update(self):
denoise_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "denoising_model")
ranking_model_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "ranking_model")
self.weighs_propen=denoise_params
if self.hparams.l2_loss > 0:
for p in denoise_params:
# self.weighs_propen=p
# p=tf.Print(p,[p],message="show the weights")
self.exam_loss += self.hparams.l1_loss * tf.reduce_sum(tf.abs(p))
for p in ranking_model_params:
self.rank_loss += self.hparams.l2_loss * tf.nn.l2_loss(p)
self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss
denoise_gradients = tf.gradients(self.exam_loss, denoise_params)
ranking_model_gradients = tf.gradients(self.rank_loss, ranking_model_params)
if self.hparams.max_gradient_norm > 0:
denoise_gradients, denoise_norm = tf.clip_by_global_norm(denoise_gradients,
self.hparams.max_gradient_norm)
ranking_model_gradients, ranking_model_norm = tf.clip_by_global_norm(ranking_model_gradients,
self.hparams.max_gradient_norm * self.hparams.ranker_loss_weight)
self.norm = tf.global_norm(denoise_gradients + ranking_model_gradients)
opt_denoise = self.optimizer_func(self.hparams.learning_rate)
opt_ranker = self.optimizer_func(self.ranker_learning_rate)
denoise_updates = opt_denoise.apply_gradients(zip(denoise_gradients, denoise_params),
global_step=self.global_step)
ranker_updates = opt_ranker.apply_gradients(zip(ranking_model_gradients, ranking_model_params))
self.updates = tf.group(denoise_updates, ranker_updates)
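        # Design note: the examination (propensity) model and the ranking model are
        # updated by two separate optimizers built from the same optimizer_func. Their
        # gradients are clipped independently (the ranker's clip norm is scaled by
        # ranker_loss_weight), the combined global norm is exported through the
        # 'Gradient Norm' summary, and only the denoising update advances global_step.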
def DenoisingNet(self, list_size, forward_only=False, scope=None):
with tf.variable_scope(scope or "denoising_model"):
# If we are in testing, do not compute propensity
if forward_only:
return tf.ones_like(self.output)#, tf.ones_like(self.output)
input_vec_size = list_size*4
def propensity_network(input_data, index):
reuse = None if index < 1 else True
propensity_initializer = tf.constant_initializer(0.001) if self.hparams.constant_propensity_initialization else None
with tf.variable_scope("propensity_network", initializer=propensity_initializer,
reuse=reuse):
output_data = input_data
current_size = input_vec_size
output_sizes = [
int((list_size+1)/2) + 1,
int((list_size+1)/4) + 1,
1
]
for i in range(len(output_sizes)):
expand_W = tf.get_variable("W_%d" % i, [current_size, output_sizes[i]])
expand_b = tf.get_variable("b_%d" % i, [output_sizes[i]])
output_data = tf.nn.bias_add(tf.matmul(output_data, expand_W), expand_b)
output_data = tf.nn.elu(output_data)
current_size = output_sizes[i]
#expand_W = tf.get_variable("final_W", [current_size, 1])
#expand_b = tf.get_variable("final_b" , [1])
#output_data = tf.nn.bias_add(tf.matmul(output_data, expand_W), expand_b)
return output_data
output_propensity_list = []
for i in range(list_size):
# Add position information (one-hot vector)
click_feature = [tf.expand_dims(tf.zeros_like(self.labels[i]) , -1) for _ in range(4*list_size)]
click_feature[i] = tf.expand_dims(tf.ones_like(self.labels[i]) , -1)
# click_feature[list_size:]=[tf.expand_dims(tf.zeros_like(self.labels[i]) , -1) for _ in range(3*list_size)]
click_feature[list_size:list_size+i] =[tf.expand_dims(self.labels[k] , -1) for k in range(i-1,-1,-1)]
click_feature[2*list_size:2*list_size+i+1]=[tf.expand_dims(self.types[k] , -1) for k in range(i,-1,-1)]
click_feature[3*list_size:3*list_size+list_size-i-1]=[tf.expand_dims(self.types[k] , -1) for k in range(i+1,list_size)]
# Predict propensity with a simple network
output_propensity_list.append(propensity_network(tf.concat(click_feature, 1), i))
self.click_show=[click_feature[h][0] for h in range(4*list_size)]
return tf.concat(output_propensity_list,1)
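            # Each propensity-network input is a 4*list_size vector built above:
            #   [0, list_size)                           one-hot encoding of the current position i
            #   [list_size, list_size+i)                 clicks observed at earlier positions (reverse order)
            #   [2*list_size, 2*list_size+i+1)           document types up to and including position i (reverse order)
            #   [3*list_size, 3*list_size+list_size-i-1) document types of the positions after i
            # so the propensity model conditions on both the click history and the
            # type context around the position being scored.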
def step(self, session, input_feed, forward_only):
"""Run a step of the model feeding the given inputs.
Args:
session: (tf.Session) tensorflow session to use.
input_feed: (dictionary) A dictionary containing all the input feed data.
forward_only: whether to do the backward step (False) or only forward (True).
Returns:
A triple consisting of the loss, outputs (None if we do backward),
and a tf.summary containing related information about the step.
"""
# Output feed: depends on whether we do a backward step or not.
if not forward_only:
input_feed[self.is_training.name] = True
output_feed = [self.updates, # Update Op that does SGD.
self.loss, # Loss for this batch.
# self.click_show,
self.weighs_propen,
self.global_step,
self.train_summary # Summarize statistics.
]
else:
input_feed[self.is_training.name] = False
output_feed = [
self.eval_summary, # Summarize statistics.
self.output # Model outputs
]
outputs = session.run(output_feed, input_feed)
if not forward_only:
# print(outputs[3],"global step")
# if outputs[3]%50==0:
# print(outputs[2])
return outputs[1], None, outputs[-1] # loss, no outputs, summary.
else:
return None, outputs[1], outputs[0] # no loss, outputs, summary.
def softmax_loss(self, output, labels, propensity=None, name=None):
"""Computes listwise softmax loss without propensity weighting.
Args:
output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
relevant example.
propensity: No use.
name: A string used as the name for this variable scope.
Returns:
(tf.Tensor) A single value tensor containing the loss.
"""
loss = None
with tf.name_scope(name, "softmax_loss",[output]):
label_dis = labels / tf.reduce_sum(labels, 1, keep_dims=True)
loss = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=label_dis) * tf.reduce_sum(labels, 1)
return tf.reduce_sum(loss) / tf.reduce_sum(labels)
def get_normalized_weights(self, propensity):
"""Computes listwise softmax loss with propensity weighting.
Args:
propensity: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element.
Returns:
(tf.Tensor) A tensor containing the propensity weights.
"""
propensity_list = tf.unstack(propensity, axis=1) # Compute propensity weights
pw_list = []
for i in range(len(propensity_list)):
pw_i = propensity_list[0] / propensity_list[i]
pw_list.append(pw_i)
propensity_weights = tf.stack(pw_list, axis=1)
if self.hparams.max_propensity_weight > 0:
propensity_weights = tf.clip_by_value(propensity_weights, clip_value_min=0, clip_value_max=self.hparams.max_propensity_weight)
return propensity_weights
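        # Worked example (illustrative numbers only): if one list's propensity column is
        # [0.5, 0.25, 0.125], the weights above become [0.5/0.5, 0.5/0.25, 0.5/0.125]
        # = [1, 2, 4], i.e. every position is weighted relative to the first position,
        # and max_propensity_weight (when > 0) caps how large these inverse propensity
        # weights may grow.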
def click_weighted_softmax_cross_entropy_loss(self, output, labels, propensity_weights, name=None):
"""Computes listwise softmax loss with propensity weighting.
Args:
output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
relevant example.
propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element.
name: A string used as the name for this variable scope.
Returns:
(tf.Tensor) A single value tensor containing the loss.
"""
loss = None
with tf.name_scope(name, "click_softmax_cross_entropy",[output]):
label_dis = labels*propensity_weights / tf.reduce_sum(labels*propensity_weights, 1, keep_dims=True)
loss = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=label_dis) * tf.reduce_sum(labels*propensity_weights, 1)
return tf.reduce_sum(loss) / tf.reduce_sum(labels*propensity_weights)
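            # Each click is reweighted by its inverse propensity weight before the
            # listwise softmax cross entropy is applied:
            #   label_dis_i = (c_i * w_i) / sum_j (c_j * w_j)
            #   per-list loss = CE(softmax(output), label_dis) * sum_j (c_j * w_j)
            # and the final division normalizes by the total propensity-weighted clicks,
            # giving the IPW-corrected counterpart of softmax_loss above.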
    def click_loglikelihood(self, labels, propensity, train_output, name=None):
"""Computes listwise softmax loss with propensity weighting.
Args:
output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
relevant example.
propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element.
name: A string used as the name for this variable scope.
Returns:
(tf.Tensor) A single value tensor containing the loss.
"""
# loss = None
with tf.name_scope(name, "click_loglikelihood"):
ob_prob=tf.nn.softmax(propensity)
rel_prob=tf.nn.softmax(train_output)
click_prob=ob_prob*rel_prob
click_prob_norm=click_prob/tf.reduce_sum(click_prob,axis=1,keep_dims=True)
label_dis = labels/ tf.reduce_sum(labels, 1, keep_dims=True)
entropy = tf.reduce_sum(tf.math.log(click_prob_norm)*label_dis,1)
return tf.reduce_mean(entropy)
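            # This diagnostic factorizes the click probability as
            #   P(click) = P(observed) * P(relevant)
            # with both factors given by per-list softmaxes (ob_prob from the propensity
            # model, rel_prob from the ranker), renormalizes it within each list, and
            # reports the mean log likelihood of the observed click distribution, which
            # is logged as the 'click_metrics' summary during training.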
def click_weighted_pairwise_loss(self, output, labels, propensity_weights, name=None):
"""Computes pairwise entropy loss with propensity weighting.
Args:
output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
relevant example.
propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element.
name: A string used as the name for this variable scope.
Returns:
(tf.Tensor) A single value tensor containing the loss.
"""
loss = None
with tf.name_scope(name, "click_weighted_pairwise_loss",[output]):
sliced_output = tf.unstack(output, axis=1)
sliced_label = tf.unstack(labels, axis=1)
sliced_propensity = tf.unstack(propensity_weights, axis=1)
for i in range(len(sliced_output)):
for j in range(i+1, len(sliced_output)):
cur_label_weight = tf.math.sign(sliced_label[i] - sliced_label[j])
cur_propensity = sliced_propensity[i] * sliced_label[i] + sliced_propensity[j] * sliced_label[j]
cur_pair_loss = -tf.exp(sliced_output[i]) / (tf.exp(sliced_output[i]) + tf.exp(sliced_output[j]))
                    if loss is None:
                        loss = cur_label_weight * cur_pair_loss * cur_propensity
                    else:
                        # accumulate each weighted pair exactly once
                        loss += cur_label_weight * cur_pair_loss * cur_propensity
batch_size = tf.shape(labels[0])[0]
return tf.reduce_sum(loss) / tf.cast(batch_size, dtypes.float32) #/ (tf.reduce_sum(propensity_weights)+1)
def click_weighted_log_loss(self, output, labels, propensity_weights, name=None):
"""Computes pointwise sigmoid loss with propensity weighting.
Args:
output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
relevant example.
propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element.
name: A string used as the name for this variable scope.
Returns:
(tf.Tensor) A single value tensor containing the loss.
"""
loss = None
with tf.name_scope(name, "click_weighted_log_loss",[output]):
click_prob = tf.sigmoid(output)
loss = tf.losses.log_loss(labels, click_prob, propensity_weights)
return loss
| [
"tensorflow.convert_to_tensor",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.math.sign",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.global_variables",
"tensorflow.cast",
"tensorflow.nn.l2_loss",
"tensorflow.losses.log_loss",
"tensorflow.group",
"tensorflow.summary.scalar",
"tensorflow.Variable",
"tensorflow.get_collection",
"tensorflow.gradients",
"tensorflow.name_scope",
"tensorflow.matmul",
"tensorflow.nn.elu",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.exp",
"tensorflow.zeros_like",
"tensorflow.summary.merge_all",
"tensorflow.contrib.training.HParams",
"tensorflow.global_norm",
"tensorflow.clip_by_value",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.sigmoid",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.math.log",
"tensorflow.constant_initializer",
"tensorflow.clip_by_global_norm",
"tensorflow.variable_scope",
"tensorflow.abs"
] | ultra/learning_algorithm/dla_attention.py | [(53, 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'learning_rate': '(0.05)', 'max_gradient_norm': '(5.0)', 'loss_func': '"""click_weighted_softmax_cross_entropy"""', 'logits_to_prob': '"""softmax"""', 'ranker_learning_rate': '(-1.0)', 'ranker_loss_weight': '(1.0)', 'l2_loss': '(0.0)', 'l1_loss': '(0.0)', 'max_propensity_weight': '(-1)', 'constant_propensity_initialization': '(False)', 'grad_strategy': '"""ada"""'}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'name': '"""is_train"""'}), True, 'import tensorflow as tf\n'), (82, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.feature_size]', 'name': '"""letor_features"""'}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), True, 'import tensorflow as tf\n'), (172, 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {'key': '"""train"""'}), True, 'import tensorflow as tf\n'), (173, 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {'key': '"""eval"""'}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', '"""denoising_model"""'], {}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', '"""ranking_model"""'], {}), True, 'import tensorflow as tf\n'), (189, 'tensorflow.gradients', 'tf.gradients', (['self.exam_loss', 'denoise_params'], {}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.gradients', 'tf.gradients', (['self.rank_loss', 'ranking_model_params'], {}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.global_norm', 'tf.global_norm', (['(denoise_gradients + ranking_model_gradients)'], {}), True, 'import tensorflow as tf\n'), (205, 'tensorflow.group', 'tf.group', (['denoise_updates', 'ranker_updates'], {}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.concat', 'tf.concat', (['output_propensity_list', '(1)'], {}), True, 'import tensorflow as tf\n'), (322, 'tensorflow.unstack', 'tf.unstack', (['propensity'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (327, 'tensorflow.stack', 'tf.stack', (['pw_list'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (375, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['entropy'], {}), True, 'import tensorflow as tf\n'), (31, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['logits', '(-1)'], {'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.labels'], {}), True, 'import tensorflow as tf\n'), (132, 'tensorflow.unstack', 'tf.unstack', (['self.propensity_weights'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""click_metrics"""', 'self.click_metrics'], {'collections': "['train']"}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.unstack', 'tf.unstack', (['self.relevance_weights'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Gradient Norm"""', 'self.norm'], {'collections': "['train']"}), True, 'import tensorflow as tf\n'), (159, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Learning Rate"""', 'self.ranker_learning_rate'], {'collections': "['train']"}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.clip_by_value', 
'tf.clip_by_value', (['reshaped_train_labels'], {'clip_value_min': '(0)', 'clip_value_max': '(1)'}), True, 'import tensorflow as tf\n'), (174, 'tensorflow.global_variables', 'tf.global_variables', ([], {}), True, 'import tensorflow as tf\n'), (192, 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['denoise_gradients', 'self.hparams.max_gradient_norm'], {}), True, 'import tensorflow as tf\n'), (194, 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['ranking_model_gradients', '(self.hparams.max_gradient_norm * self.hparams.ranker_loss_weight)'], {}), True, 'import tensorflow as tf\n'), (201, 'six.moves.zip', 'zip', (['denoise_gradients', 'denoise_params'], {}), False, 'from six.moves import zip\n'), (203, 'six.moves.zip', 'zip', (['ranking_model_gradients', 'ranking_model_params'], {}), False, 'from six.moves import zip\n'), (208, 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'denoising_model')"], {}), True, 'import tensorflow as tf\n'), (308, 'tensorflow.name_scope', 'tf.name_scope', (['name', '"""softmax_loss"""', '[output]'], {}), True, 'import tensorflow as tf\n'), (311, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), True, 'import tensorflow as tf\n'), (311, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['labels'], {}), True, 'import tensorflow as tf\n'), (329, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['propensity_weights'], {'clip_value_min': '(0)', 'clip_value_max': 'self.hparams.max_propensity_weight'}), True, 'import tensorflow as tf\n'), (347, 'tensorflow.name_scope', 'tf.name_scope', (['name', '"""click_softmax_cross_entropy"""', '[output]'], {}), True, 'import tensorflow as tf\n'), (350, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), True, 'import tensorflow as tf\n'), (350, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(labels * propensity_weights)'], {}), True, 'import tensorflow as tf\n'), (368, 'tensorflow.name_scope', 'tf.name_scope', (['name', '"""click_loglikelihood"""'], {}), True, 'import tensorflow as tf\n'), (369, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['propensity'], {}), True, 'import tensorflow as tf\n'), (370, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['train_output'], {}), True, 'import tensorflow as tf\n'), (392, 'tensorflow.name_scope', 'tf.name_scope', (['name', '"""click_weighted_pairwise_loss"""', '[output]'], {}), True, 'import tensorflow as tf\n'), (393, 'tensorflow.unstack', 'tf.unstack', (['output'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (394, 'tensorflow.unstack', 'tf.unstack', (['labels'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (395, 'tensorflow.unstack', 'tf.unstack', (['propensity_weights'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (404, 'tensorflow.shape', 'tf.shape', (['labels[0]'], {}), True, 'import tensorflow as tf\n'), (405, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), True, 'import tensorflow as tf\n'), (405, 'tensorflow.cast', 'tf.cast', (['batch_size', 'dtypes.float32'], {}), True, 'import tensorflow as tf\n'), (423, 'tensorflow.name_scope', 'tf.name_scope', (['name', '"""click_weighted_log_loss"""', '[output]'], {}), True, 'import tensorflow as tf\n'), (424, 'tensorflow.sigmoid', 'tf.sigmoid', (['output'], {}), True, 'import tensorflow as tf\n'), (425, 'tensorflow.losses.log_loss', 'tf.losses.log_loss', (['labels', 'click_prob', 'propensity_weights'], {}), True, 'import tensorflow as tf\n'), (107, 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('%s_%d' % (metric, topn))", 'metric_value'], {'collections': 
"['eval']"}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['train_labels'], {}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.rank_loss'], {}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.exam_loss'], {}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.loss'], {}), True, 'import tensorflow as tf\n'), (211, 'tensorflow.ones_like', 'tf.ones_like', (['self.output'], {}), True, 'import tensorflow as tf\n'), (309, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['labels', '(1)'], {'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (310, 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'output', 'labels': 'label_dis'}), True, 'import tensorflow as tf\n'), (310, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['labels', '(1)'], {}), True, 'import tensorflow as tf\n'), (348, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(labels * propensity_weights)', '(1)'], {'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (349, 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'output', 'labels': 'label_dis'}), True, 'import tensorflow as tf\n'), (349, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(labels * propensity_weights)', '(1)'], {}), True, 'import tensorflow as tf\n'), (372, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['click_prob'], {'axis': '(1)', 'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (373, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['labels', '(1)'], {'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (106, 'ultra.utils.make_ranking_metric_fn', 'utils.make_ranking_metric_fn', (['metric', 'topn'], {}), True, 'import ultra.utils as utils\n'), (137, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['pw_list[i]'], {}), True, 'import tensorflow as tf\n'), (145, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['rw_list[i]'], {}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(self.propensity_weights * clipped_labels)'], {'axis': '(1)', 'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('%s_%d' % (metric, topn))", 'metric_value'], {'collections': "['train']"}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('Weighted_%s_%d' % (metric, topn))", 'weighted_metric_value'], {'collections': "['train']"}), True, 'import tensorflow as tf\n'), (186, 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['p'], {}), True, 'import tensorflow as tf\n'), (216, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.001)'], {}), True, 'import tensorflow as tf\n'), (217, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""propensity_network"""'], {'initializer': 'propensity_initializer', 'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (241, 'tensorflow.ones_like', 'tf.ones_like', (['self.labels[i]'], {}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.expand_dims', 'tf.expand_dims', (['self.labels[k]', '(-1)'], {}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.expand_dims', 'tf.expand_dims', (['self.types[k]', '(-1)'], {}), True, 'import tensorflow as tf\n'), (245, 'tensorflow.expand_dims', 'tf.expand_dims', (['self.types[k]', '(-1)'], {}), True, 'import tensorflow as tf\n'), (374, 
'tensorflow.math.log', 'tf.math.log', (['click_prob_norm'], {}), True, 'import tensorflow as tf\n'), (398, 'tensorflow.math.sign', 'tf.math.sign', (['(sliced_label[i] - sliced_label[j])'], {}), True, 'import tensorflow as tf\n'), (167, 'ultra.utils.make_ranking_metric_fn', 'utils.make_ranking_metric_fn', (['metric', 'topn'], {}), True, 'import ultra.utils as utils\n'), (169, 'ultra.utils.make_ranking_metric_fn', 'utils.make_ranking_metric_fn', (['metric', 'topn'], {}), True, 'import ultra.utils as utils\n'), (184, 'tensorflow.abs', 'tf.abs', (['p'], {}), True, 'import tensorflow as tf\n'), (227, 'tensorflow.get_variable', 'tf.get_variable', (["('W_%d' % i)", '[current_size, output_sizes[i]]'], {}), True, 'import tensorflow as tf\n'), (228, 'tensorflow.get_variable', 'tf.get_variable', (["('b_%d' % i)", '[output_sizes[i]]'], {}), True, 'import tensorflow as tf\n'), (230, 'tensorflow.nn.elu', 'tf.nn.elu', (['output_data'], {}), True, 'import tensorflow as tf\n'), (240, 'tensorflow.zeros_like', 'tf.zeros_like', (['self.labels[i]'], {}), True, 'import tensorflow as tf\n'), (247, 'tensorflow.concat', 'tf.concat', (['click_feature', '(1)'], {}), True, 'import tensorflow as tf\n'), (229, 'tensorflow.matmul', 'tf.matmul', (['output_data', 'expand_W'], {}), True, 'import tensorflow as tf\n'), (400, 'tensorflow.exp', 'tf.exp', (['sliced_output[i]'], {}), True, 'import tensorflow as tf\n'), (400, 'tensorflow.exp', 'tf.exp', (['sliced_output[i]'], {}), True, 'import tensorflow as tf\n'), (400, 'tensorflow.exp', 'tf.exp', (['sliced_output[j]'], {}), True, 'import tensorflow as tf\n')] |
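The api_extract record above for ultra/learning_algorithm/dla_attention.py is dominated by one recorded pattern: click labels are scaled by inverse-propensity weights, normalised with tf.reduce_sum(..., 1, keep_dims=True), passed to tf.nn.softmax_cross_entropy_with_logits, and the summed loss is divided by the total weighted click mass (entries 308-311 and 347-350). The sketch below reconstructs that pattern from the recorded calls alone; the function name, argument shapes, and docstring are assumptions rather than text from the source file, and the max_propensity_weight clipping via tf.clip_by_value that the record also lists is omitted for brevity.

# Minimal sketch, assuming TF1-style graph mode (the record uses
# tf.placeholder, tf.contrib.training.HParams and keep_dims arguments).
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()


def click_weighted_softmax_cross_entropy_loss(output, labels,
                                              propensity_weights, name=None):
    """Hypothetical reconstruction of the propensity-weighted listwise loss.

    output:             [batch_size, list_size] ranking scores (logits).
    labels:             [batch_size, list_size] click labels in {0, 1}.
    propensity_weights: [batch_size, list_size] inverse propensity weights.
    """
    with tf.name_scope(name, "click_softmax_cross_entropy", [output]):
        # Turn the weighted clicks into a per-query probability distribution.
        # Assumes at least one click per query, as the recorded
        # normalisation does.
        weighted = labels * propensity_weights
        label_dis = weighted / tf.reduce_sum(weighted, 1, keepdims=True)
        # Listwise softmax cross entropy against that distribution, rescaled
        # by each query's total weighted click mass.
        loss = tf.nn.softmax_cross_entropy_with_logits(
            logits=output, labels=label_dis) * tf.reduce_sum(weighted, 1)
    # Average over the total weighted click mass across the batch.
    return tf.reduce_sum(loss) / tf.reduce_sum(weighted)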
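The same record also documents how the two sub-models are trained jointly (entries 177-205): trainable variables are fetched per scope with tf.get_collection, each loss gets its own tf.gradients and tf.clip_by_global_norm call (the ranker's clip norm scaled by ranker_loss_weight), tf.global_norm feeds the "Gradient Norm" summary, and the two apply-gradients ops are fused with tf.group. A hedged reconstruction follows; the Adagrad optimizer is only an assumption read off grad_strategy="ada", and the constants are copied from the recorded HParams call.

# Sketch of the joint denoising/ranking update implied by the record.
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

MAX_GRADIENT_NORM = 5.0    # hparams.max_gradient_norm in the record
RANKER_LOSS_WEIGHT = 1.0   # hparams.ranker_loss_weight
LEARNING_RATE = 0.05       # hparams.learning_rate


def build_joint_update(exam_loss, rank_loss):
    """Returns (train_op, gradient_norm) for the two variable scopes."""
    denoise_params = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, "denoising_model")
    ranking_params = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, "ranking_model")

    # Independent gradients for the examination (propensity) and ranking
    # losses, clipped with separate norms.
    denoise_grads = tf.gradients(exam_loss, denoise_params)
    ranking_grads = tf.gradients(rank_loss, ranking_params)
    denoise_grads, _ = tf.clip_by_global_norm(denoise_grads, MAX_GRADIENT_NORM)
    ranking_grads, _ = tf.clip_by_global_norm(
        ranking_grads, MAX_GRADIENT_NORM * RANKER_LOSS_WEIGHT)
    # Logged as the "Gradient Norm" summary in the record.
    norm = tf.global_norm(denoise_grads + ranking_grads)

    # grad_strategy="ada" is assumed to mean Adagrad here.
    opt = tf.train.AdagradOptimizer(LEARNING_RATE)
    denoise_updates = opt.apply_gradients(
        list(zip(denoise_grads, denoise_params)))
    ranker_updates = opt.apply_gradients(
        list(zip(ranking_grads, ranking_params)))

    # Both sub-models step together.
    return tf.group(denoise_updates, ranker_updates), norm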