INSTRUCTION | RESPONSE
|---|---|
Helper to validate block sizes.
|
def _validate_block_sizes(block_sizes, bijectors, validate_args):
"""Helper to validate block sizes."""
block_sizes_shape = block_sizes.shape
if tensorshape_util.is_fully_defined(block_sizes_shape):
if (tensorshape_util.rank(block_sizes_shape) != 1 or
(tensorshape_util.num_elements(block_sizes_shape) != len(bijectors))):
raise ValueError(
'`block_sizes` must be `None`, or a vector of the same length as '
'`bijectors`. Got a `Tensor` with shape {} and `bijectors` of '
'length {}'.format(block_sizes_shape, len(bijectors)))
return block_sizes
elif validate_args:
message = ('`block_sizes` must be `None`, or a vector of the same length '
'as `bijectors`.')
with tf.control_dependencies([
assert_util.assert_equal(
tf.size(input=block_sizes), len(bijectors), message=message),
assert_util.assert_equal(tf.rank(block_sizes), 1)
]):
return tf.identity(block_sizes)
else:
return block_sizes
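# Usage sketch (illustrative, not part of the library). Assumes the helper
# above is importable along with its TFP-internal dependencies, and that
# `tf = tensorflow`, `tfb = tensorflow_probability.bijectors`; the bijector
# choices below are hypothetical. `block_sizes` must hold one entry per
# bijector, e.g. splitting a length-5 vector into blocks of sizes 2 and 3:
bijectors = [tfb.Softplus(), tfb.Identity()]
block_sizes = tf.constant([2, 3], dtype=tf.int32)  # one size per bijector
# The statically known shape [2] matches len(bijectors), so the tensor is
# returned unchanged; a mismatched length would raise a ValueError.
block_sizes = _validate_block_sizes(block_sizes, bijectors, validate_args=False)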
|
Verifies that parts don't broadcast.
|
def maybe_check_wont_broadcast(flat_xs, validate_args):
"""Verifies that `parts` don't broadcast."""
flat_xs = tuple(flat_xs) # So we can receive generators.
if not validate_args:
# Note: we don't try static validation because it is theoretically
# possible that a user wants to take advantage of broadcasting.
# Only when `validate_args` is `True` do we enforce the validation.
return flat_xs
msg = 'Broadcasting probably indicates an error in model specification.'
s = tuple(x.shape for x in flat_xs)
if all(tensorshape_util.is_fully_defined(s_) for s_ in s):
if not all(a == b for a, b in zip(s[1:], s[:-1])):
raise ValueError(msg)
return flat_xs
assertions = [assert_util.assert_equal(a, b, message=msg)
for a, b in zip(s[1:], s[:-1])]
with tf.control_dependencies(assertions):
return tuple(tf.identity(x) for x in flat_xs)
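# Usage sketch (illustrative, not part of the library). With
# `validate_args=True`, parts whose fully defined shapes differ raise a
# ValueError, while identically shaped parts pass through unchanged.
parts = (tf.zeros([2, 3]), tf.ones([2, 3]))
checked = maybe_check_wont_broadcast(parts, validate_args=True)  # ok
# maybe_check_wont_broadcast((tf.zeros([2, 3]), tf.ones([3])), True)  # raises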
|
Converts (batch of) scalars to (batch of) positive valued scalars.
|
def softplus_and_shift(x, shift=1e-5, name=None):
"""Converts (batch of) scalars to (batch of) positive valued scalars.
Args:
x: (Batch of) `float`-like `Tensor` representing scalars which will be
transformed into positive elements.
shift: `Tensor` added to `softplus` transformation of elements.
Default value: `1e-5`.
name: A `name_scope` name for operations created by this function.
Default value: `None` (i.e., "softplus_and_shift").
Returns:
scale: (Batch of) scalars with `x.dtype` and `x.shape`.
"""
with tf.compat.v1.name_scope(name, 'softplus_and_shift', [x, shift]):
x = tf.convert_to_tensor(value=x, name='x')
y = tf.nn.softplus(x)
if shift is not None:
y += shift
return y
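# Usage sketch (illustrative, not part of the library). `softplus_and_shift`
# maps arbitrary reals to strictly positive values bounded below by `shift`.
x = tf.constant([-10., 0., 3.])
y = softplus_and_shift(x)  # == tf.nn.softplus(x) + 1e-5, elementwise positive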
|
Converts (batch of) vectors to (batch of) lower-triangular scale matrices.
|
def tril_with_diag_softplus_and_shift(x, diag_shift=1e-5, name=None):
"""Converts (batch of) vectors to (batch of) lower-triangular scale matrices.
Args:
x: (Batch of) `float`-like `Tensor` representing vectors which will be
transformed into lower-triangular scale matrices with positive diagonal
elements. Rightmost shape `n` must be such that
`n = dims * (dims + 1) / 2` for some positive, integer `dims`.
diag_shift: `Tensor` added to `softplus` transformation of diagonal
elements.
Default value: `1e-5`.
name: A `name_scope` name for operations created by this function.
Default value: `None` (i.e., "tril_with_diag_softplus_and_shift").
Returns:
scale_tril: (Batch of) lower-triangular `Tensor` with `x.dtype` and
rightmost shape `[dims, dims]`, where `dims` satisfies
`n = dims * (dims + 1) / 2` with `n = x.shape[-1]`.
"""
with tf.compat.v1.name_scope(name, 'tril_with_diag_softplus_and_shift',
[x, diag_shift]):
x = tf.convert_to_tensor(value=x, name='x')
x = tfd.fill_triangular(x)
diag = softplus_and_shift(tf.linalg.diag_part(x), diag_shift)
x = tf.linalg.set_diag(x, diag)
return x
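# Usage sketch (illustrative, not part of the library). A length-6 vector
# yields a 3x3 lower-triangular scale matrix since 6 = 3 * (3 + 1) / 2; the
# lower triangle is filled from `v` and the diagonal entries are then passed
# through softplus + shift, hence strictly positive.
v = tf.constant([1., -2., 0.5, 3., -1., 0.2])
scale_tril = tril_with_diag_softplus_and_shift(v)  # shape [3, 3]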
|
Constructs a trainable tfd.MultivariateNormalTriL distribution.
|
def multivariate_normal_tril(x,
dims,
layer_fn=tf.compat.v1.layers.dense,
loc_fn=lambda x: x,
scale_fn=tril_with_diag_softplus_and_shift,
name=None):
"""Constructs a trainable `tfd.MultivariateNormalTriL` distribution.
This function creates a MultivariateNormal (MVN) with lower-triangular scale
matrix. By default the MVN is parameterized via affine transformation of input
tensor `x`. Using default args, this function is mathematically equivalent to:
```none
Y = MVN(loc=matmul(W, x) + b,
scale_tril=f(reshape_tril(matmul(M, x) + c)))
where,
W in R^[d, n]
M in R^[d*(d+1)/2, n]
b in R^d
c in R^[d*(d+1)/2]
f(S) = set_diag(S, softplus(matrix_diag_part(S)) + 1e-5)
```
Observe that `f` makes the diagonal of the lower-triangular scale matrix be
positive and no smaller than `1e-5`.
#### Examples
```python
# This example fits a multilinear regression loss.
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# Create fictitious training data.
dtype = np.float32
n = 3000 # number of samples
x_size = 4 # size of single x
y_size = 2 # size of single y
def make_training_data():
np.random.seed(142)
x = np.random.randn(n, x_size).astype(dtype)
w = np.random.randn(x_size, y_size).astype(dtype)
b = np.random.randn(1, y_size).astype(dtype)
true_mean = np.tensordot(x, w, axes=[[-1], [0]]) + b
noise = np.random.randn(n, y_size).astype(dtype)
y = true_mean + noise
return y, x
y, x = make_training_data()
# Build TF graph for fitting MVNTriL maximum likelihood estimator.
mvn = tfp.trainable_distributions.multivariate_normal_tril(x, dims=y_size)
loss = -tf.reduce_mean(mvn.log_prob(y))
train_op = tf.train.AdamOptimizer(learning_rate=2.**-3).minimize(loss)
mse = tf.reduce_mean(tf.squared_difference(y, mvn.mean()))
init_op = tf.global_variables_initializer()
# Run graph 1000 times.
num_steps = 1000
loss_ = np.zeros(num_steps) # Style: `_` to indicate sess.run result.
mse_ = np.zeros(num_steps)
with tf.Session() as sess:
sess.run(init_op)
for it in xrange(loss_.size):
_, loss_[it], mse_[it] = sess.run([train_op, loss, mse])
if it % 200 == 0 or it == loss_.size - 1:
print("iteration:{} loss:{} mse:{}".format(it, loss_[it], mse_[it]))
# ==> iteration:0 loss:38.2020797729 mse:4.17175960541
# iteration:200 loss:2.90179634094 mse:0.990987896919
# iteration:400 loss:2.82727336884 mse:0.990926623344
# iteration:600 loss:2.82726788521 mse:0.990926682949
# iteration:800 loss:2.82726788521 mse:0.990926682949
# iteration:999 loss:2.82726788521 mse:0.990926682949
```
Args:
x: `Tensor` with floating type. Must have statically defined rank and
statically known right-most dimension.
dims: Scalar `int` `Tensor` indicating the MVN event size, i.e., the
created MVN will be a distribution over length-`dims` vectors.
layer_fn: Python `callable` which takes input `x` and `int` scalar `d` and
returns a transformation of `x` with shape
`tf.concat([tf.shape(x)[:-1], [d]], axis=0)`.
Default value: `tf.layers.dense`.
loc_fn: Python `callable` which transforms the `loc` parameter. Takes a
(batch of) length-`dims` vectors and returns a `Tensor` of same shape and
`dtype`.
Default value: `lambda x: x`.
scale_fn: Python `callable` which transforms the `scale` parameters. Takes a
(batch of) length-`dims * (dims + 1) / 2` vectors and returns a
lower-triangular `Tensor` of same batch shape with rightmost dimensions
having shape `[dims, dims]`.
Default value: `tril_with_diag_softplus_and_shift`.
name: A `name_scope` name for operations created by this function.
Default value: `None` (i.e., "multivariate_normal_tril").
Returns:
mvntril: An instance of `tfd.MultivariateNormalTriL`.
"""
with tf.compat.v1.name_scope(name, 'multivariate_normal_tril', [x, dims]):
x = tf.convert_to_tensor(value=x, name='x')
x = layer_fn(x, dims + dims * (dims + 1) // 2)
return tfd.MultivariateNormalTriL(
loc=loc_fn(x[..., :dims]),
scale_tril=scale_fn(x[..., dims:]))
|
Constructs a trainable tfd.Bernoulli distribution.
|
def bernoulli(x, layer_fn=tf.compat.v1.layers.dense, name=None):
"""Constructs a trainable `tfd.Bernoulli` distribution.
This function creates a Bernoulli distribution parameterized by logits.
Using default args, this function is mathematically equivalent to:
```none
Y = Bernoulli(logits=matmul(W, x) + b)
where,
W in R^[d, n]
b in R^d
```
#### Examples
This function can be used as a [logistic regression](
https://en.wikipedia.org/wiki/Logistic_regression) loss.
```python
# This example fits a logistic regression loss.
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# Create fictitious training data.
dtype = np.float32
n = 3000 # number of samples
x_size = 4 # size of single x
def make_training_data():
np.random.seed(142)
x = np.random.randn(n, x_size).astype(dtype)
w = np.random.randn(x_size).astype(dtype)
b = np.random.randn(1).astype(dtype)
true_logits = np.tensordot(x, w, axes=[[-1], [-1]]) + b
noise = np.random.logistic(size=n).astype(dtype)
y = dtype(true_logits + noise > 0.)
return y, x
y, x = make_training_data()
# Build TF graph for fitting Bernoulli maximum likelihood estimator.
bernoulli = tfp.trainable_distributions.bernoulli(x)
loss = -tf.reduce_mean(bernoulli.log_prob(y))
train_op = tf.train.AdamOptimizer(learning_rate=2.**-5).minimize(loss)
mse = tf.reduce_mean(tf.squared_difference(y, bernoulli.mean()))
init_op = tf.global_variables_initializer()
# Run graph 1000 times.
num_steps = 1000
loss_ = np.zeros(num_steps) # Style: `_` to indicate sess.run result.
mse_ = np.zeros(num_steps)
with tf.Session() as sess:
sess.run(init_op)
for it in xrange(loss_.size):
_, loss_[it], mse_[it] = sess.run([train_op, loss, mse])
if it % 200 == 0 or it == loss_.size - 1:
print("iteration:{} loss:{} mse:{}".format(it, loss_[it], mse_[it]))
# ==> iteration:0 loss:0.635675370693 mse:0.222526371479
# iteration:200 loss:0.440077394247 mse:0.143687799573
# iteration:400 loss:0.440077394247 mse:0.143687844276
# iteration:600 loss:0.440077394247 mse:0.143687844276
# iteration:800 loss:0.440077424049 mse:0.143687844276
# iteration:999 loss:0.440077424049 mse:0.143687844276
```
Args:
x: `Tensor` with floating type. Must have statically defined rank and
statically known right-most dimension.
layer_fn: Python `callable` which takes input `x` and `int` scalar `d` and
returns a transformation of `x` with shape
`tf.concat([tf.shape(x)[:-1], [1]], axis=0)`.
Default value: `tf.layers.dense`.
name: A `name_scope` name for operations created by this function.
Default value: `None` (i.e., "bernoulli").
Returns:
bernoulli: An instance of `tfd.Bernoulli`.
"""
with tf.compat.v1.name_scope(name, 'bernoulli', [x]):
x = tf.convert_to_tensor(value=x, name='x')
logits = tf.squeeze(layer_fn(x, 1), axis=-1)
return tfd.Bernoulli(logits=logits)
|
Constructs a trainable tfd.Normal distribution.
|
def normal(x,
layer_fn=tf.compat.v1.layers.dense,
loc_fn=lambda x: x,
scale_fn=1.,
name=None):
"""Constructs a trainable `tfd.Normal` distribution.
This function creates a Normal distribution parameterized by loc and scale.
Using default args, this function is mathematically equivalent to:
```none
Y = Normal(loc=matmul(W, x) + b, scale=1)
where,
W in R^[d, n]
b in R^d
```
#### Examples
This function can be used as a [linear regression](
https://en.wikipedia.org/wiki/Linear_regression) loss.
```python
# This example fits a linear regression loss.
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# Create fictitious training data.
dtype = np.float32
n = 3000 # number of samples
x_size = 4 # size of single x
def make_training_data():
np.random.seed(142)
x = np.random.randn(n, x_size).astype(dtype)
w = np.random.randn(x_size).astype(dtype)
b = np.random.randn(1).astype(dtype)
true_mean = np.tensordot(x, w, axes=[[-1], [-1]]) + b
noise = np.random.randn(n).astype(dtype)
y = true_mean + noise
return y, x
y, x = make_training_data()
# Build TF graph for fitting Normal maximum likelihood estimator.
normal = tfp.trainable_distributions.normal(x)
loss = -tf.reduce_mean(normal.log_prob(y))
train_op = tf.train.AdamOptimizer(learning_rate=2.**-5).minimize(loss)
mse = tf.reduce_mean(tf.squared_difference(y, normal.mean()))
init_op = tf.global_variables_initializer()
# Run graph 1000 times.
num_steps = 1000
loss_ = np.zeros(num_steps) # Style: `_` to indicate sess.run result.
mse_ = np.zeros(num_steps)
with tf.Session() as sess:
sess.run(init_op)
for it in xrange(loss_.size):
_, loss_[it], mse_[it] = sess.run([train_op, loss, mse])
if it % 200 == 0 or it == loss_.size - 1:
print("iteration:{} loss:{} mse:{}".format(it, loss_[it], mse_[it]))
# ==> iteration:0 loss:6.34114170074 mse:10.8444051743
# iteration:200 loss:1.40146839619 mse:0.965059816837
# iteration:400 loss:1.40052902699 mse:0.963181257248
# iteration:600 loss:1.40052902699 mse:0.963181257248
# iteration:800 loss:1.40052902699 mse:0.963181257248
# iteration:999 loss:1.40052902699 mse:0.963181257248
```
Args:
x: `Tensor` with floating type. Must have statically defined rank and
statically known right-most dimension.
layer_fn: Python `callable` which takes input `x` and `int` scalar `d` and
returns a transformation of `x` with shape
`tf.concat([tf.shape(x)[:-1], [1]], axis=0)`.
Default value: `tf.layers.dense`.
loc_fn: Python `callable` which transforms the `loc` parameter. Takes a
(batch of) length-`dims` vectors and returns a `Tensor` of same shape and
`dtype`.
Default value: `lambda x: x`.
scale_fn: Python `callable` or `Tensor`. If a `callable`, it transforms the
`scale` parameters; if a `Tensor`, it is used directly as the `tfd.Normal`
`scale` argument. A `callable` takes a (batch of) length-`dims` vectors and
returns a `Tensor` of the same size. (Accepting either a `callable` or a
`Tensor` is how `tf.Variable` initializers behave.)
Default value: `1`.
name: A `name_scope` name for operations created by this function.
Default value: `None` (i.e., "normal").
Returns:
normal: An instance of `tfd.Normal`.
"""
with tf.compat.v1.name_scope(name, 'normal', [x]):
x = tf.convert_to_tensor(value=x, name='x')
if callable(scale_fn):
y = layer_fn(x, 2)
loc = loc_fn(y[..., 0])
scale = scale_fn(y[..., 1])
else:
y = tf.squeeze(layer_fn(x, 1), axis=-1)
loc = loc_fn(y)
scale = tf.cast(scale_fn, loc.dtype.base_dtype)
return tfd.Normal(loc=loc, scale=scale)
|
Constructs a trainable tfd.Poisson distribution.
|
def poisson(x,
layer_fn=tf.compat.v1.layers.dense,
log_rate_fn=lambda x: x,
name=None):
"""Constructs a trainable `tfd.Poisson` distribution.
This function creates a Poisson distribution parameterized by log rate.
Using default args, this function is mathematically equivalent to:
```none
Y = Poisson(log_rate=matmul(W, x) + b)
where,
W in R^[d, n]
b in R^d
```
#### Examples
This can be used as a [Poisson regression](
https://en.wikipedia.org/wiki/Poisson_regression) loss.
```python
# This example fits a poisson regression loss.
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# Create fictitious training data.
dtype = np.float32
n = 3000 # number of samples
x_size = 4 # size of single x
def make_training_data():
np.random.seed(142)
x = np.random.randn(n, x_size).astype(dtype)
w = np.random.randn(x_size).astype(dtype)
b = np.random.randn(1).astype(dtype)
true_log_rate = np.tensordot(x, w, axes=[[-1], [-1]]) + b
y = np.random.poisson(lam=np.exp(true_log_rate)).astype(dtype)
return y, x
y, x = make_training_data()
# Build TF graph for fitting Poisson maximum likelihood estimator.
poisson = tfp.trainable_distributions.poisson(x)
loss = -tf.reduce_mean(poisson.log_prob(y))
train_op = tf.train.AdamOptimizer(learning_rate=2.**-5).minimize(loss)
mse = tf.reduce_mean(tf.squared_difference(y, poisson.mean()))
init_op = tf.global_variables_initializer()
# Run graph 1000 times.
num_steps = 1000
loss_ = np.zeros(num_steps) # Style: `_` to indicate sess.run result.
mse_ = np.zeros(num_steps)
with tf.Session() as sess:
sess.run(init_op)
for it in xrange(loss_.size):
_, loss_[it], mse_[it] = sess.run([train_op, loss, mse])
if it % 200 == 0 or it == loss_.size - 1:
print("iteration:{} loss:{} mse:{}".format(it, loss_[it], mse_[it]))
# ==> iteration:0 loss:37.0814208984 mse:6359.41259766
# iteration:200 loss:1.42010736465 mse:40.7654914856
# iteration:400 loss:1.39027583599 mse:8.77660560608
# iteration:600 loss:1.3902695179 mse:8.78443241119
# iteration:800 loss:1.39026939869 mse:8.78443622589
# iteration:999 loss:1.39026939869 mse:8.78444766998
```
Args:
x: `Tensor` with floating type. Must have statically defined rank and
statically known right-most dimension.
layer_fn: Python `callable` which takes input `x` and `int` scalar `d` and
returns a transformation of `x` with shape
`tf.concat([tf.shape(x)[:-1], [1]], axis=0)`.
Default value: `tf.layers.dense`.
log_rate_fn: Python `callable` which transforms the `log_rate` parameter.
Takes a (batch of) length-`dims` vectors and returns a `Tensor` of same
shape and `dtype`.
Default value: `lambda x: x`.
name: A `name_scope` name for operations created by this function.
Default value: `None` (i.e., "poisson").
Returns:
poisson: An instance of `tfd.Poisson`.
"""
with tf.compat.v1.name_scope(name, 'poisson', [x]):
x = tf.convert_to_tensor(value=x, name='x')
log_rate = log_rate_fn(tf.squeeze(layer_fn(x, 1), axis=-1))
return tfd.Poisson(log_rate=log_rate)
|
Finds root(s) of a function of a single variable using the secant method.
|
def secant_root(objective_fn,
initial_position,
next_position=None,
value_at_position=None,
position_tolerance=1e-8,
value_tolerance=1e-8,
max_iterations=50,
stopping_policy_fn=tf.reduce_all,
validate_args=False,
name=None):
r"""Finds root(s) of a function of single variable using the secant method.
The [secant method](https://en.wikipedia.org/wiki/Secant_method) is a
root-finding algorithm that uses a succession of roots of secant lines to
better approximate a root of a function. The secant method can be thought of
as a finite-difference approximation of Newton's method.
Args:
objective_fn: Python callable for which roots are searched. It must be a
callable of a single variable. `objective_fn` must return a `Tensor` of
the same shape and dtype as `initial_position`.
initial_position: `Tensor` or Python float representing the starting
position. The function will search for roots in the neighborhood of each
point. The shape of `initial_position` should match that of the input to
`objective_fn`.
next_position: Optional `Tensor` representing the next position in the
search. If specified, this argument must broadcast with the shape of
`initial_position` and have the same dtype. It will be used to compute the
first step to take when searching for roots. If not specified, a default
value will be used instead.
Default value: `initial_position * (1 + 1e-4) + sign(initial_position) *
1e-4`.
value_at_position: Optional `Tensor` or Python float representing the value
of `objective_fn` at `initial_position`. If specified, this argument must
have the same shape and dtype as `initial_position`. If not specified, the
value will be evaluated during the search.
Default value: None.
position_tolerance: Optional `Tensor` representing the tolerance for the
estimated roots. If specified, this argument must broadcast with the shape
of `initial_position` and have the same dtype.
Default value: `1e-8`.
value_tolerance: Optional `Tensor` representing the tolerance used to check
for roots. If the absolute value of `objective_fn` is smaller than
`value_tolerance` at a given position, then that position is considered a
root for the function. If specified, this argument must broadcast with the
shape of `initial_position` and have the same dtype.
Default value: `1e-8`.
max_iterations: Optional `Tensor` or Python integer specifying the maximum
number of steps to perform for each initial position. Must broadcast with
the shape of `initial_position`.
Default value: `50`.
stopping_policy_fn: Python `callable` controlling the algorithm termination.
It must be a callable accepting a `Tensor` of booleans with the shape of
`initial_position` (each denoting whether the search is finished for each
starting point), and returning a scalar boolean `Tensor` (indicating
whether the overall search should stop). Typical values are
`tf.reduce_all` (which returns only when the search is finished for all
points), and `tf.reduce_any` (which returns as soon as the search is
finished for any point).
Default value: `tf.reduce_all` (returns only when the search is finished
for all points).
validate_args: Python `bool` indicating whether to validate arguments such
as `position_tolerance`, `value_tolerance`, and `max_iterations`.
Default value: `False`.
name: Python `str` name prefixed to ops created by this function.
Returns:
root_search_results: A Python `namedtuple` containing the following items:
estimated_root: `Tensor` containing the last position explored. If the
search was successful within the specified tolerance, this position is
a root of the objective function.
objective_at_estimated_root: `Tensor` containing the value of the
objective function at `position`. If the search was successful within
the specified tolerance, then this is close to 0.
num_iterations: The number of iterations performed.
Raises:
ValueError: if a non-callable `stopping_policy_fn` is passed.
#### Examples
```python
import tensorflow as tf
import tensorflow_probability as tfp
tf.enable_eager_execution()
# Example 1: Roots of a single function from two different starting points.
f = lambda x: (63 * x**5 - 70 * x**3 + 15 * x) / 8.
x = tf.constant([-1, 10], dtype=tf.float64)
tfp.math.secant_root(objective_fn=f, initial_position=x)
# ==> RootSearchResults(
estimated_root=array([-0.90617985, 0.90617985]),
objective_at_estimated_root=array([-4.81727769e-10, 7.44957651e-10]),
num_iterations=array([ 7, 24], dtype=int32))
tfp.math.secant_root(objective_fn=f,
initial_position=x,
stopping_policy_fn=tf.reduce_any)
# ==> RootSearchResults(
estimated_root=array([-0.90617985, 3.27379206]),
objective_at_estimated_root=array([-4.81727769e-10, 2.66058312e+03]),
num_iterations=array([7, 8], dtype=int32))
# Example 2: Roots of a multiplex function from a single starting point.
def f(x):
return tf.constant([0., 63. / 8], dtype=tf.float64) * x**5 \
+ tf.constant([5. / 2, -70. / 8], dtype=tf.float64) * x**3 \
+ tf.constant([-3. / 2, 15. / 8], dtype=tf.float64) * x
x = tf.constant([-1, -1], dtype=tf.float64)
tfp.math.secant_root(objective_fn=f, initial_position=x)
# ==> RootSearchResults(
estimated_root=array([-0.77459667, -0.90617985]),
objective_at_estimated_root=array([-7.81339438e-11, -4.81727769e-10]),
num_iterations=array([7, 7], dtype=int32))
# Example 3: Roots of a multiplex function from two starting points.
def f(x):
return tf.constant([0., 63. / 8], dtype=tf.float64) * x**5 \
+ tf.constant([5. / 2, -70. / 8], dtype=tf.float64) * x**3 \
+ tf.constant([-3. / 2, 15. / 8], dtype=tf.float64) * x
x = tf.constant([[-1, -1], [10, 10]], dtype=tf.float64)
tfp.math.secant_root(objective_fn=f, initial_position=x)
# ==> RootSearchResults(
estimated_root=array([
[-0.77459667, -0.90617985],
[ 0.77459667, 0.90617985]]),
objective_at_estimated_root=array([
[-7.81339438e-11, -4.81727769e-10],
[6.66025013e-11, 7.44957651e-10]]),
num_iterations=array([
[7, 7],
[16, 24]], dtype=int32))
```
"""
if not callable(stopping_policy_fn):
raise ValueError('stopping_policy_fn must be callable')
position = tf.convert_to_tensor(
value=initial_position,
name='position',
)
value_at_position = tf.convert_to_tensor(
value=(value_at_position if value_at_position is not None
else objective_fn(position)),
name='value_at_position',
dtype=position.dtype.base_dtype)
zero = tf.zeros_like(position)
position_tolerance = tf.convert_to_tensor(
value=position_tolerance, name='position_tolerance', dtype=position.dtype)
value_tolerance = tf.convert_to_tensor(
value=value_tolerance, name='value_tolerance', dtype=position.dtype)
num_iterations = tf.zeros_like(position, dtype=tf.int32)
max_iterations = tf.convert_to_tensor(value=max_iterations, dtype=tf.int32)
max_iterations = tf.broadcast_to(
max_iterations, name='max_iterations', shape=position.shape)
# Compute the step from `next_position` if present. This covers the case where
# a user has two starting points, which bound the root or has a specific step
# size in mind.
if next_position is None:
epsilon = tf.constant(1e-4, dtype=position.dtype, shape=position.shape)
step = position * epsilon + tf.sign(position) * epsilon
else:
step = next_position - initial_position
finished = tf.constant(False, shape=position.shape)
# Negate `stopping_condition` to determine if the search should continue.
# This means, in particular, that tf.reduce_*all* will return only when the
# search is finished for *all* starting points.
def _should_continue(position, value_at_position, num_iterations, step,
finished):
"""Indicates whether the overall search should continue.
Args:
position: `Tensor` containing the current root estimates.
value_at_position: `Tensor` containing the value of `objective_fn` at
`position`.
num_iterations: `Tensor` containing the current iteration index for each
point.
step: `Tensor` containing the size of the step to take for each point.
finished: `Tensor` indicating for which points the search is finished.
Returns:
A boolean value indicating whether the overall search should continue.
"""
del position, value_at_position, num_iterations, step # Unused
return ~tf.convert_to_tensor(
value=stopping_policy_fn(finished), name='should_stop', dtype=tf.bool)
# For each point in `position`, the search is stopped if either:
# (1) A root has been found
# (2) f(position) == f(position + step)
# (3) The maximum number of iterations has been reached
# In case (2), the search may be stopped both before the desired tolerance is
# achieved (or even a root is found), and the maximum number of iterations is
# reached.
def _body(position, value_at_position, num_iterations, step, finished):
"""Performs one iteration of the secant root-finding algorithm.
Args:
position: `Tensor` containing the current root estimates.
value_at_position: `Tensor` containing the value of `objective_fn` at
`position`.
num_iterations: `Tensor` containing the current iteration index for each
point.
step: `Tensor` containing the size of the step to take for each point.
finished: `Tensor` indicating for which points the search is finished.
Returns:
The `Tensor`s to use for the next iteration of the algorithm.
"""
# True if the search was already finished, or (1) or (3) just became true.
was_finished = finished | (num_iterations >= max_iterations) | (
tf.abs(step) < position_tolerance) | (
tf.abs(value_at_position) < value_tolerance)
# Compute the next position and the value at that point.
next_position = tf.where(was_finished, position, position + step)
value_at_next_position = tf.where(was_finished, value_at_position,
objective_fn(next_position))
# True if the search was already finished, or (2) just became true.
is_finished = tf.equal(value_at_position, value_at_next_position)
# Use the mid-point between the last two positions if (2) just became true.
next_position = tf.where(is_finished & ~was_finished,
(position + next_position) * 0.5, next_position)
# Once finished, stop updating the iteration index and set the step to zero.
num_iterations = tf.where(is_finished, num_iterations, num_iterations + 1)
next_step = tf.where(
is_finished, zero, step * value_at_next_position /
(value_at_position - value_at_next_position))
return (next_position, value_at_next_position, num_iterations, next_step,
is_finished)
with tf.compat.v1.name_scope(
name, 'secant_root',
[position, next_position, value_at_position, max_iterations]):
assertions = []
if validate_args:
assertions += [
tf.Assert(
tf.reduce_all(input_tensor=position_tolerance > zero),
[position_tolerance]),
tf.Assert(
tf.reduce_all(input_tensor=value_tolerance > zero),
[value_tolerance]),
tf.Assert(
tf.reduce_all(input_tensor=max_iterations >= num_iterations),
[max_iterations]),
]
with tf.control_dependencies(assertions):
root, value_at_root, num_iterations, _, _ = tf.while_loop(
cond=_should_continue,
body=_body,
loop_vars=[
position, value_at_position, num_iterations, step, finished
])
return RootSearchResults(
estimated_root=root,
objective_at_estimated_root=value_at_root,
num_iterations=num_iterations)
|
Applies one step of the Euler-Maruyama method.
|
def _euler_method(random_draw_parts,
state_parts,
drift_parts,
step_size_parts,
volatility_parts,
name=None):
"""Applies one step of Euler-Maruyama method.
Generates proposal of the form:
```python
tfd.Normal(loc=state_parts + _get_drift(state_parts, ...),
scale=tf.sqrt(step_size) * volatility_fn(current_state))
```
`_get_drift(state_parts, ..)` is a diffusion drift value at `state_parts`.
Args:
random_draw_parts: Python `list` of `Tensor`s containing the value(s) of the
random perturbation variable(s). Must broadcast with the shape of
`state_parts`.
state_parts: Python `list` of `Tensor`s representing the current
state(s) of the Markov chain(s).
drift_parts: Python `list` of `Tensor`s representing value of the drift
`_get_drift(*state_parts, ..)`. Must broadcast with the shape of
`state_parts`.
step_size_parts: Python `list` of `Tensor`s representing the step size for
the Euler-Maruyama method. Must broadcast with the shape of
`state_parts`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely. When
possible, it's often helpful to match per-variable step sizes to the
standard deviations of the target distribution in each variable.
volatility_parts: Python `list` of `Tensor`s representing the value of
`volatility_fn(*state_parts)`. Must broadcast with the shape of
`state_parts`.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'mala_euler_method').
Returns:
proposed_state_parts: Tensor or Python list of `Tensor`s representing the
state(s) of the Markov chain(s) at each result step. Has same shape as
input `current_state_parts`.
"""
with tf.compat.v1.name_scope(name, 'mala_euler_method', [
random_draw_parts, state_parts, drift_parts, step_size_parts,
volatility_parts
]):
proposed_state_parts = []
for random_draw, state, drift, step_size, volatility in zip(
random_draw_parts,
state_parts,
drift_parts,
step_size_parts,
volatility_parts):
proposal = state + drift + volatility * tf.sqrt(step_size) * random_draw
proposed_state_parts.append(proposal)
return proposed_state_parts
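# Numeric sketch (illustrative, hypothetical values). One Euler-Maruyama step
# for a single scalar state part: with state=0., drift=0.1, step_size=0.25,
# volatility=2. and a standard-normal draw z, the proposal is
# 0. + 0.1 + 2. * sqrt(0.25) * z == 0.1 + z.
z = tf.random.normal([])
[proposal] = _euler_method(random_draw_parts=[z],
                           state_parts=[tf.constant(0.)],
                           drift_parts=[tf.constant(0.1)],
                           step_size_parts=[tf.constant(0.25)],
                           volatility_parts=[tf.constant(2.)])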
|
Compute diffusion drift at the current location current_state.
|
def _get_drift(step_size_parts, volatility_parts, grads_volatility,
grads_target_log_prob,
name=None):
"""Compute diffusion drift at the current location `current_state`.
The drift of the diffusion is computed as
```none
0.5 * `step_size` * (`volatility_parts` * `grads_target_log_prob`
+ `grads_volatility`)
```
where `volatility_parts` = `volatility_fn(current_state)**2`,
`grads_target_log_prob` is the gradient of `target_log_prob_fn` at
`current_state`, and `grads_volatility` is the gradient of `volatility_parts`
at `current_state`.
Args:
step_size_parts: Python `list` of `Tensor`s representing the step size for
Euler-Maruyama method. Must broadcast with the shape of
`volatility_parts`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely. When
possible, it's often helpful to match per-variable step sizes to the
standard deviations of the target distribution in each variable.
volatility_parts: Python `list` of `Tensor`s representing the value of
`volatility_fn(*state_parts)`.
grads_volatility: Python list of `Tensor`s representing the value of the
gradient of `volatility_parts**2` wrt the state of the chain.
grads_target_log_prob: Python list of `Tensor`s representing the
gradient of `target_log_prob_fn(*state_parts)` wrt `state_parts`. Must
have same shape as `volatility_parts`.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'mala_get_drift').
Returns:
drift_parts: Python `list` of `Tensor`s representing the diffusion drift for
each state part. Each entry has the same shape as the corresponding entry of
`volatility_parts`.
"""
with tf.compat.v1.name_scope(name, 'mala_get_drift', [
step_size_parts, volatility_parts, grads_volatility, grads_target_log_prob
]):
drift_parts = []
for step_size, volatility, grad_volatility, grad_target_log_prob in (
zip(step_size_parts,
volatility_parts,
grads_volatility,
grads_target_log_prob)):
volatility_squared = tf.square(volatility)
drift = 0.5 * step_size * (volatility_squared * grad_target_log_prob
+ grad_volatility)
drift_parts.append(drift)
return drift_parts
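# Numeric sketch (illustrative, hypothetical values). For a standard-normal
# target at x = 2. the score is -2.; with unit volatility and zero volatility
# gradient the drift reduces to 0.5 * step_size * (1**2 * (-2.) + 0.) == -1.
[drift] = _get_drift(step_size_parts=[tf.constant(1.)],
                     volatility_parts=[tf.constant(1.)],
                     grads_volatility=[tf.constant(0.)],
                     grads_target_log_prob=[tf.constant(-2.)])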
|
Helper to `kernel` which computes the log acceptance-correction.
|
def _compute_log_acceptance_correction(current_state_parts,
proposed_state_parts,
current_volatility_parts,
proposed_volatility_parts,
current_drift_parts,
proposed_drift_parts,
step_size_parts,
independent_chain_ndims,
name=None):
r"""Helper to `kernel` which computes the log acceptance-correction.
Computes `log_acceptance_correction` as described in `MetropolisHastings`
class. The proposal density is normal. More specifically,
```none
q(proposed_state | current_state) \sim N(current_state + current_drift,
step_size * current_volatility**2)
q(current_state | proposed_state) \sim N(proposed_state + proposed_drift,
step_size * proposed_volatility**2)
```
The `log_acceptance_correction` is then
```none
log_acceptance_correction = log q(current_state | proposed_state)
- log q(proposed_state | current_state)
```
Args:
current_state_parts: Python `list` of `Tensor`s representing the value(s) of
the current state of the chain.
proposed_state_parts: Python `list` of `Tensor`s representing the value(s)
of the proposed state of the chain. Must broadcast with the shape of
`current_state_parts`.
current_volatility_parts: Python `list` of `Tensor`s representing the value
of `volatility_fn(*current_state_parts)`. Must broadcast with the
shape of `current_state_parts`.
proposed_volatility_parts: Python `list` of `Tensor`s representing the value
of `volatility_fn(*proposed_state_parts)`. Must broadcast with the
shape of `current_state_parts`.
current_drift_parts: Python `list` of `Tensor`s representing the value of the
drift `_get_drift(*current_state_parts, ..)`. Must broadcast with the
shape of `current_state_parts`.
proposed_drift_parts: Python `list` of `Tensor`s representing the value of the
drift `_get_drift(*proposed_state_parts, ..)`. Must broadcast with the
shape of `current_state_parts`.
step_size_parts: Python `list` of `Tensor`s representing the step size for
Euler-Maruyama method. Must broadcast with the shape of
`current_state_parts`.
independent_chain_ndims: Scalar `int` `Tensor` representing the number of
leftmost `Tensor` dimensions which index independent chains.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'compute_log_acceptance_correction').
Returns:
log_acceptance_correction: `Tensor` representing the `log`
acceptance-correction. (See docstring for mathematical definition.)
"""
with tf.compat.v1.name_scope(name, 'compute_log_acceptance_correction', [
current_state_parts, proposed_state_parts, current_volatility_parts,
proposed_volatility_parts, current_drift_parts, proposed_drift_parts,
step_size_parts, independent_chain_ndims
]):
proposed_log_density_parts = []
dual_log_density_parts = []
for [
current_state,
proposed_state,
current_volatility,
proposed_volatility,
current_drift,
proposed_drift,
step_size,
] in zip(
current_state_parts,
proposed_state_parts,
current_volatility_parts,
proposed_volatility_parts,
current_drift_parts,
proposed_drift_parts,
step_size_parts,
):
axis = tf.range(independent_chain_ndims, tf.rank(current_state))
state_diff = proposed_state - current_state
current_volatility *= tf.sqrt(step_size)
proposed_energy = (state_diff - current_drift) / current_volatility
proposed_volatility *= tf.sqrt(step_size)
# Compute part of `q(proposed_state | current_state)`
proposed_energy = (
tf.reduce_sum(
input_tensor=mcmc_util.safe_sum(
[tf.math.log(current_volatility),
0.5 * (proposed_energy**2)]),
axis=axis))
proposed_log_density_parts.append(-proposed_energy)
# Compute part of `q(current_state | proposed_state)`
dual_energy = (state_diff + proposed_drift) / proposed_volatility
dual_energy = (
tf.reduce_sum(
input_tensor=mcmc_util.safe_sum(
[tf.math.log(proposed_volatility), 0.5 * (dual_energy**2)]),
axis=axis))
dual_log_density_parts.append(-dual_energy)
# Compute `q(proposed_state | current_state)`
proposed_log_density_reduce = tf.reduce_sum(
input_tensor=tf.stack(proposed_log_density_parts, axis=-1), axis=-1)
# Compute `q(current_state | proposed_state)`
dual_log_density_reduce = tf.reduce_sum(
input_tensor=tf.stack(dual_log_density_parts, axis=-1), axis=-1)
return mcmc_util.safe_sum([dual_log_density_reduce,
-proposed_log_density_reduce])
|
Helper which computes volatility_fn results and grads if needed.
|
def _maybe_call_volatility_fn_and_grads(volatility_fn,
state,
volatility_fn_results=None,
grads_volatility_fn=None,
sample_shape=None,
parallel_iterations=10):
"""Helper which computes `volatility_fn` results and grads, if needed."""
state_parts = list(state) if mcmc_util.is_list_like(state) else [state]
needs_volatility_fn_gradients = grads_volatility_fn is None
# Convert `volatility_fn_results` to a list
if volatility_fn_results is None:
volatility_fn_results = volatility_fn(*state_parts)
volatility_fn_results = (list(volatility_fn_results)
if mcmc_util.is_list_like(volatility_fn_results)
else [volatility_fn_results])
if len(volatility_fn_results) == 1:
volatility_fn_results *= len(state_parts)
if len(state_parts) != len(volatility_fn_results):
raise ValueError('`volatility_fn` should return a tensor or a list '
'of the same length as `current_state`.')
# The shape of 'volatility_parts' needs to have the number of chains as a
# leading dimension. For determinism we broadcast 'volatility_parts' to the
# shape of `state_parts` since each dimension of `state_parts` could have a
# different volatility value.
volatility_fn_results = _maybe_broadcast_volatility(volatility_fn_results,
state_parts)
if grads_volatility_fn is None:
[
_,
grads_volatility_fn,
] = diag_jacobian(
xs=state_parts,
ys=volatility_fn_results,
sample_shape=sample_shape,
parallel_iterations=parallel_iterations,
fn=volatility_fn)
# Compute gradient of `volatility_parts**2`
if needs_volatility_fn_gradients:
grads_volatility_fn = [
2. * g * volatility if g is not None else tf.zeros_like(
fn_arg, dtype=fn_arg.dtype.base_dtype)
for g, volatility, fn_arg in zip(
grads_volatility_fn, volatility_fn_results, state_parts)
]
return volatility_fn_results, grads_volatility_fn
|
Helper to broadcast volatility_parts to the shape of state_parts.
|
def _maybe_broadcast_volatility(volatility_parts,
state_parts):
"""Helper to broadcast `volatility_parts` to the shape of `state_parts`."""
return [v + tf.zeros_like(sp, dtype=sp.dtype.base_dtype)
for v, sp in zip(volatility_parts, state_parts)]
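# Usage sketch (illustrative, not part of the library). A scalar volatility is
# broadcast, by adding zeros, to the full shape of its state part.
[vol] = _maybe_broadcast_volatility([tf.constant(2.)], [tf.zeros([3, 4])])
# vol has shape [3, 4] with every entry equal to 2.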
|
Helper which processes input args to meet list-like assumptions.
|
def _prepare_args(target_log_prob_fn,
volatility_fn,
state,
step_size,
target_log_prob=None,
grads_target_log_prob=None,
volatility=None,
grads_volatility_fn=None,
diffusion_drift=None,
parallel_iterations=10):
"""Helper which processes input args to meet list-like assumptions."""
state_parts = list(state) if mcmc_util.is_list_like(state) else [state]
[
target_log_prob,
grads_target_log_prob,
] = mcmc_util.maybe_call_fn_and_grads(
target_log_prob_fn,
state_parts,
target_log_prob,
grads_target_log_prob)
[
volatility_parts,
grads_volatility,
] = _maybe_call_volatility_fn_and_grads(
volatility_fn,
state_parts,
volatility,
grads_volatility_fn,
distribution_util.prefer_static_shape(target_log_prob),
parallel_iterations)
step_sizes = (list(step_size) if mcmc_util.is_list_like(step_size)
else [step_size])
step_sizes = [
tf.convert_to_tensor(
value=s, name='step_size', dtype=target_log_prob.dtype)
for s in step_sizes
]
if len(step_sizes) == 1:
step_sizes *= len(state_parts)
if len(state_parts) != len(step_sizes):
raise ValueError('There should be exactly one `step_size` or it should '
'have same length as `current_state`.')
if diffusion_drift is None:
diffusion_drift_parts = _get_drift(step_sizes, volatility_parts,
grads_volatility,
grads_target_log_prob)
else:
diffusion_drift_parts = (list(diffusion_drift)
if mcmc_util.is_list_like(diffusion_drift)
else [diffusion_drift])
if len(state_parts) != len(diffusion_drift_parts):
raise ValueError('There should be exactly one `diffusion_drift` or it '
'should have same length as list-like `current_state`.')
return [
state_parts,
step_sizes,
target_log_prob,
grads_target_log_prob,
volatility_parts,
grads_volatility,
diffusion_drift_parts,
]
|
Build transition matrix for an autoregressive StateSpaceModel.
|
def make_ar_transition_matrix(coefficients):
"""Build transition matrix for an autoregressive StateSpaceModel.
When applied to a vector of previous values, this matrix computes
the expected new value (summing the previous states according to the
autoregressive coefficients) in the top dimension of the state space,
and moves all previous values down by one dimension, 'forgetting' the
final (least recent) value. That is, it looks like this:
```
ar_matrix = [ coefs[0], coefs[1], ..., coefs[order-1]
1., 0., ..., 0.
0., 1., ..., 0.
...
0., 0., ..., 1., 0. ]
```
Args:
coefficients: float `Tensor` of shape `concat([batch_shape, [order]])`.
Returns:
ar_matrix: float `Tensor` with shape `concat([batch_shape,
[order, order]])`.
"""
top_row = tf.expand_dims(coefficients, -2)
coef_shape = dist_util.prefer_static_shape(coefficients)
batch_shape, order = coef_shape[:-1], coef_shape[-1]
remaining_rows = tf.concat([
tf.eye(order - 1, dtype=coefficients.dtype, batch_shape=batch_shape),
tf.zeros(tf.concat([batch_shape, (order - 1, 1)], axis=0),
dtype=coefficients.dtype)
], axis=-1)
ar_matrix = tf.concat([top_row, remaining_rows], axis=-2)
return ar_matrix
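# Usage sketch (illustrative, not part of the library). For an AR(3) model
# with coefficients [0.5, -0.2, 0.1] the transition matrix is the companion
# matrix
#   [[0.5, -0.2, 0.1],
#    [1. ,  0. , 0. ],
#    [0. ,  1. , 0. ]]
coefs = tf.constant([0.5, -0.2, 0.1])
ar_matrix = make_ar_transition_matrix(coefs)  # shape [3, 3]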
|
Computes the diagonal of the Jacobian matrix of `ys = fn(xs)` wrt `xs`.
|
def diag_jacobian(xs,
ys=None,
sample_shape=None,
fn=None,
parallel_iterations=10,
name=None):
"""Computes diagonal of the Jacobian matrix of `ys=fn(xs)` wrt `xs`.
If `ys` is a tensor or a list of tensors of the form `(ys_1, .., ys_n)` and
`xs` is of the form `(xs_1, .., xs_n)`, the function `jacobians_diag`
computes the diagonal of the Jacobian matrix, i.e., the partial derivatives
`(dys_1/dxs_1, ..., dys_n/dxs_n)`. For definition details, see
https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant
#### Example
##### Diagonal Hessian of the log-density of a 3D Gaussian distribution
In this example we compute the diagonal of the Hessian of the log-density of a
3-dimensional Gaussian distribution by applying `diag_jacobian` to its gradient.
```python
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
tfd = tfp.distributions
dtype = np.float32
with tf.Session(graph=tf.Graph()) as sess:
true_mean = dtype([0, 0, 0])
true_cov = dtype([[1, 0.25, 0.25], [0.25, 2, 0.25], [0.25, 0.25, 3]])
chol = tf.linalg.cholesky(true_cov)
target = tfd.MultivariateNormalTriL(loc=true_mean, scale_tril=chol)
# Assume that the state is passed as a list of tensors `x` and `y`.
# Then the target function is defined as follows:
def target_fn(x, y):
# Stack the input tensors together
z = tf.concat([x, y], axis=-1) - true_mean
return target.log_prob(z)
sample_shape = [3, 5]
state = [tf.ones(sample_shape + [2], dtype=dtype),
tf.ones(sample_shape + [1], dtype=dtype)]
fn_val, grads = tfp.math.value_and_gradient(target_fn, state)
# We can either pass the `sample_shape` of the `state` or not, which impacts
# computational speed of `diag_jacobian`
_, diag_jacobian_shape_passed = diag_jacobian(
xs=state, ys=grads, sample_shape=tf.shape(fn_val))
_, diag_jacobian_shape_none = diag_jacobian(
xs=state, ys=grads)
diag_jacobian_shape_passed_ = sess.run(diag_jacobian_shape_passed)
diag_jacobian_shape_none_ = sess.run(diag_jacobian_shape_none)
print('hessian computed through `diag_jacobian`, sample_shape passed: ',
np.concatenate(diag_jacobian_shape_passed_, -1))
print('hessian computed through `diag_jacobian`, sample_shape skipped',
np.concatenate(diag_jacobian_shape_none_, -1))
```
Args:
xs: `Tensor` or a python `list` of `Tensors` of real-like dtypes and shapes
`sample_shape` + `event_shape_i`, where `event_shape_i` can be different
for different tensors.
ys: `Tensor` or a python `list` of `Tensors` of the same dtype as `xs`. Must
broadcast with the shape of `xs`. Can be omitted if `fn` is provided.
sample_shape: A common `sample_shape` of the input tensors of `xs`. If not
provided, it is assumed to be `[1]`, which may result in slow performance
of `diag_jacobian`.
fn: Python callable that takes `xs` as an argument (or `*xs`, if it is a
list) and returns `ys`. Might be skipped if `ys` is provided and
`tf.enable_eager_execution()` is disabled.
parallel_iterations: `int` that specifies the allowed number of coordinates
of the input tensor `xs`, for which the partial derivatives `dys_i/dxs_i`
can be computed in parallel.
name: Python `str` name prefixed to `Ops` created by this function.
Default value: `None` (i.e., "diag_jacobian").
Returns:
ys: a list, which coincides with the input `ys`, when provided.
If the input `ys` is None, `fn(*xs)` gets computed and returned as a list.
jacobians_diag_res: a `Tensor` or a Python list of `Tensor`s of the same
dtypes and shapes as the input `xs`. This is the diagonal of the Jacobian
of ys wrt xs.
Raises:
ValueError: if lists `xs` and `ys` have different length or both `ys` and
`fn` are `None`, or `fn` is None in the eager execution mode.
"""
with tf.compat.v1.name_scope(name, 'jacobians_diag', [xs, ys]):
if sample_shape is None:
sample_shape = [1]
# Output Jacobian diagonal
jacobians_diag_res = []
# Convert input `xs` to a list
xs = list(xs) if _is_list_like(xs) else [xs]
xs = [tf.convert_to_tensor(value=x) for x in xs]
if not tf.executing_eagerly():
if ys is None:
if fn is None:
raise ValueError('Both `ys` and `fn` can not be `None`')
else:
ys = fn(*xs)
# Convert ys to a list
ys = list(ys) if _is_list_like(ys) else [ys]
if len(xs) != len(ys):
raise ValueError('`xs` and `ys` should have the same length')
for y, x in zip(ys, xs):
# Broadcast `y` to the shape of `x`.
y_ = y + tf.zeros_like(x)
# Change `event_shape` to one-dimension
y_ = tf.reshape(y_, tf.concat([sample_shape, [-1]], -1))
# Declare an iterator and tensor array loop variables for the gradients.
n = tf.size(input=x) / tf.cast(
tf.reduce_prod(input_tensor=sample_shape), dtype=tf.int32)
n = tf.cast(n, dtype=tf.int32)
loop_vars = [
0,
tf.TensorArray(x.dtype, n)
]
def loop_body(j):
"""Loop function to compute gradients of the each direction."""
# Gradient along direction `j`.
res = tf.gradients(ys=y_[..., j], xs=x)[0] # pylint: disable=cell-var-from-loop
if res is None:
# Return zero, if the gradient is `None`.
res = tf.zeros(tf.concat([sample_shape, [1]], -1),
dtype=x.dtype) # pylint: disable=cell-var-from-loop
else:
# Reshape `event_shape` to 1D
res = tf.reshape(res, tf.concat([sample_shape, [-1]], -1))
# Add artificial dimension for the case of zero shape input tensor
res = tf.expand_dims(res, 0)
res = res[..., j]
return res # pylint: disable=cell-var-from-loop
# Iterate over all elements of the gradient and compute second order
# derivatives.
_, jacobian_diag_res = tf.while_loop(
cond=lambda j, _: j < n, # pylint: disable=cell-var-from-loop
body=lambda j, result: (j + 1, result.write(j, loop_body(j))),
loop_vars=loop_vars,
parallel_iterations=parallel_iterations)
shape_x = tf.shape(input=x)
# Stack gradients together and move flattened `event_shape` to the
# zero position
reshaped_jacobian_diag = tf.transpose(a=jacobian_diag_res.stack())
# Reshape to the original tensor
reshaped_jacobian_diag = tf.reshape(reshaped_jacobian_diag, shape_x)
jacobians_diag_res.append(reshaped_jacobian_diag)
else:
if fn is None:
raise ValueError('`fn` can not be `None` when eager execution is '
'enabled')
if ys is None:
ys = fn(*xs)
def fn_slice(i, j):
"""Broadcast y[i], flatten event shape of y[i], return y[i][..., j]."""
def fn_broadcast(*state):
res = fn(*state)
res = list(res) if _is_list_like(res) else [res]
if len(res) != len(state):
res *= len(state)
res = [tf.reshape(r + tf.zeros_like(s),
tf.concat([sample_shape, [-1]], -1))
for r, s in zip(res, state)]
return res
# Expand dimensions before returning in order to support 0D input `xs`
return lambda *state: tf.expand_dims(fn_broadcast(*state)[i], 0)[..., j]
def make_loop_body(i, x):
"""Loop function to compute gradients of the each direction."""
def _fn(j, result):
res = value_and_gradient(fn_slice(i, j), xs)[1][i]
if res is None:
res = tf.zeros(tf.concat([sample_shape, [1]], -1), dtype=x.dtype)
else:
res = tf.reshape(res, tf.concat([sample_shape, [-1]], -1))
res = res[..., j]
return j + 1, result.write(j, res)
return _fn
for i, x in enumerate(xs):
# Declare an iterator and tensor array loop variables for the gradients.
n = tf.size(input=x) / tf.cast(
tf.reduce_prod(input_tensor=sample_shape), dtype=tf.int32)
n = tf.cast(n, dtype=tf.int32)
loop_vars = [
0,
tf.TensorArray(x.dtype, n)
]
# Iterate over all elements of the gradient and compute second order
# derivatives.
_, jacobian_diag_res = tf.while_loop(
cond=lambda j, _: j < n,
body=make_loop_body(i, x),
loop_vars=loop_vars,
parallel_iterations=parallel_iterations)
shape_x = tf.shape(input=x)
# Stack gradients together and move flattened `event_shape` to the
# zero position
reshaped_jacobian_diag = tf.transpose(a=jacobian_diag_res.stack())
# Reshape to the original tensor
reshaped_jacobian_diag = tf.reshape(reshaped_jacobian_diag, shape_x)
jacobians_diag_res.append(reshaped_jacobian_diag)
return ys, jacobians_diag_res
|
Calculates the reshaped dimensions (replacing up to one -1 in reshape).
|
def calculate_reshape(original_shape, new_shape, validate=False, name=None):
"""Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
batch_shape_static = tensorshape_util.constant_value_as_shape(new_shape)
if tensorshape_util.is_fully_defined(batch_shape_static):
return np.int32(batch_shape_static), batch_shape_static, []
with tf.name_scope(name or "calculate_reshape"):
original_size = tf.reduce_prod(input_tensor=original_shape)
implicit_dim = tf.equal(new_shape, -1)
size_implicit_dim = (
original_size // tf.maximum(1, -tf.reduce_prod(input_tensor=new_shape)))
new_ndims = tf.shape(input=new_shape)
expanded_new_shape = tf.where( # Assumes exactly one `-1`.
implicit_dim, tf.fill(new_ndims, size_implicit_dim), new_shape)
validations = [] if not validate else [ # pylint: disable=g-long-ternary
assert_util.assert_rank(
original_shape, 1, message="Original shape must be a vector."),
assert_util.assert_rank(
new_shape, 1, message="New shape must be a vector."),
assert_util.assert_less_equal(
tf.math.count_nonzero(implicit_dim, dtype=tf.int32),
1,
message="At most one dimension can be unknown."),
assert_util.assert_positive(
expanded_new_shape, message="Shape elements must be >=-1."),
assert_util.assert_equal(
tf.reduce_prod(input_tensor=expanded_new_shape),
original_size,
message="Shape sizes do not match."),
]
return expanded_new_shape, batch_shape_static, validations
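# Usage sketch (illustrative, not part of the library). Resolving the single
# `-1` in a reshape request: for an original shape of [6] and a requested
# shape of [-1, 3] the implicit dimension expands to 2.
expanded, static_shape, checks = calculate_reshape(
    original_shape=tf.constant([6]), new_shape=tf.constant([-1, 3]))
# expanded evaluates to [2, 3]; `checks` is empty because validate=False.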
|
Helper to __init__ which makes or raises assertions.
|
def validate_init_args_statically(distribution, batch_shape):
"""Helper to __init__ which makes or raises assertions."""
if tensorshape_util.rank(batch_shape.shape) is not None:
if tensorshape_util.rank(batch_shape.shape) != 1:
raise ValueError("`batch_shape` must be a vector "
"(saw rank: {}).".format(
tensorshape_util.rank(batch_shape.shape)))
batch_shape_static = tensorshape_util.constant_value_as_shape(batch_shape)
batch_size_static = tensorshape_util.num_elements(batch_shape_static)
dist_batch_size_static = tensorshape_util.num_elements(
distribution.batch_shape)
if batch_size_static is not None and dist_batch_size_static is not None:
if batch_size_static != dist_batch_size_static:
raise ValueError("`batch_shape` size ({}) must match "
"`distribution.batch_shape` size ({}).".format(
batch_size_static, dist_batch_size_static))
if tensorshape_util.dims(batch_shape_static) is not None:
if any(
tf.compat.dimension_value(dim) is not None and
tf.compat.dimension_value(dim) < 1 for dim in batch_shape_static):
raise ValueError("`batch_shape` elements must be >=-1.")
|
Computes graph and static sample_shape.
|
def _sample_shape(self, x):
"""Computes graph and static `sample_shape`."""
x_ndims = (
tf.rank(x) if tensorshape_util.rank(x.shape) is None else
tensorshape_util.rank(x.shape))
event_ndims = (
tf.size(input=self.event_shape_tensor())
if tensorshape_util.rank(self.event_shape) is None else
tensorshape_util.rank(self.event_shape))
batch_ndims = (
tf.size(input=self._batch_shape_unexpanded)
if tensorshape_util.rank(self.batch_shape) is None else
tensorshape_util.rank(self.batch_shape))
sample_ndims = x_ndims - batch_ndims - event_ndims
if isinstance(sample_ndims, int):
static_sample_shape = x.shape[:sample_ndims]
else:
static_sample_shape = tf.TensorShape(None)
if tensorshape_util.is_fully_defined(static_sample_shape):
sample_shape = np.int32(static_sample_shape)
else:
sample_shape = tf.shape(input=x)[:sample_ndims]
return sample_shape, static_sample_shape
|
Calls fn, appropriately reshaping its input x and output.
|
def _call_reshape_input_output(self, fn, x, extra_kwargs=None):
"""Calls `fn`, appropriately reshaping its input `x` and output."""
# Note: we take `extra_kwargs` as a dict rather than `**extra_kwargs`
# because it is possible the user provided extra kwargs would itself
# have `fn` and/or `x` as a key.
with tf.control_dependencies(self._runtime_assertions +
self._validate_sample_arg(x)):
sample_shape, static_sample_shape = self._sample_shape(x)
old_shape = tf.concat(
[
sample_shape,
self.distribution.batch_shape_tensor(),
self.event_shape_tensor(),
],
axis=0)
x_reshape = tf.reshape(x, old_shape)
result = fn(x_reshape, **extra_kwargs) if extra_kwargs else fn(x_reshape)
new_shape = tf.concat(
[
sample_shape,
self._batch_shape_unexpanded,
], axis=0)
result = tf.reshape(result, new_shape)
if (tensorshape_util.rank(static_sample_shape) is not None and
tensorshape_util.rank(self.batch_shape) is not None):
new_shape = tensorshape_util.concatenate(static_sample_shape,
self.batch_shape)
tensorshape_util.set_shape(result, new_shape)
return result
|
Calls fn and appropriately reshapes its output.
|
def _call_and_reshape_output(
self,
fn,
event_shape_list=None,
static_event_shape_list=None,
extra_kwargs=None):
"""Calls `fn` and appropriately reshapes its output."""
# Note: we take `extra_kwargs` as a dict rather than `**extra_kwargs`
# because it is possible the user provided extra kwargs would itself
# have `fn`, `event_shape_list`, `static_event_shape_list` and/or
# `extra_kwargs` as keys.
with tf.control_dependencies(self._runtime_assertions):
if event_shape_list is None:
event_shape_list = [self._event_shape_tensor()]
if static_event_shape_list is None:
static_event_shape_list = [self.event_shape]
new_shape = tf.concat(
[self._batch_shape_unexpanded] + event_shape_list, axis=0)
result = tf.reshape(fn(**extra_kwargs) if extra_kwargs else fn(),
new_shape)
if (tensorshape_util.rank(self.batch_shape) is not None and
tensorshape_util.rank(self.event_shape) is not None):
event_shape = tf.TensorShape([])
for rss in static_event_shape_list:
event_shape = tensorshape_util.concatenate(event_shape, rss)
static_shape = tensorshape_util.concatenate(
self.batch_shape, event_shape)
tensorshape_util.set_shape(result, static_shape)
return result
|
Helper which validates sample arg, e.g., input to log_prob.
|
def _validate_sample_arg(self, x):
"""Helper which validates sample arg, e.g., input to `log_prob`."""
with tf.name_scope("validate_sample_arg"):
x_ndims = (
tf.rank(x) if tensorshape_util.rank(x.shape) is None else
tensorshape_util.rank(x.shape))
event_ndims = (
tf.size(input=self.event_shape_tensor())
if tensorshape_util.rank(self.event_shape) is None else
tensorshape_util.rank(self.event_shape))
batch_ndims = (
tf.size(input=self._batch_shape_unexpanded)
if tensorshape_util.rank(self.batch_shape) is None else
tensorshape_util.rank(self.batch_shape))
expected_batch_event_ndims = batch_ndims + event_ndims
if (isinstance(x_ndims, int) and
isinstance(expected_batch_event_ndims, int)):
if x_ndims < expected_batch_event_ndims:
raise NotImplementedError(
"Broadcasting is not supported; too few batch and event dims "
"(expected at least {}, saw {}).".format(
expected_batch_event_ndims, x_ndims))
ndims_assertion = []
elif self.validate_args:
ndims_assertion = [
assert_util.assert_greater_equal(
x_ndims,
expected_batch_event_ndims,
message=("Broadcasting is not supported; too few "
"batch and event dims."),
name="assert_batch_and_event_ndims_large_enough"),
]
if (tensorshape_util.is_fully_defined(self.batch_shape) and
tensorshape_util.is_fully_defined(self.event_shape)):
expected_batch_event_shape = np.int32(
tensorshape_util.concatenate(self.batch_shape, self.event_shape))
else:
expected_batch_event_shape = tf.concat(
[
self.batch_shape_tensor(),
self.event_shape_tensor(),
], axis=0)
sample_ndims = x_ndims - expected_batch_event_ndims
if isinstance(sample_ndims, int):
sample_ndims = max(sample_ndims, 0)
if (isinstance(sample_ndims, int) and
tensorshape_util.is_fully_defined(x.shape[sample_ndims:])):
actual_batch_event_shape = np.int32(x.shape[sample_ndims:])
else:
sample_ndims = tf.maximum(sample_ndims, 0)
actual_batch_event_shape = tf.shape(input=x)[sample_ndims:]
if (isinstance(expected_batch_event_shape, np.ndarray) and
isinstance(actual_batch_event_shape, np.ndarray)):
if any(expected_batch_event_shape != actual_batch_event_shape):
raise NotImplementedError("Broadcasting is not supported; "
"unexpected batch and event shape "
"(expected {}, saw {}).".format(
expected_batch_event_shape,
actual_batch_event_shape))
        # We need to set the final runtime assertions to `ndims_assertion`,
        # since it's possible this assertion was created. We could add a
        # condition to only do so if `self.validate_args == True`; however,
        # this is redundant as `ndims_assertion` already encodes this
        # information.
runtime_assertions = ndims_assertion
elif self.validate_args:
# We need to make the `ndims_assertion` a control dep because otherwise
# TF itself might raise an exception owing to this assertion being
        # ill-defined, i.e., one cannot even compare different-rank Tensors.
with tf.control_dependencies(ndims_assertion):
shape_assertion = assert_util.assert_equal(
expected_batch_event_shape,
actual_batch_event_shape,
message=("Broadcasting is not supported; "
"unexpected batch and event shape."),
name="assert_batch_and_event_shape_same")
runtime_assertions = [shape_assertion]
else:
runtime_assertions = []
return runtime_assertions
|
The binomial cumulative distribution function.
|
def _bdtr(k, n, p):
"""The binomial cumulative distribution function.
Args:
k: floating point `Tensor`.
n: floating point `Tensor`.
p: floating point `Tensor`.
Returns:
    `sum_{j=0}^k C(n, j) p^j (1 - p)^(n - j)`.
"""
# Trick for getting safe backprop/gradients into n, k when
# betainc(a = 0, ..) = nan
# Write:
# where(unsafe, safe_output, betainc(where(unsafe, safe_input, input)))
ones = tf.ones_like(n - k)
k_eq_n = tf.equal(k, n)
safe_dn = tf.where(k_eq_n, ones, n - k)
dk = tf.math.betainc(a=safe_dn, b=k + 1, x=1 - p)
return tf.where(k_eq_n, ones, dk)
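# A hedged numerical check of the betainc identity used above, assuming eager
# execution; the scipy reference value is only for comparison and is an
# assumption of this sketch, not part of the original code.
import tensorflow as tf
from scipy import stats
k, n, p = 3., 10., 0.4
cdf = _bdtr(tf.constant(k), tf.constant(n), tf.constant(p))
expected = stats.binom.cdf(3, 10, 0.4)  # ~0.3823
# `cdf` and `expected` should agree up to floating point error.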
|
Check counts for proper shape, values, then return tensor version.
|
def _maybe_assert_valid_sample(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args:
return counts
counts = distribution_util.embed_check_nonnegative_integer_form(counts)
return distribution_util.with_dependencies([
assert_util.assert_less_equal(
counts,
self.total_count,
message="counts are not less than or equal to n."),
], counts)
|
Executes `model`, creating both samples and distributions.
|
def _flat_sample_distributions(self, sample_shape=(), seed=None, value=None):
"""Executes `model`, creating both samples and distributions."""
ds = []
values_out = []
seed = seed_stream.SeedStream('JointDistributionCoroutine', seed)
gen = self._model()
index = 0
d = next(gen)
try:
while True:
actual_distribution = d.distribution if isinstance(d, self.Root) else d
ds.append(actual_distribution)
if (value is not None and len(value) > index and
value[index] is not None):
        seed()  # Ensure reproducibility even when values are (partially) set.
next_value = value[index]
else:
next_value = actual_distribution.sample(
sample_shape=sample_shape if isinstance(d, self.Root) else (),
seed=seed())
values_out.append(next_value)
index += 1
d = gen.send(next_value)
except StopIteration:
pass
return ds, values_out
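# Hedged sketch of the coroutine-model protocol this helper consumes: a
# generator that yields distributions, wrapping unconditioned ones in `Root`.
# The two-variable model below is an illustrative assumption, not the
# original use case.
import tensorflow_probability as tfp
tfd = tfp.distributions
Root = tfd.JointDistributionCoroutine.Root
def toy_model():
  scale = yield Root(tfd.HalfNormal(2.5))
  yield tfd.Normal(loc=0., scale=scale)
joint = tfd.JointDistributionCoroutine(toy_model)
scales, normals = joint.sample(5)  # one sample Tensor per yielded distribution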
|
Calculate the batched KL divergence KL(a || b) with a and b Pareto.
|
def _kl_pareto_pareto(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b Pareto.
Args:
a: instance of a Pareto distribution object.
b: instance of a Pareto distribution object.
name: (optional) Name to use for created operations.
default is "kl_pareto_pareto".
Returns:
Batchwise KL(a || b)
"""
with tf.name_scope(name or "kl_pareto_pareto"):
# Consistent with
# http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 55
# Terminology is different from source to source for Pareto distributions.
# The 'concentration' parameter corresponds to 'a' in that source, and the
# 'scale' parameter corresponds to 'm'.
final_batch_shape = distribution_util.get_broadcast_shape(
a.concentration, b.concentration, a.scale, b.scale)
common_type = dtype_util.common_dtype(
[a.concentration, b.concentration, a.scale, b.scale], tf.float32)
return tf.where(
a.scale >= b.scale,
b.concentration * (tf.math.log(a.scale) - tf.math.log(b.scale)) +
tf.math.log(a.concentration) - tf.math.log(b.concentration) +
b.concentration / a.concentration - 1.0,
tf.broadcast_to(tf.cast(np.inf, common_type), final_batch_shape))
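# Hedged usage sketch: the registered KL above is normally reached through
# `tfd.kl_divergence`; the parameter values are illustrative assumptions.
import tensorflow_probability as tfp
tfd = tfp.distributions
a = tfd.Pareto(concentration=2., scale=1.5)
b = tfd.Pareto(concentration=3., scale=1.)
kl = tfd.kl_divergence(a, b)  # finite, since a.scale >= b.scale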
|
Returns f(x) if x is in the support, and alt otherwise.
|
def _extend_support(self, x, f, alt):
"""Returns `f(x)` if x is in the support, and `alt` otherwise.
Given `f` which is defined on the support of this distribution
(e.g. x > scale), extend the function definition to the real line
by defining `f(x) = alt` for `x < scale`.
Args:
x: Floating-point Tensor to evaluate `f` at.
f: Lambda that takes in a tensor and returns a tensor. This represents
      the function whose domain of definition we want to extend.
alt: Python or numpy literal representing the value to use for extending
the domain.
Returns:
Tensor representing an extension of `f(x)`.
"""
# We need to do a series of broadcasts for the tf.where.
scale = self.scale + tf.zeros_like(self.concentration)
is_invalid = x < scale
scale = scale + tf.zeros_like(x)
x = x + tf.zeros_like(scale)
# We need to do this to ensure gradients are sound.
y = f(tf.where(is_invalid, scale, x))
if alt == 0.:
alt = tf.zeros_like(y)
elif alt == 1.:
alt = tf.ones_like(y)
else:
alt = tf.fill(
dims=tf.shape(input=y),
value=dtype_util.as_numpy_dtype(self.dtype)(alt))
return tf.where(is_invalid, alt, y)
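# Standalone, hedged sketch of the support-extension pattern used above,
# written as a free function rather than a distribution method; `scale` and
# `alt` here are illustrative stand-ins.
import tensorflow as tf
def extend_support_sketch(x, scale, f, alt):
  is_invalid = x < scale
  # Evaluate `f` only on in-support inputs so gradients stay finite.
  y = f(tf.where(is_invalid, tf.broadcast_to(scale, tf.shape(x)), x))
  return tf.where(is_invalid, tf.fill(tf.shape(y), alt), y)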
|
Latent Dirichlet Allocation in terms of its generative process.
|
def latent_dirichlet_allocation(concentration, topics_words):
"""Latent Dirichlet Allocation in terms of its generative process.
The model posits a distribution over bags of words and is parameterized by
a concentration and the topic-word probabilities. It collapses per-word
topic assignments.
Args:
concentration: A Tensor of shape [1, num_topics], which parameterizes the
Dirichlet prior over topics.
topics_words: A Tensor of shape [num_topics, num_words], where each row
(topic) denotes the probability of each word being in that topic.
Returns:
bag_of_words: A random variable capturing a sample from the model, of shape
[1, num_words]. It represents one generated document as a bag of words.
"""
topics = ed.Dirichlet(concentration=concentration, name="topics")
word_probs = tf.matmul(topics, topics_words)
# The observations are bags of words and therefore not one-hot. However,
# log_prob of OneHotCategorical computes the probability correctly in
# this case.
bag_of_words = ed.OneHotCategorical(probs=word_probs, name="bag_of_words")
return bag_of_words
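# Hedged usage sketch with made-up shapes; `ed` is assumed to be the Edward2
# interface (tfp.edward2) used throughout this example.
import tensorflow as tf
num_topics, num_words = 5, 100
concentration = tf.ones([1, num_topics])
topics_words = tf.fill([num_topics, num_words], 1. / num_words)
bag_of_words = latent_dirichlet_allocation(concentration, topics_words)
# `bag_of_words` is an ed.RandomVariable whose value has shape [1, num_words].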
|
Creates the variational distribution for LDA.
|
def make_lda_variational(activation, num_topics, layer_sizes):
"""Creates the variational distribution for LDA.
Args:
activation: Activation function to use.
num_topics: The number of topics.
layer_sizes: The number of hidden units per layer in the encoder.
Returns:
lda_variational: A function that takes a bag-of-words Tensor as
input and returns a distribution over topics.
"""
encoder_net = tf.keras.Sequential()
for num_hidden_units in layer_sizes:
encoder_net.add(
tf.keras.layers.Dense(
num_hidden_units,
activation=activation,
kernel_initializer=tf.compat.v1.glorot_normal_initializer()))
encoder_net.add(
tf.keras.layers.Dense(
num_topics,
activation=tf.nn.softplus,
kernel_initializer=tf.compat.v1.glorot_normal_initializer()))
def lda_variational(bag_of_words):
concentration = _clip_dirichlet_parameters(encoder_net(bag_of_words))
return ed.Dirichlet(concentration=concentration, name="topics_posterior")
return lda_variational
|
Builds the model function for use in an Estimator.
|
def model_fn(features, labels, mode, params, config):
"""Builds the model function for use in an Estimator.
Arguments:
features: The input features for the Estimator.
labels: The labels, unused here.
mode: Signifies whether it is train or test or predict.
params: Some hyperparameters as a dictionary.
config: The RunConfig, unused here.
Returns:
EstimatorSpec: A tf.estimator.EstimatorSpec instance.
"""
del labels, config
# Set up the model's learnable parameters.
logit_concentration = tf.compat.v1.get_variable(
"logit_concentration",
shape=[1, params["num_topics"]],
initializer=tf.compat.v1.initializers.constant(
_softplus_inverse(params["prior_initial_value"])))
concentration = _clip_dirichlet_parameters(
tf.nn.softplus(logit_concentration))
num_words = features.shape[1]
topics_words_logits = tf.compat.v1.get_variable(
"topics_words_logits",
shape=[params["num_topics"], num_words],
initializer=tf.compat.v1.glorot_normal_initializer())
topics_words = tf.nn.softmax(topics_words_logits, axis=-1)
# Compute expected log-likelihood. First, sample from the variational
# distribution; second, compute the log-likelihood given the sample.
lda_variational = make_lda_variational(
params["activation"],
params["num_topics"],
params["layer_sizes"])
with ed.tape() as variational_tape:
_ = lda_variational(features)
with ed.tape() as model_tape:
with ed.interception(
make_value_setter(topics=variational_tape["topics_posterior"])):
posterior_predictive = latent_dirichlet_allocation(concentration,
topics_words)
log_likelihood = posterior_predictive.distribution.log_prob(features)
tf.compat.v1.summary.scalar("log_likelihood",
tf.reduce_mean(input_tensor=log_likelihood))
# Compute the KL-divergence between two Dirichlets analytically.
# The sampled KL does not work well for "sparse" distributions
# (see Appendix D of [2]).
kl = variational_tape["topics_posterior"].distribution.kl_divergence(
model_tape["topics"].distribution)
tf.compat.v1.summary.scalar("kl", tf.reduce_mean(input_tensor=kl))
# Ensure that the KL is non-negative (up to a very small slack).
# Negative KL can happen due to numerical instability.
with tf.control_dependencies(
[tf.compat.v1.assert_greater(kl, -1e-3, message="kl")]):
kl = tf.identity(kl)
elbo = log_likelihood - kl
avg_elbo = tf.reduce_mean(input_tensor=elbo)
tf.compat.v1.summary.scalar("elbo", avg_elbo)
loss = -avg_elbo
# Perform variational inference by minimizing the -ELBO.
global_step = tf.compat.v1.train.get_or_create_global_step()
optimizer = tf.compat.v1.train.AdamOptimizer(params["learning_rate"])
# This implements the "burn-in" for prior parameters (see Appendix D of [2]).
# For the first prior_burn_in_steps steps they are fixed, and then trained
# jointly with the other parameters.
grads_and_vars = optimizer.compute_gradients(loss)
grads_and_vars_except_prior = [
x for x in grads_and_vars if x[1] != logit_concentration]
def train_op_except_prior():
return optimizer.apply_gradients(
grads_and_vars_except_prior,
global_step=global_step)
def train_op_all():
return optimizer.apply_gradients(
grads_and_vars,
global_step=global_step)
train_op = tf.cond(
pred=global_step < params["prior_burn_in_steps"],
true_fn=train_op_except_prior,
false_fn=train_op_all)
# The perplexity is an exponent of the average negative ELBO per word.
words_per_document = tf.reduce_sum(input_tensor=features, axis=1)
log_perplexity = -elbo / words_per_document
tf.compat.v1.summary.scalar(
"perplexity", tf.exp(tf.reduce_mean(input_tensor=log_perplexity)))
(log_perplexity_tensor,
log_perplexity_update) = tf.compat.v1.metrics.mean(log_perplexity)
perplexity_tensor = tf.exp(log_perplexity_tensor)
# Obtain the topics summary. Implemented as a py_func for simplicity.
topics = tf.compat.v1.py_func(
functools.partial(get_topics_strings, vocabulary=params["vocabulary"]),
[topics_words, concentration],
tf.string,
stateful=False)
tf.compat.v1.summary.text("topics", topics)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops={
"elbo": tf.compat.v1.metrics.mean(elbo),
"log_likelihood": tf.compat.v1.metrics.mean(log_likelihood),
"kl": tf.compat.v1.metrics.mean(kl),
"perplexity": (perplexity_tensor, log_perplexity_update),
"topics": (topics, tf.no_op()),
},
)
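# Hedged sketch of wiring `model_fn` into a tf.estimator.Estimator using the
# fake-data helpers defined below; all hyperparameter values are illustrative
# assumptions, not the original script's flags.
import tensorflow as tf
def _example_train_lda(model_dir="/tmp/lda"):
  train_input_fn, eval_input_fn, vocabulary = build_fake_input_fns(batch_size=32)
  params = {
      "num_topics": 10,
      "activation": tf.nn.relu,
      "layer_sizes": [300, 300],
      "learning_rate": 3e-4,
      "prior_initial_value": 0.7,
      "prior_burn_in_steps": 1000,
      "vocabulary": vocabulary,
  }
  estimator = tf.estimator.Estimator(model_fn, model_dir=model_dir, params=params)
  estimator.train(train_input_fn, steps=100)
  return estimator.evaluate(eval_input_fn)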
|
Returns the summary of the learned topics.
|
def get_topics_strings(topics_words, alpha, vocabulary,
topics_to_print=10, words_per_topic=10):
"""Returns the summary of the learned topics.
Arguments:
topics_words: KxV tensor with topics as rows and words as columns.
alpha: 1xK tensor of prior Dirichlet concentrations for the
topics.
vocabulary: A mapping of word's integer index to the corresponding string.
topics_to_print: The number of topics with highest prior weight to
summarize.
    words_per_topic: Number of words per topic to return.
Returns:
summary: A np.array with strings.
"""
alpha = np.squeeze(alpha, axis=0)
# Use a stable sorting algorithm so that when alpha is fixed
# we always get the same topics.
highest_weight_topics = np.argsort(-alpha, kind="mergesort")
top_words = np.argsort(-topics_words, axis=1)
res = []
for topic_idx in highest_weight_topics[:topics_to_print]:
l = ["index={} alpha={:.2f}".format(topic_idx, alpha[topic_idx])]
l += [vocabulary[word] for word in top_words[topic_idx, :words_per_topic]]
res.append(" ".join(l))
return np.array(res)
|
20 newsgroups as a tf.data.Dataset.
|
def newsgroups_dataset(directory, split_name, num_words, shuffle_and_repeat):
"""20 newsgroups as a tf.data.Dataset."""
data = np.load(download(directory, FILE_TEMPLATE.format(split=split_name)))
# The last row is empty in both train and test.
data = data[:-1]
# Each row is a list of word ids in the document. We first convert this to
# sparse COO matrix (which automatically sums the repeating words). Then,
# we convert this COO matrix to CSR format which allows for fast querying of
# documents.
num_documents = data.shape[0]
indices = np.array([(row_idx, column_idx)
for row_idx, row in enumerate(data)
for column_idx in row])
sparse_matrix = scipy.sparse.coo_matrix(
(np.ones(indices.shape[0]), (indices[:, 0], indices[:, 1])),
shape=(num_documents, num_words),
dtype=np.float32)
sparse_matrix = sparse_matrix.tocsr()
dataset = tf.data.Dataset.range(num_documents)
# For training, we shuffle each epoch and repeat the epochs.
if shuffle_and_repeat:
dataset = dataset.shuffle(num_documents).repeat()
# Returns a single document as a dense TensorFlow tensor. The dataset is
# stored as a sparse matrix outside of the graph.
def get_row_py_func(idx):
def get_row_python(idx_py):
return np.squeeze(np.array(sparse_matrix[idx_py].todense()), axis=0)
py_func = tf.compat.v1.py_func(
get_row_python, [idx], tf.float32, stateful=False)
py_func.set_shape((num_words,))
return py_func
dataset = dataset.map(get_row_py_func)
return dataset
|
Builds fake data for unit testing.
|
def build_fake_input_fns(batch_size):
"""Builds fake data for unit testing."""
num_words = 1000
vocabulary = [str(i) for i in range(num_words)]
random_sample = np.random.randint(
10, size=(batch_size, num_words)).astype(np.float32)
def train_input_fn():
dataset = tf.data.Dataset.from_tensor_slices(random_sample)
dataset = dataset.batch(batch_size).repeat()
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
def eval_input_fn():
dataset = tf.data.Dataset.from_tensor_slices(random_sample)
dataset = dataset.batch(batch_size)
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
return train_input_fn, eval_input_fn, vocabulary
|
Builds iterators for train and evaluation data.
|
def build_input_fns(data_dir, batch_size):
"""Builds iterators for train and evaluation data.
Each object is represented as a bag-of-words vector.
Arguments:
data_dir: Folder in which to store the data.
batch_size: Batch size for both train and evaluation.
Returns:
train_input_fn: A function that returns an iterator over the training data.
eval_input_fn: A function that returns an iterator over the evaluation data.
vocabulary: A mapping of word's integer index to the corresponding string.
"""
  with open(download(data_dir, "vocab.pkl"), "rb") as f:
words_to_idx = pickle.load(f)
num_words = len(words_to_idx)
vocabulary = [None] * num_words
for word, idx in words_to_idx.items():
vocabulary[idx] = word
# Build an iterator over training batches.
def train_input_fn():
dataset = newsgroups_dataset(
data_dir, "train", num_words, shuffle_and_repeat=True)
# Prefetching makes training about 1.5x faster.
dataset = dataset.batch(batch_size).prefetch(32)
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
# Build an iterator over the heldout set.
def eval_input_fn():
dataset = newsgroups_dataset(
data_dir, "test", num_words, shuffle_and_repeat=False)
dataset = dataset.batch(batch_size)
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
return train_input_fn, eval_input_fn, vocabulary
|
Calculate the batched KL divergence KL(a || b) with a and b Chi.
|
def _kl_chi_chi(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b Chi.
Args:
a: instance of a Chi distribution object.
b: instance of a Chi distribution object.
name: (optional) Name to use for created operations.
default is "kl_chi_chi".
Returns:
Batchwise KL(a || b)
"""
with tf.name_scope(name or "kl_chi_chi"):
# Consistent with
# https://mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 118
# The paper introduces an additional scaling parameter; setting that
# parameter to 1 and simplifying yields the expression we use here.
return (0.5 * tf.math.digamma(0.5 * a.df) * (a.df - b.df) +
tf.math.lgamma(0.5 * b.df) - tf.math.lgamma(0.5 * a.df))
|
Returns a (dense) column of a Tensor or SparseTensor.
|
def _sparse_or_dense_matmul_onehot(sparse_or_dense_matrix, col_index):
"""Returns a (dense) column of a Tensor or SparseTensor.
Args:
sparse_or_dense_matrix: matrix-shaped, `float` `Tensor` or `SparseTensor`.
col_index: scalar, `int` `Tensor` representing the index of the desired
column.
Returns:
column: vector-shaped, `float` `Tensor` with the same dtype as
`sparse_or_dense_matrix`, representing the `col_index`th column of
`sparse_or_dense_matrix`.
"""
if isinstance(sparse_or_dense_matrix,
(tf.SparseTensor, tf.compat.v1.SparseTensorValue)):
# TODO(b/111924846): Implement better (ideally in a way that allows us to
# eliminate the `num_rows` arg, if possible).
num_rows = _get_shape(sparse_or_dense_matrix)[-2]
batch_shape = _get_shape(sparse_or_dense_matrix)[:-2]
slice_start = tf.concat([tf.zeros_like(batch_shape), [0, col_index]],
axis=0)
slice_size = tf.concat([batch_shape, [num_rows, 1]], axis=0)
# We momentarily lose static shape information in tf.sparse_slice. However
# we regain it in the following tf.reshape.
sparse_slice = tf.sparse.slice(sparse_or_dense_matrix,
tf.cast(slice_start, tf.int64),
tf.cast(slice_size, tf.int64))
output_shape = tf.concat([batch_shape, [num_rows]], axis=0)
return tf.reshape(tf.sparse.to_dense(sparse_slice), output_shape)
else:
return tf.gather(sparse_or_dense_matrix, col_index, axis=-1)
|
One step of (the outer loop of) the minimization algorithm.
|
def minimize_one_step(gradient_unregularized_loss,
hessian_unregularized_loss_outer,
hessian_unregularized_loss_middle,
x_start,
tolerance,
l1_regularizer,
l2_regularizer=None,
maximum_full_sweeps=1,
learning_rate=None,
name=None):
"""One step of (the outer loop of) the minimization algorithm.
This function returns a new value of `x`, equal to `x_start + x_update`. The
increment `x_update in R^n` is computed by a coordinate descent method, that
is, by a loop in which each iteration updates exactly one coordinate of
`x_update`. (Some updates may leave the value of the coordinate unchanged.)
The particular update method used is to apply an L1-based proximity operator,
"soft threshold", whose fixed point `x_update_fix` is the desired minimum
```none
x_update_fix = argmin{
Loss(x_start + x_update')
+ l1_regularizer * ||x_start + x_update'||_1
+ l2_regularizer * ||x_start + x_update'||_2**2
: x_update' }
```
where in each iteration `x_update'` is constrained to have at most one nonzero
coordinate.
This update method preserves sparsity, i.e., tends to find sparse solutions if
`x_start` is sparse. Additionally, the choice of step size is based on
curvature (Hessian), which significantly speeds up convergence.
This algorithm assumes that `Loss` is convex, at least in a region surrounding
the optimum. (If `l2_regularizer > 0`, then only weak convexity is needed.)
Args:
gradient_unregularized_loss: (Batch of) `Tensor` with the same shape and
dtype as `x_start` representing the gradient, evaluated at `x_start`, of
the unregularized loss function (denoted `Loss` above). (In all current
use cases, `Loss` is the negative log likelihood.)
hessian_unregularized_loss_outer: (Batch of) `Tensor` or `SparseTensor`
having the same dtype as `x_start`, and shape `[N, n]` where `x_start` has
shape `[n]`, satisfying the property
`Transpose(hessian_unregularized_loss_outer)
@ diag(hessian_unregularized_loss_middle)
@ hessian_unregularized_loss_inner
= (approximation of) Hessian matrix of Loss, evaluated at x_start`.
hessian_unregularized_loss_middle: (Batch of) vector-shaped `Tensor` having
the same dtype as `x_start`, and shape `[N]` where
`hessian_unregularized_loss_outer` has shape `[N, n]`, satisfying the
property
`Transpose(hessian_unregularized_loss_outer)
@ diag(hessian_unregularized_loss_middle)
@ hessian_unregularized_loss_inner
= (approximation of) Hessian matrix of Loss, evaluated at x_start`.
x_start: (Batch of) vector-shaped, `float` `Tensor` representing the current
value of the argument to the Loss function.
tolerance: scalar, `float` `Tensor` representing the convergence threshold.
The optimization step will terminate early, returning its current value of
`x_start + x_update`, once the following condition is met:
`||x_update_end - x_update_start||_2 / (1 + ||x_start||_2)
< sqrt(tolerance)`,
where `x_update_end` is the value of `x_update` at the end of a sweep and
`x_update_start` is the value of `x_update` at the beginning of that
sweep.
l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1
regularization term (see equation above). If L1 regularization is not
required, then `tfp.glm.fit_one_step` is preferable.
l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2
regularization term (see equation above).
Default value: `None` (i.e., no L2 regularization).
maximum_full_sweeps: Python integer specifying maximum number of sweeps to
run. A "sweep" consists of an iteration of coordinate descent on each
coordinate. After this many sweeps, the algorithm will terminate even if
convergence has not been reached.
Default value: `1`.
learning_rate: scalar, `float` `Tensor` representing a multiplicative factor
used to dampen the proximal gradient descent steps.
Default value: `None` (i.e., factor is conceptually `1`).
name: Python string representing the name of the TensorFlow operation.
The default name is `"minimize_one_step"`.
Returns:
x: (Batch of) `Tensor` having the same shape and dtype as `x_start`,
representing the updated value of `x`, that is, `x_start + x_update`.
is_converged: scalar, `bool` `Tensor` indicating whether convergence
occurred across all batches within the specified number of sweeps.
iter: scalar, `int` `Tensor` representing the actual number of coordinate
updates made (before achieving convergence). Since each sweep consists of
`tf.size(x_start)` iterations, the maximum number of updates is
`maximum_full_sweeps * tf.size(x_start)`.
#### References
[1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths
for Generalized Linear Models via Coordinate Descent. _Journal of
Statistical Software_, 33(1), 2010.
https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf
[2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for
L1-regularized Logistic Regression. _Journal of Machine Learning
Research_, 13, 2012.
http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf
"""
graph_deps = [
gradient_unregularized_loss,
hessian_unregularized_loss_outer,
hessian_unregularized_loss_middle,
x_start,
l1_regularizer,
l2_regularizer,
maximum_full_sweeps,
tolerance,
learning_rate,
]
with tf.compat.v1.name_scope(name, 'minimize_one_step', graph_deps):
x_shape = _get_shape(x_start)
batch_shape = x_shape[:-1]
dims = x_shape[-1]
def _hessian_diag_elt_with_l2(coord): # pylint: disable=missing-docstring
# Returns the (coord, coord) entry of
#
# Hessian(UnregularizedLoss(x) + l2_regularizer * ||x||_2**2)
#
# evaluated at x = x_start.
inner_square = tf.reduce_sum(
input_tensor=_sparse_or_dense_matmul_onehot(
hessian_unregularized_loss_outer, coord)**2,
axis=-1)
unregularized_component = (
hessian_unregularized_loss_middle[..., coord] * inner_square)
l2_component = _mul_or_none(2., l2_regularizer)
return _add_ignoring_nones(unregularized_component, l2_component)
grad_loss_with_l2 = _add_ignoring_nones(
gradient_unregularized_loss, _mul_or_none(2., l2_regularizer, x_start))
# We define `x_update_diff_norm_sq_convergence_threshold` such that the
# convergence condition
# ||x_update_end - x_update_start||_2 / (1 + ||x_start||_2)
# < sqrt(tolerance)
# is equivalent to
# ||x_update_end - x_update_start||_2**2
# < x_update_diff_norm_sq_convergence_threshold.
x_update_diff_norm_sq_convergence_threshold = (
tolerance * (1. + tf.norm(tensor=x_start, ord=2, axis=-1))**2)
# Reshape update vectors so that the coordinate sweeps happen along the
# first dimension. This is so that we can use tensor_scatter_update to make
# sparse updates along the first axis without copying the Tensor.
# TODO(b/118789120): Switch to something like tf.tensor_scatter_nd_add if
# or when it exists.
update_shape = tf.concat([[dims], batch_shape], axis=-1)
def _loop_cond(iter_, x_update_diff_norm_sq, x_update,
hess_matmul_x_update):
del x_update
del hess_matmul_x_update
sweep_complete = (iter_ > 0) & tf.equal(iter_ % dims, 0)
small_delta = (
x_update_diff_norm_sq < x_update_diff_norm_sq_convergence_threshold)
converged = sweep_complete & small_delta
allowed_more_iterations = iter_ < maximum_full_sweeps * dims
return allowed_more_iterations & tf.reduce_any(input_tensor=~converged)
def _loop_body( # pylint: disable=missing-docstring
iter_, x_update_diff_norm_sq, x_update, hess_matmul_x_update):
# Inner loop of the minimizer.
#
# This loop updates a single coordinate of x_update. Ideally, an
# iteration of this loop would set
#
# x_update[j] += argmin{ LocalLoss(x_update + z*e_j) : z in R }
#
# where
#
# LocalLoss(x_update')
# = LocalLossSmoothComponent(x_update')
# + l1_regularizer * (||x_start + x_update'||_1 -
# ||x_start + x_update||_1)
# := (UnregularizedLoss(x_start + x_update') -
# UnregularizedLoss(x_start + x_update)
# + l2_regularizer * (||x_start + x_update'||_2**2 -
# ||x_start + x_update||_2**2)
# + l1_regularizer * (||x_start + x_update'||_1 -
# ||x_start + x_update||_1)
#
      # In this algorithm we approximate the above argmin using (univariate)
# proximal gradient descent:
#
# (*) x_update[j] = prox_{t * l1_regularizer * L1}(
# x_update[j] -
# t * d/dz|z=0 UnivariateLocalLossSmoothComponent(z))
#
# where
#
# UnivariateLocalLossSmoothComponent(z)
# := LocalLossSmoothComponent(x_update + z*e_j)
#
# and we approximate
#
# d/dz UnivariateLocalLossSmoothComponent(z)
# = grad LocalLossSmoothComponent(x_update))[j]
# ~= (grad LossSmoothComponent(x_start)
# + x_update matmul HessianOfLossSmoothComponent(x_start))[j].
#
# To choose the parameter t, we squint and pretend that the inner term of
# (*) is a Newton update as if we were using Newton's method to minimize
# UnivariateLocalLossSmoothComponent. That is, we choose t such that
#
# -t * d/dz ULLSC = -learning_rate * (d/dz ULLSC) / (d^2/dz^2 ULLSC)
#
# at z=0. Hence
#
# t = learning_rate / (d^2/dz^2|z=0 ULLSC)
# = learning_rate / HessianOfLossSmoothComponent(
# x_start + x_update)[j,j]
# ~= learning_rate / HessianOfLossSmoothComponent(
# x_start)[j,j]
#
# The above approximation is equivalent to assuming that
# HessianOfUnregularizedLoss is constant, i.e., ignoring third-order
# effects.
#
# Note that because LossSmoothComponent is (assumed to be) convex, t is
# positive.
# In above notation, coord = j.
coord = iter_ % dims
# x_update_diff_norm_sq := ||x_update_end - x_update_start||_2**2,
# computed incrementally, where x_update_end and x_update_start are as
# defined in the convergence criteria. Accordingly, we reset
# x_update_diff_norm_sq to zero at the beginning of each sweep.
x_update_diff_norm_sq = tf.where(
tf.equal(coord, 0), tf.zeros_like(x_update_diff_norm_sq),
x_update_diff_norm_sq)
      # Recall that x_update and hess_matmul_x_update have the rightmost
# dimension transposed to the leftmost dimension.
w_old = x_start[..., coord] + x_update[coord, ...]
# This is the coordinatewise Newton update if no L1 regularization.
# In above notation, newton_step = -t * (approximation of d/dz|z=0 ULLSC).
second_deriv = _hessian_diag_elt_with_l2(coord)
newton_step = -_mul_ignoring_nones( # pylint: disable=invalid-unary-operand-type
learning_rate, grad_loss_with_l2[..., coord] +
hess_matmul_x_update[coord, ...]) / second_deriv
# Applying the soft-threshold operator accounts for L1 regularization.
# In above notation, delta =
# prox_{t*l1_regularizer*L1}(w_old + newton_step) - w_old.
delta = (
soft_threshold(
w_old + newton_step,
_mul_ignoring_nones(learning_rate, l1_regularizer) / second_deriv)
- w_old)
def _do_update(x_update_diff_norm_sq, x_update, hess_matmul_x_update): # pylint: disable=missing-docstring
hessian_column_with_l2 = sparse_or_dense_matvecmul(
hessian_unregularized_loss_outer,
hessian_unregularized_loss_middle * _sparse_or_dense_matmul_onehot(
hessian_unregularized_loss_outer, coord),
adjoint_a=True)
if l2_regularizer is not None:
hessian_column_with_l2 += _one_hot_like(
hessian_column_with_l2, coord, on_value=2. * l2_regularizer)
# Move the batch dimensions of `hessian_column_with_l2` to rightmost in
# order to conform to `hess_matmul_x_update`.
n = tf.rank(hessian_column_with_l2)
perm = tf.roll(tf.range(n), shift=1, axis=0)
hessian_column_with_l2 = tf.transpose(
a=hessian_column_with_l2, perm=perm)
# Update the entire batch at `coord` even if `delta` may be 0 at some
# batch coordinates. In those cases, adding `delta` is a no-op.
x_update = tf.tensor_scatter_nd_add(x_update, [[coord]], [delta])
with tf.control_dependencies([x_update]):
x_update_diff_norm_sq_ = x_update_diff_norm_sq + delta**2
hess_matmul_x_update_ = (
hess_matmul_x_update + delta * hessian_column_with_l2)
# Hint that loop vars retain the same shape.
x_update_diff_norm_sq_.set_shape(
x_update_diff_norm_sq_.shape.merge_with(
x_update_diff_norm_sq.shape))
hess_matmul_x_update_.set_shape(
hess_matmul_x_update_.shape.merge_with(
hess_matmul_x_update.shape))
return [x_update_diff_norm_sq_, x_update, hess_matmul_x_update_]
inputs_to_update = [x_update_diff_norm_sq, x_update, hess_matmul_x_update]
return [iter_ + 1] + prefer_static.cond(
# Note on why checking delta (a difference of floats) for equality to
# zero is ok:
#
# First of all, x - x == 0 in floating point -- see
# https://stackoverflow.com/a/2686671
#
# Delta will conceptually equal zero when one of the following holds:
# (i) |w_old + newton_step| <= threshold and w_old == 0
# (ii) |w_old + newton_step| > threshold and
# w_old + newton_step - sign(w_old + newton_step) * threshold
# == w_old
#
# In case (i) comparing delta to zero is fine.
#
# In case (ii), newton_step conceptually equals
# sign(w_old + newton_step) * threshold.
# Also remember
# threshold = -newton_step / (approximation of d/dz|z=0 ULLSC).
# So (i) happens when
# (approximation of d/dz|z=0 ULLSC) == -sign(w_old + newton_step).
# If we did not require LossSmoothComponent to be strictly convex,
# then this could actually happen a non-negligible amount of the time,
# e.g. if the loss function is piecewise linear and one of the pieces
# has slope 1. But since LossSmoothComponent is strictly convex, (i)
# should not systematically happen.
tf.reduce_all(input_tensor=tf.equal(delta, 0.)),
lambda: inputs_to_update,
lambda: _do_update(*inputs_to_update))
base_dtype = x_start.dtype.base_dtype
iter_, x_update_diff_norm_sq, x_update, _ = tf.while_loop(
cond=_loop_cond,
body=_loop_body,
loop_vars=[
tf.zeros([], dtype=np.int32, name='iter'),
tf.zeros(
batch_shape, dtype=base_dtype, name='x_update_diff_norm_sq'),
tf.zeros(update_shape, dtype=base_dtype, name='x_update'),
tf.zeros(
update_shape, dtype=base_dtype, name='hess_matmul_x_update'),
])
# Convert back x_update to the shape of x_start by transposing the leftmost
# dimension to the rightmost.
n = tf.rank(x_update)
perm = tf.roll(tf.range(n), shift=-1, axis=0)
x_update = tf.transpose(a=x_update, perm=perm)
converged = tf.reduce_all(input_tensor=x_update_diff_norm_sq <
x_update_diff_norm_sq_convergence_threshold)
return x_start + x_update, converged, iter_ / dims
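# Hedged reference sketch of the soft-threshold proximal operator invoked
# above. `soft_threshold` itself is defined elsewhere in the library; this is
# the standard formula, written out here only for orientation.
import tensorflow as tf
def soft_threshold_sketch(x, threshold):
  # prox_{threshold * L1}(x) = sign(x) * max(|x| - threshold, 0)
  return tf.sign(x) * tf.maximum(tf.abs(x) - threshold, 0.)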
|
Minimize using Hessian-informed proximal gradient descent.
|
def minimize(grad_and_hessian_loss_fn,
x_start,
tolerance,
l1_regularizer,
l2_regularizer=None,
maximum_iterations=1,
maximum_full_sweeps_per_iteration=1,
learning_rate=None,
name=None):
"""Minimize using Hessian-informed proximal gradient descent.
This function solves the regularized minimization problem
```none
argmin{ Loss(x)
+ l1_regularizer * ||x||_1
+ l2_regularizer * ||x||_2**2
: x in R^n }
```
where `Loss` is a convex C^2 function (typically, `Loss` is the negative log
likelihood of a model and `x` is a vector of model coefficients). The `Loss`
function does not need to be supplied directly, but this optimizer does need a
way to compute the gradient and Hessian of the Loss function at a given value
of `x`. The gradient and Hessian are often computationally expensive, and
this optimizer calls them relatively few times compared with other algorithms.
Args:
grad_and_hessian_loss_fn: callable that takes as input a (batch of) `Tensor`
of the same shape and dtype as `x_start` and returns the triple
`(gradient_unregularized_loss, hessian_unregularized_loss_outer,
hessian_unregularized_loss_middle)` as defined in the argument spec of
`minimize_one_step`.
x_start: (Batch of) vector-shaped, `float` `Tensor` representing the initial
value of the argument to the `Loss` function.
tolerance: scalar, `float` `Tensor` representing the tolerance for each
optimization step; see the `tolerance` argument of
`minimize_one_step`.
l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1
regularization term (see equation above).
l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2
regularization term (see equation above).
Default value: `None` (i.e., no L2 regularization).
maximum_iterations: Python integer specifying the maximum number of
iterations of the outer loop of the optimizer. After this many iterations
of the outer loop, the algorithm will terminate even if the return value
`optimal_x` has not converged.
Default value: `1`.
maximum_full_sweeps_per_iteration: Python integer specifying the maximum
number of sweeps allowed in each iteration of the outer loop of the
optimizer. Passed as the `maximum_full_sweeps` argument to
`minimize_one_step`.
Default value: `1`.
learning_rate: scalar, `float` `Tensor` representing a multiplicative factor
used to dampen the proximal gradient descent steps.
Default value: `None` (i.e., factor is conceptually `1`).
name: Python string representing the name of the TensorFlow operation.
The default name is `"minimize"`.
Returns:
x: `Tensor` of the same shape and dtype as `x_start`, representing the
(batches of) computed values of `x` which minimizes `Loss(x)`.
is_converged: scalar, `bool` `Tensor` indicating whether the minimization
procedure converged within the specified number of iterations across all
batches. Here convergence means that an iteration of the inner loop
(`minimize_one_step`) returns `True` for its `is_converged` output value.
iter: scalar, `int` `Tensor` indicating the actual number of iterations of
the outer loop of the optimizer completed (i.e., number of calls to
`minimize_one_step` before achieving convergence).
#### References
[1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths
for Generalized Linear Models via Coordinate Descent. _Journal of
Statistical Software_, 33(1), 2010.
https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf
[2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for
L1-regularized Logistic Regression. _Journal of Machine Learning
Research_, 13, 2012.
http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf
"""
graph_deps = [
x_start,
l1_regularizer,
l2_regularizer,
maximum_iterations,
maximum_full_sweeps_per_iteration,
tolerance,
learning_rate,
  ]
with tf.compat.v1.name_scope(name, 'minimize', graph_deps):
def _loop_cond(x_start, converged, iter_):
del x_start
return tf.logical_and(iter_ < maximum_iterations,
tf.logical_not(converged))
def _loop_body(x_start, converged, iter_): # pylint: disable=missing-docstring
g, h_outer, h_middle = grad_and_hessian_loss_fn(x_start)
x_start, converged, _ = minimize_one_step(
gradient_unregularized_loss=g,
hessian_unregularized_loss_outer=h_outer,
hessian_unregularized_loss_middle=h_middle,
x_start=x_start,
l1_regularizer=l1_regularizer,
l2_regularizer=l2_regularizer,
maximum_full_sweeps=maximum_full_sweeps_per_iteration,
tolerance=tolerance,
learning_rate=learning_rate)
return x_start, converged, iter_ + 1
return tf.while_loop(
cond=_loop_cond,
body=_loop_body,
loop_vars=[
x_start,
tf.zeros([], np.bool, name='converged'),
tf.zeros([], np.int32, name='iter'),
])
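# Hedged usage sketch: L1-regularized least squares with
# Loss(x) = 0.5 * ||A x - b||_2**2. For this loss the Hessian is A^T A, which
# factors as Transpose(A) @ diag(ones(N)) @ A, matching the outer/middle split
# expected by `minimize_one_step`. The data here are random and illustrative.
import numpy as np
import tensorflow as tf
A = tf.constant(np.random.randn(100, 10), dtype=tf.float32)
b = tf.constant(np.random.randn(100), dtype=tf.float32)
def grad_and_hessian_loss_fn(x):
  residual = tf.linalg.matvec(A, x) - b
  gradient = tf.linalg.matvec(A, residual, adjoint_a=True)
  return gradient, A, tf.ones_like(b)
x, is_converged, num_iter = minimize(
    grad_and_hessian_loss_fn,
    x_start=tf.zeros([10]),
    tolerance=1e-6,
    l1_regularizer=0.1,
    maximum_iterations=10)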
|
Creates the encoder function.
|
def make_encoder(base_depth, activation, latent_size, code_size):
"""Creates the encoder function.
Args:
base_depth: Layer base depth in encoder net.
activation: Activation function in hidden layers.
latent_size: The number of latent variables in the code.
code_size: The dimensionality of each latent variable.
Returns:
encoder: A `callable` mapping a `Tensor` of images to a `Tensor` of shape
`[..., latent_size, code_size]`.
"""
conv = functools.partial(
tf.keras.layers.Conv2D, padding="SAME", activation=activation)
encoder_net = tf.keras.Sequential([
conv(base_depth, 5, 1),
conv(base_depth, 5, 2),
conv(2 * base_depth, 5, 1),
conv(2 * base_depth, 5, 2),
conv(4 * latent_size, 7, padding="VALID"),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(latent_size * code_size, activation=None),
tf.keras.layers.Reshape([latent_size, code_size])
])
def encoder(images):
"""Encodes a batch of images.
Args:
images: A `Tensor` representing the inputs to be encoded, of shape `[...,
channels]`.
Returns:
codes: A `float`-like `Tensor` of shape `[..., latent_size, code_size]`.
It represents latent vectors to be matched with the codebook.
"""
images = 2 * tf.cast(images, dtype=tf.float32) - 1
codes = encoder_net(images)
return codes
return encoder
|
Creates the decoder function.
|
def make_decoder(base_depth, activation, input_size, output_shape):
"""Creates the decoder function.
Args:
base_depth: Layer base depth in decoder net.
activation: Activation function in hidden layers.
input_size: The flattened latent input shape as an int.
output_shape: The output image shape as a list.
Returns:
decoder: A `callable` mapping a `Tensor` of encodings to a
`tfd.Distribution` instance over images.
"""
deconv = functools.partial(
tf.keras.layers.Conv2DTranspose, padding="SAME", activation=activation)
conv = functools.partial(
tf.keras.layers.Conv2D, padding="SAME", activation=activation)
decoder_net = tf.keras.Sequential([
tf.keras.layers.Reshape((1, 1, input_size)),
deconv(2 * base_depth, 7, padding="VALID"),
deconv(2 * base_depth, 5),
deconv(2 * base_depth, 5, 2),
deconv(base_depth, 5),
deconv(base_depth, 5, 2),
deconv(base_depth, 5),
conv(output_shape[-1], 5, activation=None),
tf.keras.layers.Reshape(output_shape),
])
def decoder(codes):
"""Builds a distribution over images given codes.
Args:
codes: A `Tensor` representing the inputs to be decoded, of shape `[...,
code_size]`.
Returns:
decoder_distribution: A multivariate `Bernoulli` distribution.
"""
logits = decoder_net(codes)
return tfd.Independent(tfd.Bernoulli(logits=logits),
reinterpreted_batch_ndims=len(output_shape),
name="decoder_distribution")
return decoder
|
Add control dependencies to the commitment loss to update the codebook.
|
def add_ema_control_dependencies(vector_quantizer,
one_hot_assignments,
codes,
commitment_loss,
decay):
"""Add control dependencies to the commmitment loss to update the codebook.
Args:
vector_quantizer: An instance of the VectorQuantizer class.
one_hot_assignments: The one-hot vectors corresponding to the matched
codebook entry for each code in the batch.
codes: A `float`-like `Tensor` containing the latent vectors to be compared
to the codebook.
commitment_loss: The commitment loss from comparing the encoder outputs to
their neighboring codebook entries.
decay: Decay factor for exponential moving average.
Returns:
commitment_loss: Commitment loss with control dependencies.
"""
# Use an exponential moving average to update the codebook.
updated_ema_count = moving_averages.assign_moving_average(
vector_quantizer.ema_count,
tf.reduce_sum(input_tensor=one_hot_assignments, axis=[0, 1]),
decay,
zero_debias=False)
updated_ema_means = moving_averages.assign_moving_average(
vector_quantizer.ema_means,
tf.reduce_sum(
input_tensor=tf.expand_dims(codes, 2) *
tf.expand_dims(one_hot_assignments, 3),
axis=[0, 1]),
decay,
zero_debias=False)
# Add small value to avoid dividing by zero.
perturbed_ema_count = updated_ema_count + 1e-5
with tf.control_dependencies([commitment_loss]):
update_means = tf.compat.v1.assign(
vector_quantizer.codebook,
updated_ema_means / perturbed_ema_count[..., tf.newaxis])
with tf.control_dependencies([update_means]):
return tf.identity(commitment_loss)
|
Helper method to save a grid of images to a PNG file.
|
def save_imgs(x, fname):
"""Helper method to save a grid of images to a PNG file.
Args:
x: A numpy array of shape [n_images, height, width].
fname: The filename to write to (including extension).
"""
n = x.shape[0]
fig = figure.Figure(figsize=(n, 1), frameon=False)
canvas = backend_agg.FigureCanvasAgg(fig)
for i in range(n):
ax = fig.add_subplot(1, n, i+1)
ax.imshow(x[i].squeeze(),
interpolation="none",
cmap=cm.get_cmap("binary"))
ax.axis("off")
canvas.print_figure(fname, format="png")
print("saved %s" % fname)
|
Helper method to save images visualizing model reconstructions.
|
def visualize_training(images_val,
reconstructed_images_val,
random_images_val,
log_dir, prefix, viz_n=10):
"""Helper method to save images visualizing model reconstructions.
Args:
images_val: Numpy array containing a batch of input images.
reconstructed_images_val: Numpy array giving the expected output
(mean) of the decoder.
random_images_val: Optionally, a Numpy array giving the expected output
(mean) of decoding samples from the prior, or `None`.
log_dir: The directory to write images (Python `str`).
prefix: A specific label for the saved visualizations, which
determines their filenames (Python `str`).
viz_n: The number of images from each batch to visualize (Python `int`).
"""
save_imgs(images_val[:viz_n],
os.path.join(log_dir, "{}_inputs.png".format(prefix)))
save_imgs(reconstructed_images_val[:viz_n],
os.path.join(log_dir,
"{}_reconstructions.png".format(prefix)))
if random_images_val is not None:
save_imgs(random_images_val[:viz_n],
os.path.join(log_dir,
"{}_prior_samples.png".format(prefix)))
|
Returns Hugo Larochelle's binary static MNIST tf.data.Dataset.
|
def load_bernoulli_mnist_dataset(directory, split_name):
"""Returns Hugo Larochelle's binary static MNIST tf.data.Dataset."""
amat_file = download(directory, FILE_TEMPLATE.format(split=split_name))
dataset = tf.data.TextLineDataset(amat_file)
str_to_arr = lambda string: np.array([c == b"1" for c in string.split()])
def _parser(s):
booltensor = tf.compat.v1.py_func(str_to_arr, [s], tf.bool)
reshaped = tf.reshape(booltensor, [28, 28, 1])
return tf.cast(reshaped, dtype=tf.float32), tf.constant(0, tf.int32)
return dataset.map(_parser)
|
Builds an Iterator switching between train and heldout data.
|
def build_input_pipeline(data_dir, batch_size, heldout_size, mnist_type):
"""Builds an Iterator switching between train and heldout data."""
# Build an iterator over training batches.
if mnist_type in [MnistType.FAKE_DATA, MnistType.THRESHOLD]:
if mnist_type == MnistType.FAKE_DATA:
mnist_data = build_fake_data()
else:
mnist_data = mnist.read_data_sets(data_dir)
training_dataset = tf.data.Dataset.from_tensor_slices(
(mnist_data.train.images, np.int32(mnist_data.train.labels)))
heldout_dataset = tf.data.Dataset.from_tensor_slices(
(mnist_data.validation.images,
np.int32(mnist_data.validation.labels)))
elif mnist_type == MnistType.BERNOULLI:
training_dataset = load_bernoulli_mnist_dataset(data_dir, "train")
heldout_dataset = load_bernoulli_mnist_dataset(data_dir, "valid")
else:
raise ValueError("Unknown MNIST type.")
training_batches = training_dataset.repeat().batch(batch_size)
training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)
  # Build an iterator over the heldout set with batch_size=heldout_size,
# i.e., return the entire heldout set as a constant.
heldout_frozen = (heldout_dataset.take(heldout_size).
repeat().batch(heldout_size))
heldout_iterator = tf.compat.v1.data.make_one_shot_iterator(heldout_frozen)
# Combine these into a feedable iterator that can switch between training
# and validation inputs.
handle = tf.compat.v1.placeholder(tf.string, shape=[])
feedable_iterator = tf.compat.v1.data.Iterator.from_string_handle(
handle, training_batches.output_types, training_batches.output_shapes)
images, labels = feedable_iterator.get_next()
# Reshape as a pixel image and binarize pixels.
images = tf.reshape(images, shape=[-1] + IMAGE_SHAPE)
if mnist_type in [MnistType.FAKE_DATA, MnistType.THRESHOLD]:
images = tf.cast(images > 0.5, dtype=tf.int32)
return images, labels, handle, training_iterator, heldout_iterator
|
Returns a np.dtype based on this dtype.
|
def as_numpy_dtype(dtype):
"""Returns a `np.dtype` based on this `dtype`."""
dtype = tf.as_dtype(dtype)
if hasattr(dtype, 'as_numpy_dtype'):
return dtype.as_numpy_dtype
return dtype
|
Returns a non-reference dtype based on this dtype.
|
def base_dtype(dtype):
"""Returns a non-reference `dtype` based on this `dtype`."""
dtype = tf.as_dtype(dtype)
if hasattr(dtype, 'base_dtype'):
return dtype.base_dtype
return dtype
|
Returns whether this is a boolean data type.
|
def is_bool(dtype):
"""Returns whether this is a boolean data type."""
dtype = tf.as_dtype(dtype)
if hasattr(dtype, 'is_bool'):
return dtype.is_bool
# We use `kind` because:
# np.issubdtype(np.uint8, np.bool) == True.
return np.dtype(dtype).kind == 'b'
|
Returns whether this is a complex floating point type.
|
def is_complex(dtype):
"""Returns whether this is a complex floating point type."""
dtype = tf.as_dtype(dtype)
if hasattr(dtype, 'is_complex'):
return dtype.is_complex
  return np.issubdtype(np.dtype(dtype), np.complexfloating)
|
Returns whether this is a (non-quantized, real) floating point type.
|
def is_floating(dtype):
"""Returns whether this is a (non-quantized, real) floating point type."""
dtype = tf.as_dtype(dtype)
if hasattr(dtype, 'is_floating'):
return dtype.is_floating
  return np.issubdtype(np.dtype(dtype), np.floating)
|
Returns whether this is a (non-quantized) integer type.
|
def is_integer(dtype):
"""Returns whether this is a (non-quantized) integer type."""
dtype = tf.as_dtype(dtype)
if hasattr(dtype, 'is_integer'):
return dtype.is_integer
return np.issubdtype(np.dtype(dtype), np.integer)
|
Returns the maximum representable value in this data type.
|
def max(dtype): # pylint: disable=redefined-builtin
"""Returns the maximum representable value in this data type."""
dtype = tf.as_dtype(dtype)
if hasattr(dtype, 'max'):
return dtype.max
use_finfo = is_floating(dtype) or is_complex(dtype)
return np.finfo(dtype).max if use_finfo else np.iinfo(dtype).max
|
Returns the string name for this dtype.
|
def name(dtype):
"""Returns the string name for this `dtype`."""
dtype = tf.as_dtype(dtype)
if hasattr(dtype, 'name'):
return dtype.name
if hasattr(dtype, '__name__'):
return dtype.__name__
return str(dtype)
|
Returns the number of bytes to represent this dtype.
|
def size(dtype):
"""Returns the number of bytes to represent this `dtype`."""
dtype = tf.as_dtype(dtype)
if hasattr(dtype, 'size'):
return dtype.size
return np.dtype(dtype).itemsize
|
Asserts all items are of the same base type.
|
def _assert_same_base_type(items, expected_type=None):
r"""Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or none if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
"""
original_expected_type = expected_type
mismatch = False
for item in items:
if item is not None:
item_type = base_dtype(item.dtype)
if not expected_type:
expected_type = item_type
elif expected_type != item_type:
mismatch = True
break
if mismatch:
# Loop back through and build up an informative error message (this is very
# slow, so we don't do it unless we found an error above).
expected_type = original_expected_type
original_item_str = None
get_name = lambda x: x.name if hasattr(x, 'name') else str(x)
for item in items:
if item is not None:
item_type = base_dtype(item.dtype)
if not expected_type:
expected_type = item_type
original_item_str = get_name(item)
elif expected_type != item_type:
raise ValueError(
'{}, type={}, must be of the same type ({}){}.'.format(
get_name(item),
item_type,
expected_type,
((' as {}'.format(original_item_str))
if original_item_str else '')))
return expected_type # Should be unreachable
else:
return expected_type
|
Validate and return float type based on tensors and dtype.
|
def assert_same_float_dtype(tensors=None, dtype=None):
"""Validate and return float type based on `tensors` and `dtype`.
For ops such as matrix multiplication, inputs and weights must be of the
same float type. This function validates that all `tensors` are the same type,
validates that type is `dtype` (if supplied), and returns the type. Type must
be a floating point type. If neither `tensors` nor `dtype` is supplied,
the function will return `dtypes.float32`.
Args:
tensors: Tensors of input values. Can include `None` elements, which will
be ignored.
dtype: Expected type.
Returns:
Validated type.
Raises:
ValueError: if neither `tensors` nor `dtype` is supplied, or result is not
float, or the common type of the inputs is not a floating point type.
"""
if tensors:
dtype = _assert_same_base_type(tensors, dtype)
if not dtype:
dtype = tf.float32
elif not is_floating(dtype):
raise ValueError('Expected floating point type, got {}.'.format(dtype))
return dtype
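# Hedged usage sketch: matching float32 inputs validate to float32, while a
# float32/float64 mix would raise ValueError.
import tensorflow as tf
x = tf.constant([1., 2.], dtype=tf.float32)
y = tf.constant([3., 4.], dtype=tf.float32)
dtype = assert_same_float_dtype([x, y])  # tf.float32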
|
Calculate the batched KL divergence KL(a || b) with a, b OneHotCategorical.
|
def _kl_categorical_categorical(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a, b OneHotCategorical.
Args:
a: instance of a OneHotCategorical distribution object.
b: instance of a OneHotCategorical distribution object.
name: (optional) Name to use for created operations.
default is "kl_categorical_categorical".
Returns:
Batchwise KL(a || b)
"""
with tf.name_scope(name or "kl_categorical_categorical"):
# sum(p ln(p / q))
return tf.reduce_sum(
input_tensor=tf.nn.softmax(a.logits) *
(tf.nn.log_softmax(a.logits) - tf.nn.log_softmax(b.logits)),
axis=-1)
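# Hedged usage sketch: the registered KL is reached through
# `tfd.kl_divergence`; the logits below are illustrative.
import tensorflow_probability as tfp
tfd = tfp.distributions
a = tfd.OneHotCategorical(logits=[1., 2., 3.])
b = tfd.OneHotCategorical(logits=[0., 0., 0.])
kl = tfd.kl_divergence(a, b)  # scalar, non-negative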
|
Minimum of the objective function using the Nelder Mead simplex algorithm.
|
def minimize(objective_function,
initial_simplex=None,
initial_vertex=None,
step_sizes=None,
objective_at_initial_simplex=None,
objective_at_initial_vertex=None,
batch_evaluate_objective=False,
func_tolerance=1e-8,
position_tolerance=1e-8,
parallel_iterations=1,
max_iterations=None,
reflection=None,
expansion=None,
contraction=None,
shrinkage=None,
name=None):
"""Minimum of the objective function using the Nelder Mead simplex algorithm.
Performs an unconstrained minimization of a (possibly non-smooth) function
using the Nelder Mead simplex method. Nelder Mead method does not support
univariate functions. Hence the dimensions of the domain must be 2 or greater.
For details of the algorithm, see
[Press, Teukolsky, Vetterling and Flannery(2007)][1].
Points in the domain of the objective function may be represented as a
`Tensor` of general shape but with rank at least 1. The algorithm proceeds
by modifying a full rank simplex in the domain. The initial simplex may
either be specified by the user or can be constructed using a single vertex
supplied by the user. In the latter case, if `v0` is the supplied vertex,
the simplex is the convex hull of the set:
```None
S = {v0} + {v0 + step_i * e_i}
```
Here `e_i` is a vector which is `1` along the `i`-th axis and zero elsewhere
and `step_i` is a characteristic length scale along the `i`-th axis. If the
step size is not supplied by the user, a unit step size is used in every axis.
Alternately, a single step size may be specified which is used for every
axis. The most flexible option is to supply a bespoke step size for every
axis.
### Usage:
  The following example demonstrates the usage of the Nelder Mead minimization
on a two dimensional problem with the minimum located at a non-differentiable
point.
```python
# The objective function
def sqrt_quadratic(x):
return tf.sqrt(tf.reduce_sum(x ** 2, axis=-1))
start = tf.constant([6.0, -21.0]) # Starting point for the search.
optim_results = tfp.optimizer.nelder_mead_minimize(
sqrt_quadratic, initial_vertex=start, func_tolerance=1e-8,
batch_evaluate_objective=True)
with tf.Session() as session:
results = session.run(optim_results)
# Check that the search converged
assert(results.converged)
# Check that the argmin is close to the actual value.
np.testing.assert_allclose(results.position, np.array([0.0, 0.0]),
atol=1e-7)
# Print out the total number of function evaluations it took.
print ("Function evaluations: %d" % results.num_objective_evaluations)
```
### References:
[1]: William Press, Saul Teukolsky, William Vetterling and Brian Flannery.
Numerical Recipes in C++, third edition. pp. 502-507. (2007).
http://numerical.recipes/cpppages/chap0sel.pdf
[2]: Jeffrey Lagarias, James Reeds, Margaret Wright and Paul Wright.
Convergence properties of the Nelder-Mead simplex method in low dimensions,
Siam J. Optim., Vol 9, No. 1, pp. 112-147. (1998).
http://www.math.kent.edu/~reichel/courses/Opt/reading.material.2/nelder.mead.pdf
[3]: Fuchang Gao and Lixing Han. Implementing the Nelder-Mead simplex
algorithm with adaptive parameters. Computational Optimization and
Applications, Vol 51, Issue 1, pp 259-277. (2012).
https://pdfs.semanticscholar.org/15b4/c4aa7437df4d032c6ee6ce98d6030dd627be.pdf
Args:
objective_function: A Python callable that accepts a point as a
real `Tensor` and returns a `Tensor` of real dtype containing
the value of the function at that point. The function
to be minimized. If `batch_evaluate_objective` is `True`, the callable
    may be evaluated on a `Tensor` of shape `[n+1] + s` where `n` is
the dimension of the problem and `s` is the shape of a single point
in the domain (so `n` is the size of a `Tensor` representing a
single point).
In this case, the expected return value is a `Tensor` of shape `[n+1]`.
Note that this method does not support univariate functions so the problem
dimension `n` must be strictly greater than 1.
initial_simplex: (Optional) `Tensor` of real dtype. The initial simplex to
start the search. If supplied, should be a `Tensor` of shape `[n+1] + s`
where `n` is the dimension of the problem and `s` is the shape of a
single point in the domain. Each row (i.e. the `Tensor` with a given
value of the first index) is interpreted as a vertex of a simplex and
hence the rows must be affinely independent. If not supplied, an axes
aligned simplex is constructed using the `initial_vertex` and
`step_sizes`. Only one and at least one of `initial_simplex` and
`initial_vertex` must be supplied.
initial_vertex: (Optional) `Tensor` of real dtype and any shape that can
be consumed by the `objective_function`. A single point in the domain that
will be used to construct an axes aligned initial simplex.
step_sizes: (Optional) `Tensor` of real dtype and shape broadcasting
compatible with `initial_vertex`. Supplies the simplex scale along each
axes. Only used if `initial_simplex` is not supplied. See description
above for details on how step sizes and initial vertex are used to
construct the initial simplex.
    objective_at_initial_simplex: (Optional) Rank `1` `Tensor` of real dtype.
      The value of the objective function at the vertices of the initial
      simplex. May be supplied only if `initial_simplex` is supplied. If not
      supplied, it will be computed.
objective_at_initial_vertex: (Optional) Scalar `Tensor` of real dtype. The
value of the objective function at the initial vertex. May be supplied
only if the `initial_vertex` is also supplied.
batch_evaluate_objective: (Optional) Python `bool`. If True, the objective
function will be evaluated on all the vertices of the simplex packed
into a single tensor. If False, the objective will be mapped across each
vertex separately. Evaluating the objective function in a batch allows
use of vectorization and should be preferred if the objective function
allows it.
func_tolerance: (Optional) Scalar `Tensor` of real dtype. The algorithm
stops if the absolute difference between the largest and the smallest
function value on the vertices of the simplex is below this number.
position_tolerance: (Optional) Scalar `Tensor` of real dtype. The
algorithm stops if the largest absolute difference between the
coordinates of the vertices is below this threshold.
parallel_iterations: (Optional) Positive integer. The number of iterations
allowed to run in parallel.
max_iterations: (Optional) Scalar positive `Tensor` of dtype `int32`.
The maximum number of iterations allowed. If `None` then no limit is
applied.
reflection: (Optional) Positive Scalar `Tensor` of same dtype as
`initial_vertex`. This parameter controls the scaling of the reflected
vertex. See, [Press et al(2007)][1] for details. If not specified,
uses the dimension dependent prescription of [Gao and Han(2012)][3].
expansion: (Optional) Positive Scalar `Tensor` of same dtype as
`initial_vertex`. Should be greater than `1` and `reflection`. This
parameter controls the expanded scaling of a reflected vertex.
See, [Press et al(2007)][1] for details. If not specified, uses the
dimension dependent prescription of [Gao and Han(2012)][3].
contraction: (Optional) Positive scalar `Tensor` of same dtype as
`initial_vertex`. Must be between `0` and `1`. This parameter controls
the contraction of the reflected vertex when the objective function at
the reflected point fails to show sufficient decrease.
See, [Press et al(2007)][1] for more details. If not specified, uses
the dimension dependent prescription of [Gao and Han(2012)][3].
shrinkage: (Optional) Positive scalar `Tensor` of same dtype as
`initial_vertex`. Must be between `0` and `1`. This parameter is the scale
by which the simplex is shrunk around the best point when the other
steps fail to produce improvements.
See, [Press et al(2007)][1] for more details. If not specified, uses
the dimension dependent prescription of [Gao and Han(2012)][3].
name: (Optional) Python str. The name prefixed to the ops created by this
function. If not supplied, the default name 'minimize' is used.
Returns:
optimizer_results: A namedtuple containing the following items:
converged: Scalar boolean tensor indicating whether the minimum was
found within tolerance.
num_objective_evaluations: The total number of objective
evaluations performed.
position: A `Tensor` containing the last argument value found
during the search. If the search converged, then
this value is the argmin of the objective function.
objective_value: A tensor containing the value of the objective
function at the `position`. If the search
converged, then this is the (local) minimum of
the objective function.
final_simplex: The last simplex constructed before stopping.
final_objective_values: The objective function evaluated at the
vertices of the final simplex.
initial_simplex: The starting simplex.
initial_objective_values: The objective function evaluated at the
vertices of the initial simplex.
num_iterations: The number of iterations of the main algorithm body.
Raises:
ValueError: If any of the following conditions hold
1. If none or more than one of `initial_simplex` and `initial_vertex` are
supplied.
2. If `initial_simplex` and `step_sizes` are both specified.
"""
with tf.compat.v1.name_scope(name, 'minimize', [
initial_simplex, initial_vertex, step_sizes, objective_at_initial_simplex,
objective_at_initial_vertex, func_tolerance, position_tolerance
]):
(
dim,
_,
simplex,
objective_at_simplex,
num_evaluations
) = _prepare_args(objective_function,
initial_simplex,
initial_vertex,
step_sizes,
objective_at_initial_simplex,
objective_at_initial_vertex,
batch_evaluate_objective)
domain_dtype = simplex.dtype
(
reflection,
expansion,
contraction,
shrinkage
) = _resolve_parameters(dim,
reflection,
expansion,
contraction,
shrinkage,
domain_dtype)
closure_kwargs = dict(
objective_function=objective_function,
dim=dim,
func_tolerance=func_tolerance,
position_tolerance=position_tolerance,
batch_evaluate_objective=batch_evaluate_objective,
reflection=reflection,
expansion=expansion,
contraction=contraction,
shrinkage=shrinkage)
def _loop_body(_, iterations, simplex, objective_at_simplex,
num_evaluations):
(
converged,
next_simplex,
next_objective,
evaluations
) = nelder_mead_one_step(simplex, objective_at_simplex, **closure_kwargs)
return (converged, iterations + 1, next_simplex, next_objective,
num_evaluations + evaluations)
initial_args = (False, 0, simplex, objective_at_simplex,
num_evaluations)
# Loop until either we have converged or if the max iterations are supplied
# then until we have converged or exhausted the available iteration budget.
def _is_converged(converged, num_iterations, *ignored_args): # pylint:disable=unused-argument
# It is important to ensure that not_converged is a tensor. If
# converged is not a tensor but a Python bool, then the overloaded
# op '~' acts as bitwise complement so ~True = -2 and ~False = -1.
# In that case, the loop will never terminate.
not_converged = tf.logical_not(converged)
return (not_converged if max_iterations is None
else (not_converged & (num_iterations < max_iterations)))
(converged, num_iterations, final_simplex, final_objective_values,
final_evaluations) = tf.while_loop(
cond=_is_converged,
body=_loop_body,
loop_vars=initial_args,
parallel_iterations=parallel_iterations)
order = tf.argsort(
final_objective_values, direction='ASCENDING', stable=True)
best_index = order[0]
# The explicit cast to Tensor below is done to avoid returning a mixture
# of Python types and Tensors which cause problems with session.run.
# In the eager mode, converged may remain a Python bool. Trying to evaluate
# the whole tuple in one evaluate call will raise an exception because
# of the presence of non-tensors. This is very annoying so we explicitly
# cast those arguments to Tensors.
return NelderMeadOptimizerResults(
converged=tf.convert_to_tensor(value=converged),
num_objective_evaluations=final_evaluations,
position=final_simplex[best_index],
objective_value=final_objective_values[best_index],
final_simplex=final_simplex,
final_objective_values=final_objective_values,
num_iterations=tf.convert_to_tensor(value=num_iterations),
initial_simplex=simplex,
initial_objective_values=objective_at_simplex)
|
A single iteration of the Nelder Mead algorithm.
|
def nelder_mead_one_step(current_simplex,
current_objective_values,
objective_function=None,
dim=None,
func_tolerance=None,
position_tolerance=None,
batch_evaluate_objective=False,
reflection=None,
expansion=None,
contraction=None,
shrinkage=None,
name=None):
"""A single iteration of the Nelder Mead algorithm."""
with tf.compat.v1.name_scope(name, 'nelder_mead_one_step'):
domain_dtype = current_simplex.dtype.base_dtype
order = tf.argsort(
current_objective_values, direction='ASCENDING', stable=True)
(
best_index,
worst_index,
second_worst_index
) = order[0], order[-1], order[-2]
worst_vertex = current_simplex[worst_index]
(
best_objective_value,
worst_objective_value,
second_worst_objective_value
) = (
current_objective_values[best_index],
current_objective_values[worst_index],
current_objective_values[second_worst_index]
)
# Compute the centroid of the face opposite the worst vertex.
face_centroid = tf.reduce_sum(
input_tensor=current_simplex, axis=0) - worst_vertex
face_centroid /= tf.cast(dim, domain_dtype)
# Reflect the worst vertex through the opposite face.
reflected = face_centroid + reflection * (face_centroid - worst_vertex)
objective_at_reflected = objective_function(reflected)
num_evaluations = 1
has_converged = _check_convergence(current_simplex,
current_simplex[best_index],
best_objective_value,
worst_objective_value,
func_tolerance,
position_tolerance)
def _converged_fn():
return (True, current_simplex, current_objective_values, 0)
case0 = has_converged, _converged_fn
accept_reflected = (
(objective_at_reflected < second_worst_objective_value) &
(objective_at_reflected >= best_objective_value))
accept_reflected_fn = _accept_reflected_fn(current_simplex,
current_objective_values,
worst_index,
reflected,
objective_at_reflected)
case1 = accept_reflected, accept_reflected_fn
do_expansion = objective_at_reflected < best_objective_value
expansion_fn = _expansion_fn(objective_function,
current_simplex,
current_objective_values,
worst_index,
reflected,
objective_at_reflected,
face_centroid,
expansion)
case2 = do_expansion, expansion_fn
do_outside_contraction = (
(objective_at_reflected < worst_objective_value) &
(objective_at_reflected >= second_worst_objective_value)
)
outside_contraction_fn = _outside_contraction_fn(
objective_function,
current_simplex,
current_objective_values,
face_centroid,
best_index,
worst_index,
reflected,
objective_at_reflected,
contraction,
shrinkage,
batch_evaluate_objective)
case3 = do_outside_contraction, outside_contraction_fn
default_fn = _inside_contraction_fn(objective_function,
current_simplex,
current_objective_values,
face_centroid,
best_index,
worst_index,
worst_objective_value,
contraction,
shrinkage,
batch_evaluate_objective)
(
converged,
next_simplex,
next_objective_at_simplex,
case_evals) = prefer_static.case([case0, case1, case2, case3],
default=default_fn, exclusive=False)
next_simplex.set_shape(current_simplex.shape)
next_objective_at_simplex.set_shape(current_objective_values.shape)
return (
converged,
next_simplex,
next_objective_at_simplex,
num_evaluations + case_evals
)
|
Creates the condition function pair for a reflection to be accepted.
|
def _accept_reflected_fn(simplex,
objective_values,
worst_index,
reflected,
objective_at_reflected):
"""Creates the condition function pair for a reflection to be accepted."""
def _replace_worst_with_reflected():
next_simplex = _replace_at_index(simplex, worst_index, reflected)
next_objective_values = _replace_at_index(objective_values, worst_index,
objective_at_reflected)
return False, next_simplex, next_objective_values, 0
return _replace_worst_with_reflected
|
Creates the condition function pair for an expansion.
|
def _expansion_fn(objective_function,
simplex,
objective_values,
worst_index,
reflected,
objective_at_reflected,
face_centroid,
expansion):
"""Creates the condition function pair for an expansion."""
def _expand_and_maybe_replace():
"""Performs the expansion step."""
expanded = face_centroid + expansion * (reflected - face_centroid)
expanded_objective_value = objective_function(expanded)
expanded_is_better = (expanded_objective_value <
objective_at_reflected)
accept_expanded_fn = lambda: (expanded, expanded_objective_value)
accept_reflected_fn = lambda: (reflected, objective_at_reflected)
next_pt, next_objective_value = prefer_static.cond(
expanded_is_better, accept_expanded_fn, accept_reflected_fn)
next_simplex = _replace_at_index(simplex, worst_index, next_pt)
next_objective_at_simplex = _replace_at_index(objective_values,
worst_index,
next_objective_value)
return False, next_simplex, next_objective_at_simplex, 1
return _expand_and_maybe_replace
|
Creates the condition function pair for an outside contraction.
|
def _outside_contraction_fn(objective_function,
simplex,
objective_values,
face_centroid,
best_index,
worst_index,
reflected,
objective_at_reflected,
contraction,
shrinkage,
batch_evaluate_objective):
"""Creates the condition function pair for an outside contraction."""
def _contraction():
"""Performs a contraction."""
contracted = face_centroid + contraction * (reflected - face_centroid)
objective_at_contracted = objective_function(contracted)
is_contracted_acceptable = objective_at_contracted <= objective_at_reflected
def _accept_contraction():
next_simplex = _replace_at_index(simplex, worst_index, contracted)
objective_at_next_simplex = _replace_at_index(
objective_values,
worst_index,
objective_at_contracted)
return (False,
next_simplex,
objective_at_next_simplex,
1)
def _reject_contraction():
return _shrink_towards_best(objective_function,
simplex,
best_index,
shrinkage,
batch_evaluate_objective)
return prefer_static.cond(is_contracted_acceptable,
_accept_contraction,
_reject_contraction)
return _contraction
|
Shrinks the simplex around the best vertex.
|
def _shrink_towards_best(objective_function,
simplex,
best_index,
shrinkage,
batch_evaluate_objective):
"""Shrinks the simplex around the best vertex."""
# If the contraction step fails to improve the average objective enough,
# the simplex is shrunk towards the best vertex.
best_vertex = simplex[best_index]
shrunk_simplex = best_vertex + shrinkage * (simplex - best_vertex)
objective_at_shrunk_simplex, evals = _evaluate_objective_multiple(
objective_function,
shrunk_simplex,
batch_evaluate_objective)
return (False,
shrunk_simplex,
objective_at_shrunk_simplex,
evals)
|
Replaces an element at supplied index.
|
def _replace_at_index(x, index, replacement):
"""Replaces an element at supplied index."""
x_new = tf.concat([x[:index], tf.expand_dims(replacement, axis=0),
x[(index + 1):]], axis=0)
return x_new
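
For instance, a quick sketch (assuming eager execution and the usual
`import tensorflow as tf`) of replacing the vertex at index 1 of a
three-vertex simplex:
```python
# Illustrative sketch only; assumes eager execution.
simplex = tf.constant([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
new_simplex = _replace_at_index(simplex, 1, tf.constant([0.5, 0.5]))
# new_simplex is [[0., 0.], [0.5, 0.5], [0., 1.]]; the other rows are kept.
```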
|
Returns True if the simplex has converged.
|
def _check_convergence(simplex,
best_vertex,
best_objective,
worst_objective,
func_tolerance,
position_tolerance):
"""Returns True if the simplex has converged.
If the simplex size is smaller than the `position_tolerance` or the variation
of the function value over the vertices of the simplex is smaller than the
`func_tolerance` return True else False.
Args:
simplex: `Tensor` of real dtype. The simplex to test for convergence. For
more details, see the docstring for `initial_simplex` argument
of `minimize`.
best_vertex: `Tensor` of real dtype and rank one less than `simplex`. The
vertex with the best (i.e. smallest) objective value.
best_objective: Scalar `Tensor` of real dtype. The best (i.e. smallest)
value of the objective function at a vertex.
worst_objective: Scalar `Tensor` of same dtype as `best_objective`. The
worst (i.e. largest) value of the objective function at a vertex.
func_tolerance: Scalar positive `Tensor`. The tolerance for the variation
of the objective function value over the simplex. If the variation over
the simplex vertices is below this threshold, convergence is True.
position_tolerance: Scalar positive `Tensor`. The algorithm stops if the
lengths (under the supremum norm) of edges connecting to the best vertex
are below this threshold.
Returns:
has_converged: A scalar boolean `Tensor` indicating whether the algorithm
is deemed to have converged.
"""
objective_convergence = tf.abs(worst_objective -
best_objective) < func_tolerance
simplex_degeneracy = tf.reduce_max(
input_tensor=tf.abs(simplex - best_vertex)) < position_tolerance
return objective_convergence | simplex_degeneracy
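
As an illustration of the two stopping criteria, a sketch (assuming eager
execution) applying the test to a nearly degenerate simplex with made-up
values:
```python
# Illustrative sketch only; assumes eager execution. Values are made up.
simplex = tf.constant([[0.0, 0.0], [1e-9, 0.0], [0.0, 1e-9]], dtype=tf.float64)
objectives = tf.constant([1.0, 1.0 + 1e-10, 1.0 + 2e-10], dtype=tf.float64)
converged = _check_convergence(
    simplex,
    best_vertex=simplex[0],
    best_objective=objectives[0],
    worst_objective=objectives[2],
    func_tolerance=1e-8,
    position_tolerance=1e-8)
# Both the objective spread (2e-10) and the simplex extent (1e-9) are below
# their tolerances, so `converged` evaluates to True.
```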
|
Computes the initial simplex and the objective values at the simplex.
|
def _prepare_args(objective_function,
initial_simplex,
initial_vertex,
step_sizes,
objective_at_initial_simplex,
objective_at_initial_vertex,
batch_evaluate_objective):
"""Computes the initial simplex and the objective values at the simplex.
Args:
objective_function: A Python callable that accepts a point as a
real `Tensor` and returns a `Tensor` of real dtype containing
the value of the function at that point. The function
to be evaluated at the simplex. If `batch_evaluate_objective` is `True`,
the callable may be evaluated on a `Tensor` of shape `[n+1] + s`
where `n` is the dimension of the problem and `s` is the shape of a
single point in the domain (so `n` is the size of a `Tensor`
representing a single point).
In this case, the expected return value is a `Tensor` of shape `[n+1]`.
initial_simplex: None or `Tensor` of real dtype. The initial simplex to
start the search. If supplied, should be a `Tensor` of shape `[n+1] + s`
where `n` is the dimension of the problem and `s` is the shape of a
single point in the domain. Each row (i.e. the `Tensor` with a given
value of the first index) is interpreted as a vertex of a simplex and
hence the rows must be affinely independent. If not supplied, an axes
aligned simplex is constructed using the `initial_vertex` and
`step_sizes`. Exactly one of `initial_simplex` and `initial_vertex` must be
supplied.
initial_vertex: None or `Tensor` of real dtype and any shape that can
be consumed by the `objective_function`. A single point in the domain that
will be used to construct an axes aligned initial simplex.
step_sizes: None or `Tensor` of real dtype and shape broadcasting
compatible with `initial_vertex`. Supplies the simplex scale along each
axes. Only used if `initial_simplex` is not supplied. See the docstring
of `minimize` for more details.
objective_at_initial_simplex: None or rank `1` `Tensor` of real dtype.
The value of the objective function at the initial simplex.
May be supplied only if `initial_simplex` is
supplied. If not supplied, it will be computed.
objective_at_initial_vertex: None or scalar `Tensor` of real dtype. The
value of the objective function at the initial vertex. May be supplied
only if the `initial_vertex` is also supplied.
batch_evaluate_objective: Python `bool`. If True, the objective function
will be evaluated on all the vertices of the simplex packed into a
single tensor. If False, the objective will be mapped across each
vertex separately.
Returns:
prepared_args: A tuple containing the following elements:
dimension: Scalar `Tensor` of `int32` dtype. The dimension of the problem
as inferred from the supplied arguments.
num_vertices: Scalar `Tensor` of `int32` dtype. The number of vertices
in the simplex.
simplex: A `Tensor` of same dtype as `initial_simplex`
(or `initial_vertex`). The first component of the shape of the
`Tensor` is `num_vertices` and each element represents a vertex of
the simplex.
objective_at_simplex: A `Tensor` of same dtype as the dtype of the
return value of objective_function. The shape is a vector of size
`num_vertices`. The objective function evaluated at the simplex.
num_evaluations: An `int32` scalar `Tensor`. The number of points on
which the objective function was evaluated.
Raises:
ValueError: If any of the following conditions hold
1. If none or more than one of `initial_simplex` and `initial_vertex` are
supplied.
2. If `initial_simplex` and `step_sizes` are both specified.
"""
if objective_at_initial_simplex is not None and initial_simplex is None:
raise ValueError('`objective_at_initial_simplex` specified but the '
'`initial_simplex` was not.')
if objective_at_initial_vertex is not None and initial_vertex is None:
raise ValueError('`objective_at_initial_vertex` specified but the '
'`initial_vertex` was not.')
# The full simplex was specified.
if initial_simplex is not None:
if initial_vertex is not None:
raise ValueError('Both `initial_simplex` and `initial_vertex` specified.'
' Only one of the two should be specified.')
if step_sizes is not None:
raise ValueError('`step_sizes` must not be specified when an'
' `initial_simplex` has been specified.')
return _prepare_args_with_initial_simplex(objective_function,
initial_simplex,
objective_at_initial_simplex,
batch_evaluate_objective)
if initial_vertex is None:
raise ValueError('One of `initial_simplex` or `initial_vertex`'
' must be supplied')
if step_sizes is None:
step_sizes = _default_step_sizes(initial_vertex)
return _prepare_args_with_initial_vertex(objective_function,
initial_vertex,
step_sizes,
objective_at_initial_vertex,
batch_evaluate_objective)
|
Chooses default step sizes according to [Gao and Han(2012)][3].
|
def _default_step_sizes(reference_vertex):
"""Chooses default step sizes according to [Gao and Han(2010)][3]."""
# Step size to choose when the coordinate is zero.
small_sizes = tf.ones_like(reference_vertex) * 0.00025
# Step size to choose when the coordinate is non-zero.
large_sizes = reference_vertex * 0.05
return tf.where(tf.abs(reference_vertex) < _EPSILON,
small_sizes,
large_sizes)
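
As a concrete sketch (assuming eager execution and the module-level
`_EPSILON` threshold used above), a vertex with a zero coordinate receives
the small absolute step while nonzero coordinates get a 5% relative step:
```python
# Illustrative sketch only; assumes eager execution.
vertex = tf.constant([0.0, 2.0, -4.0])
steps = _default_step_sizes(vertex)
# Roughly [0.00025, 0.1, -0.2]: a fixed small step where the coordinate is
# (near) zero, and 5% of the coordinate value elsewhere.
```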
|
Evaluates the objective function at the specified initial simplex.
|
def _prepare_args_with_initial_simplex(objective_function,
initial_simplex,
objective_at_initial_simplex,
batch_evaluate_objective):
"""Evaluates the objective function at the specified initial simplex."""
initial_simplex = tf.convert_to_tensor(value=initial_simplex)
# If d is the dimension of the problem, the number of vertices in the
# simplex should be d+1. From this, we can infer the number of dimensions
# as n - 1 where n is the number of vertices specified.
num_vertices = tf.shape(input=initial_simplex)[0]
dim = num_vertices - 1
num_evaluations = 0
if objective_at_initial_simplex is None:
objective_at_initial_simplex, n_evals = _evaluate_objective_multiple(
objective_function, initial_simplex, batch_evaluate_objective)
num_evaluations += n_evals
objective_at_initial_simplex = tf.convert_to_tensor(
value=objective_at_initial_simplex)
return (dim,
num_vertices,
initial_simplex,
objective_at_initial_simplex,
num_evaluations)
|
Constructs a standard axes aligned simplex.
|
def _prepare_args_with_initial_vertex(objective_function,
initial_vertex,
step_sizes,
objective_at_initial_vertex,
batch_evaluate_objective):
"""Constructs a standard axes aligned simplex."""
dim = tf.size(input=initial_vertex)
num_vertices = dim + 1
unit_vectors_along_axes = tf.reshape(
tf.eye(dim, dim, dtype=initial_vertex.dtype.base_dtype),
tf.concat([[dim], tf.shape(input=initial_vertex)], axis=0))
# If step_sizes does not broadcast to initial_vertex, the multiplication
# in the second term will fail.
simplex_face = initial_vertex + step_sizes * unit_vectors_along_axes
simplex = tf.concat([tf.expand_dims(initial_vertex, axis=0),
simplex_face], axis=0)
num_evaluations = 0
# Evaluate the objective function at the simplex vertices.
if objective_at_initial_vertex is None:
objective_at_initial_vertex = objective_function(initial_vertex)
num_evaluations += 1
objective_at_simplex_face, num_evals = _evaluate_objective_multiple(
objective_function, simplex_face, batch_evaluate_objective)
num_evaluations += num_evals
objective_at_simplex = tf.concat(
[
tf.expand_dims(objective_at_initial_vertex, axis=0),
objective_at_simplex_face
], axis=0)
return (dim,
num_vertices,
simplex,
objective_at_simplex,
num_evaluations)
|
Applies the [Gao and Han][3] prescription to the unspecified parameters.
|
def _resolve_parameters(dim,
reflection,
expansion,
contraction,
shrinkage,
dtype):
"""Applies the [Gao and Han][3] presciption to the unspecified parameters."""
dim = tf.cast(dim, dtype=dtype)
reflection = 1. if reflection is None else reflection
expansion = (1. + 2. / dim) if expansion is None else expansion
contraction = (0.75 - 1. / (2 * dim)) if contraction is None else contraction
shrinkage = (1. - 1. / dim) if shrinkage is None else shrinkage
return reflection, expansion, contraction, shrinkage
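
For a concrete sense of the adaptive values, a sketch (assuming eager
execution) of the defaults in dimension four:
```python
# Illustrative sketch only; assumes eager execution.
reflection, expansion, contraction, shrinkage = _resolve_parameters(
    dim=4, reflection=None, expansion=None, contraction=None,
    shrinkage=None, dtype=tf.float64)
# reflection = 1.0, expansion = 1.5, contraction = 0.625, shrinkage = 0.75,
# matching 1, 1 + 2/n, 0.75 - 1/(2n) and 1 - 1/n for n = 4.
```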
|
Evaluates the objective function on a batch of points.
|
def _evaluate_objective_multiple(objective_function, arg_batch,
batch_evaluate_objective):
"""Evaluates the objective function on a batch of points.
If `batch_evaluate_objective` is True, returns
`objective_function(arg_batch)`, else it maps the `objective_function`
across the `arg_batch`.
Args:
objective_function: A Python callable that accepts a single `Tensor` of
rank 'R > 1' and any shape 's' and returns a scalar `Tensor` of real dtype
containing the value of the function at that point. If
`batch_evaluate_objective` is `True`, the callable may be evaluated on a
`Tensor` of shape `[batch_size] + s` where `batch_size` is the
size of the batch of args. In this case, the expected return value is a
`Tensor` of shape `[batch_size]`.
arg_batch: A `Tensor` of real dtype. The batch of arguments at which to
evaluate the `objective_function`. If `batch_evaluate_objective` is False,
`arg_batch` will be unpacked along the zeroth axis and the
`objective_function` will be applied to each element.
batch_evaluate_objective: `bool`. Whether the `objective_function` can
evaluate a batch of arguments at once.
Returns:
A tuple containing:
objective_values: A `Tensor` of real dtype and shape `[batch_size]`.
The value of the objective function evaluated at the supplied
`arg_batch`.
num_evaluations: An `int32` scalar `Tensor` containing the number of
points on which the objective function was evaluated (i.e `batch_size`).
"""
n_points = tf.shape(input=arg_batch)[0]
if batch_evaluate_objective:
return objective_function(arg_batch), n_points
return tf.map_fn(objective_function, arg_batch), n_points
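
A quick sketch (assuming eager execution) contrasting the two evaluation
modes on a batch of three 2-d points with a norm objective:
```python
# Illustrative sketch only; assumes eager execution.
def norm_fn(x):
  return tf.sqrt(tf.reduce_sum(x ** 2, axis=-1))

points = tf.constant([[3.0, 4.0], [0.0, 1.0], [6.0, 8.0]])
# Batched mode: one call on the full [3, 2] tensor.
values, n = _evaluate_objective_multiple(norm_fn, points, True)
# Mapped mode: `norm_fn` is applied to each [2] point via tf.map_fn.
values_mapped, n_mapped = _evaluate_objective_multiple(norm_fn, points, False)
# Both return values of [5., 1., 10.] and an evaluation count of 3.
```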
|
Save a PNG plot with histograms of weight means and stddevs.
|
def plot_weight_posteriors(names, qm_vals, qs_vals, fname):
"""Save a PNG plot with histograms of weight means and stddevs.
Args:
names: A Python `iterable` of `str` variable names.
qm_vals: A Python `iterable`, the same length as `names`,
whose elements are Numpy `array`s, of any shape, containing
posterior means of weight variables.
qs_vals: A Python `iterable`, the same length as `names`,
whose elements are Numpy `array`s, of any shape, containing
posterior standard deviations of weight variables.
fname: Python `str` filename to save the plot to.
"""
fig = figure.Figure(figsize=(6, 3))
canvas = backend_agg.FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 2, 1)
for n, qm in zip(names, qm_vals):
sns.distplot(qm.flatten(), ax=ax, label=n)
ax.set_title("weight means")
ax.set_xlim([-1.5, 1.5])
ax.legend()
ax = fig.add_subplot(1, 2, 2)
for n, qs in zip(names, qs_vals):
sns.distplot(qs.flatten(), ax=ax)
ax.set_title("weight stddevs")
ax.set_xlim([0, 1.])
fig.tight_layout()
canvas.print_figure(fname, format="png")
print("saved {}".format(fname))
|
Save a PNG plot visualizing posterior uncertainty on heldout data.
|
def plot_heldout_prediction(input_vals, probs,
fname, n=10, title=""):
"""Save a PNG plot visualizing posterior uncertainty on heldout data.
Args:
input_vals: A `float`-like Numpy `array` of shape
`[num_heldout] + IMAGE_SHAPE`, containing heldout input images.
probs: A `float`-like Numpy array of shape `[num_monte_carlo,
num_heldout, num_classes]` containing Monte Carlo samples of
class probabilities for each heldout sample.
fname: Python `str` filename to save the plot to.
n: Python `int` number of datapoints to visualize.
title: Python `str` title for the plot.
"""
fig = figure.Figure(figsize=(9, 3*n))
canvas = backend_agg.FigureCanvasAgg(fig)
for i in range(n):
ax = fig.add_subplot(n, 3, 3*i + 1)
ax.imshow(input_vals[i, :].reshape(IMAGE_SHAPE[:-1]), interpolation="None")
ax = fig.add_subplot(n, 3, 3*i + 2)
for prob_sample in probs:
sns.barplot(np.arange(10), prob_sample[i, :], alpha=0.1, ax=ax)
ax.set_ylim([0, 1])
ax.set_title("posterior samples")
ax = fig.add_subplot(n, 3, 3*i + 3)
sns.barplot(np.arange(10), np.mean(probs[:, i, :], axis=0), ax=ax)
ax.set_ylim([0, 1])
ax.set_title("predictive probs")
fig.suptitle(title)
fig.tight_layout()
canvas.print_figure(fname, format="png")
print("saved {}".format(fname))
|
Build an Iterator switching between train and heldout data.
|
def build_input_pipeline(mnist_data, batch_size, heldout_size):
"""Build an Iterator switching between train and heldout data."""
# Build an iterator over training batches.
training_dataset = tf.data.Dataset.from_tensor_slices(
(mnist_data.train.images, np.int32(mnist_data.train.labels)))
training_batches = training_dataset.shuffle(
50000, reshuffle_each_iteration=True).repeat().batch(batch_size)
training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)
# Build an iterator over the heldout set with batch_size=heldout_size,
# i.e., return the entire heldout set as a constant.
heldout_dataset = tf.data.Dataset.from_tensor_slices(
(mnist_data.validation.images,
np.int32(mnist_data.validation.labels)))
heldout_frozen = (heldout_dataset.take(heldout_size).
repeat().batch(heldout_size))
heldout_iterator = tf.compat.v1.data.make_one_shot_iterator(heldout_frozen)
# Combine these into a feedable iterator that can switch between training
# and validation inputs.
handle = tf.compat.v1.placeholder(tf.string, shape=[])
feedable_iterator = tf.compat.v1.data.Iterator.from_string_handle(
handle, training_batches.output_types, training_batches.output_shapes)
images, labels = feedable_iterator.get_next()
return images, labels, handle, training_iterator, heldout_iterator
|
Build fake MNIST - style data for unit testing.
|
def build_fake_data(num_examples=10):
"""Build fake MNIST-style data for unit testing."""
class Dummy(object):
pass
mnist_data = Dummy()
mnist_data.train = Dummy()
mnist_data.train.images = np.float32(np.random.randn(
num_examples, *IMAGE_SHAPE))
mnist_data.train.labels = np.int32(np.random.permutation(
np.arange(num_examples)))
mnist_data.train.num_examples = num_examples
mnist_data.validation = Dummy()
mnist_data.validation.images = np.float32(np.random.randn(
num_examples, *IMAGE_SHAPE))
mnist_data.validation.labels = np.int32(np.random.permutation(
np.arange(num_examples)))
mnist_data.validation.num_examples = num_examples
return mnist_data
|
Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.
|
def _kl_bernoulli_bernoulli(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.
Args:
a: instance of a Bernoulli distribution object.
b: instance of a Bernoulli distribution object.
name: (optional) Name to use for created operations.
default is "kl_bernoulli_bernoulli".
Returns:
Batchwise KL(a || b)
"""
with tf.name_scope(name or "kl_bernoulli_bernoulli"):
delta_probs0 = tf.nn.softplus(-b.logits) - tf.nn.softplus(-a.logits)
delta_probs1 = tf.nn.softplus(b.logits) - tf.nn.softplus(a.logits)
return (tf.sigmoid(a.logits) * delta_probs0
+ tf.sigmoid(-a.logits) * delta_probs1)
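
The softplus form above is algebraically equal to the textbook expression
`p log(p/q) + (1 - p) log((1 - p)/(1 - q))`. A minimal numeric check, as a
sketch assuming eager execution, the usual `np`/`tfp` imports, and that this
function is the registered Bernoulli-Bernoulli KL:
```python
# Illustrative sketch only; assumes eager execution and imports as above.
tfd = tfp.distributions
a = tfd.Bernoulli(logits=np.log(0.3 / 0.7))  # p = 0.3
b = tfd.Bernoulli(logits=np.log(0.7 / 0.3))  # q = 0.7
kl = tfd.kl_divergence(a, b)  # Dispatches to the registered Bernoulli KL.
p, q = 0.3, 0.7
expected = p * np.log(p / q) + (1 - p) * np.log((1 - p) / (1 - q))
np.testing.assert_allclose(kl.numpy(), expected, rtol=1e-6)
```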
|
Returns initializer configuration as a JSON-serializable dict.
|
def get_config(self):
"""Returns initializer configuration as a JSON-serializable dict."""
return {
'initializers': [
tf.compat.v2.initializers.serialize(
tf.keras.initializers.get(init))
for init in self.initializers
],
'sizes': self.sizes,
'validate_args': self.validate_args,
}
|
Instantiates an initializer from a configuration dictionary.
|
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary."""
return cls(**{
'initializers': [tf.compat.v2.initializers.deserialize(init)
for init in config.get('initializers', [])],
'sizes': config.get('sizes', []),
'validate_args': config.get('validate_args', False),
})
|
Numpy matmul wrapper.
|
def _matmul(a, b,
transpose_a=False, transpose_b=False,
adjoint_a=False, adjoint_b=False,
a_is_sparse=False, b_is_sparse=False,
name=None): # pylint: disable=unused-argument
"""Numpy matmul wrapper."""
if a_is_sparse or b_is_sparse:
raise NotImplementedError('Numpy backend does not support sparse matmul.')
if transpose_a or adjoint_a:
a = _matrix_transpose(a, conjugate=adjoint_a)
if transpose_b or adjoint_b:
b = _matrix_transpose(b, conjugate=adjoint_b)
return np.matmul(a, b)
|
Helper to compute stddev, covariance and variance.
|
def _std_var_helper(self, statistic, statistic_name, statistic_ndims,
df_factor_fn):
"""Helper to compute stddev, covariance and variance."""
df = tf.reshape(
self.df,
tf.concat([
tf.shape(input=self.df),
tf.ones([statistic_ndims], dtype=tf.int32)
], -1))
df = _broadcast_to_shape(df, tf.shape(input=statistic))
# We need to put the tf.where inside the outer tf.where to ensure we never
# hit a NaN in the gradient.
denom = tf.where(df > 2., df - 2., tf.ones_like(df))
statistic = statistic * df_factor_fn(df / denom)
# When 1 < df <= 2, stddev/variance are infinite.
inf = dtype_util.as_numpy_dtype(self.dtype)(np.inf)
result_where_defined = tf.where(
df > 2., statistic, tf.fill(tf.shape(input=statistic), inf, name="inf"))
if self.allow_nan_stats:
nan = dtype_util.as_numpy_dtype(self.dtype)(np.nan)
return tf.where(df > 1., result_where_defined,
tf.fill(tf.shape(input=statistic), nan, name="nan"))
else:
with tf.control_dependencies([
assert_util.assert_less(
tf.cast(1., self.dtype),
df,
message=statistic_name +
" not defined for components of df <= 1"),
]):
return tf.identity(result_where_defined)
|
Compute exponentially weighted moving {mean,variance} of a streaming value.
|
def assign_moving_mean_variance(
mean_var, variance_var, value, decay, name=None):
"""Compute exponentially weighted moving {mean,variance} of a streaming value.
The `value` updated exponentially weighted moving `mean_var` and
`variance_var` are given by the following recurrence relations:
```python
variance_var = decay * (variance_var + (1 - decay) * (value - mean_var)**2)
mean_var = decay * mean_var + (1 - decay) * value
```
Note: `mean_var` is updated *after* `variance_var`, i.e., `variance_var` uses
the lag-1 mean.
For derivation justification, see [Finch (2009; Eq. 143)][1].
Parameterization: Finch's `alpha` is `1 - decay`.
Args:
mean_var: `float`-like `Variable` representing the exponentially weighted
moving mean. Same shape as `variance_var` and `value`.
variance_var: `float`-like `Variable` representing the
exponentially weighted moving variance. Same shape as `mean_var` and
`value`.
value: `float`-like `Tensor`. Same shape as `mean_var` and `variance_var`.
decay: A `float`-like `Tensor`. The moving mean decay. Typically close to
`1.`, e.g., `0.999`.
name: Optional name of the returned operation.
Returns:
mean_var: `Variable` representing the `value`-updated exponentially weighted
moving mean.
variance_var: `Variable` representing the `value`-updated
exponentially weighted moving variance.
Raises:
TypeError: if `mean_var` does not have float type `dtype`.
TypeError: if `mean_var`, `variance_var`, `value`, `decay` have different
`base_dtype`.
#### References
[1]: Tony Finch. Incremental calculation of weighted mean and variance.
_Technical Report_, 2009.
http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf
"""
with tf.compat.v1.name_scope(name, "assign_moving_mean_variance",
[variance_var, mean_var, value, decay]):
with tf.compat.v1.colocate_with(variance_var):
with tf.compat.v1.colocate_with(mean_var):
base_dtype = mean_var.dtype.base_dtype
if not base_dtype.is_floating:
raise TypeError(
"mean_var.base_dtype({}) does not have float type "
"`dtype`.".format(base_dtype.name))
if base_dtype != variance_var.dtype.base_dtype:
raise TypeError(
"mean_var.base_dtype({}) != variance_var.base_dtype({})".format(
base_dtype.name,
variance_var.dtype.base_dtype.name))
value = tf.convert_to_tensor(
value=value, dtype=base_dtype, name="value")
decay = tf.convert_to_tensor(
value=decay, dtype=base_dtype, name="decay")
delta = value - mean_var
with tf.control_dependencies([delta]):
# We want mean_{t+1} = decay * mean_t + (1. - decay) * value
# We compute mean += decay * mean_t - mean_t + (1. - decay) * value =
# = (1. - decay) * (value - mean_t)
mean_var = mean_var.assign_add((1. - decay) * delta)
# We want variance_{t+1} = decay * (variance_t +
# + (1 - decay) * (value - mean_var)**2).
# We compute variance -= variance_t - decay * (variance_t +
# + (1 - decay) * (value - mean_var)**2) =
# = (1 - decay) * variance_t
# - decay * (1 - decay) * (value - mean_var)**2
# = (1 - decay) * (variance_t - decay * (value - mean_var)**2).
variance_var = variance_var.assign_sub(
(1. - decay) * (variance_var - decay * tf.square(delta)))
return mean_var, variance_var
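
A minimal NumPy sketch of the same recurrence (independent of the TF
variables above), useful for checking the update order by hand; note the
variance is updated first, using the lag-1 mean:
```python
# Illustrative NumPy sketch of the recurrence; not part of the library.
def np_update(mean, variance, value, decay):
  variance = decay * (variance + (1. - decay) * (value - mean) ** 2)
  mean = decay * mean + (1. - decay) * value
  return mean, variance

mean, variance = 0.0, 0.0
for value in [1.0, 2.0, 3.0]:
  mean, variance = np_update(mean, variance, value, decay=0.9)
```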
|
Compute the log of the exponentially weighted moving mean of the exp.
|
def assign_log_moving_mean_exp(
log_mean_exp_var, log_value, decay, name=None):
"""Compute the log of the exponentially weighted moving mean of the exp.
If `log_value` is a draw from a stationary random variable, this function
approximates `log(E[exp(log_value)])`, i.e., a weighted log-sum-exp. More
precisely, a `tf.Variable`, `log_mean_exp_var`, is updated by `log_value`
using the following identity:
```none
log_mean_exp_var =
= log(decay exp(log_mean_exp_var) + (1 - decay) exp(log_value))
= log(exp(log_mean_exp_var + log(decay)) + exp(log_value + log1p(-decay)))
= log_mean_exp_var
+ log( exp(log_mean_exp_var - log_mean_exp_var + log(decay))
+ exp(log_value - log_mean_exp_var + log1p(-decay)))
= log_mean_exp_var
+ log_sum_exp([log(decay), log_value - log_mean_exp_var + log1p(-decay)]).
```
In addition to numerical stability, this formulation is advantageous because
`log_mean_exp_var` can be updated in a lock-free manner, i.e., using
`assign_add`. (Note: the updates are not thread-safe; it's just that the
update to the tf.Variable is presumed efficient due to being lock-free.)
Args:
log_mean_exp_var: `float`-like `Variable` representing the log of the
exponentially weighted moving mean of the exp. Same shape as `log_value`.
log_value: `float`-like `Tensor` representing a new (streaming) observation.
Same shape as `log_mean_exp_var`.
decay: A `float`-like `Tensor`. The moving mean decay. Typically close to
`1.`, e.g., `0.999`.
name: Optional name of the returned operation.
Returns:
log_mean_exp_var: A reference to the input 'Variable' tensor with the
`log_value`-updated log of the exponentially weighted moving mean of exp.
Raises:
TypeError: if `log_mean_exp_var` does not have float type `dtype`.
TypeError: if `log_mean_exp_var`, `log_value`, `decay` have different
`base_dtype`.
"""
with tf.compat.v1.name_scope(name, "assign_log_moving_mean_exp",
[log_mean_exp_var, log_value, decay]):
# We want to update the variable in a numerically stable and lock-free way.
# To do this, observe that variable `x` updated by `v` is:
# x = log(w exp(x) + (1-w) exp(v))
# = log(exp(x + log(w)) + exp(v + log1p(-w)))
# = x + log(exp(x - x + log(w)) + exp(v - x + log1p(-w)))
# = x + lse([log(w), v - x + log1p(-w)])
with tf.compat.v1.colocate_with(log_mean_exp_var):
base_dtype = log_mean_exp_var.dtype.base_dtype
if not base_dtype.is_floating:
raise TypeError(
"log_mean_exp_var.base_dtype({}) does not have float type "
"`dtype`.".format(base_dtype.name))
log_value = tf.convert_to_tensor(
value=log_value, dtype=base_dtype, name="log_value")
decay = tf.convert_to_tensor(value=decay, dtype=base_dtype, name="decay")
delta = (log_value - log_mean_exp_var)[tf.newaxis, ...]
x = tf.concat([
tf.math.log(decay) * tf.ones_like(delta),
delta + tf.math.log1p(-decay)
],
axis=0)
x = tf.reduce_logsumexp(input_tensor=x, axis=0)
return log_mean_exp_var.assign_add(x)
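
A quick numeric check of the log-sum-exp identity above, as a plain NumPy
sketch (assuming `numpy` is imported as `np`) that does not touch the TF
variable:
```python
# Illustrative NumPy sketch of the identity; not part of the library.
decay = 0.9
log_mean_exp = 0.0           # exp(0) = 1 is the current running mean of exp.
log_value = np.log(3.0)      # New observation.
direct = np.log(decay * np.exp(log_mean_exp) + (1 - decay) * np.exp(log_value))
stable = log_mean_exp + np.logaddexp(
    np.log(decay), log_value - log_mean_exp + np.log1p(-decay))
np.testing.assert_allclose(stable, direct)  # Both equal log(1.2).
```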
|
Compute exponentially weighted moving {mean,variance} of a streaming value.
|
def moving_mean_variance(value, decay, name=None):
"""Compute exponentially weighted moving {mean,variance} of a streaming value.
The exponentially-weighting moving `mean_var` and `variance_var` are updated
by `value` according to the following recurrence:
```python
variance_var = decay * (variance_var + (1-decay) * (value - mean_var)**2)
mean_var = decay * mean_var + (1 - decay) * value
```
Note: `mean_var` is updated *after* `variance_var`, i.e., `variance_var` uses
the lag-`1` mean.
For derivation justification, see [Finch (2009; Eq. 143)][1].
Unlike `assign_moving_mean_variance`, this function handles
variable creation.
Args:
value: `float`-like `Tensor`. Same shape as `mean_var` and `variance_var`.
decay: A `float`-like `Tensor`. The moving mean decay. Typically close to
`1.`, e.g., `0.999`.
name: Optional name of the returned operation.
Returns:
mean_var: `Variable` representing the `value`-updated exponentially weighted
moving mean.
variance_var: `Variable` representing the `value`-updated
exponentially weighted moving variance.
Raises:
TypeError: if `value_var` does not have float type `dtype`.
TypeError: if `value`, `decay` have different `base_dtype`.
#### References
[1]: Tony Finch. Incremental calculation of weighted mean and variance.
_Technical Report_, 2009.
http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf
"""
with tf.compat.v1.variable_scope(name, "moving_mean_variance",
[value, decay]):
value = tf.convert_to_tensor(value=value, name="value")
base_dtype = value.dtype.base_dtype
if not base_dtype.is_floating:
raise TypeError(
"value.base_dtype({}) does not have float type `dtype`.".format(
base_dtype.name))
decay = tf.convert_to_tensor(value=decay, dtype=base_dtype, name="decay")
variance_var = tf.compat.v2.Variable(
name="moving_variance",
initial_value=tf.zeros(shape=value.shape, dtype=value.dtype),
trainable=False)
mean_var = tf.compat.v2.Variable(
name="moving_mean",
initial_value=tf.zeros(shape=value.shape, dtype=value.dtype),
trainable=False)
return assign_moving_mean_variance(
mean_var, variance_var, value, decay)
|
Ensures non-scalar input has at least one column.
|
def _make_columnar(self, x):
"""Ensures non-scalar input has at least one column.
Example:
If `x = [1, 2, 3]` then the output is `[[1], [2], [3]]`.
If `x = [[1, 2, 3], [4, 5, 6]]` then the output is unchanged.
If `x = 1` then the output is unchanged.
Args:
x: `Tensor`.
Returns:
columnar_x: `Tensor` with at least two dimensions.
"""
if tensorshape_util.rank(x.shape) is not None:
if tensorshape_util.rank(x.shape) == 1:
x = x[tf.newaxis, :]
return x
shape = tf.shape(input=x)
maybe_expanded_shape = tf.concat([
shape[:-1],
distribution_util.pick_vector(
tf.equal(tf.rank(x), 1), [1], np.array([], dtype=np.int32)),
shape[-1:],
], 0)
return tf.reshape(x, maybe_expanded_shape)
|
Calculate the batched KL divergence KL(a || b) with a and b Laplace.
|
def _kl_laplace_laplace(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b Laplace.
Args:
a: instance of a Laplace distribution object.
b: instance of a Laplace distribution object.
name: (optional) Name to use for created operations.
default is "kl_laplace_laplace".
Returns:
Batchwise KL(a || b)
"""
with tf.name_scope(name or "kl_laplace_laplace"):
# Consistent with
# http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 38
distance = tf.abs(a.loc - b.loc)
ratio = a.scale / b.scale
return (-tf.math.log(ratio) - 1 + distance / b.scale +
ratio * tf.exp(-distance / a.scale))
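
A sanity-check sketch (assuming eager execution and the usual `tfp`
imports) comparing the closed form against a Monte Carlo estimate of
`E_a[log a(x) - log b(x)]`:
```python
# Illustrative sketch only; assumes eager execution and imports as above.
tfd = tfp.distributions
a = tfd.Laplace(loc=0., scale=1.)
b = tfd.Laplace(loc=1., scale=2.)
analytic_kl = tfd.kl_divergence(a, b)
x = a.sample(100000, seed=42)
mc_kl = tf.reduce_mean(a.log_prob(x) - b.log_prob(x))
# The two estimates should agree to within Monte Carlo error.
print(analytic_kl.numpy(), mc_kl.numpy())
```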
|
Generates Tensor consisting of -1 or +1, chosen uniformly at random.
|
def random_rademacher(shape, dtype=tf.float32, seed=None, name=None):
"""Generates `Tensor` consisting of `-1` or `+1`, chosen uniformly at random.
For more details, see [Rademacher distribution](
https://en.wikipedia.org/wiki/Rademacher_distribution).
Args:
shape: Vector-shaped, `int` `Tensor` representing shape of output.
dtype: (Optional) TF `dtype` representing `dtype` of output.
seed: (Optional) Python integer to seed the random number generator.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'random_rademacher').
Returns:
rademacher: `Tensor` with specified `shape` and `dtype` consisting of `-1`
or `+1` chosen uniformly-at-random.
"""
with tf.compat.v1.name_scope(name, 'random_rademacher', [shape, seed]):
# Choose the dtype to cause `2 * random_bernoulli - 1` to run in the same
# memory (host or device) as the downstream cast will want to put it. The
# convention on GPU is that int32 are in host memory and int64 are in device
# memory.
generation_dtype = tf.int64 if tf.as_dtype(dtype) != tf.int32 else tf.int32
random_bernoulli = tf.random.uniform(
shape, minval=0, maxval=2, dtype=generation_dtype, seed=seed)
return tf.cast(2 * random_bernoulli - 1, dtype)
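
Usage sketch (assuming eager execution): every draw is -1 or +1, so the
empirical second moment is exactly one while the mean concentrates near zero:
```python
# Illustrative sketch only; assumes eager execution.
draws = random_rademacher([10000], dtype=tf.float32, seed=17)
print(tf.reduce_mean(draws))       # Close to 0.
print(tf.reduce_mean(draws ** 2))  # Exactly 1.
```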
|
Generates Tensor of positive reals drawn from a Rayleigh distribution.
|
def random_rayleigh(shape, scale=None, dtype=tf.float32, seed=None, name=None):
"""Generates `Tensor` of positive reals drawn from a Rayleigh distributions.
The probability density function of a Rayleigh distribution with `scale`
parameter is given by:
```none
f(x) = x scale**-2 exp(-x**2 0.5 scale**-2)
```
For more details, see [Rayleigh distribution](
https://en.wikipedia.org/wiki/Rayleigh_distribution)
Args:
shape: Vector-shaped, `int` `Tensor` representing shape of output.
scale: (Optional) Positive `float` `Tensor` representing `Rayleigh` scale.
Default value: `None` (i.e., `scale = 1.`).
dtype: (Optional) TF `dtype` representing `dtype` of output.
Default value: `tf.float32`.
seed: (Optional) Python integer to seed the random number generator.
Default value: `None` (i.e., no seed).
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'random_rayleigh').
Returns:
rayleigh: `Tensor` with specified `shape` and `dtype` consisting of positive
real values drawn from a Rayleigh distribution with specified `scale`.
"""
with tf.compat.v1.name_scope(name, 'random_rayleigh', [shape, scale, seed]):
if scale is not None:
# It's important to expand the shape to match scale's, otherwise we won't
# have independent draws.
scale = tf.convert_to_tensor(value=scale, dtype=dtype, name='scale')
shape = tf.broadcast_dynamic_shape(shape, tf.shape(input=scale))
x = tf.sqrt(-2. * tf.math.log(
tf.random.uniform(shape, minval=0, maxval=1, dtype=dtype, seed=seed)))
if scale is None:
return x
return x * scale
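
Usage sketch (assuming eager execution and `numpy` imported as `np`): the
empirical mean of the draws should approach the analytic Rayleigh mean
`scale * sqrt(pi / 2)`:
```python
# Illustrative sketch only; assumes eager execution.
scale = 2.
draws = random_rayleigh([100000], scale=scale, dtype=tf.float32, seed=7)
empirical_mean = tf.reduce_mean(draws)
analytic_mean = scale * np.sqrt(np.pi / 2.)
# The two should agree to within Monte Carlo error.
print(empirical_mean.numpy(), analytic_mean)
```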
|
Convenience function which chooses the condition based on the predicate.
|
def _pick_scalar_condition(pred, cond_true, cond_false):
"""Convenience function which chooses the condition based on the predicate."""
# Note: This function is only valid if all of pred, cond_true, and cond_false
# are scalars. This means its semantics are arguably more like tf.cond than
# tf.where even though we use tf.where to implement it.
pred_ = tf.get_static_value(tf.convert_to_tensor(value=pred))
if pred_ is None:
return tf.where(pred, cond_true, cond_false)
return cond_true if pred_ else cond_false
|
Finish computation of log_prob on one element of the inverse image.
|
def _finish_log_prob_for_one_fiber(self, y, x, ildj, event_ndims,
**distribution_kwargs):
"""Finish computation of log_prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
log_prob = self.distribution.log_prob(x, **distribution_kwargs)
if self._is_maybe_event_override:
log_prob = tf.reduce_sum(
input_tensor=log_prob, axis=self._reduce_event_indices)
log_prob += tf.cast(ildj, log_prob.dtype)
if self._is_maybe_event_override and isinstance(event_ndims, int):
tensorshape_util.set_shape(
log_prob,
tf.broadcast_static_shape(
tensorshape_util.with_rank_at_least(y.shape, 1)[:-event_ndims],
self.batch_shape))
return log_prob
|
Finish computation of prob on one element of the inverse image.
|
def _finish_prob_for_one_fiber(self, y, x, ildj, event_ndims,
**distribution_kwargs):
"""Finish computation of prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
prob = self.distribution.prob(x, **distribution_kwargs)
if self._is_maybe_event_override:
prob = tf.reduce_prod(input_tensor=prob, axis=self._reduce_event_indices)
prob *= tf.exp(tf.cast(ildj, prob.dtype))
if self._is_maybe_event_override and isinstance(event_ndims, int):
tensorshape_util.set_shape(
prob,
tf.broadcast_static_shape(
tensorshape_util.with_rank_at_least(y.shape, 1)[:-event_ndims],
self.batch_shape))
return prob
|
Helper to __init__ which ensures override batch/event_shape are valid.
|
def _maybe_validate_shape_override(self, override_shape, base_is_scalar,
validate_args, name):
"""Helper to __init__ which ensures override batch/event_shape are valid."""
if override_shape is None:
override_shape = []
override_shape = tf.convert_to_tensor(
value=override_shape, dtype=tf.int32, name=name)
if not dtype_util.is_integer(override_shape.dtype):
raise TypeError("shape override must be an integer")
override_is_scalar = _is_scalar_from_shape_tensor(override_shape)
if tf.get_static_value(override_is_scalar):
return self._empty
dynamic_assertions = []
if tensorshape_util.rank(override_shape.shape) is not None:
if tensorshape_util.rank(override_shape.shape) != 1:
raise ValueError("shape override must be a vector")
elif validate_args:
dynamic_assertions += [
assert_util.assert_rank(
override_shape, 1, message="shape override must be a vector")
]
if tf.get_static_value(override_shape) is not None:
if any(s < 0 for s in tf.get_static_value(override_shape)):
raise ValueError("shape override must have non-negative elements")
elif validate_args:
dynamic_assertions += [
assert_util.assert_non_negative(
override_shape,
message="shape override must have non-negative elements")
]
is_both_nonscalar = prefer_static.logical_and(
prefer_static.logical_not(base_is_scalar),
prefer_static.logical_not(override_is_scalar))
if tf.get_static_value(is_both_nonscalar) is not None:
if tf.get_static_value(is_both_nonscalar):
raise ValueError("base distribution not scalar")
elif validate_args:
dynamic_assertions += [
assert_util.assert_equal(
is_both_nonscalar, False, message="base distribution not scalar")
]
if not dynamic_assertions:
return override_shape
return distribution_util.with_dependencies(
dynamic_assertions, override_shape)
|
Helper which rolls the event dims to the left, or to the right if rotate_right.
|
def _maybe_rotate_dims(self, x, rotate_right=False):
"""Helper which rolls left event_dims left or right event_dims right."""
needs_rotation_const = tf.get_static_value(self._needs_rotation)
if needs_rotation_const is not None and not needs_rotation_const:
return x
ndims = prefer_static.rank(x)
n = (ndims - self._rotate_ndims) if rotate_right else self._rotate_ndims
perm = prefer_static.concat([
prefer_static.range(n, ndims), prefer_static.range(0, n)], axis=0)
return tf.transpose(a=x, perm=perm)
|
Inverse of tf.nn.batch_normalization.
|
def _undo_batch_normalization(x,
mean,
variance,
offset,
scale,
variance_epsilon,
name=None):
r"""Inverse of tf.nn.batch_normalization.
Args:
x: Input `Tensor` of arbitrary dimensionality.
mean: A mean `Tensor`.
variance: A variance `Tensor`.
offset: An offset `Tensor`, often denoted `beta` in equations, or
None. If present, will be added to the normalized tensor.
scale: A scale `Tensor`, often denoted `gamma` in equations, or
`None`. If present, the scale is applied to the normalized tensor.
variance_epsilon: A small `float` added to the minibatch `variance` to
prevent dividing by zero.
name: A name for this operation (optional).
Returns:
batch_unnormalized: The de-normalized, de-scaled, de-offset `Tensor`.
"""
with tf.compat.v2.name_scope(name or "undo_batchnorm"):
# inv = tf.rsqrt(variance + variance_epsilon)
# if scale is not None:
# inv *= scale
# return x * inv + (
# offset - mean * inv if offset is not None else -mean * inv)
rescale = tf.sqrt(variance + variance_epsilon)
if scale is not None:
rescale /= scale
batch_unnormalized = x * rescale + (
mean - offset * rescale if offset is not None else mean)
return batch_unnormalized
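
A round-trip sketch (assuming eager execution and `numpy` imported as `np`)
checking that this inverts `tf.nn.batch_normalization` for a small made-up
batch:
```python
# Illustrative sketch only; assumes eager execution. Values are made up.
x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
mean = tf.constant([2.0, 3.0])
variance = tf.constant([1.0, 4.0])
offset = tf.constant([0.5, -0.5])
scale = tf.constant([2.0, 0.5])
eps = 1e-3
y = tf.nn.batch_normalization(x, mean, variance, offset, scale, eps)
x_recovered = _undo_batch_normalization(y, mean, variance, offset, scale, eps)
np.testing.assert_allclose(x_recovered.numpy(), x.numpy(), rtol=1e-5)
```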
|
Check for valid BatchNormalization layer.
|
def _validate_bn_layer(self, layer):
"""Check for valid BatchNormalization layer.
Args:
layer: Instance of `tf.layers.BatchNormalization`.
Raises:
ValueError: If batchnorm_layer argument is not an instance of
`tf.layers.BatchNormalization`, or if `batchnorm_layer.renorm=True` or
if `batchnorm_layer.virtual_batch_size` is specified.
"""
if (not isinstance(layer, tf.keras.layers.BatchNormalization) and
not isinstance(layer, tf.compat.v1.layers.BatchNormalization)):
raise ValueError(
"batchnorm_layer must be an instance of BatchNormalization layer.")
if layer.renorm:
raise ValueError("BatchNorm Bijector does not support renormalization.")
if layer.virtual_batch_size:
raise ValueError(
"BatchNorm Bijector does not support virtual batch sizes.")
|
Slices a single parameter of a distribution.
|
def _slice_single_param(param, param_event_ndims, slices, dist_batch_shape):
"""Slices a single parameter of a distribution.
Args:
param: A `Tensor`, the original parameter to slice.
param_event_ndims: `int` event parameterization rank for this parameter.
slices: A `tuple` of normalized slices.
dist_batch_shape: The distribution's batch shape `Tensor`.
Returns:
new_param: A `Tensor`, batch-sliced according to slices.
"""
# Extend param shape with ones on the left to match dist_batch_shape.
param_shape = tf.shape(input=param)
insert_ones = tf.ones(
[tf.size(input=dist_batch_shape) + param_event_ndims - tf.rank(param)],
dtype=param_shape.dtype)
new_param_shape = tf.concat([insert_ones, param_shape], axis=0)
full_batch_param = tf.reshape(param, new_param_shape)
param_slices = []
# We separately track the batch axis from the parameter axis because we want
# them to align for positive indexing, and be offset by param_event_ndims for
# negative indexing.
param_dim_idx = 0
batch_dim_idx = 0
for slc in slices:
if slc is tf.newaxis:
param_slices.append(slc)
continue
if slc is Ellipsis:
if batch_dim_idx < 0:
raise ValueError('Found multiple `...` in slices {}'.format(slices))
param_slices.append(slc)
# Switch over to negative indexing for the broadcast check.
num_remaining_non_newaxis_slices = sum(
[s is not tf.newaxis for s in slices[slices.index(Ellipsis) + 1:]])
batch_dim_idx = -num_remaining_non_newaxis_slices
param_dim_idx = batch_dim_idx - param_event_ndims
continue
# Find the batch dimension sizes for both parameter and distribution.
param_dim_size = new_param_shape[param_dim_idx]
batch_dim_size = dist_batch_shape[batch_dim_idx]
is_broadcast = batch_dim_size > param_dim_size
# Slices are denoted by start:stop:step.
if isinstance(slc, slice):
start, stop, step = slc.start, slc.stop, slc.step
if start is not None:
start = tf.where(is_broadcast, 0, start)
if stop is not None:
stop = tf.where(is_broadcast, 1, stop)
if step is not None:
step = tf.where(is_broadcast, 1, step)
param_slices.append(slice(start, stop, step))
else: # int, or int Tensor, e.g. d[d.batch_shape_tensor()[0] // 2]
param_slices.append(tf.where(is_broadcast, 0, slc))
param_dim_idx += 1
batch_dim_idx += 1
param_slices.extend([ALL_SLICE] * param_event_ndims)
return full_batch_param.__getitem__(param_slices)
|
Computes the override dictionary of sliced parameters.
|
def _slice_params_to_dict(dist, params_event_ndims, slices):
"""Computes the override dictionary of sliced parameters.
Args:
dist: The tfd.Distribution being batch-sliced.
params_event_ndims: Per-event parameter ranks, a `str->int` `dict`.
slices: Slices as received by __getitem__.
Returns:
overrides: `str->Tensor` `dict` of batch-sliced parameter overrides.
"""
override_dict = {}
for param_name, param_event_ndims in six.iteritems(params_event_ndims):
# Verify that either None or a legit value is in the parameters dict.
if param_name not in dist.parameters:
raise ValueError('Distribution {} is missing advertised '
'parameter {}'.format(dist, param_name))
param = dist.parameters[param_name]
if param is None:
# some distributions have multiple possible parameterizations; this
# param was not provided
continue
dtype = None
if hasattr(dist, param_name):
attr = getattr(dist, param_name)
dtype = getattr(attr, 'dtype', None)
if dtype is None:
dtype = dist.dtype
warnings.warn('Unable to find property getter for parameter Tensor {} '
'on {}, falling back to Distribution.dtype {}'.format(
param_name, dist, dtype))
param = tf.convert_to_tensor(value=param, dtype=dtype)
override_dict[param_name] = _slice_single_param(param, param_event_ndims,
slices,
dist.batch_shape_tensor())
return override_dict
|
Applies a single slicing step to dist returning a new instance.
|
def _apply_single_step(dist, params_event_ndims, slices, params_overrides):
"""Applies a single slicing step to `dist`, returning a new instance."""
if len(slices) == 1 and slices[0] == Ellipsis:
# The path used by Distribution.copy: batch_slice(...args..., Ellipsis)
override_dict = {}
else:
override_dict = _slice_params_to_dict(dist, params_event_ndims, slices)
override_dict.update(params_overrides)
parameters = dict(dist.parameters, **override_dict)
new_dist = type(dist)(**parameters)
return new_dist
|
Applies a sequence of slice or copy-with-overrides operations to dist.
|
def _apply_slice_sequence(dist, params_event_ndims, slice_overrides_seq):
"""Applies a sequence of slice or copy-with-overrides operations to `dist`."""
for slices, overrides in slice_overrides_seq:
dist = _apply_single_step(dist, params_event_ndims, slices, overrides)
return dist
|
Slices dist along its batch dimensions. Helper for tfd.Distribution.
|
def batch_slice(dist, params_event_ndims, params_overrides, slices):
"""Slices `dist` along its batch dimensions. Helper for tfd.Distribution.
Args:
dist: A `tfd.Distribution` instance.
params_event_ndims: A `dict` of `str->int` indicating the number of
dimensions of a given parameter required to parameterize a single event.
params_overrides: A `dict` of parameter overrides. (e.g. from
`Distribution.copy`).
slices: A `slice` or `int` or `int` `Tensor` or `tf.newaxis` or `tuple`
thereof. (e.g. the argument of a `__getitem__` method).
Returns:
new_dist: A batch-sliced `tfd.Distribution`.
"""
if not isinstance(slices, collections.Sequence):
slices = (slices,)
# We track the history of slice and copy(**param_overrides) in order to trace
# back to the original distribution's source variables.
orig_dist, slice_overrides_seq = getattr(dist, PROVENANCE_ATTR, (dist, []))
slice_overrides_seq += [(slices, params_overrides)]
# Re-doing the full sequence of slice+copy override work here enables
# gradients all the way back to the original distribution's arguments.
dist = _apply_slice_sequence(orig_dist, params_event_ndims,
slice_overrides_seq)
setattr(dist, PROVENANCE_ATTR, (orig_dist, slice_overrides_seq))
return dist
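
In user-facing terms this helper backs `__getitem__` on distributions; a
sketch (assuming eager execution and the usual `tfp` imports) of how a batch
of Normals is sliced:
```python
# Illustrative sketch only; assumes eager execution and imports as above.
tfd = tfp.distributions
dist = tfd.Normal(loc=tf.zeros([4, 3]), scale=1.)
sliced = dist[1:3, tf.newaxis, ...]  # Routed through `batch_slice`.
print(sliced.batch_shape)  # Expected: [2, 1, 3].
```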
|