| partition (stringclasses, 3 values) | func_name (stringlengths 1–134) | docstring (stringlengths 1–46.9k) | path (stringlengths 4–223) | original_string (stringlengths 75–104k) | code (stringlengths 75–104k) | docstring_tokens (listlengths 1–1.97k) | repo (stringlengths 7–55) | language (stringclasses, 1 value) | url (stringlengths 87–315) | code_tokens (listlengths 19–28.4k) | sha (stringlengths 40) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
fit
|
Runs multiple Fisher scoring steps.
Args:
model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row
represents a sample's features.
response: (Batch of) vector-shaped `Tensor` where each element represents a
sample's observed response (to the corresponding row of features). Must
have same `dtype` as `model_matrix`.
model: `tfp.glm.ExponentialFamily`-like instance which implicitly
characterizes a negative log-likelihood loss by specifying the
distribution's `mean`, `gradient_mean`, and `variance`.
model_coefficients_start: Optional (batch of) vector-shaped `Tensor`
representing the initial model coefficients, one for each column in
`model_matrix`. Must have same `dtype` as `model_matrix`.
Default value: Zeros.
predicted_linear_response_start: Optional `Tensor` with `shape`, `dtype`
matching `response`; represents `offset` shifted initial linear
predictions based on `model_coefficients_start`.
Default value: `offset` if `model_coefficients is None`, and
`tf.linalg.matvec(model_matrix, model_coefficients_start) + offset`
otherwise.
l2_regularizer: Optional scalar `Tensor` representing L2 regularization
penalty, i.e.,
`loss(w) = sum{-log p(y[i]|x[i],w) : i=1..n} + l2_regularizer ||w||_2^2`.
Default value: `None` (i.e., no L2 regularization).
dispersion: Optional (batch of) `Tensor` representing `response` dispersion,
i.e., as in, `p(y|theta) := exp((y theta - A(theta)) / dispersion)`.
Must broadcast with rows of `model_matrix`.
Default value: `None` (i.e., "no dispersion").
offset: Optional `Tensor` representing constant shift applied to
`predicted_linear_response`. Must broadcast to `response`.
Default value: `None` (i.e., `tf.zeros_like(response)`).
convergence_criteria_fn: Python `callable` taking:
`is_converged_previous`, `iter_`, `model_coefficients_previous`,
`predicted_linear_response_previous`, `model_coefficients_next`,
`predicted_linear_response_next`, `response`, `model`, `dispersion` and
returning a `bool` `Tensor` indicating that Fisher scoring has converged.
See `convergence_criteria_small_relative_norm_weights_change` as an
example function.
Default value: `None` (i.e.,
`convergence_criteria_small_relative_norm_weights_change`).
learning_rate: Optional (batch of) scalar `Tensor` used to dampen iterative
progress. Typically only needed if optimization diverges; should be no
larger than `1` and typically very close to `1`.
Default value: `None` (i.e., `1`).
fast_unsafe_numerics: Optional Python `bool` indicating if faster, less
numerically accurate methods can be employed for computing the weighted
least-squares solution.
Default value: `True` (i.e., "fast but possibly diminished accuracy").
maximum_iterations: Optional maximum number of iterations of Fisher scoring
to run; "and-ed" with result of `convergence_criteria_fn`.
Default value: `None` (i.e., `infinity`).
name: Python `str` used as name prefix to ops created by this function.
Default value: `"fit"`.
Returns:
model_coefficients: (Batch of) vector-shaped `Tensor`; represents the
fitted model coefficients, one for each column in `model_matrix`.
predicted_linear_response: `response`-shaped `Tensor` representing linear
predictions based on new `model_coefficients`, i.e.,
`tf.linalg.matvec(model_matrix, model_coefficients) + offset`.
is_converged: `bool` `Tensor` indicating that the returned
`model_coefficients` met the `convergence_criteria_fn` criteria within the
`maximum_iterations` limit.
iter_: `int32` `Tensor` indicating the number of iterations taken.
#### Example
```python
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
def make_dataset(n, d, link, scale=1., dtype=np.float32):
model_coefficients = tfd.Uniform(
low=np.array(-1, dtype),
high=np.array(1, dtype)).sample(d, seed=42)
radius = np.sqrt(2.)
model_coefficients *= radius / tf.linalg.norm(model_coefficients)
model_matrix = tfd.Normal(
loc=np.array(0, dtype),
scale=np.array(1, dtype)).sample([n, d], seed=43)
scale = tf.convert_to_tensor(scale, dtype)
linear_response = tf.tensordot(
model_matrix, model_coefficients, axes=[[1], [0]])
if link == 'linear':
response = tfd.Normal(loc=linear_response, scale=scale).sample(seed=44)
elif link == 'probit':
response = tf.cast(
tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) > 0,
dtype)
elif link == 'logit':
response = tfd.Bernoulli(logits=linear_response).sample(seed=44)
else:
raise ValueError('unrecognized true link: {}'.format(link))
return model_matrix, response, model_coefficients
X, Y, w_true = make_dataset(n=int(1e6), d=100, link='probit')
w, linear_response, is_converged, num_iter = tfp.glm.fit(
model_matrix=X,
response=Y,
model=tfp.glm.BernoulliNormalCDF())
log_likelihood = tfp.glm.BernoulliNormalCDF().log_prob(Y, linear_response)
with tf.Session() as sess:
[w_, linear_response_, is_converged_, num_iter_, Y_, w_true_,
log_likelihood_] = sess.run([
w, linear_response, is_converged, num_iter, Y, w_true,
log_likelihood])
print('is_converged: ', is_converged_)
print(' num_iter: ', num_iter_)
print(' accuracy: ', np.mean((linear_response_ > 0.) == Y_))
print(' deviance: ', 2. * np.mean(log_likelihood_))
print('||w0-w1||_2 / (1+||w0||_2): ', (np.linalg.norm(w_true_ - w_, ord=2) /
(1. + np.linalg.norm(w_true_, ord=2))))
# ==>
# is_converged: True
# num_iter: 6
# accuracy: 0.804382
# deviance: -0.820746600628
# ||w0-w1||_2 / (1+||w0||_2): 0.00619245105309
```
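The example above drives a TF1-style `tf.Session`. A minimal sketch of the same call under TF2 eager execution (reusing `make_dataset` from above; the smaller problem sizes are arbitrary):
```python
X, Y, w_true = make_dataset(n=int(1e4), d=10, link='probit')
w, linear_response, is_converged, num_iter = tfp.glm.fit(
    model_matrix=X,
    response=Y,
    model=tfp.glm.BernoulliNormalCDF())
# Eager tensors are concrete; no Session is needed.
print('is_converged:', is_converged.numpy())
print('num_iter:', num_iter.numpy())
```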
|
tensorflow_probability/python/glm/fisher_scoring.py
|
def fit(
model_matrix,
response,
model,
model_coefficients_start=None,
predicted_linear_response_start=None,
l2_regularizer=None,
dispersion=None,
offset=None,
convergence_criteria_fn=None,
learning_rate=None,
fast_unsafe_numerics=True,
maximum_iterations=None,
name=None):
"""Runs multiple Fisher scoring steps.
Args:
model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row
represents a sample's features.
response: (Batch of) vector-shaped `Tensor` where each element represents a
sample's observed response (to the corresponding row of features). Must
have same `dtype` as `model_matrix`.
model: `tfp.glm.ExponentialFamily`-like instance which implicitly
characterizes a negative log-likelihood loss by specifying the
distribution's `mean`, `gradient_mean`, and `variance`.
model_coefficients_start: Optional (batch of) vector-shaped `Tensor`
representing the initial model coefficients, one for each column in
`model_matrix`. Must have same `dtype` as `model_matrix`.
Default value: Zeros.
predicted_linear_response_start: Optional `Tensor` with `shape`, `dtype`
matching `response`; represents `offset` shifted initial linear
predictions based on `model_coefficients_start`.
Default value: `offset` if `model_coefficients is None`, and
`tf.linalg.matvec(model_matrix, model_coefficients_start) + offset`
otherwise.
l2_regularizer: Optional scalar `Tensor` representing L2 regularization
penalty, i.e.,
`loss(w) = sum{-log p(y[i]|x[i],w) : i=1..n} + l2_regularizer ||w||_2^2`.
Default value: `None` (i.e., no L2 regularization).
dispersion: Optional (batch of) `Tensor` representing `response` dispersion,
i.e., as in, `p(y|theta) := exp((y theta - A(theta)) / dispersion)`.
Must broadcast with rows of `model_matrix`.
Default value: `None` (i.e., "no dispersion").
offset: Optional `Tensor` representing constant shift applied to
`predicted_linear_response`. Must broadcast to `response`.
Default value: `None` (i.e., `tf.zeros_like(response)`).
convergence_criteria_fn: Python `callable` taking:
`is_converged_previous`, `iter_`, `model_coefficients_previous`,
`predicted_linear_response_previous`, `model_coefficients_next`,
`predicted_linear_response_next`, `response`, `model`, `dispersion` and
returning a `bool` `Tensor` indicating that Fisher scoring has converged.
See `convergence_criteria_small_relative_norm_weights_change` as an
example function.
Default value: `None` (i.e.,
`convergence_criteria_small_relative_norm_weights_change`).
learning_rate: Optional (batch of) scalar `Tensor` used to dampen iterative
progress. Typically only needed if optimization diverges; should be no
larger than `1` and typically very close to `1`.
Default value: `None` (i.e., `1`).
fast_unsafe_numerics: Optional Python `bool` indicating if faster, less
numerically accurate methods can be employed for computing the weighted
least-squares solution.
Default value: `True` (i.e., "fast but possibly diminished accuracy").
maximum_iterations: Optional maximum number of iterations of Fisher scoring
to run; "and-ed" with result of `convergence_criteria_fn`.
Default value: `None` (i.e., `infinity`).
name: Python `str` used as name prefix to ops created by this function.
Default value: `"fit"`.
Returns:
model_coefficients: (Batch of) vector-shaped `Tensor`; represents the
fitted model coefficients, one for each column in `model_matrix`.
predicted_linear_response: `response`-shaped `Tensor` representing linear
predictions based on new `model_coefficients`, i.e.,
`tf.linalg.matvec(model_matrix, model_coefficients) + offset`.
is_converged: `bool` `Tensor` indicating that the returned
`model_coefficients` met the `convergence_criteria_fn` criteria within the
`maximum_iterations` limit.
iter_: `int32` `Tensor` indicating the number of iterations taken.
#### Example
```python
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
def make_dataset(n, d, link, scale=1., dtype=np.float32):
model_coefficients = tfd.Uniform(
low=np.array(-1, dtype),
high=np.array(1, dtype)).sample(d, seed=42)
radius = np.sqrt(2.)
model_coefficients *= radius / tf.linalg.norm(model_coefficients)
model_matrix = tfd.Normal(
loc=np.array(0, dtype),
scale=np.array(1, dtype)).sample([n, d], seed=43)
scale = tf.convert_to_tensor(scale, dtype)
linear_response = tf.tensordot(
model_matrix, model_coefficients, axes=[[1], [0]])
if link == 'linear':
response = tfd.Normal(loc=linear_response, scale=scale).sample(seed=44)
elif link == 'probit':
response = tf.cast(
tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) > 0,
dtype)
elif link == 'logit':
response = tfd.Bernoulli(logits=linear_response).sample(seed=44)
else:
raise ValueError('unrecognized true link: {}'.format(link))
return model_matrix, response, model_coefficients
X, Y, w_true = make_dataset(n=int(1e6), d=100, link='probit')
w, linear_response, is_converged, num_iter = tfp.glm.fit(
model_matrix=X,
response=Y,
model=tfp.glm.BernoulliNormalCDF())
log_likelihood = tfp.glm.BernoulliNormalCDF().log_prob(Y, linear_response)
with tf.Session() as sess:
[w_, linear_response_, is_converged_, num_iter_, Y_, w_true_,
log_likelihood_] = sess.run([
w, linear_response, is_converged, num_iter, Y, w_true,
log_likelihood])
print('is_converged: ', is_converged_)
print(' num_iter: ', num_iter_)
print(' accuracy: ', np.mean((linear_response_ > 0.) == Y_))
print(' deviance: ', 2. * np.mean(log_likelihood_))
print('||w0-w1||_2 / (1+||w0||_2): ', (np.linalg.norm(w_true_ - w_, ord=2) /
(1. + np.linalg.norm(w_true_, ord=2))))
# ==>
# is_converged: True
# num_iter: 6
# accuracy: 0.804382
# deviance: -0.820746600628
# ||w0-w1||_2 / (1+||w0||_2): 0.00619245105309
```
"""
graph_deps = [model_matrix, response, model_coefficients_start,
predicted_linear_response_start, dispersion, offset,
learning_rate, maximum_iterations]
with tf.compat.v1.name_scope(name, 'fit', graph_deps):
[
model_matrix,
response,
model_coefficients_start,
predicted_linear_response_start,
offset,
] = prepare_args(
model_matrix,
response,
model_coefficients_start,
predicted_linear_response_start,
offset)
if convergence_criteria_fn is None:
convergence_criteria_fn = (
convergence_criteria_small_relative_norm_weights_change())
def _body(
is_converged_previous,
iter_,
model_coefficients_previous,
predicted_linear_response_previous):
"""`tf.while_loop` body."""
model_coefficients_next, predicted_linear_response_next = fit_one_step(
model_matrix,
response,
model,
model_coefficients_previous,
predicted_linear_response_previous,
l2_regularizer,
dispersion,
offset,
learning_rate,
fast_unsafe_numerics)
is_converged_next = convergence_criteria_fn(
is_converged_previous=is_converged_previous,
iter_=iter_,
model_coefficients_previous=model_coefficients_previous,
predicted_linear_response_previous=predicted_linear_response_previous,
model_coefficients_next=model_coefficients_next,
predicted_linear_response_next=predicted_linear_response_next,
response=response,
model=model,
dispersion=dispersion)
return [
is_converged_next,
iter_ + 1,
model_coefficients_next,
predicted_linear_response_next,
]
# while not converged:
# fit_one_step
[
is_converged,
iter_,
model_coefficients,
predicted_linear_response,
] = tf.while_loop(
cond=lambda is_converged, *args: tf.logical_not(is_converged),
body=_body,
loop_vars=[
tf.zeros([], np.bool), # is_converged
tf.zeros([], np.int32), # iter_
model_coefficients_start,
predicted_linear_response_start,
],
maximum_iterations=maximum_iterations)
return [
model_coefficients,
predicted_linear_response,
is_converged,
iter_
]
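The loop above threads `is_converged` through as the first loop variable so `cond` can read it. A self-contained toy sketch of the same `tf.while_loop` pattern (Newton iteration for `sqrt(2)`; all names here are illustrative, not part of the library):
```python
import tensorflow as tf

def _body(is_converged, iter_, x):
  x_next = 0.5 * (x + 2. / x)  # one Newton step toward sqrt(2)
  return [tf.abs(x_next - x) < 1e-8, iter_ + 1, x_next]

is_converged, iter_, x = tf.while_loop(
    cond=lambda is_converged, *args: tf.logical_not(is_converged),
    body=_body,
    loop_vars=[tf.zeros([], tf.bool), tf.zeros([], tf.int32),
               tf.constant(1.)],
    maximum_iterations=100)
```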
|
def fit(
model_matrix,
response,
model,
model_coefficients_start=None,
predicted_linear_response_start=None,
l2_regularizer=None,
dispersion=None,
offset=None,
convergence_criteria_fn=None,
learning_rate=None,
fast_unsafe_numerics=True,
maximum_iterations=None,
name=None):
"""Runs multiple Fisher scoring steps.
Args:
model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row
represents a sample's features.
response: (Batch of) vector-shaped `Tensor` where each element represents a
sample's observed response (to the corresponding row of features). Must
have same `dtype` as `model_matrix`.
model: `tfp.glm.ExponentialFamily`-like instance which implicitly
characterizes a negative log-likelihood loss by specifying the
distribution's `mean`, `gradient_mean`, and `variance`.
model_coefficients_start: Optional (batch of) vector-shaped `Tensor`
representing the initial model coefficients, one for each column in
`model_matrix`. Must have same `dtype` as `model_matrix`.
Default value: Zeros.
predicted_linear_response_start: Optional `Tensor` with `shape`, `dtype`
matching `response`; represents `offset` shifted initial linear
predictions based on `model_coefficients_start`.
Default value: `offset` if `model_coefficients is None`, and
`tf.linalg.matvec(model_matrix, model_coefficients_start) + offset`
otherwise.
l2_regularizer: Optional scalar `Tensor` representing L2 regularization
penalty, i.e.,
`loss(w) = sum{-log p(y[i]|x[i],w) : i=1..n} + l2_regularizer ||w||_2^2`.
Default value: `None` (i.e., no L2 regularization).
dispersion: Optional (batch of) `Tensor` representing `response` dispersion,
i.e., as in, `p(y|theta) := exp((y theta - A(theta)) / dispersion)`.
Must broadcast with rows of `model_matrix`.
Default value: `None` (i.e., "no dispersion").
offset: Optional `Tensor` representing constant shift applied to
`predicted_linear_response`. Must broadcast to `response`.
Default value: `None` (i.e., `tf.zeros_like(response)`).
convergence_criteria_fn: Python `callable` taking:
`is_converged_previous`, `iter_`, `model_coefficients_previous`,
`predicted_linear_response_previous`, `model_coefficients_next`,
`predicted_linear_response_next`, `response`, `model`, `dispersion` and
returning a `bool` `Tensor` indicating that Fisher scoring has converged.
See `convergence_criteria_small_relative_norm_weights_change` as an
example function.
Default value: `None` (i.e.,
`convergence_criteria_small_relative_norm_weights_change`).
learning_rate: Optional (batch of) scalar `Tensor` used to dampen iterative
progress. Typically only needed if optimization diverges; should be no
larger than `1` and typically very close to `1`.
Default value: `None` (i.e., `1`).
fast_unsafe_numerics: Optional Python `bool` indicating if faster, less
numerically accurate methods can be employed for computing the weighted
least-squares solution.
Default value: `True` (i.e., "fast but possibly diminished accuracy").
maximum_iterations: Optional maximum number of iterations of Fisher scoring
to run; "and-ed" with result of `convergence_criteria_fn`.
Default value: `None` (i.e., `infinity`).
name: Python `str` used as name prefix to ops created by this function.
Default value: `"fit"`.
Returns:
model_coefficients: (Batch of) vector-shaped `Tensor`; represents the
fitted model coefficients, one for each column in `model_matrix`.
predicted_linear_response: `response`-shaped `Tensor` representing linear
predictions based on new `model_coefficients`, i.e.,
`tf.linalg.matvec(model_matrix, model_coefficients) + offset`.
is_converged: `bool` `Tensor` indicating that the returned
`model_coefficients` met the `convergence_criteria_fn` criteria within the
`maximum_iterations` limit.
iter_: `int32` `Tensor` indicating the number of iterations taken.
#### Example
```python
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
def make_dataset(n, d, link, scale=1., dtype=np.float32):
model_coefficients = tfd.Uniform(
low=np.array(-1, dtype),
high=np.array(1, dtype)).sample(d, seed=42)
radius = np.sqrt(2.)
model_coefficients *= radius / tf.linalg.norm(model_coefficients)
model_matrix = tfd.Normal(
loc=np.array(0, dtype),
scale=np.array(1, dtype)).sample([n, d], seed=43)
scale = tf.convert_to_tensor(scale, dtype)
linear_response = tf.tensordot(
model_matrix, model_coefficients, axes=[[1], [0]])
if link == 'linear':
response = tfd.Normal(loc=linear_response, scale=scale).sample(seed=44)
elif link == 'probit':
response = tf.cast(
tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) > 0,
dtype)
elif link == 'logit':
response = tfd.Bernoulli(logits=linear_response).sample(seed=44)
else:
raise ValueError('unrecognized true link: {}'.format(link))
return model_matrix, response, model_coefficients
X, Y, w_true = make_dataset(n=int(1e6), d=100, link='probit')
w, linear_response, is_converged, num_iter = tfp.glm.fit(
model_matrix=X,
response=Y,
model=tfp.glm.BernoulliNormalCDF())
log_likelihood = tfp.glm.BernoulliNormalCDF().log_prob(Y, linear_response)
with tf.Session() as sess:
[w_, linear_response_, is_converged_, num_iter_, Y_, w_true_,
log_likelihood_] = sess.run([
w, linear_response, is_converged, num_iter, Y, w_true,
log_likelihood])
print('is_converged: ', is_converged_)
print(' num_iter: ', num_iter_)
print(' accuracy: ', np.mean((linear_response_ > 0.) == Y_))
print(' deviance: ', 2. * np.mean(log_likelihood_))
print('||w0-w1||_2 / (1+||w0||_2): ', (np.linalg.norm(w_true_ - w_, ord=2) /
(1. + np.linalg.norm(w_true_, ord=2))))
# ==>
# is_converged: True
# num_iter: 6
# accuracy: 0.804382
# deviance: -0.820746600628
# ||w0-w1||_2 / (1+||w0||_2): 0.00619245105309
```
"""
graph_deps = [model_matrix, response, model_coefficients_start,
predicted_linear_response_start, dispersion, offset,
learning_rate, maximum_iterations]
with tf.compat.v1.name_scope(name, 'fit', graph_deps):
[
model_matrix,
response,
model_coefficients_start,
predicted_linear_response_start,
offset,
] = prepare_args(
model_matrix,
response,
model_coefficients_start,
predicted_linear_response_start,
offset)
if convergence_criteria_fn is None:
convergence_criteria_fn = (
convergence_criteria_small_relative_norm_weights_change())
def _body(
is_converged_previous,
iter_,
model_coefficients_previous,
predicted_linear_response_previous):
"""`tf.while_loop` body."""
model_coefficients_next, predicted_linear_response_next = fit_one_step(
model_matrix,
response,
model,
model_coefficients_previous,
predicted_linear_response_previous,
l2_regularizer,
dispersion,
offset,
learning_rate,
fast_unsafe_numerics)
is_converged_next = convergence_criteria_fn(
is_converged_previous=is_converged_previous,
iter_=iter_,
model_coefficients_previous=model_coefficients_previous,
predicted_linear_response_previous=predicted_linear_response_previous,
model_coefficients_next=model_coefficients_next,
predicted_linear_response_next=predicted_linear_response_next,
response=response,
model=model,
dispersion=dispersion)
return [
is_converged_next,
iter_ + 1,
model_coefficients_next,
predicted_linear_response_next,
]
# while not converged:
# fit_one_step
[
is_converged,
iter_,
model_coefficients,
predicted_linear_response,
] = tf.while_loop(
cond=lambda is_converged, *args: tf.logical_not(is_converged),
body=_body,
loop_vars=[
tf.zeros([], np.bool), # is_converged
tf.zeros([], np.int32), # iter_
model_coefficients_start,
predicted_linear_response_start,
],
maximum_iterations=maximum_iterations)
return [
model_coefficients,
predicted_linear_response,
is_converged,
iter_
]
|
[
"Runs",
"multiple",
"Fisher",
"scoring",
"steps",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/fisher_scoring.py#L36-L256
|
[
"def",
"fit",
"(",
"model_matrix",
",",
"response",
",",
"model",
",",
"model_coefficients_start",
"=",
"None",
",",
"predicted_linear_response_start",
"=",
"None",
",",
"l2_regularizer",
"=",
"None",
",",
"dispersion",
"=",
"None",
",",
"offset",
"=",
"None",
",",
"convergence_criteria_fn",
"=",
"None",
",",
"learning_rate",
"=",
"None",
",",
"fast_unsafe_numerics",
"=",
"True",
",",
"maximum_iterations",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"graph_deps",
"=",
"[",
"model_matrix",
",",
"response",
",",
"model_coefficients_start",
",",
"predicted_linear_response_start",
",",
"dispersion",
",",
"offset",
",",
"learning_rate",
",",
"maximum_iterations",
"]",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'fit'",
",",
"graph_deps",
")",
":",
"[",
"model_matrix",
",",
"response",
",",
"model_coefficients_start",
",",
"predicted_linear_response_start",
",",
"offset",
",",
"]",
"=",
"prepare_args",
"(",
"model_matrix",
",",
"response",
",",
"model_coefficients_start",
",",
"predicted_linear_response_start",
",",
"offset",
")",
"if",
"convergence_criteria_fn",
"is",
"None",
":",
"convergence_criteria_fn",
"=",
"(",
"convergence_criteria_small_relative_norm_weights_change",
"(",
")",
")",
"def",
"_body",
"(",
"is_converged_previous",
",",
"iter_",
",",
"model_coefficients_previous",
",",
"predicted_linear_response_previous",
")",
":",
"\"\"\"`tf.while_loop` body.\"\"\"",
"model_coefficients_next",
",",
"predicted_linear_response_next",
"=",
"fit_one_step",
"(",
"model_matrix",
",",
"response",
",",
"model",
",",
"model_coefficients_previous",
",",
"predicted_linear_response_previous",
",",
"l2_regularizer",
",",
"dispersion",
",",
"offset",
",",
"learning_rate",
",",
"fast_unsafe_numerics",
")",
"is_converged_next",
"=",
"convergence_criteria_fn",
"(",
"is_converged_previous",
"=",
"is_converged_previous",
",",
"iter_",
"=",
"iter_",
",",
"model_coefficients_previous",
"=",
"model_coefficients_previous",
",",
"predicted_linear_response_previous",
"=",
"predicted_linear_response_previous",
",",
"model_coefficients_next",
"=",
"model_coefficients_next",
",",
"predicted_linear_response_next",
"=",
"predicted_linear_response_next",
",",
"response",
"=",
"response",
",",
"model",
"=",
"model",
",",
"dispersion",
"=",
"dispersion",
")",
"return",
"[",
"is_converged_next",
",",
"iter_",
"+",
"1",
",",
"model_coefficients_next",
",",
"predicted_linear_response_next",
",",
"]",
"# while not converged:",
"# fit_one_step",
"[",
"is_converged",
",",
"iter_",
",",
"model_coefficients",
",",
"predicted_linear_response",
",",
"]",
"=",
"tf",
".",
"while_loop",
"(",
"cond",
"=",
"lambda",
"is_converged",
",",
"*",
"args",
":",
"tf",
".",
"logical_not",
"(",
"is_converged",
")",
",",
"body",
"=",
"_body",
",",
"loop_vars",
"=",
"[",
"tf",
".",
"zeros",
"(",
"[",
"]",
",",
"np",
".",
"bool",
")",
",",
"# is_converged",
"tf",
".",
"zeros",
"(",
"[",
"]",
",",
"np",
".",
"int32",
")",
",",
"# iter_",
"model_coefficients_start",
",",
"predicted_linear_response_start",
",",
"]",
",",
"maximum_iterations",
"=",
"maximum_iterations",
")",
"return",
"[",
"model_coefficients",
",",
"predicted_linear_response",
",",
"is_converged",
",",
"iter_",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
fit_one_step
|
Runs one step of Fisher scoring.
Args:
model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row
represents a sample's features.
response: (Batch of) vector-shaped `Tensor` where each element represents a
sample's observed response (to the corresponding row of features). Must
have same `dtype` as `model_matrix`.
model: `tfp.glm.ExponentialFamily`-like instance used to construct the
negative log-likelihood loss, gradient, and expected Hessian (i.e., the
Fisher information matrix).
model_coefficients_start: Optional (batch of) vector-shaped `Tensor`
representing the initial model coefficients, one for each column in
`model_matrix`. Must have same `dtype` as `model_matrix`.
Default value: Zeros.
predicted_linear_response_start: Optional `Tensor` with `shape`, `dtype`
matching `response`; represents `offset` shifted initial linear
predictions based on `model_coefficients_start`.
Default value: `offset` if `model_coefficients is None`, and
`tf.linalg.matvec(model_matrix, model_coefficients_start) + offset`
otherwise.
l2_regularizer: Optional scalar `Tensor` representing L2 regularization
penalty, i.e.,
`loss(w) = sum{-log p(y[i]|x[i],w) : i=1..n} + l2_regularizer ||w||_2^2`.
Default value: `None` (i.e., no L2 regularization).
dispersion: Optional (batch of) `Tensor` representing `response` dispersion,
i.e., as in, `p(y|theta) := exp((y theta - A(theta)) / dispersion)`.
Must broadcast with rows of `model_matrix`.
Default value: `None` (i.e., "no dispersion").
offset: Optional `Tensor` representing constant shift applied to
`predicted_linear_response`. Must broadcast to `response`.
Default value: `None` (i.e., `tf.zeros_like(response)`).
learning_rate: Optional (batch of) scalar `Tensor` used to dampen iterative
progress. Typically only needed if optimization diverges; should be no
larger than `1` and typically very close to `1`.
Default value: `None` (i.e., `1`).
fast_unsafe_numerics: Optional Python `bool` indicating if solve should be
based on Cholesky or QR decomposition.
Default value: `True` (i.e., "prefer speed via Cholesky decomposition").
name: Python `str` used as name prefix to ops created by this function.
Default value: `"fit_one_step"`.
Returns:
model_coefficients: (Batch of) vector-shaped `Tensor`; represents the
next estimate of the model coefficients, one for each column in
`model_matrix`.
predicted_linear_response: `response`-shaped `Tensor` representing linear
predictions based on new `model_coefficients`, i.e.,
`tf.linalg.matvec(model_matrix, model_coefficients_next) + offset`.
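Each call performs one iteratively-reweighted-least-squares update. A plain-NumPy sketch of that update (no batching, no masking of non-finite rows, no regularization; `mean_fn`, `grad_mean_fn`, and `variance_fn` are hypothetical stand-ins for the pieces a `tfp.glm.ExponentialFamily` supplies):
```python
import numpy as np

def irls_step(X, y, eta, mean_fn, grad_mean_fn, variance_fn):
  mu, dmu, var = mean_fn(eta), grad_mean_fn(eta), variance_fn(eta)
  z = eta + (y - mu) / dmu   # adjusted linear response
  w = dmu / np.sqrt(var)     # per-sample weight
  coef, *_ = np.linalg.lstsq(X * w[:, None], z * w, rcond=None)
  return coef, X @ coef      # next coefficients, next linear response
```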
|
tensorflow_probability/python/glm/fisher_scoring.py
|
def fit_one_step(
model_matrix,
response,
model,
model_coefficients_start=None,
predicted_linear_response_start=None,
l2_regularizer=None,
dispersion=None,
offset=None,
learning_rate=None,
fast_unsafe_numerics=True,
name=None):
"""Runs one step of Fisher scoring.
Args:
model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row
represents a sample's features.
response: (Batch of) vector-shaped `Tensor` where each element represents a
sample's observed response (to the corresponding row of features). Must
have same `dtype` as `model_matrix`.
model: `tfp.glm.ExponentialFamily`-like instance used to construct the
negative log-likelihood loss, gradient, and expected Hessian (i.e., the
Fisher information matrix).
model_coefficients_start: Optional (batch of) vector-shaped `Tensor`
representing the initial model coefficients, one for each column in
`model_matrix`. Must have same `dtype` as `model_matrix`.
Default value: Zeros.
predicted_linear_response_start: Optional `Tensor` with `shape`, `dtype`
matching `response`; represents `offset` shifted initial linear
predictions based on `model_coefficients_start`.
Default value: `offset` if `model_coefficients is None`, and
`tf.linalg.matvec(model_matrix, model_coefficients_start) + offset`
otherwise.
l2_regularizer: Optional scalar `Tensor` representing L2 regularization
penalty, i.e.,
`loss(w) = sum{-log p(y[i]|x[i],w) : i=1..n} + l2_regularizer ||w||_2^2`.
Default value: `None` (i.e., no L2 regularization).
dispersion: Optional (batch of) `Tensor` representing `response` dispersion,
i.e., as in, `p(y|theta) := exp((y theta - A(theta)) / dispersion)`.
Must broadcast with rows of `model_matrix`.
Default value: `None` (i.e., "no dispersion").
offset: Optional `Tensor` representing constant shift applied to
`predicted_linear_response`. Must broadcast to `response`.
Default value: `None` (i.e., `tf.zeros_like(response)`).
learning_rate: Optional (batch of) scalar `Tensor` used to dampen iterative
progress. Typically only needed if optimization diverges; should be no
larger than `1` and typically very close to `1`.
Default value: `None` (i.e., `1`).
fast_unsafe_numerics: Optional Python `bool` indicating if solve should be
based on Cholesky or QR decomposition.
Default value: `True` (i.e., "prefer speed via Cholesky decomposition").
name: Python `str` used as name prefix to ops created by this function.
Default value: `"fit_one_step"`.
Returns:
model_coefficients: (Batch of) vector-shaped `Tensor`; represents the
next estimate of the model coefficients, one for each column in
`model_matrix`.
predicted_linear_response: `response`-shaped `Tensor` representing linear
predictions based on new `model_coefficients`, i.e.,
`tf.linalg.matvec(model_matrix, model_coefficients_next) + offset`.
"""
graph_deps = [model_matrix, response, model_coefficients_start,
predicted_linear_response_start, dispersion, learning_rate]
with tf.compat.v1.name_scope(name, 'fit_one_step', graph_deps):
[
model_matrix,
response,
model_coefficients_start,
predicted_linear_response_start,
offset,
] = prepare_args(
model_matrix,
response,
model_coefficients_start,
predicted_linear_response_start,
offset)
# Compute: mean, grad(mean, predicted_linear_response_start), and variance.
mean, variance, grad_mean = model(predicted_linear_response_start)
# If either `grad_mean` or `variance` is non-finite or zero, then we'll
# replace it with a value such that the row is zeroed out. Although this
# procedure may seem circuitous, it is necessary to ensure this algorithm is
# itself differentiable.
is_valid = (
tf.math.is_finite(grad_mean) & tf.not_equal(grad_mean, 0.)
& tf.math.is_finite(variance) & (variance > 0.))
def mask_if_invalid(x, mask):
mask = tf.fill(
tf.shape(input=x), value=np.array(mask, x.dtype.as_numpy_dtype))
return tf.where(is_valid, x, mask)
# Run one step of iteratively reweighted least-squares.
# Compute "`z`", the adjusted predicted linear response.
# z = predicted_linear_response_start
# + learning_rate * (response - mean) / grad_mean
z = (response - mean) / mask_if_invalid(grad_mean, 1.)
# TODO(jvdillon): Rather than use learning rate, we should consider using
# backtracking line search.
if learning_rate is not None:
z *= learning_rate[..., tf.newaxis]
z += predicted_linear_response_start
if offset is not None:
z -= offset
# Compute "`w`", the per-sample weight.
if dispersion is not None:
# For convenience, we'll now scale the variance by the dispersion factor.
variance *= dispersion
w = (
mask_if_invalid(grad_mean, 0.) *
tf.math.rsqrt(mask_if_invalid(variance, np.inf)))
a = model_matrix * w[..., tf.newaxis]
b = z * w
# Solve `min{ || A @ model_coefficients - b ||_2**2 : model_coefficients }`
# where `@` denotes `matmul`.
if l2_regularizer is None:
l2_regularizer = np.array(0, a.dtype.as_numpy_dtype)
else:
l2_regularizer_ = distribution_util.maybe_get_static_value(
l2_regularizer, a.dtype.as_numpy_dtype)
if l2_regularizer_ is not None:
l2_regularizer = l2_regularizer_
def _embed_l2_regularization():
"""Adds synthetic observations to implement L2 regularization."""
# `tf.matrix_solve_ls` does not respect the `l2_regularization` argument
# when `fast_unsafe_numerics` is `False`. This function adds synthetic
# observations to the data to implement the regularization instead.
# Adding observations `sqrt(l2_regularizer) * I` is mathematically
# equivalent to adding the term
# `-l2_regularizer ||coefficients||_2**2` to the log-likelihood.
num_model_coefficients = num_cols(model_matrix)
batch_shape = tf.shape(input=model_matrix)[:-2]
eye = tf.eye(
num_model_coefficients, batch_shape=batch_shape, dtype=a.dtype)
a_ = tf.concat([a, tf.sqrt(l2_regularizer) * eye], axis=-2)
b_ = distribution_util.pad(
b, count=num_model_coefficients, axis=-1, back=True)
# Return l2_regularizer=0 since it's now embedded.
l2_regularizer_ = np.array(0, a.dtype.as_numpy_dtype)
return a_, b_, l2_regularizer_
a, b, l2_regularizer = prefer_static.cond(
prefer_static.reduce_all([not(fast_unsafe_numerics),
l2_regularizer > 0.]),
_embed_l2_regularization,
lambda: (a, b, l2_regularizer))
model_coefficients_next = tf.linalg.lstsq(
a,
b[..., tf.newaxis],
fast=fast_unsafe_numerics,
l2_regularizer=l2_regularizer,
name='model_coefficients_next')
model_coefficients_next = model_coefficients_next[..., 0]
# TODO(b/79122261): The approach used in `matrix_solve_ls` could be made
# faster by avoiding explicitly forming Q and instead keeping the
# factorization in 'implicit' form with stacked (rescaled) Householder
# vectors underneath the 'R' and then applying the (accumulated)
# reflectors in the appropriate order to apply Q'. However, we don't
# presently do this because we lack core TF functionality. For reference,
# the vanilla QR approach is:
# q, r = tf.linalg.qr(a)
# c = tf.matmul(q, b, adjoint_a=True)
# model_coefficients_next = tf.matrix_triangular_solve(
# r, c, lower=False, name='model_coefficients_next')
predicted_linear_response_next = calculate_linear_predictor(
model_matrix,
model_coefficients_next,
offset,
name='predicted_linear_response_next')
return model_coefficients_next, predicted_linear_response_next
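The synthetic-observation trick in `_embed_l2_regularization` can be checked in isolation. A small NumPy sketch (random data; sizes are arbitrary) showing that appending `sqrt(l2_regularizer) * I` rows to `a` and zeros to `b` reproduces the closed-form ridge solution:
```python
import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=(50, 3))
b = rng.normal(size=50)
lam = 0.1
# Augmented least-squares minimizes ||a w - b||^2 + lam ||w||^2.
a_aug = np.vstack([a, np.sqrt(lam) * np.eye(3)])
b_aug = np.concatenate([b, np.zeros(3)])
w_aug, *_ = np.linalg.lstsq(a_aug, b_aug, rcond=None)
# Closed-form ridge solution for comparison.
w_ridge = np.linalg.solve(a.T @ a + lam * np.eye(3), a.T @ b)
assert np.allclose(w_aug, w_ridge)
```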
|
def fit_one_step(
model_matrix,
response,
model,
model_coefficients_start=None,
predicted_linear_response_start=None,
l2_regularizer=None,
dispersion=None,
offset=None,
learning_rate=None,
fast_unsafe_numerics=True,
name=None):
"""Runs one step of Fisher scoring.
Args:
model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row
represents a sample's features.
response: (Batch of) vector-shaped `Tensor` where each element represents a
sample's observed response (to the corresponding row of features). Must
have same `dtype` as `model_matrix`.
model: `tfp.glm.ExponentialFamily`-like instance used to construct the
negative log-likelihood loss, gradient, and expected Hessian (i.e., the
Fisher information matrix).
model_coefficients_start: Optional (batch of) vector-shaped `Tensor`
representing the initial model coefficients, one for each column in
`model_matrix`. Must have same `dtype` as `model_matrix`.
Default value: Zeros.
predicted_linear_response_start: Optional `Tensor` with `shape`, `dtype`
matching `response`; represents `offset` shifted initial linear
predictions based on `model_coefficients_start`.
Default value: `offset` if `model_coefficients is None`, and
`tf.linalg.matvec(model_matrix, model_coefficients_start) + offset`
otherwise.
l2_regularizer: Optional scalar `Tensor` representing L2 regularization
penalty, i.e.,
`loss(w) = sum{-log p(y[i]|x[i],w) : i=1..n} + l2_regularizer ||w||_2^2`.
Default value: `None` (i.e., no L2 regularization).
dispersion: Optional (batch of) `Tensor` representing `response` dispersion,
i.e., as in, `p(y|theta) := exp((y theta - A(theta)) / dispersion)`.
Must broadcast with rows of `model_matrix`.
Default value: `None` (i.e., "no dispersion").
offset: Optional `Tensor` representing constant shift applied to
`predicted_linear_response`. Must broadcast to `response`.
Default value: `None` (i.e., `tf.zeros_like(response)`).
learning_rate: Optional (batch of) scalar `Tensor` used to dampen iterative
progress. Typically only needed if optimization diverges; should be no
larger than `1` and typically very close to `1`.
Default value: `None` (i.e., `1`).
fast_unsafe_numerics: Optional Python `bool` indicating if solve should be
based on Cholesky or QR decomposition.
Default value: `True` (i.e., "prefer speed via Cholesky decomposition").
name: Python `str` used as name prefix to ops created by this function.
Default value: `"fit_one_step"`.
Returns:
model_coefficients: (Batch of) vector-shaped `Tensor`; represents the
next estimate of the model coefficients, one for each column in
`model_matrix`.
predicted_linear_response: `response`-shaped `Tensor` representing linear
predictions based on new `model_coefficients`, i.e.,
`tf.linalg.matvec(model_matrix, model_coefficients_next) + offset`.
"""
graph_deps = [model_matrix, response, model_coefficients_start,
predicted_linear_response_start, dispersion, learning_rate]
with tf.compat.v1.name_scope(name, 'fit_one_step', graph_deps):
[
model_matrix,
response,
model_coefficients_start,
predicted_linear_response_start,
offset,
] = prepare_args(
model_matrix,
response,
model_coefficients_start,
predicted_linear_response_start,
offset)
# Compute: mean, grad(mean, predicted_linear_response_start), and variance.
mean, variance, grad_mean = model(predicted_linear_response_start)
# If either `grad_mean` or `variance` is non-finite or zero, then we'll
# replace it with a value such that the row is zeroed out. Although this
# procedure may seem circuitous, it is necessary to ensure this algorithm is
# itself differentiable.
is_valid = (
tf.math.is_finite(grad_mean) & tf.not_equal(grad_mean, 0.)
& tf.math.is_finite(variance) & (variance > 0.))
def mask_if_invalid(x, mask):
mask = tf.fill(
tf.shape(input=x), value=np.array(mask, x.dtype.as_numpy_dtype))
return tf.where(is_valid, x, mask)
# Run one step of iteratively reweighted least-squares.
# Compute "`z`", the adjusted predicted linear response.
# z = predicted_linear_response_start
# + learning_rate * (response - mean) / grad_mean
z = (response - mean) / mask_if_invalid(grad_mean, 1.)
# TODO(jvdillon): Rather than use learning rate, we should consider using
# backtracking line search.
if learning_rate is not None:
z *= learning_rate[..., tf.newaxis]
z += predicted_linear_response_start
if offset is not None:
z -= offset
# Compute "`w`", the per-sample weight.
if dispersion is not None:
# For convenience, we'll now scale the variance by the dispersion factor.
variance *= dispersion
w = (
mask_if_invalid(grad_mean, 0.) *
tf.math.rsqrt(mask_if_invalid(variance, np.inf)))
a = model_matrix * w[..., tf.newaxis]
b = z * w
# Solve `min{ || A @ model_coefficients - b ||_2**2 : model_coefficients }`
# where `@` denotes `matmul`.
if l2_regularizer is None:
l2_regularizer = np.array(0, a.dtype.as_numpy_dtype)
else:
l2_regularizer_ = distribution_util.maybe_get_static_value(
l2_regularizer, a.dtype.as_numpy_dtype)
if l2_regularizer_ is not None:
l2_regularizer = l2_regularizer_
def _embed_l2_regularization():
"""Adds synthetic observations to implement L2 regularization."""
# `tf.matrix_solve_ls` does not respect the `l2_regularization` argument
# when `fast_unsafe_numerics` is `False`. This function adds synthetic
# observations to the data to implement the regularization instead.
# Adding observations `sqrt(l2_regularizer) * I` is mathematically
# equivalent to adding the term
# `-l2_regularizer ||coefficients||_2**2` to the log-likelihood.
num_model_coefficients = num_cols(model_matrix)
batch_shape = tf.shape(input=model_matrix)[:-2]
eye = tf.eye(
num_model_coefficients, batch_shape=batch_shape, dtype=a.dtype)
a_ = tf.concat([a, tf.sqrt(l2_regularizer) * eye], axis=-2)
b_ = distribution_util.pad(
b, count=num_model_coefficients, axis=-1, back=True)
# Return l2_regularizer=0 since it's now embedded.
l2_regularizer_ = np.array(0, a.dtype.as_numpy_dtype)
return a_, b_, l2_regularizer_
a, b, l2_regularizer = prefer_static.cond(
prefer_static.reduce_all([not(fast_unsafe_numerics),
l2_regularizer > 0.]),
_embed_l2_regularization,
lambda: (a, b, l2_regularizer))
model_coefficients_next = tf.linalg.lstsq(
a,
b[..., tf.newaxis],
fast=fast_unsafe_numerics,
l2_regularizer=l2_regularizer,
name='model_coefficients_next')
model_coefficients_next = model_coefficients_next[..., 0]
# TODO(b/79122261): The approach used in `matrix_solve_ls` could be made
# faster by avoiding explicitly forming Q and instead keeping the
# factorization in 'implicit' form with stacked (rescaled) Householder
# vectors underneath the 'R' and then applying the (accumulated)
# reflectors in the appropriate order to apply Q'. However, we don't
# presently do this because we lack core TF functionality. For reference,
# the vanilla QR approach is:
# q, r = tf.linalg.qr(a)
# c = tf.matmul(q, b, adjoint_a=True)
# model_coefficients_next = tf.matrix_triangular_solve(
# r, c, lower=False, name='model_coefficients_next')
predicted_linear_response_next = calculate_linear_predictor(
model_matrix,
model_coefficients_next,
offset,
name='predicted_linear_response_next')
return model_coefficients_next, predicted_linear_response_next
|
[
"Runs",
"one",
"step",
"of",
"Fisher",
"scoring",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/fisher_scoring.py#L259-L439
|
[
"def",
"fit_one_step",
"(",
"model_matrix",
",",
"response",
",",
"model",
",",
"model_coefficients_start",
"=",
"None",
",",
"predicted_linear_response_start",
"=",
"None",
",",
"l2_regularizer",
"=",
"None",
",",
"dispersion",
"=",
"None",
",",
"offset",
"=",
"None",
",",
"learning_rate",
"=",
"None",
",",
"fast_unsafe_numerics",
"=",
"True",
",",
"name",
"=",
"None",
")",
":",
"graph_deps",
"=",
"[",
"model_matrix",
",",
"response",
",",
"model_coefficients_start",
",",
"predicted_linear_response_start",
",",
"dispersion",
",",
"learning_rate",
"]",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'fit_one_step'",
",",
"graph_deps",
")",
":",
"[",
"model_matrix",
",",
"response",
",",
"model_coefficients_start",
",",
"predicted_linear_response_start",
",",
"offset",
",",
"]",
"=",
"prepare_args",
"(",
"model_matrix",
",",
"response",
",",
"model_coefficients_start",
",",
"predicted_linear_response_start",
",",
"offset",
")",
"# Compute: mean, grad(mean, predicted_linear_response_start), and variance.",
"mean",
",",
"variance",
",",
"grad_mean",
"=",
"model",
"(",
"predicted_linear_response_start",
")",
"# If either `grad_mean` or `variance is non-finite or zero, then we'll",
"# replace it with a value such that the row is zeroed out. Although this",
"# procedure may seem circuitous, it is necessary to ensure this algorithm is",
"# itself differentiable.",
"is_valid",
"=",
"(",
"tf",
".",
"math",
".",
"is_finite",
"(",
"grad_mean",
")",
"&",
"tf",
".",
"not_equal",
"(",
"grad_mean",
",",
"0.",
")",
"&",
"tf",
".",
"math",
".",
"is_finite",
"(",
"variance",
")",
"&",
"(",
"variance",
">",
"0.",
")",
")",
"def",
"mask_if_invalid",
"(",
"x",
",",
"mask",
")",
":",
"mask",
"=",
"tf",
".",
"fill",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
",",
"value",
"=",
"np",
".",
"array",
"(",
"mask",
",",
"x",
".",
"dtype",
".",
"as_numpy_dtype",
")",
")",
"return",
"tf",
".",
"where",
"(",
"is_valid",
",",
"x",
",",
"mask",
")",
"# Run one step of iteratively reweighted least-squares.",
"# Compute \"`z`\", the adjusted predicted linear response.",
"# z = predicted_linear_response_start",
"# + learning_rate * (response - mean) / grad_mean",
"z",
"=",
"(",
"response",
"-",
"mean",
")",
"/",
"mask_if_invalid",
"(",
"grad_mean",
",",
"1.",
")",
"# TODO(jvdillon): Rather than use learning rate, we should consider using",
"# backtracking line search.",
"if",
"learning_rate",
"is",
"not",
"None",
":",
"z",
"*=",
"learning_rate",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"z",
"+=",
"predicted_linear_response_start",
"if",
"offset",
"is",
"not",
"None",
":",
"z",
"-=",
"offset",
"# Compute \"`w`\", the per-sample weight.",
"if",
"dispersion",
"is",
"not",
"None",
":",
"# For convenience, we'll now scale the variance by the dispersion factor.",
"variance",
"*=",
"dispersion",
"w",
"=",
"(",
"mask_if_invalid",
"(",
"grad_mean",
",",
"0.",
")",
"*",
"tf",
".",
"math",
".",
"rsqrt",
"(",
"mask_if_invalid",
"(",
"variance",
",",
"np",
".",
"inf",
")",
")",
")",
"a",
"=",
"model_matrix",
"*",
"w",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"b",
"=",
"z",
"*",
"w",
"# Solve `min{ || A @ model_coefficients - b ||_2**2 : model_coefficients }`",
"# where `@` denotes `matmul`.",
"if",
"l2_regularizer",
"is",
"None",
":",
"l2_regularizer",
"=",
"np",
".",
"array",
"(",
"0",
",",
"a",
".",
"dtype",
".",
"as_numpy_dtype",
")",
"else",
":",
"l2_regularizer_",
"=",
"distribution_util",
".",
"maybe_get_static_value",
"(",
"l2_regularizer",
",",
"a",
".",
"dtype",
".",
"as_numpy_dtype",
")",
"if",
"l2_regularizer_",
"is",
"not",
"None",
":",
"l2_regularizer",
"=",
"l2_regularizer_",
"def",
"_embed_l2_regularization",
"(",
")",
":",
"\"\"\"Adds synthetic observations to implement L2 regularization.\"\"\"",
"# `tf.matrix_solve_ls` does not respect the `l2_regularization` argument",
"# when `fast_unsafe_numerics` is `False`. This function adds synthetic",
"# observations to the data to implement the regularization instead.",
"# Adding observations `sqrt(l2_regularizer) * I` is mathematically",
"# equivalent to adding the term",
"# `-l2_regularizer ||coefficients||_2**2` to the log-likelihood.",
"num_model_coefficients",
"=",
"num_cols",
"(",
"model_matrix",
")",
"batch_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"model_matrix",
")",
"[",
":",
"-",
"2",
"]",
"eye",
"=",
"tf",
".",
"eye",
"(",
"num_model_coefficients",
",",
"batch_shape",
"=",
"batch_shape",
",",
"dtype",
"=",
"a",
".",
"dtype",
")",
"a_",
"=",
"tf",
".",
"concat",
"(",
"[",
"a",
",",
"tf",
".",
"sqrt",
"(",
"l2_regularizer",
")",
"*",
"eye",
"]",
",",
"axis",
"=",
"-",
"2",
")",
"b_",
"=",
"distribution_util",
".",
"pad",
"(",
"b",
",",
"count",
"=",
"num_model_coefficients",
",",
"axis",
"=",
"-",
"1",
",",
"back",
"=",
"True",
")",
"# Return l2_regularizer=0 since its now embedded.",
"l2_regularizer_",
"=",
"np",
".",
"array",
"(",
"0",
",",
"a",
".",
"dtype",
".",
"as_numpy_dtype",
")",
"return",
"a_",
",",
"b_",
",",
"l2_regularizer_",
"a",
",",
"b",
",",
"l2_regularizer",
"=",
"prefer_static",
".",
"cond",
"(",
"prefer_static",
".",
"reduce_all",
"(",
"[",
"not",
"(",
"fast_unsafe_numerics",
")",
",",
"l2_regularizer",
">",
"0.",
"]",
")",
",",
"_embed_l2_regularization",
",",
"lambda",
":",
"(",
"a",
",",
"b",
",",
"l2_regularizer",
")",
")",
"model_coefficients_next",
"=",
"tf",
".",
"linalg",
".",
"lstsq",
"(",
"a",
",",
"b",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
",",
"fast",
"=",
"fast_unsafe_numerics",
",",
"l2_regularizer",
"=",
"l2_regularizer",
",",
"name",
"=",
"'model_coefficients_next'",
")",
"model_coefficients_next",
"=",
"model_coefficients_next",
"[",
"...",
",",
"0",
"]",
"# TODO(b/79122261): The approach used in `matrix_solve_ls` could be made",
"# faster by avoiding explicitly forming Q and instead keeping the",
"# factorization in 'implicit' form with stacked (rescaled) Householder",
"# vectors underneath the 'R' and then applying the (accumulated)",
"# reflectors in the appropriate order to apply Q'. However, we don't",
"# presently do this because we lack core TF functionality. For reference,",
"# the vanilla QR approach is:",
"# q, r = tf.linalg.qr(a)",
"# c = tf.matmul(q, b, adjoint_a=True)",
"# model_coefficients_next = tf.matrix_triangular_solve(",
"# r, c, lower=False, name='model_coefficients_next')",
"predicted_linear_response_next",
"=",
"calculate_linear_predictor",
"(",
"model_matrix",
",",
"model_coefficients_next",
",",
"offset",
",",
"name",
"=",
"'predicted_linear_response_next'",
")",
"return",
"model_coefficients_next",
",",
"predicted_linear_response_next"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
convergence_criteria_small_relative_norm_weights_change
|
Returns Python `callable` which indicates fitting procedure has converged.
Writing old, new `model_coefficients` as `w0`, `w1`, this function
defines convergence as,
```python
relative_euclidean_norm = (tf.norm(w0 - w1, ord=2, axis=-1) /
(1. + tf.norm(w0, ord=2, axis=-1)))
reduce_all(relative_euclidean_norm < tolerance)
```
where `tf.norm(x, ord=2)` denotes the [Euclidean norm](
https://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) of `x`.
Args:
tolerance: `float`-like `Tensor` indicating convergence, i.e., when
`max relative Euclidean norm weights difference < tolerance`.
Default value: `1e-5`.
norm_order: Order of the norm. Default value: `2` (i.e., "Euclidean norm").
Returns:
convergence_criteria_fn: Python `callable` which returns a `bool` `Tensor`
indicating the fitting procedure has converged. (See inner function
specification for argument signature.)
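A custom criterion only needs to accept the keyword signature documented in `fit`. A minimal sketch of an alternative (largest absolute coefficient change; the outer function name is hypothetical):
```python
import tensorflow as tf

def convergence_criteria_max_abs_change(tolerance=1e-6):
  def convergence_criteria_fn(
      is_converged_previous, iter_, model_coefficients_previous,
      predicted_linear_response_previous, model_coefficients_next,
      predicted_linear_response_next, response, model, dispersion):
    # Converge once the largest per-coefficient change is below tolerance.
    max_abs_change = tf.reduce_max(
        tf.abs(model_coefficients_next - model_coefficients_previous),
        axis=-1)
    return (iter_ > 0) & tf.reduce_all(max_abs_change < tolerance)
  return convergence_criteria_fn
```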
|
tensorflow_probability/python/glm/fisher_scoring.py
|
def convergence_criteria_small_relative_norm_weights_change(
tolerance=1e-5,
norm_order=2):
"""Returns Python `callable` which indicates fitting procedure has converged.
Writing old, new `model_coefficients` as `w0`, `w1`, this function
defines convergence as,
```python
relative_euclidean_norm = (tf.norm(w0 - w1, ord=2, axis=-1) /
(1. + tf.norm(w0, ord=2, axis=-1)))
reduce_all(relative_euclidean_norm < tolerance)
```
where `tf.norm(x, ord=2)` denotes the [Euclidean norm](
https://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) of `x`.
Args:
tolerance: `float`-like `Tensor` indicating convergence, i.e., when
`max relative Euclidean norm weights difference < tolerance`.
Default value: `1e-5`.
norm_order: Order of the norm. Default value: `2` (i.e., "Euclidean norm").
Returns:
convergence_criteria_fn: Python `callable` which returns a `bool` `Tensor`
indicating the fitting procedure has converged. (See inner function
specification for argument signature.)
"""
def convergence_criteria_fn(
is_converged_previous, # pylint: disable=unused-argument
iter_,
model_coefficients_previous,
predicted_linear_response_previous, # pylint: disable=unused-argument
model_coefficients_next,
predicted_linear_response_next, # pylint: disable=unused-argument
response, # pylint: disable=unused-argument
model, # pylint: disable=unused-argument
dispersion): # pylint: disable=unused-argument
"""Returns `bool` `Tensor` indicating if fitting procedure has converged.
Args:
is_converged_previous: "old" convergence results.
iter_: Iteration number.
model_coefficients_previous: "old" `model_coefficients`.
predicted_linear_response_previous: "old" `predicted_linear_response`.
model_coefficients_next: "new" `model_coefficients`.
predicted_linear_response_next: "new" `predicted_linear_response`.
response: (Batch of) vector-shaped `Tensor` where each element represents
a sample's observed response (to the corresponding row of features).
Must have same `dtype` as `model_matrix`.
model: `tfp.glm.ExponentialFamily`-like instance used to construct the
negative log-likelihood loss, gradient, and expected Hessian (i.e., the
Fisher information matrix).
dispersion: `Tensor` representing `response` dispersion, i.e., as in:
`p(y|theta) := exp((y theta - A(theta)) / dispersion)`. Must broadcast
with rows of `model_matrix`.
Default value: `None` (i.e., "no dispersion").
Returns:
is_converged: `bool` `Tensor`.
"""
relative_euclidean_norm = (
tf.norm(
tensor=model_coefficients_previous - model_coefficients_next,
ord=norm_order,
axis=-1) /
(1. +
tf.norm(tensor=model_coefficients_previous, ord=norm_order, axis=-1)))
return (iter_ > 0) & tf.reduce_all(
input_tensor=relative_euclidean_norm < tolerance)
return convergence_criteria_fn
|
def convergence_criteria_small_relative_norm_weights_change(
tolerance=1e-5,
norm_order=2):
"""Returns Python `callable` which indicates fitting procedure has converged.
Writing old, new `model_coefficients` as `w0`, `w1`, this function
defines convergence as,
```python
relative_euclidean_norm = (tf.norm(w0 - w1, ord=2, axis=-1) /
(1. + tf.norm(w0, ord=2, axis=-1)))
reduce_all(relative_euclidean_norm < tolerance)
```
where `tf.norm(x, ord=2)` denotes the [Euclidean norm](
https://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) of `x`.
Args:
tolerance: `float`-like `Tensor` indicating convergence, i.e., when
`max relative Euclidean norm weights difference < tolerance`.
Default value: `1e-5`.
norm_order: Order of the norm. Default value: `2` (i.e., "Euclidean norm").
Returns:
convergence_criteria_fn: Python `callable` which returns a `bool` `Tensor`
indicating the fitting procedure has converged. (See inner function
specification for argument signature.)
"""
def convergence_criteria_fn(
is_converged_previous, # pylint: disable=unused-argument
iter_,
model_coefficients_previous,
predicted_linear_response_previous, # pylint: disable=unused-argument
model_coefficients_next,
predicted_linear_response_next, # pylint: disable=unused-argument
response, # pylint: disable=unused-argument
model, # pylint: disable=unused-argument
dispersion): # pylint: disable=unused-argument
"""Returns `bool` `Tensor` indicating if fitting procedure has converged.
Args:
is_converged_previous: "old" convergence results.
iter_: Iteration number.
model_coefficients_previous: "old" `model_coefficients`.
predicted_linear_response_previous: "old" `predicted_linear_response`.
model_coefficients_next: "new" `model_coefficients`.
predicted_linear_response_next: "new" `predicted_linear_response`.
response: (Batch of) vector-shaped `Tensor` where each element represents
a sample's observed response (to the corresponding row of features).
Must have same `dtype` as `model_matrix`.
model: `tfp.glm.ExponentialFamily`-like instance used to construct the
negative log-likelihood loss, gradient, and expected Hessian (i.e., the
Fisher information matrix).
dispersion: `Tensor` representing `response` dispersion, i.e., as in:
`p(y|theta) := exp((y theta - A(theta)) / dispersion)`. Must broadcast
with rows of `model_matrix`.
Default value: `None` (i.e., "no dispersion").
Returns:
is_converged: `bool` `Tensor`.
"""
relative_euclidean_norm = (
tf.norm(
tensor=model_coefficients_previous - model_coefficients_next,
ord=norm_order,
axis=-1) /
(1. +
tf.norm(tensor=model_coefficients_previous, ord=norm_order, axis=-1)))
return (iter_ > 0) & tf.reduce_all(
input_tensor=relative_euclidean_norm < tolerance)
return convergence_criteria_fn
|
[
"Returns",
"Python",
"callable",
"which",
"indicates",
"fitting",
"procedure",
"has",
"converged",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/fisher_scoring.py#L442-L514
|
[
"def",
"convergence_criteria_small_relative_norm_weights_change",
"(",
"tolerance",
"=",
"1e-5",
",",
"norm_order",
"=",
"2",
")",
":",
"def",
"convergence_criteria_fn",
"(",
"is_converged_previous",
",",
"# pylint: disable=unused-argument",
"iter_",
",",
"model_coefficients_previous",
",",
"predicted_linear_response_previous",
",",
"# pylint: disable=unused-argument",
"model_coefficients_next",
",",
"predicted_linear_response_next",
",",
"# pylint: disable=unused-argument",
"response",
",",
"# pylint: disable=unused-argument",
"model",
",",
"# pylint: disable=unused-argument",
"dispersion",
")",
":",
"# pylint: disable=unused-argument",
"\"\"\"Returns `bool` `Tensor` indicating if fitting procedure has converged.\n\n Args:\n is_converged_previous: \"old\" convergence results.\n iter_: Iteration number.\n model_coefficients_previous: \"old\" `model_coefficients`.\n predicted_linear_response_previous: \"old\" `predicted_linear_response`.\n model_coefficients_next: \"new\" `model_coefficients`.\n predicted_linear_response_next: \"new: `predicted_linear_response`.\n response: (Batch of) vector-shaped `Tensor` where each element represents\n a sample's observed response (to the corresponding row of features).\n Must have same `dtype` as `model_matrix`.\n model: `tfp.glm.ExponentialFamily`-like instance used to construct the\n negative log-likelihood loss, gradient, and expected Hessian (i.e., the\n Fisher information matrix).\n dispersion: `Tensor` representing `response` dispersion, i.e., as in:\n `p(y|theta) := exp((y theta - A(theta)) / dispersion)`. Must broadcast\n with rows of `model_matrix`.\n Default value: `None` (i.e., \"no dispersion\").\n\n Returns:\n is_converged: `bool` `Tensor`.\n \"\"\"",
"relative_euclidean_norm",
"=",
"(",
"tf",
".",
"norm",
"(",
"tensor",
"=",
"model_coefficients_previous",
"-",
"model_coefficients_next",
",",
"ord",
"=",
"norm_order",
",",
"axis",
"=",
"-",
"1",
")",
"/",
"(",
"1.",
"+",
"tf",
".",
"norm",
"(",
"tensor",
"=",
"model_coefficients_previous",
",",
"ord",
"=",
"norm_order",
",",
"axis",
"=",
"-",
"1",
")",
")",
")",
"return",
"(",
"iter_",
">",
"0",
")",
"&",
"tf",
".",
"reduce_all",
"(",
"input_tensor",
"=",
"relative_euclidean_norm",
"<",
"tolerance",
")",
"return",
"convergence_criteria_fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
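A hypothetical usage sketch of this criterion (not part of the dataset row above; it assumes the public exports `tfp.glm.fit`, `tfp.glm.Normal`, and `tfp.glm.convergence_criteria_small_relative_norm_weights_change` from the TFP release at this sha):

# Hypothetical usage sketch; assumptions noted above.
import numpy as np
import tensorflow_probability as tfp

x = np.random.randn(100, 3).astype(np.float32)
y = x.dot(np.array([1., -2., 0.5], np.float32))

model_coefficients, linear_response, is_converged, num_iter = tfp.glm.fit(
    model_matrix=x,
    response=y,
    model=tfp.glm.Normal(),
    convergence_criteria_fn=(
        tfp.glm.convergence_criteria_small_relative_norm_weights_change(
            tolerance=1e-6, norm_order=2)))
# Fitting stops once ||w_prev - w_next|| / (1 + ||w_prev||) < 1e-6 for every
# batch member, and only after at least one iteration has run.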
test
|
prepare_args
|
Helper to `fit` which sanitizes input args.
Args:
model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row
represents a sample's features.
response: (Batch of) vector-shaped `Tensor` where each element represents a
sample's observed response (to the corresponding row of features). Must
have same `dtype` as `model_matrix`.
model_coefficients: Optional (batch of) vector-shaped `Tensor` representing
the model coefficients, one for each column in `model_matrix`. Must have
same `dtype` as `model_matrix`.
Default value: `tf.zeros(tf.shape(model_matrix)[-1], model_matrix.dtype)`.
predicted_linear_response: Optional `Tensor` with `shape`, `dtype` matching
`response`; represents `offset` shifted initial linear predictions based
on current `model_coefficients`.
    Default value: `offset` if `model_coefficients is None`, and
      `tf.linalg.matvec(model_matrix, model_coefficients) + offset`
      otherwise.
offset: Optional `Tensor` with `shape`, `dtype` matching `response`;
represents constant shift applied to `predicted_linear_response`.
Default value: `None` (i.e., `tf.zeros_like(response)`).
name: Python `str` used as name prefix to ops created by this function.
Default value: `"prepare_args"`.
Returns:
model_matrix: A `Tensor` with `shape`, `dtype` and values of the
`model_matrix` argument.
response: A `Tensor` with `shape`, `dtype` and values of the
`response` argument.
  model_coefficients: A `Tensor` with `shape`, `dtype` and
    values of the `model_coefficients` argument if specified.
    A (batch of) vector-shaped `Tensor` with `dtype` matching `model_matrix`
    containing the default starting point otherwise.
predicted_linear_response: A `Tensor` with `shape`, `dtype` and
values of the `predicted_linear_response` argument if specified.
A `Tensor` with `shape`, `dtype` matching `response` containing the
default value otherwise.
offset: A `Tensor` with `shape`, `dtype` and values of the `offset` argument
if specified or `None` otherwise.
|
tensorflow_probability/python/glm/fisher_scoring.py
|
def prepare_args(model_matrix,
response,
model_coefficients,
predicted_linear_response,
offset,
name=None):
"""Helper to `fit` which sanitizes input args.
Args:
model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row
represents a sample's features.
response: (Batch of) vector-shaped `Tensor` where each element represents a
sample's observed response (to the corresponding row of features). Must
have same `dtype` as `model_matrix`.
model_coefficients: Optional (batch of) vector-shaped `Tensor` representing
the model coefficients, one for each column in `model_matrix`. Must have
same `dtype` as `model_matrix`.
Default value: `tf.zeros(tf.shape(model_matrix)[-1], model_matrix.dtype)`.
predicted_linear_response: Optional `Tensor` with `shape`, `dtype` matching
`response`; represents `offset` shifted initial linear predictions based
on current `model_coefficients`.
      Default value: `offset` if `model_coefficients is None`, and
        `tf.linalg.matvec(model_matrix, model_coefficients) + offset`
        otherwise.
offset: Optional `Tensor` with `shape`, `dtype` matching `response`;
represents constant shift applied to `predicted_linear_response`.
Default value: `None` (i.e., `tf.zeros_like(response)`).
name: Python `str` used as name prefix to ops created by this function.
Default value: `"prepare_args"`.
Returns:
model_matrix: A `Tensor` with `shape`, `dtype` and values of the
`model_matrix` argument.
response: A `Tensor` with `shape`, `dtype` and values of the
`response` argument.
    model_coefficients: A `Tensor` with `shape`, `dtype` and
      values of the `model_coefficients` argument if specified.
      A (batch of) vector-shaped `Tensor` with `dtype` matching `model_matrix`
      containing the default starting point otherwise.
predicted_linear_response: A `Tensor` with `shape`, `dtype` and
values of the `predicted_linear_response` argument if specified.
A `Tensor` with `shape`, `dtype` matching `response` containing the
default value otherwise.
offset: A `Tensor` with `shape`, `dtype` and values of the `offset` argument
if specified or `None` otherwise.
"""
graph_deps = [model_matrix, response, model_coefficients,
predicted_linear_response, offset]
with tf.compat.v1.name_scope(name, 'prepare_args', graph_deps):
dtype = dtype_util.common_dtype(graph_deps, np.float32)
model_matrix = tf.convert_to_tensor(
value=model_matrix, dtype=dtype, name='model_matrix')
if offset is not None:
offset = tf.convert_to_tensor(value=offset, dtype=dtype, name='offset')
response = tf.convert_to_tensor(
value=response, dtype=dtype, name='response')
use_default_model_coefficients = model_coefficients is None
if use_default_model_coefficients:
# User did not supply model coefficients; assume they're all zero.
batch_shape = tf.shape(input=model_matrix)[:-2]
num_columns = tf.shape(input=model_matrix)[-1]
model_coefficients = tf.zeros(
shape=tf.concat([batch_shape, [num_columns]], axis=0),
dtype=dtype, name='model_coefficients')
else:
# User did supply model coefficients; convert to Tensor in case it's
# numpy or literal.
model_coefficients = tf.convert_to_tensor(
value=model_coefficients, dtype=dtype, name='model_coefficients')
if predicted_linear_response is None:
if use_default_model_coefficients:
# Since we're using zeros for model_coefficients, we know the predicted
# linear response will also be all zeros.
if offset is None:
predicted_linear_response = tf.zeros_like(
response, dtype, name='predicted_linear_response')
else:
predicted_linear_response = tf.broadcast_to(
offset,
tf.shape(input=response),
name='predicted_linear_response')
else:
# We were given model_coefficients but not the predicted linear
# response.
predicted_linear_response = calculate_linear_predictor(
model_matrix, model_coefficients, offset)
else:
predicted_linear_response = tf.convert_to_tensor(
value=predicted_linear_response,
dtype=dtype,
name='predicted_linear_response')
return [
model_matrix,
response,
model_coefficients,
predicted_linear_response,
offset,
]
|
def prepare_args(model_matrix,
response,
model_coefficients,
predicted_linear_response,
offset,
name=None):
"""Helper to `fit` which sanitizes input args.
Args:
model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row
represents a sample's features.
response: (Batch of) vector-shaped `Tensor` where each element represents a
sample's observed response (to the corresponding row of features). Must
have same `dtype` as `model_matrix`.
model_coefficients: Optional (batch of) vector-shaped `Tensor` representing
the model coefficients, one for each column in `model_matrix`. Must have
same `dtype` as `model_matrix`.
Default value: `tf.zeros(tf.shape(model_matrix)[-1], model_matrix.dtype)`.
predicted_linear_response: Optional `Tensor` with `shape`, `dtype` matching
`response`; represents `offset` shifted initial linear predictions based
on current `model_coefficients`.
      Default value: `offset` if `model_coefficients is None`, and
        `tf.linalg.matvec(model_matrix, model_coefficients) + offset`
        otherwise.
offset: Optional `Tensor` with `shape`, `dtype` matching `response`;
represents constant shift applied to `predicted_linear_response`.
Default value: `None` (i.e., `tf.zeros_like(response)`).
name: Python `str` used as name prefix to ops created by this function.
Default value: `"prepare_args"`.
Returns:
model_matrix: A `Tensor` with `shape`, `dtype` and values of the
`model_matrix` argument.
response: A `Tensor` with `shape`, `dtype` and values of the
`response` argument.
    model_coefficients: A `Tensor` with `shape`, `dtype` and
      values of the `model_coefficients` argument if specified.
      A (batch of) vector-shaped `Tensor` with `dtype` matching `model_matrix`
      containing the default starting point otherwise.
predicted_linear_response: A `Tensor` with `shape`, `dtype` and
values of the `predicted_linear_response` argument if specified.
A `Tensor` with `shape`, `dtype` matching `response` containing the
default value otherwise.
offset: A `Tensor` with `shape`, `dtype` and values of the `offset` argument
if specified or `None` otherwise.
"""
graph_deps = [model_matrix, response, model_coefficients,
predicted_linear_response, offset]
with tf.compat.v1.name_scope(name, 'prepare_args', graph_deps):
dtype = dtype_util.common_dtype(graph_deps, np.float32)
model_matrix = tf.convert_to_tensor(
value=model_matrix, dtype=dtype, name='model_matrix')
if offset is not None:
offset = tf.convert_to_tensor(value=offset, dtype=dtype, name='offset')
response = tf.convert_to_tensor(
value=response, dtype=dtype, name='response')
use_default_model_coefficients = model_coefficients is None
if use_default_model_coefficients:
# User did not supply model coefficients; assume they're all zero.
batch_shape = tf.shape(input=model_matrix)[:-2]
num_columns = tf.shape(input=model_matrix)[-1]
model_coefficients = tf.zeros(
shape=tf.concat([batch_shape, [num_columns]], axis=0),
dtype=dtype, name='model_coefficients')
else:
# User did supply model coefficients; convert to Tensor in case it's
# numpy or literal.
model_coefficients = tf.convert_to_tensor(
value=model_coefficients, dtype=dtype, name='model_coefficients')
if predicted_linear_response is None:
if use_default_model_coefficients:
# Since we're using zeros for model_coefficients, we know the predicted
# linear response will also be all zeros.
if offset is None:
predicted_linear_response = tf.zeros_like(
response, dtype, name='predicted_linear_response')
else:
predicted_linear_response = tf.broadcast_to(
offset,
tf.shape(input=response),
name='predicted_linear_response')
else:
# We were given model_coefficients but not the predicted linear
# response.
predicted_linear_response = calculate_linear_predictor(
model_matrix, model_coefficients, offset)
else:
predicted_linear_response = tf.convert_to_tensor(
value=predicted_linear_response,
dtype=dtype,
name='predicted_linear_response')
return [
model_matrix,
response,
model_coefficients,
predicted_linear_response,
offset,
]
|
[
"Helper",
"to",
"fit",
"which",
"sanitizes",
"input",
"args",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/fisher_scoring.py#L517-L620
|
[
"def",
"prepare_args",
"(",
"model_matrix",
",",
"response",
",",
"model_coefficients",
",",
"predicted_linear_response",
",",
"offset",
",",
"name",
"=",
"None",
")",
":",
"graph_deps",
"=",
"[",
"model_matrix",
",",
"response",
",",
"model_coefficients",
",",
"predicted_linear_response",
",",
"offset",
"]",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'prepare_args'",
",",
"graph_deps",
")",
":",
"dtype",
"=",
"dtype_util",
".",
"common_dtype",
"(",
"graph_deps",
",",
"np",
".",
"float32",
")",
"model_matrix",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"model_matrix",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'model_matrix'",
")",
"if",
"offset",
"is",
"not",
"None",
":",
"offset",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"offset",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'offset'",
")",
"response",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"response",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'response'",
")",
"use_default_model_coefficients",
"=",
"model_coefficients",
"is",
"None",
"if",
"use_default_model_coefficients",
":",
"# User did not supply model coefficients; assume they're all zero.",
"batch_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"model_matrix",
")",
"[",
":",
"-",
"2",
"]",
"num_columns",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"model_matrix",
")",
"[",
"-",
"1",
"]",
"model_coefficients",
"=",
"tf",
".",
"zeros",
"(",
"shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"batch_shape",
",",
"[",
"num_columns",
"]",
"]",
",",
"axis",
"=",
"0",
")",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'model_coefficients'",
")",
"else",
":",
"# User did supply model coefficients; convert to Tensor in case it's",
"# numpy or literal.",
"model_coefficients",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"model_coefficients",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'model_coefficients'",
")",
"if",
"predicted_linear_response",
"is",
"None",
":",
"if",
"use_default_model_coefficients",
":",
"# Since we're using zeros for model_coefficients, we know the predicted",
"# linear response will also be all zeros.",
"if",
"offset",
"is",
"None",
":",
"predicted_linear_response",
"=",
"tf",
".",
"zeros_like",
"(",
"response",
",",
"dtype",
",",
"name",
"=",
"'predicted_linear_response'",
")",
"else",
":",
"predicted_linear_response",
"=",
"tf",
".",
"broadcast_to",
"(",
"offset",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"response",
")",
",",
"name",
"=",
"'predicted_linear_response'",
")",
"else",
":",
"# We were given model_coefficients but not the predicted linear",
"# response.",
"predicted_linear_response",
"=",
"calculate_linear_predictor",
"(",
"model_matrix",
",",
"model_coefficients",
",",
"offset",
")",
"else",
":",
"predicted_linear_response",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"predicted_linear_response",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'predicted_linear_response'",
")",
"return",
"[",
"model_matrix",
",",
"response",
",",
"model_coefficients",
",",
"predicted_linear_response",
",",
"offset",
",",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
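A minimal sketch of the defaulting behavior documented above (assumes the module's `np`/`tf` imports and the `prepare_args` definition are in scope):

# With model_coefficients=None, zeros are substituted; with both offset and
# predicted_linear_response None, the linear response also defaults to zeros.
import numpy as np
import tensorflow as tf

mm = tf.constant(np.random.randn(5, 2), dtype=tf.float32)
resp = tf.constant(np.random.randn(5), dtype=tf.float32)
mm, resp, coeffs, linear_response, offset = prepare_args(
    model_matrix=mm,
    response=resp,
    model_coefficients=None,
    predicted_linear_response=None,
    offset=None)
# coeffs ==> zeros of shape [2]; linear_response ==> zeros of shape [5];
# offset is passed back unchanged as None.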
test
|
calculate_linear_predictor
|
Computes `model_matrix @ model_coefficients + offset`.
|
tensorflow_probability/python/glm/fisher_scoring.py
|
def calculate_linear_predictor(model_matrix, model_coefficients, offset=None,
name=None):
"""Computes `model_matrix @ model_coefficients + offset`."""
with tf.compat.v1.name_scope(name, 'calculate_linear_predictor',
[model_matrix, model_coefficients, offset]):
predicted_linear_response = tf.linalg.matvec(model_matrix,
model_coefficients)
if offset is not None:
predicted_linear_response += offset
return predicted_linear_response
|
def calculate_linear_predictor(model_matrix, model_coefficients, offset=None,
name=None):
"""Computes `model_matrix @ model_coefficients + offset`."""
with tf.compat.v1.name_scope(name, 'calculate_linear_predictor',
[model_matrix, model_coefficients, offset]):
predicted_linear_response = tf.linalg.matvec(model_matrix,
model_coefficients)
if offset is not None:
predicted_linear_response += offset
return predicted_linear_response
|
[
"Computes",
"model_matrix"
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/fisher_scoring.py#L623-L632
|
[
"def",
"calculate_linear_predictor",
"(",
"model_matrix",
",",
"model_coefficients",
",",
"offset",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'calculate_linear_predictor'",
",",
"[",
"model_matrix",
",",
"model_coefficients",
",",
"offset",
"]",
")",
":",
"predicted_linear_response",
"=",
"tf",
".",
"linalg",
".",
"matvec",
"(",
"model_matrix",
",",
"model_coefficients",
")",
"if",
"offset",
"is",
"not",
"None",
":",
"predicted_linear_response",
"+=",
"offset",
"return",
"predicted_linear_response"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
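A worked check of the one-line contract above (assumes the definition and a `tf` import):

import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
w = tf.constant([0.5, -1.])
b = tf.constant([0.1, 0.2])
eta = calculate_linear_predictor(x, w, offset=b)
# eta == tf.linalg.matvec(x, w) + b == [-1.4, -2.3]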
test
|
num_cols
|
Returns number of cols in a given `Tensor`.
|
tensorflow_probability/python/glm/fisher_scoring.py
|
def num_cols(x):
"""Returns number of cols in a given `Tensor`."""
if tf.compat.dimension_value(x.shape[-1]) is not None:
return tf.compat.dimension_value(x.shape[-1])
return tf.shape(input=x)[-1]
|
def num_cols(x):
"""Returns number of cols in a given `Tensor`."""
if tf.compat.dimension_value(x.shape[-1]) is not None:
return tf.compat.dimension_value(x.shape[-1])
return tf.shape(input=x)[-1]
|
[
"Returns",
"number",
"of",
"cols",
"in",
"a",
"given",
"Tensor",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/fisher_scoring.py#L635-L639
|
[
"def",
"num_cols",
"(",
"x",
")",
":",
"if",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"x",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"is",
"not",
"None",
":",
"return",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"x",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"return",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"[",
"-",
"1",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
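Behavior sketch (assumes TF1 graph-mode APIs, consistent with the `tf.compat` usage above): the helper prefers the statically known dimension and only falls back to a dynamic op when necessary.

import tensorflow as tf

num_cols(tf.zeros([3, 4]))  # ==> 4, a plain Python int (static shape)
ph = tf.compat.v1.placeholder(tf.float32, shape=[None, None])
num_cols(ph)                # ==> scalar int32 Tensor, resolved at run time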
test
|
_prefer_static
|
Wraps original_fn, preferring to call static_fn when inputs are static.
|
tensorflow_probability/python/internal/prefer_static.py
|
def _prefer_static(original_fn, static_fn):
"""Wraps original_fn, preferring to call static_fn when inputs are static."""
original_spec = tf_inspect.getfullargspec(original_fn)
static_spec = tf_inspect.getfullargspec(static_fn)
if original_spec != static_spec:
raise ValueError(
'Arg specs do not match: original={}, static={}, fn={}'.format(
original_spec, static_spec, original_fn))
@decorator.decorator
def wrap(wrapped_fn, *args, **kwargs):
del wrapped_fn
[args_, kwargs_], all_static = _maybe_get_static_args([args, kwargs])
if all_static:
return static_fn(*args_, **kwargs_)
return original_fn(*args, **kwargs)
return wrap(original_fn)
|
def _prefer_static(original_fn, static_fn):
"""Wraps original_fn, preferring to call static_fn when inputs are static."""
original_spec = tf_inspect.getfullargspec(original_fn)
static_spec = tf_inspect.getfullargspec(static_fn)
if original_spec != static_spec:
raise ValueError(
'Arg specs do not match: original={}, static={}, fn={}'.format(
original_spec, static_spec, original_fn))
@decorator.decorator
def wrap(wrapped_fn, *args, **kwargs):
del wrapped_fn
[args_, kwargs_], all_static = _maybe_get_static_args([args, kwargs])
if all_static:
return static_fn(*args_, **kwargs_)
return original_fn(*args, **kwargs)
return wrap(original_fn)
|
[
"Wraps",
"original_fn",
"preferring",
"to",
"call",
"static_fn",
"when",
"inputs",
"are",
"static",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/prefer_static.py#L41-L56
|
[
"def",
"_prefer_static",
"(",
"original_fn",
",",
"static_fn",
")",
":",
"original_spec",
"=",
"tf_inspect",
".",
"getfullargspec",
"(",
"original_fn",
")",
"static_spec",
"=",
"tf_inspect",
".",
"getfullargspec",
"(",
"static_fn",
")",
"if",
"original_spec",
"!=",
"static_spec",
":",
"raise",
"ValueError",
"(",
"'Arg specs do not match: original={}, static={}, fn={}'",
".",
"format",
"(",
"original_spec",
",",
"static_spec",
",",
"original_fn",
")",
")",
"@",
"decorator",
".",
"decorator",
"def",
"wrap",
"(",
"wrapped_fn",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"del",
"wrapped_fn",
"[",
"args_",
",",
"kwargs_",
"]",
",",
"all_static",
"=",
"_maybe_get_static_args",
"(",
"[",
"args",
",",
"kwargs",
"]",
")",
"if",
"all_static",
":",
"return",
"static_fn",
"(",
"*",
"args_",
",",
"*",
"*",
"kwargs_",
")",
"return",
"original_fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrap",
"(",
"original_fn",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
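A hypothetical pairing sketch (the names `_rank_tf` and `_rank_np` are invented for illustration): the two implementations must share an argspec, after which statically known inputs route to the NumPy path.

import numpy as np
import tensorflow as tf

def _rank_tf(input, name=None):  # pylint: disable=redefined-builtin
  return tf.rank(input=input, name=name)

def _rank_np(input, name=None):  # pylint: disable=redefined-builtin
  del name
  return np.ndim(input)

rank = _prefer_static(_rank_tf, _rank_np)
rank([[1., 2.]])  # static argument ==> plain int 2, no graph op created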
test
|
_copy_docstring
|
Wraps new_fn with the doc of original_fn.
|
tensorflow_probability/python/internal/prefer_static.py
|
def _copy_docstring(original_fn, new_fn):
"""Wraps new_fn with the doc of original_fn."""
original_spec = tf_inspect.getfullargspec(original_fn)
new_spec = tf_inspect.getfullargspec(new_fn)
if original_spec != new_spec:
raise ValueError(
'Arg specs do not match: original={}, new={}, fn={}'.format(
original_spec, new_spec, original_fn))
@decorator.decorator
def wrap(wrapped_fn, *args, **kwargs):
del wrapped_fn
return new_fn(*args, **kwargs)
return wrap(original_fn)
|
def _copy_docstring(original_fn, new_fn):
"""Wraps new_fn with the doc of original_fn."""
original_spec = tf_inspect.getfullargspec(original_fn)
new_spec = tf_inspect.getfullargspec(new_fn)
if original_spec != new_spec:
raise ValueError(
'Arg specs do not match: original={}, new={}, fn={}'.format(
original_spec, new_spec, original_fn))
@decorator.decorator
def wrap(wrapped_fn, *args, **kwargs):
del wrapped_fn
return new_fn(*args, **kwargs)
return wrap(original_fn)
|
[
"Wraps",
"new_fn",
"with",
"the",
"doc",
"of",
"original_fn",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/prefer_static.py#L59-L71
|
[
"def",
"_copy_docstring",
"(",
"original_fn",
",",
"new_fn",
")",
":",
"original_spec",
"=",
"tf_inspect",
".",
"getfullargspec",
"(",
"original_fn",
")",
"new_spec",
"=",
"tf_inspect",
".",
"getfullargspec",
"(",
"new_fn",
")",
"if",
"original_spec",
"!=",
"new_spec",
":",
"raise",
"ValueError",
"(",
"'Arg specs do not match: original={}, new={}, fn={}'",
".",
"format",
"(",
"original_spec",
",",
"new_spec",
",",
"original_fn",
")",
")",
"@",
"decorator",
".",
"decorator",
"def",
"wrap",
"(",
"wrapped_fn",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"del",
"wrapped_fn",
"return",
"new_fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrap",
"(",
"original_fn",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
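A sketch of the intended use (the `log1p` pairing here is hypothetical): an alternative NumPy-backed implementation inherits the reference function's documentation, and mismatched argspecs raise `ValueError`.

import numpy as np
import tensorflow as tf

def tf_log1p(x, name=None):
  """Computes natural logarithm of (1 + x) element-wise."""
  return tf.math.log1p(x, name=name)

log1p = _copy_docstring(
    tf_log1p,
    lambda x, name=None: np.log1p(x))
log1p(0.)  # ==> 0.0 via NumPy, while help(log1p) shows tf_log1p's docstring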
test
|
_get_static_predicate
|
Helper function for statically evaluating predicates in `cond`.
|
tensorflow_probability/python/internal/prefer_static.py
|
def _get_static_predicate(pred):
"""Helper function for statically evaluating predicates in `cond`."""
if pred in {0, 1}: # Accept 1/0 as valid boolean values
pred_value = bool(pred)
elif isinstance(pred, bool):
pred_value = pred
elif isinstance(pred, tf.Tensor):
pred_value = tf.get_static_value(pred)
# TODO(jamieas): remove the dependency on `pywrap_tensorflow`.
# pylint: disable=protected-access
if pred_value is None:
pred_value = c_api.TF_TryEvaluateConstant_wrapper(pred.graph._c_graph,
pred._as_tf_output())
# pylint: enable=protected-access
else:
raise TypeError('`pred` must be a Tensor, or a Python bool, or 1 or 0. '
'Found instead: {}'.format(pred))
return pred_value
|
def _get_static_predicate(pred):
"""Helper function for statically evaluating predicates in `cond`."""
if pred in {0, 1}: # Accept 1/0 as valid boolean values
pred_value = bool(pred)
elif isinstance(pred, bool):
pred_value = pred
elif isinstance(pred, tf.Tensor):
pred_value = tf.get_static_value(pred)
# TODO(jamieas): remove the dependency on `pywrap_tensorflow`.
# pylint: disable=protected-access
if pred_value is None:
pred_value = c_api.TF_TryEvaluateConstant_wrapper(pred.graph._c_graph,
pred._as_tf_output())
# pylint: enable=protected-access
else:
raise TypeError('`pred` must be a Tensor, or a Python bool, or 1 or 0. '
'Found instead: {}'.format(pred))
return pred_value
|
[
"Helper",
"function",
"for",
"statically",
"evaluating",
"predicates",
"in",
"cond",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/prefer_static.py#L78-L97
|
[
"def",
"_get_static_predicate",
"(",
"pred",
")",
":",
"if",
"pred",
"in",
"{",
"0",
",",
"1",
"}",
":",
"# Accept 1/0 as valid boolean values",
"pred_value",
"=",
"bool",
"(",
"pred",
")",
"elif",
"isinstance",
"(",
"pred",
",",
"bool",
")",
":",
"pred_value",
"=",
"pred",
"elif",
"isinstance",
"(",
"pred",
",",
"tf",
".",
"Tensor",
")",
":",
"pred_value",
"=",
"tf",
".",
"get_static_value",
"(",
"pred",
")",
"# TODO(jamieas): remove the dependency on `pywrap_tensorflow`.",
"# pylint: disable=protected-access",
"if",
"pred_value",
"is",
"None",
":",
"pred_value",
"=",
"c_api",
".",
"TF_TryEvaluateConstant_wrapper",
"(",
"pred",
".",
"graph",
".",
"_c_graph",
",",
"pred",
".",
"_as_tf_output",
"(",
")",
")",
"# pylint: enable=protected-access",
"else",
":",
"raise",
"TypeError",
"(",
"'`pred` must be a Tensor, or a Python bool, or 1 or 0. '",
"'Found instead: {}'",
".",
"format",
"(",
"pred",
")",
")",
"return",
"pred_value"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
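Behavior sketch for each accepted input class (graph-mode assumptions as above):

import tensorflow as tf

_get_static_predicate(True)               # ==> True
_get_static_predicate(0)                  # ==> False
_get_static_predicate(tf.constant(True))  # ==> True, recovered statically
_get_static_predicate('yes')              # raises TypeError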
test
|
rank_from_shape
|
Computes `rank` given a `Tensor`'s `shape`.
|
tensorflow_probability/python/internal/prefer_static.py
|
def rank_from_shape(shape_tensor_fn, tensorshape=None):
"""Computes `rank` given a `Tensor`'s `shape`."""
if tensorshape is None:
shape_tensor = (shape_tensor_fn() if callable(shape_tensor_fn)
else shape_tensor_fn)
if (hasattr(shape_tensor, 'shape') and
hasattr(shape_tensor.shape, 'num_elements')):
ndims_ = tensorshape_util.num_elements(shape_tensor.shape)
else:
ndims_ = len(shape_tensor)
ndims_fn = lambda: tf.size(input=shape_tensor)
else:
ndims_ = tensorshape_util.rank(tensorshape)
ndims_fn = lambda: tf.size(input=shape_tensor_fn() # pylint: disable=g-long-lambda
if callable(shape_tensor_fn)
else shape_tensor_fn)
return ndims_fn() if ndims_ is None else ndims_
|
def rank_from_shape(shape_tensor_fn, tensorshape=None):
"""Computes `rank` given a `Tensor`'s `shape`."""
if tensorshape is None:
shape_tensor = (shape_tensor_fn() if callable(shape_tensor_fn)
else shape_tensor_fn)
if (hasattr(shape_tensor, 'shape') and
hasattr(shape_tensor.shape, 'num_elements')):
ndims_ = tensorshape_util.num_elements(shape_tensor.shape)
else:
ndims_ = len(shape_tensor)
ndims_fn = lambda: tf.size(input=shape_tensor)
else:
ndims_ = tensorshape_util.rank(tensorshape)
ndims_fn = lambda: tf.size(input=shape_tensor_fn() # pylint: disable=g-long-lambda
if callable(shape_tensor_fn)
else shape_tensor_fn)
return ndims_fn() if ndims_ is None else ndims_
|
[
"Computes",
"rank",
"given",
"a",
"Tensor",
"s",
"shape",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/prefer_static.py#L100-L117
|
[
"def",
"rank_from_shape",
"(",
"shape_tensor_fn",
",",
"tensorshape",
"=",
"None",
")",
":",
"if",
"tensorshape",
"is",
"None",
":",
"shape_tensor",
"=",
"(",
"shape_tensor_fn",
"(",
")",
"if",
"callable",
"(",
"shape_tensor_fn",
")",
"else",
"shape_tensor_fn",
")",
"if",
"(",
"hasattr",
"(",
"shape_tensor",
",",
"'shape'",
")",
"and",
"hasattr",
"(",
"shape_tensor",
".",
"shape",
",",
"'num_elements'",
")",
")",
":",
"ndims_",
"=",
"tensorshape_util",
".",
"num_elements",
"(",
"shape_tensor",
".",
"shape",
")",
"else",
":",
"ndims_",
"=",
"len",
"(",
"shape_tensor",
")",
"ndims_fn",
"=",
"lambda",
":",
"tf",
".",
"size",
"(",
"input",
"=",
"shape_tensor",
")",
"else",
":",
"ndims_",
"=",
"tensorshape_util",
".",
"rank",
"(",
"tensorshape",
")",
"ndims_fn",
"=",
"lambda",
":",
"tf",
".",
"size",
"(",
"input",
"=",
"shape_tensor_fn",
"(",
")",
"# pylint: disable=g-long-lambda",
"if",
"callable",
"(",
"shape_tensor_fn",
")",
"else",
"shape_tensor_fn",
")",
"return",
"ndims_fn",
"(",
")",
"if",
"ndims_",
"is",
"None",
"else",
"ndims_"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
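A sketch of both call patterns (assumes a `tf` import); the second call supplies the static `TensorShape`, so no dynamic shape op is needed:

import tensorflow as tf

x = tf.zeros([2, 3, 5])
rank_from_shape(tf.shape(input=x))                   # ==> 3
rank_from_shape(lambda: tf.shape(input=x), x.shape)  # ==> 3, statically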
test
|
cond
|
Return either `true_fn()` if predicate `pred` is true else `false_fn()`.
If `pred` is a bool or has a constant value, we return either `true_fn()`
or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.
Arguments:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`.
Raises:
TypeError: If `true_fn` or `false_fn` is not callable.
|
tensorflow_probability/python/internal/prefer_static.py
|
def cond(pred, true_fn=None, false_fn=None, name=None):
"""Return either `true_fn()` if predicate `pred` is true else `false_fn()`.
If `pred` is a bool or has a constant value, we return either `true_fn()`
or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.
Arguments:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`.
Raises:
TypeError: If `true_fn` or `false_fn` is not callable.
"""
if not callable(true_fn):
raise TypeError('`true_fn` must be callable.')
if not callable(false_fn):
raise TypeError('`false_fn` must be callable.')
pred_value = _get_static_predicate(pred)
if pred_value is not None:
if pred_value:
return true_fn()
else:
return false_fn()
else:
return tf.cond(pred=pred, true_fn=true_fn, false_fn=false_fn, name=name)
|
def cond(pred, true_fn=None, false_fn=None, name=None):
"""Return either `true_fn()` if predicate `pred` is true else `false_fn()`.
If `pred` is a bool or has a constant value, we return either `true_fn()`
or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.
Arguments:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`.
Raises:
TypeError: If `true_fn` or `false_fn` is not callable.
"""
if not callable(true_fn):
raise TypeError('`true_fn` must be callable.')
if not callable(false_fn):
raise TypeError('`false_fn` must be callable.')
pred_value = _get_static_predicate(pred)
if pred_value is not None:
if pred_value:
return true_fn()
else:
return false_fn()
else:
return tf.cond(pred=pred, true_fn=true_fn, false_fn=false_fn, name=name)
|
[
"Return",
"either",
"true_fn",
"()",
"if",
"predicate",
"pred",
"is",
"true",
"else",
"false_fn",
"()",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/prefer_static.py#L120-L151
|
[
"def",
"cond",
"(",
"pred",
",",
"true_fn",
"=",
"None",
",",
"false_fn",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"not",
"callable",
"(",
"true_fn",
")",
":",
"raise",
"TypeError",
"(",
"'`true_fn` must be callable.'",
")",
"if",
"not",
"callable",
"(",
"false_fn",
")",
":",
"raise",
"TypeError",
"(",
"'`false_fn` must be callable.'",
")",
"pred_value",
"=",
"_get_static_predicate",
"(",
"pred",
")",
"if",
"pred_value",
"is",
"not",
"None",
":",
"if",
"pred_value",
":",
"return",
"true_fn",
"(",
")",
"else",
":",
"return",
"false_fn",
"(",
")",
"else",
":",
"return",
"tf",
".",
"cond",
"(",
"pred",
"=",
"pred",
",",
"true_fn",
"=",
"true_fn",
",",
"false_fn",
"=",
"false_fn",
",",
"name",
"=",
"name",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
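A sketch of the two regimes (the placeholder example assumes TF1 graph mode): a statically known predicate returns one branch's result directly, while a dynamic one defers to `tf.cond`.

import tensorflow as tf

cond(True, lambda: tf.constant(1), lambda: tf.constant(2))  # ==> 1 directly
p = tf.compat.v1.placeholder(tf.bool, shape=[])
cond(p, lambda: tf.constant(1), lambda: tf.constant(2))     # builds tf.cond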
test
|
case
|
Like tf.case, except attempts to statically evaluate predicates.
If any predicate in `pred_fn_pairs` is a bool or has a constant value, the
associated callable will be called or omitted depending on its value.
Otherwise this functions like tf.case.
Args:
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a
callable which returns a list of tensors.
default: Optional callable that returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
name: A name for this operation (optional).
Returns:
The tensors returned by the first pair whose predicate evaluated to True, or
those returned by `default` if none does.
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
|
tensorflow_probability/python/internal/prefer_static.py
|
def case(pred_fn_pairs, default=None, exclusive=False, name='smart_case'):
"""Like tf.case, except attempts to statically evaluate predicates.
If any predicate in `pred_fn_pairs` is a bool or has a constant value, the
associated callable will be called or omitted depending on its value.
Otherwise this functions like tf.case.
Args:
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a
callable which returns a list of tensors.
default: Optional callable that returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
name: A name for this operation (optional).
Returns:
The tensors returned by the first pair whose predicate evaluated to True, or
those returned by `default` if none does.
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
"""
return control_flow_ops._case_helper( # pylint: disable=protected-access
cond, pred_fn_pairs, default, exclusive, name, allow_python_preds=True)
|
def case(pred_fn_pairs, default=None, exclusive=False, name='smart_case'):
"""Like tf.case, except attempts to statically evaluate predicates.
If any predicate in `pred_fn_pairs` is a bool or has a constant value, the
associated callable will be called or omitted depending on its value.
Otherwise this functions like tf.case.
Args:
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a
callable which returns a list of tensors.
default: Optional callable that returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
name: A name for this operation (optional).
Returns:
The tensors returned by the first pair whose predicate evaluated to True, or
those returned by `default` if none does.
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
"""
return control_flow_ops._case_helper( # pylint: disable=protected-access
cond, pred_fn_pairs, default, exclusive, name, allow_python_preds=True)
|
[
"Like",
"tf",
".",
"case",
"except",
"attempts",
"to",
"statically",
"evaluate",
"predicates",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/prefer_static.py#L154-L179
|
[
"def",
"case",
"(",
"pred_fn_pairs",
",",
"default",
"=",
"None",
",",
"exclusive",
"=",
"False",
",",
"name",
"=",
"'smart_case'",
")",
":",
"return",
"control_flow_ops",
".",
"_case_helper",
"(",
"# pylint: disable=protected-access",
"cond",
",",
"pred_fn_pairs",
",",
"default",
",",
"exclusive",
",",
"name",
",",
"allow_python_preds",
"=",
"True",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
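A sketch mirroring the `tf.case` call shape (whether the predicates fold statically depends on constant folding of `tf.less` over constants):

import tensorflow as tf

x = tf.constant(3)
result = case(
    [(tf.less(x, 1), lambda: tf.constant(10)),
     (tf.less(x, 5), lambda: tf.constant(20))],
    default=lambda: tf.constant(30))
# result evaluates to 20; when the predicates fold, no tf.case op is emitted.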
test
|
ExponentialFamily.log_prob
|
Computes `D(param=mean(r)).log_prob(response)` for linear response, `r`.
Args:
response: `float`-like `Tensor` representing observed ("actual")
responses.
predicted_linear_response: `float`-like `Tensor` corresponding to
`tf.matmul(model_matrix, weights)`.
name: Python `str` used as TF namescope for ops created by member
functions. Default value: `None` (i.e., 'log_prob').
Returns:
log_prob: `Tensor` with shape and dtype of `predicted_linear_response`
representing the distribution prescribed log-probability of the observed
`response`s.
|
tensorflow_probability/python/glm/family.py
|
def log_prob(self, response, predicted_linear_response, name=None):
"""Computes `D(param=mean(r)).log_prob(response)` for linear response, `r`.
Args:
response: `float`-like `Tensor` representing observed ("actual")
responses.
predicted_linear_response: `float`-like `Tensor` corresponding to
`tf.matmul(model_matrix, weights)`.
name: Python `str` used as TF namescope for ops created by member
functions. Default value: `None` (i.e., 'log_prob').
Returns:
log_prob: `Tensor` with shape and dtype of `predicted_linear_response`
representing the distribution prescribed log-probability of the observed
`response`s.
"""
with self._name_scope(
name, 'log_prob', [response, predicted_linear_response]):
dtype = dtype_util.common_dtype([response, predicted_linear_response])
response = tf.convert_to_tensor(
value=response, dtype=dtype, name='response')
predicted_linear_response = tf.convert_to_tensor(
value=predicted_linear_response, name='predicted_linear_response')
return self._log_prob(response, predicted_linear_response)
|
def log_prob(self, response, predicted_linear_response, name=None):
"""Computes `D(param=mean(r)).log_prob(response)` for linear response, `r`.
Args:
response: `float`-like `Tensor` representing observed ("actual")
responses.
predicted_linear_response: `float`-like `Tensor` corresponding to
`tf.matmul(model_matrix, weights)`.
name: Python `str` used as TF namescope for ops created by member
functions. Default value: `None` (i.e., 'log_prob').
Returns:
log_prob: `Tensor` with shape and dtype of `predicted_linear_response`
representing the distribution prescribed log-probability of the observed
`response`s.
"""
with self._name_scope(
name, 'log_prob', [response, predicted_linear_response]):
dtype = dtype_util.common_dtype([response, predicted_linear_response])
response = tf.convert_to_tensor(
value=response, dtype=dtype, name='response')
predicted_linear_response = tf.convert_to_tensor(
value=predicted_linear_response, name='predicted_linear_response')
return self._log_prob(response, predicted_linear_response)
|
[
"Computes",
"D",
"(",
"param",
"=",
"mean",
"(",
"r",
"))",
".",
"log_prob",
"(",
"response",
")",
"for",
"linear",
"response",
"r",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/family.py#L137-L161
|
[
"def",
"log_prob",
"(",
"self",
",",
"response",
",",
"predicted_linear_response",
",",
"name",
"=",
"None",
")",
":",
"with",
"self",
".",
"_name_scope",
"(",
"name",
",",
"'log_prob'",
",",
"[",
"response",
",",
"predicted_linear_response",
"]",
")",
":",
"dtype",
"=",
"dtype_util",
".",
"common_dtype",
"(",
"[",
"response",
",",
"predicted_linear_response",
"]",
")",
"response",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"response",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'response'",
")",
"predicted_linear_response",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"predicted_linear_response",
",",
"name",
"=",
"'predicted_linear_response'",
")",
"return",
"self",
".",
"_log_prob",
"(",
"response",
",",
"predicted_linear_response",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
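A short sketch with the Bernoulli family (assumes `tfp.glm.Bernoulli` from this release):

import tensorflow as tf
import tensorflow_probability as tfp

model = tfp.glm.Bernoulli()
lp = model.log_prob(
    response=tf.constant([0., 1., 1.]),
    predicted_linear_response=tf.constant([-1., 0.5, 2.]))
# lp[i] is the Bernoulli log-likelihood of response[i] at logits r[i].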
test
|
ExponentialFamily._name_scope
|
Helper function to standardize op scope.
|
tensorflow_probability/python/glm/family.py
|
def _name_scope(self, name=None, default_name=None, values=None):
"""Helper function to standardize op scope."""
with tf.compat.v1.name_scope(self.name):
with tf.compat.v1.name_scope(
name, default_name, values=values or []) as scope:
yield scope
|
def _name_scope(self, name=None, default_name=None, values=None):
"""Helper function to standardize op scope."""
with tf.compat.v1.name_scope(self.name):
with tf.compat.v1.name_scope(
name, default_name, values=values or []) as scope:
yield scope
|
[
"Helper",
"function",
"to",
"standardize",
"op",
"scope",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/family.py#L174-L179
|
[
"def",
"_name_scope",
"(",
"self",
",",
"name",
"=",
"None",
",",
"default_name",
"=",
"None",
",",
"values",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"self",
".",
"name",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"default_name",
",",
"values",
"=",
"values",
"or",
"[",
"]",
")",
"as",
"scope",
":",
"yield",
"scope"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
mixture_stddev
|
Computes the standard deviation of a mixture distribution.
This function works regardless of the component distribution, so long as
each component's mean and standard deviation can be provided.
Args:
mixture_weight_vector: A 2D tensor with shape [batch_size, num_components]
mean_vector: A 2D tensor of mixture component means. Has shape `[batch_size,
num_components]`.
stddev_vector: A 2D tensor of mixture component standard deviations. Has
shape `[batch_size, num_components]`.
Returns:
A 1D tensor of shape `[batch_size]` representing the standard deviation of
the mixture distribution with given weights and component means and standard
deviations.
Raises:
ValueError: If the shapes of the input tensors are not as expected.
|
tensorflow_probability/python/internal/distribution_util.py
|
def mixture_stddev(mixture_weight_vector, mean_vector, stddev_vector):
"""Computes the standard deviation of a mixture distribution.
This function works regardless of the component distribution, so long as
each component's mean and standard deviation can be provided.
Args:
mixture_weight_vector: A 2D tensor with shape [batch_size, num_components]
mean_vector: A 2D tensor of mixture component means. Has shape `[batch_size,
num_components]`.
stddev_vector: A 2D tensor of mixture component standard deviations. Has
shape `[batch_size, num_components]`.
Returns:
A 1D tensor of shape `[batch_size]` representing the standard deviation of
the mixture distribution with given weights and component means and standard
deviations.
Raises:
ValueError: If the shapes of the input tensors are not as expected.
"""
tensorshape_util.assert_has_rank(mixture_weight_vector.shape, 2)
if not tensorshape_util.is_compatible_with(mean_vector.shape,
mixture_weight_vector.shape):
raise ValueError("Expecting means to have same shape as mixture weights.")
if not tensorshape_util.is_compatible_with(stddev_vector.shape,
mixture_weight_vector.shape):
raise ValueError("Expecting stddevs to have same shape as mixture weights.")
# Reshape the distribution parameters for batched vectorized dot products.
pi_for_dot_prod = tf.expand_dims(mixture_weight_vector, axis=1)
mu_for_dot_prod = tf.expand_dims(mean_vector, axis=2)
sigma_for_dot_prod = tf.expand_dims(stddev_vector, axis=2)
# weighted average of component means under mixture distribution.
mean_wa = tf.matmul(pi_for_dot_prod, mu_for_dot_prod)
mean_wa = tf.reshape(mean_wa, (-1,))
# weighted average of component variances under mixture distribution.
var_wa = tf.matmul(pi_for_dot_prod, tf.square(sigma_for_dot_prod))
var_wa = tf.reshape(var_wa, (-1,))
# weighted average of component squared means under mixture distribution.
sq_mean_wa = tf.matmul(pi_for_dot_prod, tf.square(mu_for_dot_prod))
sq_mean_wa = tf.reshape(sq_mean_wa, (-1,))
mixture_variance = var_wa + sq_mean_wa - tf.square(mean_wa)
return tf.sqrt(mixture_variance)
|
def mixture_stddev(mixture_weight_vector, mean_vector, stddev_vector):
"""Computes the standard deviation of a mixture distribution.
This function works regardless of the component distribution, so long as
each component's mean and standard deviation can be provided.
Args:
mixture_weight_vector: A 2D tensor with shape [batch_size, num_components]
mean_vector: A 2D tensor of mixture component means. Has shape `[batch_size,
num_components]`.
stddev_vector: A 2D tensor of mixture component standard deviations. Has
shape `[batch_size, num_components]`.
Returns:
A 1D tensor of shape `[batch_size]` representing the standard deviation of
the mixture distribution with given weights and component means and standard
deviations.
Raises:
ValueError: If the shapes of the input tensors are not as expected.
"""
tensorshape_util.assert_has_rank(mixture_weight_vector.shape, 2)
if not tensorshape_util.is_compatible_with(mean_vector.shape,
mixture_weight_vector.shape):
raise ValueError("Expecting means to have same shape as mixture weights.")
if not tensorshape_util.is_compatible_with(stddev_vector.shape,
mixture_weight_vector.shape):
raise ValueError("Expecting stddevs to have same shape as mixture weights.")
# Reshape the distribution parameters for batched vectorized dot products.
pi_for_dot_prod = tf.expand_dims(mixture_weight_vector, axis=1)
mu_for_dot_prod = tf.expand_dims(mean_vector, axis=2)
sigma_for_dot_prod = tf.expand_dims(stddev_vector, axis=2)
# weighted average of component means under mixture distribution.
mean_wa = tf.matmul(pi_for_dot_prod, mu_for_dot_prod)
mean_wa = tf.reshape(mean_wa, (-1,))
# weighted average of component variances under mixture distribution.
var_wa = tf.matmul(pi_for_dot_prod, tf.square(sigma_for_dot_prod))
var_wa = tf.reshape(var_wa, (-1,))
# weighted average of component squared means under mixture distribution.
sq_mean_wa = tf.matmul(pi_for_dot_prod, tf.square(mu_for_dot_prod))
sq_mean_wa = tf.reshape(sq_mean_wa, (-1,))
mixture_variance = var_wa + sq_mean_wa - tf.square(mean_wa)
return tf.sqrt(mixture_variance)
|
[
"Computes",
"the",
"standard",
"deviation",
"of",
"a",
"mixture",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L39-L82
|
[
"def",
"mixture_stddev",
"(",
"mixture_weight_vector",
",",
"mean_vector",
",",
"stddev_vector",
")",
":",
"tensorshape_util",
".",
"assert_has_rank",
"(",
"mixture_weight_vector",
".",
"shape",
",",
"2",
")",
"if",
"not",
"tensorshape_util",
".",
"is_compatible_with",
"(",
"mean_vector",
".",
"shape",
",",
"mixture_weight_vector",
".",
"shape",
")",
":",
"raise",
"ValueError",
"(",
"\"Expecting means to have same shape as mixture weights.\"",
")",
"if",
"not",
"tensorshape_util",
".",
"is_compatible_with",
"(",
"stddev_vector",
".",
"shape",
",",
"mixture_weight_vector",
".",
"shape",
")",
":",
"raise",
"ValueError",
"(",
"\"Expecting stddevs to have same shape as mixture weights.\"",
")",
"# Reshape the distribution parameters for batched vectorized dot products.",
"pi_for_dot_prod",
"=",
"tf",
".",
"expand_dims",
"(",
"mixture_weight_vector",
",",
"axis",
"=",
"1",
")",
"mu_for_dot_prod",
"=",
"tf",
".",
"expand_dims",
"(",
"mean_vector",
",",
"axis",
"=",
"2",
")",
"sigma_for_dot_prod",
"=",
"tf",
".",
"expand_dims",
"(",
"stddev_vector",
",",
"axis",
"=",
"2",
")",
"# weighted average of component means under mixture distribution.",
"mean_wa",
"=",
"tf",
".",
"matmul",
"(",
"pi_for_dot_prod",
",",
"mu_for_dot_prod",
")",
"mean_wa",
"=",
"tf",
".",
"reshape",
"(",
"mean_wa",
",",
"(",
"-",
"1",
",",
")",
")",
"# weighted average of component variances under mixture distribution.",
"var_wa",
"=",
"tf",
".",
"matmul",
"(",
"pi_for_dot_prod",
",",
"tf",
".",
"square",
"(",
"sigma_for_dot_prod",
")",
")",
"var_wa",
"=",
"tf",
".",
"reshape",
"(",
"var_wa",
",",
"(",
"-",
"1",
",",
")",
")",
"# weighted average of component squared means under mixture distribution.",
"sq_mean_wa",
"=",
"tf",
".",
"matmul",
"(",
"pi_for_dot_prod",
",",
"tf",
".",
"square",
"(",
"mu_for_dot_prod",
")",
")",
"sq_mean_wa",
"=",
"tf",
".",
"reshape",
"(",
"sq_mean_wa",
",",
"(",
"-",
"1",
",",
")",
")",
"mixture_variance",
"=",
"var_wa",
"+",
"sq_mean_wa",
"-",
"tf",
".",
"square",
"(",
"mean_wa",
")",
"return",
"tf",
".",
"sqrt",
"(",
"mixture_variance",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
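A worked sketch, one batch row with two equally weighted components (assumes the definition above and a `tf` import):

import tensorflow as tf

sd = mixture_stddev(
    mixture_weight_vector=tf.constant([[0.5, 0.5]]),
    mean_vector=tf.constant([[-1., 1.]]),
    stddev_vector=tf.constant([[1., 1.]]))
# Var = E[sigma^2] + E[mu^2] - (E[mu])^2 = 1 + 1 - 0 = 2, so sd ~= [1.4142].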
test
|
make_tril_scale
|
Creates a LinearOperator representing a lower triangular matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
  scale_tril: Floating-point `Tensor` representing the lower triangular matrix.
  `scale_tril` has shape [N1, N2, ... k, k], which represents a k x k lower
triangular matrix. When `None` no `scale_tril` term is added to the
LinearOperator. The upper triangular elements above the diagonal are
ignored.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k diagonal
matrix. When `None` no diagonal term is added to the LinearOperator.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix. When `scale_identity_multiplier =
scale_diag = scale_tril = None` then `scale += IdentityMatrix`. Otherwise
no scaled-identity-matrix is added to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness.
assert_positive: Python `bool` indicating whether LinearOperator should be
checked for being positive definite.
name: Python `str` name given to ops managed by this object.
Returns:
`LinearOperator` representing a lower triangular matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
|
tensorflow_probability/python/internal/distribution_util.py
|
def make_tril_scale(loc=None,
scale_tril=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None):
"""Creates a LinearOperator representing a lower triangular matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
    scale_tril: Floating-point `Tensor` representing the lower triangular
      matrix. `scale_tril` has shape [N1, N2, ... k, k], which represents a k x k lower
triangular matrix. When `None` no `scale_tril` term is added to the
LinearOperator. The upper triangular elements above the diagonal are
ignored.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k diagonal
matrix. When `None` no diagonal term is added to the LinearOperator.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix. When `scale_identity_multiplier =
scale_diag = scale_tril = None` then `scale += IdentityMatrix`. Otherwise
no scaled-identity-matrix is added to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness.
assert_positive: Python `bool` indicating whether LinearOperator should be
checked for being positive definite.
name: Python `str` name given to ops managed by this object.
Returns:
`LinearOperator` representing a lower triangular matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return with_dependencies([
assert_util.assert_positive(
tf.linalg.diag_part(x), message="diagonal part must be positive"),
], x)
return with_dependencies([
assert_util.assert_none_equal(
tf.linalg.diag_part(x),
tf.zeros([], x.dtype),
message="diagonal part must be non-zero"),
], x)
with tf.name_scope(name or "make_tril_scale"):
dtype = dtype_util.common_dtype(
[loc, scale_tril, scale_diag, scale_identity_multiplier],
preferred_dtype=tf.float32)
loc = _convert_to_tensor(loc, name="loc", dtype=dtype)
scale_tril = _convert_to_tensor(scale_tril, name="scale_tril", dtype=dtype)
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag", dtype=dtype)
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier",
dtype=dtype)
if scale_tril is not None:
scale_tril = tf.linalg.band_part(scale_tril, -1, 0) # Zero out TriU.
tril_diag = tf.linalg.diag_part(scale_tril)
if scale_diag is not None:
tril_diag += scale_diag
if scale_identity_multiplier is not None:
tril_diag += scale_identity_multiplier[..., tf.newaxis]
scale_tril = tf.linalg.set_diag(scale_tril, tril_diag)
return tf.linalg.LinearOperatorLowerTriangular(
tril=_maybe_attach_assertion(scale_tril),
is_non_singular=True,
is_self_adjoint=False,
is_positive_definite=assert_positive)
return make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
shape_hint=shape_hint,
validate_args=validate_args,
assert_positive=assert_positive,
name=name)
|
def make_tril_scale(loc=None,
scale_tril=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None):
"""Creates a LinearOperator representing a lower triangular matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
    scale_tril: Floating-point `Tensor` representing the lower triangular
      matrix. `scale_tril` has shape [N1, N2, ... k, k], which represents a k x k lower
triangular matrix. When `None` no `scale_tril` term is added to the
LinearOperator. The upper triangular elements above the diagonal are
ignored.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k diagonal
matrix. When `None` no diagonal term is added to the LinearOperator.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix. When `scale_identity_multiplier =
scale_diag = scale_tril = None` then `scale += IdentityMatrix`. Otherwise
no scaled-identity-matrix is added to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness.
assert_positive: Python `bool` indicating whether LinearOperator should be
checked for being positive definite.
name: Python `str` name given to ops managed by this object.
Returns:
`LinearOperator` representing a lower triangular matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return with_dependencies([
assert_util.assert_positive(
tf.linalg.diag_part(x), message="diagonal part must be positive"),
], x)
return with_dependencies([
assert_util.assert_none_equal(
tf.linalg.diag_part(x),
tf.zeros([], x.dtype),
message="diagonal part must be non-zero"),
], x)
with tf.name_scope(name or "make_tril_scale"):
dtype = dtype_util.common_dtype(
[loc, scale_tril, scale_diag, scale_identity_multiplier],
preferred_dtype=tf.float32)
loc = _convert_to_tensor(loc, name="loc", dtype=dtype)
scale_tril = _convert_to_tensor(scale_tril, name="scale_tril", dtype=dtype)
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag", dtype=dtype)
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier",
dtype=dtype)
if scale_tril is not None:
scale_tril = tf.linalg.band_part(scale_tril, -1, 0) # Zero out TriU.
tril_diag = tf.linalg.diag_part(scale_tril)
if scale_diag is not None:
tril_diag += scale_diag
if scale_identity_multiplier is not None:
tril_diag += scale_identity_multiplier[..., tf.newaxis]
scale_tril = tf.linalg.set_diag(scale_tril, tril_diag)
return tf.linalg.LinearOperatorLowerTriangular(
tril=_maybe_attach_assertion(scale_tril),
is_non_singular=True,
is_self_adjoint=False,
is_positive_definite=assert_positive)
return make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
shape_hint=shape_hint,
validate_args=validate_args,
assert_positive=assert_positive,
name=name)
|
[
"Creates",
"a",
"LinearOperator",
"representing",
"a",
"lower",
"triangular",
"matrix",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L85-L177
|
[
"def",
"make_tril_scale",
"(",
"loc",
"=",
"None",
",",
"scale_tril",
"=",
"None",
",",
"scale_diag",
"=",
"None",
",",
"scale_identity_multiplier",
"=",
"None",
",",
"shape_hint",
"=",
"None",
",",
"validate_args",
"=",
"False",
",",
"assert_positive",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"def",
"_maybe_attach_assertion",
"(",
"x",
")",
":",
"if",
"not",
"validate_args",
":",
"return",
"x",
"if",
"assert_positive",
":",
"return",
"with_dependencies",
"(",
"[",
"assert_util",
".",
"assert_positive",
"(",
"tf",
".",
"linalg",
".",
"diag_part",
"(",
"x",
")",
",",
"message",
"=",
"\"diagonal part must be positive\"",
")",
",",
"]",
",",
"x",
")",
"return",
"with_dependencies",
"(",
"[",
"assert_util",
".",
"assert_none_equal",
"(",
"tf",
".",
"linalg",
".",
"diag_part",
"(",
"x",
")",
",",
"tf",
".",
"zeros",
"(",
"[",
"]",
",",
"x",
".",
"dtype",
")",
",",
"message",
"=",
"\"diagonal part must be non-zero\"",
")",
",",
"]",
",",
"x",
")",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"make_tril_scale\"",
")",
":",
"dtype",
"=",
"dtype_util",
".",
"common_dtype",
"(",
"[",
"loc",
",",
"scale_tril",
",",
"scale_diag",
",",
"scale_identity_multiplier",
"]",
",",
"preferred_dtype",
"=",
"tf",
".",
"float32",
")",
"loc",
"=",
"_convert_to_tensor",
"(",
"loc",
",",
"name",
"=",
"\"loc\"",
",",
"dtype",
"=",
"dtype",
")",
"scale_tril",
"=",
"_convert_to_tensor",
"(",
"scale_tril",
",",
"name",
"=",
"\"scale_tril\"",
",",
"dtype",
"=",
"dtype",
")",
"scale_diag",
"=",
"_convert_to_tensor",
"(",
"scale_diag",
",",
"name",
"=",
"\"scale_diag\"",
",",
"dtype",
"=",
"dtype",
")",
"scale_identity_multiplier",
"=",
"_convert_to_tensor",
"(",
"scale_identity_multiplier",
",",
"name",
"=",
"\"scale_identity_multiplier\"",
",",
"dtype",
"=",
"dtype",
")",
"if",
"scale_tril",
"is",
"not",
"None",
":",
"scale_tril",
"=",
"tf",
".",
"linalg",
".",
"band_part",
"(",
"scale_tril",
",",
"-",
"1",
",",
"0",
")",
"# Zero out TriU.",
"tril_diag",
"=",
"tf",
".",
"linalg",
".",
"diag_part",
"(",
"scale_tril",
")",
"if",
"scale_diag",
"is",
"not",
"None",
":",
"tril_diag",
"+=",
"scale_diag",
"if",
"scale_identity_multiplier",
"is",
"not",
"None",
":",
"tril_diag",
"+=",
"scale_identity_multiplier",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"scale_tril",
"=",
"tf",
".",
"linalg",
".",
"set_diag",
"(",
"scale_tril",
",",
"tril_diag",
")",
"return",
"tf",
".",
"linalg",
".",
"LinearOperatorLowerTriangular",
"(",
"tril",
"=",
"_maybe_attach_assertion",
"(",
"scale_tril",
")",
",",
"is_non_singular",
"=",
"True",
",",
"is_self_adjoint",
"=",
"False",
",",
"is_positive_definite",
"=",
"assert_positive",
")",
"return",
"make_diag_scale",
"(",
"loc",
"=",
"loc",
",",
"scale_diag",
"=",
"scale_diag",
",",
"scale_identity_multiplier",
"=",
"scale_identity_multiplier",
",",
"shape_hint",
"=",
"shape_hint",
",",
"validate_args",
"=",
"validate_args",
",",
"assert_positive",
"=",
"assert_positive",
",",
"name",
"=",
"name",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
make_diag_scale
|
Creates a LinearOperator representing a diagonal matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k diagonal
matrix. When `None` no diagonal term is added to the LinearOperator.
  scale_identity_multiplier: floating point rank 0 `Tensor` representing a
    scaling done to the identity matrix. When `scale_identity_multiplier =
    scale_diag = None` then `scale += IdentityMatrix`; otherwise the scaled
    identity is added only when `scale_identity_multiplier` is set.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness.
assert_positive: Python `bool` indicating whether LinearOperator should be
checked for being positive definite.
name: Python `str` name given to ops managed by this object.
dtype: TF `DType` to prefer when converting args to `Tensor`s. Else, we fall
back to a compatible dtype across all of `loc`, `scale_diag`, and
`scale_identity_multiplier`.
Returns:
  `LinearOperator` representing a diagonal matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
|
tensorflow_probability/python/internal/distribution_util.py
|
def make_diag_scale(loc=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None,
dtype=None):
"""Creates a LinearOperator representing a diagonal matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k diagonal
matrix. When `None` no diagonal term is added to the LinearOperator.
  scale_identity_multiplier: floating point rank 0 `Tensor` representing a
    scaling done to the identity matrix. When `scale_identity_multiplier =
    scale_diag = None` then `scale += IdentityMatrix`; otherwise the scaled
    identity is added only when `scale_identity_multiplier` is set.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness.
assert_positive: Python `bool` indicating whether LinearOperator should be
checked for being positive definite.
name: Python `str` name given to ops managed by this object.
dtype: TF `DType` to prefer when converting args to `Tensor`s. Else, we fall
back to a compatible dtype across all of `loc`, `scale_diag`, and
`scale_identity_multiplier`.
Returns:
  `LinearOperator` representing a diagonal matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return with_dependencies([
assert_util.assert_positive(
x, message="diagonal part must be positive"),
], x)
return with_dependencies([
assert_util.assert_none_equal(
x, tf.zeros([], x.dtype), message="diagonal part must be non-zero")
], x)
with tf.name_scope(name or "make_diag_scale"):
if dtype is None:
dtype = dtype_util.common_dtype(
[loc, scale_diag, scale_identity_multiplier],
preferred_dtype=tf.float32)
loc = _convert_to_tensor(loc, name="loc", dtype=dtype)
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag", dtype=dtype)
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier",
dtype=dtype)
if scale_diag is not None:
if scale_identity_multiplier is not None:
scale_diag += scale_identity_multiplier[..., tf.newaxis]
return tf.linalg.LinearOperatorDiag(
diag=_maybe_attach_assertion(scale_diag),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=assert_positive)
if loc is None and shape_hint is None:
raise ValueError("Cannot infer `event_shape` unless `loc` or "
"`shape_hint` is specified.")
num_rows = shape_hint
del shape_hint
if num_rows is None:
num_rows = tf.compat.dimension_value(loc.shape[-1])
if num_rows is None:
num_rows = tf.shape(input=loc)[-1]
if scale_identity_multiplier is None:
return tf.linalg.LinearOperatorIdentity(
num_rows=num_rows,
dtype=dtype,
is_self_adjoint=True,
is_positive_definite=True,
assert_proper_shapes=validate_args)
return tf.linalg.LinearOperatorScaledIdentity(
num_rows=num_rows,
multiplier=_maybe_attach_assertion(scale_identity_multiplier),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=assert_positive,
assert_proper_shapes=validate_args)
|
def make_diag_scale(loc=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None,
dtype=None):
"""Creates a LinearOperator representing a diagonal matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k diagonal
matrix. When `None` no diagonal term is added to the LinearOperator.
  scale_identity_multiplier: floating point rank 0 `Tensor` representing a
    scaling done to the identity matrix. When `scale_identity_multiplier =
    scale_diag = None` then `scale += IdentityMatrix`; otherwise the scaled
    identity is added only when `scale_identity_multiplier` is set.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness.
assert_positive: Python `bool` indicating whether LinearOperator should be
checked for being positive definite.
name: Python `str` name given to ops managed by this object.
dtype: TF `DType` to prefer when converting args to `Tensor`s. Else, we fall
back to a compatible dtype across all of `loc`, `scale_diag`, and
`scale_identity_multiplier`.
Returns:
  `LinearOperator` representing a diagonal matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return with_dependencies([
assert_util.assert_positive(
x, message="diagonal part must be positive"),
], x)
return with_dependencies([
assert_util.assert_none_equal(
x, tf.zeros([], x.dtype), message="diagonal part must be non-zero")
], x)
with tf.name_scope(name or "make_diag_scale"):
if dtype is None:
dtype = dtype_util.common_dtype(
[loc, scale_diag, scale_identity_multiplier],
preferred_dtype=tf.float32)
loc = _convert_to_tensor(loc, name="loc", dtype=dtype)
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag", dtype=dtype)
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier",
dtype=dtype)
if scale_diag is not None:
if scale_identity_multiplier is not None:
scale_diag += scale_identity_multiplier[..., tf.newaxis]
return tf.linalg.LinearOperatorDiag(
diag=_maybe_attach_assertion(scale_diag),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=assert_positive)
if loc is None and shape_hint is None:
raise ValueError("Cannot infer `event_shape` unless `loc` or "
"`shape_hint` is specified.")
num_rows = shape_hint
del shape_hint
if num_rows is None:
num_rows = tf.compat.dimension_value(loc.shape[-1])
if num_rows is None:
num_rows = tf.shape(input=loc)[-1]
if scale_identity_multiplier is None:
return tf.linalg.LinearOperatorIdentity(
num_rows=num_rows,
dtype=dtype,
is_self_adjoint=True,
is_positive_definite=True,
assert_proper_shapes=validate_args)
return tf.linalg.LinearOperatorScaledIdentity(
num_rows=num_rows,
multiplier=_maybe_attach_assertion(scale_identity_multiplier),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=assert_positive,
assert_proper_shapes=validate_args)
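
A minimal sketch of both branches (hedged: the internal import path is an assumption):
```python
import tensorflow as tf
# Assumed internal import path; not part of the public TFP API.
from tensorflow_probability.python.internal import distribution_util

# Diagonal [1., 3.] plus 2. * identity gives diagonal [3., 5.].
scale = distribution_util.make_diag_scale(
    scale_diag=[1., 3.], scale_identity_multiplier=2.)
print(scale.to_dense())  # [[3., 0.], [0., 5.]]

# With no scale terms at all, the dimension must come from `loc` or
# `shape_hint`, and an identity operator is returned.
identity = distribution_util.make_diag_scale(shape_hint=2)
print(identity.to_dense())  # 2x2 float32 identity
```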
|
[
"Creates",
"a",
"LinearOperator",
"representing",
"a",
"diagonal",
"matrix",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L180-L278
|
[
"def",
"make_diag_scale",
"(",
"loc",
"=",
"None",
",",
"scale_diag",
"=",
"None",
",",
"scale_identity_multiplier",
"=",
"None",
",",
"shape_hint",
"=",
"None",
",",
"validate_args",
"=",
"False",
",",
"assert_positive",
"=",
"False",
",",
"name",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"def",
"_maybe_attach_assertion",
"(",
"x",
")",
":",
"if",
"not",
"validate_args",
":",
"return",
"x",
"if",
"assert_positive",
":",
"return",
"with_dependencies",
"(",
"[",
"assert_util",
".",
"assert_positive",
"(",
"x",
",",
"message",
"=",
"\"diagonal part must be positive\"",
")",
",",
"]",
",",
"x",
")",
"return",
"with_dependencies",
"(",
"[",
"assert_util",
".",
"assert_none_equal",
"(",
"x",
",",
"tf",
".",
"zeros",
"(",
"[",
"]",
",",
"x",
".",
"dtype",
")",
",",
"message",
"=",
"\"diagonal part must be non-zero\"",
")",
"]",
",",
"x",
")",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"make_diag_scale\"",
")",
":",
"if",
"dtype",
"is",
"None",
":",
"dtype",
"=",
"dtype_util",
".",
"common_dtype",
"(",
"[",
"loc",
",",
"scale_diag",
",",
"scale_identity_multiplier",
"]",
",",
"preferred_dtype",
"=",
"tf",
".",
"float32",
")",
"loc",
"=",
"_convert_to_tensor",
"(",
"loc",
",",
"name",
"=",
"\"loc\"",
",",
"dtype",
"=",
"dtype",
")",
"scale_diag",
"=",
"_convert_to_tensor",
"(",
"scale_diag",
",",
"name",
"=",
"\"scale_diag\"",
",",
"dtype",
"=",
"dtype",
")",
"scale_identity_multiplier",
"=",
"_convert_to_tensor",
"(",
"scale_identity_multiplier",
",",
"name",
"=",
"\"scale_identity_multiplier\"",
",",
"dtype",
"=",
"dtype",
")",
"if",
"scale_diag",
"is",
"not",
"None",
":",
"if",
"scale_identity_multiplier",
"is",
"not",
"None",
":",
"scale_diag",
"+=",
"scale_identity_multiplier",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"return",
"tf",
".",
"linalg",
".",
"LinearOperatorDiag",
"(",
"diag",
"=",
"_maybe_attach_assertion",
"(",
"scale_diag",
")",
",",
"is_non_singular",
"=",
"True",
",",
"is_self_adjoint",
"=",
"True",
",",
"is_positive_definite",
"=",
"assert_positive",
")",
"if",
"loc",
"is",
"None",
"and",
"shape_hint",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot infer `event_shape` unless `loc` or \"",
"\"`shape_hint` is specified.\"",
")",
"num_rows",
"=",
"shape_hint",
"del",
"shape_hint",
"if",
"num_rows",
"is",
"None",
":",
"num_rows",
"=",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"loc",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"if",
"num_rows",
"is",
"None",
":",
"num_rows",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"loc",
")",
"[",
"-",
"1",
"]",
"if",
"scale_identity_multiplier",
"is",
"None",
":",
"return",
"tf",
".",
"linalg",
".",
"LinearOperatorIdentity",
"(",
"num_rows",
"=",
"num_rows",
",",
"dtype",
"=",
"dtype",
",",
"is_self_adjoint",
"=",
"True",
",",
"is_positive_definite",
"=",
"True",
",",
"assert_proper_shapes",
"=",
"validate_args",
")",
"return",
"tf",
".",
"linalg",
".",
"LinearOperatorScaledIdentity",
"(",
"num_rows",
"=",
"num_rows",
",",
"multiplier",
"=",
"_maybe_attach_assertion",
"(",
"scale_identity_multiplier",
")",
",",
"is_non_singular",
"=",
"True",
",",
"is_self_adjoint",
"=",
"True",
",",
"is_positive_definite",
"=",
"assert_positive",
",",
"assert_proper_shapes",
"=",
"validate_args",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
shapes_from_loc_and_scale
|
Infer distribution batch and event shapes from a location and scale.
Location and scale family distributions determine their batch/event shape by
broadcasting the `loc` and `scale` args. This helper does that broadcast,
statically if possible.
Batch shape broadcasts as per the normal rules.
We allow the `loc` event shape to broadcast up to that of `scale`. We do not
allow `scale`'s event shape to change. Therefore, the last dimension of `loc`
must either be size `1`, or the same as `scale.range_dimension`.
See `MultivariateNormalLinearOperator` for a usage example.
Args:
loc: `Tensor` (already converted to tensor) or `None`. If `None`, or
`rank(loc)==0`, both batch and event shape are determined by `scale`.
scale: A `LinearOperator` instance.
name: A string name to prepend to created ops.
Returns:
batch_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.
event_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.
Raises:
ValueError: If the last dimension of `loc` is determined statically to be
different than the range of `scale`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def shapes_from_loc_and_scale(loc, scale, name="shapes_from_loc_and_scale"):
"""Infer distribution batch and event shapes from a location and scale.
Location and scale family distributions determine their batch/event shape by
broadcasting the `loc` and `scale` args. This helper does that broadcast,
statically if possible.
Batch shape broadcasts as per the normal rules.
We allow the `loc` event shape to broadcast up to that of `scale`. We do not
allow `scale`'s event shape to change. Therefore, the last dimension of `loc`
must either be size `1`, or the same as `scale.range_dimension`.
See `MultivariateNormalLinearOperator` for a usage example.
Args:
loc: `Tensor` (already converted to tensor) or `None`. If `None`, or
`rank(loc)==0`, both batch and event shape are determined by `scale`.
scale: A `LinearOperator` instance.
name: A string name to prepend to created ops.
Returns:
batch_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.
event_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.
Raises:
ValueError: If the last dimension of `loc` is determined statically to be
different than the range of `scale`.
"""
if loc is not None and tensorshape_util.rank(loc.shape) == 0:
loc = None # scalar loc is irrelevant to determining batch/event shape.
with tf.name_scope(name):
# Get event shape.
event_size = scale.range_dimension_tensor()
event_size_ = tf.get_static_value(event_size)
loc_event_size_ = (None if loc is None
else tf.compat.dimension_value(loc.shape[-1]))
if event_size_ is not None and loc_event_size_ is not None:
# Static check that event shapes match.
if loc_event_size_ != 1 and loc_event_size_ != event_size_:
raise ValueError(
"Event size of 'scale' ({}) could not be broadcast up to that "
"of 'loc' ({}).".format(event_size_, loc_event_size_))
elif loc_event_size_ is not None and loc_event_size_ != 1:
event_size_ = loc_event_size_
if event_size_ is None:
event_shape = event_size[tf.newaxis]
else:
event_shape = tf.convert_to_tensor(
value=np.reshape(event_size_, [1]),
dtype=tf.int32,
name="event_shape")
# Get batch shape.
batch_shape = scale.batch_shape_tensor()
if loc is not None:
loc_batch_shape = tensorshape_util.with_rank_at_least(loc.shape, 1)[:-1]
if tensorshape_util.rank(
loc.shape) is None or not tensorshape_util.is_fully_defined(
loc_batch_shape):
loc_batch_shape = tf.shape(input=loc)[:-1]
else:
loc_batch_shape = tf.convert_to_tensor(
value=loc_batch_shape, dtype=tf.int32, name="loc_batch_shape")
# This is defined in the core util module.
batch_shape = prefer_static_broadcast_shape(batch_shape, loc_batch_shape) # pylint: disable=undefined-variable
batch_shape = tf.convert_to_tensor(
value=batch_shape, dtype=tf.int32, name="batch_shape")
return batch_shape, event_shape
|
def shapes_from_loc_and_scale(loc, scale, name="shapes_from_loc_and_scale"):
"""Infer distribution batch and event shapes from a location and scale.
Location and scale family distributions determine their batch/event shape by
broadcasting the `loc` and `scale` args. This helper does that broadcast,
statically if possible.
Batch shape broadcasts as per the normal rules.
We allow the `loc` event shape to broadcast up to that of `scale`. We do not
allow `scale`'s event shape to change. Therefore, the last dimension of `loc`
must either be size `1`, or the same as `scale.range_dimension`.
See `MultivariateNormalLinearOperator` for a usage example.
Args:
loc: `Tensor` (already converted to tensor) or `None`. If `None`, or
`rank(loc)==0`, both batch and event shape are determined by `scale`.
scale: A `LinearOperator` instance.
name: A string name to prepend to created ops.
Returns:
batch_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.
event_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.
Raises:
ValueError: If the last dimension of `loc` is determined statically to be
different than the range of `scale`.
"""
if loc is not None and tensorshape_util.rank(loc.shape) == 0:
loc = None # scalar loc is irrelevant to determining batch/event shape.
with tf.name_scope(name):
# Get event shape.
event_size = scale.range_dimension_tensor()
event_size_ = tf.get_static_value(event_size)
loc_event_size_ = (None if loc is None
else tf.compat.dimension_value(loc.shape[-1]))
if event_size_ is not None and loc_event_size_ is not None:
# Static check that event shapes match.
if loc_event_size_ != 1 and loc_event_size_ != event_size_:
raise ValueError(
"Event size of 'scale' ({}) could not be broadcast up to that "
"of 'loc' ({}).".format(event_size_, loc_event_size_))
elif loc_event_size_ is not None and loc_event_size_ != 1:
event_size_ = loc_event_size_
if event_size_ is None:
event_shape = event_size[tf.newaxis]
else:
event_shape = tf.convert_to_tensor(
value=np.reshape(event_size_, [1]),
dtype=tf.int32,
name="event_shape")
# Get batch shape.
batch_shape = scale.batch_shape_tensor()
if loc is not None:
loc_batch_shape = tensorshape_util.with_rank_at_least(loc.shape, 1)[:-1]
if tensorshape_util.rank(
loc.shape) is None or not tensorshape_util.is_fully_defined(
loc_batch_shape):
loc_batch_shape = tf.shape(input=loc)[:-1]
else:
loc_batch_shape = tf.convert_to_tensor(
value=loc_batch_shape, dtype=tf.int32, name="loc_batch_shape")
# This is defined in the core util module.
batch_shape = prefer_static_broadcast_shape(batch_shape, loc_batch_shape) # pylint: disable=undefined-variable
batch_shape = tf.convert_to_tensor(
value=batch_shape, dtype=tf.int32, name="batch_shape")
return batch_shape, event_shape
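
A short sketch of the broadcast behavior (hedged: the internal import path is an assumption, and this relies on the module's `prefer_static_broadcast_shape` helper resolving at runtime, as the source comment indicates):
```python
import tensorflow as tf
# Assumed internal import path; not part of the public TFP API.
from tensorflow_probability.python.internal import distribution_util

loc = tf.zeros([4, 1, 3])                              # batch [4, 1], event [3]
scale = tf.linalg.LinearOperatorDiag(tf.ones([5, 3]))  # batch [5],    event [3]
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
    loc, scale)
print(batch_shape)  # [4, 5] -- the two batch shapes broadcast
print(event_shape)  # [3]   -- loc's last dim matches scale.range_dimension
```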
|
[
"Infer",
"distribution",
"batch",
"and",
"event",
"shapes",
"from",
"a",
"location",
"and",
"scale",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L281-L351
|
[
"def",
"shapes_from_loc_and_scale",
"(",
"loc",
",",
"scale",
",",
"name",
"=",
"\"shapes_from_loc_and_scale\"",
")",
":",
"if",
"loc",
"is",
"not",
"None",
"and",
"tensorshape_util",
".",
"rank",
"(",
"loc",
".",
"shape",
")",
"==",
"0",
":",
"loc",
"=",
"None",
"# scalar loc is irrelevant to determining batch/event shape.",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
":",
"# Get event shape.",
"event_size",
"=",
"scale",
".",
"range_dimension_tensor",
"(",
")",
"event_size_",
"=",
"tf",
".",
"get_static_value",
"(",
"event_size",
")",
"loc_event_size_",
"=",
"(",
"None",
"if",
"loc",
"is",
"None",
"else",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"loc",
".",
"shape",
"[",
"-",
"1",
"]",
")",
")",
"if",
"event_size_",
"is",
"not",
"None",
"and",
"loc_event_size_",
"is",
"not",
"None",
":",
"# Static check that event shapes match.",
"if",
"loc_event_size_",
"!=",
"1",
"and",
"loc_event_size_",
"!=",
"event_size_",
":",
"raise",
"ValueError",
"(",
"\"Event size of 'scale' ({}) could not be broadcast up to that \"",
"\"of 'loc' ({}).\"",
".",
"format",
"(",
"event_size_",
",",
"loc_event_size_",
")",
")",
"elif",
"loc_event_size_",
"is",
"not",
"None",
"and",
"loc_event_size_",
"!=",
"1",
":",
"event_size_",
"=",
"loc_event_size_",
"if",
"event_size_",
"is",
"None",
":",
"event_shape",
"=",
"event_size",
"[",
"tf",
".",
"newaxis",
"]",
"else",
":",
"event_shape",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"np",
".",
"reshape",
"(",
"event_size_",
",",
"[",
"1",
"]",
")",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"\"event_shape\"",
")",
"# Get batch shape.",
"batch_shape",
"=",
"scale",
".",
"batch_shape_tensor",
"(",
")",
"if",
"loc",
"is",
"not",
"None",
":",
"loc_batch_shape",
"=",
"tensorshape_util",
".",
"with_rank_at_least",
"(",
"loc",
".",
"shape",
",",
"1",
")",
"[",
":",
"-",
"1",
"]",
"if",
"tensorshape_util",
".",
"rank",
"(",
"loc",
".",
"shape",
")",
"is",
"None",
"or",
"not",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"loc_batch_shape",
")",
":",
"loc_batch_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"loc",
")",
"[",
":",
"-",
"1",
"]",
"else",
":",
"loc_batch_shape",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"loc_batch_shape",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"\"loc_batch_shape\"",
")",
"# This is defined in the core util module.",
"batch_shape",
"=",
"prefer_static_broadcast_shape",
"(",
"batch_shape",
",",
"loc_batch_shape",
")",
"# pylint: disable=undefined-variable",
"batch_shape",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"batch_shape",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"\"batch_shape\"",
")",
"return",
"batch_shape",
",",
"event_shape"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
get_broadcast_shape
|
Get broadcast shape as a Python list of integers (preferred) or `Tensor`.
Args:
*tensors: One or more `Tensor` objects (already converted!).
Returns:
broadcast shape: Python list (if shapes determined statically), otherwise
an `int32` `Tensor`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def get_broadcast_shape(*tensors):
"""Get broadcast shape as a Python list of integers (preferred) or `Tensor`.
Args:
*tensors: One or more `Tensor` objects (already converted!).
Returns:
broadcast shape: Python list (if shapes determined statically), otherwise
an `int32` `Tensor`.
"""
# Try static.
s_shape = tensors[0].shape
for t in tensors[1:]:
s_shape = tf.broadcast_static_shape(s_shape, t.shape)
if tensorshape_util.is_fully_defined(s_shape):
return tensorshape_util.as_list(s_shape)
# Fallback on dynamic.
d_shape = tf.shape(input=tensors[0])
for t in tensors[1:]:
d_shape = tf.broadcast_dynamic_shape(d_shape, tf.shape(input=t))
return d_shape
|
def get_broadcast_shape(*tensors):
"""Get broadcast shape as a Python list of integers (preferred) or `Tensor`.
Args:
*tensors: One or more `Tensor` objects (already converted!).
Returns:
broadcast shape: Python list (if shapes determined statically), otherwise
an `int32` `Tensor`.
"""
# Try static.
s_shape = tensors[0].shape
for t in tensors[1:]:
s_shape = tf.broadcast_static_shape(s_shape, t.shape)
if tensorshape_util.is_fully_defined(s_shape):
return tensorshape_util.as_list(s_shape)
# Fallback on dynamic.
d_shape = tf.shape(input=tensors[0])
for t in tensors[1:]:
d_shape = tf.broadcast_dynamic_shape(d_shape, tf.shape(input=t))
return d_shape
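
For example (hedged: the internal import path is an assumption):
```python
import tensorflow as tf
# Assumed internal import path; not part of the public TFP API.
from tensorflow_probability.python.internal import distribution_util

x = tf.zeros([3, 1, 4])
y = tf.zeros([5, 1])
# Both shapes are fully static, so a plain Python list comes back.
print(distribution_util.get_broadcast_shape(x, y))  # [3, 5, 4]
```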
|
[
"Get",
"broadcast",
"shape",
"as",
"a",
"Python",
"list",
"of",
"integers",
"(",
"preferred",
")",
"or",
"Tensor",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L354-L375
|
[
"def",
"get_broadcast_shape",
"(",
"*",
"tensors",
")",
":",
"# Try static.",
"s_shape",
"=",
"tensors",
"[",
"0",
"]",
".",
"shape",
"for",
"t",
"in",
"tensors",
"[",
"1",
":",
"]",
":",
"s_shape",
"=",
"tf",
".",
"broadcast_static_shape",
"(",
"s_shape",
",",
"t",
".",
"shape",
")",
"if",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"s_shape",
")",
":",
"return",
"tensorshape_util",
".",
"as_list",
"(",
"s_shape",
")",
"# Fallback on dynamic.",
"d_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"tensors",
"[",
"0",
"]",
")",
"for",
"t",
"in",
"tensors",
"[",
"1",
":",
"]",
":",
"d_shape",
"=",
"tf",
".",
"broadcast_dynamic_shape",
"(",
"d_shape",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"t",
")",
")",
"return",
"d_shape"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
is_diagonal_scale
|
Returns `True` if `scale` is a `LinearOperator` that is known to be diag.
Args:
scale: `LinearOperator` instance.
Returns:
Python `bool`.
Raises:
TypeError: If `scale` is not a `LinearOperator`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def is_diagonal_scale(scale):
"""Returns `True` if `scale` is a `LinearOperator` that is known to be diag.
Args:
scale: `LinearOperator` instance.
Returns:
Python `bool`.
Raises:
TypeError: If `scale` is not a `LinearOperator`.
"""
if not isinstance(scale, tf.linalg.LinearOperator):
raise TypeError("Expected argument 'scale' to be instance of LinearOperator"
". Found: %s" % scale)
return (isinstance(scale, tf.linalg.LinearOperatorIdentity) or
isinstance(scale, tf.linalg.LinearOperatorScaledIdentity) or
isinstance(scale, tf.linalg.LinearOperatorDiag))
|
def is_diagonal_scale(scale):
"""Returns `True` if `scale` is a `LinearOperator` that is known to be diag.
Args:
scale: `LinearOperator` instance.
Returns:
Python `bool`.
Raises:
TypeError: If `scale` is not a `LinearOperator`.
"""
if not isinstance(scale, tf.linalg.LinearOperator):
raise TypeError("Expected argument 'scale' to be instance of LinearOperator"
". Found: %s" % scale)
return (isinstance(scale, tf.linalg.LinearOperatorIdentity) or
isinstance(scale, tf.linalg.LinearOperatorScaledIdentity) or
isinstance(scale, tf.linalg.LinearOperatorDiag))
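
A quick sketch (hedged: the internal import path is an assumption):
```python
import tensorflow as tf
# Assumed internal import path; not part of the public TFP API.
from tensorflow_probability.python.internal import distribution_util

diag_op = tf.linalg.LinearOperatorDiag([1., 2.])
tril_op = tf.linalg.LinearOperatorLowerTriangular([[1., 0.], [3., 2.]])
print(distribution_util.is_diagonal_scale(diag_op))  # True
print(distribution_util.is_diagonal_scale(tril_op))  # False
```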
|
[
"Returns",
"True",
"if",
"scale",
"is",
"a",
"LinearOperator",
"that",
"is",
"known",
"to",
"be",
"diag",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L378-L395
|
[
"def",
"is_diagonal_scale",
"(",
"scale",
")",
":",
"if",
"not",
"isinstance",
"(",
"scale",
",",
"tf",
".",
"linalg",
".",
"LinearOperator",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected argument 'scale' to be instance of LinearOperator\"",
"\". Found: %s\"",
"%",
"scale",
")",
"return",
"(",
"isinstance",
"(",
"scale",
",",
"tf",
".",
"linalg",
".",
"LinearOperatorIdentity",
")",
"or",
"isinstance",
"(",
"scale",
",",
"tf",
".",
"linalg",
".",
"LinearOperatorScaledIdentity",
")",
"or",
"isinstance",
"(",
"scale",
",",
"tf",
".",
"linalg",
".",
"LinearOperatorDiag",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
maybe_check_scalar_distribution
|
Helper which checks validity of a scalar `distribution` init arg.
Valid here means:
* `distribution` has scalar batch and event shapes.
* `distribution` is `FULLY_REPARAMETERIZED`
* `distribution` has expected dtype.
Args:
distribution: `Distribution`-like object.
expected_base_dtype: `TensorFlow` `dtype`.
validate_args: Python `bool`. Whether to do additional checks: (i) check
that reparameterization_type is `FULLY_REPARAMETERIZED`. (ii) add
`tf.Assert` ops to the graph to enforce that distribution is scalar in the
event that this cannot be determined statically.
Returns:
List of `tf.Assert` ops to run to enforce validity checks that could not
be statically determined. Empty if `not validate_args`.
Raises:
ValueError: If validate_args and distribution is not FULLY_REPARAMETERIZED
ValueError: If distribution is statically determined to not have both
scalar batch and scalar event shapes.
|
tensorflow_probability/python/internal/distribution_util.py
|
def maybe_check_scalar_distribution(distribution, expected_base_dtype,
validate_args):
"""Helper which checks validity of a scalar `distribution` init arg.
Valid here means:
* `distribution` has scalar batch and event shapes.
* `distribution` is `FULLY_REPARAMETERIZED`
* `distribution` has expected dtype.
Args:
distribution: `Distribution`-like object.
expected_base_dtype: `TensorFlow` `dtype`.
validate_args: Python `bool`. Whether to do additional checks: (i) check
that reparameterization_type is `FULLY_REPARAMETERIZED`. (ii) add
`tf.Assert` ops to the graph to enforce that distribution is scalar in the
event that this cannot be determined statically.
Returns:
List of `tf.Assert` ops to run to enforce validity checks that could not
be statically determined. Empty if `not validate_args`.
Raises:
ValueError: If validate_args and distribution is not FULLY_REPARAMETERIZED
ValueError: If distribution is statically determined to not have both
scalar batch and scalar event shapes.
"""
if distribution.dtype != expected_base_dtype:
raise TypeError("dtype mismatch; "
"distribution.dtype=\"{}\" is not \"{}\"".format(
dtype_util.name(distribution.dtype),
dtype_util.name(expected_base_dtype)))
# Although `reparameterization_type` is a static property, we guard it by
# `validate_args`. This allows users to use a `distribution` which is not
# reparameterized itself. However, we tacitly assume that although the
# distribution is not reparameterized, it only depends on non-trainable
# variables.
if validate_args and (distribution.reparameterization_type !=
reparameterization.FULLY_REPARAMETERIZED):
raise ValueError("Base distribution should be reparameterized or be "
"a function of non-trainable variables; "
"distribution.reparameterization_type = \"{}\" "
"!= \"FULLY_REPARAMETERIZED\".".format(
distribution.reparameterization_type))
with tf.name_scope("check_distribution"):
assertions = []
def check_is_scalar(is_scalar, name):
is_scalar_ = tf.get_static_value(is_scalar)
if is_scalar_ is not None:
if not is_scalar_:
raise ValueError("distribution must be scalar; "
"distribution.{}=False is not True".format(name))
elif validate_args:
assertions.append(
assert_util.assert_equal(
is_scalar,
True,
message=("distribution must be scalar; "
"distribution.{}=False is not True".format(name))))
check_is_scalar(distribution.is_scalar_event(), "is_scalar_event")
check_is_scalar(distribution.is_scalar_batch(), "is_scalar_batch")
return assertions
|
def maybe_check_scalar_distribution(distribution, expected_base_dtype,
validate_args):
"""Helper which checks validity of a scalar `distribution` init arg.
Valid here means:
* `distribution` has scalar batch and event shapes.
* `distribution` is `FULLY_REPARAMETERIZED`
* `distribution` has expected dtype.
Args:
distribution: `Distribution`-like object.
expected_base_dtype: `TensorFlow` `dtype`.
validate_args: Python `bool`. Whether to do additional checks: (i) check
that reparameterization_type is `FULLY_REPARAMETERIZED`. (ii) add
`tf.Assert` ops to the graph to enforce that distribution is scalar in the
event that this cannot be determined statically.
Returns:
List of `tf.Assert` ops to run to enforce validity checks that could not
be statically determined. Empty if `not validate_args`.
Raises:
ValueError: If validate_args and distribution is not FULLY_REPARAMETERIZED
ValueError: If distribution is statically determined to not have both
scalar batch and scalar event shapes.
"""
if distribution.dtype != expected_base_dtype:
raise TypeError("dtype mismatch; "
"distribution.dtype=\"{}\" is not \"{}\"".format(
dtype_util.name(distribution.dtype),
dtype_util.name(expected_base_dtype)))
# Although `reparameterization_type` is a static property, we guard it by
# `validate_args`. This allows users to use a `distribution` which is not
# reparameterized itself. However, we tacitly assume that although the
# distribution is not reparameterized, it only depends on non-trainable
# variables.
if validate_args and (distribution.reparameterization_type !=
reparameterization.FULLY_REPARAMETERIZED):
raise ValueError("Base distribution should be reparameterized or be "
"a function of non-trainable variables; "
"distribution.reparameterization_type = \"{}\" "
"!= \"FULLY_REPARAMETERIZED\".".format(
distribution.reparameterization_type))
with tf.name_scope("check_distribution"):
assertions = []
def check_is_scalar(is_scalar, name):
is_scalar_ = tf.get_static_value(is_scalar)
if is_scalar_ is not None:
if not is_scalar_:
raise ValueError("distribution must be scalar; "
"distribution.{}=False is not True".format(name))
elif validate_args:
assertions.append(
assert_util.assert_equal(
is_scalar,
True,
message=("distribution must be scalar; "
"distribution.{}=False is not True".format(name))))
check_is_scalar(distribution.is_scalar_event(), "is_scalar_event")
check_is_scalar(distribution.is_scalar_batch(), "is_scalar_batch")
return assertions
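
A hedged sketch of the happy path (the internal import path is an assumption):
```python
import tensorflow as tf
import tensorflow_probability as tfp
# Assumed internal import path; not part of the public TFP API.
from tensorflow_probability.python.internal import distribution_util

base = tfp.distributions.Normal(loc=0., scale=1.)  # scalar batch and event
assertions = distribution_util.maybe_check_scalar_distribution(
    base, expected_base_dtype=tf.float32, validate_args=True)
# Scalar-ness was provable statically, so no runtime asserts are needed.
print(assertions)  # []
```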
|
[
"Helper",
"which",
"checks",
"validity",
"of",
"a",
"scalar",
"distribution",
"init",
"arg",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L398-L462
|
[
"def",
"maybe_check_scalar_distribution",
"(",
"distribution",
",",
"expected_base_dtype",
",",
"validate_args",
")",
":",
"if",
"distribution",
".",
"dtype",
"!=",
"expected_base_dtype",
":",
"raise",
"TypeError",
"(",
"\"dtype mismatch; \"",
"\"distribution.dtype=\\\"{}\\\" is not \\\"{}\\\"\"",
".",
"format",
"(",
"dtype_util",
".",
"name",
"(",
"distribution",
".",
"dtype",
")",
",",
"dtype_util",
".",
"name",
"(",
"expected_base_dtype",
")",
")",
")",
"# Although `reparameterization_type` is a static property, we guard it by",
"# `validate_args`. This allows users to use a `distribution` which is not",
"# reparameterized itself. However, we tacitly assume that although the",
"# distribution is not reparameterized, it only depends on non-trainable",
"# variables.",
"if",
"validate_args",
"and",
"(",
"distribution",
".",
"reparameterization_type",
"!=",
"reparameterization",
".",
"FULLY_REPARAMETERIZED",
")",
":",
"raise",
"ValueError",
"(",
"\"Base distribution should be reparameterized or be \"",
"\"a function of non-trainable variables; \"",
"\"distribution.reparameterization_type = \\\"{}\\\" \"",
"\"!= \\\"FULLY_REPARAMETERIZED\\\".\"",
".",
"format",
"(",
"distribution",
".",
"reparameterization_type",
")",
")",
"with",
"tf",
".",
"name_scope",
"(",
"\"check_distribution\"",
")",
":",
"assertions",
"=",
"[",
"]",
"def",
"check_is_scalar",
"(",
"is_scalar",
",",
"name",
")",
":",
"is_scalar_",
"=",
"tf",
".",
"get_static_value",
"(",
"is_scalar",
")",
"if",
"is_scalar_",
"is",
"not",
"None",
":",
"if",
"not",
"is_scalar_",
":",
"raise",
"ValueError",
"(",
"\"distribution must be scalar; \"",
"\"distribution.{}=False is not True\"",
".",
"format",
"(",
"name",
")",
")",
"elif",
"validate_args",
":",
"assertions",
".",
"append",
"(",
"assert_util",
".",
"assert_equal",
"(",
"is_scalar",
",",
"True",
",",
"message",
"=",
"(",
"\"distribution must be scalar; \"",
"\"distribution.{}=False is not True\"",
".",
"format",
"(",
"name",
")",
")",
")",
")",
"check_is_scalar",
"(",
"distribution",
".",
"is_scalar_event",
"(",
")",
",",
"\"is_scalar_event\"",
")",
"check_is_scalar",
"(",
"distribution",
".",
"is_scalar_batch",
"(",
")",
",",
"\"is_scalar_batch\"",
")",
"return",
"assertions"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
pad_mixture_dimensions
|
Pad dimensions of event tensors for mixture distributions.
See `Mixture._sample_n` and `MixtureSameFamily._sample_n` for usage examples.
Args:
x: event tensor to pad.
mixture_distribution: Base distribution of the mixture.
categorical_distribution: `Categorical` distribution that mixes the base
distribution.
event_ndims: Integer specifying the number of event dimensions in the event
tensor.
Returns:
A padded version of `x` that can broadcast with `categorical_distribution`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def pad_mixture_dimensions(x, mixture_distribution, categorical_distribution,
event_ndims):
"""Pad dimensions of event tensors for mixture distributions.
See `Mixture._sample_n` and `MixtureSameFamily._sample_n` for usage examples.
Args:
x: event tensor to pad.
mixture_distribution: Base distribution of the mixture.
categorical_distribution: `Categorical` distribution that mixes the base
distribution.
event_ndims: Integer specifying the number of event dimensions in the event
tensor.
Returns:
A padded version of `x` that can broadcast with `categorical_distribution`.
"""
with tf.name_scope("pad_mix_dims"):
def _get_ndims(d):
if tensorshape_util.rank(d.batch_shape) is not None:
return tensorshape_util.rank(d.batch_shape)
return tf.shape(input=d.batch_shape_tensor())[0]
dist_batch_ndims = _get_ndims(mixture_distribution)
cat_batch_ndims = _get_ndims(categorical_distribution)
pad_ndims = tf.where(categorical_distribution.is_scalar_batch(),
dist_batch_ndims, dist_batch_ndims - cat_batch_ndims)
s = tf.shape(input=x)
x = tf.reshape(
x,
shape=tf.concat([
s[:-1],
tf.ones([pad_ndims], dtype=tf.int32),
s[-1:],
tf.ones([event_ndims], dtype=tf.int32),
],
axis=0))
return x
|
def pad_mixture_dimensions(x, mixture_distribution, categorical_distribution,
event_ndims):
"""Pad dimensions of event tensors for mixture distributions.
See `Mixture._sample_n` and `MixtureSameFamily._sample_n` for usage examples.
Args:
x: event tensor to pad.
mixture_distribution: Base distribution of the mixture.
categorical_distribution: `Categorical` distribution that mixes the base
distribution.
event_ndims: Integer specifying the number of event dimensions in the event
tensor.
Returns:
A padded version of `x` that can broadcast with `categorical_distribution`.
"""
with tf.name_scope("pad_mix_dims"):
def _get_ndims(d):
if tensorshape_util.rank(d.batch_shape) is not None:
return tensorshape_util.rank(d.batch_shape)
return tf.shape(input=d.batch_shape_tensor())[0]
dist_batch_ndims = _get_ndims(mixture_distribution)
cat_batch_ndims = _get_ndims(categorical_distribution)
pad_ndims = tf.where(categorical_distribution.is_scalar_batch(),
dist_batch_ndims, dist_batch_ndims - cat_batch_ndims)
s = tf.shape(input=x)
x = tf.reshape(
x,
shape=tf.concat([
s[:-1],
tf.ones([pad_ndims], dtype=tf.int32),
s[-1:],
tf.ones([event_ndims], dtype=tf.int32),
],
axis=0))
return x
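
A toy illustration (hedged: this helper is normally invoked from the mixture samplers, and the internal import path is an assumption):
```python
import tensorflow as tf
import tensorflow_probability as tfp
# Assumed internal import path; not part of the public TFP API.
from tensorflow_probability.python.internal import distribution_util

cat = tfp.distributions.Categorical(probs=[0.5, 0.5])         # scalar batch
comp = tfp.distributions.Normal(loc=tf.zeros([3]), scale=1.)  # batch ndims 1
x = tf.ones([7])  # last axis indexes samples/components
y = distribution_util.pad_mixture_dimensions(
    x, mixture_distribution=comp, categorical_distribution=cat, event_ndims=0)
# One singleton batch dim is padded in before the last axis.
print(y.shape)  # (1, 7)
```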
|
[
"Pad",
"dimensions",
"of",
"event",
"tensors",
"for",
"mixture",
"distributions",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L465-L503
|
[
"def",
"pad_mixture_dimensions",
"(",
"x",
",",
"mixture_distribution",
",",
"categorical_distribution",
",",
"event_ndims",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"pad_mix_dims\"",
")",
":",
"def",
"_get_ndims",
"(",
"d",
")",
":",
"if",
"tensorshape_util",
".",
"rank",
"(",
"d",
".",
"batch_shape",
")",
"is",
"not",
"None",
":",
"return",
"tensorshape_util",
".",
"rank",
"(",
"d",
".",
"batch_shape",
")",
"return",
"tf",
".",
"shape",
"(",
"input",
"=",
"d",
".",
"batch_shape_tensor",
"(",
")",
")",
"[",
"0",
"]",
"dist_batch_ndims",
"=",
"_get_ndims",
"(",
"mixture_distribution",
")",
"cat_batch_ndims",
"=",
"_get_ndims",
"(",
"categorical_distribution",
")",
"pad_ndims",
"=",
"tf",
".",
"where",
"(",
"categorical_distribution",
".",
"is_scalar_batch",
"(",
")",
",",
"dist_batch_ndims",
",",
"dist_batch_ndims",
"-",
"cat_batch_ndims",
")",
"s",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"s",
"[",
":",
"-",
"1",
"]",
",",
"tf",
".",
"ones",
"(",
"[",
"pad_ndims",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"s",
"[",
"-",
"1",
":",
"]",
",",
"tf",
".",
"ones",
"(",
"[",
"event_ndims",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"]",
",",
"axis",
"=",
"0",
")",
")",
"return",
"x"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
pick_scalar_condition
|
Convenience function that chooses one of two values based on the predicate.
This utility is equivalent to a version of `tf.where` that accepts only a
scalar predicate and computes its result statically when possible. It may also
be used in place of `tf.cond` when both branches yield a `Tensor` of the same
shape; the operational difference is that `tf.cond` uses control flow to
evaluate only the branch that's needed, while `tf.where` (and thus
this method) may evaluate both branches before the predicate's truth is known.
This means that `tf.cond` is preferred when one of the branches is expensive
to evaluate (like performing a large matmul), while this method is preferred
when both branches are cheap, e.g., constants. In the latter case, we expect
this method to be substantially faster than `tf.cond` on GPU and to give
similar performance on CPU.
Args:
pred: Scalar `bool` `Tensor` predicate.
true_value: `Tensor` to return if `pred` is `True`.
false_value: `Tensor` to return if `pred` is `False`. Must have the same
shape as `true_value`.
name: Python `str` name given to ops managed by this object.
Returns:
result: a `Tensor` (or `Tensor`-convertible Python value) equal to
`true_value` if `pred` evaluates to `True` and `false_value` otherwise.
If the condition can be evaluated statically, the result returned is one
of the input Python values, with no graph side effects.
|
tensorflow_probability/python/internal/distribution_util.py
|
def pick_scalar_condition(pred, true_value, false_value, name=None):
"""Convenience function that chooses one of two values based on the predicate.
This utility is equivalent to a version of `tf.where` that accepts only a
scalar predicate and computes its result statically when possible. It may also
be used in place of `tf.cond` when both branches yield a `Tensor` of the same
shape; the operational difference is that `tf.cond` uses control flow to
evaluate only the branch that's needed, while `tf.where` (and thus
this method) may evaluate both branches before the predicate's truth is known.
This means that `tf.cond` is preferred when one of the branches is expensive
to evaluate (like performing a large matmul), while this method is preferred
when both branches are cheap, e.g., constants. In the latter case, we expect
this method to be substantially faster than `tf.cond` on GPU and to give
similar performance on CPU.
Args:
pred: Scalar `bool` `Tensor` predicate.
true_value: `Tensor` to return if `pred` is `True`.
false_value: `Tensor` to return if `pred` is `False`. Must have the same
shape as `true_value`.
name: Python `str` name given to ops managed by this object.
Returns:
result: a `Tensor` (or `Tensor`-convertible Python value) equal to
`true_value` if `pred` evaluates to `True` and `false_value` otherwise.
If the condition can be evaluated statically, the result returned is one
of the input Python values, with no graph side effects.
"""
with tf.name_scope(name or "pick_scalar_condition"):
pred = tf.convert_to_tensor(
value=pred, dtype_hint=tf.bool, name="pred")
true_value = tf.convert_to_tensor(value=true_value, name="true_value")
false_value = tf.convert_to_tensor(value=false_value, name="false_value")
pred_ = tf.get_static_value(pred)
if pred_ is None:
return tf.where(pred, true_value, false_value)
return true_value if pred_ else false_value
|
def pick_scalar_condition(pred, true_value, false_value, name=None):
"""Convenience function that chooses one of two values based on the predicate.
This utility is equivalent to a version of `tf.where` that accepts only a
scalar predicate and computes its result statically when possible. It may also
be used in place of `tf.cond` when both branches yield a `Tensor` of the same
shape; the operational difference is that `tf.cond` uses control flow to
evaluate only the branch that's needed, while `tf.where` (and thus
this method) may evaluate both branches before the predicate's truth is known.
This means that `tf.cond` is preferred when one of the branches is expensive
to evaluate (like performing a large matmul), while this method is preferred
when both branches are cheap, e.g., constants. In the latter case, we expect
this method to be substantially faster than `tf.cond` on GPU and to give
similar performance on CPU.
Args:
pred: Scalar `bool` `Tensor` predicate.
true_value: `Tensor` to return if `pred` is `True`.
false_value: `Tensor` to return if `pred` is `False`. Must have the same
shape as `true_value`.
name: Python `str` name given to ops managed by this object.
Returns:
result: a `Tensor` (or `Tensor`-convertible Python value) equal to
`true_value` if `pred` evaluates to `True` and `false_value` otherwise.
If the condition can be evaluated statically, the result returned is one
of the input Python values, with no graph side effects.
"""
with tf.name_scope(name or "pick_scalar_condition"):
pred = tf.convert_to_tensor(
value=pred, dtype_hint=tf.bool, name="pred")
true_value = tf.convert_to_tensor(value=true_value, name="true_value")
false_value = tf.convert_to_tensor(value=false_value, name="false_value")
pred_ = tf.get_static_value(pred)
if pred_ is None:
return tf.where(pred, true_value, false_value)
return true_value if pred_ else false_value
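
For instance (hedged: the internal import path is an assumption):
```python
import tensorflow as tf
# Assumed internal import path; not part of the public TFP API.
from tensorflow_probability.python.internal import distribution_util

# Statically-known predicate: the chosen value is returned directly.
print(distribution_util.pick_scalar_condition(True, 1., -1.))  # 1.0

# Inside a tf.function the predicate may be unknown at trace time,
# in which case the helper falls back to tf.where.
@tf.function
def f(pred):
  return distribution_util.pick_scalar_condition(pred, 1., -1.)
print(f(tf.constant(False)))  # -1.0
```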
|
[
"Convenience",
"function",
"that",
"chooses",
"one",
"of",
"two",
"values",
"based",
"on",
"the",
"predicate",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L506-L542
|
[
"def",
"pick_scalar_condition",
"(",
"pred",
",",
"true_value",
",",
"false_value",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"pick_scalar_condition\"",
")",
":",
"pred",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"pred",
",",
"dtype_hint",
"=",
"tf",
".",
"bool",
",",
"name",
"=",
"\"pred\"",
")",
"true_value",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"true_value",
",",
"name",
"=",
"\"true_value\"",
")",
"false_value",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"false_value",
",",
"name",
"=",
"\"false_value\"",
")",
"pred_",
"=",
"tf",
".",
"get_static_value",
"(",
"pred",
")",
"if",
"pred_",
"is",
"None",
":",
"return",
"tf",
".",
"where",
"(",
"pred",
",",
"true_value",
",",
"false_value",
")",
"return",
"true_value",
"if",
"pred_",
"else",
"false_value"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
make_non_negative_axis
|
Make (possibly negatively indexed) `axis` argument non-negative.
|
tensorflow_probability/python/internal/distribution_util.py
|
def make_non_negative_axis(axis, rank):
"""Make (possibly negatively indexed) `axis` argument non-negative."""
axis = tf.convert_to_tensor(value=axis, name="axis")
rank = tf.convert_to_tensor(value=rank, name="rank")
axis_ = tf.get_static_value(axis)
rank_ = tf.get_static_value(rank)
# Static case.
if axis_ is not None and rank_ is not None:
is_scalar = axis_.ndim == 0
if is_scalar:
axis_ = [axis_]
positive_axis = []
for a_ in axis_:
if a_ < 0:
positive_axis.append(rank_ + a_)
else:
positive_axis.append(a_)
if is_scalar:
positive_axis = positive_axis[0]
return tf.convert_to_tensor(value=positive_axis, dtype=axis.dtype)
# Dynamic case.
# Unfortunately static values are lost by this tf.where.
return tf.where(axis < 0, rank + axis, axis)
|
def make_non_negative_axis(axis, rank):
"""Make (possibly negatively indexed) `axis` argument non-negative."""
axis = tf.convert_to_tensor(value=axis, name="axis")
rank = tf.convert_to_tensor(value=rank, name="rank")
axis_ = tf.get_static_value(axis)
rank_ = tf.get_static_value(rank)
# Static case.
if axis_ is not None and rank_ is not None:
is_scalar = axis_.ndim == 0
if is_scalar:
axis_ = [axis_]
positive_axis = []
for a_ in axis_:
if a_ < 0:
positive_axis.append(rank_ + a_)
else:
positive_axis.append(a_)
if is_scalar:
positive_axis = positive_axis[0]
return tf.convert_to_tensor(value=positive_axis, dtype=axis.dtype)
# Dynamic case.
# Unfortunately static values are lost by this tf.where.
return tf.where(axis < 0, rank + axis, axis)
|
[
"Make",
"(",
"possibly",
"negatively",
"indexed",
")",
"axis",
"argument",
"non",
"-",
"negative",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L545-L569
|
[
"def",
"make_non_negative_axis",
"(",
"axis",
",",
"rank",
")",
":",
"axis",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"axis",
",",
"name",
"=",
"\"axis\"",
")",
"rank",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"rank",
",",
"name",
"=",
"\"rank\"",
")",
"axis_",
"=",
"tf",
".",
"get_static_value",
"(",
"axis",
")",
"rank_",
"=",
"tf",
".",
"get_static_value",
"(",
"rank",
")",
"# Static case.",
"if",
"axis_",
"is",
"not",
"None",
"and",
"rank_",
"is",
"not",
"None",
":",
"is_scalar",
"=",
"axis_",
".",
"ndim",
"==",
"0",
"if",
"is_scalar",
":",
"axis_",
"=",
"[",
"axis_",
"]",
"positive_axis",
"=",
"[",
"]",
"for",
"a_",
"in",
"axis_",
":",
"if",
"a_",
"<",
"0",
":",
"positive_axis",
".",
"append",
"(",
"rank_",
"+",
"a_",
")",
"else",
":",
"positive_axis",
".",
"append",
"(",
"a_",
")",
"if",
"is_scalar",
":",
"positive_axis",
"=",
"positive_axis",
"[",
"0",
"]",
"return",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"positive_axis",
",",
"dtype",
"=",
"axis",
".",
"dtype",
")",
"# Dynamic case.",
"# Unfortunately static values are lost by this tf.where.",
"return",
"tf",
".",
"where",
"(",
"axis",
"<",
"0",
",",
"rank",
"+",
"axis",
",",
"axis",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
move_dimension
|
Move a single tensor dimension within its shape.
This is a special case of `tf.transpose()`, which applies
arbitrary permutations to tensor dimensions.
Args:
x: Tensor of rank `ndims`.
source_idx: Integer index into `x.shape` (negative indexing is supported).
dest_idx: Integer index into `x.shape` (negative indexing is supported).
Returns:
x_perm: Tensor of rank `ndims`, in which the dimension at original
index `source_idx` has been moved to new index `dest_idx`, with
all other dimensions retained in their original order.
Example:
```python
  x = tf.placeholder(tf.float32, shape=[200, 30, 4, 1, 6])
  x_perm = move_dimension(x, 1, 1) # no-op
  x_perm = move_dimension(x, 0, 3) # result shape [30, 4, 1, 200, 6]
  x_perm = move_dimension(x, 0, -2) # equivalent to previous
  x_perm = move_dimension(x, 4, 2) # result shape [200, 30, 6, 4, 1]
```
|
tensorflow_probability/python/internal/distribution_util.py
|
def move_dimension(x, source_idx, dest_idx):
"""Move a single tensor dimension within its shape.
This is a special case of `tf.transpose()`, which applies
arbitrary permutations to tensor dimensions.
Args:
x: Tensor of rank `ndims`.
source_idx: Integer index into `x.shape` (negative indexing is supported).
dest_idx: Integer index into `x.shape` (negative indexing is supported).
Returns:
x_perm: Tensor of rank `ndims`, in which the dimension at original
index `source_idx` has been moved to new index `dest_idx`, with
all other dimensions retained in their original order.
Example:
```python
  x = tf.placeholder(tf.float32, shape=[200, 30, 4, 1, 6])
  x_perm = move_dimension(x, 1, 1) # no-op
  x_perm = move_dimension(x, 0, 3) # result shape [30, 4, 1, 200, 6]
  x_perm = move_dimension(x, 0, -2) # equivalent to previous
  x_perm = move_dimension(x, 4, 2) # result shape [200, 30, 6, 4, 1]
```
"""
ndims = prefer_static_rank(x)
dtype = dtype_util.common_dtype([source_idx, dest_idx],
preferred_dtype=tf.int32)
source_idx = tf.convert_to_tensor(value=source_idx, dtype=dtype)
dest_idx = tf.convert_to_tensor(value=dest_idx, dtype=dtype)
# Handle negative indexing.
source_idx = pick_scalar_condition(source_idx < 0, ndims + source_idx,
source_idx)
dest_idx = pick_scalar_condition(dest_idx < 0, ndims + dest_idx, dest_idx)
# Construct the appropriate permutation of dimensions, depending
# whether the source is before or after the destination.
def move_left_permutation():
return prefer_static_value(
tf.concat([
tf.range(0, dest_idx, dtype=dtype), [source_idx],
tf.range(dest_idx, source_idx, dtype=dtype),
tf.range(source_idx + 1, ndims, dtype=dtype)
],
axis=0))
def move_right_permutation():
return prefer_static_value(
tf.concat([
tf.range(0, source_idx, dtype=dtype),
tf.range(source_idx + 1, dest_idx + 1, dtype=dtype), [source_idx],
tf.range(dest_idx + 1, ndims, dtype=dtype)
],
axis=0))
def x_permuted():
return tf.transpose(
a=x,
perm=prefer_static.cond(source_idx < dest_idx,
move_right_permutation,
move_left_permutation))
# One final conditional to handle the special case where source
# and destination indices are equal.
return prefer_static.cond(tf.equal(source_idx, dest_idx),
lambda: x, x_permuted)
|
def move_dimension(x, source_idx, dest_idx):
"""Move a single tensor dimension within its shape.
This is a special case of `tf.transpose()`, which applies
arbitrary permutations to tensor dimensions.
Args:
x: Tensor of rank `ndims`.
source_idx: Integer index into `x.shape` (negative indexing is supported).
dest_idx: Integer index into `x.shape` (negative indexing is supported).
Returns:
x_perm: Tensor of rank `ndims`, in which the dimension at original
index `source_idx` has been moved to new index `dest_idx`, with
all other dimensions retained in their original order.
Example:
```python
  x = tf.placeholder(tf.float32, shape=[200, 30, 4, 1, 6])
  x_perm = move_dimension(x, 1, 1) # no-op
  x_perm = move_dimension(x, 0, 3) # result shape [30, 4, 1, 200, 6]
  x_perm = move_dimension(x, 0, -2) # equivalent to previous
  x_perm = move_dimension(x, 4, 2) # result shape [200, 30, 6, 4, 1]
```
"""
ndims = prefer_static_rank(x)
dtype = dtype_util.common_dtype([source_idx, dest_idx],
preferred_dtype=tf.int32)
source_idx = tf.convert_to_tensor(value=source_idx, dtype=dtype)
dest_idx = tf.convert_to_tensor(value=dest_idx, dtype=dtype)
# Handle negative indexing.
source_idx = pick_scalar_condition(source_idx < 0, ndims + source_idx,
source_idx)
dest_idx = pick_scalar_condition(dest_idx < 0, ndims + dest_idx, dest_idx)
# Construct the appropriate permutation of dimensions, depending
# whether the source is before or after the destination.
def move_left_permutation():
return prefer_static_value(
tf.concat([
tf.range(0, dest_idx, dtype=dtype), [source_idx],
tf.range(dest_idx, source_idx, dtype=dtype),
tf.range(source_idx + 1, ndims, dtype=dtype)
],
axis=0))
def move_right_permutation():
return prefer_static_value(
tf.concat([
tf.range(0, source_idx, dtype=dtype),
tf.range(source_idx + 1, dest_idx + 1, dtype=dtype), [source_idx],
tf.range(dest_idx + 1, ndims, dtype=dtype)
],
axis=0))
def x_permuted():
return tf.transpose(
a=x,
perm=prefer_static.cond(source_idx < dest_idx,
move_right_permutation,
move_left_permutation))
# One final conditional to handle the special case where source
# and destination indices are equal.
return prefer_static.cond(tf.equal(source_idx, dest_idx),
lambda: x, x_permuted)
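
A minimal NumPy sketch of the same reshuffling, handy as a sanity check for the shapes in the docstring example (`np.moveaxis` is assumed here to match this function's semantics):

```python
import numpy as np

# np.moveaxis performs the same single-dimension move as move_dimension.
x = np.zeros([200, 30, 4, 1, 6])
print(np.moveaxis(x, 1, 1).shape)   # (200, 30, 4, 1, 6) -- no-op
print(np.moveaxis(x, 0, 3).shape)   # (30, 4, 1, 200, 6)
print(np.moveaxis(x, 0, -2).shape)  # (30, 4, 1, 200, 6) -- same as previous
print(np.moveaxis(x, 4, 2).shape)   # (200, 30, 6, 4, 1)
```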
|
[
"Move",
"a",
"single",
"tensor",
"dimension",
"within",
"its",
"shape",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L572-L639
|
[
"def",
"move_dimension",
"(",
"x",
",",
"source_idx",
",",
"dest_idx",
")",
":",
"ndims",
"=",
"prefer_static_rank",
"(",
"x",
")",
"dtype",
"=",
"dtype_util",
".",
"common_dtype",
"(",
"[",
"source_idx",
",",
"dest_idx",
"]",
",",
"preferred_dtype",
"=",
"tf",
".",
"int32",
")",
"source_idx",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"source_idx",
",",
"dtype",
"=",
"dtype",
")",
"dest_idx",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"dest_idx",
",",
"dtype",
"=",
"dtype",
")",
"# Handle negative indexing.",
"source_idx",
"=",
"pick_scalar_condition",
"(",
"source_idx",
"<",
"0",
",",
"ndims",
"+",
"source_idx",
",",
"source_idx",
")",
"dest_idx",
"=",
"pick_scalar_condition",
"(",
"dest_idx",
"<",
"0",
",",
"ndims",
"+",
"dest_idx",
",",
"dest_idx",
")",
"# Construct the appropriate permutation of dimensions, depending",
"# whether the source is before or after the destination.",
"def",
"move_left_permutation",
"(",
")",
":",
"return",
"prefer_static_value",
"(",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"range",
"(",
"0",
",",
"dest_idx",
",",
"dtype",
"=",
"dtype",
")",
",",
"[",
"source_idx",
"]",
",",
"tf",
".",
"range",
"(",
"dest_idx",
",",
"source_idx",
",",
"dtype",
"=",
"dtype",
")",
",",
"tf",
".",
"range",
"(",
"source_idx",
"+",
"1",
",",
"ndims",
",",
"dtype",
"=",
"dtype",
")",
"]",
",",
"axis",
"=",
"0",
")",
")",
"def",
"move_right_permutation",
"(",
")",
":",
"return",
"prefer_static_value",
"(",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"range",
"(",
"0",
",",
"source_idx",
",",
"dtype",
"=",
"dtype",
")",
",",
"tf",
".",
"range",
"(",
"source_idx",
"+",
"1",
",",
"dest_idx",
"+",
"1",
",",
"dtype",
"=",
"dtype",
")",
",",
"[",
"source_idx",
"]",
",",
"tf",
".",
"range",
"(",
"dest_idx",
"+",
"1",
",",
"ndims",
",",
"dtype",
"=",
"dtype",
")",
"]",
",",
"axis",
"=",
"0",
")",
")",
"def",
"x_permuted",
"(",
")",
":",
"return",
"tf",
".",
"transpose",
"(",
"a",
"=",
"x",
",",
"perm",
"=",
"prefer_static",
".",
"cond",
"(",
"source_idx",
"<",
"dest_idx",
",",
"move_right_permutation",
",",
"move_left_permutation",
")",
")",
"# One final conditional to handle the special case where source",
"# and destination indices are equal.",
"return",
"prefer_static",
".",
"cond",
"(",
"tf",
".",
"equal",
"(",
"source_idx",
",",
"dest_idx",
")",
",",
"lambda",
":",
"x",
",",
"x_permuted",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
assert_integer_form
|
Assert that x has integer components (or floats equal to integers).
Args:
x: Floating-point `Tensor`
  data: The tensors to print out if the condition is `False`. Defaults to
    the error message and the first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
int_dtype: A `tf.dtype` used to cast the float to. The default (`None`)
implies the smallest possible signed int will be used for casting.
name: A name for this operation (optional).
Returns:
Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def assert_integer_form(x,
data=None,
summarize=None,
message=None,
int_dtype=None,
name="assert_integer_form"):
"""Assert that x has integer components (or floats equal to integers).
Args:
x: Floating-point `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      the error message and the first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
int_dtype: A `tf.dtype` used to cast the float to. The default (`None`)
implies the smallest possible signed int will be used for casting.
name: A name for this operation (optional).
Returns:
Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.
"""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name="x")
if dtype_util.is_integer(x.dtype):
return tf.no_op()
message = message or "{} has non-integer components".format(x)
if int_dtype is None:
try:
int_dtype = {
tf.float16: tf.int16,
tf.float32: tf.int32,
tf.float64: tf.int64,
}[dtype_util.base_dtype(x.dtype)]
except KeyError:
raise TypeError("Unrecognized type {}".format(dtype_util.name(x.dtype)))
return assert_util.assert_equal(
x,
tf.cast(tf.cast(x, int_dtype), x.dtype),
data=data,
summarize=summarize,
message=message,
name=name)
|
def assert_integer_form(x,
data=None,
summarize=None,
message=None,
int_dtype=None,
name="assert_integer_form"):
"""Assert that x has integer components (or floats equal to integers).
Args:
x: Floating-point `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      the error message and the first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
int_dtype: A `tf.dtype` used to cast the float to. The default (`None`)
implies the smallest possible signed int will be used for casting.
name: A name for this operation (optional).
Returns:
Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.
"""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name="x")
if dtype_util.is_integer(x.dtype):
return tf.no_op()
message = message or "{} has non-integer components".format(x)
if int_dtype is None:
try:
int_dtype = {
tf.float16: tf.int16,
tf.float32: tf.int32,
tf.float64: tf.int64,
}[dtype_util.base_dtype(x.dtype)]
except KeyError:
raise TypeError("Unrecognized type {}".format(dtype_util.name(x.dtype)))
return assert_util.assert_equal(
x,
tf.cast(tf.cast(x, int_dtype), x.dtype),
data=data,
summarize=summarize,
message=message,
name=name)
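
The core predicate is a round-trip cast. A NumPy sketch of the same check (the helper name here is illustrative, not part of the library):

```python
import numpy as np

def has_integer_form(x, int_dtype=np.int64):
  # True iff casting to int and back leaves every element unchanged.
  x = np.asarray(x, dtype=np.float64)
  return bool(np.all(x.astype(int_dtype).astype(x.dtype) == x))

print(has_integer_form([1., 2., 3.]))  # True
print(has_integer_form([1., 2.5]))     # False
```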
|
[
"Assert",
"that",
"x",
"has",
"integer",
"components",
"(",
"or",
"floats",
"equal",
"to",
"integers",
")",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L642-L683
|
[
"def",
"assert_integer_form",
"(",
"x",
",",
"data",
"=",
"None",
",",
"summarize",
"=",
"None",
",",
"message",
"=",
"None",
",",
"int_dtype",
"=",
"None",
",",
"name",
"=",
"\"assert_integer_form\"",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"x\"",
")",
"if",
"dtype_util",
".",
"is_integer",
"(",
"x",
".",
"dtype",
")",
":",
"return",
"tf",
".",
"no_op",
"(",
")",
"message",
"=",
"message",
"or",
"\"{} has non-integer components\"",
".",
"format",
"(",
"x",
")",
"if",
"int_dtype",
"is",
"None",
":",
"try",
":",
"int_dtype",
"=",
"{",
"tf",
".",
"float16",
":",
"tf",
".",
"int16",
",",
"tf",
".",
"float32",
":",
"tf",
".",
"int32",
",",
"tf",
".",
"float64",
":",
"tf",
".",
"int64",
",",
"}",
"[",
"dtype_util",
".",
"base_dtype",
"(",
"x",
".",
"dtype",
")",
"]",
"except",
"KeyError",
":",
"raise",
"TypeError",
"(",
"\"Unrecognized type {}\"",
".",
"format",
"(",
"dtype_util",
".",
"name",
"(",
"x",
".",
"dtype",
")",
")",
")",
"return",
"assert_util",
".",
"assert_equal",
"(",
"x",
",",
"tf",
".",
"cast",
"(",
"tf",
".",
"cast",
"(",
"x",
",",
"int_dtype",
")",
",",
"x",
".",
"dtype",
")",
",",
"data",
"=",
"data",
",",
"summarize",
"=",
"summarize",
",",
"message",
"=",
"message",
",",
"name",
"=",
"name",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
embed_check_nonnegative_integer_form
|
Assert x is a non-negative tensor, and optionally of integers.
|
tensorflow_probability/python/internal/distribution_util.py
|
def embed_check_nonnegative_integer_form(
x, name="embed_check_nonnegative_integer_form"):
"""Assert x is a non-negative tensor, and optionally of integers."""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name="x")
assertions = [
assert_util.assert_non_negative(
x, message="'{}' must be non-negative.".format(x)),
]
if not dtype_util.is_integer(x.dtype):
assertions += [
assert_integer_form(
x,
message="'{}' cannot contain fractional components.".format(x)),
]
return with_dependencies(assertions, x)
|
def embed_check_nonnegative_integer_form(
x, name="embed_check_nonnegative_integer_form"):
"""Assert x is a non-negative tensor, and optionally of integers."""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name="x")
assertions = [
assert_util.assert_non_negative(
x, message="'{}' must be non-negative.".format(x)),
]
if not dtype_util.is_integer(x.dtype):
assertions += [
assert_integer_form(
x,
message="'{}' cannot contain fractional components.".format(x)),
]
return with_dependencies(assertions, x)
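
A NumPy sketch of the combined check (an illustrative stand-in, not the library API):

```python
import numpy as np

def check_nonnegative_integer_form(x):
  x = np.asarray(x)
  if np.any(x < 0):
    raise ValueError("'{}' must be non-negative.".format(x))
  # The integer-form check only applies to non-integer dtypes.
  if not np.issubdtype(x.dtype, np.integer) and np.any(x != np.round(x)):
    raise ValueError("'{}' cannot contain fractional components.".format(x))
  return x

print(check_nonnegative_integer_form([0., 1., 2.]))  # [0. 1. 2.]
```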
|
[
"Assert",
"x",
"is",
"a",
"non",
"-",
"negative",
"tensor",
"and",
"optionally",
"of",
"integers",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L692-L707
|
[
"def",
"embed_check_nonnegative_integer_form",
"(",
"x",
",",
"name",
"=",
"\"embed_check_nonnegative_integer_form\"",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"x\"",
")",
"assertions",
"=",
"[",
"assert_util",
".",
"assert_non_negative",
"(",
"x",
",",
"message",
"=",
"\"'{}' must be non-negative.\"",
".",
"format",
"(",
"x",
")",
")",
",",
"]",
"if",
"not",
"dtype_util",
".",
"is_integer",
"(",
"x",
".",
"dtype",
")",
":",
"assertions",
"+=",
"[",
"assert_integer_form",
"(",
"x",
",",
"message",
"=",
"\"'{}' cannot contain fractional components.\"",
".",
"format",
"(",
"x",
")",
")",
",",
"]",
"return",
"with_dependencies",
"(",
"assertions",
",",
"x",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
same_dynamic_shape
|
Returns whether a and b have the same dynamic shape.
Args:
a: `Tensor`
b: `Tensor`
Returns:
`bool` `Tensor` representing if both tensors have the same shape.
|
tensorflow_probability/python/internal/distribution_util.py
|
def same_dynamic_shape(a, b):
"""Returns whether a and b have the same dynamic shape.
Args:
a: `Tensor`
b: `Tensor`
Returns:
`bool` `Tensor` representing if both tensors have the same shape.
"""
a = tf.convert_to_tensor(value=a, name="a")
b = tf.convert_to_tensor(value=b, name="b")
# Here we can't just do tf.equal(a.shape, b.shape), since
# static shape inference may break the equality comparison between
# shape(a) and shape(b) in tf.equal.
def all_shapes_equal():
return tf.reduce_all(
input_tensor=tf.equal(
tf.concat([tf.shape(input=a), tf.shape(input=b)], 0),
tf.concat([tf.shape(input=b), tf.shape(input=a)], 0)))
# One of the shapes isn't fully defined, so we need to use the dynamic
# shape.
return tf.cond(
pred=tf.equal(tf.rank(a), tf.rank(b)),
true_fn=all_shapes_equal,
false_fn=lambda: tf.constant(False))
|
def same_dynamic_shape(a, b):
"""Returns whether a and b have the same dynamic shape.
Args:
a: `Tensor`
b: `Tensor`
Returns:
`bool` `Tensor` representing if both tensors have the same shape.
"""
a = tf.convert_to_tensor(value=a, name="a")
b = tf.convert_to_tensor(value=b, name="b")
# Here we can't just do tf.equal(a.shape, b.shape), since
# static shape inference may break the equality comparison between
# shape(a) and shape(b) in tf.equal.
def all_shapes_equal():
return tf.reduce_all(
input_tensor=tf.equal(
tf.concat([tf.shape(input=a), tf.shape(input=b)], 0),
tf.concat([tf.shape(input=b), tf.shape(input=a)], 0)))
# One of the shapes isn't fully defined, so we need to use the dynamic
# shape.
return tf.cond(
pred=tf.equal(tf.rank(a), tf.rank(b)),
true_fn=all_shapes_equal,
false_fn=lambda: tf.constant(False))
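
The concat trick avoids comparing two shape vectors that static inference could mangle; once the ranks match, `[shape(a) ++ shape(b)] == [shape(b) ++ shape(a)]` holds elementwise exactly when the two shapes agree. A NumPy sketch of the same idea:

```python
import numpy as np

def same_shape(a, b):
  sa, sb = np.array(np.shape(a)), np.array(np.shape(b))
  if len(sa) != len(sb):  # mirrors the tf.rank guard above
    return False
  return bool(np.all(np.concatenate([sa, sb]) == np.concatenate([sb, sa])))

print(same_shape(np.zeros([2, 3]), np.ones([2, 3])))  # True
print(same_shape(np.zeros([2, 3]), np.ones([3, 2])))  # False
```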
|
[
"Returns",
"whether",
"a",
"and",
"b",
"have",
"the",
"same",
"dynamic",
"shape",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L710-L737
|
[
"def",
"same_dynamic_shape",
"(",
"a",
",",
"b",
")",
":",
"a",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"a",
",",
"name",
"=",
"\"a\"",
")",
"b",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"b",
",",
"name",
"=",
"\"b\"",
")",
"# Here we can't just do tf.equal(a.shape, b.shape), since",
"# static shape inference may break the equality comparison between",
"# shape(a) and shape(b) in tf.equal.",
"def",
"all_shapes_equal",
"(",
")",
":",
"return",
"tf",
".",
"reduce_all",
"(",
"input_tensor",
"=",
"tf",
".",
"equal",
"(",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"shape",
"(",
"input",
"=",
"a",
")",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"b",
")",
"]",
",",
"0",
")",
",",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"shape",
"(",
"input",
"=",
"b",
")",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"a",
")",
"]",
",",
"0",
")",
")",
")",
"# One of the shapes isn't fully defined, so we need to use the dynamic",
"# shape.",
"return",
"tf",
".",
"cond",
"(",
"pred",
"=",
"tf",
".",
"equal",
"(",
"tf",
".",
"rank",
"(",
"a",
")",
",",
"tf",
".",
"rank",
"(",
"b",
")",
")",
",",
"true_fn",
"=",
"all_shapes_equal",
",",
"false_fn",
"=",
"lambda",
":",
"tf",
".",
"constant",
"(",
"False",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
maybe_get_static_value
|
Helper which tries to return a static value.
Given `x`, extract its value statically, optionally casting to a specific
dtype. If this is not possible, None is returned.
Args:
x: `Tensor` for which to extract a value statically.
dtype: Optional dtype to cast to.
Returns:
Statically inferred value if possible, otherwise None.
|
tensorflow_probability/python/internal/distribution_util.py
|
def maybe_get_static_value(x, dtype=None):
"""Helper which tries to return a static value.
  Given `x`, extract its value statically, optionally casting to a specific
dtype. If this is not possible, None is returned.
Args:
x: `Tensor` for which to extract a value statically.
dtype: Optional dtype to cast to.
Returns:
Statically inferred value if possible, otherwise None.
"""
if x is None:
return x
try:
# This returns an np.ndarray.
x_ = tf.get_static_value(x)
except TypeError:
x_ = x
if x_ is None or dtype is None:
return x_
return np.array(x_, dtype)
|
def maybe_get_static_value(x, dtype=None):
"""Helper which tries to return a static value.
  Given `x`, extract its value statically, optionally casting to a specific
dtype. If this is not possible, None is returned.
Args:
x: `Tensor` for which to extract a value statically.
dtype: Optional dtype to cast to.
Returns:
Statically inferred value if possible, otherwise None.
"""
if x is None:
return x
try:
# This returns an np.ndarray.
x_ = tf.get_static_value(x)
except TypeError:
x_ = x
if x_ is None or dtype is None:
return x_
return np.array(x_, dtype)
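
Hedged eager-mode usage, assuming the function above is in scope; under graph construction a tensor with no static value yields `None`:

```python
import numpy as np
import tensorflow as tf

x = tf.constant([1, 2, 3])
print(maybe_get_static_value(x))              # array([1, 2, 3], dtype=int32)
print(maybe_get_static_value(x, np.float64))  # array([1., 2., 3.])
print(maybe_get_static_value(None))           # None
```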
|
[
"Helper",
"which",
"tries",
"to",
"return",
"a",
"static",
"value",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L740-L762
|
[
"def",
"maybe_get_static_value",
"(",
"x",
",",
"dtype",
"=",
"None",
")",
":",
"if",
"x",
"is",
"None",
":",
"return",
"x",
"try",
":",
"# This returns an np.ndarray.",
"x_",
"=",
"tf",
".",
"get_static_value",
"(",
"x",
")",
"except",
"TypeError",
":",
"x_",
"=",
"x",
"if",
"x_",
"is",
"None",
"or",
"dtype",
"is",
"None",
":",
"return",
"x_",
"return",
"np",
".",
"array",
"(",
"x_",
",",
"dtype",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
get_logits_and_probs
|
Converts logit to probabilities (or vice-versa), and returns both.
Args:
logits: Floating-point `Tensor` representing log-odds.
probs: Floating-point `Tensor` representing probabilities.
  multidimensional: Python `bool`, default `False`. If `True`, `logits` or
    `probs` is a `[N1, N2, ... k]`-dimensional tensor whose last dimension
    holds the logit or probability of each of the `shape[-1]` classes.
validate_args: Python `bool`, default `False`. When `True`, either assert `0
<= probs <= 1` (if not `multidimensional`) or that the last dimension of
`probs` sums to one.
name: A name for this operation (optional).
dtype: `tf.DType` to prefer when converting args to `Tensor`s.
Returns:
logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or
`1`, then the corresponding entry in the returned logit will be `-Inf` and
`Inf` respectively.
Raises:
ValueError: if neither `probs` nor `logits` were passed in, or both were.
|
tensorflow_probability/python/internal/distribution_util.py
|
def get_logits_and_probs(logits=None,
probs=None,
multidimensional=False,
validate_args=False,
name="get_logits_and_probs",
dtype=None):
"""Converts logit to probabilities (or vice-versa), and returns both.
Args:
logits: Floating-point `Tensor` representing log-odds.
probs: Floating-point `Tensor` representing probabilities.
    multidimensional: Python `bool`, default `False`. If `True`, `logits` or
      `probs` is a `[N1, N2, ... k]`-dimensional tensor whose last dimension
      holds the logit or probability of each of the `shape[-1]` classes.
validate_args: Python `bool`, default `False`. When `True`, either assert `0
<= probs <= 1` (if not `multidimensional`) or that the last dimension of
`probs` sums to one.
name: A name for this operation (optional).
dtype: `tf.DType` to prefer when converting args to `Tensor`s.
Returns:
logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or
`1`, then the corresponding entry in the returned logit will be `-Inf` and
`Inf` respectively.
Raises:
ValueError: if neither `probs` nor `logits` were passed in, or both were.
"""
if dtype is None:
dtype = dtype_util.common_dtype([probs, logits], preferred_dtype=tf.float32)
with tf.name_scope(name):
if (probs is None) == (logits is None):
raise ValueError("Must pass probs or logits, but not both.")
if probs is None:
logits = tf.convert_to_tensor(value=logits, name="logits", dtype=dtype)
if not dtype_util.is_floating(logits.dtype):
        raise TypeError("logits must have floating type.")
# We can early return since we constructed probs and therefore know
# they're valid.
if multidimensional:
if validate_args:
logits = embed_check_categorical_event_shape(logits)
return logits, tf.nn.softmax(logits, name="probs")
return logits, tf.sigmoid(logits, name="probs")
probs = tf.convert_to_tensor(value=probs, name="probs", dtype=dtype)
if not dtype_util.is_floating(probs.dtype):
      raise TypeError("probs must have floating type.")
if validate_args:
with tf.name_scope("validate_probs"):
one = tf.constant(1., probs.dtype)
dependencies = [assert_util.assert_non_negative(probs)]
if multidimensional:
probs = embed_check_categorical_event_shape(probs)
dependencies += [
assert_util.assert_near(
tf.reduce_sum(input_tensor=probs, axis=-1),
one,
message="probs does not sum to 1.")
]
else:
dependencies += [
assert_util.assert_less_equal(
probs, one, message="probs has components greater than 1.")
]
probs = with_dependencies(dependencies, probs)
with tf.name_scope("logits"):
if multidimensional:
# Here we don't compute the multidimensional case, in a manner
# consistent with respect to the unidimensional case. We do so
# following the TF convention. Typically, you might expect to see
# logits = log(probs) - log(probs[pivot]). A side-effect of
# being consistent with the TF approach is that the unidimensional case
# implicitly handles the second dimension but the multidimensional case
# explicitly keeps the pivot dimension.
return tf.math.log(probs), probs
return tf.math.log(probs) - tf.math.log1p(-1. * probs), probs
|
def get_logits_and_probs(logits=None,
probs=None,
multidimensional=False,
validate_args=False,
name="get_logits_and_probs",
dtype=None):
"""Converts logit to probabilities (or vice-versa), and returns both.
Args:
logits: Floating-point `Tensor` representing log-odds.
probs: Floating-point `Tensor` representing probabilities.
    multidimensional: Python `bool`, default `False`. If `True`, `logits` or
      `probs` is a `[N1, N2, ... k]`-dimensional tensor whose last dimension
      holds the logit or probability of each of the `shape[-1]` classes.
validate_args: Python `bool`, default `False`. When `True`, either assert `0
<= probs <= 1` (if not `multidimensional`) or that the last dimension of
`probs` sums to one.
name: A name for this operation (optional).
dtype: `tf.DType` to prefer when converting args to `Tensor`s.
Returns:
logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or
`1`, then the corresponding entry in the returned logit will be `-Inf` and
`Inf` respectively.
Raises:
ValueError: if neither `probs` nor `logits` were passed in, or both were.
"""
if dtype is None:
dtype = dtype_util.common_dtype([probs, logits], preferred_dtype=tf.float32)
with tf.name_scope(name):
if (probs is None) == (logits is None):
raise ValueError("Must pass probs or logits, but not both.")
if probs is None:
logits = tf.convert_to_tensor(value=logits, name="logits", dtype=dtype)
if not dtype_util.is_floating(logits.dtype):
        raise TypeError("logits must have floating type.")
# We can early return since we constructed probs and therefore know
# they're valid.
if multidimensional:
if validate_args:
logits = embed_check_categorical_event_shape(logits)
return logits, tf.nn.softmax(logits, name="probs")
return logits, tf.sigmoid(logits, name="probs")
probs = tf.convert_to_tensor(value=probs, name="probs", dtype=dtype)
if not dtype_util.is_floating(probs.dtype):
      raise TypeError("probs must have floating type.")
if validate_args:
with tf.name_scope("validate_probs"):
one = tf.constant(1., probs.dtype)
dependencies = [assert_util.assert_non_negative(probs)]
if multidimensional:
probs = embed_check_categorical_event_shape(probs)
dependencies += [
assert_util.assert_near(
tf.reduce_sum(input_tensor=probs, axis=-1),
one,
message="probs does not sum to 1.")
]
else:
dependencies += [
assert_util.assert_less_equal(
probs, one, message="probs has components greater than 1.")
]
probs = with_dependencies(dependencies, probs)
with tf.name_scope("logits"):
if multidimensional:
# Here we don't compute the multidimensional case, in a manner
# consistent with respect to the unidimensional case. We do so
# following the TF convention. Typically, you might expect to see
# logits = log(probs) - log(probs[pivot]). A side-effect of
# being consistent with the TF approach is that the unidimensional case
# implicitly handles the second dimension but the multidimensional case
# explicitly keeps the pivot dimension.
return tf.math.log(probs), probs
return tf.math.log(probs) - tf.math.log1p(-1. * probs), probs
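
For the unidimensional branch the returned logit is the familiar log-odds; a NumPy round trip confirms that `log(p) - log1p(-p)` and the sigmoid are inverses:

```python
import numpy as np

probs = np.array([0.1, 0.5, 0.9])
logits = np.log(probs) - np.log1p(-probs)  # log-odds, as in the code above
recovered = 1. / (1. + np.exp(-logits))    # sigmoid inverts the transform
print(np.allclose(recovered, probs))       # True
```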
|
[
"Converts",
"logit",
"to",
"probabilities",
"(",
"or",
"vice",
"-",
"versa",
")",
"and",
"returns",
"both",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L765-L845
|
[
"def",
"get_logits_and_probs",
"(",
"logits",
"=",
"None",
",",
"probs",
"=",
"None",
",",
"multidimensional",
"=",
"False",
",",
"validate_args",
"=",
"False",
",",
"name",
"=",
"\"get_logits_and_probs\"",
",",
"dtype",
"=",
"None",
")",
":",
"if",
"dtype",
"is",
"None",
":",
"dtype",
"=",
"dtype_util",
".",
"common_dtype",
"(",
"[",
"probs",
",",
"logits",
"]",
",",
"preferred_dtype",
"=",
"tf",
".",
"float32",
")",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
":",
"if",
"(",
"probs",
"is",
"None",
")",
"==",
"(",
"logits",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"\"Must pass probs or logits, but not both.\"",
")",
"if",
"probs",
"is",
"None",
":",
"logits",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"logits",
",",
"name",
"=",
"\"logits\"",
",",
"dtype",
"=",
"dtype",
")",
"if",
"not",
"dtype_util",
".",
"is_floating",
"(",
"logits",
".",
"dtype",
")",
":",
"raise",
"TypeError",
"(",
"\"logits must having floating type.\"",
")",
"# We can early return since we constructed probs and therefore know",
"# they're valid.",
"if",
"multidimensional",
":",
"if",
"validate_args",
":",
"logits",
"=",
"embed_check_categorical_event_shape",
"(",
"logits",
")",
"return",
"logits",
",",
"tf",
".",
"nn",
".",
"softmax",
"(",
"logits",
",",
"name",
"=",
"\"probs\"",
")",
"return",
"logits",
",",
"tf",
".",
"sigmoid",
"(",
"logits",
",",
"name",
"=",
"\"probs\"",
")",
"probs",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"probs",
",",
"name",
"=",
"\"probs\"",
",",
"dtype",
"=",
"dtype",
")",
"if",
"not",
"dtype_util",
".",
"is_floating",
"(",
"probs",
".",
"dtype",
")",
":",
"raise",
"TypeError",
"(",
"\"probs must having floating type.\"",
")",
"if",
"validate_args",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"validate_probs\"",
")",
":",
"one",
"=",
"tf",
".",
"constant",
"(",
"1.",
",",
"probs",
".",
"dtype",
")",
"dependencies",
"=",
"[",
"assert_util",
".",
"assert_non_negative",
"(",
"probs",
")",
"]",
"if",
"multidimensional",
":",
"probs",
"=",
"embed_check_categorical_event_shape",
"(",
"probs",
")",
"dependencies",
"+=",
"[",
"assert_util",
".",
"assert_near",
"(",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"probs",
",",
"axis",
"=",
"-",
"1",
")",
",",
"one",
",",
"message",
"=",
"\"probs does not sum to 1.\"",
")",
"]",
"else",
":",
"dependencies",
"+=",
"[",
"assert_util",
".",
"assert_less_equal",
"(",
"probs",
",",
"one",
",",
"message",
"=",
"\"probs has components greater than 1.\"",
")",
"]",
"probs",
"=",
"with_dependencies",
"(",
"dependencies",
",",
"probs",
")",
"with",
"tf",
".",
"name_scope",
"(",
"\"logits\"",
")",
":",
"if",
"multidimensional",
":",
"# Here we don't compute the multidimensional case, in a manner",
"# consistent with respect to the unidimensional case. We do so",
"# following the TF convention. Typically, you might expect to see",
"# logits = log(probs) - log(probs[pivot]). A side-effect of",
"# being consistent with the TF approach is that the unidimensional case",
"# implicitly handles the second dimension but the multidimensional case",
"# explicitly keeps the pivot dimension.",
"return",
"tf",
".",
"math",
".",
"log",
"(",
"probs",
")",
",",
"probs",
"return",
"tf",
".",
"math",
".",
"log",
"(",
"probs",
")",
"-",
"tf",
".",
"math",
".",
"log1p",
"(",
"-",
"1.",
"*",
"probs",
")",
",",
"probs"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_is_known_unsigned_by_dtype
|
Helper returning True if dtype is known to be unsigned.
|
tensorflow_probability/python/internal/distribution_util.py
|
def _is_known_unsigned_by_dtype(dt):
"""Helper returning True if dtype is known to be unsigned."""
return {
tf.bool: True,
tf.uint8: True,
tf.uint16: True,
}.get(dt.base_dtype, False)
|
def _is_known_unsigned_by_dtype(dt):
"""Helper returning True if dtype is known to be unsigned."""
return {
tf.bool: True,
tf.uint8: True,
tf.uint16: True,
}.get(dt.base_dtype, False)
|
[
"Helper",
"returning",
"True",
"if",
"dtype",
"is",
"known",
"to",
"be",
"unsigned",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L848-L854
|
[
"def",
"_is_known_unsigned_by_dtype",
"(",
"dt",
")",
":",
"return",
"{",
"tf",
".",
"bool",
":",
"True",
",",
"tf",
".",
"uint8",
":",
"True",
",",
"tf",
".",
"uint16",
":",
"True",
",",
"}",
".",
"get",
"(",
"dt",
".",
"base_dtype",
",",
"False",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_is_known_signed_by_dtype
|
Helper returning True if dtype is known to be signed.
|
tensorflow_probability/python/internal/distribution_util.py
|
def _is_known_signed_by_dtype(dt):
"""Helper returning True if dtype is known to be signed."""
return {
tf.float16: True,
tf.float32: True,
tf.float64: True,
tf.int8: True,
tf.int16: True,
tf.int32: True,
tf.int64: True,
}.get(dt.base_dtype, False)
|
def _is_known_signed_by_dtype(dt):
"""Helper returning True if dtype is known to be signed."""
return {
tf.float16: True,
tf.float32: True,
tf.float64: True,
tf.int8: True,
tf.int16: True,
tf.int32: True,
tf.int64: True,
}.get(dt.base_dtype, False)
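
A quick spot check of the two lookup tables, assuming both helpers are in scope:

```python
import tensorflow as tf

print(_is_known_unsigned_by_dtype(tf.uint8))  # True
print(_is_known_signed_by_dtype(tf.float32))  # True
print(_is_known_signed_by_dtype(tf.uint16))   # False: unsigned, not signed
```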
|
[
"Helper",
"returning",
"True",
"if",
"dtype",
"is",
"known",
"to",
"be",
"signed",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L857-L867
|
[
"def",
"_is_known_signed_by_dtype",
"(",
"dt",
")",
":",
"return",
"{",
"tf",
".",
"float16",
":",
"True",
",",
"tf",
".",
"float32",
":",
"True",
",",
"tf",
".",
"float64",
":",
"True",
",",
"tf",
".",
"int8",
":",
"True",
",",
"tf",
".",
"int16",
":",
"True",
",",
"tf",
".",
"int32",
":",
"True",
",",
"tf",
".",
"int64",
":",
"True",
",",
"}",
".",
"get",
"(",
"dt",
".",
"base_dtype",
",",
"False",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_largest_integer_by_dtype
|
Helper returning the largest integer exactly representable by dtype.
|
tensorflow_probability/python/internal/distribution_util.py
|
def _largest_integer_by_dtype(dt):
"""Helper returning the largest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if dt.is_floating:
return int(2**(np.finfo(dt.as_numpy_dtype).nmant + 1))
if dt.is_integer:
return np.iinfo(dt.as_numpy_dtype).max
if dt.base_dtype == tf.bool:
return int(1)
# We actually can't land here but keep the case for completeness.
raise TypeError("Unrecognized dtype: {}".format(dt.name))
|
def _largest_integer_by_dtype(dt):
"""Helper returning the largest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if dt.is_floating:
return int(2**(np.finfo(dt.as_numpy_dtype).nmant + 1))
if dt.is_integer:
return np.iinfo(dt.as_numpy_dtype).max
if dt.base_dtype == tf.bool:
return int(1)
# We actually can't land here but keep the case for completeness.
raise TypeError("Unrecognized dtype: {}".format(dt.name))
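
The floating-point branch returns `2**(nmant + 1)`. A NumPy check of those thresholds, plus the first integer float32 can no longer distinguish:

```python
import numpy as np

for dt in (np.float16, np.float32, np.float64):
  print(np.dtype(dt).name, int(2**(np.finfo(dt).nmant + 1)))
# float16 2048
# float32 16777216
# float64 9007199254740992

# 2**24 is exact in float32, but 2**24 + 1 rounds back down to 2**24.
print(np.float32(2**24 + 1) == np.float32(2**24))  # True
```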
|
[
"Helper",
"returning",
"the",
"largest",
"integer",
"exactly",
"representable",
"by",
"dtype",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L875-L886
|
[
"def",
"_largest_integer_by_dtype",
"(",
"dt",
")",
":",
"if",
"not",
"_is_known_dtype",
"(",
"dt",
")",
":",
"raise",
"TypeError",
"(",
"\"Unrecognized dtype: {}\"",
".",
"format",
"(",
"dt",
".",
"name",
")",
")",
"if",
"dt",
".",
"is_floating",
":",
"return",
"int",
"(",
"2",
"**",
"(",
"np",
".",
"finfo",
"(",
"dt",
".",
"as_numpy_dtype",
")",
".",
"nmant",
"+",
"1",
")",
")",
"if",
"dt",
".",
"is_integer",
":",
"return",
"np",
".",
"iinfo",
"(",
"dt",
".",
"as_numpy_dtype",
")",
".",
"max",
"if",
"dt",
".",
"base_dtype",
"==",
"tf",
".",
"bool",
":",
"return",
"int",
"(",
"1",
")",
"# We actually can't land here but keep the case for completeness.",
"raise",
"TypeError",
"(",
"\"Unrecognized dtype: {}\"",
".",
"format",
"(",
"dt",
".",
"name",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_smallest_integer_by_dtype
|
Helper returning the smallest integer exactly representable by dtype.
|
tensorflow_probability/python/internal/distribution_util.py
|
def _smallest_integer_by_dtype(dt):
"""Helper returning the smallest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if _is_known_unsigned_by_dtype(dt):
return 0
return -1 * _largest_integer_by_dtype(dt)
|
def _smallest_integer_by_dtype(dt):
"""Helper returning the smallest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if _is_known_unsigned_by_dtype(dt):
return 0
return -1 * _largest_integer_by_dtype(dt)
|
[
"Helper",
"returning",
"the",
"smallest",
"integer",
"exactly",
"representable",
"by",
"dtype",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L889-L895
|
[
"def",
"_smallest_integer_by_dtype",
"(",
"dt",
")",
":",
"if",
"not",
"_is_known_dtype",
"(",
"dt",
")",
":",
"raise",
"TypeError",
"(",
"\"Unrecognized dtype: {}\"",
".",
"format",
"(",
"dt",
".",
"name",
")",
")",
"if",
"_is_known_unsigned_by_dtype",
"(",
"dt",
")",
":",
"return",
"0",
"return",
"-",
"1",
"*",
"_largest_integer_by_dtype",
"(",
"dt",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_is_integer_like_by_dtype
|
Helper returning True if dtype.is_integer or is `bool`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def _is_integer_like_by_dtype(dt):
"""Helper returning True if dtype.is_integer or is `bool`."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
return dt.is_integer or dt.base_dtype == tf.bool
|
def _is_integer_like_by_dtype(dt):
"""Helper returning True if dtype.is_integer or is `bool`."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
return dt.is_integer or dt.base_dtype == tf.bool
|
[
"Helper",
"returning",
"True",
"if",
"dtype",
".",
"is_integer",
"or",
"is",
"bool",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L898-L902
|
[
"def",
"_is_integer_like_by_dtype",
"(",
"dt",
")",
":",
"if",
"not",
"_is_known_dtype",
"(",
"dt",
")",
":",
"raise",
"TypeError",
"(",
"\"Unrecognized dtype: {}\"",
".",
"format",
"(",
"dt",
".",
"name",
")",
")",
"return",
"dt",
".",
"is_integer",
"or",
"dt",
".",
"base_dtype",
"==",
"tf",
".",
"bool"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
embed_check_categorical_event_shape
|
Embeds checks that categorical distributions don't have too many classes.
A categorical-type distribution is one which, e.g., returns the class label
rather than a one-hot encoding. E.g., `Categorical(probs)`.
Since distributions output samples in the same dtype as the parameters, we
must ensure that casting doesn't lose precision. That is, the
`parameter.dtype` implies a maximum number of classes. However, since shape is
`int32` and categorical variables are presumed to be indexes into a `Tensor`,
we must also ensure that the number of classes is no larger than the largest
possible `int32` index, i.e., `2**31-1`.
In other words the number of classes, `K`, must satisfy the following
condition:
```python
K <= min(
int(2**31 - 1), # Largest float as an index.
{
tf.float16: int(2**11), # Largest int as a float16.
tf.float32: int(2**24),
tf.float64: int(2**53),
}.get(dtype_util.base_dtype(categorical_param.dtype), 0))
```
Args:
categorical_param: Floating-point `Tensor` representing parameters of
distribution over categories. The rightmost shape is presumed to be the
number of categories.
name: A name for this operation (optional).
Returns:
categorical_param: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `categorical_param` has an unknown `dtype`.
ValueError: if we can statically identify `categorical_param` as being too
large (for being closed under int32/float casting).
|
tensorflow_probability/python/internal/distribution_util.py
|
def embed_check_categorical_event_shape(
categorical_param, name="embed_check_categorical_event_shape"):
"""Embeds checks that categorical distributions don't have too many classes.
A categorical-type distribution is one which, e.g., returns the class label
rather than a one-hot encoding. E.g., `Categorical(probs)`.
Since distributions output samples in the same dtype as the parameters, we
must ensure that casting doesn't lose precision. That is, the
`parameter.dtype` implies a maximum number of classes. However, since shape is
`int32` and categorical variables are presumed to be indexes into a `Tensor`,
we must also ensure that the number of classes is no larger than the largest
possible `int32` index, i.e., `2**31-1`.
In other words the number of classes, `K`, must satisfy the following
condition:
```python
K <= min(
int(2**31 - 1), # Largest float as an index.
{
tf.float16: int(2**11), # Largest int as a float16.
tf.float32: int(2**24),
tf.float64: int(2**53),
}.get(dtype_util.base_dtype(categorical_param.dtype), 0))
```
Args:
categorical_param: Floating-point `Tensor` representing parameters of
distribution over categories. The rightmost shape is presumed to be the
number of categories.
name: A name for this operation (optional).
Returns:
categorical_param: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `categorical_param` has an unknown `dtype`.
ValueError: if we can statically identify `categorical_param` as being too
large (for being closed under int32/float casting).
"""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=categorical_param, name="categorical_param")
# The size must not exceed both of:
# - The largest possible int32 (since categorical values are presumed to be
# indexes into a Tensor).
# - The largest possible integer exactly representable under the given
# floating-point dtype (since we need to cast to/from).
#
# The chosen floating-point thresholds are 2**(1 + mantissa_bits).
# For more details, see:
# https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation
x_dtype = dtype_util.base_dtype(x.dtype)
max_event_size = (
_largest_integer_by_dtype(x_dtype)
if dtype_util.is_floating(x_dtype) else 0)
    if max_event_size == 0:
raise TypeError("Unable to validate size of unrecognized dtype "
"({}).".format(dtype_util.name(x_dtype)))
try:
x_shape_static = tensorshape_util.with_rank_at_least(x.shape, 1)
except ValueError:
raise ValueError("A categorical-distribution parameter must have "
"at least 1 dimension.")
event_size = tf.compat.dimension_value(x_shape_static[-1])
if event_size is not None:
if event_size < 2:
raise ValueError("A categorical-distribution parameter must have at "
"least 2 events.")
if event_size > max_event_size:
raise ValueError("Number of classes exceeds `dtype` precision, i.e., "
"{} implies shape ({}) cannot exceed {}.".format(
dtype_util.name(x_dtype), event_size,
max_event_size))
return x
else:
event_size = tf.shape(input=x, out_type=tf.int64, name="x_shape")[-1]
return with_dependencies([
assert_util.assert_rank_at_least(
x,
1,
message=("A categorical-distribution parameter must have "
"at least 1 dimension.")),
assert_util.assert_greater_equal(
tf.shape(input=x)[-1],
2,
message=("A categorical-distribution parameter must have at "
"least 2 events.")),
assert_util.assert_less_equal(
event_size,
tf.convert_to_tensor(max_event_size, dtype=tf.int64),
message="Number of classes exceeds `dtype` precision, "
"i.e., {} dtype cannot exceed {} shape.".format(
dtype_util.name(x_dtype), max_event_size)),
], x)
|
def embed_check_categorical_event_shape(
categorical_param, name="embed_check_categorical_event_shape"):
"""Embeds checks that categorical distributions don't have too many classes.
A categorical-type distribution is one which, e.g., returns the class label
rather than a one-hot encoding. E.g., `Categorical(probs)`.
Since distributions output samples in the same dtype as the parameters, we
must ensure that casting doesn't lose precision. That is, the
`parameter.dtype` implies a maximum number of classes. However, since shape is
`int32` and categorical variables are presumed to be indexes into a `Tensor`,
we must also ensure that the number of classes is no larger than the largest
possible `int32` index, i.e., `2**31-1`.
In other words the number of classes, `K`, must satisfy the following
condition:
```python
K <= min(
int(2**31 - 1), # Largest float as an index.
{
tf.float16: int(2**11), # Largest int as a float16.
tf.float32: int(2**24),
tf.float64: int(2**53),
}.get(dtype_util.base_dtype(categorical_param.dtype), 0))
```
Args:
categorical_param: Floating-point `Tensor` representing parameters of
distribution over categories. The rightmost shape is presumed to be the
number of categories.
name: A name for this operation (optional).
Returns:
categorical_param: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `categorical_param` has an unknown `dtype`.
ValueError: if we can statically identify `categorical_param` as being too
large (for being closed under int32/float casting).
"""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=categorical_param, name="categorical_param")
# The size must not exceed both of:
# - The largest possible int32 (since categorical values are presumed to be
# indexes into a Tensor).
# - The largest possible integer exactly representable under the given
# floating-point dtype (since we need to cast to/from).
#
# The chosen floating-point thresholds are 2**(1 + mantissa_bits).
# For more details, see:
# https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation
x_dtype = dtype_util.base_dtype(x.dtype)
max_event_size = (
_largest_integer_by_dtype(x_dtype)
if dtype_util.is_floating(x_dtype) else 0)
    if max_event_size == 0:
raise TypeError("Unable to validate size of unrecognized dtype "
"({}).".format(dtype_util.name(x_dtype)))
try:
x_shape_static = tensorshape_util.with_rank_at_least(x.shape, 1)
except ValueError:
raise ValueError("A categorical-distribution parameter must have "
"at least 1 dimension.")
event_size = tf.compat.dimension_value(x_shape_static[-1])
if event_size is not None:
if event_size < 2:
raise ValueError("A categorical-distribution parameter must have at "
"least 2 events.")
if event_size > max_event_size:
raise ValueError("Number of classes exceeds `dtype` precision, i.e., "
"{} implies shape ({}) cannot exceed {}.".format(
dtype_util.name(x_dtype), event_size,
max_event_size))
return x
else:
event_size = tf.shape(input=x, out_type=tf.int64, name="x_shape")[-1]
return with_dependencies([
assert_util.assert_rank_at_least(
x,
1,
message=("A categorical-distribution parameter must have "
"at least 1 dimension.")),
assert_util.assert_greater_equal(
tf.shape(input=x)[-1],
2,
message=("A categorical-distribution parameter must have at "
"least 2 events.")),
assert_util.assert_less_equal(
event_size,
tf.convert_to_tensor(max_event_size, dtype=tf.int64),
message="Number of classes exceeds `dtype` precision, "
"i.e., {} dtype cannot exceed {} shape.".format(
dtype_util.name(x_dtype), max_event_size)),
], x)
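
A NumPy sketch of the static bound (2048 classes is the float16 limit quoted in the docstring; the helper name is illustrative):

```python
import numpy as np

def max_event_size(float_dtype):
  # Largest exactly representable integer, capped by the largest int32 index.
  return min(2**31 - 1, int(2**(np.finfo(float_dtype).nmant + 1)))

probs = np.full([2049], 1. / 2049, dtype=np.float16)
print(probs.shape[-1] > max_event_size(np.float16))  # True -> would be rejected
```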
|
[
"Embeds",
"checks",
"that",
"categorical",
"distributions",
"don",
"t",
"have",
"too",
"many",
"classes",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L905-L999
|
[
"def",
"embed_check_categorical_event_shape",
"(",
"categorical_param",
",",
"name",
"=",
"\"embed_check_categorical_event_shape\"",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"categorical_param",
",",
"name",
"=",
"\"categorical_param\"",
")",
"# The size must not exceed both of:",
"# - The largest possible int32 (since categorical values are presumed to be",
"# indexes into a Tensor).",
"# - The largest possible integer exactly representable under the given",
"# floating-point dtype (since we need to cast to/from).",
"#",
"# The chosen floating-point thresholds are 2**(1 + mantissa_bits).",
"# For more details, see:",
"# https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation",
"x_dtype",
"=",
"dtype_util",
".",
"base_dtype",
"(",
"x",
".",
"dtype",
")",
"max_event_size",
"=",
"(",
"_largest_integer_by_dtype",
"(",
"x_dtype",
")",
"if",
"dtype_util",
".",
"is_floating",
"(",
"x_dtype",
")",
"else",
"0",
")",
"if",
"max_event_size",
"is",
"0",
":",
"raise",
"TypeError",
"(",
"\"Unable to validate size of unrecognized dtype \"",
"\"({}).\"",
".",
"format",
"(",
"dtype_util",
".",
"name",
"(",
"x_dtype",
")",
")",
")",
"try",
":",
"x_shape_static",
"=",
"tensorshape_util",
".",
"with_rank_at_least",
"(",
"x",
".",
"shape",
",",
"1",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"A categorical-distribution parameter must have \"",
"\"at least 1 dimension.\"",
")",
"event_size",
"=",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"x_shape_static",
"[",
"-",
"1",
"]",
")",
"if",
"event_size",
"is",
"not",
"None",
":",
"if",
"event_size",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"\"A categorical-distribution parameter must have at \"",
"\"least 2 events.\"",
")",
"if",
"event_size",
">",
"max_event_size",
":",
"raise",
"ValueError",
"(",
"\"Number of classes exceeds `dtype` precision, i.e., \"",
"\"{} implies shape ({}) cannot exceed {}.\"",
".",
"format",
"(",
"dtype_util",
".",
"name",
"(",
"x_dtype",
")",
",",
"event_size",
",",
"max_event_size",
")",
")",
"return",
"x",
"else",
":",
"event_size",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
",",
"out_type",
"=",
"tf",
".",
"int64",
",",
"name",
"=",
"\"x_shape\"",
")",
"[",
"-",
"1",
"]",
"return",
"with_dependencies",
"(",
"[",
"assert_util",
".",
"assert_rank_at_least",
"(",
"x",
",",
"1",
",",
"message",
"=",
"(",
"\"A categorical-distribution parameter must have \"",
"\"at least 1 dimension.\"",
")",
")",
",",
"assert_util",
".",
"assert_greater_equal",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"[",
"-",
"1",
"]",
",",
"2",
",",
"message",
"=",
"(",
"\"A categorical-distribution parameter must have at \"",
"\"least 2 events.\"",
")",
")",
",",
"assert_util",
".",
"assert_less_equal",
"(",
"event_size",
",",
"tf",
".",
"convert_to_tensor",
"(",
"max_event_size",
",",
"dtype",
"=",
"tf",
".",
"int64",
")",
",",
"message",
"=",
"\"Number of classes exceeds `dtype` precision, \"",
"\"i.e., {} dtype cannot exceed {} shape.\"",
".",
"format",
"(",
"dtype_util",
".",
"name",
"(",
"x_dtype",
")",
",",
"max_event_size",
")",
")",
",",
"]",
",",
"x",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
embed_check_integer_casting_closed
|
Ensures integers remain unaffected despite casting to/from int/float types.
Example integer-types: `uint8`, `int32`, `bool`.
Example floating-types: `float32`, `float64`.
The largest possible integer representable by an IEEE754 floating-point is
`2**(1 + mantissa_bits)` yet the largest possible integer as an int-type is
`2**(bits - 1) - 1`. This function ensures that a `Tensor` purporting to have
integer-form values can be cast to some other type without loss of precision.
The smallest representable integer is the negative of the largest
representable integer, except for types: `uint8`, `uint16`, `bool`. For these
types, the smallest representable integer is `0`.
Args:
x: `Tensor` representing integer-form values.
target_dtype: TF `dtype` under which `x` should have identical values.
assert_nonnegative: `bool` indicating `x` should contain nonnegative values.
assert_positive: `bool` indicating `x` should contain positive values.
name: A name for this operation (optional).
Returns:
x: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `x` is neither integer- nor floating-type.
TypeError: if `target_dtype` is neither integer- nor floating-type.
TypeError: if neither `x` nor `target_dtype` are integer-type.
|
tensorflow_probability/python/internal/distribution_util.py
|
def embed_check_integer_casting_closed(x,
target_dtype,
assert_nonnegative=True,
assert_positive=False,
name="embed_check_casting_closed"):
"""Ensures integers remain unaffected despite casting to/from int/float types.
Example integer-types: `uint8`, `int32`, `bool`.
Example floating-types: `float32`, `float64`.
The largest possible integer representable by an IEEE754 floating-point is
`2**(1 + mantissa_bits)` yet the largest possible integer as an int-type is
`2**(bits - 1) - 1`. This function ensures that a `Tensor` purporting to have
integer-form values can be cast to some other type without loss of precision.
The smallest representable integer is the negative of the largest
representable integer, except for types: `uint8`, `uint16`, `bool`. For these
types, the smallest representable integer is `0`.
Args:
x: `Tensor` representing integer-form values.
target_dtype: TF `dtype` under which `x` should have identical values.
assert_nonnegative: `bool` indicating `x` should contain nonnegative values.
assert_positive: `bool` indicating `x` should contain positive values.
name: A name for this operation (optional).
Returns:
x: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `x` is neither integer- nor floating-type.
TypeError: if `target_dtype` is neither integer- nor floating-type.
TypeError: if neither `x` nor `target_dtype` are integer-type.
"""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name="x")
if (not _is_integer_like_by_dtype(x.dtype) and
not dtype_util.is_floating(x.dtype)):
raise TypeError("{}.dtype must be floating- or "
"integer-type.".format(dtype_util.name(x.dtype)))
if (not _is_integer_like_by_dtype(target_dtype) and
not dtype_util.is_floating(target_dtype)):
raise TypeError("target_dtype ({}) must be floating- or "
"integer-type.".format(dtype_util.name(target_dtype)))
if (not _is_integer_like_by_dtype(x.dtype) and
not _is_integer_like_by_dtype(target_dtype)):
raise TypeError("At least one of {}.dtype ({}) and target_dtype ({}) "
"must be integer-type.".format(
x, dtype_util.name(x.dtype),
dtype_util.name(target_dtype)))
assertions = []
if assert_positive:
assertions += [
assert_util.assert_positive(x, message="Elements must be positive."),
]
elif assert_nonnegative:
assertions += [
assert_util.assert_non_negative(
x, message="Elements must be non-negative."),
]
if dtype_util.is_floating(x.dtype):
# Being here means _is_integer_like_by_dtype(target_dtype) = True.
# Since this check implies the magnitude check below, we need only it.
assertions += [
assert_integer_form(
x,
int_dtype=target_dtype,
message="Elements must be {}-equivalent.".format(
dtype_util.name(target_dtype))),
]
else:
if (_largest_integer_by_dtype(x.dtype) >
_largest_integer_by_dtype(target_dtype)):
# Cast may lose integer precision.
assertions += [
assert_util.assert_less_equal(
x,
_largest_integer_by_dtype(target_dtype),
message=("Elements cannot exceed {}.".format(
_largest_integer_by_dtype(target_dtype)))),
]
if (not assert_nonnegative and (_smallest_integer_by_dtype(
x.dtype) < _smallest_integer_by_dtype(target_dtype))):
assertions += [
assert_util.assert_greater_equal(
x,
_smallest_integer_by_dtype(target_dtype),
message=("Elements cannot be smaller than {}.".format(
_smallest_integer_by_dtype(target_dtype)))),
]
if not assertions:
return x
return with_dependencies(assertions, x)
|
def embed_check_integer_casting_closed(x,
target_dtype,
assert_nonnegative=True,
assert_positive=False,
name="embed_check_casting_closed"):
"""Ensures integers remain unaffected despite casting to/from int/float types.
Example integer-types: `uint8`, `int32`, `bool`.
Example floating-types: `float32`, `float64`.
The largest possible integer representable by an IEEE754 floating-point is
`2**(1 + mantissa_bits)` yet the largest possible integer as an int-type is
`2**(bits - 1) - 1`. This function ensures that a `Tensor` purporting to have
integer-form values can be cast to some other type without loss of precision.
The smallest representable integer is the negative of the largest
representable integer, except for types: `uint8`, `uint16`, `bool`. For these
types, the smallest representable integer is `0`.
Args:
x: `Tensor` representing integer-form values.
target_dtype: TF `dtype` under which `x` should have identical values.
assert_nonnegative: `bool` indicating `x` should contain nonnegative values.
assert_positive: `bool` indicating `x` should contain positive values.
name: A name for this operation (optional).
Returns:
x: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `x` is neither integer- nor floating-type.
TypeError: if `target_dtype` is neither integer- nor floating-type.
TypeError: if neither `x` nor `target_dtype` are integer-type.
"""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name="x")
if (not _is_integer_like_by_dtype(x.dtype) and
not dtype_util.is_floating(x.dtype)):
raise TypeError("{}.dtype must be floating- or "
"integer-type.".format(dtype_util.name(x.dtype)))
if (not _is_integer_like_by_dtype(target_dtype) and
not dtype_util.is_floating(target_dtype)):
raise TypeError("target_dtype ({}) must be floating- or "
"integer-type.".format(dtype_util.name(target_dtype)))
if (not _is_integer_like_by_dtype(x.dtype) and
not _is_integer_like_by_dtype(target_dtype)):
raise TypeError("At least one of {}.dtype ({}) and target_dtype ({}) "
"must be integer-type.".format(
x, dtype_util.name(x.dtype),
dtype_util.name(target_dtype)))
assertions = []
if assert_positive:
assertions += [
assert_util.assert_positive(x, message="Elements must be positive."),
]
elif assert_nonnegative:
assertions += [
assert_util.assert_non_negative(
x, message="Elements must be non-negative."),
]
if dtype_util.is_floating(x.dtype):
# Being here means _is_integer_like_by_dtype(target_dtype) = True.
# Since this check implies the magnitude check below, we need only it.
assertions += [
assert_integer_form(
x,
int_dtype=target_dtype,
message="Elements must be {}-equivalent.".format(
dtype_util.name(target_dtype))),
]
else:
if (_largest_integer_by_dtype(x.dtype) >
_largest_integer_by_dtype(target_dtype)):
# Cast may lose integer precision.
assertions += [
assert_util.assert_less_equal(
x,
_largest_integer_by_dtype(target_dtype),
message=("Elements cannot exceed {}.".format(
_largest_integer_by_dtype(target_dtype)))),
]
if (not assert_nonnegative and (_smallest_integer_by_dtype(
x.dtype) < _smallest_integer_by_dtype(target_dtype))):
assertions += [
assert_util.assert_greater_equal(
x,
_smallest_integer_by_dtype(target_dtype),
message=("Elements cannot be smaller than {}.".format(
_smallest_integer_by_dtype(target_dtype)))),
]
if not assertions:
return x
return with_dependencies(assertions, x)
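
The precision hazard these checks guard against, shown directly in NumPy:

```python
import numpy as np

big = 2**53 + 1                          # beyond float64's exact-integer range
print(int(np.float64(big)) == big)       # False: the round trip drops the +1
print(int(np.float64(2**53)) == 2**53)   # True: still exactly representable
```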
|
[
"Ensures",
"integers",
"remain",
"unaffected",
"despite",
"casting",
"to",
"/",
"from",
"int",
"/",
"float",
"types",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1002-L1098
|
[
"def",
"embed_check_integer_casting_closed",
"(",
"x",
",",
"target_dtype",
",",
"assert_nonnegative",
"=",
"True",
",",
"assert_positive",
"=",
"False",
",",
"name",
"=",
"\"embed_check_casting_closed\"",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"x\"",
")",
"if",
"(",
"not",
"_is_integer_like_by_dtype",
"(",
"x",
".",
"dtype",
")",
"and",
"not",
"dtype_util",
".",
"is_floating",
"(",
"x",
".",
"dtype",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"{}.dtype must be floating- or \"",
"\"integer-type.\"",
".",
"format",
"(",
"dtype_util",
".",
"name",
"(",
"x",
".",
"dtype",
")",
")",
")",
"if",
"(",
"not",
"_is_integer_like_by_dtype",
"(",
"target_dtype",
")",
"and",
"not",
"dtype_util",
".",
"is_floating",
"(",
"target_dtype",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"target_dtype ({}) must be floating- or \"",
"\"integer-type.\"",
".",
"format",
"(",
"dtype_util",
".",
"name",
"(",
"target_dtype",
")",
")",
")",
"if",
"(",
"not",
"_is_integer_like_by_dtype",
"(",
"x",
".",
"dtype",
")",
"and",
"not",
"_is_integer_like_by_dtype",
"(",
"target_dtype",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"At least one of {}.dtype ({}) and target_dtype ({}) \"",
"\"must be integer-type.\"",
".",
"format",
"(",
"x",
",",
"dtype_util",
".",
"name",
"(",
"x",
".",
"dtype",
")",
",",
"dtype_util",
".",
"name",
"(",
"target_dtype",
")",
")",
")",
"assertions",
"=",
"[",
"]",
"if",
"assert_positive",
":",
"assertions",
"+=",
"[",
"assert_util",
".",
"assert_positive",
"(",
"x",
",",
"message",
"=",
"\"Elements must be positive.\"",
")",
",",
"]",
"elif",
"assert_nonnegative",
":",
"assertions",
"+=",
"[",
"assert_util",
".",
"assert_non_negative",
"(",
"x",
",",
"message",
"=",
"\"Elements must be non-negative.\"",
")",
",",
"]",
"if",
"dtype_util",
".",
"is_floating",
"(",
"x",
".",
"dtype",
")",
":",
"# Being here means _is_integer_like_by_dtype(target_dtype) = True.",
"# Since this check implies the magnitude check below, we need only it.",
"assertions",
"+=",
"[",
"assert_integer_form",
"(",
"x",
",",
"int_dtype",
"=",
"target_dtype",
",",
"message",
"=",
"\"Elements must be {}-equivalent.\"",
".",
"format",
"(",
"dtype_util",
".",
"name",
"(",
"target_dtype",
")",
")",
")",
",",
"]",
"else",
":",
"if",
"(",
"_largest_integer_by_dtype",
"(",
"x",
".",
"dtype",
")",
">",
"_largest_integer_by_dtype",
"(",
"target_dtype",
")",
")",
":",
"# Cast may lose integer precision.",
"assertions",
"+=",
"[",
"assert_util",
".",
"assert_less_equal",
"(",
"x",
",",
"_largest_integer_by_dtype",
"(",
"target_dtype",
")",
",",
"message",
"=",
"(",
"\"Elements cannot exceed {}.\"",
".",
"format",
"(",
"_largest_integer_by_dtype",
"(",
"target_dtype",
")",
")",
")",
")",
",",
"]",
"if",
"(",
"not",
"assert_nonnegative",
"and",
"(",
"_smallest_integer_by_dtype",
"(",
"x",
".",
"dtype",
")",
"<",
"_smallest_integer_by_dtype",
"(",
"target_dtype",
")",
")",
")",
":",
"assertions",
"+=",
"[",
"assert_util",
".",
"assert_greater_equal",
"(",
"x",
",",
"_smallest_integer_by_dtype",
"(",
"target_dtype",
")",
",",
"message",
"=",
"(",
"\"Elements cannot be smaller than {}.\"",
".",
"format",
"(",
"_smallest_integer_by_dtype",
"(",
"target_dtype",
")",
")",
")",
")",
",",
"]",
"if",
"not",
"assertions",
":",
"return",
"x",
"return",
"with_dependencies",
"(",
"assertions",
",",
"x",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
log_combinations
|
Multinomial coefficient.
Given `n` and `counts`, where `counts` has last dimension `k`, we compute
the multinomial coefficient as:
```n! / prod_i n_i!```
where `i` runs over all `k` classes.
Args:
n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`
outcomes.
counts: Floating-point `Tensor` broadcastable with `n`. This represents
counts in `k` classes, where `k` is the last dimension of the tensor.
name: A name for this operation (optional).
Returns:
`Tensor` representing the multinomial coefficient between `n` and `counts`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def log_combinations(n, counts, name="log_combinations"):
"""Multinomial coefficient.
Given `n` and `counts`, where `counts` has last dimension `k`, we compute
the multinomial coefficient as:
```n! / prod_i n_i!```
where `i` runs over all `k` classes.
Args:
n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`
outcomes.
counts: Floating-point `Tensor` broadcastable with `n`. This represents
counts in `k` classes, where `k` is the last dimension of the tensor.
name: A name for this operation (optional).
Returns:
`Tensor` representing the multinomial coefficient between `n` and `counts`.
"""
# First a bit about the number of ways counts could have come in:
# E.g. if counts = [1, 2], then this is 3 choose 2.
  # In general, this is (sum counts)! / prod(counts!)
# The sum should be along the last dimension of counts. This is the
# "distribution" dimension. Here n a priori represents the sum of counts.
with tf.name_scope(name):
n = tf.convert_to_tensor(value=n, name="n")
counts = tf.convert_to_tensor(value=counts, name="counts")
total_permutations = tf.math.lgamma(n + 1)
counts_factorial = tf.math.lgamma(counts + 1)
redundant_permutations = tf.reduce_sum(
input_tensor=counts_factorial, axis=[-1])
return total_permutations - redundant_permutations
|
def log_combinations(n, counts, name="log_combinations"):
"""Multinomial coefficient.
Given `n` and `counts`, where `counts` has last dimension `k`, we compute
the multinomial coefficient as:
```n! / prod_i n_i!```
where `i` runs over all `k` classes.
Args:
n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`
outcomes.
counts: Floating-point `Tensor` broadcastable with `n`. This represents
counts in `k` classes, where `k` is the last dimension of the tensor.
name: A name for this operation (optional).
Returns:
`Tensor` representing the multinomial coefficient between `n` and `counts`.
"""
# First a bit about the number of ways counts could have come in:
# E.g. if counts = [1, 2], then this is 3 choose 2.
  # In general, this is (sum counts)! / prod(counts!)
# The sum should be along the last dimension of counts. This is the
# "distribution" dimension. Here n a priori represents the sum of counts.
with tf.name_scope(name):
n = tf.convert_to_tensor(value=n, name="n")
counts = tf.convert_to_tensor(value=counts, name="counts")
total_permutations = tf.math.lgamma(n + 1)
counts_factorial = tf.math.lgamma(counts + 1)
redundant_permutations = tf.reduce_sum(
input_tensor=counts_factorial, axis=[-1])
return total_permutations - redundant_permutations
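A hedged sanity check of `log_combinations` against a hand computation (illustrative only; assumes eager TF 2.x and the import path shown in these rows). With `n = 3` split into counts `[1, 2]` there are `3!/(1! 2!) = 3` arrangements, so the result should equal `log(3)`:
```python
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.internal import distribution_util

log_coef = distribution_util.log_combinations(n=3., counts=[1., 2.])
np.testing.assert_allclose(log_coef.numpy(), np.log(3.), rtol=1e-6)
```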
|
[
"Multinomial",
"coefficient",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1101-L1133
|
[
"def",
"log_combinations",
"(",
"n",
",",
"counts",
",",
"name",
"=",
"\"log_combinations\"",
")",
":",
"# First a bit about the number of ways counts could have come in:",
"# E.g. if counts = [1, 2], then this is 3 choose 2.",
"# In general, this is (sum counts)! / sum(counts!)",
"# The sum should be along the last dimension of counts. This is the",
"# \"distribution\" dimension. Here n a priori represents the sum of counts.",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
":",
"n",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"n",
",",
"name",
"=",
"\"n\"",
")",
"counts",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"counts",
",",
"name",
"=",
"\"counts\"",
")",
"total_permutations",
"=",
"tf",
".",
"math",
".",
"lgamma",
"(",
"n",
"+",
"1",
")",
"counts_factorial",
"=",
"tf",
".",
"math",
".",
"lgamma",
"(",
"counts",
"+",
"1",
")",
"redundant_permutations",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"counts_factorial",
",",
"axis",
"=",
"[",
"-",
"1",
"]",
")",
"return",
"total_permutations",
"-",
"redundant_permutations"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
matrix_diag_transform
|
Transform diagonal of [batch-]matrix, leave rest of matrix unchanged.
Create a trainable covariance defined by a Cholesky factor:
```python
# Transform network layer into 2 x 2 array.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
# Make the diagonal positive. If the upper triangle was zero, this would be a
# valid Cholesky factor.
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# LinearOperatorLowerTriangular ignores the upper triangle.
operator = LinearOperatorLowerTriangular(chol)
```
Example of heteroskedastic 2-D linear regression.
```python
tfd = tfp.distributions
# Get a trainable Cholesky factor.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# Get a trainable mean.
mu = tf.contrib.layers.fully_connected(activations, 2)
# This is a fully trainable multivariate normal!
dist = tfd.MultivariateNormalTriL(mu, chol)
# Standard log loss. Minimizing this will "train" mu and chol, and then dist
# will be a distribution predicting labels as multivariate Gaussians.
loss = -1 * tf.reduce_mean(dist.log_prob(labels))
```
Args:
matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are
equal.
transform: Element-wise function mapping `Tensors` to `Tensors`. To be
applied to the diagonal of `matrix`. If `None`, `matrix` is returned
unchanged. Defaults to `None`.
name: A name to give created ops. Defaults to "matrix_diag_transform".
Returns:
A `Tensor` with same shape and `dtype` as `matrix`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def matrix_diag_transform(matrix, transform=None, name=None):
"""Transform diagonal of [batch-]matrix, leave rest of matrix unchanged.
Create a trainable covariance defined by a Cholesky factor:
```python
# Transform network layer into 2 x 2 array.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
# Make the diagonal positive. If the upper triangle was zero, this would be a
# valid Cholesky factor.
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# LinearOperatorLowerTriangular ignores the upper triangle.
operator = LinearOperatorLowerTriangular(chol)
```
Example of heteroskedastic 2-D linear regression.
```python
tfd = tfp.distributions
# Get a trainable Cholesky factor.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# Get a trainable mean.
mu = tf.contrib.layers.fully_connected(activations, 2)
# This is a fully trainable multivariate normal!
dist = tfd.MultivariateNormalTriL(mu, chol)
# Standard log loss. Minimizing this will "train" mu and chol, and then dist
# will be a distribution predicting labels as multivariate Gaussians.
loss = -1 * tf.reduce_mean(dist.log_prob(labels))
```
Args:
matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are
equal.
transform: Element-wise function mapping `Tensors` to `Tensors`. To be
applied to the diagonal of `matrix`. If `None`, `matrix` is returned
unchanged. Defaults to `None`.
name: A name to give created ops. Defaults to "matrix_diag_transform".
Returns:
A `Tensor` with same shape and `dtype` as `matrix`.
"""
with tf.name_scope(name or "matrix_diag_transform"):
matrix = tf.convert_to_tensor(value=matrix, name="matrix")
if transform is None:
return matrix
# Replace the diag with transformed diag.
diag = tf.linalg.diag_part(matrix)
transformed_diag = transform(diag)
transformed_mat = tf.linalg.set_diag(matrix, transformed_diag)
return transformed_mat
|
def matrix_diag_transform(matrix, transform=None, name=None):
"""Transform diagonal of [batch-]matrix, leave rest of matrix unchanged.
Create a trainable covariance defined by a Cholesky factor:
```python
# Transform network layer into 2 x 2 array.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
# Make the diagonal positive. If the upper triangle was zero, this would be a
# valid Cholesky factor.
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# LinearOperatorLowerTriangular ignores the upper triangle.
operator = LinearOperatorLowerTriangular(chol)
```
Example of heteroskedastic 2-D linear regression.
```python
tfd = tfp.distributions
# Get a trainable Cholesky factor.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# Get a trainable mean.
mu = tf.contrib.layers.fully_connected(activations, 2)
# This is a fully trainable multivariate normal!
dist = tfd.MultivariateNormalTriL(mu, chol)
# Standard log loss. Minimizing this will "train" mu and chol, and then dist
# will be a distribution predicting labels as multivariate Gaussians.
loss = -1 * tf.reduce_mean(dist.log_prob(labels))
```
Args:
matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are
equal.
transform: Element-wise function mapping `Tensors` to `Tensors`. To be
applied to the diagonal of `matrix`. If `None`, `matrix` is returned
unchanged. Defaults to `None`.
name: A name to give created ops. Defaults to "matrix_diag_transform".
Returns:
A `Tensor` with same shape and `dtype` as `matrix`.
"""
with tf.name_scope(name or "matrix_diag_transform"):
matrix = tf.convert_to_tensor(value=matrix, name="matrix")
if transform is None:
return matrix
# Replace the diag with transformed diag.
diag = tf.linalg.diag_part(matrix)
transformed_diag = transform(diag)
transformed_mat = tf.linalg.set_diag(matrix, transformed_diag)
return transformed_mat
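A minimal sketch of the diagonal-only behavior (illustrative; assumes eager TF 2.x and the import path shown above). Only the diagonal passes through `transform`; off-diagonal entries are returned untouched:
```python
import tensorflow as tf
from tensorflow_probability.python.internal import distribution_util

m = tf.constant([[-1., 2.],
                 [3., -4.]])
chol = distribution_util.matrix_diag_transform(m, transform=tf.nn.softplus)
# Diagonal becomes softplus(-1.) and softplus(-4.) (strictly positive);
# the off-diagonal entries 2. and 3. are unchanged.
```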
|
[
"Transform",
"diagonal",
"of",
"[",
"batch",
"-",
"]",
"matrix",
"leave",
"rest",
"of",
"matrix",
"unchanged",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1136-L1195
|
[
"def",
"matrix_diag_transform",
"(",
"matrix",
",",
"transform",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"matrix_diag_transform\"",
")",
":",
"matrix",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"matrix",
",",
"name",
"=",
"\"matrix\"",
")",
"if",
"transform",
"is",
"None",
":",
"return",
"matrix",
"# Replace the diag with transformed diag.",
"diag",
"=",
"tf",
".",
"linalg",
".",
"diag_part",
"(",
"matrix",
")",
"transformed_diag",
"=",
"transform",
"(",
"diag",
")",
"transformed_mat",
"=",
"tf",
".",
"linalg",
".",
"set_diag",
"(",
"matrix",
",",
"transformed_diag",
")",
"return",
"transformed_mat"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
rotate_transpose
|
Circularly moves dims left or right.
Effectively identical to:
```python
numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift))
```
When `validate_args=False` additional graph-runtime checks are
performed. These checks entail moving data from the GPU to the CPU.
Example:
```python
x = tf.random_normal([1, 2, 3, 4]) # Tensor of shape [1, 2, 3, 4].
rotate_transpose(x, -1).shape == [2, 3, 4, 1]
rotate_transpose(x, -2).shape == [3, 4, 1, 2]
rotate_transpose(x, 1).shape == [4, 1, 2, 3]
rotate_transpose(x, 2).shape == [3, 4, 1, 2]
rotate_transpose(x, 7).shape == rotate_transpose(x, 3).shape # [2, 3, 4, 1]
rotate_transpose(x, -7).shape == rotate_transpose(x, -3).shape # [4, 1, 2, 3]
```
Args:
x: `Tensor`.
shift: `Tensor`. Number of dimensions to transpose left (shift<0) or
transpose right (shift>0).
name: Python `str`. The name to give this op.
Returns:
rotated_x: Input `Tensor` with dimensions circularly rotated by shift.
Raises:
TypeError: if shift is not integer type.
|
tensorflow_probability/python/internal/distribution_util.py
|
def rotate_transpose(x, shift, name="rotate_transpose"):
"""Circularly moves dims left or right.
Effectively identical to:
```python
numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift))
```
When `validate_args=False` additional graph-runtime checks are
performed. These checks entail moving data from the GPU to the CPU.
Example:
```python
x = tf.random_normal([1, 2, 3, 4]) # Tensor of shape [1, 2, 3, 4].
rotate_transpose(x, -1).shape == [2, 3, 4, 1]
rotate_transpose(x, -2).shape == [3, 4, 1, 2]
rotate_transpose(x, 1).shape == [4, 1, 2, 3]
rotate_transpose(x, 2).shape == [3, 4, 1, 2]
rotate_transpose(x, 7).shape == rotate_transpose(x, 3).shape # [2, 3, 4, 1]
rotate_transpose(x, -7).shape == rotate_transpose(x, -3).shape # [4, 1, 2, 3]
```
Args:
x: `Tensor`.
shift: `Tensor`. Number of dimensions to transpose left (shift<0) or
transpose right (shift>0).
name: Python `str`. The name to give this op.
Returns:
rotated_x: Input `Tensor` with dimensions circularly rotated by shift.
Raises:
TypeError: if shift is not integer type.
"""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name="x")
shift = tf.convert_to_tensor(value=shift, name="shift")
# We do not assign back to preserve constant-ness.
assert_util.assert_integer(shift)
shift_value_static = tf.get_static_value(shift)
ndims = tensorshape_util.rank(x.shape)
if ndims is not None and shift_value_static is not None:
if ndims < 2:
return x
shift_value_static = np.sign(shift_value_static) * (
abs(shift_value_static) % ndims)
if shift_value_static == 0:
return x
perm = np.roll(np.arange(ndims), shift_value_static)
return tf.transpose(a=x, perm=perm)
else:
# Consider if we always had a positive shift, and some specified
# direction.
# When shifting left we want the new array:
# last(x, n-shift) + first(x, shift)
# and if shifting right then we want:
# last(x, shift) + first(x, n-shift)
# Observe that last(a) == slice(a, n) and first(a) == slice(0, a).
# Also, we can encode direction and shift as one: direction * shift.
# Combining these facts, we have:
# a = cond(shift<0, -shift, n-shift)
# last(x, n-a) + first(x, a) == x[a:n] + x[0:a]
# Finally, we transform shift by modulo length so it can be specified
# independently from the array upon which it operates (like python).
ndims = tf.rank(x)
shift = tf.where(
tf.less(shift, 0), -shift % ndims,
ndims - shift % ndims)
first = tf.range(0, shift)
last = tf.range(shift, ndims)
perm = tf.concat([last, first], 0)
return tf.transpose(a=x, perm=perm)
|
def rotate_transpose(x, shift, name="rotate_transpose"):
"""Circularly moves dims left or right.
Effectively identical to:
```python
numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift))
```
When `validate_args=False` additional graph-runtime checks are
performed. These checks entail moving data from the GPU to the CPU.
Example:
```python
x = tf.random_normal([1, 2, 3, 4]) # Tensor of shape [1, 2, 3, 4].
rotate_transpose(x, -1).shape == [2, 3, 4, 1]
rotate_transpose(x, -2).shape == [3, 4, 1, 2]
rotate_transpose(x, 1).shape == [4, 1, 2, 3]
rotate_transpose(x, 2).shape == [3, 4, 1, 2]
rotate_transpose(x, 7).shape == rotate_transpose(x, 3).shape # [2, 3, 4, 1]
rotate_transpose(x, -7).shape == rotate_transpose(x, -3).shape # [4, 1, 2, 3]
```
Args:
x: `Tensor`.
shift: `Tensor`. Number of dimensions to transpose left (shift<0) or
transpose right (shift>0).
name: Python `str`. The name to give this op.
Returns:
rotated_x: Input `Tensor` with dimensions circularly rotated by shift.
Raises:
TypeError: if shift is not integer type.
"""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name="x")
shift = tf.convert_to_tensor(value=shift, name="shift")
# We do not assign back to preserve constant-ness.
assert_util.assert_integer(shift)
shift_value_static = tf.get_static_value(shift)
ndims = tensorshape_util.rank(x.shape)
if ndims is not None and shift_value_static is not None:
if ndims < 2:
return x
shift_value_static = np.sign(shift_value_static) * (
abs(shift_value_static) % ndims)
if shift_value_static == 0:
return x
perm = np.roll(np.arange(ndims), shift_value_static)
return tf.transpose(a=x, perm=perm)
else:
# Consider if we always had a positive shift, and some specified
# direction.
# When shifting left we want the new array:
# last(x, n-shift) + first(x, shift)
# and if shifting right then we want:
# last(x, shift) + first(x, n-shift)
# Observe that last(a) == slice(a, n) and first(a) == slice(0, a).
# Also, we can encode direction and shift as one: direction * shift.
# Combining these facts, we have:
# a = cond(shift<0, -shift, n-shift)
# last(x, n-a) + first(x, a) == x[a:n] + x[0:a]
# Finally, we transform shift by modulo length so it can be specified
# independently from the array upon which it operates (like python).
ndims = tf.rank(x)
shift = tf.where(
tf.less(shift, 0), -shift % ndims,
ndims - shift % ndims)
first = tf.range(0, shift)
last = tf.range(shift, ndims)
perm = tf.concat([last, first], 0)
return tf.transpose(a=x, perm=perm)
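A short shape check matching the docstring's examples (illustrative; assumes eager TF 2.x and the import path shown above):
```python
import tensorflow as tf
from tensorflow_probability.python.internal import distribution_util

x = tf.zeros([1, 2, 3, 4])
print(distribution_util.rotate_transpose(x, shift=2).shape)   # (3, 4, 1, 2)
print(distribution_util.rotate_transpose(x, shift=-1).shape)  # (2, 3, 4, 1)
```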
|
[
"Circularly",
"moves",
"dims",
"left",
"or",
"right",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1198-L1271
|
[
"def",
"rotate_transpose",
"(",
"x",
",",
"shift",
",",
"name",
"=",
"\"rotate_transpose\"",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"x\"",
")",
"shift",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"shift",
",",
"name",
"=",
"\"shift\"",
")",
"# We do not assign back to preserve constant-ness.",
"assert_util",
".",
"assert_integer",
"(",
"shift",
")",
"shift_value_static",
"=",
"tf",
".",
"get_static_value",
"(",
"shift",
")",
"ndims",
"=",
"tensorshape_util",
".",
"rank",
"(",
"x",
".",
"shape",
")",
"if",
"ndims",
"is",
"not",
"None",
"and",
"shift_value_static",
"is",
"not",
"None",
":",
"if",
"ndims",
"<",
"2",
":",
"return",
"x",
"shift_value_static",
"=",
"np",
".",
"sign",
"(",
"shift_value_static",
")",
"*",
"(",
"abs",
"(",
"shift_value_static",
")",
"%",
"ndims",
")",
"if",
"shift_value_static",
"==",
"0",
":",
"return",
"x",
"perm",
"=",
"np",
".",
"roll",
"(",
"np",
".",
"arange",
"(",
"ndims",
")",
",",
"shift_value_static",
")",
"return",
"tf",
".",
"transpose",
"(",
"a",
"=",
"x",
",",
"perm",
"=",
"perm",
")",
"else",
":",
"# Consider if we always had a positive shift, and some specified",
"# direction.",
"# When shifting left we want the new array:",
"# last(x, n-shift) + first(x, shift)",
"# and if shifting right then we want:",
"# last(x, shift) + first(x, n-shift)",
"# Observe that last(a) == slice(a, n) and first(a) == slice(0, a).",
"# Also, we can encode direction and shift as one: direction * shift.",
"# Combining these facts, we have:",
"# a = cond(shift<0, -shift, n-shift)",
"# last(x, n-a) + first(x, a) == x[a:n] + x[0:a]",
"# Finally, we transform shift by modulo length so it can be specified",
"# independently from the array upon which it operates (like python).",
"ndims",
"=",
"tf",
".",
"rank",
"(",
"x",
")",
"shift",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"less",
"(",
"shift",
",",
"0",
")",
",",
"-",
"shift",
"%",
"ndims",
",",
"ndims",
"-",
"shift",
"%",
"ndims",
")",
"first",
"=",
"tf",
".",
"range",
"(",
"0",
",",
"shift",
")",
"last",
"=",
"tf",
".",
"range",
"(",
"shift",
",",
"ndims",
")",
"perm",
"=",
"tf",
".",
"concat",
"(",
"[",
"last",
",",
"first",
"]",
",",
"0",
")",
"return",
"tf",
".",
"transpose",
"(",
"a",
"=",
"x",
",",
"perm",
"=",
"perm",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
pick_vector
|
Picks possibly different length row `Tensor`s based on condition.
Value `Tensor`s should have exactly one dimension.
If `cond` is a python Boolean or `tf.constant` then either `true_vector` or
`false_vector` is immediately returned. I.e., no graph nodes are created and
no validation happens.
Args:
cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
true_vector: `Tensor` of one dimension. Returned when cond is `True`.
false_vector: `Tensor` of one dimension. Returned when cond is `False`.
name: Python `str`. The name to give this op.
Example:
```python
pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18))  # [10, 11]
pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18))  # [15, 16, 17]
```
Returns:
true_or_false_vector: `Tensor`.
Raises:
TypeError: if `cond.dtype != tf.bool`
TypeError: if `cond` is not a constant and
`true_vector.dtype != false_vector.dtype`
|
tensorflow_probability/python/internal/distribution_util.py
|
def pick_vector(cond, true_vector, false_vector, name="pick_vector"):
"""Picks possibly different length row `Tensor`s based on condition.
Value `Tensor`s should have exactly one dimension.
If `cond` is a python Boolean or `tf.constant` then either `true_vector` or
`false_vector` is immediately returned. I.e., no graph nodes are created and
no validation happens.
Args:
cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
true_vector: `Tensor` of one dimension. Returned when cond is `True`.
false_vector: `Tensor` of one dimension. Returned when cond is `False`.
name: Python `str`. The name to give this op.
  Example:
  ```python
  pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18))  # [10, 11]
  pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18))  # [15, 16, 17]
  ```
Returns:
true_or_false_vector: `Tensor`.
Raises:
TypeError: if `cond.dtype != tf.bool`
TypeError: if `cond` is not a constant and
`true_vector.dtype != false_vector.dtype`
"""
with tf.name_scope(name):
cond = tf.convert_to_tensor(
value=cond, dtype_hint=tf.bool, name="cond")
if cond.dtype != tf.bool:
raise TypeError(
"{}.dtype={} which is not {}".format(cond, cond.dtype, tf.bool))
true_vector = tf.convert_to_tensor(value=true_vector, name="true_vector")
false_vector = tf.convert_to_tensor(value=false_vector, name="false_vector")
if true_vector.dtype != false_vector.dtype:
raise TypeError(
"{}.dtype={} does not match {}.dtype={}".format(
true_vector, true_vector.dtype, false_vector, false_vector.dtype))
cond_value_static = tf.get_static_value(cond)
if cond_value_static is not None:
return true_vector if cond_value_static else false_vector
n = tf.shape(input=true_vector)[0]
return tf.slice(
tf.concat([true_vector, false_vector], 0), [tf.where(cond, 0, n)],
[tf.where(cond, n, -1)])
|
def pick_vector(cond, true_vector, false_vector, name="pick_vector"):
"""Picks possibly different length row `Tensor`s based on condition.
Value `Tensor`s should have exactly one dimension.
If `cond` is a python Boolean or `tf.constant` then either `true_vector` or
`false_vector` is immediately returned. I.e., no graph nodes are created and
no validation happens.
Args:
cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
true_vector: `Tensor` of one dimension. Returned when cond is `True`.
false_vector: `Tensor` of one dimension. Returned when cond is `False`.
name: Python `str`. The name to give this op.
  Example:
  ```python
  pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18))  # [10, 11]
  pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18))  # [15, 16, 17]
  ```
Returns:
true_or_false_vector: `Tensor`.
Raises:
TypeError: if `cond.dtype != tf.bool`
TypeError: if `cond` is not a constant and
`true_vector.dtype != false_vector.dtype`
"""
with tf.name_scope(name):
cond = tf.convert_to_tensor(
value=cond, dtype_hint=tf.bool, name="cond")
if cond.dtype != tf.bool:
raise TypeError(
"{}.dtype={} which is not {}".format(cond, cond.dtype, tf.bool))
true_vector = tf.convert_to_tensor(value=true_vector, name="true_vector")
false_vector = tf.convert_to_tensor(value=false_vector, name="false_vector")
if true_vector.dtype != false_vector.dtype:
raise TypeError(
"{}.dtype={} does not match {}.dtype={}".format(
true_vector, true_vector.dtype, false_vector, false_vector.dtype))
cond_value_static = tf.get_static_value(cond)
if cond_value_static is not None:
return true_vector if cond_value_static else false_vector
n = tf.shape(input=true_vector)[0]
return tf.slice(
tf.concat([true_vector, false_vector], 0), [tf.where(cond, 0, n)],
[tf.where(cond, n, -1)])
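The docstring example, spelled out as a runnable sketch (illustrative; assumes eager TF 2.x and the import path shown above). Because the condition is a constant, the chosen vector is returned directly with no graph ops:
```python
import tensorflow as tf
from tensorflow_probability.python.internal import distribution_util

v = distribution_util.pick_vector(
    tf.less(0, 5), tf.range(10, 12), tf.range(15, 18))
# ==> [10, 11]
```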
|
[
"Picks",
"possibly",
"different",
"length",
"row",
"Tensor",
"s",
"based",
"on",
"condition",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1274-L1320
|
[
"def",
"pick_vector",
"(",
"cond",
",",
"true_vector",
",",
"false_vector",
",",
"name",
"=",
"\"pick_vector\"",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
":",
"cond",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"cond",
",",
"dtype_hint",
"=",
"tf",
".",
"bool",
",",
"name",
"=",
"\"cond\"",
")",
"if",
"cond",
".",
"dtype",
"!=",
"tf",
".",
"bool",
":",
"raise",
"TypeError",
"(",
"\"{}.dtype={} which is not {}\"",
".",
"format",
"(",
"cond",
",",
"cond",
".",
"dtype",
",",
"tf",
".",
"bool",
")",
")",
"true_vector",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"true_vector",
",",
"name",
"=",
"\"true_vector\"",
")",
"false_vector",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"false_vector",
",",
"name",
"=",
"\"false_vector\"",
")",
"if",
"true_vector",
".",
"dtype",
"!=",
"false_vector",
".",
"dtype",
":",
"raise",
"TypeError",
"(",
"\"{}.dtype={} does not match {}.dtype={}\"",
".",
"format",
"(",
"true_vector",
",",
"true_vector",
".",
"dtype",
",",
"false_vector",
",",
"false_vector",
".",
"dtype",
")",
")",
"cond_value_static",
"=",
"tf",
".",
"get_static_value",
"(",
"cond",
")",
"if",
"cond_value_static",
"is",
"not",
"None",
":",
"return",
"true_vector",
"if",
"cond_value_static",
"else",
"false_vector",
"n",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"true_vector",
")",
"[",
"0",
"]",
"return",
"tf",
".",
"slice",
"(",
"tf",
".",
"concat",
"(",
"[",
"true_vector",
",",
"false_vector",
"]",
",",
"0",
")",
",",
"[",
"tf",
".",
"where",
"(",
"cond",
",",
"0",
",",
"n",
")",
"]",
",",
"[",
"tf",
".",
"where",
"(",
"cond",
",",
"n",
",",
"-",
"1",
")",
"]",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
prefer_static_broadcast_shape
|
Convenience function which statically broadcasts shape when possible.
Args:
shape1: `1-D` integer `Tensor`. Already converted to tensor!
shape2: `1-D` integer `Tensor`. Already converted to tensor!
name: A string name to prepend to created ops.
Returns:
The broadcast shape, either as `TensorShape` (if broadcast can be done
statically), or as a `Tensor`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def prefer_static_broadcast_shape(shape1,
shape2,
name="prefer_static_broadcast_shape"):
"""Convenience function which statically broadcasts shape when possible.
Args:
shape1: `1-D` integer `Tensor`. Already converted to tensor!
shape2: `1-D` integer `Tensor`. Already converted to tensor!
name: A string name to prepend to created ops.
Returns:
The broadcast shape, either as `TensorShape` (if broadcast can be done
statically), or as a `Tensor`.
"""
with tf.name_scope(name):
def make_shape_tensor(x):
return tf.convert_to_tensor(value=x, name="shape", dtype=tf.int32)
def get_tensor_shape(s):
if isinstance(s, tf.TensorShape):
return s
s_ = tf.get_static_value(make_shape_tensor(s))
if s_ is not None:
return tf.TensorShape(s_)
return None
def get_shape_tensor(s):
if not isinstance(s, tf.TensorShape):
return make_shape_tensor(s)
if tensorshape_util.is_fully_defined(s):
return make_shape_tensor(tensorshape_util.as_list(s))
raise ValueError("Cannot broadcast from partially "
"defined `TensorShape`.")
shape1_ = get_tensor_shape(shape1)
shape2_ = get_tensor_shape(shape2)
if shape1_ is not None and shape2_ is not None:
return tf.broadcast_static_shape(shape1_, shape2_)
shape1_ = get_shape_tensor(shape1)
shape2_ = get_shape_tensor(shape2)
return tf.broadcast_dynamic_shape(shape1_, shape2_)
|
def prefer_static_broadcast_shape(shape1,
shape2,
name="prefer_static_broadcast_shape"):
"""Convenience function which statically broadcasts shape when possible.
Args:
shape1: `1-D` integer `Tensor`. Already converted to tensor!
shape2: `1-D` integer `Tensor`. Already converted to tensor!
name: A string name to prepend to created ops.
Returns:
The broadcast shape, either as `TensorShape` (if broadcast can be done
statically), or as a `Tensor`.
"""
with tf.name_scope(name):
def make_shape_tensor(x):
return tf.convert_to_tensor(value=x, name="shape", dtype=tf.int32)
def get_tensor_shape(s):
if isinstance(s, tf.TensorShape):
return s
s_ = tf.get_static_value(make_shape_tensor(s))
if s_ is not None:
return tf.TensorShape(s_)
return None
def get_shape_tensor(s):
if not isinstance(s, tf.TensorShape):
return make_shape_tensor(s)
if tensorshape_util.is_fully_defined(s):
return make_shape_tensor(tensorshape_util.as_list(s))
raise ValueError("Cannot broadcast from partially "
"defined `TensorShape`.")
shape1_ = get_tensor_shape(shape1)
shape2_ = get_tensor_shape(shape2)
if shape1_ is not None and shape2_ is not None:
return tf.broadcast_static_shape(shape1_, shape2_)
shape1_ = get_shape_tensor(shape1)
shape2_ = get_shape_tensor(shape2)
return tf.broadcast_dynamic_shape(shape1_, shape2_)
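A minimal sketch of the static path (illustrative; assumes eager TF 2.x and the import path shown above). When both shapes are statically known, a `TensorShape` comes back rather than a `Tensor`:
```python
import tensorflow as tf
from tensorflow_probability.python.internal import distribution_util

s = distribution_util.prefer_static_broadcast_shape(
    tf.constant([2, 1]), tf.constant([1, 3]))
# ==> TensorShape([2, 3])
```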
|
[
"Convenience",
"function",
"which",
"statically",
"broadcasts",
"shape",
"when",
"possible",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1323-L1365
|
[
"def",
"prefer_static_broadcast_shape",
"(",
"shape1",
",",
"shape2",
",",
"name",
"=",
"\"prefer_static_broadcast_shape\"",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
":",
"def",
"make_shape_tensor",
"(",
"x",
")",
":",
"return",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"shape\"",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"def",
"get_tensor_shape",
"(",
"s",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"tf",
".",
"TensorShape",
")",
":",
"return",
"s",
"s_",
"=",
"tf",
".",
"get_static_value",
"(",
"make_shape_tensor",
"(",
"s",
")",
")",
"if",
"s_",
"is",
"not",
"None",
":",
"return",
"tf",
".",
"TensorShape",
"(",
"s_",
")",
"return",
"None",
"def",
"get_shape_tensor",
"(",
"s",
")",
":",
"if",
"not",
"isinstance",
"(",
"s",
",",
"tf",
".",
"TensorShape",
")",
":",
"return",
"make_shape_tensor",
"(",
"s",
")",
"if",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"s",
")",
":",
"return",
"make_shape_tensor",
"(",
"tensorshape_util",
".",
"as_list",
"(",
"s",
")",
")",
"raise",
"ValueError",
"(",
"\"Cannot broadcast from partially \"",
"\"defined `TensorShape`.\"",
")",
"shape1_",
"=",
"get_tensor_shape",
"(",
"shape1",
")",
"shape2_",
"=",
"get_tensor_shape",
"(",
"shape2",
")",
"if",
"shape1_",
"is",
"not",
"None",
"and",
"shape2_",
"is",
"not",
"None",
":",
"return",
"tf",
".",
"broadcast_static_shape",
"(",
"shape1_",
",",
"shape2_",
")",
"shape1_",
"=",
"get_shape_tensor",
"(",
"shape1",
")",
"shape2_",
"=",
"get_shape_tensor",
"(",
"shape2",
")",
"return",
"tf",
".",
"broadcast_dynamic_shape",
"(",
"shape1_",
",",
"shape2_",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
gen_new_seed
|
Generate a new seed, from the given seed and salt.
|
tensorflow_probability/python/internal/distribution_util.py
|
def gen_new_seed(seed, salt):
"""Generate a new seed, from the given seed and salt."""
if seed is None:
return None
string = (str(seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
|
def gen_new_seed(seed, salt):
"""Generate a new seed, from the given seed and salt."""
if seed is None:
return None
string = (str(seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
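A hedged illustration of the determinism of `gen_new_seed` (assumes only the import path shown above; no TF session is needed since this is pure Python):
```python
from tensorflow_probability.python.internal import distribution_util

s1 = distribution_util.gen_new_seed(42, salt="my_op")
s2 = distribution_util.gen_new_seed(42, salt="my_op")
assert s1 == s2 and 0 <= s1 <= 0x7FFFFFFF  # deterministic non-negative int
assert distribution_util.gen_new_seed(None, salt="my_op") is None
```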
|
[
"Generate",
"a",
"new",
"seed",
"from",
"the",
"given",
"seed",
"and",
"salt",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1407-L1412
|
[
"def",
"gen_new_seed",
"(",
"seed",
",",
"salt",
")",
":",
"if",
"seed",
"is",
"None",
":",
"return",
"None",
"string",
"=",
"(",
"str",
"(",
"seed",
")",
"+",
"salt",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
"return",
"int",
"(",
"hashlib",
".",
"md5",
"(",
"string",
")",
".",
"hexdigest",
"(",
")",
"[",
":",
"8",
"]",
",",
"16",
")",
"&",
"0x7FFFFFFF"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
fill_triangular
|
r"""Creates a (batch of) triangular matrix from a vector of inputs.
Created matrix can be lower- or upper-triangular. (It is more efficient to
create the matrix as upper or lower, rather than transpose.)
Triangular matrix elements are filled in a clockwise spiral. See example,
below.
If `x.shape` is `[b1, b2, ..., bB, d]` then the output shape is
`[b1, b2, ..., bB, n, n]` where `n` is such that `d = n(n+1)/2`, i.e.,
`n = int(np.sqrt(0.25 + 2. * d) - 0.5)`.
Example:
```python
fill_triangular([1, 2, 3, 4, 5, 6])
# ==> [[4, 0, 0],
# [6, 5, 0],
# [3, 2, 1]]
fill_triangular([1, 2, 3, 4, 5, 6], upper=True)
# ==> [[1, 2, 3],
# [0, 5, 6],
# [0, 0, 4]]
```
The key trick is to create an upper triangular matrix by concatenating `x`
and a tail of itself, then reshaping.
Suppose that we are filling the upper triangle of an `n`-by-`n` matrix `M`
from a vector `x`. The matrix `M` contains n**2 entries total. The vector `x`
contains `n * (n+1) / 2` entries. For concreteness, we'll consider `n = 5`
(so `x` has `15` entries and `M` has `25`). We'll concatenate `x` and `x` with
the first (`n = 5`) elements removed and reversed:
```python
x = np.arange(15) + 1
xc = np.concatenate([x, x[5:][::-1]])
# ==> array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 14, 13,
# 12, 11, 10, 9, 8, 7, 6])
# (We add one to the arange result to disambiguate the zeros below the
# diagonal of our upper-triangular matrix from the first entry in `x`.)
# Now, when reshaped, this lays out as a matrix:
y = np.reshape(xc, [5, 5])
# ==> array([[ 1, 2, 3, 4, 5],
# [ 6, 7, 8, 9, 10],
# [11, 12, 13, 14, 15],
# [15, 14, 13, 12, 11],
# [10, 9, 8, 7, 6]])
# Finally, zero the elements below the diagonal:
y = np.triu(y, k=0)
# ==> array([[ 1, 2, 3, 4, 5],
# [ 0, 7, 8, 9, 10],
# [ 0, 0, 13, 14, 15],
# [ 0, 0, 0, 12, 11],
# [ 0, 0, 0, 0, 6]])
```
From this example we see that the resulting matrix is upper-triangular, and
contains all the entries of x, as desired. The rest is details:
- If `n` is even, `x` doesn't exactly fill an even number of rows (it fills
`n / 2` rows and half of an additional row), but the whole scheme still
works.
- If we want a lower triangular matrix instead of an upper triangular,
we remove the first `n` elements from `x` rather than from the reversed
`x`.
For additional comparisons, a pure numpy version of this function can be found
in `distribution_util_test.py`, function `_fill_triangular`.
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
tril: `Tensor` with lower (or upper) triangular elements filled from `x`.
Raises:
ValueError: if `x` cannot be mapped to a triangular matrix.
|
tensorflow_probability/python/internal/distribution_util.py
|
def fill_triangular(x, upper=False, name=None):
r"""Creates a (batch of) triangular matrix from a vector of inputs.
Created matrix can be lower- or upper-triangular. (It is more efficient to
create the matrix as upper or lower, rather than transpose.)
Triangular matrix elements are filled in a clockwise spiral. See example,
below.
If `x.shape` is `[b1, b2, ..., bB, d]` then the output shape is
`[b1, b2, ..., bB, n, n]` where `n` is such that `d = n(n+1)/2`, i.e.,
  `n = int(np.sqrt(0.25 + 2. * d) - 0.5)`.
Example:
```python
fill_triangular([1, 2, 3, 4, 5, 6])
# ==> [[4, 0, 0],
# [6, 5, 0],
# [3, 2, 1]]
fill_triangular([1, 2, 3, 4, 5, 6], upper=True)
# ==> [[1, 2, 3],
# [0, 5, 6],
# [0, 0, 4]]
```
The key trick is to create an upper triangular matrix by concatenating `x`
and a tail of itself, then reshaping.
Suppose that we are filling the upper triangle of an `n`-by-`n` matrix `M`
from a vector `x`. The matrix `M` contains n**2 entries total. The vector `x`
contains `n * (n+1) / 2` entries. For concreteness, we'll consider `n = 5`
(so `x` has `15` entries and `M` has `25`). We'll concatenate `x` and `x` with
the first (`n = 5`) elements removed and reversed:
```python
x = np.arange(15) + 1
xc = np.concatenate([x, x[5:][::-1]])
# ==> array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 14, 13,
# 12, 11, 10, 9, 8, 7, 6])
# (We add one to the arange result to disambiguate the zeros below the
# diagonal of our upper-triangular matrix from the first entry in `x`.)
  # Now, when reshaped, this lays out as a matrix:
y = np.reshape(xc, [5, 5])
# ==> array([[ 1, 2, 3, 4, 5],
# [ 6, 7, 8, 9, 10],
# [11, 12, 13, 14, 15],
# [15, 14, 13, 12, 11],
# [10, 9, 8, 7, 6]])
# Finally, zero the elements below the diagonal:
y = np.triu(y, k=0)
# ==> array([[ 1, 2, 3, 4, 5],
# [ 0, 7, 8, 9, 10],
# [ 0, 0, 13, 14, 15],
# [ 0, 0, 0, 12, 11],
# [ 0, 0, 0, 0, 6]])
```
  From this example we see that the resulting matrix is upper-triangular, and
contains all the entries of x, as desired. The rest is details:
- If `n` is even, `x` doesn't exactly fill an even number of rows (it fills
`n / 2` rows and half of an additional row), but the whole scheme still
works.
- If we want a lower triangular matrix instead of an upper triangular,
we remove the first `n` elements from `x` rather than from the reversed
`x`.
For additional comparisons, a pure numpy version of this function can be found
in `distribution_util_test.py`, function `_fill_triangular`.
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
tril: `Tensor` with lower (or upper) triangular elements filled from `x`.
Raises:
ValueError: if `x` cannot be mapped to a triangular matrix.
"""
with tf.name_scope(name or "fill_triangular"):
x = tf.convert_to_tensor(value=x, name="x")
m = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(x.shape, 1)[-1])
if m is not None:
# Formula derived by solving for n: m = n(n+1)/2.
m = np.int32(m)
n = np.sqrt(0.25 + 2. * m) - 0.5
if n != np.floor(n):
raise ValueError("Input right-most shape ({}) does not "
"correspond to a triangular matrix.".format(m))
n = np.int32(n)
static_final_shape = x.shape[:-1].concatenate([n, n])
else:
m = tf.shape(input=x)[-1]
# For derivation, see above. Casting automatically lops off the 0.5, so we
# omit it. We don't validate n is an integer because this has
# graph-execution cost; an error will be thrown from the reshape, below.
n = tf.cast(
tf.sqrt(0.25 + tf.cast(2 * m, dtype=tf.float32)), dtype=tf.int32)
static_final_shape = tensorshape_util.with_rank_at_least(
x.shape, 1)[:-1].concatenate([None, None])
# Try it out in numpy:
# n = 3
# x = np.arange(n * (n + 1) / 2)
# m = x.shape[0]
# n = np.int32(np.sqrt(.25 + 2 * m) - .5)
# x_tail = x[(m - (n**2 - m)):]
# np.concatenate([x_tail, x[::-1]], 0).reshape(n, n) # lower
# # ==> array([[3, 4, 5],
# [5, 4, 3],
# [2, 1, 0]])
# np.concatenate([x, x_tail[::-1]], 0).reshape(n, n) # upper
# # ==> array([[0, 1, 2],
# [3, 4, 5],
# [5, 4, 3]])
#
# Note that we can't simply do `x[..., -(n**2 - m):]` because this doesn't
# correctly handle `m == n == 1`. Hence, we do nonnegative indexing.
# Furthermore observe that:
# m - (n**2 - m)
# = n**2 / 2 + n / 2 - (n**2 - n**2 / 2 + n / 2)
# = 2 (n**2 / 2 + n / 2) - n**2
# = n**2 + n - n**2
# = n
ndims = prefer_static_rank(x)
if upper:
x_list = [x, tf.reverse(x[..., n:], axis=[ndims - 1])]
else:
x_list = [x[..., n:], tf.reverse(x, axis=[ndims - 1])]
new_shape = (
tensorshape_util.as_list(static_final_shape)
if tensorshape_util.is_fully_defined(static_final_shape) else tf.concat(
[tf.shape(input=x)[:-1], [n, n]], axis=0))
x = tf.reshape(tf.concat(x_list, axis=-1), new_shape)
x = tf.linalg.band_part(
x, num_lower=(0 if upper else -1), num_upper=(-1 if upper else 0))
tensorshape_util.set_shape(x, static_final_shape)
return x
|
def fill_triangular(x, upper=False, name=None):
r"""Creates a (batch of) triangular matrix from a vector of inputs.
Created matrix can be lower- or upper-triangular. (It is more efficient to
create the matrix as upper or lower, rather than transpose.)
Triangular matrix elements are filled in a clockwise spiral. See example,
below.
If `x.shape` is `[b1, b2, ..., bB, d]` then the output shape is
`[b1, b2, ..., bB, n, n]` where `n` is such that `d = n(n+1)/2`, i.e.,
  `n = int(np.sqrt(0.25 + 2. * d) - 0.5)`.
Example:
```python
fill_triangular([1, 2, 3, 4, 5, 6])
# ==> [[4, 0, 0],
# [6, 5, 0],
# [3, 2, 1]]
fill_triangular([1, 2, 3, 4, 5, 6], upper=True)
# ==> [[1, 2, 3],
# [0, 5, 6],
# [0, 0, 4]]
```
The key trick is to create an upper triangular matrix by concatenating `x`
and a tail of itself, then reshaping.
Suppose that we are filling the upper triangle of an `n`-by-`n` matrix `M`
from a vector `x`. The matrix `M` contains n**2 entries total. The vector `x`
contains `n * (n+1) / 2` entries. For concreteness, we'll consider `n = 5`
(so `x` has `15` entries and `M` has `25`). We'll concatenate `x` and `x` with
the first (`n = 5`) elements removed and reversed:
```python
x = np.arange(15) + 1
xc = np.concatenate([x, x[5:][::-1]])
# ==> array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 14, 13,
# 12, 11, 10, 9, 8, 7, 6])
# (We add one to the arange result to disambiguate the zeros below the
# diagonal of our upper-triangular matrix from the first entry in `x`.)
  # Now, when reshaped, this lays out as a matrix:
y = np.reshape(xc, [5, 5])
# ==> array([[ 1, 2, 3, 4, 5],
# [ 6, 7, 8, 9, 10],
# [11, 12, 13, 14, 15],
# [15, 14, 13, 12, 11],
# [10, 9, 8, 7, 6]])
# Finally, zero the elements below the diagonal:
y = np.triu(y, k=0)
# ==> array([[ 1, 2, 3, 4, 5],
# [ 0, 7, 8, 9, 10],
# [ 0, 0, 13, 14, 15],
# [ 0, 0, 0, 12, 11],
# [ 0, 0, 0, 0, 6]])
```
  From this example we see that the resulting matrix is upper-triangular, and
contains all the entries of x, as desired. The rest is details:
- If `n` is even, `x` doesn't exactly fill an even number of rows (it fills
`n / 2` rows and half of an additional row), but the whole scheme still
works.
- If we want a lower triangular matrix instead of an upper triangular,
we remove the first `n` elements from `x` rather than from the reversed
`x`.
For additional comparisons, a pure numpy version of this function can be found
in `distribution_util_test.py`, function `_fill_triangular`.
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
tril: `Tensor` with lower (or upper) triangular elements filled from `x`.
Raises:
ValueError: if `x` cannot be mapped to a triangular matrix.
"""
with tf.name_scope(name or "fill_triangular"):
x = tf.convert_to_tensor(value=x, name="x")
m = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(x.shape, 1)[-1])
if m is not None:
# Formula derived by solving for n: m = n(n+1)/2.
m = np.int32(m)
n = np.sqrt(0.25 + 2. * m) - 0.5
if n != np.floor(n):
raise ValueError("Input right-most shape ({}) does not "
"correspond to a triangular matrix.".format(m))
n = np.int32(n)
static_final_shape = x.shape[:-1].concatenate([n, n])
else:
m = tf.shape(input=x)[-1]
# For derivation, see above. Casting automatically lops off the 0.5, so we
# omit it. We don't validate n is an integer because this has
# graph-execution cost; an error will be thrown from the reshape, below.
n = tf.cast(
tf.sqrt(0.25 + tf.cast(2 * m, dtype=tf.float32)), dtype=tf.int32)
static_final_shape = tensorshape_util.with_rank_at_least(
x.shape, 1)[:-1].concatenate([None, None])
# Try it out in numpy:
# n = 3
# x = np.arange(n * (n + 1) / 2)
# m = x.shape[0]
# n = np.int32(np.sqrt(.25 + 2 * m) - .5)
# x_tail = x[(m - (n**2 - m)):]
# np.concatenate([x_tail, x[::-1]], 0).reshape(n, n) # lower
# # ==> array([[3, 4, 5],
# [5, 4, 3],
# [2, 1, 0]])
# np.concatenate([x, x_tail[::-1]], 0).reshape(n, n) # upper
# # ==> array([[0, 1, 2],
# [3, 4, 5],
# [5, 4, 3]])
#
# Note that we can't simply do `x[..., -(n**2 - m):]` because this doesn't
# correctly handle `m == n == 1`. Hence, we do nonnegative indexing.
# Furthermore observe that:
# m - (n**2 - m)
# = n**2 / 2 + n / 2 - (n**2 - n**2 / 2 + n / 2)
# = 2 (n**2 / 2 + n / 2) - n**2
# = n**2 + n - n**2
# = n
ndims = prefer_static_rank(x)
if upper:
x_list = [x, tf.reverse(x[..., n:], axis=[ndims - 1])]
else:
x_list = [x[..., n:], tf.reverse(x, axis=[ndims - 1])]
new_shape = (
tensorshape_util.as_list(static_final_shape)
if tensorshape_util.is_fully_defined(static_final_shape) else tf.concat(
[tf.shape(input=x)[:-1], [n, n]], axis=0))
x = tf.reshape(tf.concat(x_list, axis=-1), new_shape)
x = tf.linalg.band_part(
x, num_lower=(0 if upper else -1), num_upper=(-1 if upper else 0))
tensorshape_util.set_shape(x, static_final_shape)
return x
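A runnable version of the docstring's first example (illustrative; assumes eager TF 2.x and the import path shown above). With `d = 6` entries, `n = 3`:
```python
import tensorflow as tf
from tensorflow_probability.python.internal import distribution_util

x = tf.constant([1., 2., 3., 4., 5., 6.])
tril = distribution_util.fill_triangular(x)
# ==> [[4., 0., 0.],
#      [6., 5., 0.],
#      [3., 2., 1.]]
```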
|
[
"r",
"Creates",
"a",
"(",
"batch",
"of",
")",
"triangular",
"matrix",
"from",
"a",
"vector",
"of",
"inputs",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1415-L1561
|
[
"def",
"fill_triangular",
"(",
"x",
",",
"upper",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"fill_triangular\"",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"x\"",
")",
"m",
"=",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"tensorshape_util",
".",
"with_rank_at_least",
"(",
"x",
".",
"shape",
",",
"1",
")",
"[",
"-",
"1",
"]",
")",
"if",
"m",
"is",
"not",
"None",
":",
"# Formula derived by solving for n: m = n(n+1)/2.",
"m",
"=",
"np",
".",
"int32",
"(",
"m",
")",
"n",
"=",
"np",
".",
"sqrt",
"(",
"0.25",
"+",
"2.",
"*",
"m",
")",
"-",
"0.5",
"if",
"n",
"!=",
"np",
".",
"floor",
"(",
"n",
")",
":",
"raise",
"ValueError",
"(",
"\"Input right-most shape ({}) does not \"",
"\"correspond to a triangular matrix.\"",
".",
"format",
"(",
"m",
")",
")",
"n",
"=",
"np",
".",
"int32",
"(",
"n",
")",
"static_final_shape",
"=",
"x",
".",
"shape",
"[",
":",
"-",
"1",
"]",
".",
"concatenate",
"(",
"[",
"n",
",",
"n",
"]",
")",
"else",
":",
"m",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"[",
"-",
"1",
"]",
"# For derivation, see above. Casting automatically lops off the 0.5, so we",
"# omit it. We don't validate n is an integer because this has",
"# graph-execution cost; an error will be thrown from the reshape, below.",
"n",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"sqrt",
"(",
"0.25",
"+",
"tf",
".",
"cast",
"(",
"2",
"*",
"m",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
")",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"static_final_shape",
"=",
"tensorshape_util",
".",
"with_rank_at_least",
"(",
"x",
".",
"shape",
",",
"1",
")",
"[",
":",
"-",
"1",
"]",
".",
"concatenate",
"(",
"[",
"None",
",",
"None",
"]",
")",
"# Try it out in numpy:",
"# n = 3",
"# x = np.arange(n * (n + 1) / 2)",
"# m = x.shape[0]",
"# n = np.int32(np.sqrt(.25 + 2 * m) - .5)",
"# x_tail = x[(m - (n**2 - m)):]",
"# np.concatenate([x_tail, x[::-1]], 0).reshape(n, n) # lower",
"# # ==> array([[3, 4, 5],",
"# [5, 4, 3],",
"# [2, 1, 0]])",
"# np.concatenate([x, x_tail[::-1]], 0).reshape(n, n) # upper",
"# # ==> array([[0, 1, 2],",
"# [3, 4, 5],",
"# [5, 4, 3]])",
"#",
"# Note that we can't simply do `x[..., -(n**2 - m):]` because this doesn't",
"# correctly handle `m == n == 1`. Hence, we do nonnegative indexing.",
"# Furthermore observe that:",
"# m - (n**2 - m)",
"# = n**2 / 2 + n / 2 - (n**2 - n**2 / 2 + n / 2)",
"# = 2 (n**2 / 2 + n / 2) - n**2",
"# = n**2 + n - n**2",
"# = n",
"ndims",
"=",
"prefer_static_rank",
"(",
"x",
")",
"if",
"upper",
":",
"x_list",
"=",
"[",
"x",
",",
"tf",
".",
"reverse",
"(",
"x",
"[",
"...",
",",
"n",
":",
"]",
",",
"axis",
"=",
"[",
"ndims",
"-",
"1",
"]",
")",
"]",
"else",
":",
"x_list",
"=",
"[",
"x",
"[",
"...",
",",
"n",
":",
"]",
",",
"tf",
".",
"reverse",
"(",
"x",
",",
"axis",
"=",
"[",
"ndims",
"-",
"1",
"]",
")",
"]",
"new_shape",
"=",
"(",
"tensorshape_util",
".",
"as_list",
"(",
"static_final_shape",
")",
"if",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"static_final_shape",
")",
"else",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"[",
":",
"-",
"1",
"]",
",",
"[",
"n",
",",
"n",
"]",
"]",
",",
"axis",
"=",
"0",
")",
")",
"x",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"concat",
"(",
"x_list",
",",
"axis",
"=",
"-",
"1",
")",
",",
"new_shape",
")",
"x",
"=",
"tf",
".",
"linalg",
".",
"band_part",
"(",
"x",
",",
"num_lower",
"=",
"(",
"0",
"if",
"upper",
"else",
"-",
"1",
")",
",",
"num_upper",
"=",
"(",
"-",
"1",
"if",
"upper",
"else",
"0",
")",
")",
"tensorshape_util",
".",
"set_shape",
"(",
"x",
",",
"static_final_shape",
")",
"return",
"x"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
fill_triangular_inverse
|
Creates a vector from a (batch of) triangular matrix.
The vector is created from the lower-triangular or upper-triangular portion
depending on the value of the parameter `upper`.
If `x.shape` is `[b1, b2, ..., bB, n, n]` then the output shape is
`[b1, b2, ..., bB, d]` where `d = n (n + 1) / 2`.
Example:
```python
fill_triangular_inverse(
[[4, 0, 0],
[6, 5, 0],
[3, 2, 1]])
# ==> [1, 2, 3, 4, 5, 6]
fill_triangular_inverse(
[[1, 2, 3],
[0, 5, 6],
[0, 0, 4]], upper=True)
# ==> [1, 2, 3, 4, 5, 6]
```
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
flat_tril: (Batch of) vector-shaped `Tensor` representing vectorized lower
(or upper) triangular elements from `x`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def fill_triangular_inverse(x, upper=False, name=None):
"""Creates a vector from a (batch of) triangular matrix.
The vector is created from the lower-triangular or upper-triangular portion
depending on the value of the parameter `upper`.
If `x.shape` is `[b1, b2, ..., bB, n, n]` then the output shape is
`[b1, b2, ..., bB, d]` where `d = n (n + 1) / 2`.
Example:
```python
fill_triangular_inverse(
[[4, 0, 0],
[6, 5, 0],
[3, 2, 1]])
# ==> [1, 2, 3, 4, 5, 6]
fill_triangular_inverse(
[[1, 2, 3],
[0, 5, 6],
[0, 0, 4]], upper=True)
# ==> [1, 2, 3, 4, 5, 6]
```
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
flat_tril: (Batch of) vector-shaped `Tensor` representing vectorized lower
(or upper) triangular elements from `x`.
"""
with tf.name_scope(name or "fill_triangular_inverse"):
x = tf.convert_to_tensor(value=x, name="x")
n = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(x.shape, 2)[-1])
if n is not None:
n = np.int32(n)
m = np.int32((n * (n + 1)) // 2)
static_final_shape = x.shape[:-2].concatenate([m])
else:
n = tf.shape(input=x)[-1]
m = (n * (n + 1)) // 2
static_final_shape = tensorshape_util.with_rank_at_least(
x.shape, 2)[:-2].concatenate([None])
ndims = prefer_static_rank(x)
if upper:
initial_elements = x[..., 0, :]
triangular_portion = x[..., 1:, :]
else:
initial_elements = tf.reverse(x[..., -1, :], axis=[ndims - 2])
triangular_portion = x[..., :-1, :]
rotated_triangular_portion = tf.reverse(
tf.reverse(triangular_portion, axis=[ndims - 1]), axis=[ndims - 2])
consolidated_matrix = triangular_portion + rotated_triangular_portion
end_sequence = tf.reshape(
consolidated_matrix,
tf.concat([tf.shape(input=x)[:-2], [n * (n - 1)]], axis=0))
y = tf.concat([initial_elements, end_sequence[..., :m - n]], axis=-1)
tensorshape_util.set_shape(y, static_final_shape)
return y
|
def fill_triangular_inverse(x, upper=False, name=None):
"""Creates a vector from a (batch of) triangular matrix.
The vector is created from the lower-triangular or upper-triangular portion
depending on the value of the parameter `upper`.
If `x.shape` is `[b1, b2, ..., bB, n, n]` then the output shape is
`[b1, b2, ..., bB, d]` where `d = n (n + 1) / 2`.
Example:
```python
fill_triangular_inverse(
[[4, 0, 0],
[6, 5, 0],
[3, 2, 1]])
# ==> [1, 2, 3, 4, 5, 6]
fill_triangular_inverse(
[[1, 2, 3],
[0, 5, 6],
[0, 0, 4]], upper=True)
# ==> [1, 2, 3, 4, 5, 6]
```
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
flat_tril: (Batch of) vector-shaped `Tensor` representing vectorized lower
(or upper) triangular elements from `x`.
"""
with tf.name_scope(name or "fill_triangular_inverse"):
x = tf.convert_to_tensor(value=x, name="x")
n = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(x.shape, 2)[-1])
if n is not None:
n = np.int32(n)
m = np.int32((n * (n + 1)) // 2)
static_final_shape = x.shape[:-2].concatenate([m])
else:
n = tf.shape(input=x)[-1]
m = (n * (n + 1)) // 2
static_final_shape = tensorshape_util.with_rank_at_least(
x.shape, 2)[:-2].concatenate([None])
ndims = prefer_static_rank(x)
if upper:
initial_elements = x[..., 0, :]
triangular_portion = x[..., 1:, :]
else:
initial_elements = tf.reverse(x[..., -1, :], axis=[ndims - 2])
triangular_portion = x[..., :-1, :]
rotated_triangular_portion = tf.reverse(
tf.reverse(triangular_portion, axis=[ndims - 1]), axis=[ndims - 2])
consolidated_matrix = triangular_portion + rotated_triangular_portion
end_sequence = tf.reshape(
consolidated_matrix,
tf.concat([tf.shape(input=x)[:-2], [n * (n - 1)]], axis=0))
y = tf.concat([initial_elements, end_sequence[..., :m - n]], axis=-1)
tensorshape_util.set_shape(y, static_final_shape)
return y
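A round-trip sketch pairing this function with `fill_triangular` above (illustrative; assumes eager TF 2.x and the import path shown above; use the same `upper` setting on both sides):
```python
import tensorflow as tf
from tensorflow_probability.python.internal import distribution_util

x = tf.constant([1., 2., 3., 4., 5., 6.])
tril = distribution_util.fill_triangular(x)
flat = distribution_util.fill_triangular_inverse(tril)
# ==> [1., 2., 3., 4., 5., 6.]
```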
|
[
"Creates",
"a",
"vector",
"from",
"a",
"(",
"batch",
"of",
")",
"triangular",
"matrix",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1564-L1630
|
[
"def",
"fill_triangular_inverse",
"(",
"x",
",",
"upper",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"fill_triangular_inverse\"",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"x\"",
")",
"n",
"=",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"tensorshape_util",
".",
"with_rank_at_least",
"(",
"x",
".",
"shape",
",",
"2",
")",
"[",
"-",
"1",
"]",
")",
"if",
"n",
"is",
"not",
"None",
":",
"n",
"=",
"np",
".",
"int32",
"(",
"n",
")",
"m",
"=",
"np",
".",
"int32",
"(",
"(",
"n",
"*",
"(",
"n",
"+",
"1",
")",
")",
"//",
"2",
")",
"static_final_shape",
"=",
"x",
".",
"shape",
"[",
":",
"-",
"2",
"]",
".",
"concatenate",
"(",
"[",
"m",
"]",
")",
"else",
":",
"n",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"[",
"-",
"1",
"]",
"m",
"=",
"(",
"n",
"*",
"(",
"n",
"+",
"1",
")",
")",
"//",
"2",
"static_final_shape",
"=",
"tensorshape_util",
".",
"with_rank_at_least",
"(",
"x",
".",
"shape",
",",
"2",
")",
"[",
":",
"-",
"2",
"]",
".",
"concatenate",
"(",
"[",
"None",
"]",
")",
"ndims",
"=",
"prefer_static_rank",
"(",
"x",
")",
"if",
"upper",
":",
"initial_elements",
"=",
"x",
"[",
"...",
",",
"0",
",",
":",
"]",
"triangular_portion",
"=",
"x",
"[",
"...",
",",
"1",
":",
",",
":",
"]",
"else",
":",
"initial_elements",
"=",
"tf",
".",
"reverse",
"(",
"x",
"[",
"...",
",",
"-",
"1",
",",
":",
"]",
",",
"axis",
"=",
"[",
"ndims",
"-",
"2",
"]",
")",
"triangular_portion",
"=",
"x",
"[",
"...",
",",
":",
"-",
"1",
",",
":",
"]",
"rotated_triangular_portion",
"=",
"tf",
".",
"reverse",
"(",
"tf",
".",
"reverse",
"(",
"triangular_portion",
",",
"axis",
"=",
"[",
"ndims",
"-",
"1",
"]",
")",
",",
"axis",
"=",
"[",
"ndims",
"-",
"2",
"]",
")",
"consolidated_matrix",
"=",
"triangular_portion",
"+",
"rotated_triangular_portion",
"end_sequence",
"=",
"tf",
".",
"reshape",
"(",
"consolidated_matrix",
",",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"[",
":",
"-",
"2",
"]",
",",
"[",
"n",
"*",
"(",
"n",
"-",
"1",
")",
"]",
"]",
",",
"axis",
"=",
"0",
")",
")",
"y",
"=",
"tf",
".",
"concat",
"(",
"[",
"initial_elements",
",",
"end_sequence",
"[",
"...",
",",
":",
"m",
"-",
"n",
"]",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"tensorshape_util",
".",
"set_shape",
"(",
"y",
",",
"static_final_shape",
")",
"return",
"y"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
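The fold-and-add step in `fill_triangular_inverse` is easiest to verify without batching. Below is a minimal NumPy sketch of the same algorithm for a single matrix (the name `fill_triangular_inverse_np` is illustrative, not part of the library); it reproduces the docstring example.
```python
import numpy as np

def fill_triangular_inverse_np(x, upper=False):
  # Single-matrix NumPy sketch of the vectorization trick above.
  n = x.shape[-1]
  m = n * (n + 1) // 2
  if upper:
    initial = x[0, :]
    tri = x[1:, :]
  else:
    initial = x[-1, ::-1]   # last row, reversed
    tri = x[:-1, :]
  # Rotate the remaining rows by 180 degrees and add; every triangular
  # element then appears exactly once in the first m - n flattened slots.
  folded = tri + tri[::-1, ::-1]
  return np.concatenate([initial, folded.reshape(-1)[:m - n]])

print(fill_triangular_inverse_np(
    np.array([[4., 0, 0], [6, 5, 0], [3, 2, 1]])))
# ==> [1. 2. 3. 4. 5. 6.]
```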
tridiag
|
Creates a matrix with values set above, below, and on the diagonal.
Example:
```python
tridiag(below=[1., 2., 3.],
diag=[4., 5., 6., 7.],
above=[8., 9., 10.])
# ==> array([[ 4., 8., 0., 0.],
# [ 1., 5., 9., 0.],
# [ 0., 2., 6., 10.],
# [ 0., 0., 3., 7.]], dtype=float32)
```
Warning: This Op is intended for convenience, not efficiency.
Args:
below: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the below
diagonal part. `None` is logically equivalent to `below = 0`.
diag: `Tensor` of shape `[B1, ..., Bb, d]` corresponding to the diagonal
part. `None` is logically equivalent to `diag = 0`.
above: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the above
diagonal part. `None` is logically equivalent to `above = 0`.
name: Python `str`. The name to give this op.
Returns:
tridiag: `Tensor` with values set above, below and on the diagonal.
Raises:
ValueError: if all inputs are `None`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def tridiag(below=None, diag=None, above=None, name=None):
"""Creates a matrix with values set above, below, and on the diagonal.
Example:
```python
tridiag(below=[1., 2., 3.],
diag=[4., 5., 6., 7.],
above=[8., 9., 10.])
# ==> array([[ 4., 8., 0., 0.],
# [ 1., 5., 9., 0.],
# [ 0., 2., 6., 10.],
# [ 0., 0., 3., 7.]], dtype=float32)
```
Warning: This Op is intended for convenience, not efficiency.
Args:
below: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the below
diagonal part. `None` is logically equivalent to `below = 0`.
diag: `Tensor` of shape `[B1, ..., Bb, d]` corresponding to the diagonal
part. `None` is logically equivalent to `diag = 0`.
above: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the above
diagonal part. `None` is logically equivalent to `above = 0`.
name: Python `str`. The name to give this op.
Returns:
tridiag: `Tensor` with values set above, below and on the diagonal.
Raises:
ValueError: if all inputs are `None`.
"""
def _pad(x):
"""Prepends and appends a zero to every vector in a batch of vectors."""
shape = tf.concat([tf.shape(input=x)[:-1], [1]], axis=0)
z = tf.zeros(shape, dtype=x.dtype)
return tf.concat([z, x, z], axis=-1)
def _add(*x):
"""Adds list of Tensors, ignoring `None`."""
s = None
for y in x:
if y is None:
continue
elif s is None:
s = y
else:
s += y
if s is None:
raise ValueError("Must specify at least one of `below`, `diag`, `above`.")
return s
with tf.name_scope(name or "tridiag"):
if below is not None:
below = tf.convert_to_tensor(value=below, name="below")
below = tf.linalg.diag(_pad(below))[..., :-1, 1:]
if diag is not None:
diag = tf.convert_to_tensor(value=diag, name="diag")
diag = tf.linalg.diag(diag)
if above is not None:
above = tf.convert_to_tensor(value=above, name="above")
above = tf.linalg.diag(_pad(above))[..., 1:, :-1]
# TODO(jvdillon): Consider using scatter_nd instead of creating three full
# matrices.
return _add(below, diag, above)
|
def tridiag(below=None, diag=None, above=None, name=None):
"""Creates a matrix with values set above, below, and on the diagonal.
Example:
```python
tridiag(below=[1., 2., 3.],
diag=[4., 5., 6., 7.],
above=[8., 9., 10.])
# ==> array([[ 4., 8., 0., 0.],
# [ 1., 5., 9., 0.],
# [ 0., 2., 6., 10.],
# [ 0., 0., 3., 7.]], dtype=float32)
```
Warning: This Op is intended for convenience, not efficiency.
Args:
below: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the below
diagonal part. `None` is logically equivalent to `below = 0`.
diag: `Tensor` of shape `[B1, ..., Bb, d]` corresponding to the diagonal
part. `None` is logically equivalent to `diag = 0`.
above: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the above
diagonal part. `None` is logically equivalent to `above = 0`.
name: Python `str`. The name to give this op.
Returns:
tridiag: `Tensor` with values set above, below and on the diagonal.
Raises:
ValueError: if all inputs are `None`.
"""
def _pad(x):
"""Prepends and appends a zero to every vector in a batch of vectors."""
shape = tf.concat([tf.shape(input=x)[:-1], [1]], axis=0)
z = tf.zeros(shape, dtype=x.dtype)
return tf.concat([z, x, z], axis=-1)
def _add(*x):
"""Adds list of Tensors, ignoring `None`."""
s = None
for y in x:
if y is None:
continue
elif s is None:
s = y
else:
s += y
if s is None:
raise ValueError("Must specify at least one of `below`, `diag`, `above`.")
return s
with tf.name_scope(name or "tridiag"):
if below is not None:
below = tf.convert_to_tensor(value=below, name="below")
below = tf.linalg.diag(_pad(below))[..., :-1, 1:]
if diag is not None:
diag = tf.convert_to_tensor(value=diag, name="diag")
diag = tf.linalg.diag(diag)
if above is not None:
above = tf.convert_to_tensor(value=above, name="above")
above = tf.linalg.diag(_pad(above))[..., 1:, :-1]
# TODO(jvdillon): Consider using scatter_nd instead of creating three full
# matrices.
return _add(below, diag, above)
|
[
"Creates",
"a",
"matrix",
"with",
"values",
"set",
"above",
"below",
"and",
"on",
"the",
"diagonal",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1633-L1698
|
[
"def",
"tridiag",
"(",
"below",
"=",
"None",
",",
"diag",
"=",
"None",
",",
"above",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"def",
"_pad",
"(",
"x",
")",
":",
"\"\"\"Prepends and appends a zero to every vector in a batch of vectors.\"\"\"",
"shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"[",
":",
"-",
"1",
"]",
",",
"[",
"1",
"]",
"]",
",",
"axis",
"=",
"0",
")",
"z",
"=",
"tf",
".",
"zeros",
"(",
"shape",
",",
"dtype",
"=",
"x",
".",
"dtype",
")",
"return",
"tf",
".",
"concat",
"(",
"[",
"z",
",",
"x",
",",
"z",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"def",
"_add",
"(",
"*",
"x",
")",
":",
"\"\"\"Adds list of Tensors, ignoring `None`.\"\"\"",
"s",
"=",
"None",
"for",
"y",
"in",
"x",
":",
"if",
"y",
"is",
"None",
":",
"continue",
"elif",
"s",
"is",
"None",
":",
"s",
"=",
"y",
"else",
":",
"s",
"+=",
"y",
"if",
"s",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Must specify at least one of `below`, `diag`, `above`.\"",
")",
"return",
"s",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"tridiag\"",
")",
":",
"if",
"below",
"is",
"not",
"None",
":",
"below",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"below",
",",
"name",
"=",
"\"below\"",
")",
"below",
"=",
"tf",
".",
"linalg",
".",
"diag",
"(",
"_pad",
"(",
"below",
")",
")",
"[",
"...",
",",
":",
"-",
"1",
",",
"1",
":",
"]",
"if",
"diag",
"is",
"not",
"None",
":",
"diag",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"diag",
",",
"name",
"=",
"\"diag\"",
")",
"diag",
"=",
"tf",
".",
"linalg",
".",
"diag",
"(",
"diag",
")",
"if",
"above",
"is",
"not",
"None",
":",
"above",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"above",
",",
"name",
"=",
"\"above\"",
")",
"above",
"=",
"tf",
".",
"linalg",
".",
"diag",
"(",
"_pad",
"(",
"above",
")",
")",
"[",
"...",
",",
"1",
":",
",",
":",
"-",
"1",
"]",
"# TODO(jvdillon): Consider using scatter_nd instead of creating three full",
"# matrices.",
"return",
"_add",
"(",
"below",
",",
"diag",
",",
"above",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
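The `_pad`-then-`tf.linalg.diag` construction in `tridiag` has a direct NumPy analogue. A minimal single-matrix sketch (the name `tridiag_np` is made up for illustration):
```python
import numpy as np

def tridiag_np(below=None, diag=None, above=None):
  # Pad each off-diagonal with a zero at both ends, embed it on the main
  # diagonal, then shift it off-diagonal by slicing one row/column away.
  def pad(v):
    return np.concatenate([[0.], np.asarray(v, dtype=float), [0.]])
  parts = []
  if below is not None:
    parts.append(np.diag(pad(below))[:-1, 1:])
  if diag is not None:
    parts.append(np.diag(np.asarray(diag, dtype=float)))
  if above is not None:
    parts.append(np.diag(pad(above))[1:, :-1])
  if not parts:
    raise ValueError("Must specify at least one of `below`, `diag`, `above`.")
  return sum(parts)

print(tridiag_np(below=[1., 2., 3.],
                 diag=[4., 5., 6., 7.],
                 above=[8., 9., 10.]))
# ==> same 4x4 matrix as the docstring example
```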
reduce_weighted_logsumexp
|
Computes `log(abs(sum(weight * exp(elements across tensor dimensions))))`.
If all weights `w` are known to be positive, it is more efficient to directly
use `reduce_logsumexp`, i.e., `tf.reduce_logsumexp(logx + tf.math.log(w))` is more
efficient than `du.reduce_weighted_logsumexp(logx, w)`.
Reduces `logx` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than `log(sum(w * exp(logx)))`. It
avoids overflows caused by taking the exp of large inputs and underflows
caused by taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0, 0],
[0, 0, 0]])
w = tf.constant([[-1., 1, 1],
[1, 1, 1]])
du.reduce_weighted_logsumexp(x, w)
# ==> log(-1*1 + 1*1 + 1*1 + 1*1 + 1*1 + 1*1) = log(4)
du.reduce_weighted_logsumexp(x, w, axis=0)
# ==> [log(-1+1), log(1+1), log(1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1)
# ==> [log(-1+1+1), log(1+1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True)
# ==> [[log(-1+1+1)], [log(1+1+1)]]
du.reduce_weighted_logsumexp(x, w, axis=[0, 1])
# ==> log(-1+5)
```
Args:
logx: The tensor to reduce. Should have numeric type.
w: The weight tensor. Should have numeric type identical to `logx`.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(logx), rank(logx))`.
keep_dims: If true, retains reduced dimensions with length 1.
return_sign: If `True`, returns the sign of the result.
name: A name for the operation (optional).
Returns:
lswe: The `log(abs(sum(weight * exp(x))))` reduced tensor.
sign: (Optional) The sign of `sum(weight * exp(x))`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def reduce_weighted_logsumexp(logx,
w=None,
axis=None,
keep_dims=False,
return_sign=False,
name=None):
"""Computes `log(abs(sum(weight * exp(elements across tensor dimensions))))`.
If all weights `w` are known to be positive, it is more efficient to directly
  use `reduce_logsumexp`, i.e., `tf.reduce_logsumexp(logx + tf.math.log(w))` is more
efficient than `du.reduce_weighted_logsumexp(logx, w)`.
  Reduces `logx` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
  This function is more numerically stable than `log(sum(w * exp(logx)))`. It
avoids overflows caused by taking the exp of large inputs and underflows
caused by taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0, 0],
[0, 0, 0]])
w = tf.constant([[-1., 1, 1],
[1, 1, 1]])
du.reduce_weighted_logsumexp(x, w)
# ==> log(-1*1 + 1*1 + 1*1 + 1*1 + 1*1 + 1*1) = log(4)
du.reduce_weighted_logsumexp(x, w, axis=0)
# ==> [log(-1+1), log(1+1), log(1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1)
# ==> [log(-1+1+1), log(1+1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True)
# ==> [[log(-1+1+1)], [log(1+1+1)]]
du.reduce_weighted_logsumexp(x, w, axis=[0, 1])
# ==> log(-1+5)
```
Args:
logx: The tensor to reduce. Should have numeric type.
w: The weight tensor. Should have numeric type identical to `logx`.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(logx), rank(logx))`.
keep_dims: If true, retains reduced dimensions with length 1.
return_sign: If `True`, returns the sign of the result.
name: A name for the operation (optional).
Returns:
lswe: The `log(abs(sum(weight * exp(x))))` reduced tensor.
sign: (Optional) The sign of `sum(weight * exp(x))`.
"""
with tf.name_scope(name or "reduce_weighted_logsumexp"):
logx = tf.convert_to_tensor(value=logx, name="logx")
if w is None:
lswe = tf.reduce_logsumexp(
input_tensor=logx, axis=axis, keepdims=keep_dims)
if return_sign:
sgn = tf.ones_like(lswe)
return lswe, sgn
return lswe
w = tf.convert_to_tensor(value=w, dtype=logx.dtype, name="w")
log_absw_x = logx + tf.math.log(tf.abs(w))
max_log_absw_x = tf.reduce_max(
input_tensor=log_absw_x, axis=axis, keepdims=True)
# If the largest element is `-inf` or `inf` then we don't bother subtracting
# off the max. We do this because otherwise we'd get `inf - inf = NaN`. That
# this is ok follows from the fact that we're actually free to subtract any
# value we like, so long as we add it back after taking the `log(sum(...))`.
max_log_absw_x = tf.where(
tf.math.is_inf(max_log_absw_x), tf.zeros_like(max_log_absw_x),
max_log_absw_x)
wx_over_max_absw_x = (tf.sign(w) * tf.exp(log_absw_x - max_log_absw_x))
sum_wx_over_max_absw_x = tf.reduce_sum(
input_tensor=wx_over_max_absw_x, axis=axis, keepdims=keep_dims)
if not keep_dims:
max_log_absw_x = tf.squeeze(max_log_absw_x, axis)
sgn = tf.sign(sum_wx_over_max_absw_x)
lswe = max_log_absw_x + tf.math.log(sgn * sum_wx_over_max_absw_x)
if return_sign:
return lswe, sgn
return lswe
|
def reduce_weighted_logsumexp(logx,
w=None,
axis=None,
keep_dims=False,
return_sign=False,
name=None):
"""Computes `log(abs(sum(weight * exp(elements across tensor dimensions))))`.
If all weights `w` are known to be positive, it is more efficient to directly
  use `reduce_logsumexp`, i.e., `tf.reduce_logsumexp(logx + tf.math.log(w))` is more
efficient than `du.reduce_weighted_logsumexp(logx, w)`.
  Reduces `logx` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
  This function is more numerically stable than `log(sum(w * exp(logx)))`. It
avoids overflows caused by taking the exp of large inputs and underflows
caused by taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0, 0],
[0, 0, 0]])
w = tf.constant([[-1., 1, 1],
[1, 1, 1]])
du.reduce_weighted_logsumexp(x, w)
# ==> log(-1*1 + 1*1 + 1*1 + 1*1 + 1*1 + 1*1) = log(4)
du.reduce_weighted_logsumexp(x, w, axis=0)
# ==> [log(-1+1), log(1+1), log(1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1)
# ==> [log(-1+1+1), log(1+1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True)
# ==> [[log(-1+1+1)], [log(1+1+1)]]
du.reduce_weighted_logsumexp(x, w, axis=[0, 1])
# ==> log(-1+5)
```
Args:
logx: The tensor to reduce. Should have numeric type.
w: The weight tensor. Should have numeric type identical to `logx`.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(logx), rank(logx))`.
keep_dims: If true, retains reduced dimensions with length 1.
return_sign: If `True`, returns the sign of the result.
name: A name for the operation (optional).
Returns:
lswe: The `log(abs(sum(weight * exp(x))))` reduced tensor.
sign: (Optional) The sign of `sum(weight * exp(x))`.
"""
with tf.name_scope(name or "reduce_weighted_logsumexp"):
logx = tf.convert_to_tensor(value=logx, name="logx")
if w is None:
lswe = tf.reduce_logsumexp(
input_tensor=logx, axis=axis, keepdims=keep_dims)
if return_sign:
sgn = tf.ones_like(lswe)
return lswe, sgn
return lswe
w = tf.convert_to_tensor(value=w, dtype=logx.dtype, name="w")
log_absw_x = logx + tf.math.log(tf.abs(w))
max_log_absw_x = tf.reduce_max(
input_tensor=log_absw_x, axis=axis, keepdims=True)
# If the largest element is `-inf` or `inf` then we don't bother subtracting
# off the max. We do this because otherwise we'd get `inf - inf = NaN`. That
# this is ok follows from the fact that we're actually free to subtract any
# value we like, so long as we add it back after taking the `log(sum(...))`.
max_log_absw_x = tf.where(
tf.math.is_inf(max_log_absw_x), tf.zeros_like(max_log_absw_x),
max_log_absw_x)
wx_over_max_absw_x = (tf.sign(w) * tf.exp(log_absw_x - max_log_absw_x))
sum_wx_over_max_absw_x = tf.reduce_sum(
input_tensor=wx_over_max_absw_x, axis=axis, keepdims=keep_dims)
if not keep_dims:
max_log_absw_x = tf.squeeze(max_log_absw_x, axis)
sgn = tf.sign(sum_wx_over_max_absw_x)
lswe = max_log_absw_x + tf.math.log(sgn * sum_wx_over_max_absw_x)
if return_sign:
return lswe, sgn
return lswe
|
[
"Computes",
"log",
"(",
"abs",
"(",
"sum",
"(",
"weight",
"*",
"exp",
"(",
"elements",
"across",
"tensor",
"dimensions",
"))))",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1701-L1793
|
[
"def",
"reduce_weighted_logsumexp",
"(",
"logx",
",",
"w",
"=",
"None",
",",
"axis",
"=",
"None",
",",
"keep_dims",
"=",
"False",
",",
"return_sign",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"reduce_weighted_logsumexp\"",
")",
":",
"logx",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"logx",
",",
"name",
"=",
"\"logx\"",
")",
"if",
"w",
"is",
"None",
":",
"lswe",
"=",
"tf",
".",
"reduce_logsumexp",
"(",
"input_tensor",
"=",
"logx",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"keep_dims",
")",
"if",
"return_sign",
":",
"sgn",
"=",
"tf",
".",
"ones_like",
"(",
"lswe",
")",
"return",
"lswe",
",",
"sgn",
"return",
"lswe",
"w",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"w",
",",
"dtype",
"=",
"logx",
".",
"dtype",
",",
"name",
"=",
"\"w\"",
")",
"log_absw_x",
"=",
"logx",
"+",
"tf",
".",
"math",
".",
"log",
"(",
"tf",
".",
"abs",
"(",
"w",
")",
")",
"max_log_absw_x",
"=",
"tf",
".",
"reduce_max",
"(",
"input_tensor",
"=",
"log_absw_x",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"True",
")",
"# If the largest element is `-inf` or `inf` then we don't bother subtracting",
"# off the max. We do this because otherwise we'd get `inf - inf = NaN`. That",
"# this is ok follows from the fact that we're actually free to subtract any",
"# value we like, so long as we add it back after taking the `log(sum(...))`.",
"max_log_absw_x",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"math",
".",
"is_inf",
"(",
"max_log_absw_x",
")",
",",
"tf",
".",
"zeros_like",
"(",
"max_log_absw_x",
")",
",",
"max_log_absw_x",
")",
"wx_over_max_absw_x",
"=",
"(",
"tf",
".",
"sign",
"(",
"w",
")",
"*",
"tf",
".",
"exp",
"(",
"log_absw_x",
"-",
"max_log_absw_x",
")",
")",
"sum_wx_over_max_absw_x",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"wx_over_max_absw_x",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"keep_dims",
")",
"if",
"not",
"keep_dims",
":",
"max_log_absw_x",
"=",
"tf",
".",
"squeeze",
"(",
"max_log_absw_x",
",",
"axis",
")",
"sgn",
"=",
"tf",
".",
"sign",
"(",
"sum_wx_over_max_absw_x",
")",
"lswe",
"=",
"max_log_absw_x",
"+",
"tf",
".",
"math",
".",
"log",
"(",
"sgn",
"*",
"sum_wx_over_max_absw_x",
")",
"if",
"return_sign",
":",
"return",
"lswe",
",",
"sgn",
"return",
"lswe"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
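The max-subtraction in `reduce_weighted_logsumexp` generalizes the usual log-sum-exp trick to signed weights. A minimal NumPy sketch that reduces over all elements (illustrative names; the library also handles `axis` and `keep_dims`):
```python
import numpy as np

def weighted_lse_np(logx, w):
  # Factor out the max of log(|w| * exp(logx)), exponentiate the signed
  # remainder, then reattach the max after taking the log.
  log_absw_x = logx + np.log(np.abs(w))
  m = np.max(log_absw_x)
  if np.isinf(m):   # avoid inf - inf below
    m = 0.0
  s = np.sum(np.sign(w) * np.exp(log_absw_x - m))
  return m + np.log(np.abs(s)), np.sign(s)

x = np.zeros((2, 3))
w = np.array([[-1., 1, 1], [1, 1, 1]])
lswe, sign = weighted_lse_np(x, w)
print(np.isclose(lswe, np.log(4.)), sign)  # ==> True 1.0
```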
softplus_inverse
|
Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).
Mathematically this op is equivalent to:
```none
softplus_inverse = log(exp(x) - 1.)
```
Args:
x: `Tensor`. Non-negative (not enforced), floating-point.
name: A name for the operation (optional).
Returns:
`Tensor`. Has the same type/shape as input `x`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def softplus_inverse(x, name=None):
"""Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).
Mathematically this op is equivalent to:
```none
softplus_inverse = log(exp(x) - 1.)
```
Args:
x: `Tensor`. Non-negative (not enforced), floating-point.
name: A name for the operation (optional).
Returns:
`Tensor`. Has the same type/shape as input `x`.
"""
with tf.name_scope(name or "softplus_inverse"):
x = tf.convert_to_tensor(value=x, name="x")
# We begin by deriving a more numerically stable softplus_inverse:
# x = softplus(y) = Log[1 + exp{y}], (which means x > 0).
# ==> exp{x} = 1 + exp{y} (1)
# ==> y = Log[exp{x} - 1] (2)
# = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]
# = Log[(1 - exp{-x}) / 1] + Log[exp{x}]
# = Log[1 - exp{-x}] + x (3)
# (2) is the "obvious" inverse, but (3) is more stable than (2) for large x.
# For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will
# be zero. To fix this, we use 1 - exp{-x} approx x for small x > 0.
#
# In addition to the numerically stable derivation above, we clamp
# small/large values to be congruent with the logic in:
# tensorflow/core/kernels/softplus_op.h
#
# Finally, we set the input to one whenever the input is too large or too
# small. This ensures that no unchosen codepath is +/- inf. This is
# necessary to ensure the gradient doesn't get NaNs. Recall that the
# gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false`
# thus an `inf` in an unselected path results in `0*inf=nan`. We are careful
# to overwrite `x` with ones only when we will never actually use this
# value. Note that we use ones and not zeros since `log(expm1(0.)) = -inf`.
threshold = np.log(np.finfo(dtype_util.as_numpy_dtype(x.dtype)).eps) + 2.
is_too_small = tf.less(x, np.exp(threshold))
is_too_large = tf.greater(x, -threshold)
too_small_value = tf.math.log(x)
too_large_value = x
# This `where` will ultimately be a NOP because we won't select this
# codepath whenever we used the surrogate `ones_like`.
x = tf.where(tf.logical_or(is_too_small, is_too_large), tf.ones_like(x), x)
y = x + tf.math.log(-tf.math.expm1(-x)) # == log(expm1(x))
return tf.where(is_too_small, too_small_value,
tf.where(is_too_large, too_large_value, y))
|
def softplus_inverse(x, name=None):
"""Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).
Mathematically this op is equivalent to:
```none
softplus_inverse = log(exp(x) - 1.)
```
Args:
x: `Tensor`. Non-negative (not enforced), floating-point.
name: A name for the operation (optional).
Returns:
`Tensor`. Has the same type/shape as input `x`.
"""
with tf.name_scope(name or "softplus_inverse"):
x = tf.convert_to_tensor(value=x, name="x")
# We begin by deriving a more numerically stable softplus_inverse:
# x = softplus(y) = Log[1 + exp{y}], (which means x > 0).
# ==> exp{x} = 1 + exp{y} (1)
# ==> y = Log[exp{x} - 1] (2)
# = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]
# = Log[(1 - exp{-x}) / 1] + Log[exp{x}]
# = Log[1 - exp{-x}] + x (3)
# (2) is the "obvious" inverse, but (3) is more stable than (2) for large x.
# For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will
# be zero. To fix this, we use 1 - exp{-x} approx x for small x > 0.
#
# In addition to the numerically stable derivation above, we clamp
# small/large values to be congruent with the logic in:
# tensorflow/core/kernels/softplus_op.h
#
# Finally, we set the input to one whenever the input is too large or too
# small. This ensures that no unchosen codepath is +/- inf. This is
# necessary to ensure the gradient doesn't get NaNs. Recall that the
# gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false`
# thus an `inf` in an unselected path results in `0*inf=nan`. We are careful
# to overwrite `x` with ones only when we will never actually use this
# value. Note that we use ones and not zeros since `log(expm1(0.)) = -inf`.
threshold = np.log(np.finfo(dtype_util.as_numpy_dtype(x.dtype)).eps) + 2.
is_too_small = tf.less(x, np.exp(threshold))
is_too_large = tf.greater(x, -threshold)
too_small_value = tf.math.log(x)
too_large_value = x
# This `where` will ultimately be a NOP because we won't select this
# codepath whenever we used the surrogate `ones_like`.
x = tf.where(tf.logical_or(is_too_small, is_too_large), tf.ones_like(x), x)
y = x + tf.math.log(-tf.math.expm1(-x)) # == log(expm1(x))
return tf.where(is_too_small, too_small_value,
tf.where(is_too_large, too_large_value, y))
|
[
"Computes",
"the",
"inverse",
"softplus",
"i",
".",
"e",
".",
"x",
"=",
"softplus_inverse",
"(",
"softplus",
"(",
"x",
"))",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1799-L1849
|
[
"def",
"softplus_inverse",
"(",
"x",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"softplus_inverse\"",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"x\"",
")",
"# We begin by deriving a more numerically stable softplus_inverse:",
"# x = softplus(y) = Log[1 + exp{y}], (which means x > 0).",
"# ==> exp{x} = 1 + exp{y} (1)",
"# ==> y = Log[exp{x} - 1] (2)",
"# = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]",
"# = Log[(1 - exp{-x}) / 1] + Log[exp{x}]",
"# = Log[1 - exp{-x}] + x (3)",
"# (2) is the \"obvious\" inverse, but (3) is more stable than (2) for large x.",
"# For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will",
"# be zero. To fix this, we use 1 - exp{-x} approx x for small x > 0.",
"#",
"# In addition to the numerically stable derivation above, we clamp",
"# small/large values to be congruent with the logic in:",
"# tensorflow/core/kernels/softplus_op.h",
"#",
"# Finally, we set the input to one whenever the input is too large or too",
"# small. This ensures that no unchosen codepath is +/- inf. This is",
"# necessary to ensure the gradient doesn't get NaNs. Recall that the",
"# gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false`",
"# thus an `inf` in an unselected path results in `0*inf=nan`. We are careful",
"# to overwrite `x` with ones only when we will never actually use this",
"# value. Note that we use ones and not zeros since `log(expm1(0.)) = -inf`.",
"threshold",
"=",
"np",
".",
"log",
"(",
"np",
".",
"finfo",
"(",
"dtype_util",
".",
"as_numpy_dtype",
"(",
"x",
".",
"dtype",
")",
")",
".",
"eps",
")",
"+",
"2.",
"is_too_small",
"=",
"tf",
".",
"less",
"(",
"x",
",",
"np",
".",
"exp",
"(",
"threshold",
")",
")",
"is_too_large",
"=",
"tf",
".",
"greater",
"(",
"x",
",",
"-",
"threshold",
")",
"too_small_value",
"=",
"tf",
".",
"math",
".",
"log",
"(",
"x",
")",
"too_large_value",
"=",
"x",
"# This `where` will ultimately be a NOP because we won't select this",
"# codepath whenever we used the surrogate `ones_like`.",
"x",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"logical_or",
"(",
"is_too_small",
",",
"is_too_large",
")",
",",
"tf",
".",
"ones_like",
"(",
"x",
")",
",",
"x",
")",
"y",
"=",
"x",
"+",
"tf",
".",
"math",
".",
"log",
"(",
"-",
"tf",
".",
"math",
".",
"expm1",
"(",
"-",
"x",
")",
")",
"# == log(expm1(x))",
"return",
"tf",
".",
"where",
"(",
"is_too_small",
",",
"too_small_value",
",",
"tf",
".",
"where",
"(",
"is_too_large",
",",
"too_large_value",
",",
"y",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
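The payoff of identity (3) over the naive form (2) in `softplus_inverse` shows up at the extremes of the input range. A small NumPy demonstration (the naive form intentionally triggers overflow/divide warnings here):
```python
import numpy as np

x = np.array([1e-300, 1., 800.])
naive = np.log(np.exp(x) - 1.)      # (2): -inf and inf at the extremes
stable = x + np.log(-np.expm1(-x))  # (3): == log(expm1(x)), well-behaved
print(naive)    # ==> [   -inf  0.5413     inf]
print(stable)   # ==> [-690.78  0.5413  800.  ]
# Round trip through a numerically stable softplus recovers x:
print(np.allclose(np.logaddexp(0., stable), x))  # ==> True
```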
dimension_size
|
Returns the size of a specific dimension.
|
tensorflow_probability/python/internal/distribution_util.py
|
def dimension_size(x, axis):
"""Returns the size of a specific dimension."""
# Since tf.gather isn't "constant-in, constant-out", we must first check the
# static shape or fallback to dynamic shape.
s = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(x.shape, np.abs(axis))[axis])
if s is not None:
return s
return tf.shape(input=x)[axis]
|
def dimension_size(x, axis):
"""Returns the size of a specific dimension."""
# Since tf.gather isn't "constant-in, constant-out", we must first check the
# static shape or fallback to dynamic shape.
s = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(x.shape, np.abs(axis))[axis])
if s is not None:
return s
return tf.shape(input=x)[axis]
|
[
"Returns",
"the",
"size",
"of",
"a",
"specific",
"dimension",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1853-L1861
|
[
"def",
"dimension_size",
"(",
"x",
",",
"axis",
")",
":",
"# Since tf.gather isn't \"constant-in, constant-out\", we must first check the",
"# static shape or fallback to dynamic shape.",
"s",
"=",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"tensorshape_util",
".",
"with_rank_at_least",
"(",
"x",
".",
"shape",
",",
"np",
".",
"abs",
"(",
"axis",
")",
")",
"[",
"axis",
"]",
")",
"if",
"s",
"is",
"not",
"None",
":",
"return",
"s",
"return",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"[",
"axis",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
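A minimal usage sketch for `dimension_size` in TF2 eager mode, assuming the helper above (and its imports) are in scope:
```python
import tensorflow as tf

x = tf.zeros([3, 4])
# The static shape is fully known here, so a plain Python int comes back
# without creating a graph op; only unknown dims fall back to tf.shape.
print(dimension_size(x, axis=-1))  # ==> 4
print(dimension_size(x, axis=0))   # ==> 3
```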
process_quadrature_grid_and_probs
|
Validates quadrature grid, probs or computes them as necessary.
Args:
quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
representing the sample points and the corresponding (possibly
normalized) weight. When `None`, defaults to:
`np.polynomial.hermite.hermgauss(deg=8)`.
dtype: The expected `dtype` of `grid` and `probs`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
representing the sample points and the corresponding (possibly
normalized) weight.
Raises:
ValueError: if `quadrature_grid_and_probs is not None` and
`len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def process_quadrature_grid_and_probs(quadrature_grid_and_probs,
dtype,
validate_args,
name=None):
"""Validates quadrature grid, probs or computes them as necessary.
Args:
quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
representing the sample points and the corresponding (possibly
normalized) weight. When `None`, defaults to:
`np.polynomial.hermite.hermgauss(deg=8)`.
dtype: The expected `dtype` of `grid` and `probs`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
representing the sample points and the corresponding (possibly
normalized) weight.
Raises:
ValueError: if `quadrature_grid_and_probs is not None` and
      `len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])`.
"""
with tf.name_scope(name or "process_quadrature_grid_and_probs"):
if quadrature_grid_and_probs is None:
grid, probs = np.polynomial.hermite.hermgauss(deg=8)
grid = grid.astype(dtype_util.as_numpy_dtype(dtype))
probs = probs.astype(dtype_util.as_numpy_dtype(dtype))
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
grid = tf.convert_to_tensor(value=grid, name="grid", dtype=dtype)
probs = tf.convert_to_tensor(value=probs, name="probs", dtype=dtype)
return grid, probs
grid, probs = tuple(quadrature_grid_and_probs)
grid = tf.convert_to_tensor(value=grid, name="grid", dtype=dtype)
probs = tf.convert_to_tensor(
value=probs, name="unnormalized_probs", dtype=dtype)
probs /= tf.norm(tensor=probs, ord=1, axis=-1, keepdims=True, name="probs")
def _static_event_size(x):
"""Returns the static size of a specific dimension or `None`."""
return tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(x.shape, 1)[-1])
m, n = _static_event_size(probs), _static_event_size(grid)
if m is not None and n is not None:
if m != n:
raise ValueError("`quadrature_grid_and_probs` must be a `tuple` of "
"same-length zero-th-dimension `Tensor`s "
"(saw lengths {}, {})".format(m, n))
elif validate_args:
assertions = [
assert_util.assert_equal(
dimension_size(probs, axis=-1),
dimension_size(grid, axis=-1),
message=("`quadrature_grid_and_probs` must be a `tuple` of "
"same-length zero-th-dimension `Tensor`s")),
]
with tf.control_dependencies(assertions):
grid = tf.identity(grid)
probs = tf.identity(probs)
return grid, probs
|
def process_quadrature_grid_and_probs(quadrature_grid_and_probs,
dtype,
validate_args,
name=None):
"""Validates quadrature grid, probs or computes them as necessary.
Args:
quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
representing the sample points and the corresponding (possibly
normalized) weight. When `None`, defaults to:
`np.polynomial.hermite.hermgauss(deg=8)`.
dtype: The expected `dtype` of `grid` and `probs`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
representing the sample points and the corresponding (possibly
normalized) weight.
Raises:
ValueError: if `quadrature_grid_and_probs is not None` and
      `len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])`.
"""
with tf.name_scope(name or "process_quadrature_grid_and_probs"):
if quadrature_grid_and_probs is None:
grid, probs = np.polynomial.hermite.hermgauss(deg=8)
grid = grid.astype(dtype_util.as_numpy_dtype(dtype))
probs = probs.astype(dtype_util.as_numpy_dtype(dtype))
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
grid = tf.convert_to_tensor(value=grid, name="grid", dtype=dtype)
probs = tf.convert_to_tensor(value=probs, name="probs", dtype=dtype)
return grid, probs
grid, probs = tuple(quadrature_grid_and_probs)
grid = tf.convert_to_tensor(value=grid, name="grid", dtype=dtype)
probs = tf.convert_to_tensor(
value=probs, name="unnormalized_probs", dtype=dtype)
probs /= tf.norm(tensor=probs, ord=1, axis=-1, keepdims=True, name="probs")
def _static_event_size(x):
"""Returns the static size of a specific dimension or `None`."""
return tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(x.shape, 1)[-1])
m, n = _static_event_size(probs), _static_event_size(grid)
if m is not None and n is not None:
if m != n:
raise ValueError("`quadrature_grid_and_probs` must be a `tuple` of "
"same-length zero-th-dimension `Tensor`s "
"(saw lengths {}, {})".format(m, n))
elif validate_args:
assertions = [
assert_util.assert_equal(
dimension_size(probs, axis=-1),
dimension_size(grid, axis=-1),
message=("`quadrature_grid_and_probs` must be a `tuple` of "
"same-length zero-th-dimension `Tensor`s")),
]
with tf.control_dependencies(assertions):
grid = tf.identity(grid)
probs = tf.identity(probs)
return grid, probs
|
[
"Validates",
"quadrature",
"grid",
"probs",
"or",
"computes",
"them",
"as",
"necessary",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1864-L1929
|
[
"def",
"process_quadrature_grid_and_probs",
"(",
"quadrature_grid_and_probs",
",",
"dtype",
",",
"validate_args",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"process_quadrature_grid_and_probs\"",
")",
":",
"if",
"quadrature_grid_and_probs",
"is",
"None",
":",
"grid",
",",
"probs",
"=",
"np",
".",
"polynomial",
".",
"hermite",
".",
"hermgauss",
"(",
"deg",
"=",
"8",
")",
"grid",
"=",
"grid",
".",
"astype",
"(",
"dtype_util",
".",
"as_numpy_dtype",
"(",
"dtype",
")",
")",
"probs",
"=",
"probs",
".",
"astype",
"(",
"dtype_util",
".",
"as_numpy_dtype",
"(",
"dtype",
")",
")",
"probs",
"/=",
"np",
".",
"linalg",
".",
"norm",
"(",
"probs",
",",
"ord",
"=",
"1",
",",
"keepdims",
"=",
"True",
")",
"grid",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"grid",
",",
"name",
"=",
"\"grid\"",
",",
"dtype",
"=",
"dtype",
")",
"probs",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"probs",
",",
"name",
"=",
"\"probs\"",
",",
"dtype",
"=",
"dtype",
")",
"return",
"grid",
",",
"probs",
"grid",
",",
"probs",
"=",
"tuple",
"(",
"quadrature_grid_and_probs",
")",
"grid",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"grid",
",",
"name",
"=",
"\"grid\"",
",",
"dtype",
"=",
"dtype",
")",
"probs",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"probs",
",",
"name",
"=",
"\"unnormalized_probs\"",
",",
"dtype",
"=",
"dtype",
")",
"probs",
"/=",
"tf",
".",
"norm",
"(",
"tensor",
"=",
"probs",
",",
"ord",
"=",
"1",
",",
"axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"True",
",",
"name",
"=",
"\"probs\"",
")",
"def",
"_static_event_size",
"(",
"x",
")",
":",
"\"\"\"Returns the static size of a specific dimension or `None`.\"\"\"",
"return",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"tensorshape_util",
".",
"with_rank_at_least",
"(",
"x",
".",
"shape",
",",
"1",
")",
"[",
"-",
"1",
"]",
")",
"m",
",",
"n",
"=",
"_static_event_size",
"(",
"probs",
")",
",",
"_static_event_size",
"(",
"grid",
")",
"if",
"m",
"is",
"not",
"None",
"and",
"n",
"is",
"not",
"None",
":",
"if",
"m",
"!=",
"n",
":",
"raise",
"ValueError",
"(",
"\"`quadrature_grid_and_probs` must be a `tuple` of \"",
"\"same-length zero-th-dimension `Tensor`s \"",
"\"(saw lengths {}, {})\"",
".",
"format",
"(",
"m",
",",
"n",
")",
")",
"elif",
"validate_args",
":",
"assertions",
"=",
"[",
"assert_util",
".",
"assert_equal",
"(",
"dimension_size",
"(",
"probs",
",",
"axis",
"=",
"-",
"1",
")",
",",
"dimension_size",
"(",
"grid",
",",
"axis",
"=",
"-",
"1",
")",
",",
"message",
"=",
"(",
"\"`quadrature_grid_and_probs` must be a `tuple` of \"",
"\"same-length zero-th-dimension `Tensor`s\"",
")",
")",
",",
"]",
"with",
"tf",
".",
"control_dependencies",
"(",
"assertions",
")",
":",
"grid",
"=",
"tf",
".",
"identity",
"(",
"grid",
")",
"probs",
"=",
"tf",
".",
"identity",
"(",
"probs",
")",
"return",
"grid",
",",
"probs"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
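The default branch of `process_quadrature_grid_and_probs` is reproducible with NumPy alone: a degree-8 Gauss-Hermite rule whose weights are L1-normalized to sum to one.
```python
import numpy as np

grid, probs = np.polynomial.hermite.hermgauss(deg=8)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
print(grid.shape, probs.shape)      # ==> (8,) (8,)
print(np.isclose(probs.sum(), 1.))  # ==> True
```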
pad
|
Pads `value` to the front and/or back of a `Tensor` dim, `count` times.
Args:
x: `Tensor` input.
axis: Scalar `int`-like `Tensor` representing the single dimension to pad.
(Negative indexing is supported.)
front: Python `bool`; if `True` the beginning of the `axis` dimension is
padded with `value`, `count` times. If `False` no front padding is made.
back: Python `bool`; if `True` the end of the `axis` dimension is padded
with `value`, `count` times. If `False` no end padding is made.
value: Scalar `int`-like `Tensor` representing the actual value added to the
front and/or back of the `axis` dimension of `x`.
count: Scalar `int`-like `Tensor` representing number of elements added to
the front and/or back of the `axis` dimension of `x`. E.g., if `front =
back = True` then `2 * count` elements are added.
name: Python `str` name prefixed to Ops created by this function.
Returns:
pad: The padded version of input `x`.
Raises:
ValueError: if both `front` and `back` are `False`.
TypeError: if `count` is not `int`-like.
|
tensorflow_probability/python/internal/distribution_util.py
|
def pad(x, axis, front=False, back=False, value=0, count=1, name=None):
"""Pads `value` to the front and/or back of a `Tensor` dim, `count` times.
Args:
x: `Tensor` input.
axis: Scalar `int`-like `Tensor` representing the single dimension to pad.
(Negative indexing is supported.)
front: Python `bool`; if `True` the beginning of the `axis` dimension is
padded with `value`, `count` times. If `False` no front padding is made.
back: Python `bool`; if `True` the end of the `axis` dimension is padded
with `value`, `count` times. If `False` no end padding is made.
value: Scalar `int`-like `Tensor` representing the actual value added to the
front and/or back of the `axis` dimension of `x`.
count: Scalar `int`-like `Tensor` representing number of elements added to
the front and/or back of the `axis` dimension of `x`. E.g., if `front =
back = True` then `2 * count` elements are added.
name: Python `str` name prefixed to Ops created by this function.
Returns:
pad: The padded version of input `x`.
Raises:
ValueError: if both `front` and `back` are `False`.
TypeError: if `count` is not `int`-like.
"""
with tf.name_scope(name or "pad"):
x = tf.convert_to_tensor(value=x, name="x")
value = tf.convert_to_tensor(value=value, dtype=x.dtype, name="value")
count = tf.convert_to_tensor(value=count, name="count")
if not dtype_util.is_integer(count.dtype):
raise TypeError("`count.dtype` (`{}`) must be `int`-like.".format(
dtype_util.name(count.dtype)))
if not front and not back:
raise ValueError("At least one of `front`, `back` must be `True`.")
ndims = (
tensorshape_util.rank(x.shape)
if tensorshape_util.rank(x.shape) is not None else tf.rank(
x, name="ndims"))
axis = tf.convert_to_tensor(value=axis, name="axis")
axis_ = tf.get_static_value(axis)
if axis_ is not None:
axis = axis_
if axis < 0:
axis = ndims + axis
count_ = tf.get_static_value(count)
if axis_ >= 0 or tensorshape_util.rank(x.shape) is not None:
head = x.shape[:axis]
mid_dim_value = tf.compat.dimension_value(x.shape[axis])
if count_ is None or mid_dim_value is None:
middle = tf.TensorShape(None)
else:
middle = tf.TensorShape(mid_dim_value + count_ * (front + back))
tail = x.shape[axis + 1:]
final_shape = head.concatenate(middle.concatenate(tail))
else:
final_shape = None
else:
axis = tf.where(axis < 0, ndims + axis, axis)
final_shape = None
x = tf.pad(
tensor=x,
paddings=tf.one_hot(
indices=tf.stack([axis if front else -1, axis if back else -1]),
depth=ndims,
axis=0,
on_value=count,
dtype=tf.int32),
constant_values=value)
if final_shape is not None:
tensorshape_util.set_shape(x, final_shape)
return x
|
def pad(x, axis, front=False, back=False, value=0, count=1, name=None):
"""Pads `value` to the front and/or back of a `Tensor` dim, `count` times.
Args:
x: `Tensor` input.
axis: Scalar `int`-like `Tensor` representing the single dimension to pad.
(Negative indexing is supported.)
front: Python `bool`; if `True` the beginning of the `axis` dimension is
padded with `value`, `count` times. If `False` no front padding is made.
back: Python `bool`; if `True` the end of the `axis` dimension is padded
with `value`, `count` times. If `False` no end padding is made.
value: Scalar `int`-like `Tensor` representing the actual value added to the
front and/or back of the `axis` dimension of `x`.
count: Scalar `int`-like `Tensor` representing number of elements added to
the front and/or back of the `axis` dimension of `x`. E.g., if `front =
back = True` then `2 * count` elements are added.
name: Python `str` name prefixed to Ops created by this function.
Returns:
pad: The padded version of input `x`.
Raises:
ValueError: if both `front` and `back` are `False`.
TypeError: if `count` is not `int`-like.
"""
with tf.name_scope(name or "pad"):
x = tf.convert_to_tensor(value=x, name="x")
value = tf.convert_to_tensor(value=value, dtype=x.dtype, name="value")
count = tf.convert_to_tensor(value=count, name="count")
if not dtype_util.is_integer(count.dtype):
raise TypeError("`count.dtype` (`{}`) must be `int`-like.".format(
dtype_util.name(count.dtype)))
if not front and not back:
raise ValueError("At least one of `front`, `back` must be `True`.")
ndims = (
tensorshape_util.rank(x.shape)
if tensorshape_util.rank(x.shape) is not None else tf.rank(
x, name="ndims"))
axis = tf.convert_to_tensor(value=axis, name="axis")
axis_ = tf.get_static_value(axis)
if axis_ is not None:
axis = axis_
if axis < 0:
axis = ndims + axis
count_ = tf.get_static_value(count)
if axis_ >= 0 or tensorshape_util.rank(x.shape) is not None:
head = x.shape[:axis]
mid_dim_value = tf.compat.dimension_value(x.shape[axis])
if count_ is None or mid_dim_value is None:
middle = tf.TensorShape(None)
else:
middle = tf.TensorShape(mid_dim_value + count_ * (front + back))
tail = x.shape[axis + 1:]
final_shape = head.concatenate(middle.concatenate(tail))
else:
final_shape = None
else:
axis = tf.where(axis < 0, ndims + axis, axis)
final_shape = None
x = tf.pad(
tensor=x,
paddings=tf.one_hot(
indices=tf.stack([axis if front else -1, axis if back else -1]),
depth=ndims,
axis=0,
on_value=count,
dtype=tf.int32),
constant_values=value)
if final_shape is not None:
tensorshape_util.set_shape(x, final_shape)
return x
|
[
"Pads",
"value",
"to",
"the",
"front",
"and",
"/",
"or",
"back",
"of",
"a",
"Tensor",
"dim",
"count",
"times",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1932-L2002
|
[
"def",
"pad",
"(",
"x",
",",
"axis",
",",
"front",
"=",
"False",
",",
"back",
"=",
"False",
",",
"value",
"=",
"0",
",",
"count",
"=",
"1",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"pad\"",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"x\"",
")",
"value",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"value",
",",
"dtype",
"=",
"x",
".",
"dtype",
",",
"name",
"=",
"\"value\"",
")",
"count",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"count",
",",
"name",
"=",
"\"count\"",
")",
"if",
"not",
"dtype_util",
".",
"is_integer",
"(",
"count",
".",
"dtype",
")",
":",
"raise",
"TypeError",
"(",
"\"`count.dtype` (`{}`) must be `int`-like.\"",
".",
"format",
"(",
"dtype_util",
".",
"name",
"(",
"count",
".",
"dtype",
")",
")",
")",
"if",
"not",
"front",
"and",
"not",
"back",
":",
"raise",
"ValueError",
"(",
"\"At least one of `front`, `back` must be `True`.\"",
")",
"ndims",
"=",
"(",
"tensorshape_util",
".",
"rank",
"(",
"x",
".",
"shape",
")",
"if",
"tensorshape_util",
".",
"rank",
"(",
"x",
".",
"shape",
")",
"is",
"not",
"None",
"else",
"tf",
".",
"rank",
"(",
"x",
",",
"name",
"=",
"\"ndims\"",
")",
")",
"axis",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"axis",
",",
"name",
"=",
"\"axis\"",
")",
"axis_",
"=",
"tf",
".",
"get_static_value",
"(",
"axis",
")",
"if",
"axis_",
"is",
"not",
"None",
":",
"axis",
"=",
"axis_",
"if",
"axis",
"<",
"0",
":",
"axis",
"=",
"ndims",
"+",
"axis",
"count_",
"=",
"tf",
".",
"get_static_value",
"(",
"count",
")",
"if",
"axis_",
">=",
"0",
"or",
"tensorshape_util",
".",
"rank",
"(",
"x",
".",
"shape",
")",
"is",
"not",
"None",
":",
"head",
"=",
"x",
".",
"shape",
"[",
":",
"axis",
"]",
"mid_dim_value",
"=",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"x",
".",
"shape",
"[",
"axis",
"]",
")",
"if",
"count_",
"is",
"None",
"or",
"mid_dim_value",
"is",
"None",
":",
"middle",
"=",
"tf",
".",
"TensorShape",
"(",
"None",
")",
"else",
":",
"middle",
"=",
"tf",
".",
"TensorShape",
"(",
"mid_dim_value",
"+",
"count_",
"*",
"(",
"front",
"+",
"back",
")",
")",
"tail",
"=",
"x",
".",
"shape",
"[",
"axis",
"+",
"1",
":",
"]",
"final_shape",
"=",
"head",
".",
"concatenate",
"(",
"middle",
".",
"concatenate",
"(",
"tail",
")",
")",
"else",
":",
"final_shape",
"=",
"None",
"else",
":",
"axis",
"=",
"tf",
".",
"where",
"(",
"axis",
"<",
"0",
",",
"ndims",
"+",
"axis",
",",
"axis",
")",
"final_shape",
"=",
"None",
"x",
"=",
"tf",
".",
"pad",
"(",
"tensor",
"=",
"x",
",",
"paddings",
"=",
"tf",
".",
"one_hot",
"(",
"indices",
"=",
"tf",
".",
"stack",
"(",
"[",
"axis",
"if",
"front",
"else",
"-",
"1",
",",
"axis",
"if",
"back",
"else",
"-",
"1",
"]",
")",
",",
"depth",
"=",
"ndims",
",",
"axis",
"=",
"0",
",",
"on_value",
"=",
"count",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"constant_values",
"=",
"value",
")",
"if",
"final_shape",
"is",
"not",
"None",
":",
"tensorshape_util",
".",
"set_shape",
"(",
"x",
",",
"final_shape",
")",
"return",
"x"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
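The `tf.one_hot` call in `pad` scatters `count` into a `[ndims, 2]` paddings table (column 0 pads the front of a dimension, column 1 the back). The same idea in a minimal NumPy sketch (hypothetical `pad_np`, static shapes only):
```python
import numpy as np

def pad_np(x, axis, front=False, back=False, value=0, count=1):
  # Build the per-dimension paddings table directly.
  paddings = np.zeros([x.ndim, 2], dtype=np.int32)
  if front:
    paddings[axis, 0] = count
  if back:
    paddings[axis, 1] = count
  return np.pad(x, paddings, constant_values=value)

print(pad_np(np.ones([2, 3]), axis=-1, back=True, value=7., count=2))
# ==> [[1. 1. 1. 7. 7.]
#      [1. 1. 1. 7. 7.]]
```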
parent_frame_arguments
|
Returns parent frame arguments.
When called inside a function, returns a dictionary with the caller's function
arguments. These are positional arguments and keyword arguments (**kwargs),
while variable arguments (*varargs) are excluded.
When called at global scope, this will return an empty dictionary, since there
are no arguments.
WARNING: If caller function argument names are overwritten before invoking
this method, then values will reflect the overwritten value. For this reason,
we recommend calling `parent_frame_arguments` at the beginning of the
function.
|
tensorflow_probability/python/internal/distribution_util.py
|
def parent_frame_arguments():
"""Returns parent frame arguments.
When called inside a function, returns a dictionary with the caller's function
arguments. These are positional arguments and keyword arguments (**kwargs),
while variable arguments (*varargs) are excluded.
When called at global scope, this will return an empty dictionary, since there
are no arguments.
  WARNING: If caller function argument names are overwritten before invoking
  this method, then values will reflect the overwritten value. For this reason,
we recommend calling `parent_frame_arguments` at the beginning of the
function.
"""
# All arguments and the names used for *varargs, and **kwargs
arg_names, variable_arg_name, keyword_arg_name, local_vars = (
tf_inspect._inspect.getargvalues( # pylint: disable=protected-access
# Get the first frame of the caller of this method.
tf_inspect._inspect.stack()[1][0])) # pylint: disable=protected-access
# Remove the *varargs, and flatten the **kwargs. Both are
# nested lists.
local_vars.pop(variable_arg_name, {})
keyword_args = local_vars.pop(keyword_arg_name, {})
final_args = {}
# Copy over arguments and their values. In general, local_vars
# may contain more than just the arguments, since this method
# can be called anywhere in a function.
for arg_name in arg_names:
final_args[arg_name] = local_vars.pop(arg_name)
final_args.update(keyword_args)
return final_args
|
def parent_frame_arguments():
"""Returns parent frame arguments.
When called inside a function, returns a dictionary with the caller's function
arguments. These are positional arguments and keyword arguments (**kwargs),
while variable arguments (*varargs) are excluded.
When called at global scope, this will return an empty dictionary, since there
are no arguments.
  WARNING: If caller function argument names are overwritten before invoking
  this method, then values will reflect the overwritten value. For this reason,
we recommend calling `parent_frame_arguments` at the beginning of the
function.
"""
# All arguments and the names used for *varargs, and **kwargs
arg_names, variable_arg_name, keyword_arg_name, local_vars = (
tf_inspect._inspect.getargvalues( # pylint: disable=protected-access
# Get the first frame of the caller of this method.
tf_inspect._inspect.stack()[1][0])) # pylint: disable=protected-access
# Remove the *varargs, and flatten the **kwargs. Both are
# nested lists.
local_vars.pop(variable_arg_name, {})
keyword_args = local_vars.pop(keyword_arg_name, {})
final_args = {}
# Copy over arguments and their values. In general, local_vars
# may contain more than just the arguments, since this method
# can be called anywhere in a function.
for arg_name in arg_names:
final_args[arg_name] = local_vars.pop(arg_name)
final_args.update(keyword_args)
return final_args
|
[
"Returns",
"parent",
"frame",
"arguments",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L2005-L2039
|
[
"def",
"parent_frame_arguments",
"(",
")",
":",
"# All arguments and the names used for *varargs, and **kwargs",
"arg_names",
",",
"variable_arg_name",
",",
"keyword_arg_name",
",",
"local_vars",
"=",
"(",
"tf_inspect",
".",
"_inspect",
".",
"getargvalues",
"(",
"# pylint: disable=protected-access",
"# Get the first frame of the caller of this method.",
"tf_inspect",
".",
"_inspect",
".",
"stack",
"(",
")",
"[",
"1",
"]",
"[",
"0",
"]",
")",
")",
"# pylint: disable=protected-access",
"# Remove the *varargs, and flatten the **kwargs. Both are",
"# nested lists.",
"local_vars",
".",
"pop",
"(",
"variable_arg_name",
",",
"{",
"}",
")",
"keyword_args",
"=",
"local_vars",
".",
"pop",
"(",
"keyword_arg_name",
",",
"{",
"}",
")",
"final_args",
"=",
"{",
"}",
"# Copy over arguments and their values. In general, local_vars",
"# may contain more than just the arguments, since this method",
"# can be called anywhere in a function.",
"for",
"arg_name",
"in",
"arg_names",
":",
"final_args",
"[",
"arg_name",
"]",
"=",
"local_vars",
".",
"pop",
"(",
"arg_name",
")",
"final_args",
".",
"update",
"(",
"keyword_args",
")",
"return",
"final_args"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
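A hedged usage sketch for `parent_frame_arguments`; `make_distribution` is a made-up caller, and the helper above is assumed importable (it relies on `tf_inspect` mirroring stdlib `inspect.getargvalues` semantics):
```python
def make_distribution(loc, scale=1., *args, **kwargs):
  # Capture this call's named arguments plus **kwargs; *args is dropped.
  return parent_frame_arguments()

print(make_distribution(0., scale=2., validate_args=True))
# ==> {'loc': 0.0, 'scale': 2.0, 'validate_args': True}
```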
expand_to_vector
|
Transform a 0-D or 1-D `Tensor` to be 1-D.
For user convenience, many parts of the TensorFlow Probability API accept
inputs of rank 0 or 1 -- i.e., allowing an `event_shape` of `[5]` to be passed
to the API as either `5` or `[5]`. This function can be used to transform
such an argument to always be 1-D.
NOTE: Python or NumPy values will be converted to `Tensor`s with standard type
inference/conversion. In particular, an empty list or tuple will become an
empty `Tensor` with dtype `float32`. Callers should convert values to
`Tensor`s before calling this function if different behavior is desired
(e.g. converting empty lists / other values to `Tensor`s with dtype `int32`).
Args:
x: A 0-D or 1-D `Tensor`.
tensor_name: Python `str` name for `Tensor`s created by this function.
op_name: Python `str` name for `Op`s created by this function.
validate_args: Python `bool`, default `False`. When `True`, arguments may be
checked for validity at execution time, possibly degrading runtime
performance. When `False`, invalid inputs may silently render incorrect
outputs.
Returns:
vector: a 1-D `Tensor`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def expand_to_vector(x, tensor_name=None, op_name=None, validate_args=False):
"""Transform a 0-D or 1-D `Tensor` to be 1-D.
For user convenience, many parts of the TensorFlow Probability API accept
inputs of rank 0 or 1 -- i.e., allowing an `event_shape` of `[5]` to be passed
to the API as either `5` or `[5]`. This function can be used to transform
such an argument to always be 1-D.
NOTE: Python or NumPy values will be converted to `Tensor`s with standard type
inference/conversion. In particular, an empty list or tuple will become an
empty `Tensor` with dtype `float32`. Callers should convert values to
`Tensor`s before calling this function if different behavior is desired
(e.g. converting empty lists / other values to `Tensor`s with dtype `int32`).
Args:
x: A 0-D or 1-D `Tensor`.
tensor_name: Python `str` name for `Tensor`s created by this function.
op_name: Python `str` name for `Op`s created by this function.
    validate_args: Python `bool`, default `False`. When `True`, arguments may be
checked for validity at execution time, possibly degrading runtime
performance. When `False`, invalid inputs may silently render incorrect
outputs.
Returns:
vector: a 1-D `Tensor`.
"""
with tf.name_scope(op_name or "expand_to_vector"):
x = tf.convert_to_tensor(value=x, name="x")
ndims = tensorshape_util.rank(x.shape)
if ndims is None:
# Maybe expand ndims from 0 to 1.
if validate_args:
x = with_dependencies([
assert_util.assert_rank_at_most(
x, 1, message="Input is neither scalar nor vector.")
], x)
ndims = tf.rank(x)
expanded_shape = pick_vector(
tf.equal(ndims, 0), np.array([1], dtype=np.int32), tf.shape(input=x))
return tf.reshape(x, expanded_shape)
elif ndims == 0:
# Definitely expand ndims from 0 to 1.
x_const = tf.get_static_value(x)
if x_const is not None:
return tf.convert_to_tensor(
value=dtype_util.as_numpy_dtype(x.dtype)([x_const]),
name=tensor_name)
else:
return tf.reshape(x, [1])
elif ndims != 1:
raise ValueError("Input is neither scalar nor vector.")
# ndims == 1
return x
|
def expand_to_vector(x, tensor_name=None, op_name=None, validate_args=False):
"""Transform a 0-D or 1-D `Tensor` to be 1-D.
For user convenience, many parts of the TensorFlow Probability API accept
inputs of rank 0 or 1 -- i.e., allowing an `event_shape` of `[5]` to be passed
to the API as either `5` or `[5]`. This function can be used to transform
such an argument to always be 1-D.
NOTE: Python or NumPy values will be converted to `Tensor`s with standard type
inference/conversion. In particular, an empty list or tuple will become an
empty `Tensor` with dtype `float32`. Callers should convert values to
`Tensor`s before calling this function if different behavior is desired
(e.g. converting empty lists / other values to `Tensor`s with dtype `int32`).
Args:
x: A 0-D or 1-D `Tensor`.
tensor_name: Python `str` name for `Tensor`s created by this function.
op_name: Python `str` name for `Op`s created by this function.
validate_args: Python `bool`, default `False`. When `True`, arguments may be
checked for validity at execution time, possibly degrading runtime
performance. When `False`, invalid inputs may silently render incorrect
outputs.
Returns:
vector: a 1-D `Tensor`.
"""
with tf.name_scope(op_name or "expand_to_vector"):
x = tf.convert_to_tensor(value=x, name="x")
ndims = tensorshape_util.rank(x.shape)
if ndims is None:
# Maybe expand ndims from 0 to 1.
if validate_args:
x = with_dependencies([
assert_util.assert_rank_at_most(
x, 1, message="Input is neither scalar nor vector.")
], x)
ndims = tf.rank(x)
expanded_shape = pick_vector(
tf.equal(ndims, 0), np.array([1], dtype=np.int32), tf.shape(input=x))
return tf.reshape(x, expanded_shape)
elif ndims == 0:
# Definitely expand ndims from 0 to 1.
x_const = tf.get_static_value(x)
if x_const is not None:
return tf.convert_to_tensor(
value=dtype_util.as_numpy_dtype(x.dtype)([x_const]),
name=tensor_name)
else:
return tf.reshape(x, [1])
elif ndims != 1:
raise ValueError("Input is neither scalar nor vector.")
# ndims == 1
return x
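A minimal usage sketch (assuming the internal `distribution_util` module is importable from a TFP source checkout; the values are illustrative):

import tensorflow as tf
from tensorflow_probability.python.internal import distribution_util

# A 0-D input is wrapped into a length-1 vector; a 1-D input passes through.
distribution_util.expand_to_vector(tf.constant(5))    # -> [5]
distribution_util.expand_to_vector(tf.constant([5]))  # -> [5]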
|
[
"Transform",
"a",
"0",
"-",
"D",
"or",
"1",
"-",
"D",
"Tensor",
"to",
"be",
"1",
"-",
"D",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L2103-L2159
|
[
"def",
"expand_to_vector",
"(",
"x",
",",
"tensor_name",
"=",
"None",
",",
"op_name",
"=",
"None",
",",
"validate_args",
"=",
"False",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"op_name",
"or",
"\"expand_to_vector\"",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"x\"",
")",
"ndims",
"=",
"tensorshape_util",
".",
"rank",
"(",
"x",
".",
"shape",
")",
"if",
"ndims",
"is",
"None",
":",
"# Maybe expand ndims from 0 to 1.",
"if",
"validate_args",
":",
"x",
"=",
"with_dependencies",
"(",
"[",
"assert_util",
".",
"assert_rank_at_most",
"(",
"x",
",",
"1",
",",
"message",
"=",
"\"Input is neither scalar nor vector.\"",
")",
"]",
",",
"x",
")",
"ndims",
"=",
"tf",
".",
"rank",
"(",
"x",
")",
"expanded_shape",
"=",
"pick_vector",
"(",
"tf",
".",
"equal",
"(",
"ndims",
",",
"0",
")",
",",
"np",
".",
"array",
"(",
"[",
"1",
"]",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
")",
"return",
"tf",
".",
"reshape",
"(",
"x",
",",
"expanded_shape",
")",
"elif",
"ndims",
"==",
"0",
":",
"# Definitely expand ndims from 0 to 1.",
"x_const",
"=",
"tf",
".",
"get_static_value",
"(",
"x",
")",
"if",
"x_const",
"is",
"not",
"None",
":",
"return",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"dtype_util",
".",
"as_numpy_dtype",
"(",
"x",
".",
"dtype",
")",
"(",
"[",
"x_const",
"]",
")",
",",
"name",
"=",
"tensor_name",
")",
"else",
":",
"return",
"tf",
".",
"reshape",
"(",
"x",
",",
"[",
"1",
"]",
")",
"elif",
"ndims",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Input is neither scalar nor vector.\"",
")",
"# ndims == 1",
"return",
"x"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
with_dependencies
|
Produces the content of `output_tensor` only after `dependencies`.
In some cases, a user may want the output of an operation to be consumed
externally only after some other dependencies have run first. This function
returns `output_tensor`, but only after all operations in `dependencies` have
run. Note that this means that there is no guarantee that `output_tensor` will
be evaluated after any `dependencies` have run.
See also `tf.tuple` and `tf.group`.
Args:
dependencies: Iterable of operations to run before this op finishes.
output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
name: (Optional) A name for this operation.
Returns:
output_with_deps: Same as `output_tensor` but with embedded dependencies.
Raises:
TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
|
tensorflow_probability/python/internal/distribution_util.py
|
def with_dependencies(dependencies, output_tensor, name=None):
"""Produces the content of `output_tensor` only after `dependencies`.
In some cases, a user may want the output of an operation to be consumed
externally only after some other dependencies have run first. This function
returns `output_tensor`, but only after all operations in `dependencies` have
run. Note that this means that there is no guarantee that `output_tensor` will
be evaluated after any `dependencies` have run.
See also `tf.tuple` and `tf.group`.
Args:
dependencies: Iterable of operations to run before this op finishes.
output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
name: (Optional) A name for this operation.
Returns:
output_with_deps: Same as `output_tensor` but with embedded dependencies.
Raises:
TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
"""
if tf.executing_eagerly():
return output_tensor
with tf.name_scope(name or "control_dependency") as name:
with tf.control_dependencies(d for d in dependencies if d is not None):
output_tensor = tf.convert_to_tensor(value=output_tensor)
if isinstance(output_tensor, tf.Tensor):
return tf.identity(output_tensor, name=name)
else:
return tf.IndexedSlices(
tf.identity(output_tensor.values, name=name),
output_tensor.indices,
output_tensor.dense_shape)
|
def with_dependencies(dependencies, output_tensor, name=None):
"""Produces the content of `output_tensor` only after `dependencies`.
In some cases, a user may want the output of an operation to be consumed
externally only after some other dependencies have run first. This function
returns `output_tensor`, but only after all operations in `dependencies` have
run. Note that this means that there is no guarantee that `output_tensor` will
be evaluated after any `dependencies` have run.
See also `tf.tuple` and `tf.group`.
Args:
dependencies: Iterable of operations to run before this op finishes.
output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
name: (Optional) A name for this operation.
Returns:
output_with_deps: Same as `output_tensor` but with embedded dependencies.
Raises:
TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
"""
if tf.executing_eagerly():
return output_tensor
with tf.name_scope(name or "control_dependency") as name:
with tf.control_dependencies(d for d in dependencies if d is not None):
output_tensor = tf.convert_to_tensor(value=output_tensor)
if isinstance(output_tensor, tf.Tensor):
return tf.identity(output_tensor, name=name)
else:
return tf.IndexedSlices(
tf.identity(output_tensor.values, name=name),
output_tensor.indices,
output_tensor.dense_shape)
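A graph-mode usage sketch (hypothetical variable names; as the early return above shows, the function is a no-op under eager execution):

import tensorflow.compat.v1 as tf1
tf1.disable_eager_execution()

counter = tf1.Variable(0)
increment = tf1.assign_add(counter, 1)
# Evaluating `total` in a session now forces `increment` to run first.
total = with_dependencies([increment], tf1.constant(7.))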
|
[
"Produces",
"the",
"content",
"of",
"output_tensor",
"only",
"after",
"dependencies",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L2162-L2195
|
[
"def",
"with_dependencies",
"(",
"dependencies",
",",
"output_tensor",
",",
"name",
"=",
"None",
")",
":",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"return",
"output_tensor",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"control_dependency\"",
")",
"as",
"name",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"d",
"for",
"d",
"in",
"dependencies",
"if",
"d",
"is",
"not",
"None",
")",
":",
"output_tensor",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"output_tensor",
")",
"if",
"isinstance",
"(",
"output_tensor",
",",
"tf",
".",
"Tensor",
")",
":",
"return",
"tf",
".",
"identity",
"(",
"output_tensor",
",",
"name",
"=",
"name",
")",
"else",
":",
"return",
"tf",
".",
"IndexedSlices",
"(",
"tf",
".",
"identity",
"(",
"output_tensor",
".",
"values",
",",
"name",
"=",
"name",
")",
",",
"output_tensor",
".",
"indices",
",",
"output_tensor",
".",
"dense_shape",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_maybe_validate_rightmost_transposed_ndims
|
Checks that `rightmost_transposed_ndims` is valid.
|
tensorflow_probability/python/bijectors/transpose.py
|
def _maybe_validate_rightmost_transposed_ndims(
rightmost_transposed_ndims, validate_args, name=None):
"""Checks that `rightmost_transposed_ndims` is valid."""
with tf.name_scope(name or 'maybe_validate_rightmost_transposed_ndims'):
assertions = []
if not dtype_util.is_integer(rightmost_transposed_ndims.dtype):
raise TypeError('`rightmost_transposed_ndims` must be integer type.')
if tensorshape_util.rank(rightmost_transposed_ndims.shape) is not None:
if tensorshape_util.rank(rightmost_transposed_ndims.shape) != 0:
raise ValueError('`rightmost_transposed_ndims` must be a scalar, '
'saw rank: {}.'.format(
tensorshape_util.rank(
rightmost_transposed_ndims.shape)))
elif validate_args:
assertions += [assert_util.assert_rank(rightmost_transposed_ndims, 0)]
rightmost_transposed_ndims_ = tf.get_static_value(
rightmost_transposed_ndims)
msg = '`rightmost_transposed_ndims` must be non-negative.'
if rightmost_transposed_ndims_ is not None:
if rightmost_transposed_ndims_ < 0:
raise ValueError(msg[:-1] + ', saw: {}.'.format(
rightmost_transposed_ndims_))
elif validate_args:
assertions += [
assert_util.assert_non_negative(
rightmost_transposed_ndims, message=msg)
]
return assertions
|
def _maybe_validate_rightmost_transposed_ndims(
rightmost_transposed_ndims, validate_args, name=None):
"""Checks that `rightmost_transposed_ndims` is valid."""
with tf.name_scope(name or 'maybe_validate_rightmost_transposed_ndims'):
assertions = []
if not dtype_util.is_integer(rightmost_transposed_ndims.dtype):
raise TypeError('`rightmost_transposed_ndims` must be integer type.')
if tensorshape_util.rank(rightmost_transposed_ndims.shape) is not None:
if tensorshape_util.rank(rightmost_transposed_ndims.shape) != 0:
raise ValueError('`rightmost_transposed_ndims` must be a scalar, '
'saw rank: {}.'.format(
tensorshape_util.rank(
rightmost_transposed_ndims.shape)))
elif validate_args:
assertions += [assert_util.assert_rank(rightmost_transposed_ndims, 0)]
rightmost_transposed_ndims_ = tf.get_static_value(
rightmost_transposed_ndims)
msg = '`rightmost_transposed_ndims` must be non-negative.'
if rightmost_transposed_ndims_ is not None:
if rightmost_transposed_ndims_ < 0:
raise ValueError(msg[:-1] + ', saw: {}.'.format(
rightmost_transposed_ndims_))
elif validate_args:
assertions += [
assert_util.assert_non_negative(
rightmost_transposed_ndims, message=msg)
]
return assertions
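A sketch of the static validation path (illustrative values for this module-private helper):

import tensorflow as tf

# A statically-known, valid value yields no runtime assertions.
_maybe_validate_rightmost_transposed_ndims(
    tf.constant(2), validate_args=False)  # -> []
# A statically-known negative value raises at trace time:
# ValueError: `rightmost_transposed_ndims` must be non-negative, saw: -1.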
|
[
"Checks",
"that",
"rightmost_transposed_ndims",
"is",
"valid",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/transpose.py#L258-L288
|
[
"def",
"_maybe_validate_rightmost_transposed_ndims",
"(",
"rightmost_transposed_ndims",
",",
"validate_args",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"'maybe_validate_rightmost_transposed_ndims'",
")",
":",
"assertions",
"=",
"[",
"]",
"if",
"not",
"dtype_util",
".",
"is_integer",
"(",
"rightmost_transposed_ndims",
".",
"dtype",
")",
":",
"raise",
"TypeError",
"(",
"'`rightmost_transposed_ndims` must be integer type.'",
")",
"if",
"tensorshape_util",
".",
"rank",
"(",
"rightmost_transposed_ndims",
".",
"shape",
")",
"is",
"not",
"None",
":",
"if",
"tensorshape_util",
".",
"rank",
"(",
"rightmost_transposed_ndims",
".",
"shape",
")",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"'`rightmost_transposed_ndims` must be a scalar, '",
"'saw rank: {}.'",
".",
"format",
"(",
"tensorshape_util",
".",
"rank",
"(",
"rightmost_transposed_ndims",
".",
"shape",
")",
")",
")",
"elif",
"validate_args",
":",
"assertions",
"+=",
"[",
"assert_util",
".",
"assert_rank",
"(",
"rightmost_transposed_ndims",
",",
"0",
")",
"]",
"rightmost_transposed_ndims_",
"=",
"tf",
".",
"get_static_value",
"(",
"rightmost_transposed_ndims",
")",
"msg",
"=",
"'`rightmost_transposed_ndims` must be non-negative.'",
"if",
"rightmost_transposed_ndims_",
"is",
"not",
"None",
":",
"if",
"rightmost_transposed_ndims_",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"msg",
"[",
":",
"-",
"1",
"]",
"+",
"', saw: {}.'",
".",
"format",
"(",
"rightmost_transposed_ndims_",
")",
")",
"elif",
"validate_args",
":",
"assertions",
"+=",
"[",
"assert_util",
".",
"assert_non_negative",
"(",
"rightmost_transposed_ndims",
",",
"message",
"=",
"msg",
")",
"]",
"return",
"assertions"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_maybe_validate_perm
|
Checks that `perm` is valid.
|
tensorflow_probability/python/bijectors/transpose.py
|
def _maybe_validate_perm(perm, validate_args, name=None):
"""Checks that `perm` is valid."""
with tf.name_scope(name or 'maybe_validate_perm'):
assertions = []
if not dtype_util.is_integer(perm.dtype):
raise TypeError('`perm` must be integer type')
msg = '`perm` must be a vector.'
if tensorshape_util.rank(perm.shape) is not None:
if tensorshape_util.rank(perm.shape) != 1:
raise ValueError(
msg[:-1] +
', saw rank: {}.'.format(tensorshape_util.rank(perm.shape)))
elif validate_args:
assertions += [assert_util.assert_rank(perm, 1, message=msg)]
perm_ = tf.get_static_value(perm)
msg = '`perm` must be a valid permutation vector.'
if perm_ is not None:
if not np.all(np.arange(np.size(perm_)) == np.sort(perm_)):
raise ValueError(msg[:-1] + ', saw: {}.'.format(perm_))
elif validate_args:
assertions += [
assert_util.assert_equal(
tf.sort(perm), tf.range(tf.size(input=perm)), message=msg)
]
return assertions
|
def _maybe_validate_perm(perm, validate_args, name=None):
"""Checks that `perm` is valid."""
with tf.name_scope(name or 'maybe_validate_perm'):
assertions = []
if not dtype_util.is_integer(perm.dtype):
raise TypeError('`perm` must be integer type')
msg = '`perm` must be a vector.'
if tensorshape_util.rank(perm.shape) is not None:
if tensorshape_util.rank(perm.shape) != 1:
raise ValueError(
msg[:-1] +
', saw rank: {}.'.format(tensorshape_util.rank(perm.shape)))
elif validate_args:
assertions += [assert_util.assert_rank(perm, 1, message=msg)]
perm_ = tf.get_static_value(perm)
msg = '`perm` must be a valid permutation vector.'
if perm_ is not None:
if not np.all(np.arange(np.size(perm_)) == np.sort(perm_)):
raise ValueError(msg[:-1] + ', saw: {}.'.format(perm_))
elif validate_args:
assertions += [
assert_util.assert_equal(
tf.sort(perm), tf.range(tf.size(input=perm)), message=msg)
]
return assertions
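Analogous illustrative behavior for the permutation check:

import tensorflow as tf

# A statically-known, valid permutation yields no runtime assertions.
_maybe_validate_perm(tf.constant([1, 0, 2]), validate_args=False)  # -> []
# An invalid permutation is rejected at trace time:
# ValueError: `perm` must be a valid permutation vector, saw: [0 0 2].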
|
[
"Checks",
"that",
"perm",
"is",
"valid",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/transpose.py#L291-L318
|
[
"def",
"_maybe_validate_perm",
"(",
"perm",
",",
"validate_args",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"'maybe_validate_perm'",
")",
":",
"assertions",
"=",
"[",
"]",
"if",
"not",
"dtype_util",
".",
"is_integer",
"(",
"perm",
".",
"dtype",
")",
":",
"raise",
"TypeError",
"(",
"'`perm` must be integer type'",
")",
"msg",
"=",
"'`perm` must be a vector.'",
"if",
"tensorshape_util",
".",
"rank",
"(",
"perm",
".",
"shape",
")",
"is",
"not",
"None",
":",
"if",
"tensorshape_util",
".",
"rank",
"(",
"perm",
".",
"shape",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"msg",
"[",
":",
"-",
"1",
"]",
"+",
"', saw rank: {}.'",
".",
"format",
"(",
"tensorshape_util",
".",
"rank",
"(",
"perm",
".",
"shape",
")",
")",
")",
"elif",
"validate_args",
":",
"assertions",
"+=",
"[",
"assert_util",
".",
"assert_rank",
"(",
"perm",
",",
"1",
",",
"message",
"=",
"msg",
")",
"]",
"perm_",
"=",
"tf",
".",
"get_static_value",
"(",
"perm",
")",
"msg",
"=",
"'`perm` must be a valid permutation vector.'",
"if",
"perm_",
"is",
"not",
"None",
":",
"if",
"not",
"np",
".",
"all",
"(",
"np",
".",
"arange",
"(",
"np",
".",
"size",
"(",
"perm_",
")",
")",
"==",
"np",
".",
"sort",
"(",
"perm_",
")",
")",
":",
"raise",
"ValueError",
"(",
"msg",
"[",
":",
"-",
"1",
"]",
"+",
"', saw: {}.'",
".",
"format",
"(",
"perm_",
")",
")",
"elif",
"validate_args",
":",
"assertions",
"+=",
"[",
"assert_util",
".",
"assert_equal",
"(",
"tf",
".",
"sort",
"(",
"perm",
")",
",",
"tf",
".",
"range",
"(",
"tf",
".",
"size",
"(",
"input",
"=",
"perm",
")",
")",
",",
"message",
"=",
"msg",
")",
"]",
"return",
"assertions"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
Transpose._event_shape
|
Helper for _forward and _inverse_event_shape.
|
tensorflow_probability/python/bijectors/transpose.py
|
def _event_shape(self, shape, static_perm_to_shape):
"""Helper for _forward and _inverse_event_shape."""
rightmost_ = tf.get_static_value(self.rightmost_transposed_ndims)
if tensorshape_util.rank(shape) is None or rightmost_ is None:
return tf.TensorShape(None)
if tensorshape_util.rank(shape) < rightmost_:
raise ValueError('Invalid shape: min event ndims={} but got {}'.format(
rightmost_, shape))
perm_ = tf.get_static_value(self.perm, partial=True)
if perm_ is None:
return shape[:tensorshape_util.rank(shape) - rightmost_].concatenate(
[None] * int(rightmost_))
# We can use elimination to reidentify a single None dimension.
if sum(p is None for p in perm_) == 1:
present = np.argsort([-1 if p is None else p for p in perm_])
for i, p in enumerate(present[1:]): # The -1 sorts to position 0.
if i != p:
perm_ = [i if p is None else p for p in perm_]
break
return shape[:tensorshape_util.rank(shape) - rightmost_].concatenate(
static_perm_to_shape(shape[tensorshape_util.rank(shape) - rightmost_:],
perm_))
|
def _event_shape(self, shape, static_perm_to_shape):
"""Helper for _forward and _inverse_event_shape."""
rightmost_ = tf.get_static_value(self.rightmost_transposed_ndims)
if tensorshape_util.rank(shape) is None or rightmost_ is None:
return tf.TensorShape(None)
if tensorshape_util.rank(shape) < rightmost_:
raise ValueError('Invalid shape: min event ndims={} but got {}'.format(
rightmost_, shape))
perm_ = tf.get_static_value(self.perm, partial=True)
if perm_ is None:
return shape[:tensorshape_util.rank(shape) - rightmost_].concatenate(
[None] * int(rightmost_))
# We can use elimination to reidentify a single None dimension.
if sum(p is None for p in perm_) == 1:
present = np.argsort([-1 if p is None else p for p in perm_])
for i, p in enumerate(present[1:]): # The -1 sorts to position 0.
if i != p:
perm_ = [i if p is None else p for p in perm_]
break
return shape[:tensorshape_util.rank(shape) - rightmost_].concatenate(
static_perm_to_shape(shape[tensorshape_util.rank(shape) - rightmost_:],
perm_))
|
[
"Helper",
"for",
"_forward",
"and",
"_inverse_event_shape",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/transpose.py#L184-L205
|
[
"def",
"_event_shape",
"(",
"self",
",",
"shape",
",",
"static_perm_to_shape",
")",
":",
"rightmost_",
"=",
"tf",
".",
"get_static_value",
"(",
"self",
".",
"rightmost_transposed_ndims",
")",
"if",
"tensorshape_util",
".",
"rank",
"(",
"shape",
")",
"is",
"None",
"or",
"rightmost_",
"is",
"None",
":",
"return",
"tf",
".",
"TensorShape",
"(",
"None",
")",
"if",
"tensorshape_util",
".",
"rank",
"(",
"shape",
")",
"<",
"rightmost_",
":",
"raise",
"ValueError",
"(",
"'Invalid shape: min event ndims={} but got {}'",
".",
"format",
"(",
"rightmost_",
",",
"shape",
")",
")",
"perm_",
"=",
"tf",
".",
"get_static_value",
"(",
"self",
".",
"perm",
",",
"partial",
"=",
"True",
")",
"if",
"perm_",
"is",
"None",
":",
"return",
"shape",
"[",
":",
"tensorshape_util",
".",
"rank",
"(",
"shape",
")",
"-",
"rightmost_",
"]",
".",
"concatenate",
"(",
"[",
"None",
"]",
"*",
"int",
"(",
"rightmost_",
")",
")",
"# We can use elimination to reidentify a single None dimension.",
"if",
"sum",
"(",
"p",
"is",
"None",
"for",
"p",
"in",
"perm_",
")",
"==",
"1",
":",
"present",
"=",
"np",
".",
"argsort",
"(",
"[",
"-",
"1",
"if",
"p",
"is",
"None",
"else",
"p",
"for",
"p",
"in",
"perm_",
"]",
")",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"present",
"[",
"1",
":",
"]",
")",
":",
"# The -1 sorts to position 0.",
"if",
"i",
"!=",
"p",
":",
"perm_",
"=",
"[",
"i",
"if",
"p",
"is",
"None",
"else",
"p",
"for",
"p",
"in",
"perm_",
"]",
"break",
"return",
"shape",
"[",
":",
"tensorshape_util",
".",
"rank",
"(",
"shape",
")",
"-",
"rightmost_",
"]",
".",
"concatenate",
"(",
"static_perm_to_shape",
"(",
"shape",
"[",
"tensorshape_util",
".",
"rank",
"(",
"shape",
")",
"-",
"rightmost_",
":",
"]",
",",
"perm_",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
concatenate
|
Returns the concatenation of the dimensions in `x` and `other`.
*Note:* If either `x` or `other` is completely unknown, concatenation will
discard information about the other shape. In the future, we might support
concatenation that preserves this information for use with slicing.
For more details, see `help(tf.TensorShape.concatenate)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
other: object representing a shape; convertible to `tf.TensorShape`.
Returns:
new_shape: an object like `x` whose elements are the concatenation of the
dimensions in `x` and `other`.
|
tensorflow_probability/python/internal/tensorshape_util.py
|
def concatenate(x, other):
"""Returns the concatenation of the dimension in `x` and `other`.
*Note:* If either `x` or `other` is completely unknown, concatenation will
discard information about the other shape. In the future, we might support
concatenation that preserves this information for use with slicing.
For more details, see `help(tf.TensorShape.concatenate)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
other: object representing a shape; convertible to `tf.TensorShape`.
Returns:
new_shape: an object like `x` whose elements are the concatenation of the
dimensions in `x` and `other`.
"""
return type(x)(tf.TensorShape(x).concatenate(other))
|
def concatenate(x, other):
"""Returns the concatenation of the dimension in `x` and `other`.
*Note:* If either `x` or `other` is completely unknown, concatenation will
discard information about the other shape. In the future, we might support
concatenation that preserves this information for use with slicing.
For more details, see `help(tf.TensorShape.concatenate)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
other: object representing a shape; convertible to `tf.TensorShape`.
Returns:
new_shape: an object like `x` whose elements are the concatenation of the
dimensions in `x` and `other`.
"""
return type(x)(tf.TensorShape(x).concatenate(other))
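Illustrative behavior (the result keeps the type of the first argument; under TF1 the tuple elements may be `tf.Dimension`s):

import tensorflow as tf

concatenate(tf.TensorShape([3, 4]), [5])  # -> TensorShape([3, 4, 5])
concatenate((3, 4), [5])                  # -> (3, 4, 5), a plain tuple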
|
[
"Returns",
"the",
"concatenation",
"of",
"the",
"dimension",
"in",
"x",
"and",
"other",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/tensorshape_util.py#L99-L116
|
[
"def",
"concatenate",
"(",
"x",
",",
"other",
")",
":",
"return",
"type",
"(",
"x",
")",
"(",
"tf",
".",
"TensorShape",
"(",
"x",
")",
".",
"concatenate",
"(",
"other",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
constant_value_as_shape
|
A version of `constant_value()` that returns a `TensorShape`.
This version should be used when a constant tensor value is
interpreted as a (possibly partial) shape, e.g. in the shape
function for `tf.reshape()`. By explicitly requesting a
`TensorShape` as the return value, it is possible to represent
unknown dimensions; by contrast, `constant_value()` is
all-or-nothing.
Args:
tensor: The rank-0 or rank-1 Tensor to be evaluated.
Returns:
A `TensorShape` based on the constant value of the given `tensor`.
Raises:
ValueError: If the shape is rank-0 and is not statically known to be -1.
|
tensorflow_probability/python/internal/tensorshape_util.py
|
def constant_value_as_shape(tensor): # pylint: disable=invalid-name
"""A version of `constant_value()` that returns a `TensorShape`.
This version should be used when a constant tensor value is
interpreted as a (possibly partial) shape, e.g. in the shape
function for `tf.reshape()`. By explicitly requesting a
`TensorShape` as the return value, it is possible to represent
unknown dimensions; by contrast, `constant_value()` is
all-or-nothing.
Args:
tensor: The rank-0 or rank-1 Tensor to be evaluated.
Returns:
A `TensorShape` based on the constant value of the given `tensor`.
Raises:
ValueError: If the shape is rank-0 and is not statically known to be -1.
"""
shape = tf.get_static_value(tensor)
if shape is not None:
return [None if dim == -1 else dim for dim in shape]
return tensor_util.constant_value_as_shape(tensor)
|
def constant_value_as_shape(tensor): # pylint: disable=invalid-name
"""A version of `constant_value()` that returns a `TensorShape`.
This version should be used when a constant tensor value is
interpreted as a (possibly partial) shape, e.g. in the shape
function for `tf.reshape()`. By explicitly requesting a
`TensorShape` as the return value, it is possible to represent
unknown dimensions; by contrast, `constant_value()` is
all-or-nothing.
Args:
tensor: The rank-0 or rank-1 Tensor to be evaluated.
Returns:
A `TensorShape` based on the constant value of the given `tensor`.
Raises:
ValueError: If the shape is rank-0 and is not statically known to be -1.
"""
shape = tf.get_static_value(tensor)
if shape is not None:
return [None if dim == -1 else dim for dim in shape]
return tensor_util.constant_value_as_shape(tensor)
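Illustrative behavior (the -1 convention mirrors `tf.reshape`):

import tensorflow as tf

# -1 entries in a statically-known shape tensor become unknown dimensions.
constant_value_as_shape(tf.constant([2, -1, 3]))  # -> [2, None, 3]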
|
[
"A",
"version",
"of",
"constant_value",
"()",
"that",
"returns",
"a",
"TensorShape",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/tensorshape_util.py#L119-L141
|
[
"def",
"constant_value_as_shape",
"(",
"tensor",
")",
":",
"# pylint: disable=invalid-name",
"shape",
"=",
"tf",
".",
"get_static_value",
"(",
"tensor",
")",
"if",
"shape",
"is",
"not",
"None",
":",
"return",
"[",
"None",
"if",
"dim",
"==",
"-",
"1",
"else",
"dim",
"for",
"dim",
"in",
"shape",
"]",
"return",
"tensor_util",
".",
"constant_value_as_shape",
"(",
"tensor",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
dims
|
Returns a list of dimension sizes, or `None` if `rank` is unknown.
For more details, see `help(tf.TensorShape.dims)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
Returns:
shape_as_list: list of sizes or `None` values representing each
dimension's size if known. A size is `tf.Dimension` if the input is a
`tf.TensorShape` and an `int` otherwise.
|
tensorflow_probability/python/internal/tensorshape_util.py
|
def dims(x):
"""Returns a list of dimension sizes, or `None` if `rank` is unknown.
For more details, see `help(tf.TensorShape.dims)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
Returns:
shape_as_list: list of sizes or `None` values representing each
dimension's size if known. A size is `tf.Dimension` if the input is a
`tf.TensorShape` and an `int` otherwise.
"""
if isinstance(x, tf.TensorShape):
return x.dims
r = tf.TensorShape(x).dims
return None if r is None else list(map(tf.compat.dimension_value, r))
|
def dims(x):
"""Returns a list of dimension sizes, or `None` if `rank` is unknown.
For more details, see `help(tf.TensorShape.dims)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
Returns:
shape_as_list: list of sizes or `None` values representing each
dimension's size if known. A size is `tf.Dimension` if the input is a
`tf.TensorShape` and an `int` otherwise.
"""
if isinstance(x, tf.TensorShape):
return x.dims
r = tf.TensorShape(x).dims
return None if r is None else list(map(tf.compat.dimension_value, r))
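Illustrative behavior for the three input kinds:

import tensorflow as tf

dims(tf.TensorShape([2, 3]))  # -> [2, 3] (`tf.Dimension`s under TF1)
dims([2, None])               # -> [2, None], plain ints and `None`
dims(tf.TensorShape(None))    # -> None (unknown rank)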
|
[
"Returns",
"a",
"list",
"of",
"dimension",
"sizes",
"or",
"None",
"if",
"rank",
"is",
"unknown",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/tensorshape_util.py#L144-L160
|
[
"def",
"dims",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"tf",
".",
"TensorShape",
")",
":",
"return",
"x",
".",
"dims",
"r",
"=",
"tf",
".",
"TensorShape",
"(",
"x",
")",
".",
"dims",
"return",
"None",
"if",
"r",
"is",
"None",
"else",
"list",
"(",
"map",
"(",
"tf",
".",
"compat",
".",
"dimension_value",
",",
"r",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
merge_with
|
Returns a shape combining the information in `x` and `other`.
The dimensions in `x` and `other` are merged elementwise, according to the
rules defined for `tf.Dimension.merge_with()`.
For more details, see `help(tf.TensorShape.merge_with)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
other: object representing a shape; convertible to `tf.TensorShape`.
Returns:
merged_shape: shape having `type(x)` containing the combined information of
`x` and `other`.
Raises:
ValueError: If `x` and `other` are not compatible.
|
tensorflow_probability/python/internal/tensorshape_util.py
|
def merge_with(x, other):
"""Returns a shape combining the information in `x` and `other`.
The dimensions in `x` and `other` are merged elementwise, according to the
rules defined for `tf.Dimension.merge_with()`.
For more details, see `help(tf.TensorShape.merge_with)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
other: object representing a shape; convertible to `tf.TensorShape`.
Returns:
merged_shape: shape having `type(x)` containing the combined information of
`x` and `other`.
Raises:
ValueError: If `x` and `other` are not compatible.
"""
return type(x)(tf.TensorShape(x).merge_with(other))
|
def merge_with(x, other):
"""Returns a shape combining the information in `x` and `other`.
The dimensions in `x` and `other` are merged elementwise, according to the
rules defined for `tf.Dimension.merge_with()`.
For more details, see `help(tf.TensorShape.merge_with)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
other: object representing a shape; convertible to `tf.TensorShape`.
Returns:
merged_shape: shape having `type(x)` containing the combined information of
`x` and `other`.
Raises:
ValueError: If `x` and `other` are not compatible.
"""
return type(x)(tf.TensorShape(x).merge_with(other))
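Illustrative behavior:

import tensorflow as tf

# Unknown dimensions are filled in from the other shape.
merge_with(tf.TensorShape([None, 3]), [2, 3])  # -> TensorShape([2, 3])
# Incompatible shapes raise:
# merge_with([2, 3], [2, 4])  # ValueError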
|
[
"Returns",
"a",
"shape",
"combining",
"the",
"information",
"in",
"x",
"and",
"other",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/tensorshape_util.py#L192-L211
|
[
"def",
"merge_with",
"(",
"x",
",",
"other",
")",
":",
"return",
"type",
"(",
"x",
")",
"(",
"tf",
".",
"TensorShape",
"(",
"x",
")",
".",
"merge_with",
"(",
"other",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
with_rank_at_least
|
Returns a shape based on `x` with at least the given `rank`.
For more details, see `help(tf.TensorShape.with_rank_at_least)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
rank: An `int` representing the minimum rank of `x` or else an assertion is
raised.
Returns:
shape: a shape having `type(x)` but guaranteed to have at least the given
rank (or else an assertion was raised).
Raises:
ValueError: If `x` does not represent a shape with at least the given
`rank`.
|
tensorflow_probability/python/internal/tensorshape_util.py
|
def with_rank_at_least(x, rank): # pylint: disable=redefined-outer-name
"""Returns a shape based on `x` with at least the given `rank`.
For more details, see `help(tf.TensorShape.with_rank_at_least)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
rank: An `int` representing the minimum rank of `x` or else an assertion is
raised.
Returns:
shape: a shape having `type(x)` but guaranteed to have at least the given
rank (or else an assertion was raised).
Raises:
ValueError: If `x` does not represent a shape with at least the given
`rank`.
"""
return type(x)(tf.TensorShape(x).with_rank_at_least(rank))
|
def with_rank_at_least(x, rank): # pylint: disable=redefined-outer-name
"""Returns a shape based on `x` with at least the given `rank`.
For more details, see `help(tf.TensorShape.with_rank_at_least)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
rank: An `int` representing the minimum rank of `x` or else an assertion is
raised.
Returns:
shape: a shape having `type(x)` but guaranteed to have at least the given
rank (or else an assertion was raised).
Raises:
ValueError: If `x` does not represent a shape with at least the given
`rank`.
"""
return type(x)(tf.TensorShape(x).with_rank_at_least(rank))
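Illustrative behavior:

import tensorflow as tf

with_rank_at_least(tf.TensorShape([2, 3]), 1)  # -> TensorShape([2, 3])
# with_rank_at_least(tf.TensorShape([2, 3]), 3) raises ValueError.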
|
[
"Returns",
"a",
"shape",
"based",
"on",
"x",
"with",
"at",
"least",
"the",
"given",
"rank",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/tensorshape_util.py#L285-L303
|
[
"def",
"with_rank_at_least",
"(",
"x",
",",
"rank",
")",
":",
"# pylint: disable=redefined-outer-name",
"return",
"type",
"(",
"x",
")",
"(",
"tf",
".",
"TensorShape",
"(",
"x",
")",
".",
"with_rank_at_least",
"(",
"rank",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_check_equal_shape
|
Check that source and target shape match, statically if possible.
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def _check_equal_shape(name,
static_shape,
dynamic_shape,
static_target_shape,
dynamic_target_shape=None):
"""Check that source and target shape match, statically if possible."""
static_target_shape = tf.TensorShape(static_target_shape)
if tensorshape_util.is_fully_defined(
static_shape) and tensorshape_util.is_fully_defined(static_target_shape):
if static_shape != static_target_shape:
raise ValueError("{}: required shape {} but found {}".
format(name, static_target_shape, static_shape))
return None
else:
if dynamic_target_shape is None:
if tensorshape_util.is_fully_defined(static_target_shape):
dynamic_target_shape = tensorshape_util.as_list(static_target_shape)
else:
raise ValueError("{}: cannot infer target shape: no dynamic shape "
"specified and static shape {} is not fully defined".
format(name, static_target_shape))
return assert_util.assert_equal(
dynamic_shape,
dynamic_target_shape,
message=("{}: required shape {}".format(name, static_target_shape)))
|
def _check_equal_shape(name,
static_shape,
dynamic_shape,
static_target_shape,
dynamic_target_shape=None):
"""Check that source and target shape match, statically if possible."""
static_target_shape = tf.TensorShape(static_target_shape)
if tensorshape_util.is_fully_defined(
static_shape) and tensorshape_util.is_fully_defined(static_target_shape):
if static_shape != static_target_shape:
raise ValueError("{}: required shape {} but found {}".
format(name, static_target_shape, static_shape))
return None
else:
if dynamic_target_shape is None:
if tensorshape_util.is_fully_defined(static_target_shape):
dynamic_target_shape = tensorshape_util.as_list(static_target_shape)
else:
raise ValueError("{}: cannot infer target shape: no dynamic shape "
"specified and static shape {} is not fully defined".
format(name, static_target_shape))
return assert_util.assert_equal(
dynamic_shape,
dynamic_target_shape,
message=("{}: required shape {}".format(name, static_target_shape)))
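A trace-time sketch (illustrative names; fully defined shapes short-circuit to a static comparison):

import tensorflow as tf

# Matching static shapes need no runtime assertion, so `None` is returned.
assert _check_equal_shape(
    name="observation_matrix",
    static_shape=tf.TensorShape([3, 3]),
    dynamic_shape=tf.constant([3, 3]),
    static_target_shape=[3, 3]) is None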
|
[
"Check",
"that",
"source",
"and",
"target",
"shape",
"match",
"statically",
"if",
"possible",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L44-L69
|
[
"def",
"_check_equal_shape",
"(",
"name",
",",
"static_shape",
",",
"dynamic_shape",
",",
"static_target_shape",
",",
"dynamic_target_shape",
"=",
"None",
")",
":",
"static_target_shape",
"=",
"tf",
".",
"TensorShape",
"(",
"static_target_shape",
")",
"if",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"static_shape",
")",
"and",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"static_target_shape",
")",
":",
"if",
"static_shape",
"!=",
"static_target_shape",
":",
"raise",
"ValueError",
"(",
"\"{}: required shape {} but found {}\"",
".",
"format",
"(",
"name",
",",
"static_target_shape",
",",
"static_shape",
")",
")",
"return",
"None",
"else",
":",
"if",
"dynamic_target_shape",
"is",
"None",
":",
"if",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"static_target_shape",
")",
":",
"dynamic_target_shape",
"=",
"tensorshape_util",
".",
"as_list",
"(",
"static_target_shape",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"{}: cannot infer target shape: no dynamic shape \"",
"\"specified and static shape {} is not fully defined\"",
".",
"format",
"(",
"name",
",",
"static_target_shape",
")",
")",
"return",
"assert_util",
".",
"assert_equal",
"(",
"dynamic_shape",
",",
"dynamic_target_shape",
",",
"message",
"=",
"(",
"\"{}: required shape {}\"",
".",
"format",
"(",
"name",
",",
"static_target_shape",
")",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_augment_sample_shape
|
Augment a sample shape to broadcast batch dimensions.
Computes an augmented sample shape, so that any batch dimensions not
part of the distribution `partial_batch_dist` are treated as identical
distributions.
# partial_batch_dist.batch_shape = [ 7]
# full_sample_and_batch_shape = [3, 4, 7]
# => return an augmented sample shape of [3, 4] so that
# partial_batch_dist.sample(augmented_sample_shape) has combined
# sample and batch shape of [3, 4, 7].
Args:
partial_batch_dist: `tfd.Distribution` instance with batch shape a
prefix of `full_sample_and_batch_shape`.
full_sample_and_batch_shape: a Tensor or Tensor-like shape.
validate_args: if True, check for shape errors at runtime.
Returns:
augmented_sample_shape: sample shape such that
`partial_batch_dist.sample(augmented_sample_shape)` has combined
sample and batch shape of `full_sample_and_batch_shape`.
Raises:
ValueError: if `partial_batch_dist.batch_shape` has more dimensions than
`full_sample_and_batch_shape`.
NotImplementedError: if broadcasting would be required to make
`partial_batch_dist.batch_shape` into a prefix of
`full_sample_and_batch_shape` .
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def _augment_sample_shape(partial_batch_dist,
full_sample_and_batch_shape,
validate_args=False):
"""Augment a sample shape to broadcast batch dimensions.
Computes an augmented sample shape, so that any batch dimensions not
part of the distribution `partial_batch_dist` are treated as identical
distributions.
# partial_batch_dist.batch_shape = [ 7]
# full_sample_and_batch_shape = [3, 4, 7]
# => return an augmented sample shape of [3, 4] so that
# partial_batch_dist.sample(augmented_sample_shape) has combined
# sample and batch shape of [3, 4, 7].
Args:
partial_batch_dist: `tfd.Distribution` instance with batch shape a
prefix of `full_sample_and_batch_shape`.
full_sample_and_batch_shape: a Tensor or Tensor-like shape.
validate_args: if True, check for shape errors at runtime.
Returns:
augmented_sample_shape: sample shape such that
`partial_batch_dist.sample(augmented_sample_shape)` has combined
sample and batch shape of `full_sample_and_batch_shape`.
Raises:
ValueError: if `partial_batch_dist.batch_shape` has more dimensions than
`full_sample_and_batch_shape`.
NotImplementedError: if broadcasting would be required to make
`partial_batch_dist.batch_shape` into a prefix of
`full_sample_and_batch_shape` .
"""
full_ndims = distribution_util.prefer_static_shape(
full_sample_and_batch_shape)[0]
partial_batch_ndims = (
tensorshape_util.rank(partial_batch_dist.batch_shape) # pylint: disable=g-long-ternary
if tensorshape_util.rank(partial_batch_dist.batch_shape) is not None
else distribution_util.prefer_static_shape(
partial_batch_dist.batch_shape_tensor())[0])
num_broadcast_dims = full_ndims - partial_batch_ndims
expected_partial_batch_shape = (
full_sample_and_batch_shape[num_broadcast_dims:])
expected_partial_batch_shape_static = tf.get_static_value(
full_sample_and_batch_shape[num_broadcast_dims:])
# Raise errors statically if possible.
num_broadcast_dims_static = tf.get_static_value(num_broadcast_dims)
if num_broadcast_dims_static is not None:
if num_broadcast_dims_static < 0:
raise ValueError("Cannot broadcast distribution {} batch shape to "
"target batch shape with fewer dimensions"
.format(partial_batch_dist))
if (expected_partial_batch_shape_static is not None and
tensorshape_util.is_fully_defined(partial_batch_dist.batch_shape)):
if (partial_batch_dist.batch_shape and
any(expected_partial_batch_shape_static != tensorshape_util.as_list(
partial_batch_dist.batch_shape))):
raise NotImplementedError("Broadcasting is not supported; "
"unexpected batch shape "
"(expected {}, saw {}).".format(
expected_partial_batch_shape_static,
partial_batch_dist.batch_shape
))
runtime_assertions = []
if validate_args:
runtime_assertions.append(
assert_util.assert_greater_equal(
tf.convert_to_tensor(value=num_broadcast_dims, dtype=tf.int32),
tf.zeros((), dtype=tf.int32),
message=("Cannot broadcast distribution {} batch shape to "
"target batch shape with fewer dimensions.".format(
partial_batch_dist))))
runtime_assertions.append(
assert_util.assert_equal(
expected_partial_batch_shape,
partial_batch_dist.batch_shape_tensor(),
message=("Broadcasting is not supported; "
"unexpected batch shape."),
name="assert_batch_shape_same"))
with tf.control_dependencies(runtime_assertions):
return full_sample_and_batch_shape[:num_broadcast_dims]
|
def _augment_sample_shape(partial_batch_dist,
full_sample_and_batch_shape,
validate_args=False):
"""Augment a sample shape to broadcast batch dimensions.
Computes an augmented sample shape, so that any batch dimensions not
part of the distribution `partial_batch_dist` are treated as identical
distributions.
# partial_batch_dist.batch_shape = [ 7]
# full_sample_and_batch_shape = [3, 4, 7]
# => return an augmented sample shape of [3, 4] so that
# partial_batch_dist.sample(augmented_sample_shape) has combined
# sample and batch shape of [3, 4, 7].
Args:
partial_batch_dist: `tfd.Distribution` instance with batch shape a
prefix of `full_sample_and_batch_shape`.
full_sample_and_batch_shape: a Tensor or Tensor-like shape.
validate_args: if True, check for shape errors at runtime.
Returns:
augmented_sample_shape: sample shape such that
`partial_batch_dist.sample(augmented_sample_shape)` has combined
sample and batch shape of `full_sample_and_batch_shape`.
Raises:
ValueError: if `partial_batch_dist.batch_shape` has more dimensions than
`full_sample_and_batch_shape`.
NotImplementedError: if broadcasting would be required to make
`partial_batch_dist.batch_shape` into a prefix of
`full_sample_and_batch_shape` .
"""
full_ndims = distribution_util.prefer_static_shape(
full_sample_and_batch_shape)[0]
partial_batch_ndims = (
tensorshape_util.rank(partial_batch_dist.batch_shape) # pylint: disable=g-long-ternary
if tensorshape_util.rank(partial_batch_dist.batch_shape) is not None
else distribution_util.prefer_static_shape(
partial_batch_dist.batch_shape_tensor())[0])
num_broadcast_dims = full_ndims - partial_batch_ndims
expected_partial_batch_shape = (
full_sample_and_batch_shape[num_broadcast_dims:])
expected_partial_batch_shape_static = tf.get_static_value(
full_sample_and_batch_shape[num_broadcast_dims:])
# Raise errors statically if possible.
num_broadcast_dims_static = tf.get_static_value(num_broadcast_dims)
if num_broadcast_dims_static is not None:
if num_broadcast_dims_static < 0:
raise ValueError("Cannot broadcast distribution {} batch shape to "
"target batch shape with fewer dimensions"
.format(partial_batch_dist))
if (expected_partial_batch_shape_static is not None and
tensorshape_util.is_fully_defined(partial_batch_dist.batch_shape)):
if (partial_batch_dist.batch_shape and
any(expected_partial_batch_shape_static != tensorshape_util.as_list(
partial_batch_dist.batch_shape))):
raise NotImplementedError("Broadcasting is not supported; "
"unexpected batch shape "
"(expected {}, saw {}).".format(
expected_partial_batch_shape_static,
partial_batch_dist.batch_shape
))
runtime_assertions = []
if validate_args:
runtime_assertions.append(
assert_util.assert_greater_equal(
tf.convert_to_tensor(value=num_broadcast_dims, dtype=tf.int32),
tf.zeros((), dtype=tf.int32),
message=("Cannot broadcast distribution {} batch shape to "
"target batch shape with fewer dimensions.".format(
partial_batch_dist))))
runtime_assertions.append(
assert_util.assert_equal(
expected_partial_batch_shape,
partial_batch_dist.batch_shape_tensor(),
message=("Broadcasting is not supported; "
"unexpected batch shape."),
name="assert_batch_shape_same"))
with tf.control_dependencies(runtime_assertions):
return full_sample_and_batch_shape[:num_broadcast_dims]
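A concrete instance of the docstring's example (hypothetical distribution; assumes the surrounding module's imports):

import tensorflow as tf
import tensorflow_probability as tfp

# dist has batch shape [7]; to reach a combined sample-and-batch shape
# of [3, 4, 7], the augmented sample shape must be [3, 4].
dist = tfp.distributions.Normal(loc=tf.zeros([7]), scale=1.)
_augment_sample_shape(dist, tf.constant([3, 4, 7]))  # -> [3, 4]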
|
[
"Augment",
"a",
"sample",
"shape",
"to",
"broadcast",
"batch",
"dimensions",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L72-L155
|
[
"def",
"_augment_sample_shape",
"(",
"partial_batch_dist",
",",
"full_sample_and_batch_shape",
",",
"validate_args",
"=",
"False",
")",
":",
"full_ndims",
"=",
"distribution_util",
".",
"prefer_static_shape",
"(",
"full_sample_and_batch_shape",
")",
"[",
"0",
"]",
"partial_batch_ndims",
"=",
"(",
"tensorshape_util",
".",
"rank",
"(",
"partial_batch_dist",
".",
"batch_shape",
")",
"# pylint: disable=g-long-ternary",
"if",
"tensorshape_util",
".",
"rank",
"(",
"partial_batch_dist",
".",
"batch_shape",
")",
"is",
"not",
"None",
"else",
"distribution_util",
".",
"prefer_static_shape",
"(",
"partial_batch_dist",
".",
"batch_shape_tensor",
"(",
")",
")",
"[",
"0",
"]",
")",
"num_broadcast_dims",
"=",
"full_ndims",
"-",
"partial_batch_ndims",
"expected_partial_batch_shape",
"=",
"(",
"full_sample_and_batch_shape",
"[",
"num_broadcast_dims",
":",
"]",
")",
"expected_partial_batch_shape_static",
"=",
"tf",
".",
"get_static_value",
"(",
"full_sample_and_batch_shape",
"[",
"num_broadcast_dims",
":",
"]",
")",
"# Raise errors statically if possible.",
"num_broadcast_dims_static",
"=",
"tf",
".",
"get_static_value",
"(",
"num_broadcast_dims",
")",
"if",
"num_broadcast_dims_static",
"is",
"not",
"None",
":",
"if",
"num_broadcast_dims_static",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Cannot broadcast distribution {} batch shape to \"",
"\"target batch shape with fewer dimensions\"",
".",
"format",
"(",
"partial_batch_dist",
")",
")",
"if",
"(",
"expected_partial_batch_shape_static",
"is",
"not",
"None",
"and",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"partial_batch_dist",
".",
"batch_shape",
")",
")",
":",
"if",
"(",
"partial_batch_dist",
".",
"batch_shape",
"and",
"any",
"(",
"expected_partial_batch_shape_static",
"!=",
"tensorshape_util",
".",
"as_list",
"(",
"partial_batch_dist",
".",
"batch_shape",
")",
")",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"Broadcasting is not supported; \"",
"\"unexpected batch shape \"",
"\"(expected {}, saw {}).\"",
".",
"format",
"(",
"expected_partial_batch_shape_static",
",",
"partial_batch_dist",
".",
"batch_shape",
")",
")",
"runtime_assertions",
"=",
"[",
"]",
"if",
"validate_args",
":",
"runtime_assertions",
".",
"append",
"(",
"assert_util",
".",
"assert_greater_equal",
"(",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"num_broadcast_dims",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"tf",
".",
"zeros",
"(",
"(",
")",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"message",
"=",
"(",
"\"Cannot broadcast distribution {} batch shape to \"",
"\"target batch shape with fewer dimensions.\"",
".",
"format",
"(",
"partial_batch_dist",
")",
")",
")",
")",
"runtime_assertions",
".",
"append",
"(",
"assert_util",
".",
"assert_equal",
"(",
"expected_partial_batch_shape",
",",
"partial_batch_dist",
".",
"batch_shape_tensor",
"(",
")",
",",
"message",
"=",
"(",
"\"Broadcasting is not supported; \"",
"\"unexpected batch shape.\"",
")",
",",
"name",
"=",
"\"assert_batch_shape_same\"",
")",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"runtime_assertions",
")",
":",
"return",
"full_sample_and_batch_shape",
"[",
":",
"num_broadcast_dims",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
build_backward_pass_step
|
Build a callable that performs one step of backward smoothing.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
Returns:
backward_pass_step: a callable that updates a BackwardPassState
from timestep `t` to `t-1`.
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def build_backward_pass_step(get_transition_matrix_for_timestep):
"""Build a callable that perform one step for backward smoothing.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
Returns:
backward_pass_step: a callable that updates a BackwardPassState
from timestep `t` to `t-1`.
"""
def backward_pass_step(state,
filtered_parameters):
"""Run a single step of backward smoothing."""
(filtered_mean, filtered_cov,
predicted_mean, predicted_cov) = filtered_parameters
transition_matrix = get_transition_matrix_for_timestep(state.timestep)
next_posterior_mean = state.backward_mean
next_posterior_cov = state.backward_cov
posterior_mean, posterior_cov = backward_smoothing_update(
filtered_mean,
filtered_cov,
predicted_mean,
predicted_cov,
next_posterior_mean,
next_posterior_cov,
transition_matrix)
return BackwardPassState(backward_mean=posterior_mean,
backward_cov=posterior_cov,
timestep=state.timestep-1)
return backward_pass_step
|
def build_backward_pass_step(get_transition_matrix_for_timestep):
"""Build a callable that perform one step for backward smoothing.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
Returns:
backward_pass_step: a callable that updates a BackwardPassState
from timestep `t` to `t-1`.
"""
def backward_pass_step(state,
filtered_parameters):
"""Run a single step of backward smoothing."""
(filtered_mean, filtered_cov,
predicted_mean, predicted_cov) = filtered_parameters
transition_matrix = get_transition_matrix_for_timestep(state.timestep)
next_posterior_mean = state.backward_mean
next_posterior_cov = state.backward_cov
posterior_mean, posterior_cov = backward_smoothing_update(
filtered_mean,
filtered_cov,
predicted_mean,
predicted_cov,
next_posterior_mean,
next_posterior_cov,
transition_matrix)
return BackwardPassState(backward_mean=posterior_mean,
backward_cov=posterior_cov,
timestep=state.timestep-1)
return backward_pass_step
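A small wiring sketch (hypothetical time-invariant dynamics; `BackwardPassState` is the module's state tuple):

import tensorflow as tf

# With identity transitions A(t) = I on a 2-dimensional latent space, the
# returned callable maps the state at time t, plus the filtered and
# predicted moments for time t, to the smoothed state at time t - 1.
backward_step = build_backward_pass_step(
    lambda t: tf.linalg.LinearOperatorIdentity(num_rows=2))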
|
[
"Build",
"a",
"callable",
"that",
"perform",
"one",
"step",
"for",
"backward",
"smoothing",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1158-L1195
|
[
"def",
"build_backward_pass_step",
"(",
"get_transition_matrix_for_timestep",
")",
":",
"def",
"backward_pass_step",
"(",
"state",
",",
"filtered_parameters",
")",
":",
"\"\"\"Run a single step of backward smoothing.\"\"\"",
"(",
"filtered_mean",
",",
"filtered_cov",
",",
"predicted_mean",
",",
"predicted_cov",
")",
"=",
"filtered_parameters",
"transition_matrix",
"=",
"get_transition_matrix_for_timestep",
"(",
"state",
".",
"timestep",
")",
"next_posterior_mean",
"=",
"state",
".",
"backward_mean",
"next_posterior_cov",
"=",
"state",
".",
"backward_cov",
"posterior_mean",
",",
"posterior_cov",
"=",
"backward_smoothing_update",
"(",
"filtered_mean",
",",
"filtered_cov",
",",
"predicted_mean",
",",
"predicted_cov",
",",
"next_posterior_mean",
",",
"next_posterior_cov",
",",
"transition_matrix",
")",
"return",
"BackwardPassState",
"(",
"backward_mean",
"=",
"posterior_mean",
",",
"backward_cov",
"=",
"posterior_cov",
",",
"timestep",
"=",
"state",
".",
"timestep",
"-",
"1",
")",
"return",
"backward_pass_step"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
backward_smoothing_update
|
Backward update for a Kalman smoother.
Given the `filtered_mean` mu(t | t), `filtered_cov` sigma(t | t),
`predicted_mean` mu(t+1 | t) and `predicted_cov` sigma(t+1 | t),
as returned by the `forward_filter` function, as well as
`next_posterior_mean` mu(t+1 | 1:T) and `next_posterior_cov` sigma(t+1 | 1:T),
and the `transition_matrix` A(t+1) mapping states from time t to
time t+1, the one-step backward-smoothed distribution parameters can
be calculated as:
p(z(t) | Obs(1:T)) = N( mu(t | 1:T), sigma(t | 1:T)),
mu(t | 1:T) = mu(t | t) + J(t) * (mu(t+1 | 1:T) - mu(t+1 | t)),
sigma(t | 1:T) = sigma(t | t)
+ J(t) * (sigma(t+1 | 1:T) - sigma(t+1 | t)) * J(t)',
J(t) = sigma(t | t) * A(t+1)' / sigma(t+1 | t),
where all multiplications are matrix multiplications and `/` denotes
multiplication by the matrix inverse. J(t) is the backward Kalman gain matrix.
The algorithm can be initialized from mu(T | 1:T) and sigma(T | 1:T),
which are the last-step parameters returned by `forward_filter`.
Args:
filtered_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t | t).
filtered_cov: `Tensor` with event shape `[latent_size, latent_size]` and
batch shape `B`, containing sigma(t | t).
predicted_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t+1 | t).
predicted_cov: `Tensor` with event shape `[latent_size, latent_size]` and
batch shape `B`, containing sigma(t+1 | t).
next_posterior_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t+1 | 1:T).
next_posterior_cov: `Tensor` with event shape `[latent_size, latent_size]`
and batch shape `B`, containing sigma(t+1 | 1:T).
transition_matrix: `LinearOperator` with shape
`[latent_size, latent_size]` and batch shape broadcastable
to `B`.
Returns:
posterior_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t | 1:T).
posterior_cov: `Tensor` with event shape `[latent_size, latent_size]` and
batch shape `B`, containing sigma(t | 1:T).
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def backward_smoothing_update(filtered_mean,
filtered_cov,
predicted_mean,
predicted_cov,
next_posterior_mean,
next_posterior_cov,
transition_matrix):
"""Backward update for a Kalman smoother.
Given the `filtered_mean` mu(t | t), `filtered_cov` sigma(t | t),
`predicted_mean` mu(t+1 | t) and `predicted_cov` sigma(t+1 | t),
as returned by the `forward_filter` function, as well as
`next_posterior_mean` mu(t+1 | 1:T) and `next_posterior_cov` sigma(t+1 | 1:T),
and the `transition_matrix` A(t+1) mapping states from time t to
time t+1, the one-step backward-smoothed distribution parameters can
be calculated as:
p(z(t) | Obs(1:T)) = N( mu(t | 1:T), sigma(t | 1:T)),
mu(t | 1:T) = mu(t | t) + J(t) * (mu(t+1 | 1:T) - mu(t+1 | t)),
sigma(t | 1:T) = sigma(t | t)
+ J(t) * (sigma(t+1 | 1:T) - sigma(t+1 | t)) * J(t)',
J(t) = sigma(t | t) * A(t+1)' / sigma(t+1 | t),
where all multiplications are matrix multiplications and `/` denotes
multiplication by the matrix inverse. J(t) is the backward Kalman gain matrix.
The algorithm can be initialized from mu(T | 1:T) and sigma(T | 1:T),
which are the last-step parameters returned by `forward_filter`.
Args:
filtered_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t | t).
filtered_cov: `Tensor` with event shape `[latent_size, latent_size]` and
batch shape `B`, containing sigma(t | t).
predicted_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t+1 | t).
predicted_cov: `Tensor` with event shape `[latent_size, latent_size]` and
batch shape `B`, containing sigma(t+1 | t).
next_posterior_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t+1 | 1:T).
next_posterior_cov: `Tensor` with event shape `[latent_size, latent_size]`
and batch shape `B`, containing sigma(t+1 | 1:T).
transition_matrix: `LinearOperator` with shape
`[latent_size, latent_size]` and batch shape broadcastable
to `B`.
Returns:
posterior_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t | 1:T).
posterior_cov: `Tensor` with event shape `[latent_size, latent_size]` and
batch shape `B`, containing sigma(t | 1:T).
"""
# Compute backward Kalman gain:
# J = F * T' * P^{-1}
# Since both F(iltered) and P(redictive) are cov matrices,
# thus self-adjoint, we can take the transpose and reuse the
# computation:
# = (P^{-1} * T * F)'
# = (P^{-1} * tmp_gain_cov)'
# = (P \ tmp_gain_cov)'
tmp_gain_cov = transition_matrix.matmul(filtered_cov)
predicted_cov_chol = tf.linalg.cholesky(predicted_cov)
gain_transpose = tf.linalg.cholesky_solve(predicted_cov_chol, tmp_gain_cov)
posterior_mean = (filtered_mean +
tf.linalg.matmul(gain_transpose,
next_posterior_mean - predicted_mean,
adjoint_a=True))
posterior_cov = (
filtered_cov +
tf.linalg.matmul(gain_transpose,
tf.linalg.matmul(
next_posterior_cov - predicted_cov, gain_transpose),
adjoint_a=True))
return (posterior_mean, posterior_cov)
|
def backward_smoothing_update(filtered_mean,
filtered_cov,
predicted_mean,
predicted_cov,
next_posterior_mean,
next_posterior_cov,
transition_matrix):
"""Backward update for a Kalman smoother.
Given the `filtered_mean` mu(t | t), `filtered_cov` sigma(t | t),
`predicted_mean` mu(t+1 | t) and `predicted_cov` sigma(t+1 | t),
as returned by the `forward_filter` function, as well as
`next_posterior_mean` mu(t+1 | 1:T) and `next_posterior_cov` sigma(t+1 | 1:T),
and the `transition_matrix` A(t+1) mapping states from time t to
time t+1, the one-step backward smoothed distribution parameters
can be calculated as:
p(z(t) | Obs(1:T)) = N( mu(t | 1:T), sigma(t | 1:T)),
mu(t | 1:T) = mu(t | t) + J(t) * (mu(t+1 | 1:T) - mu(t+1 | t)),
sigma(t | 1:T) = sigma(t | t)
+ J(t) * (sigma(t+1 | 1:T) - sigma(t+1 | t)) * J(t)',
J(t) = sigma(t | t) * A(t+1)' / sigma(t+1 | t),
where all multiplications are matrix multiplications, and `X / Y` denotes
right-multiplication by the matrix inverse, i.e. X * inv(Y). J(t) is the
backward Kalman gain matrix.
The algorithm can be initialized from mu(T | 1:T) and sigma(T | 1:T),
which are the last-step parameters returned by `forward_filter`.
Args:
filtered_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t | t).
filtered_cov: `Tensor` with event shape `[latent_size, latent_size]` and
batch shape `B`, containing sigma(t | t).
predicted_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t+1 | t).
predicted_cov: `Tensor` with event shape `[latent_size, latent_size]` and
batch shape `B`, containing sigma(t+1 | t).
next_posterior_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t+1 | 1:T).
next_posterior_cov: `Tensor` with event shape `[latent_size, latent_size]`
and batch shape `B`, containing sigma(t+1 | 1:T).
transition_matrix: `LinearOperator` with shape
`[latent_size, latent_size]` and batch shape broadcastable
to `B`.
Returns:
posterior_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t | 1:T).
posterior_cov: `Tensor` with event shape `[latent_size, latent_size]` and
batch shape `B`, containing sigma(t | 1:T).
"""
# Compute backward Kalman gain:
# J = F * T' * P^{-1}
# Since both F(iltered) and P(redictive) are cov matrices,
# thus self-adjoint, we can take the transpose and reuse the
# computation:
# = (P^{-1} * T * F)'
# = (P^{-1} * tmp_gain_cov)'
# = (P \ tmp_gain_cov)'
tmp_gain_cov = transition_matrix.matmul(filtered_cov)
predicted_cov_chol = tf.linalg.cholesky(predicted_cov)
gain_transpose = tf.linalg.cholesky_solve(predicted_cov_chol, tmp_gain_cov)
posterior_mean = (filtered_mean +
tf.linalg.matmul(gain_transpose,
next_posterior_mean - predicted_mean,
adjoint_a=True))
posterior_cov = (
filtered_cov +
tf.linalg.matmul(gain_transpose,
tf.linalg.matmul(
next_posterior_cov - predicted_cov, gain_transpose),
adjoint_a=True))
return (posterior_mean, posterior_cov)
|
[
"Backward",
"update",
"for",
"a",
"Kalman",
"smoother",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1198-L1272
|
[
"def",
"backward_smoothing_update",
"(",
"filtered_mean",
",",
"filtered_cov",
",",
"predicted_mean",
",",
"predicted_cov",
",",
"next_posterior_mean",
",",
"next_posterior_cov",
",",
"transition_matrix",
")",
":",
"# Compute backward Kalman gain:",
"# J = F * T' * P^{-1}",
"# Since both F(iltered) and P(redictive) are cov matrices,",
"# thus self-adjoint, we can take the transpose.",
"# computation:",
"# = (P^{-1} * T * F)'",
"# = (P^{-1} * tmp_gain_cov) '",
"# = (P \\ tmp_gain_cov)'",
"tmp_gain_cov",
"=",
"transition_matrix",
".",
"matmul",
"(",
"filtered_cov",
")",
"predicted_cov_chol",
"=",
"tf",
".",
"linalg",
".",
"cholesky",
"(",
"predicted_cov",
")",
"gain_transpose",
"=",
"tf",
".",
"linalg",
".",
"cholesky_solve",
"(",
"predicted_cov_chol",
",",
"tmp_gain_cov",
")",
"posterior_mean",
"=",
"(",
"filtered_mean",
"+",
"tf",
".",
"linalg",
".",
"matmul",
"(",
"gain_transpose",
",",
"next_posterior_mean",
"-",
"predicted_mean",
",",
"adjoint_a",
"=",
"True",
")",
")",
"posterior_cov",
"=",
"(",
"filtered_cov",
"+",
"tf",
".",
"linalg",
".",
"matmul",
"(",
"gain_transpose",
",",
"tf",
".",
"linalg",
".",
"matmul",
"(",
"next_posterior_cov",
"-",
"predicted_cov",
",",
"gain_transpose",
")",
",",
"adjoint_a",
"=",
"True",
")",
")",
"return",
"(",
"posterior_mean",
",",
"posterior_cov",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
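To make the recursion concrete, here is a minimal NumPy sketch of the one-step smoothing update; the toy shapes and values are assumptions for illustration, not the TFP implementation.

import numpy as np

# Assumed toy model; illustrative only.
latent_size = 2
rng = np.random.default_rng(0)
A = rng.standard_normal((latent_size, latent_size))      # transition matrix A(t+1)
filtered_mean = np.zeros((latent_size, 1))               # mu(t | t)
filtered_cov = np.eye(latent_size)                       # sigma(t | t)
predicted_mean = A @ filtered_mean                       # mu(t+1 | t)
predicted_cov = A @ filtered_cov @ A.T + 0.1 * np.eye(latent_size)  # sigma(t+1 | t)
next_posterior_mean = rng.standard_normal((latent_size, 1))  # mu(t+1 | 1:T)
next_posterior_cov = 0.5 * np.eye(latent_size)               # sigma(t+1 | 1:T)

# Backward Kalman gain J(t) = sigma(t | t) * A(t+1)' * inv(sigma(t+1 | t)).
J = filtered_cov @ A.T @ np.linalg.inv(predicted_cov)
posterior_mean = filtered_mean + J @ (next_posterior_mean - predicted_mean)
posterior_cov = filtered_cov + J @ (next_posterior_cov - predicted_cov) @ J.T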
|
test
|
build_kalman_filter_step
|
Build a callable that performs one step of Kalman filtering.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
kalman_filter_step: a callable that updates a KalmanFilterState
from timestep `t-1` to `t`.
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def build_kalman_filter_step(get_transition_matrix_for_timestep,
get_transition_noise_for_timestep,
get_observation_matrix_for_timestep,
get_observation_noise_for_timestep):
"""Build a callable that performs one step of Kalman filtering.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
kalman_filter_step: a callable that updates a KalmanFilterState
from timestep `t-1` to `t`.
"""
def kalman_filter_step(state, elems_t):
"""Run a single step of Kalman filtering.
Args:
state: A `KalmanFilterState` object representing the previous
filter state at time `t-1`.
elems_t: A tuple of Tensors `(x_t, mask_t)`, or a `Tensor` `x_t`.
`x_t` is a `Tensor` with rightmost shape dimensions
`[observation_size, 1]` representing the vector observed at time `t`,
and `mask_t` is a `Tensor` with rightmost dimensions `[1, 1]`
representing the observation mask at time `t`. Both `x_t` and `mask_t`
may have batch dimensions, which must be compatible with the batch
dimensions of `state.predicted_mean` and `state.predicted_cov`
respectively. If `mask_t` is not provided, it is assumed to be `None`.
Returns:
new_state: A `KalmanFilterState` object representing the new
filter state at time `t`.
"""
if isinstance(elems_t, tuple):
x_t, mask_t = elems_t
else:
x_t = elems_t
mask_t = None
observation_matrix = get_observation_matrix_for_timestep(state.timestep)
observation_noise = get_observation_noise_for_timestep(state.timestep)
if mask_t is not None:
# Before running the update, fill in masked observations using the prior
# expectation. The precise filled value shouldn't matter since updates
# from masked elements will not be selected below, but we need to ensure
# that any results we incidentally compute on masked values are at least
# finite (not inf or NaN) so that they don't screw up gradient propagation
# through `tf.where`, as described in
# https://github.com/tensorflow/tensorflow/issues/2540.
# We fill with the prior expectation because any fixed value such as zero
# might be arbitrarily unlikely under the prior, leading to overflow in
# the updates, but the prior expectation should always be a
# 'reasonable' observation.
x_expected = _propagate_mean(state.predicted_mean,
observation_matrix,
observation_noise) * tf.ones_like(x_t)
x_t = tf.where(
tf.broadcast_to(mask_t, tf.shape(input=x_expected)), x_expected,
tf.broadcast_to(x_t, tf.shape(input=x_expected)))
# Given predicted mean u_{t|t-1} and covariance P_{t|t-1} from the
# previous step, incorporate the observation x_t, producing the
# filtered mean u_t and covariance P_t.
(filtered_mean,
filtered_cov,
observation_dist) = linear_gaussian_update(
state.predicted_mean, state.predicted_cov,
observation_matrix, observation_noise,
x_t)
# Compute the marginal likelihood p(x_{t} | x_{:t-1}) for this
# observation.
log_marginal_likelihood = observation_dist.log_prob(x_t[..., 0])
if mask_t is not None:
filtered_mean = tf.where(
tf.broadcast_to(mask_t, tf.shape(input=filtered_mean)),
state.predicted_mean, filtered_mean)
filtered_cov = tf.where(
tf.broadcast_to(mask_t, tf.shape(input=filtered_cov)),
state.predicted_cov, filtered_cov)
log_marginal_likelihood = tf.where(
tf.broadcast_to(mask_t[..., 0, 0],
tf.shape(input=log_marginal_likelihood)),
tf.zeros_like(log_marginal_likelihood),
log_marginal_likelihood)
# Run the filtered posterior through the transition
# model to predict the next time step:
# u_{t|t-1} = F_t u_{t-1} + b_t
# P_{t|t-1} = F_t P_{t-1} F_t' + Q_t
predicted_mean, predicted_cov = kalman_transition(
filtered_mean,
filtered_cov,
get_transition_matrix_for_timestep(state.timestep),
get_transition_noise_for_timestep(state.timestep))
return KalmanFilterState(
filtered_mean, filtered_cov,
predicted_mean, predicted_cov,
observation_dist.mean()[..., tf.newaxis],
observation_dist.covariance(),
log_marginal_likelihood,
state.timestep+1)
return kalman_filter_step
|
def build_kalman_filter_step(get_transition_matrix_for_timestep,
get_transition_noise_for_timestep,
get_observation_matrix_for_timestep,
get_observation_noise_for_timestep):
"""Build a callable that performs one step of Kalman filtering.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
kalman_filter_step: a callable that updates a KalmanFilterState
from timestep `t-1` to `t`.
"""
def kalman_filter_step(state, elems_t):
"""Run a single step of Kalman filtering.
Args:
state: A `KalmanFilterState` object representing the previous
filter state at time `t-1`.
elems_t: A tuple of Tensors `(x_t, mask_t)`, or a `Tensor` `x_t`.
`x_t` is a `Tensor` with rightmost shape dimensions
`[observation_size, 1]` representing the vector observed at time `t`,
and `mask_t` is a `Tensor` with rightmost dimensions `[1, 1]`
representing the observation mask at time `t`. Both `x_t` and `mask_t`
may have batch dimensions, which must be compatible with the batch
dimensions of `state.predicted_mean` and `state.predicted_cov`
respectively. If `mask_t` is not provided, it is assumed to be `None`.
Returns:
new_state: A `KalmanFilterState` object representing the new
filter state at time `t`.
"""
if isinstance(elems_t, tuple):
x_t, mask_t = elems_t
else:
x_t = elems_t
mask_t = None
observation_matrix = get_observation_matrix_for_timestep(state.timestep)
observation_noise = get_observation_noise_for_timestep(state.timestep)
if mask_t is not None:
# Before running the update, fill in masked observations using the prior
# expectation. The precise filled value shouldn't matter since updates
# from masked elements will not be selected below, but we need to ensure
# that any results we incidentally compute on masked values are at least
# finite (not inf or NaN) so that they don't screw up gradient propagation
# through `tf.where`, as described in
# https://github.com/tensorflow/tensorflow/issues/2540.
# We fill with the prior expectation because any fixed value such as zero
# might be arbitrarily unlikely under the prior, leading to overflow in
# the updates, but the prior expectation should always be a
# 'reasonable' observation.
x_expected = _propagate_mean(state.predicted_mean,
observation_matrix,
observation_noise) * tf.ones_like(x_t)
x_t = tf.where(
tf.broadcast_to(mask_t, tf.shape(input=x_expected)), x_expected,
tf.broadcast_to(x_t, tf.shape(input=x_expected)))
# Given predicted mean u_{t|t-1} and covariance P_{t|t-1} from the
# previous step, incorporate the observation x_t, producing the
# filtered mean u_t and covariance P_t.
(filtered_mean,
filtered_cov,
observation_dist) = linear_gaussian_update(
state.predicted_mean, state.predicted_cov,
observation_matrix, observation_noise,
x_t)
# Compute the marginal likelihood p(x_{t} | x_{:t-1}) for this
# observation.
log_marginal_likelihood = observation_dist.log_prob(x_t[..., 0])
if mask_t is not None:
filtered_mean = tf.where(
tf.broadcast_to(mask_t, tf.shape(input=filtered_mean)),
state.predicted_mean, filtered_mean)
filtered_cov = tf.where(
tf.broadcast_to(mask_t, tf.shape(input=filtered_cov)),
state.predicted_cov, filtered_cov)
log_marginal_likelihood = tf.where(
tf.broadcast_to(mask_t[..., 0, 0],
tf.shape(input=log_marginal_likelihood)),
tf.zeros_like(log_marginal_likelihood),
log_marginal_likelihood)
# Run the filtered posterior through the transition
# model to predict the next time step:
# u_{t|t-1} = F_t u_{t-1} + b_t
# P_{t|t-1} = F_t P_{t-1} F_t' + Q_t
predicted_mean, predicted_cov = kalman_transition(
filtered_mean,
filtered_cov,
get_transition_matrix_for_timestep(state.timestep),
get_transition_noise_for_timestep(state.timestep))
return KalmanFilterState(
filtered_mean, filtered_cov,
predicted_mean, predicted_cov,
observation_dist.mean()[..., tf.newaxis],
observation_dist.covariance(),
log_marginal_likelihood,
state.timestep+1)
return kalman_filter_step
|
[
"Build",
"a",
"callable",
"that",
"performs",
"one",
"step",
"of",
"Kalman",
"filtering",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1275-L1394
|
[
"def",
"build_kalman_filter_step",
"(",
"get_transition_matrix_for_timestep",
",",
"get_transition_noise_for_timestep",
",",
"get_observation_matrix_for_timestep",
",",
"get_observation_noise_for_timestep",
")",
":",
"def",
"kalman_filter_step",
"(",
"state",
",",
"elems_t",
")",
":",
"\"\"\"Run a single step of Kalman filtering.\n\n Args:\n state: A `KalmanFilterState` object representing the previous\n filter state at time `t-1`.\n elems_t: A tuple of Tensors `(x_t, mask_t)`, or a `Tensor` `x_t`.\n `x_t` is a `Tensor` with rightmost shape dimensions\n `[observation_size, 1]` representing the vector observed at time `t`,\n and `mask_t` is a `Tensor` with rightmost dimensions`[1, 1]`\n representing the observation mask at time `t`. Both `x_t` and `mask_t`\n may have batch dimensions, which must be compatible with the batch\n dimensions of `state.predicted_mean` and `state.predictived_cov`\n respectively. If `mask_t` is not provided, it is assumed to be `None`.\n\n Returns:\n new_state: A `KalmanFilterState` object representing the new\n filter state at time `t`.\n \"\"\"",
"if",
"isinstance",
"(",
"elems_t",
",",
"tuple",
")",
":",
"x_t",
",",
"mask_t",
"=",
"elems_t",
"else",
":",
"x_t",
"=",
"elems_t",
"mask_t",
"=",
"None",
"observation_matrix",
"=",
"get_observation_matrix_for_timestep",
"(",
"state",
".",
"timestep",
")",
"observation_noise",
"=",
"get_observation_noise_for_timestep",
"(",
"state",
".",
"timestep",
")",
"if",
"mask_t",
"is",
"not",
"None",
":",
"# Before running the update, fill in masked observations using the prior",
"# expectation. The precise filled value shouldn't matter since updates",
"# from masked elements will not be selected below, but we need to ensure",
"# that any results we incidently compute on masked values are at least",
"# finite (not inf or NaN) so that they don't screw up gradient propagation",
"# through `tf.where`, as described in",
"# https://github.com/tensorflow/tensorflow/issues/2540.",
"# We fill with the prior expectation because any fixed value such as zero",
"# might be arbitrarily unlikely under the prior, leading to overflow in",
"# the updates, but the prior expectation should always be a",
"# 'reasonable' observation.",
"x_expected",
"=",
"_propagate_mean",
"(",
"state",
".",
"predicted_mean",
",",
"observation_matrix",
",",
"observation_noise",
")",
"*",
"tf",
".",
"ones_like",
"(",
"x_t",
")",
"x_t",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"broadcast_to",
"(",
"mask_t",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"x_expected",
")",
")",
",",
"x_expected",
",",
"tf",
".",
"broadcast_to",
"(",
"x_t",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"x_expected",
")",
")",
")",
"# Given predicted mean u_{t|t-1} and covariance P_{t|t-1} from the",
"# previous step, incorporate the observation x_t, producing the",
"# filtered mean u_t and covariance P_t.",
"(",
"filtered_mean",
",",
"filtered_cov",
",",
"observation_dist",
")",
"=",
"linear_gaussian_update",
"(",
"state",
".",
"predicted_mean",
",",
"state",
".",
"predicted_cov",
",",
"observation_matrix",
",",
"observation_noise",
",",
"x_t",
")",
"# Compute the marginal likelihood p(x_{t} | x_{:t-1}) for this",
"# observation.",
"log_marginal_likelihood",
"=",
"observation_dist",
".",
"log_prob",
"(",
"x_t",
"[",
"...",
",",
"0",
"]",
")",
"if",
"mask_t",
"is",
"not",
"None",
":",
"filtered_mean",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"broadcast_to",
"(",
"mask_t",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"filtered_mean",
")",
")",
",",
"state",
".",
"predicted_mean",
",",
"filtered_mean",
")",
"filtered_cov",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"broadcast_to",
"(",
"mask_t",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"filtered_cov",
")",
")",
",",
"state",
".",
"predicted_cov",
",",
"filtered_cov",
")",
"log_marginal_likelihood",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"broadcast_to",
"(",
"mask_t",
"[",
"...",
",",
"0",
",",
"0",
"]",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"log_marginal_likelihood",
")",
")",
",",
"tf",
".",
"zeros_like",
"(",
"log_marginal_likelihood",
")",
",",
"log_marginal_likelihood",
")",
"# Run the filtered posterior through the transition",
"# model to predict the next time step:",
"# u_{t|t-1} = F_t u_{t-1} + b_t",
"# P_{t|t-1} = F_t P_{t-1} F_t' + Q_t",
"predicted_mean",
",",
"predicted_cov",
"=",
"kalman_transition",
"(",
"filtered_mean",
",",
"filtered_cov",
",",
"get_transition_matrix_for_timestep",
"(",
"state",
".",
"timestep",
")",
",",
"get_transition_noise_for_timestep",
"(",
"state",
".",
"timestep",
")",
")",
"return",
"KalmanFilterState",
"(",
"filtered_mean",
",",
"filtered_cov",
",",
"predicted_mean",
",",
"predicted_cov",
",",
"observation_dist",
".",
"mean",
"(",
")",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
",",
"observation_dist",
".",
"covariance",
"(",
")",
",",
"log_marginal_likelihood",
",",
"state",
".",
"timestep",
"+",
"1",
")",
"return",
"kalman_filter_step"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
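Stripped of masking and batching, the step above is the textbook update-then-predict cycle. A minimal NumPy sketch of that cycle under an assumed time-invariant toy model (not the TFP callables):

import numpy as np

# Assumed toy model; illustrative only.
F = np.array([[1.0, 1.0], [0.0, 1.0]])   # transition matrix
Q = 0.01 * np.eye(2)                      # transition noise covariance
H = np.array([[1.0, 0.0]])                # observation matrix
R = np.array([[0.5]])                     # observation noise covariance

mean, cov = np.zeros((2, 1)), np.eye(2)   # predicted moments entering t=0
for x_t in [1.0, 1.9, 3.2]:
    # Update: incorporate the observation at time t.
    S = H @ cov @ H.T + R
    K = cov @ H.T @ np.linalg.inv(S)
    mean = mean + K @ (np.array([[x_t]]) - H @ mean)
    cov = (np.eye(2) - K @ H) @ cov
    # Predict: u_{t+1|t} = F u_t, P_{t+1|t} = F P_t F' + Q.
    mean, cov = F @ mean, F @ cov @ F.T + Q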
|
test
|
linear_gaussian_update
|
Conjugate update for a linear Gaussian model.
Given a normal prior on a latent variable `z`,
`p(z) = N(prior_mean, prior_cov) = N(u, P)`,
for which we observe a linear Gaussian transformation `x`,
`p(x|z) = N(H * z + c, R)`,
the posterior is also normal:
`p(z|x) = N(u*, P*)`.
We can write this update as
x_expected = H * u + c # pushforward prior mean
S = R + H * P * H' # pushforward prior cov
K = P * H' * S^{-1} # optimal Kalman gain
u* = u + K * (x_observed - x_expected) # posterior mean
P* = (I - K * H) * P * (I - K * H)' + K * R * K'  # posterior cov
(see, e.g., https://en.wikipedia.org/wiki/Kalman_filter#Update)
Args:
prior_mean: `Tensor` with event shape `[latent_size, 1]` and
potential batch shape `B = [b1, ..., b_n]`.
prior_cov: `Tensor` with event shape `[latent_size, latent_size]`
and batch shape `B` (matching `prior_mean`).
observation_matrix: `LinearOperator` with shape
`[observation_size, latent_size]` and batch shape broadcastable
to `B`.
observation_noise: potentially-batched
`MultivariateNormalLinearOperator` instance with event shape
`[observation_size]` and batch shape broadcastable to `B`.
x_observed: potentially batched `Tensor` with event shape
`[observation_size, 1]` and batch shape `B`.
Returns:
posterior_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`.
posterior_cov: `Tensor` with event shape `[latent_size,
latent_size]` and batch shape `B`.
predictive_dist: the prior predictive distribution `p(x)`,
as a `Distribution` instance with event
shape `[observation_size]` and batch shape `B`. This will
typically be `tfd.MultivariateNormalTriL`, but when
`observation_size=1` we return a `tfd.Independent(tfd.Normal)`
instance as an optimization.
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def linear_gaussian_update(
prior_mean, prior_cov, observation_matrix, observation_noise, x_observed):
"""Conjugate update for a linear Gaussian model.
Given a normal prior on a latent variable `z`,
`p(z) = N(prior_mean, prior_cov) = N(u, P)`,
for which we observe a linear Gaussian transformation `x`,
`p(x|z) = N(H * z + c, R)`,
the posterior is also normal:
`p(z|x) = N(u*, P*)`.
We can write this update as
x_expected = H * u + c # pushforward prior mean
S = R + H * P * H' # pushforward prior cov
K = P * H' * S^{-1} # optimal Kalman gain
u* = u + K * (x_observed - x_expected) # posterior mean
P* = (I - K * H) * P * (I - K * H)' + K * R * K'  # posterior cov
(see, e.g., https://en.wikipedia.org/wiki/Kalman_filter#Update)
Args:
prior_mean: `Tensor` with event shape `[latent_size, 1]` and
potential batch shape `B = [b1, ..., b_n]`.
prior_cov: `Tensor` with event shape `[latent_size, latent_size]`
and batch shape `B` (matching `prior_mean`).
observation_matrix: `LinearOperator` with shape
`[observation_size, latent_size]` and batch shape broadcastable
to `B`.
observation_noise: potentially-batched
`MultivariateNormalLinearOperator` instance with event shape
`[observation_size]` and batch shape broadcastable to `B`.
x_observed: potentially batched `Tensor` with event shape
`[observation_size, 1]` and batch shape `B`.
Returns:
posterior_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`.
posterior_cov: `Tensor` with event shape `[latent_size,
latent_size]` and batch shape `B`.
predictive_dist: the prior predictive distribution `p(x)`,
as a `Distribution` instance with event
shape `[observation_size]` and batch shape `B`. This will
typically be `tfd.MultivariateNormalTriL`, but when
`observation_size=1` we return a `tfd.Independent(tfd.Normal)`
instance as an optimization.
"""
# If observations are scalar, we can avoid some matrix ops.
observation_size_is_static_and_scalar = (
tf.compat.dimension_value(observation_matrix.shape[-2]) == 1)
# Push the predicted mean for the latent state through the
# observation model
x_expected = _propagate_mean(prior_mean,
observation_matrix,
observation_noise)
# Push the predictive covariance of the latent state through the
# observation model:
# S = R + H * P * H'.
# We use a temporary variable for H * P,
# reused below to compute Kalman gain.
tmp_obs_cov = observation_matrix.matmul(prior_cov)
predicted_obs_cov = (
observation_matrix.matmul(tmp_obs_cov, adjoint_arg=True)
+ observation_noise.covariance())
# Compute optimal Kalman gain:
# K = P * H' * S^{-1}
# Since both S and P are cov matrices, thus symmetric,
# we can take the transpose and reuse our previous
# computation:
# = (S^{-1} * H * P)'
# = (S^{-1} * tmp_obs_cov)'
# = (S \ tmp_obs_cov)'
if observation_size_is_static_and_scalar:
gain_transpose = tmp_obs_cov/predicted_obs_cov
else:
predicted_obs_cov_chol = tf.linalg.cholesky(predicted_obs_cov)
gain_transpose = tf.linalg.cholesky_solve(predicted_obs_cov_chol,
tmp_obs_cov)
# Compute the posterior mean, incorporating the observation.
# u* = u + K (x_observed - x_expected)
posterior_mean = (prior_mean +
tf.linalg.matmul(gain_transpose, x_observed - x_expected,
adjoint_a=True))
# For the posterior covariance, we could use the simple update
# P* = P - K * H * P
# but this is prone to numerical issues because it subtracts a
# value from a PSD matrix. We choose instead to use the more
# expensive Jordan form update
# P* = (I - K H) * P * (I - K H)' + K R K'
# which always produces a PSD result. This uses
# tmp_term = (I - K * H)'
# as an intermediate quantity.
tmp_term = -observation_matrix.matmul(gain_transpose, adjoint=True) # -K * H
tmp_term = tf.linalg.set_diag(tmp_term, tf.linalg.diag_part(tmp_term) + 1)
posterior_cov = (
tf.linalg.matmul(
tmp_term, tf.linalg.matmul(prior_cov, tmp_term), adjoint_a=True)
+ tf.linalg.matmul(gain_transpose,
tf.linalg.matmul(
observation_noise.covariance(), gain_transpose),
adjoint_a=True))
if observation_size_is_static_and_scalar:
# A plain Normal would have event shape `[]`; wrapping with Independent
# ensures `event_shape=[1]` as required.
predictive_dist = independent.Independent(
normal.Normal(loc=x_expected[..., 0],
scale=tf.sqrt(predicted_obs_cov[..., 0])),
reinterpreted_batch_ndims=1)
# Minor hack to define the covariance, so that `predictive_dist` can pass as
# an MVNTriL-like object.
predictive_dist.covariance = lambda: predicted_obs_cov
else:
predictive_dist = mvn_tril.MultivariateNormalTriL(
loc=x_expected[..., 0],
scale_tril=predicted_obs_cov_chol)
return posterior_mean, posterior_cov, predictive_dist
|
def linear_gaussian_update(
prior_mean, prior_cov, observation_matrix, observation_noise, x_observed):
"""Conjugate update for a linear Gaussian model.
Given a normal prior on a latent variable `z`,
`p(z) = N(prior_mean, prior_cov) = N(u, P)`,
for which we observe a linear Gaussian transformation `x`,
`p(x|z) = N(H * z + c, R)`,
the posterior is also normal:
`p(z|x) = N(u*, P*)`.
We can write this update as
x_expected = H * u + c # pushforward prior mean
S = R + H * P * H' # pushforward prior cov
K = P * H' * S^{-1} # optimal Kalman gain
u* = u + K * (x_observed - x_expected) # posterior mean
P* = (I - K * H) * P * (I - K * H)' + K * R * K'  # posterior cov
(see, e.g., https://en.wikipedia.org/wiki/Kalman_filter#Update)
Args:
prior_mean: `Tensor` with event shape `[latent_size, 1]` and
potential batch shape `B = [b1, ..., b_n]`.
prior_cov: `Tensor` with event shape `[latent_size, latent_size]`
and batch shape `B` (matching `prior_mean`).
observation_matrix: `LinearOperator` with shape
`[observation_size, latent_size]` and batch shape broadcastable
to `B`.
observation_noise: potentially-batched
`MultivariateNormalLinearOperator` instance with event shape
`[observation_size]` and batch shape broadcastable to `B`.
x_observed: potentially batched `Tensor` with event shape
`[observation_size, 1]` and batch shape `B`.
Returns:
posterior_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`.
posterior_cov: `Tensor` with event shape `[latent_size,
latent_size]` and batch shape `B`.
predictive_dist: the prior predictive distribution `p(x)`,
as a `Distribution` instance with event
shape `[observation_size]` and batch shape `B`. This will
typically be `tfd.MultivariateNormalTriL`, but when
`observation_size=1` we return a `tfd.Independent(tfd.Normal)`
instance as an optimization.
"""
# If observations are scalar, we can avoid some matrix ops.
observation_size_is_static_and_scalar = (
tf.compat.dimension_value(observation_matrix.shape[-2]) == 1)
# Push the predicted mean for the latent state through the
# observation model
x_expected = _propagate_mean(prior_mean,
observation_matrix,
observation_noise)
# Push the predictive covariance of the latent state through the
# observation model:
# S = R + H * P * H'.
# We use a temporary variable for H * P,
# reused below to compute Kalman gain.
tmp_obs_cov = observation_matrix.matmul(prior_cov)
predicted_obs_cov = (
observation_matrix.matmul(tmp_obs_cov, adjoint_arg=True)
+ observation_noise.covariance())
# Compute optimal Kalman gain:
# K = P * H' * S^{-1}
# Since both S and P are cov matrices, thus symmetric,
# we can take the transpose and reuse our previous
# computation:
# = (S^{-1} * H * P)'
# = (S^{-1} * tmp_obs_cov)'
# = (S \ tmp_obs_cov)'
if observation_size_is_static_and_scalar:
gain_transpose = tmp_obs_cov/predicted_obs_cov
else:
predicted_obs_cov_chol = tf.linalg.cholesky(predicted_obs_cov)
gain_transpose = tf.linalg.cholesky_solve(predicted_obs_cov_chol,
tmp_obs_cov)
# Compute the posterior mean, incorporating the observation.
# u* = u + K (x_observed - x_expected)
posterior_mean = (prior_mean +
tf.linalg.matmul(gain_transpose, x_observed - x_expected,
adjoint_a=True))
# For the posterior covariance, we could use the simple update
# P* = P - K * H * P
# but this is prone to numerical issues because it subtracts a
# value from a PSD matrix. We choose instead to use the more
# expensive Jordan form update
# P* = (I - K H) * P * (I - K H)' + K R K'
# which always produces a PSD result. This uses
# tmp_term = (I - K * H)'
# as an intermediate quantity.
tmp_term = -observation_matrix.matmul(gain_transpose, adjoint=True) # -K * H
tmp_term = tf.linalg.set_diag(tmp_term, tf.linalg.diag_part(tmp_term) + 1)
posterior_cov = (
tf.linalg.matmul(
tmp_term, tf.linalg.matmul(prior_cov, tmp_term), adjoint_a=True)
+ tf.linalg.matmul(gain_transpose,
tf.linalg.matmul(
observation_noise.covariance(), gain_transpose),
adjoint_a=True))
if observation_size_is_static_and_scalar:
# A plain Normal would have event shape `[]`; wrapping with Independent
# ensures `event_shape=[1]` as required.
predictive_dist = independent.Independent(
normal.Normal(loc=x_expected[..., 0],
scale=tf.sqrt(predicted_obs_cov[..., 0])),
reinterpreted_batch_ndims=1)
# Minor hack to define the covariance, so that `predictive_dist` can pass as
# an MVNTriL-like object.
predictive_dist.covariance = lambda: predicted_obs_cov
else:
predictive_dist = mvn_tril.MultivariateNormalTriL(
loc=x_expected[..., 0],
scale_tril=predicted_obs_cov_chol)
return posterior_mean, posterior_cov, predictive_dist
|
[
"Conjugate",
"update",
"for",
"a",
"linear",
"Gaussian",
"model",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1397-L1519
|
[
"def",
"linear_gaussian_update",
"(",
"prior_mean",
",",
"prior_cov",
",",
"observation_matrix",
",",
"observation_noise",
",",
"x_observed",
")",
":",
"# If observations are scalar, we can avoid some matrix ops.",
"observation_size_is_static_and_scalar",
"=",
"(",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"observation_matrix",
".",
"shape",
"[",
"-",
"2",
"]",
")",
"==",
"1",
")",
"# Push the predicted mean for the latent state through the",
"# observation model",
"x_expected",
"=",
"_propagate_mean",
"(",
"prior_mean",
",",
"observation_matrix",
",",
"observation_noise",
")",
"# Push the predictive covariance of the latent state through the",
"# observation model:",
"# S = R + H * P * H'.",
"# We use a temporary variable for H * P,",
"# reused below to compute Kalman gain.",
"tmp_obs_cov",
"=",
"observation_matrix",
".",
"matmul",
"(",
"prior_cov",
")",
"predicted_obs_cov",
"=",
"(",
"observation_matrix",
".",
"matmul",
"(",
"tmp_obs_cov",
",",
"adjoint_arg",
"=",
"True",
")",
"+",
"observation_noise",
".",
"covariance",
"(",
")",
")",
"# Compute optimal Kalman gain:",
"# K = P * H' * S^{-1}",
"# Since both S and P are cov matrices, thus symmetric,",
"# we can take the transpose and reuse our previous",
"# computation:",
"# = (S^{-1} * H * P)'",
"# = (S^{-1} * tmp_obs_cov) '",
"# = (S \\ tmp_obs_cov)'",
"if",
"observation_size_is_static_and_scalar",
":",
"gain_transpose",
"=",
"tmp_obs_cov",
"/",
"predicted_obs_cov",
"else",
":",
"predicted_obs_cov_chol",
"=",
"tf",
".",
"linalg",
".",
"cholesky",
"(",
"predicted_obs_cov",
")",
"gain_transpose",
"=",
"tf",
".",
"linalg",
".",
"cholesky_solve",
"(",
"predicted_obs_cov_chol",
",",
"tmp_obs_cov",
")",
"# Compute the posterior mean, incorporating the observation.",
"# u* = u + K (x_observed - x_expected)",
"posterior_mean",
"=",
"(",
"prior_mean",
"+",
"tf",
".",
"linalg",
".",
"matmul",
"(",
"gain_transpose",
",",
"x_observed",
"-",
"x_expected",
",",
"adjoint_a",
"=",
"True",
")",
")",
"# For the posterior covariance, we could use the simple update",
"# P* = P - K * H * P",
"# but this is prone to numerical issues because it subtracts a",
"# value from a PSD matrix. We choose instead to use the more",
"# expensive Jordan form update",
"# P* = (I - K H) * P * (I - K H)' + K R K'",
"# which always produces a PSD result. This uses",
"# tmp_term = (I - K * H)'",
"# as an intermediate quantity.",
"tmp_term",
"=",
"-",
"observation_matrix",
".",
"matmul",
"(",
"gain_transpose",
",",
"adjoint",
"=",
"True",
")",
"# -K * H",
"tmp_term",
"=",
"tf",
".",
"linalg",
".",
"set_diag",
"(",
"tmp_term",
",",
"tf",
".",
"linalg",
".",
"diag_part",
"(",
"tmp_term",
")",
"+",
"1",
")",
"posterior_cov",
"=",
"(",
"tf",
".",
"linalg",
".",
"matmul",
"(",
"tmp_term",
",",
"tf",
".",
"linalg",
".",
"matmul",
"(",
"prior_cov",
",",
"tmp_term",
")",
",",
"adjoint_a",
"=",
"True",
")",
"+",
"tf",
".",
"linalg",
".",
"matmul",
"(",
"gain_transpose",
",",
"tf",
".",
"linalg",
".",
"matmul",
"(",
"observation_noise",
".",
"covariance",
"(",
")",
",",
"gain_transpose",
")",
",",
"adjoint_a",
"=",
"True",
")",
")",
"if",
"observation_size_is_static_and_scalar",
":",
"# A plain Normal would have event shape `[]`; wrapping with Independent",
"# ensures `event_shape=[1]` as required.",
"predictive_dist",
"=",
"independent",
".",
"Independent",
"(",
"normal",
".",
"Normal",
"(",
"loc",
"=",
"x_expected",
"[",
"...",
",",
"0",
"]",
",",
"scale",
"=",
"tf",
".",
"sqrt",
"(",
"predicted_obs_cov",
"[",
"...",
",",
"0",
"]",
")",
")",
",",
"reinterpreted_batch_ndims",
"=",
"1",
")",
"# Minor hack to define the covariance, so that `predictive_dist` can pass as",
"# an MVNTriL-like object.",
"predictive_dist",
".",
"covariance",
"=",
"lambda",
":",
"predicted_obs_cov",
"else",
":",
"predictive_dist",
"=",
"mvn_tril",
".",
"MultivariateNormalTriL",
"(",
"loc",
"=",
"x_expected",
"[",
"...",
",",
"0",
"]",
",",
"scale_tril",
"=",
"predicted_obs_cov_chol",
")",
"return",
"posterior_mean",
",",
"posterior_cov",
",",
"predictive_dist"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
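The update equations above are straightforward to check numerically. A minimal NumPy sketch with assumed toy shapes (not the TFP implementation), including the Jordan-form covariance update the code uses for numerical robustness:

import numpy as np

# Assumed toy sizes and values; illustrative only (noise shift c taken as zero).
latent_size, observation_size = 3, 2
rng = np.random.default_rng(1)
u = rng.standard_normal((latent_size, 1))                  # prior mean
P = np.eye(latent_size)                                    # prior cov
H = rng.standard_normal((observation_size, latent_size))   # observation matrix
R = 0.1 * np.eye(observation_size)                         # observation noise cov
x_observed = rng.standard_normal((observation_size, 1))

x_expected = H @ u                                # pushforward prior mean
S = R + H @ P @ H.T                               # pushforward prior cov
K = P @ H.T @ np.linalg.inv(S)                    # optimal Kalman gain
posterior_mean = u + K @ (x_observed - x_expected)
# Jordan-form covariance update: always PSD, unlike the naive P - K H P.
I_KH = np.eye(latent_size) - K @ H
posterior_cov = I_KH @ P @ I_KH.T + K @ R @ K.T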
|
test
|
kalman_transition
|
Propagate a filtered distribution through a transition model.
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def kalman_transition(filtered_mean, filtered_cov,
transition_matrix, transition_noise):
"""Propagate a filtered distribution through a transition model."""
predicted_mean = _propagate_mean(filtered_mean,
transition_matrix,
transition_noise)
predicted_cov = _propagate_cov(filtered_cov,
transition_matrix,
transition_noise)
return predicted_mean, predicted_cov
|
def kalman_transition(filtered_mean, filtered_cov,
transition_matrix, transition_noise):
"""Propagate a filtered distribution through a transition model."""
predicted_mean = _propagate_mean(filtered_mean,
transition_matrix,
transition_noise)
predicted_cov = _propagate_cov(filtered_cov,
transition_matrix,
transition_noise)
return predicted_mean, predicted_cov
|
[
"Propagate",
"a",
"filtered",
"distribution",
"through",
"a",
"transition",
"model",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1522-L1532
|
[
"def",
"kalman_transition",
"(",
"filtered_mean",
",",
"filtered_cov",
",",
"transition_matrix",
",",
"transition_noise",
")",
":",
"predicted_mean",
"=",
"_propagate_mean",
"(",
"filtered_mean",
",",
"transition_matrix",
",",
"transition_noise",
")",
"predicted_cov",
"=",
"_propagate_cov",
"(",
"filtered_cov",
",",
"transition_matrix",
",",
"transition_noise",
")",
"return",
"predicted_mean",
",",
"predicted_cov"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
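As a small illustration of what `_propagate_mean` and `_propagate_cov` compute here, a NumPy sketch under an assumed toy model (noise mean b, noise covariance Q):

import numpy as np

F = np.array([[1.0, 0.5], [0.0, 1.0]])   # transition matrix
b = np.array([[0.0], [0.1]])             # transition noise mean
Q = 0.05 * np.eye(2)                     # transition noise covariance
filtered_mean, filtered_cov = np.ones((2, 1)), np.eye(2)

predicted_mean = F @ filtered_mean + b        # mu(t+1 | t) = F mu + b
predicted_cov = F @ filtered_cov @ F.T + Q    # sigma(t+1 | t) = F sigma F' + Q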
|
test
|
build_kalman_mean_step
|
Build a callable that performs one step of Kalman mean recursion.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
kalman_mean_step: a callable that computes latent state and
observation means at time `t`, given latent mean at time `t-1`.
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def build_kalman_mean_step(get_transition_matrix_for_timestep,
get_transition_noise_for_timestep,
get_observation_matrix_for_timestep,
get_observation_noise_for_timestep):
"""Build a callable that performs one step of Kalman mean recursion.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
kalman_mean_step: a callable that computes latent state and
observation means at time `t`, given latent mean at time `t-1`.
"""
def mean_step(previous_means, t):
"""Single step of prior mean recursion."""
previous_latent_mean, _ = previous_means
latent_mean = _propagate_mean(previous_latent_mean,
get_transition_matrix_for_timestep(t - 1),
get_transition_noise_for_timestep(t - 1))
observation_mean = _propagate_mean(latent_mean,
get_observation_matrix_for_timestep(t),
get_observation_noise_for_timestep(t))
return (latent_mean, observation_mean)
return mean_step
|
def build_kalman_mean_step(get_transition_matrix_for_timestep,
get_transition_noise_for_timestep,
get_observation_matrix_for_timestep,
get_observation_noise_for_timestep):
"""Build a callable that performs one step of Kalman mean recursion.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
kalman_mean_step: a callable that computes latent state and
observation means at time `t`, given latent mean at time `t-1`.
"""
def mean_step(previous_means, t):
"""Single step of prior mean recursion."""
previous_latent_mean, _ = previous_means
latent_mean = _propagate_mean(previous_latent_mean,
get_transition_matrix_for_timestep(t - 1),
get_transition_noise_for_timestep(t - 1))
observation_mean = _propagate_mean(latent_mean,
get_observation_matrix_for_timestep(t),
get_observation_noise_for_timestep(t))
return (latent_mean, observation_mean)
return mean_step
|
[
"Build",
"a",
"callable",
"that",
"performs",
"one",
"step",
"of",
"Kalman",
"mean",
"recursion",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1535-L1574
|
[
"def",
"build_kalman_mean_step",
"(",
"get_transition_matrix_for_timestep",
",",
"get_transition_noise_for_timestep",
",",
"get_observation_matrix_for_timestep",
",",
"get_observation_noise_for_timestep",
")",
":",
"def",
"mean_step",
"(",
"previous_means",
",",
"t",
")",
":",
"\"\"\"Single step of prior mean recursion.\"\"\"",
"previous_latent_mean",
",",
"_",
"=",
"previous_means",
"latent_mean",
"=",
"_propagate_mean",
"(",
"previous_latent_mean",
",",
"get_transition_matrix_for_timestep",
"(",
"t",
"-",
"1",
")",
",",
"get_transition_noise_for_timestep",
"(",
"t",
"-",
"1",
")",
")",
"observation_mean",
"=",
"_propagate_mean",
"(",
"latent_mean",
",",
"get_observation_matrix_for_timestep",
"(",
"t",
")",
",",
"get_observation_noise_for_timestep",
"(",
"t",
")",
")",
"return",
"(",
"latent_mean",
",",
"observation_mean",
")",
"return",
"mean_step"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
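The recursion `mean_step` performs is just two affine pushes per step: propagate the latent mean through the transition model, then through the observation model. A minimal NumPy sketch with an assumed time-invariant toy model (illustrative only):

import numpy as np

F, b = np.array([[0.9]]), np.array([[0.1]])   # transition matrix / noise mean
H, c = np.array([[2.0]]), np.array([[0.0]])   # observation matrix / noise mean

latent_mean = np.array([[1.0]])               # initial latent mean
observation_means = []
for _ in range(3):
    latent_mean = F @ latent_mean + b                 # latent mean at t from t-1
    observation_means.append(H @ latent_mean + c)     # pushforward to observations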
|
test
|
build_kalman_cov_step
|
Build a callable for one step of Kalman covariance recursion.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
cov_step: a callable that computes latent state and observation
covariance at time `t`, given latent covariance at time `t-1`.
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def build_kalman_cov_step(get_transition_matrix_for_timestep,
get_transition_noise_for_timestep,
get_observation_matrix_for_timestep,
get_observation_noise_for_timestep):
"""Build a callable for one step of Kalman covariance recursion.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
cov_step: a callable that computes latent state and observation
covariance at time `t`, given latent covariance at time `t-1`.
"""
def cov_step(previous_covs, t):
"""Single step of prior covariance recursion."""
previous_latent_cov, _ = previous_covs
latent_cov = _propagate_cov(
previous_latent_cov,
get_transition_matrix_for_timestep(t - 1),
get_transition_noise_for_timestep(t - 1))
observation_cov = _propagate_cov(
latent_cov,
get_observation_matrix_for_timestep(t),
get_observation_noise_for_timestep(t))
return (latent_cov, observation_cov)
return cov_step
|
def build_kalman_cov_step(get_transition_matrix_for_timestep,
get_transition_noise_for_timestep,
get_observation_matrix_for_timestep,
get_observation_noise_for_timestep):
"""Build a callable for one step of Kalman covariance recursion.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
cov_step: a callable that computes latent state and observation
covariance at time `t`, given latent covariance at time `t-1`.
"""
def cov_step(previous_covs, t):
"""Single step of prior covariance recursion."""
previous_latent_cov, _ = previous_covs
latent_cov = _propagate_cov(
previous_latent_cov,
get_transition_matrix_for_timestep(t - 1),
get_transition_noise_for_timestep(t - 1))
observation_cov = _propagate_cov(
latent_cov,
get_observation_matrix_for_timestep(t),
get_observation_noise_for_timestep(t))
return (latent_cov, observation_cov)
return cov_step
|
[
"Build",
"a",
"callable",
"for",
"one",
"step",
"of",
"Kalman",
"covariance",
"recursion",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1577-L1619
|
[
"def",
"build_kalman_cov_step",
"(",
"get_transition_matrix_for_timestep",
",",
"get_transition_noise_for_timestep",
",",
"get_observation_matrix_for_timestep",
",",
"get_observation_noise_for_timestep",
")",
":",
"def",
"cov_step",
"(",
"previous_covs",
",",
"t",
")",
":",
"\"\"\"Single step of prior covariance recursion.\"\"\"",
"previous_latent_cov",
",",
"_",
"=",
"previous_covs",
"latent_cov",
"=",
"_propagate_cov",
"(",
"previous_latent_cov",
",",
"get_transition_matrix_for_timestep",
"(",
"t",
"-",
"1",
")",
",",
"get_transition_noise_for_timestep",
"(",
"t",
"-",
"1",
")",
")",
"observation_cov",
"=",
"_propagate_cov",
"(",
"latent_cov",
",",
"get_observation_matrix_for_timestep",
"(",
"t",
")",
",",
"get_observation_noise_for_timestep",
"(",
"t",
")",
")",
"return",
"(",
"latent_cov",
",",
"observation_cov",
")",
"return",
"cov_step"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
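The covariance recursion is the quadratic analogue of the mean recursion above: propagate with the transition model, then push through the observation model. A minimal NumPy sketch under an assumed time-invariant toy model:

import numpy as np

F, Q = np.array([[0.9]]), np.array([[0.1]])   # transition matrix / noise cov
H, R = np.array([[2.0]]), np.array([[0.5]])   # observation matrix / noise cov

latent_cov = np.array([[1.0]])                # initial latent covariance
observation_covs = []
for _ in range(3):
    latent_cov = F @ latent_cov @ F.T + Q                 # P_t = F P_{t-1} F' + Q
    observation_covs.append(H @ latent_cov @ H.T + R)     # S_t = H P_t H' + R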
|
test
|
build_kalman_sample_step
|
Build a callable for one step of Kalman sampling recursion.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
full_sample_and_batch_shape: Desired sample and batch shape of the
returned samples, concatenated in a single `Tensor`.
stream: `tfd.SeedStream` instance used to generate a
sequence of random seeds.
validate_args: if True, perform error checking at runtime.
Returns:
sample_step: a callable that samples the latent state and
observation at time `t`, given latent state at time `t-1`.
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def build_kalman_sample_step(get_transition_matrix_for_timestep,
get_transition_noise_for_timestep,
get_observation_matrix_for_timestep,
get_observation_noise_for_timestep,
full_sample_and_batch_shape,
stream,
validate_args=False):
"""Build a callable for one step of Kalman sampling recursion.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
full_sample_and_batch_shape: Desired sample and batch shape of the
returned samples, concatenated in a single `Tensor`.
stream: `tfd.SeedStream` instance used to generate a
sequence of random seeds.
validate_args: if True, perform error checking at runtime.
Returns:
sample_step: a callable that samples the latent state and
observation at time `t`, given latent state at time `t-1`.
"""
def sample_step(sampled_prev, t):
"""Sample values for a single timestep."""
latent_prev, _ = sampled_prev
transition_matrix = get_transition_matrix_for_timestep(t - 1)
transition_noise = get_transition_noise_for_timestep(t - 1)
latent_pred = transition_matrix.matmul(latent_prev)
latent_sampled = latent_pred + transition_noise.sample(
sample_shape=_augment_sample_shape(
transition_noise,
full_sample_and_batch_shape,
validate_args),
seed=stream())[..., tf.newaxis]
observation_matrix = get_observation_matrix_for_timestep(t)
observation_noise = get_observation_noise_for_timestep(t)
observation_pred = observation_matrix.matmul(latent_sampled)
observation_sampled = observation_pred + observation_noise.sample(
sample_shape=_augment_sample_shape(
observation_noise,
full_sample_and_batch_shape,
validate_args),
seed=stream())[..., tf.newaxis]
return (latent_sampled, observation_sampled)
return sample_step
|
def build_kalman_sample_step(get_transition_matrix_for_timestep,
get_transition_noise_for_timestep,
get_observation_matrix_for_timestep,
get_observation_noise_for_timestep,
full_sample_and_batch_shape,
stream,
validate_args=False):
"""Build a callable for one step of Kalman sampling recursion.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
full_sample_and_batch_shape: Desired sample and batch shape of the
returned samples, concatenated in a single `Tensor`.
stream: `tfd.SeedStream` instance used to generate a
sequence of random seeds.
validate_args: if True, perform error checking at runtime.
Returns:
sample_step: a callable that samples the latent state and
observation at time `t`, given latent state at time `t-1`.
"""
def sample_step(sampled_prev, t):
"""Sample values for a single timestep."""
latent_prev, _ = sampled_prev
transition_matrix = get_transition_matrix_for_timestep(t - 1)
transition_noise = get_transition_noise_for_timestep(t - 1)
latent_pred = transition_matrix.matmul(latent_prev)
latent_sampled = latent_pred + transition_noise.sample(
sample_shape=_augment_sample_shape(
transition_noise,
full_sample_and_batch_shape,
validate_args),
seed=stream())[..., tf.newaxis]
observation_matrix = get_observation_matrix_for_timestep(t)
observation_noise = get_observation_noise_for_timestep(t)
observation_pred = observation_matrix.matmul(latent_sampled)
observation_sampled = observation_pred + observation_noise.sample(
sample_shape=_augment_sample_shape(
observation_noise,
full_sample_and_batch_shape,
validate_args),
seed=stream())[..., tf.newaxis]
return (latent_sampled, observation_sampled)
return sample_step
|
[
"Build",
"a",
"callable",
"for",
"one",
"step",
"of",
"Kalman",
"sampling",
"recursion",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1622-L1685
|
[
"def",
"build_kalman_sample_step",
"(",
"get_transition_matrix_for_timestep",
",",
"get_transition_noise_for_timestep",
",",
"get_observation_matrix_for_timestep",
",",
"get_observation_noise_for_timestep",
",",
"full_sample_and_batch_shape",
",",
"stream",
",",
"validate_args",
"=",
"False",
")",
":",
"def",
"sample_step",
"(",
"sampled_prev",
",",
"t",
")",
":",
"\"\"\"Sample values for a single timestep.\"\"\"",
"latent_prev",
",",
"_",
"=",
"sampled_prev",
"transition_matrix",
"=",
"get_transition_matrix_for_timestep",
"(",
"t",
"-",
"1",
")",
"transition_noise",
"=",
"get_transition_noise_for_timestep",
"(",
"t",
"-",
"1",
")",
"latent_pred",
"=",
"transition_matrix",
".",
"matmul",
"(",
"latent_prev",
")",
"latent_sampled",
"=",
"latent_pred",
"+",
"transition_noise",
".",
"sample",
"(",
"sample_shape",
"=",
"_augment_sample_shape",
"(",
"transition_noise",
",",
"full_sample_and_batch_shape",
",",
"validate_args",
")",
",",
"seed",
"=",
"stream",
"(",
")",
")",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"observation_matrix",
"=",
"get_observation_matrix_for_timestep",
"(",
"t",
")",
"observation_noise",
"=",
"get_observation_noise_for_timestep",
"(",
"t",
")",
"observation_pred",
"=",
"observation_matrix",
".",
"matmul",
"(",
"latent_sampled",
")",
"observation_sampled",
"=",
"observation_pred",
"+",
"observation_noise",
".",
"sample",
"(",
"sample_shape",
"=",
"_augment_sample_shape",
"(",
"observation_noise",
",",
"full_sample_and_batch_shape",
",",
"validate_args",
")",
",",
"seed",
"=",
"stream",
"(",
")",
")",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"return",
"(",
"latent_sampled",
",",
"observation_sampled",
")",
"return",
"sample_step"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
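For intuition, here is a minimal NumPy sketch of the recursion that `sample_step` implements: each step draws the next latent state z_t ~ N(A z_{t-1}, Q) and an observation x_t ~ N(H z_t, R). The dense matrices and the names `A`, `Q`, `H`, `R`, and `sample_step_np` are illustrative assumptions, not part of the TFP API.

import numpy as np

rng = np.random.default_rng(0)

def sample_step_np(z_prev, A, Q, H, R):
    # Latent transition: z_t = A z_{t-1} + transition noise.
    z = A @ z_prev + rng.multivariate_normal(np.zeros(Q.shape[0]), Q)
    # Observation: x_t = H z_t + observation noise.
    x = H @ z + rng.multivariate_normal(np.zeros(R.shape[0]), R)
    return z, x

# Example: a 2-d random walk observed with noise, sampled for 10 steps.
A, Q = np.eye(2), 0.1 * np.eye(2)
H, R = np.eye(2), 0.5 * np.eye(2)
z, xs = np.zeros(2), []
for _ in range(10):
    z, x = sample_step_np(z, A, Q, H, R)
    xs.append(x)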
test
|
build_pushforward_latents_step
|
Build a callable to push latent means/covs to observed means/covs.
Args:
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
    of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
pushforward_latents_step: a callable that computes the observation mean and
covariance at time `t`, given latent mean and covariance at time `t`.
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def build_pushforward_latents_step(get_observation_matrix_for_timestep,
get_observation_noise_for_timestep):
"""Build a callable to push latent means/covs to observed means/covs.
Args:
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
      of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
pushforward_latents_step: a callable that computes the observation mean and
covariance at time `t`, given latent mean and covariance at time `t`.
"""
def pushforward_latents_step(_, latent_t_mean_cov):
"""Loop body fn to pushforward latents to observations at a time step."""
t, latent_mean, latent_cov = latent_t_mean_cov
observation_matrix = get_observation_matrix_for_timestep(t)
observation_noise = get_observation_noise_for_timestep(t)
observation_mean = _propagate_mean(latent_mean,
observation_matrix,
observation_noise)
observation_cov = _propagate_cov(latent_cov,
observation_matrix,
observation_noise)
return (observation_mean, observation_cov)
return pushforward_latents_step
|
def build_pushforward_latents_step(get_observation_matrix_for_timestep,
get_observation_noise_for_timestep):
"""Build a callable to push latent means/covs to observed means/covs.
Args:
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
      of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
pushforward_latents_step: a callable that computes the observation mean and
covariance at time `t`, given latent mean and covariance at time `t`.
"""
def pushforward_latents_step(_, latent_t_mean_cov):
"""Loop body fn to pushforward latents to observations at a time step."""
t, latent_mean, latent_cov = latent_t_mean_cov
observation_matrix = get_observation_matrix_for_timestep(t)
observation_noise = get_observation_noise_for_timestep(t)
observation_mean = _propagate_mean(latent_mean,
observation_matrix,
observation_noise)
observation_cov = _propagate_cov(latent_cov,
observation_matrix,
observation_noise)
return (observation_mean, observation_cov)
return pushforward_latents_step
|
[
"Build",
"a",
"callable",
"to",
"push",
"latent",
"means",
"/",
"covs",
"to",
"observed",
"means",
"/",
"covs",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1688-L1721
|
[
"def",
"build_pushforward_latents_step",
"(",
"get_observation_matrix_for_timestep",
",",
"get_observation_noise_for_timestep",
")",
":",
"def",
"pushforward_latents_step",
"(",
"_",
",",
"latent_t_mean_cov",
")",
":",
"\"\"\"Loop body fn to pushforward latents to observations at a time step.\"\"\"",
"t",
",",
"latent_mean",
",",
"latent_cov",
"=",
"latent_t_mean_cov",
"observation_matrix",
"=",
"get_observation_matrix_for_timestep",
"(",
"t",
")",
"observation_noise",
"=",
"get_observation_noise_for_timestep",
"(",
"t",
")",
"observation_mean",
"=",
"_propagate_mean",
"(",
"latent_mean",
",",
"observation_matrix",
",",
"observation_noise",
")",
"observation_cov",
"=",
"_propagate_cov",
"(",
"latent_cov",
",",
"observation_matrix",
",",
"observation_noise",
")",
"return",
"(",
"observation_mean",
",",
"observation_cov",
")",
"return",
"pushforward_latents_step"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
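What `pushforward_latents_step` computes has a simple closed form: under an observation model x = H z + noise, the observed moments follow directly from the latent moments. A minimal NumPy sketch, with `H`, `noise_mean`, and `R` as illustrative stand-ins for the observation matrix and noise parameters:

import numpy as np

def pushforward_np(latent_mean, latent_cov, H, noise_mean, R):
    # Observed mean: H m + mean of the observation noise.
    obs_mean = H @ latent_mean + noise_mean
    # Observed covariance: H P H^T + covariance of the observation noise.
    obs_cov = H @ latent_cov @ H.T + R
    return obs_mean, obs_cov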
test
|
_propagate_mean
|
Propagate a mean through linear Gaussian transformation.
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def _propagate_mean(mean, linop, dist):
"""Propagate a mean through linear Gaussian transformation."""
return linop.matmul(mean) + dist.mean()[..., tf.newaxis]
|
def _propagate_mean(mean, linop, dist):
"""Propagate a mean through linear Gaussian transformation."""
return linop.matmul(mean) + dist.mean()[..., tf.newaxis]
|
[
"Propagate",
"a",
"mean",
"through",
"linear",
"Gaussian",
"transformation",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1724-L1726
|
[
"def",
"_propagate_mean",
"(",
"mean",
",",
"linop",
",",
"dist",
")",
":",
"return",
"linop",
".",
"matmul",
"(",
"mean",
")",
"+",
"dist",
".",
"mean",
"(",
")",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
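In math, `_propagate_mean` is the mean of an affine pushforward: for operator $A$ (here `linop`) and additive noise $\varepsilon \sim$ `dist`,

$$ \mu_{\text{out}} = A\,\mu_{\text{in}} + \mathbb{E}[\varepsilon]. $$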
test
|
_propagate_cov
|
Propagate covariance through linear Gaussian transformation.
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def _propagate_cov(cov, linop, dist):
"""Propagate covariance through linear Gaussian transformation."""
# For linop A and input cov P, returns `A P A' + dist.cov()`
return linop.matmul(linop.matmul(cov), adjoint_arg=True) + dist.covariance()
|
def _propagate_cov(cov, linop, dist):
"""Propagate covariance through linear Gaussian transformation."""
# For linop A and input cov P, returns `A P A' + dist.cov()`
return linop.matmul(linop.matmul(cov), adjoint_arg=True) + dist.covariance()
|
[
"Propagate",
"covariance",
"through",
"linear",
"Gaussian",
"transformation",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1729-L1732
|
[
"def",
"_propagate_cov",
"(",
"cov",
",",
"linop",
",",
"dist",
")",
":",
"# For linop A and input cov P, returns `A P A' + dist.cov()`",
"return",
"linop",
".",
"matmul",
"(",
"linop",
".",
"matmul",
"(",
"cov",
")",
",",
"adjoint_arg",
"=",
"True",
")",
"+",
"dist",
".",
"covariance",
"(",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
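Correspondingly, `_propagate_cov` is the covariance of the same affine pushforward, matching the comment in the code: for operator $A$, input covariance $P$, and noise $\varepsilon \sim$ `dist`,

$$ P_{\text{out}} = A\,P\,A^{\top} + \operatorname{Cov}(\varepsilon). $$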
test
|
LinearGaussianStateSpaceModel.backward_smoothing_pass
|
Run the backward pass in Kalman smoother.
The backward smoothing uses the Rauch-Tung-Striebel smoother, as
discussed in section 18.3.2 of Kevin P. Murphy, 2012, Machine Learning:
A Probabilistic Perspective, The MIT Press. The inputs are returned by
the `forward_filter` function.
Args:
filtered_means: Means of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`.
filtered_covs: Covariances of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`batch_shape + [num_timesteps, latent_size, latent_size]`.
predicted_means: Means of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size]`.
predicted_covs: Covariances of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `batch_shape + [num_timesteps, latent_size,
latent_size]`.
Returns:
posterior_means: Means of the smoothed marginal distributions
p(z_t | x_{1:T}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`,
which is of the same shape as filtered_means.
posterior_covs: Covariances of the smoothed marginal distributions
p(z_t | x_{1:T}), as a Tensor of shape
    `batch_shape + [num_timesteps, latent_size, latent_size]`,
    which is of the same shape as filtered_covs.
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def backward_smoothing_pass(self,
filtered_means,
filtered_covs,
predicted_means,
predicted_covs):
"""Run the backward pass in Kalman smoother.
    The backward smoothing uses the Rauch-Tung-Striebel smoother, as
    discussed in section 18.3.2 of Kevin P. Murphy, 2012, Machine Learning:
A Probabilistic Perspective, The MIT Press. The inputs are returned by
    the `forward_filter` function.
Args:
filtered_means: Means of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`.
filtered_covs: Covariances of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`batch_shape + [num_timesteps, latent_size, latent_size]`.
predicted_means: Means of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size]`.
predicted_covs: Covariances of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `batch_shape + [num_timesteps, latent_size,
latent_size]`.
Returns:
posterior_means: Means of the smoothed marginal distributions
p(z_t | x_{1:T}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`,
which is of the same shape as filtered_means.
posterior_covs: Covariances of the smoothed marginal distributions
p(z_t | x_{1:T}), as a Tensor of shape
        `batch_shape + [num_timesteps, latent_size, latent_size]`,
        which is of the same shape as filtered_covs.
"""
with tf.name_scope("backward_pass"):
filtered_means = tf.convert_to_tensor(
value=filtered_means, name="filtered_means")
filtered_covs = tf.convert_to_tensor(
value=filtered_covs, name="filtered_covs")
predicted_means = tf.convert_to_tensor(
value=predicted_means, name="predicted_means")
predicted_covs = tf.convert_to_tensor(
value=predicted_covs, name="predicted_covs")
# To scan over time dimension, we need to move 'num_timesteps' from the
# event shape to the initial dimension of the tensor.
filtered_means = distribution_util.move_dimension(filtered_means, -2, 0)
filtered_covs = distribution_util.move_dimension(filtered_covs, -3, 0)
predicted_means = distribution_util.move_dimension(predicted_means, -2, 0)
predicted_covs = distribution_util.move_dimension(predicted_covs, -3, 0)
# The means are assumed to be vectors. Adding a dummy index to
      # ensure the `matmul` op works smoothly.
filtered_means = filtered_means[..., tf.newaxis]
predicted_means = predicted_means[..., tf.newaxis]
initial_backward_mean = predicted_means[-1, ...]
initial_backward_cov = predicted_covs[-1, ...]
num_timesteps = tf.shape(input=filtered_means)[0]
initial_state = BackwardPassState(
backward_mean=initial_backward_mean,
backward_cov=initial_backward_cov,
timestep=self.initial_step + num_timesteps - 1)
update_step_fn = build_backward_pass_step(
self.get_transition_matrix_for_timestep)
      # For the backward pass, we scan the `elems` from last to first.
posterior_states = tf.scan(update_step_fn,
elems=(filtered_means,
filtered_covs,
predicted_means,
predicted_covs),
initializer=initial_state,
reverse=True)
# Move the time dimension back into the event shape.
posterior_means = distribution_util.move_dimension(
posterior_states.backward_mean[..., 0], 0, -2)
posterior_covs = distribution_util.move_dimension(
posterior_states.backward_cov, 0, -3)
return (posterior_means, posterior_covs)
|
def backward_smoothing_pass(self,
filtered_means,
filtered_covs,
predicted_means,
predicted_covs):
"""Run the backward pass in Kalman smoother.
    The backward smoothing uses the Rauch-Tung-Striebel smoother, as
    discussed in section 18.3.2 of Kevin P. Murphy, 2012, Machine Learning:
A Probabilistic Perspective, The MIT Press. The inputs are returned by
    the `forward_filter` function.
Args:
filtered_means: Means of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`.
filtered_covs: Covariances of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`batch_shape + [num_timesteps, latent_size, latent_size]`.
predicted_means: Means of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size]`.
predicted_covs: Covariances of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `batch_shape + [num_timesteps, latent_size,
latent_size]`.
Returns:
posterior_means: Means of the smoothed marginal distributions
p(z_t | x_{1:T}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`,
which is of the same shape as filtered_means.
posterior_covs: Covariances of the smoothed marginal distributions
p(z_t | x_{1:T}), as a Tensor of shape
        `batch_shape + [num_timesteps, latent_size, latent_size]`,
        which is of the same shape as filtered_covs.
"""
with tf.name_scope("backward_pass"):
filtered_means = tf.convert_to_tensor(
value=filtered_means, name="filtered_means")
filtered_covs = tf.convert_to_tensor(
value=filtered_covs, name="filtered_covs")
predicted_means = tf.convert_to_tensor(
value=predicted_means, name="predicted_means")
predicted_covs = tf.convert_to_tensor(
value=predicted_covs, name="predicted_covs")
# To scan over time dimension, we need to move 'num_timesteps' from the
# event shape to the initial dimension of the tensor.
filtered_means = distribution_util.move_dimension(filtered_means, -2, 0)
filtered_covs = distribution_util.move_dimension(filtered_covs, -3, 0)
predicted_means = distribution_util.move_dimension(predicted_means, -2, 0)
predicted_covs = distribution_util.move_dimension(predicted_covs, -3, 0)
# The means are assumed to be vectors. Adding a dummy index to
      # ensure the `matmul` op works smoothly.
filtered_means = filtered_means[..., tf.newaxis]
predicted_means = predicted_means[..., tf.newaxis]
initial_backward_mean = predicted_means[-1, ...]
initial_backward_cov = predicted_covs[-1, ...]
num_timesteps = tf.shape(input=filtered_means)[0]
initial_state = BackwardPassState(
backward_mean=initial_backward_mean,
backward_cov=initial_backward_cov,
timestep=self.initial_step + num_timesteps - 1)
update_step_fn = build_backward_pass_step(
self.get_transition_matrix_for_timestep)
      # For the backward pass, we scan the `elems` from last to first.
posterior_states = tf.scan(update_step_fn,
elems=(filtered_means,
filtered_covs,
predicted_means,
predicted_covs),
initializer=initial_state,
reverse=True)
# Move the time dimension back into the event shape.
posterior_means = distribution_util.move_dimension(
posterior_states.backward_mean[..., 0], 0, -2)
posterior_covs = distribution_util.move_dimension(
posterior_states.backward_cov, 0, -3)
return (posterior_means, posterior_covs)
|
[
"Run",
"the",
"backward",
"pass",
"in",
"Kalman",
"smoother",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L454-L541
|
[
"def",
"backward_smoothing_pass",
"(",
"self",
",",
"filtered_means",
",",
"filtered_covs",
",",
"predicted_means",
",",
"predicted_covs",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"backward_pass\"",
")",
":",
"filtered_means",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"filtered_means",
",",
"name",
"=",
"\"filtered_means\"",
")",
"filtered_covs",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"filtered_covs",
",",
"name",
"=",
"\"filtered_covs\"",
")",
"predicted_means",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"predicted_means",
",",
"name",
"=",
"\"predicted_means\"",
")",
"predicted_covs",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"predicted_covs",
",",
"name",
"=",
"\"predicted_covs\"",
")",
"# To scan over time dimension, we need to move 'num_timesteps' from the",
"# event shape to the initial dimension of the tensor.",
"filtered_means",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"filtered_means",
",",
"-",
"2",
",",
"0",
")",
"filtered_covs",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"filtered_covs",
",",
"-",
"3",
",",
"0",
")",
"predicted_means",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"predicted_means",
",",
"-",
"2",
",",
"0",
")",
"predicted_covs",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"predicted_covs",
",",
"-",
"3",
",",
"0",
")",
"# The means are assumed to be vectors. Adding a dummy index to",
"# ensure the `matmul` op working smoothly.",
"filtered_means",
"=",
"filtered_means",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"predicted_means",
"=",
"predicted_means",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"initial_backward_mean",
"=",
"predicted_means",
"[",
"-",
"1",
",",
"...",
"]",
"initial_backward_cov",
"=",
"predicted_covs",
"[",
"-",
"1",
",",
"...",
"]",
"num_timesteps",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"filtered_means",
")",
"[",
"0",
"]",
"initial_state",
"=",
"BackwardPassState",
"(",
"backward_mean",
"=",
"initial_backward_mean",
",",
"backward_cov",
"=",
"initial_backward_cov",
",",
"timestep",
"=",
"self",
".",
"initial_step",
"+",
"num_timesteps",
"-",
"1",
")",
"update_step_fn",
"=",
"build_backward_pass_step",
"(",
"self",
".",
"get_transition_matrix_for_timestep",
")",
"# For backward pass, it scans the `elems` from last to first.",
"posterior_states",
"=",
"tf",
".",
"scan",
"(",
"update_step_fn",
",",
"elems",
"=",
"(",
"filtered_means",
",",
"filtered_covs",
",",
"predicted_means",
",",
"predicted_covs",
")",
",",
"initializer",
"=",
"initial_state",
",",
"reverse",
"=",
"True",
")",
"# Move the time dimension back into the event shape.",
"posterior_means",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"posterior_states",
".",
"backward_mean",
"[",
"...",
",",
"0",
"]",
",",
"0",
",",
"-",
"2",
")",
"posterior_covs",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"posterior_states",
".",
"backward_cov",
",",
"0",
",",
"-",
"3",
")",
"return",
"(",
"posterior_means",
",",
"posterior_covs",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
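A minimal NumPy sketch of the single Rauch-Tung-Striebel update that `backward_smoothing_pass` scans in reverse over timesteps. Dense matrices and the name `rts_step` are assumptions for illustration; `A` stands for the transition matrix at the relevant step:

import numpy as np

def rts_step(filt_mean, filt_cov, pred_mean, pred_cov,
             next_smoothed_mean, next_smoothed_cov, A):
    # Smoother gain: J = P_filtered A^T P_predicted^{-1}.
    J = filt_cov @ A.T @ np.linalg.inv(pred_cov)
    # Correct the filtered moments using the smoothed moments at t+1.
    smoothed_mean = filt_mean + J @ (next_smoothed_mean - pred_mean)
    smoothed_cov = filt_cov + J @ (next_smoothed_cov - pred_cov) @ J.T
    return smoothed_mean, smoothed_cov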
test
|
LinearGaussianStateSpaceModel._joint_sample_n
|
Draw a joint sample from the prior over latents and observations.
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def _joint_sample_n(self, n, seed=None):
"""Draw a joint sample from the prior over latents and observations."""
with tf.name_scope("sample_n_joint"):
stream = seed_stream.SeedStream(
seed, salt="LinearGaussianStateSpaceModel_sample_n_joint")
sample_and_batch_shape = distribution_util.prefer_static_value(
tf.concat([[n], self.batch_shape_tensor()],
axis=0))
# Sample the initial timestep from the prior. Since we want
# this sample to have full batch shape (not just the batch shape
# of the self.initial_state_prior object which might in general be
# smaller), we augment the sample shape to include whatever
# extra batch dimensions are required.
with tf.control_dependencies(self.runtime_assertions):
initial_latent = self.initial_state_prior.sample(
sample_shape=_augment_sample_shape(
self.initial_state_prior,
sample_and_batch_shape,
self.validate_args),
seed=stream())
# Add a dummy dimension so that matmul() does matrix-vector
# multiplication.
initial_latent = initial_latent[..., tf.newaxis]
initial_observation_matrix = (
self.get_observation_matrix_for_timestep(self.initial_step))
initial_observation_noise = (
self.get_observation_noise_for_timestep(self.initial_step))
initial_observation_pred = initial_observation_matrix.matmul(
initial_latent)
initial_observation = (initial_observation_pred +
initial_observation_noise.sample(
sample_shape=_augment_sample_shape(
initial_observation_noise,
sample_and_batch_shape,
self.validate_args),
seed=stream())[..., tf.newaxis])
sample_step = build_kalman_sample_step(
self.get_transition_matrix_for_timestep,
self.get_transition_noise_for_timestep,
self.get_observation_matrix_for_timestep,
self.get_observation_noise_for_timestep,
full_sample_and_batch_shape=sample_and_batch_shape,
stream=stream,
validate_args=self.validate_args)
# Scan over all timesteps to sample latents and observations.
(latents, observations) = tf.scan(
sample_step,
elems=tf.range(self.initial_step+1, self.final_step),
initializer=(initial_latent, initial_observation))
# Combine the initial sampled timestep with the remaining timesteps.
latents = tf.concat([initial_latent[tf.newaxis, ...],
latents], axis=0)
observations = tf.concat([initial_observation[tf.newaxis, ...],
observations], axis=0)
# Put dimensions back in order. The samples we've computed are
# ordered by timestep, with shape `[num_timesteps, num_samples,
# batch_shape, size, 1]` where `size` represents `latent_size`
# or `observation_size` respectively. But timesteps are really
# part of each probabilistic event, so we need to return a Tensor
# of shape `[num_samples, batch_shape, num_timesteps, size]`.
latents = tf.squeeze(latents, -1)
latents = distribution_util.move_dimension(latents, 0, -2)
observations = tf.squeeze(observations, -1)
observations = distribution_util.move_dimension(observations, 0, -2)
return latents, observations
|
def _joint_sample_n(self, n, seed=None):
"""Draw a joint sample from the prior over latents and observations."""
with tf.name_scope("sample_n_joint"):
stream = seed_stream.SeedStream(
seed, salt="LinearGaussianStateSpaceModel_sample_n_joint")
sample_and_batch_shape = distribution_util.prefer_static_value(
tf.concat([[n], self.batch_shape_tensor()],
axis=0))
# Sample the initial timestep from the prior. Since we want
# this sample to have full batch shape (not just the batch shape
# of the self.initial_state_prior object which might in general be
# smaller), we augment the sample shape to include whatever
# extra batch dimensions are required.
with tf.control_dependencies(self.runtime_assertions):
initial_latent = self.initial_state_prior.sample(
sample_shape=_augment_sample_shape(
self.initial_state_prior,
sample_and_batch_shape,
self.validate_args),
seed=stream())
# Add a dummy dimension so that matmul() does matrix-vector
# multiplication.
initial_latent = initial_latent[..., tf.newaxis]
initial_observation_matrix = (
self.get_observation_matrix_for_timestep(self.initial_step))
initial_observation_noise = (
self.get_observation_noise_for_timestep(self.initial_step))
initial_observation_pred = initial_observation_matrix.matmul(
initial_latent)
initial_observation = (initial_observation_pred +
initial_observation_noise.sample(
sample_shape=_augment_sample_shape(
initial_observation_noise,
sample_and_batch_shape,
self.validate_args),
seed=stream())[..., tf.newaxis])
sample_step = build_kalman_sample_step(
self.get_transition_matrix_for_timestep,
self.get_transition_noise_for_timestep,
self.get_observation_matrix_for_timestep,
self.get_observation_noise_for_timestep,
full_sample_and_batch_shape=sample_and_batch_shape,
stream=stream,
validate_args=self.validate_args)
# Scan over all timesteps to sample latents and observations.
(latents, observations) = tf.scan(
sample_step,
elems=tf.range(self.initial_step+1, self.final_step),
initializer=(initial_latent, initial_observation))
# Combine the initial sampled timestep with the remaining timesteps.
latents = tf.concat([initial_latent[tf.newaxis, ...],
latents], axis=0)
observations = tf.concat([initial_observation[tf.newaxis, ...],
observations], axis=0)
# Put dimensions back in order. The samples we've computed are
# ordered by timestep, with shape `[num_timesteps, num_samples,
# batch_shape, size, 1]` where `size` represents `latent_size`
# or `observation_size` respectively. But timesteps are really
# part of each probabilistic event, so we need to return a Tensor
# of shape `[num_samples, batch_shape, num_timesteps, size]`.
latents = tf.squeeze(latents, -1)
latents = distribution_util.move_dimension(latents, 0, -2)
observations = tf.squeeze(observations, -1)
observations = distribution_util.move_dimension(observations, 0, -2)
return latents, observations
|
[
"Draw",
"a",
"joint",
"sample",
"from",
"the",
"prior",
"over",
"latents",
"and",
"observations",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L590-L665
|
[
"def",
"_joint_sample_n",
"(",
"self",
",",
"n",
",",
"seed",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"sample_n_joint\"",
")",
":",
"stream",
"=",
"seed_stream",
".",
"SeedStream",
"(",
"seed",
",",
"salt",
"=",
"\"LinearGaussianStateSpaceModel_sample_n_joint\"",
")",
"sample_and_batch_shape",
"=",
"distribution_util",
".",
"prefer_static_value",
"(",
"tf",
".",
"concat",
"(",
"[",
"[",
"n",
"]",
",",
"self",
".",
"batch_shape_tensor",
"(",
")",
"]",
",",
"axis",
"=",
"0",
")",
")",
"# Sample the initial timestep from the prior. Since we want",
"# this sample to have full batch shape (not just the batch shape",
"# of the self.initial_state_prior object which might in general be",
"# smaller), we augment the sample shape to include whatever",
"# extra batch dimensions are required.",
"with",
"tf",
".",
"control_dependencies",
"(",
"self",
".",
"runtime_assertions",
")",
":",
"initial_latent",
"=",
"self",
".",
"initial_state_prior",
".",
"sample",
"(",
"sample_shape",
"=",
"_augment_sample_shape",
"(",
"self",
".",
"initial_state_prior",
",",
"sample_and_batch_shape",
",",
"self",
".",
"validate_args",
")",
",",
"seed",
"=",
"stream",
"(",
")",
")",
"# Add a dummy dimension so that matmul() does matrix-vector",
"# multiplication.",
"initial_latent",
"=",
"initial_latent",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"initial_observation_matrix",
"=",
"(",
"self",
".",
"get_observation_matrix_for_timestep",
"(",
"self",
".",
"initial_step",
")",
")",
"initial_observation_noise",
"=",
"(",
"self",
".",
"get_observation_noise_for_timestep",
"(",
"self",
".",
"initial_step",
")",
")",
"initial_observation_pred",
"=",
"initial_observation_matrix",
".",
"matmul",
"(",
"initial_latent",
")",
"initial_observation",
"=",
"(",
"initial_observation_pred",
"+",
"initial_observation_noise",
".",
"sample",
"(",
"sample_shape",
"=",
"_augment_sample_shape",
"(",
"initial_observation_noise",
",",
"sample_and_batch_shape",
",",
"self",
".",
"validate_args",
")",
",",
"seed",
"=",
"stream",
"(",
")",
")",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
")",
"sample_step",
"=",
"build_kalman_sample_step",
"(",
"self",
".",
"get_transition_matrix_for_timestep",
",",
"self",
".",
"get_transition_noise_for_timestep",
",",
"self",
".",
"get_observation_matrix_for_timestep",
",",
"self",
".",
"get_observation_noise_for_timestep",
",",
"full_sample_and_batch_shape",
"=",
"sample_and_batch_shape",
",",
"stream",
"=",
"stream",
",",
"validate_args",
"=",
"self",
".",
"validate_args",
")",
"# Scan over all timesteps to sample latents and observations.",
"(",
"latents",
",",
"observations",
")",
"=",
"tf",
".",
"scan",
"(",
"sample_step",
",",
"elems",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"initial_step",
"+",
"1",
",",
"self",
".",
"final_step",
")",
",",
"initializer",
"=",
"(",
"initial_latent",
",",
"initial_observation",
")",
")",
"# Combine the initial sampled timestep with the remaining timesteps.",
"latents",
"=",
"tf",
".",
"concat",
"(",
"[",
"initial_latent",
"[",
"tf",
".",
"newaxis",
",",
"...",
"]",
",",
"latents",
"]",
",",
"axis",
"=",
"0",
")",
"observations",
"=",
"tf",
".",
"concat",
"(",
"[",
"initial_observation",
"[",
"tf",
".",
"newaxis",
",",
"...",
"]",
",",
"observations",
"]",
",",
"axis",
"=",
"0",
")",
"# Put dimensions back in order. The samples we've computed are",
"# ordered by timestep, with shape `[num_timesteps, num_samples,",
"# batch_shape, size, 1]` where `size` represents `latent_size`",
"# or `observation_size` respectively. But timesteps are really",
"# part of each probabilistic event, so we need to return a Tensor",
"# of shape `[num_samples, batch_shape, num_timesteps, size]`.",
"latents",
"=",
"tf",
".",
"squeeze",
"(",
"latents",
",",
"-",
"1",
")",
"latents",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"latents",
",",
"0",
",",
"-",
"2",
")",
"observations",
"=",
"tf",
".",
"squeeze",
"(",
"observations",
",",
"-",
"1",
")",
"observations",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"observations",
",",
"0",
",",
"-",
"2",
")",
"return",
"latents",
",",
"observations"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
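A usage sketch, assuming a TFP build matching this snapshot: constructing a `LinearGaussianStateSpaceModel` (a 2-d random walk observed with noise) and sampling from it, which exercises the joint-sampling recursion above. Note that the public `sample` method returns observations only; `_joint_sample_n` itself is private.

import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

ndims = 2
model = tfd.LinearGaussianStateSpaceModel(
    num_timesteps=10,
    transition_matrix=tf.linalg.LinearOperatorIdentity(ndims),
    transition_noise=tfd.MultivariateNormalDiag(scale_diag=[1., 1.]),
    observation_matrix=tf.linalg.LinearOperatorIdentity(ndims),
    observation_noise=tfd.MultivariateNormalDiag(scale_diag=[1., 1.]),
    initial_state_prior=tfd.MultivariateNormalDiag(scale_diag=[1., 1.]))

x = model.sample(5)  # observations; shape [5, 10, 2]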
test
|
LinearGaussianStateSpaceModel.forward_filter
|
Run a Kalman filter over a provided sequence of outputs.
Note that the returned values `filtered_means`, `predicted_means`, and
`observation_means` depend on the observed time series `x`, while the
corresponding covariances are independent of the observed series; i.e., they
depend only on the model itself. This means that the mean values have shape
`concat([sample_shape(x), batch_shape, [num_timesteps,
{latent/observation}_size]])`, while the covariances have shape
`concat([batch_shape, [num_timesteps, {latent/observation}_size,
{latent/observation}_size]])`, which does not depend on the sample shape.
Args:
x: a float-type `Tensor` with rightmost dimensions
`[num_timesteps, observation_size]` matching
`self.event_shape`. Additional dimensions must match or be
broadcastable to `self.batch_shape`; any further dimensions
are interpreted as a sample shape.
mask: optional bool-type `Tensor` with rightmost dimension
`[num_timesteps]`; `True` values specify that the value of `x`
at that timestep is masked, i.e., not conditioned on. Additional
dimensions must match or be broadcastable to `self.batch_shape`; any
further dimensions must match or be broadcastable to the sample
shape of `x`.
Default value: `None`.
Returns:
log_likelihoods: Per-timestep log marginal likelihoods `log
p(x_t | x_{:t-1})` evaluated at the input `x`, as a `Tensor`
    of shape `sample_shape(x) + batch_shape + [num_timesteps]`.
filtered_means: Means of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`.
filtered_covs: Covariances of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`sample_shape(mask) + batch_shape + [num_timesteps, latent_size,
latent_size]`. Note that the covariances depend only on the model and
the mask, not on the data, so this may have fewer dimensions than
`filtered_means`.
predicted_means: Means of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size]`.
predicted_covs: Covariances of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `sample_shape(mask) + batch_shape +
[num_timesteps, latent_size, latent_size]`. Note that the covariances
depend only on the model and the mask, not on the data, so this may
have fewer dimensions than `predicted_means`.
observation_means: Means of the per-timestep predictive
distributions over observations, p(x_{t} | x_{:t-1}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, observation_size]`.
observation_covs: Covariances of the per-timestep predictive
distributions over observations, p(x_{t} | x_{:t-1}), as a
Tensor of shape `sample_shape(mask) + batch_shape + [num_timesteps,
observation_size, observation_size]`. Note that the covariances depend
only on the model and the mask, not on the data, so this may have fewer
dimensions than `observation_means`.
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def forward_filter(self, x, mask=None):
"""Run a Kalman filter over a provided sequence of outputs.
Note that the returned values `filtered_means`, `predicted_means`, and
`observation_means` depend on the observed time series `x`, while the
corresponding covariances are independent of the observed series; i.e., they
depend only on the model itself. This means that the mean values have shape
`concat([sample_shape(x), batch_shape, [num_timesteps,
{latent/observation}_size]])`, while the covariances have shape
    `concat([batch_shape, [num_timesteps, {latent/observation}_size,
{latent/observation}_size]])`, which does not depend on the sample shape.
Args:
x: a float-type `Tensor` with rightmost dimensions
`[num_timesteps, observation_size]` matching
`self.event_shape`. Additional dimensions must match or be
broadcastable to `self.batch_shape`; any further dimensions
are interpreted as a sample shape.
mask: optional bool-type `Tensor` with rightmost dimension
`[num_timesteps]`; `True` values specify that the value of `x`
at that timestep is masked, i.e., not conditioned on. Additional
dimensions must match or be broadcastable to `self.batch_shape`; any
further dimensions must match or be broadcastable to the sample
shape of `x`.
Default value: `None`.
Returns:
log_likelihoods: Per-timestep log marginal likelihoods `log
p(x_t | x_{:t-1})` evaluated at the input `x`, as a `Tensor`
        of shape `sample_shape(x) + batch_shape + [num_timesteps]`.
filtered_means: Means of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`.
filtered_covs: Covariances of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`sample_shape(mask) + batch_shape + [num_timesteps, latent_size,
latent_size]`. Note that the covariances depend only on the model and
the mask, not on the data, so this may have fewer dimensions than
`filtered_means`.
predicted_means: Means of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size]`.
predicted_covs: Covariances of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `sample_shape(mask) + batch_shape +
[num_timesteps, latent_size, latent_size]`. Note that the covariances
depend only on the model and the mask, not on the data, so this may
have fewer dimensions than `predicted_means`.
observation_means: Means of the per-timestep predictive
distributions over observations, p(x_{t} | x_{:t-1}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, observation_size]`.
observation_covs: Covariances of the per-timestep predictive
distributions over observations, p(x_{t} | x_{:t-1}), as a
Tensor of shape `sample_shape(mask) + batch_shape + [num_timesteps,
observation_size, observation_size]`. Note that the covariances depend
only on the model and the mask, not on the data, so this may have fewer
dimensions than `observation_means`.
"""
with tf.name_scope("forward_filter"):
x = tf.convert_to_tensor(value=x, name="x")
if mask is not None:
mask = tf.convert_to_tensor(value=mask, name="mask", dtype_hint=tf.bool)
# Check event shape statically if possible
check_x_shape_op = _check_equal_shape(
"x", x.shape[-2:], tf.shape(input=x)[-2:],
self.event_shape, self.event_shape_tensor())
check_mask_dims_op = None
check_mask_shape_op = None
if mask is not None:
if (tensorshape_util.rank(mask.shape) is None or
tensorshape_util.rank(x.shape) is None):
check_mask_dims_op = assert_util.assert_greater_equal(
tf.rank(x),
tf.rank(mask),
message=("mask cannot have higher rank than x!"))
elif tensorshape_util.rank(mask.shape) > tensorshape_util.rank(x.shape):
raise ValueError(
"mask cannot have higher rank than x! ({} vs {})".format(
tensorshape_util.rank(mask.shape),
tensorshape_util.rank(x.shape)))
check_mask_shape_op = _check_equal_shape(
"mask", mask.shape[-1:], tf.shape(input=mask)[-1:],
self.event_shape[-2:-1], self.event_shape_tensor()[-2:-1])
if self.validate_args:
runtime_assertions = self.runtime_assertions
if check_x_shape_op is not None:
runtime_assertions += [check_x_shape_op]
if check_mask_shape_op is not None:
runtime_assertions += [check_mask_shape_op]
if check_mask_dims_op is not None:
runtime_assertions += [check_mask_dims_op]
with tf.control_dependencies(runtime_assertions):
x = tf.identity(x)
# Get the full output sample_shape + batch shape. Usually
# this will just be x[:-2], i.e. the input shape excluding
# event shape. But users can specify inputs that broadcast
# batch dimensions, so we need to broadcast this against
# self.batch_shape.
if tensorshape_util.is_fully_defined(
self.batch_shape) and tensorshape_util.is_fully_defined(x.shape):
sample_and_batch_shape = tf.broadcast_static_shape(
x.shape[:-2], self.batch_shape)
else:
sample_and_batch_shape = tf.broadcast_dynamic_shape(
tf.shape(input=x)[:-2], self.batch_shape_tensor())
# Get the full output shape for covariances. The posterior variances
# in a LGSSM depend only on the model params (batch shape) and on the
# missingness pattern (mask shape), so in general this may be smaller
# than the full `sample_and_batch_shape`.
if mask is None:
mask_sample_and_batch_shape = self.batch_shape_tensor()
else:
if (tensorshape_util.is_fully_defined(self.batch_shape) and
tensorshape_util.is_fully_defined(mask.shape)):
mask_sample_and_batch_shape = tf.broadcast_static_shape(
mask.shape[:-1], self.batch_shape)
else:
mask_sample_and_batch_shape = tf.broadcast_dynamic_shape(
tf.shape(input=mask)[:-1], self.batch_shape_tensor())
      # To scan over timesteps we need to move `num_timesteps` from the
# event shape to the initial dimension of the tensor.
x = distribution_util.move_dimension(x, -2, 0)
if mask is not None:
mask = distribution_util.move_dimension(mask, -1, 0)
# Observations are assumed to be vectors, but we add a dummy
# extra dimension to allow us to use `matmul` throughout.
x = x[..., tf.newaxis]
if mask is not None:
# Align mask.shape with x.shape, including a unit dimension to broadcast
# against `observation_size`.
mask = mask[..., tf.newaxis, tf.newaxis]
# Initialize filtering distribution from the prior. The mean in
# a Kalman filter depends on data, so should match the full
# sample and batch shape. The covariance is data-independent, so
# only has batch shape.
prior_mean = _broadcast_to_shape(
self.initial_state_prior.mean()[..., tf.newaxis],
tf.concat([sample_and_batch_shape,
[self.latent_size, 1]], axis=0))
prior_cov = _broadcast_to_shape(
self.initial_state_prior.covariance(),
tf.concat([mask_sample_and_batch_shape,
[self.latent_size, self.latent_size]], axis=0))
initial_observation_matrix = (
self.get_observation_matrix_for_timestep(self.initial_step))
initial_observation_noise = (
self.get_observation_noise_for_timestep(self.initial_step))
initial_observation_mean = _propagate_mean(prior_mean,
initial_observation_matrix,
initial_observation_noise)
initial_observation_cov = _propagate_cov(prior_cov,
initial_observation_matrix,
initial_observation_noise)
initial_state = KalmanFilterState(
predicted_mean=prior_mean,
predicted_cov=prior_cov,
filtered_mean=prior_mean, # establishes shape, value ignored
filtered_cov=prior_cov, # establishes shape, value ignored
observation_mean=initial_observation_mean,
observation_cov=initial_observation_cov,
log_marginal_likelihood=tf.zeros(
shape=sample_and_batch_shape, dtype=self.dtype),
timestep=tf.convert_to_tensor(
value=self.initial_step, dtype=tf.int32, name="initial_step"))
update_step_fn = build_kalman_filter_step(
self.get_transition_matrix_for_timestep,
self.get_transition_noise_for_timestep,
self.get_observation_matrix_for_timestep,
self.get_observation_noise_for_timestep)
filter_states = tf.scan(update_step_fn,
elems=x if mask is None else (x, mask),
initializer=initial_state)
log_likelihoods = distribution_util.move_dimension(
filter_states.log_marginal_likelihood, 0, -1)
# Move the time dimension back into the event shape.
filtered_means = distribution_util.move_dimension(
filter_states.filtered_mean[..., 0], 0, -2)
filtered_covs = distribution_util.move_dimension(
filter_states.filtered_cov, 0, -3)
predicted_means = distribution_util.move_dimension(
filter_states.predicted_mean[..., 0], 0, -2)
predicted_covs = distribution_util.move_dimension(
filter_states.predicted_cov, 0, -3)
observation_means = distribution_util.move_dimension(
filter_states.observation_mean[..., 0], 0, -2)
observation_covs = distribution_util.move_dimension(
filter_states.observation_cov, 0, -3)
# We could directly construct the batch Distributions
# filtered_marginals = tfd.MultivariateNormalFullCovariance(
# filtered_means, filtered_covs)
# predicted_marginals = tfd.MultivariateNormalFullCovariance(
# predicted_means, predicted_covs)
# but we choose not to: returning the raw means and covariances
# saves computation in Eager mode (avoiding an immediate
# Cholesky factorization that the user may not want) and aids
# debugging of numerical issues.
return (log_likelihoods,
filtered_means, filtered_covs,
predicted_means, predicted_covs,
observation_means, observation_covs)
|
def forward_filter(self, x, mask=None):
"""Run a Kalman filter over a provided sequence of outputs.
Note that the returned values `filtered_means`, `predicted_means`, and
`observation_means` depend on the observed time series `x`, while the
corresponding covariances are independent of the observed series; i.e., they
depend only on the model itself. This means that the mean values have shape
`concat([sample_shape(x), batch_shape, [num_timesteps,
{latent/observation}_size]])`, while the covariances have shape
    `concat([batch_shape, [num_timesteps, {latent/observation}_size,
{latent/observation}_size]])`, which does not depend on the sample shape.
Args:
x: a float-type `Tensor` with rightmost dimensions
`[num_timesteps, observation_size]` matching
`self.event_shape`. Additional dimensions must match or be
broadcastable to `self.batch_shape`; any further dimensions
are interpreted as a sample shape.
mask: optional bool-type `Tensor` with rightmost dimension
`[num_timesteps]`; `True` values specify that the value of `x`
at that timestep is masked, i.e., not conditioned on. Additional
dimensions must match or be broadcastable to `self.batch_shape`; any
further dimensions must match or be broadcastable to the sample
shape of `x`.
Default value: `None`.
Returns:
log_likelihoods: Per-timestep log marginal likelihoods `log
p(x_t | x_{:t-1})` evaluated at the input `x`, as a `Tensor`
        of shape `sample_shape(x) + batch_shape + [num_timesteps]`.
filtered_means: Means of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`.
filtered_covs: Covariances of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`sample_shape(mask) + batch_shape + [num_timesteps, latent_size,
latent_size]`. Note that the covariances depend only on the model and
the mask, not on the data, so this may have fewer dimensions than
`filtered_means`.
predicted_means: Means of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size]`.
predicted_covs: Covariances of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `sample_shape(mask) + batch_shape +
[num_timesteps, latent_size, latent_size]`. Note that the covariances
depend only on the model and the mask, not on the data, so this may
have fewer dimensions than `predicted_means`.
observation_means: Means of the per-timestep predictive
distributions over observations, p(x_{t} | x_{:t-1}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, observation_size]`.
observation_covs: Covariances of the per-timestep predictive
distributions over observations, p(x_{t} | x_{:t-1}), as a
Tensor of shape `sample_shape(mask) + batch_shape + [num_timesteps,
observation_size, observation_size]`. Note that the covariances depend
only on the model and the mask, not on the data, so this may have fewer
dimensions than `observation_means`.
"""
with tf.name_scope("forward_filter"):
x = tf.convert_to_tensor(value=x, name="x")
if mask is not None:
mask = tf.convert_to_tensor(value=mask, name="mask", dtype_hint=tf.bool)
# Check event shape statically if possible
check_x_shape_op = _check_equal_shape(
"x", x.shape[-2:], tf.shape(input=x)[-2:],
self.event_shape, self.event_shape_tensor())
check_mask_dims_op = None
check_mask_shape_op = None
if mask is not None:
if (tensorshape_util.rank(mask.shape) is None or
tensorshape_util.rank(x.shape) is None):
check_mask_dims_op = assert_util.assert_greater_equal(
tf.rank(x),
tf.rank(mask),
message=("mask cannot have higher rank than x!"))
elif tensorshape_util.rank(mask.shape) > tensorshape_util.rank(x.shape):
raise ValueError(
"mask cannot have higher rank than x! ({} vs {})".format(
tensorshape_util.rank(mask.shape),
tensorshape_util.rank(x.shape)))
check_mask_shape_op = _check_equal_shape(
"mask", mask.shape[-1:], tf.shape(input=mask)[-1:],
self.event_shape[-2:-1], self.event_shape_tensor()[-2:-1])
if self.validate_args:
runtime_assertions = self.runtime_assertions
if check_x_shape_op is not None:
runtime_assertions += [check_x_shape_op]
if check_mask_shape_op is not None:
runtime_assertions += [check_mask_shape_op]
if check_mask_dims_op is not None:
runtime_assertions += [check_mask_dims_op]
with tf.control_dependencies(runtime_assertions):
x = tf.identity(x)
# Get the full output sample_shape + batch shape. Usually
# this will just be x[:-2], i.e. the input shape excluding
# event shape. But users can specify inputs that broadcast
# batch dimensions, so we need to broadcast this against
# self.batch_shape.
if tensorshape_util.is_fully_defined(
self.batch_shape) and tensorshape_util.is_fully_defined(x.shape):
sample_and_batch_shape = tf.broadcast_static_shape(
x.shape[:-2], self.batch_shape)
else:
sample_and_batch_shape = tf.broadcast_dynamic_shape(
tf.shape(input=x)[:-2], self.batch_shape_tensor())
# Get the full output shape for covariances. The posterior variances
# in a LGSSM depend only on the model params (batch shape) and on the
# missingness pattern (mask shape), so in general this may be smaller
# than the full `sample_and_batch_shape`.
if mask is None:
mask_sample_and_batch_shape = self.batch_shape_tensor()
else:
if (tensorshape_util.is_fully_defined(self.batch_shape) and
tensorshape_util.is_fully_defined(mask.shape)):
mask_sample_and_batch_shape = tf.broadcast_static_shape(
mask.shape[:-1], self.batch_shape)
else:
mask_sample_and_batch_shape = tf.broadcast_dynamic_shape(
tf.shape(input=mask)[:-1], self.batch_shape_tensor())
      # To scan over timesteps we need to move `num_timesteps` from the
# event shape to the initial dimension of the tensor.
x = distribution_util.move_dimension(x, -2, 0)
if mask is not None:
mask = distribution_util.move_dimension(mask, -1, 0)
# Observations are assumed to be vectors, but we add a dummy
# extra dimension to allow us to use `matmul` throughout.
x = x[..., tf.newaxis]
if mask is not None:
# Align mask.shape with x.shape, including a unit dimension to broadcast
# against `observation_size`.
mask = mask[..., tf.newaxis, tf.newaxis]
# Initialize filtering distribution from the prior. The mean in
# a Kalman filter depends on data, so should match the full
# sample and batch shape. The covariance is data-independent, so
# only has batch shape.
prior_mean = _broadcast_to_shape(
self.initial_state_prior.mean()[..., tf.newaxis],
tf.concat([sample_and_batch_shape,
[self.latent_size, 1]], axis=0))
prior_cov = _broadcast_to_shape(
self.initial_state_prior.covariance(),
tf.concat([mask_sample_and_batch_shape,
[self.latent_size, self.latent_size]], axis=0))
initial_observation_matrix = (
self.get_observation_matrix_for_timestep(self.initial_step))
initial_observation_noise = (
self.get_observation_noise_for_timestep(self.initial_step))
initial_observation_mean = _propagate_mean(prior_mean,
initial_observation_matrix,
initial_observation_noise)
initial_observation_cov = _propagate_cov(prior_cov,
initial_observation_matrix,
initial_observation_noise)
initial_state = KalmanFilterState(
predicted_mean=prior_mean,
predicted_cov=prior_cov,
filtered_mean=prior_mean, # establishes shape, value ignored
filtered_cov=prior_cov, # establishes shape, value ignored
observation_mean=initial_observation_mean,
observation_cov=initial_observation_cov,
log_marginal_likelihood=tf.zeros(
shape=sample_and_batch_shape, dtype=self.dtype),
timestep=tf.convert_to_tensor(
value=self.initial_step, dtype=tf.int32, name="initial_step"))
update_step_fn = build_kalman_filter_step(
self.get_transition_matrix_for_timestep,
self.get_transition_noise_for_timestep,
self.get_observation_matrix_for_timestep,
self.get_observation_noise_for_timestep)
filter_states = tf.scan(update_step_fn,
elems=x if mask is None else (x, mask),
initializer=initial_state)
log_likelihoods = distribution_util.move_dimension(
filter_states.log_marginal_likelihood, 0, -1)
# Move the time dimension back into the event shape.
filtered_means = distribution_util.move_dimension(
filter_states.filtered_mean[..., 0], 0, -2)
filtered_covs = distribution_util.move_dimension(
filter_states.filtered_cov, 0, -3)
predicted_means = distribution_util.move_dimension(
filter_states.predicted_mean[..., 0], 0, -2)
predicted_covs = distribution_util.move_dimension(
filter_states.predicted_cov, 0, -3)
observation_means = distribution_util.move_dimension(
filter_states.observation_mean[..., 0], 0, -2)
observation_covs = distribution_util.move_dimension(
filter_states.observation_cov, 0, -3)
# We could directly construct the batch Distributions
# filtered_marginals = tfd.MultivariateNormalFullCovariance(
# filtered_means, filtered_covs)
# predicted_marginals = tfd.MultivariateNormalFullCovariance(
# predicted_means, predicted_covs)
# but we choose not to: returning the raw means and covariances
# saves computation in Eager mode (avoiding an immediate
# Cholesky factorization that the user may not want) and aids
# debugging of numerical issues.
return (log_likelihoods,
filtered_means, filtered_covs,
predicted_means, predicted_covs,
observation_means, observation_covs)
|
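Continuing the sketch from `_joint_sample_n` above (the `model` and `x` defined there are assumed), `forward_filter` returns the per-timestep filtering quantities documented in this docstring:

(log_likelihoods,
 filtered_means, filtered_covs,
 predicted_means, predicted_covs,
 observation_means, observation_covs) = model.forward_filter(x)

# The total data log-likelihood sums the per-timestep terms.
total_log_lik = tf.reduce_sum(log_likelihoods, axis=-1)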
[
"Run",
"a",
"Kalman",
"filter",
"over",
"a",
"provided",
"sequence",
"of",
"outputs",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L696-L912
|
[
"def",
"forward_filter",
"(",
"self",
",",
"x",
",",
"mask",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"forward_filter\"",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"x\"",
")",
"if",
"mask",
"is",
"not",
"None",
":",
"mask",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"mask",
",",
"name",
"=",
"\"mask\"",
",",
"dtype_hint",
"=",
"tf",
".",
"bool",
")",
"# Check event shape statically if possible",
"check_x_shape_op",
"=",
"_check_equal_shape",
"(",
"\"x\"",
",",
"x",
".",
"shape",
"[",
"-",
"2",
":",
"]",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"[",
"-",
"2",
":",
"]",
",",
"self",
".",
"event_shape",
",",
"self",
".",
"event_shape_tensor",
"(",
")",
")",
"check_mask_dims_op",
"=",
"None",
"check_mask_shape_op",
"=",
"None",
"if",
"mask",
"is",
"not",
"None",
":",
"if",
"(",
"tensorshape_util",
".",
"rank",
"(",
"mask",
".",
"shape",
")",
"is",
"None",
"or",
"tensorshape_util",
".",
"rank",
"(",
"x",
".",
"shape",
")",
"is",
"None",
")",
":",
"check_mask_dims_op",
"=",
"assert_util",
".",
"assert_greater_equal",
"(",
"tf",
".",
"rank",
"(",
"x",
")",
",",
"tf",
".",
"rank",
"(",
"mask",
")",
",",
"message",
"=",
"(",
"\"mask cannot have higher rank than x!\"",
")",
")",
"elif",
"tensorshape_util",
".",
"rank",
"(",
"mask",
".",
"shape",
")",
">",
"tensorshape_util",
".",
"rank",
"(",
"x",
".",
"shape",
")",
":",
"raise",
"ValueError",
"(",
"\"mask cannot have higher rank than x! ({} vs {})\"",
".",
"format",
"(",
"tensorshape_util",
".",
"rank",
"(",
"mask",
".",
"shape",
")",
",",
"tensorshape_util",
".",
"rank",
"(",
"x",
".",
"shape",
")",
")",
")",
"check_mask_shape_op",
"=",
"_check_equal_shape",
"(",
"\"mask\"",
",",
"mask",
".",
"shape",
"[",
"-",
"1",
":",
"]",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"mask",
")",
"[",
"-",
"1",
":",
"]",
",",
"self",
".",
"event_shape",
"[",
"-",
"2",
":",
"-",
"1",
"]",
",",
"self",
".",
"event_shape_tensor",
"(",
")",
"[",
"-",
"2",
":",
"-",
"1",
"]",
")",
"if",
"self",
".",
"validate_args",
":",
"runtime_assertions",
"=",
"self",
".",
"runtime_assertions",
"if",
"check_x_shape_op",
"is",
"not",
"None",
":",
"runtime_assertions",
"+=",
"[",
"check_x_shape_op",
"]",
"if",
"check_mask_shape_op",
"is",
"not",
"None",
":",
"runtime_assertions",
"+=",
"[",
"check_mask_shape_op",
"]",
"if",
"check_mask_dims_op",
"is",
"not",
"None",
":",
"runtime_assertions",
"+=",
"[",
"check_mask_dims_op",
"]",
"with",
"tf",
".",
"control_dependencies",
"(",
"runtime_assertions",
")",
":",
"x",
"=",
"tf",
".",
"identity",
"(",
"x",
")",
"# Get the full output sample_shape + batch shape. Usually",
"# this will just be x[:-2], i.e. the input shape excluding",
"# event shape. But users can specify inputs that broadcast",
"# batch dimensions, so we need to broadcast this against",
"# self.batch_shape.",
"if",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"self",
".",
"batch_shape",
")",
"and",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"x",
".",
"shape",
")",
":",
"sample_and_batch_shape",
"=",
"tf",
".",
"broadcast_static_shape",
"(",
"x",
".",
"shape",
"[",
":",
"-",
"2",
"]",
",",
"self",
".",
"batch_shape",
")",
"else",
":",
"sample_and_batch_shape",
"=",
"tf",
".",
"broadcast_dynamic_shape",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"[",
":",
"-",
"2",
"]",
",",
"self",
".",
"batch_shape_tensor",
"(",
")",
")",
"# Get the full output shape for covariances. The posterior variances",
"# in a LGSSM depend only on the model params (batch shape) and on the",
"# missingness pattern (mask shape), so in general this may be smaller",
"# than the full `sample_and_batch_shape`.",
"if",
"mask",
"is",
"None",
":",
"mask_sample_and_batch_shape",
"=",
"self",
".",
"batch_shape_tensor",
"(",
")",
"else",
":",
"if",
"(",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"self",
".",
"batch_shape",
")",
"and",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"mask",
".",
"shape",
")",
")",
":",
"mask_sample_and_batch_shape",
"=",
"tf",
".",
"broadcast_static_shape",
"(",
"mask",
".",
"shape",
"[",
":",
"-",
"1",
"]",
",",
"self",
".",
"batch_shape",
")",
"else",
":",
"mask_sample_and_batch_shape",
"=",
"tf",
".",
"broadcast_dynamic_shape",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"mask",
")",
"[",
":",
"-",
"1",
"]",
",",
"self",
".",
"batch_shape_tensor",
"(",
")",
")",
"# To scan over timesteps we need to move `num_timsteps` from the",
"# event shape to the initial dimension of the tensor.",
"x",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"x",
",",
"-",
"2",
",",
"0",
")",
"if",
"mask",
"is",
"not",
"None",
":",
"mask",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"mask",
",",
"-",
"1",
",",
"0",
")",
"# Observations are assumed to be vectors, but we add a dummy",
"# extra dimension to allow us to use `matmul` throughout.",
"x",
"=",
"x",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"if",
"mask",
"is",
"not",
"None",
":",
"# Align mask.shape with x.shape, including a unit dimension to broadcast",
"# against `observation_size`.",
"mask",
"=",
"mask",
"[",
"...",
",",
"tf",
".",
"newaxis",
",",
"tf",
".",
"newaxis",
"]",
"# Initialize filtering distribution from the prior. The mean in",
"# a Kalman filter depends on data, so should match the full",
"# sample and batch shape. The covariance is data-independent, so",
"# only has batch shape.",
"prior_mean",
"=",
"_broadcast_to_shape",
"(",
"self",
".",
"initial_state_prior",
".",
"mean",
"(",
")",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
",",
"tf",
".",
"concat",
"(",
"[",
"sample_and_batch_shape",
",",
"[",
"self",
".",
"latent_size",
",",
"1",
"]",
"]",
",",
"axis",
"=",
"0",
")",
")",
"prior_cov",
"=",
"_broadcast_to_shape",
"(",
"self",
".",
"initial_state_prior",
".",
"covariance",
"(",
")",
",",
"tf",
".",
"concat",
"(",
"[",
"mask_sample_and_batch_shape",
",",
"[",
"self",
".",
"latent_size",
",",
"self",
".",
"latent_size",
"]",
"]",
",",
"axis",
"=",
"0",
")",
")",
"initial_observation_matrix",
"=",
"(",
"self",
".",
"get_observation_matrix_for_timestep",
"(",
"self",
".",
"initial_step",
")",
")",
"initial_observation_noise",
"=",
"(",
"self",
".",
"get_observation_noise_for_timestep",
"(",
"self",
".",
"initial_step",
")",
")",
"initial_observation_mean",
"=",
"_propagate_mean",
"(",
"prior_mean",
",",
"initial_observation_matrix",
",",
"initial_observation_noise",
")",
"initial_observation_cov",
"=",
"_propagate_cov",
"(",
"prior_cov",
",",
"initial_observation_matrix",
",",
"initial_observation_noise",
")",
"initial_state",
"=",
"KalmanFilterState",
"(",
"predicted_mean",
"=",
"prior_mean",
",",
"predicted_cov",
"=",
"prior_cov",
",",
"filtered_mean",
"=",
"prior_mean",
",",
"# establishes shape, value ignored",
"filtered_cov",
"=",
"prior_cov",
",",
"# establishes shape, value ignored",
"observation_mean",
"=",
"initial_observation_mean",
",",
"observation_cov",
"=",
"initial_observation_cov",
",",
"log_marginal_likelihood",
"=",
"tf",
".",
"zeros",
"(",
"shape",
"=",
"sample_and_batch_shape",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
",",
"timestep",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"self",
".",
"initial_step",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"\"initial_step\"",
")",
")",
"update_step_fn",
"=",
"build_kalman_filter_step",
"(",
"self",
".",
"get_transition_matrix_for_timestep",
",",
"self",
".",
"get_transition_noise_for_timestep",
",",
"self",
".",
"get_observation_matrix_for_timestep",
",",
"self",
".",
"get_observation_noise_for_timestep",
")",
"filter_states",
"=",
"tf",
".",
"scan",
"(",
"update_step_fn",
",",
"elems",
"=",
"x",
"if",
"mask",
"is",
"None",
"else",
"(",
"x",
",",
"mask",
")",
",",
"initializer",
"=",
"initial_state",
")",
"log_likelihoods",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"filter_states",
".",
"log_marginal_likelihood",
",",
"0",
",",
"-",
"1",
")",
"# Move the time dimension back into the event shape.",
"filtered_means",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"filter_states",
".",
"filtered_mean",
"[",
"...",
",",
"0",
"]",
",",
"0",
",",
"-",
"2",
")",
"filtered_covs",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"filter_states",
".",
"filtered_cov",
",",
"0",
",",
"-",
"3",
")",
"predicted_means",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"filter_states",
".",
"predicted_mean",
"[",
"...",
",",
"0",
"]",
",",
"0",
",",
"-",
"2",
")",
"predicted_covs",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"filter_states",
".",
"predicted_cov",
",",
"0",
",",
"-",
"3",
")",
"observation_means",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"filter_states",
".",
"observation_mean",
"[",
"...",
",",
"0",
"]",
",",
"0",
",",
"-",
"2",
")",
"observation_covs",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"filter_states",
".",
"observation_cov",
",",
"0",
",",
"-",
"3",
")",
"# We could directly construct the batch Distributions",
"# filtered_marginals = tfd.MultivariateNormalFullCovariance(",
"# filtered_means, filtered_covs)",
"# predicted_marginals = tfd.MultivariateNormalFullCovariance(",
"# predicted_means, predicted_covs)",
"# but we choose not to: returning the raw means and covariances",
"# saves computation in Eager mode (avoiding an immediate",
"# Cholesky factorization that the user may not want) and aids",
"# debugging of numerical issues.",
"return",
"(",
"log_likelihoods",
",",
"filtered_means",
",",
"filtered_covs",
",",
"predicted_means",
",",
"predicted_covs",
",",
"observation_means",
",",
"observation_covs",
")"
] |
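For reference, the per-timestep recursion that `build_kalman_filter_step` scans over is the textbook Kalman predict/update cycle. A minimal NumPy sketch of one step (not from the source; zero-mean noise and no masking assumed):
```
import numpy as np

def kalman_step(m, P, y, A, Q, H, R):
  # Predict: push the filtered estimate through the transition model.
  m_pred = A @ m
  P_pred = A @ P @ A.T + Q
  # Update: condition on observation y via the Kalman gain.
  S = H @ P_pred @ H.T + R               # predictive observation covariance
  K = P_pred @ H.T @ np.linalg.inv(S)    # Kalman gain
  m_filt = m_pred + K @ (y - H @ m_pred)
  P_filt = (np.eye(len(m)) - K @ H) @ P_pred
  return m_filt, P_filt
```
`tf.scan` threads exactly this kind of state (plus the running log marginal likelihood) over the time-major observations prepared above.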
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
LinearGaussianStateSpaceModel.posterior_marginals
|
Run a Kalman smoother to return posterior mean and cov.
Note that the returned values `smoothed_means` depend on the observed
time series `x`, while the `smoothed_covs` are independent
of the observed series; i.e., they depend only on the model itself.
This means that the mean values have shape `concat([sample_shape(x),
batch_shape, [num_timesteps, {latent/observation}_size]])`,
while the covariances have shape `concat([batch_shape, [num_timesteps,
{latent/observation}_size, {latent/observation}_size]])`, which
does not depend on the sample shape.
This function only performs smoothing. If the intermediate values
computed by the filtering pass, `forward_filter`, are also needed,
they can be obtained as follows:
```
(log_likelihoods,
filtered_means, filtered_covs,
predicted_means, predicted_covs,
observation_means, observation_covs) = model.forward_filter(x)
smoothed_means, smoothed_covs = model.backward_smoothing_pass(
    filtered_means, filtered_covs, predicted_means, predicted_covs)
```
where `x` is an observation sequence.
Args:
x: a float-type `Tensor` with rightmost dimensions
`[num_timesteps, observation_size]` matching
`self.event_shape`. Additional dimensions must match or be
broadcastable to `self.batch_shape`; any further dimensions
are interpreted as a sample shape.
mask: optional bool-type `Tensor` with rightmost dimension
`[num_timesteps]`; `True` values specify that the value of `x`
at that timestep is masked, i.e., not conditioned on. Additional
dimensions must match or be broadcastable to `self.batch_shape`; any
further dimensions must match or be broadcastable to the sample
shape of `x`.
Default value: `None`.
Returns:
smoothed_means: Means of the per-timestep smoothed
distributions over latent states, p(z_{t} | x_{:T}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size]`.
smoothed_covs: Covariances of the per-timestep smoothed
distributions over latent states, p(z_{t} | x_{:T}), as a
Tensor of shape `sample_shape(mask) + batch_shape + [num_timesteps,
latent_size, latent_size]`. Note that the covariances depend
only on the model and the mask, not on the data, so this may have fewer
dimensions than `smoothed_means`.
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def posterior_marginals(self, x, mask=None):
"""Run a Kalman smoother to return posterior mean and cov.
Note that the returned values `smoothed_means` depend on the observed
time series `x`, while the `smoothed_covs` are independent
of the observed series; i.e., they depend only on the model itself.
This means that the mean values have shape `concat([sample_shape(x),
batch_shape, [num_timesteps, {latent/observation}_size]])`,
while the covariances have shape `concat([batch_shape, [num_timesteps,
{latent/observation}_size, {latent/observation}_size]])`, which
does not depend on the sample shape.
This function only performs smoothing. If the intermediate values
computed by the filtering pass, `forward_filter`, are also needed,
they can be obtained as follows:
```
(log_likelihoods,
filtered_means, filtered_covs,
predicted_means, predicted_covs,
observation_means, observation_covs) = model.forward_filter(x)
smoothed_means, smoothed_covs = model.backward_smoothing_pass(
    filtered_means, filtered_covs, predicted_means, predicted_covs)
```
where `x` is an observation sequence.
Args:
x: a float-type `Tensor` with rightmost dimensions
`[num_timesteps, observation_size]` matching
`self.event_shape`. Additional dimensions must match or be
broadcastable to `self.batch_shape`; any further dimensions
are interpreted as a sample shape.
mask: optional bool-type `Tensor` with rightmost dimension
`[num_timesteps]`; `True` values specify that the value of `x`
at that timestep is masked, i.e., not conditioned on. Additional
dimensions must match or be broadcastable to `self.batch_shape`; any
further dimensions must match or be broadcastable to the sample
shape of `x`.
Default value: `None`.
Returns:
smoothed_means: Means of the per-timestep smoothed
distributions over latent states, p(z_{t} | x_{:T}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size]`.
smoothed_covs: Covariances of the per-timestep smoothed
distributions over latent states, p(z_{t} | x_{:T}), as a
Tensor of shape `sample_shape(mask) + batch_shape + [num_timesteps,
latent_size, latent_size]`. Note that the covariances depend
only on the model and the mask, not on the data, so this may have fewer
dimensions than `smoothed_means`.
"""
with tf.name_scope("smooth"):
x = tf.convert_to_tensor(value=x, name="x")
(_, filtered_means, filtered_covs,
predicted_means, predicted_covs, _, _) = self.forward_filter(
x, mask=mask)
(smoothed_means, smoothed_covs) = self.backward_smoothing_pass(
filtered_means, filtered_covs,
predicted_means, predicted_covs)
return (smoothed_means, smoothed_covs)
|
def posterior_marginals(self, x, mask=None):
"""Run a Kalman smoother to return posterior mean and cov.
Note that the returned values `smoothed_means` depend on the observed
time series `x`, while the `smoothed_covs` are independent
of the observed series; i.e., they depend only on the model itself.
This means that the mean values have shape `concat([sample_shape(x),
batch_shape, [num_timesteps, {latent/observation}_size]])`,
while the covariances have shape `concat([batch_shape, [num_timesteps,
{latent/observation}_size, {latent/observation}_size]])`, which
does not depend on the sample shape.
This function only performs smoothing. If the intermediate values
computed by the filtering pass, `forward_filter`, are also needed,
they can be obtained as follows:
```
(log_likelihoods,
filtered_means, filtered_covs,
predicted_means, predicted_covs,
observation_means, observation_covs) = model.forward_filter(x)
smoothed_means, smoothed_covs = model.backward_smoothing_pass(
    filtered_means, filtered_covs, predicted_means, predicted_covs)
```
where `x` is an observation sequence.
Args:
x: a float-type `Tensor` with rightmost dimensions
`[num_timesteps, observation_size]` matching
`self.event_shape`. Additional dimensions must match or be
broadcastable to `self.batch_shape`; any further dimensions
are interpreted as a sample shape.
mask: optional bool-type `Tensor` with rightmost dimension
`[num_timesteps]`; `True` values specify that the value of `x`
at that timestep is masked, i.e., not conditioned on. Additional
dimensions must match or be broadcastable to `self.batch_shape`; any
further dimensions must match or be broadcastable to the sample
shape of `x`.
Default value: `None`.
Returns:
smoothed_means: Means of the per-timestep smoothed
distributions over latent states, p(z_{t} | x_{:T}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size]`.
smoothed_covs: Covariances of the per-timestep smoothed
distributions over latent states, p(z_{t} | x_{:T}), as a
Tensor of shape `sample_shape(mask) + batch_shape + [num_timesteps,
latent_size, latent_size]`. Note that the covariances depend
only on the model and the mask, not on the data, so this may have fewer
dimensions than `smoothed_means`.
"""
with tf.name_scope("smooth"):
x = tf.convert_to_tensor(value=x, name="x")
(_, filtered_means, filtered_covs,
predicted_means, predicted_covs, _, _) = self.forward_filter(
x, mask=mask)
(smoothed_means, smoothed_covs) = self.backward_smoothing_pass(
filtered_means, filtered_covs,
predicted_means, predicted_covs)
return (smoothed_means, smoothed_covs)
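A minimal end-to-end usage sketch (not from the source; assumes eager execution and a hypothetical toy local-level model):
```
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

model = tfd.LinearGaussianStateSpaceModel(
    num_timesteps=10,
    transition_matrix=tf.linalg.LinearOperatorIdentity(2),
    transition_noise=tfd.MultivariateNormalDiag(scale_diag=[1., 1.]),
    observation_matrix=tf.linalg.LinearOperatorIdentity(2),
    observation_noise=tfd.MultivariateNormalDiag(scale_diag=[1., 1.]),
    initial_state_prior=tfd.MultivariateNormalDiag(scale_diag=[1., 1.]))

x = model.sample()                                  # shape [10, 2]
smoothed_means, smoothed_covs = model.posterior_marginals(x)
# smoothed_means: [10, 2] latent means; smoothed_covs: [10, 2, 2]
```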
|
[
"Run",
"a",
"Kalman",
"smoother",
"to",
"return",
"posterior",
"mean",
"and",
"cov",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L914-L975
|
[
"def",
"posterior_marginals",
"(",
"self",
",",
"x",
",",
"mask",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"smooth\"",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"x\"",
")",
"(",
"_",
",",
"filtered_means",
",",
"filtered_covs",
",",
"predicted_means",
",",
"predicted_covs",
",",
"_",
",",
"_",
")",
"=",
"self",
".",
"forward_filter",
"(",
"x",
",",
"mask",
"=",
"mask",
")",
"(",
"smoothed_means",
",",
"smoothed_covs",
")",
"=",
"self",
".",
"backward_smoothing_pass",
"(",
"filtered_means",
",",
"filtered_covs",
",",
"predicted_means",
",",
"predicted_covs",
")",
"return",
"(",
"smoothed_means",
",",
"smoothed_covs",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
LinearGaussianStateSpaceModel._joint_mean
|
Compute prior means for all variables via dynamic programming.
Returns:
latent_means: Prior means of latent states `z_t`, as a `Tensor`
of shape `batch_shape + [num_timesteps, latent_size]`
observation_means: Prior means of observations
`x_t`, as a `Tensor` of shape `batch_shape + [num_timesteps,
observation_size]`
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def _joint_mean(self):
"""Compute prior means for all variables via dynamic programming.
Returns:
latent_means: Prior means of latent states `z_t`, as a `Tensor`
of shape `batch_shape + [num_timesteps, latent_size]`
observation_means: Prior means of observations
`x_t`, as a `Tensor` of shape `batch_shape + [num_timesteps,
observation_size]`
"""
with tf.name_scope("mean_joint"):
# The initial timestep is a special case, since we sample the
# latent state from the prior rather than the transition model.
with tf.control_dependencies(self.runtime_assertions):
# Broadcast to ensure we represent the full batch shape.
initial_latent_mean = _broadcast_to_shape(
self.initial_state_prior.mean()[..., tf.newaxis],
tf.concat([self.batch_shape_tensor(),
[self.latent_size, 1]], axis=0))
initial_observation_mean = _propagate_mean(
initial_latent_mean,
self.get_observation_matrix_for_timestep(self.initial_step),
self.get_observation_noise_for_timestep(self.initial_step))
mean_step = build_kalman_mean_step(
self.get_transition_matrix_for_timestep,
self.get_transition_noise_for_timestep,
self.get_observation_matrix_for_timestep,
self.get_observation_noise_for_timestep)
# Scan over all timesteps following the initial step.
(latent_means, observation_means) = tf.scan(
mean_step,
elems=tf.range(self.initial_step+1, self.final_step),
initializer=(initial_latent_mean, initial_observation_mean))
# Squish the initial step back on top of the other (scanned) timesteps
latent_means = tf.concat([initial_latent_mean[tf.newaxis, ...],
latent_means], axis=0)
observation_means = tf.concat([initial_observation_mean[tf.newaxis, ...],
observation_means], axis=0)
# Put dimensions back in order. The samples we've computed have
# shape `[num_timesteps, batch_shape, size, 1]`, where `size`
# is the dimension of the latent or observation spaces
# respectively, but we want to return values with shape
# `[batch_shape, num_timesteps, size]`.
latent_means = tf.squeeze(latent_means, -1)
latent_means = distribution_util.move_dimension(latent_means, 0, -2)
observation_means = tf.squeeze(observation_means, -1)
observation_means = distribution_util.move_dimension(
observation_means, 0, -2)
return latent_means, observation_means
|
def _joint_mean(self):
"""Compute prior means for all variables via dynamic programming.
Returns:
latent_means: Prior means of latent states `z_t`, as a `Tensor`
of shape `batch_shape + [num_timesteps, latent_size]`
observation_means: Prior means of observations
`x_t`, as a `Tensor` of shape `batch_shape + [num_timesteps,
observation_size]`
"""
with tf.name_scope("mean_joint"):
# The initial timestep is a special case, since we sample the
# latent state from the prior rather than the transition model.
with tf.control_dependencies(self.runtime_assertions):
# Broadcast to ensure we represent the full batch shape.
initial_latent_mean = _broadcast_to_shape(
self.initial_state_prior.mean()[..., tf.newaxis],
tf.concat([self.batch_shape_tensor(),
[self.latent_size, 1]], axis=0))
initial_observation_mean = _propagate_mean(
initial_latent_mean,
self.get_observation_matrix_for_timestep(self.initial_step),
self.get_observation_noise_for_timestep(self.initial_step))
mean_step = build_kalman_mean_step(
self.get_transition_matrix_for_timestep,
self.get_transition_noise_for_timestep,
self.get_observation_matrix_for_timestep,
self.get_observation_noise_for_timestep)
# Scan over all timesteps following the initial step.
(latent_means, observation_means) = tf.scan(
mean_step,
elems=tf.range(self.initial_step+1, self.final_step),
initializer=(initial_latent_mean, initial_observation_mean))
# Squish the initial step back on top of the other (scanned) timesteps
latent_means = tf.concat([initial_latent_mean[tf.newaxis, ...],
latent_means], axis=0)
observation_means = tf.concat([initial_observation_mean[tf.newaxis, ...],
observation_means], axis=0)
# Put dimensions back in order. The samples we've computed have
# shape `[num_timesteps, batch_shape, size, 1]`, where `size`
# is the dimension of the latent or observation spaces
# respectively, but we want to return values with shape
# `[batch_shape, num_timesteps, size]`.
latent_means = tf.squeeze(latent_means, -1)
latent_means = distribution_util.move_dimension(latent_means, 0, -2)
observation_means = tf.squeeze(observation_means, -1)
observation_means = distribution_util.move_dimension(
observation_means, 0, -2)
return latent_means, observation_means
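The dynamic program above is the standard linear-Gaussian mean recursion. A plain-NumPy sketch of the same computation (hypothetical time-invariant parameters; `b`, `c` stand in for the noise means added by `_propagate_mean`):
```
import numpy as np

A = np.array([[1., 1.], [0., 1.]])   # transition matrix
H = np.array([[1., 0.]])             # observation matrix
b, c = np.zeros(2), np.zeros(1)      # transition/observation noise means
m0 = np.array([0., 1.])              # initial_state_prior mean
num_timesteps = 5

latent_means, observation_means = [m0], [H @ m0 + c]
for _ in range(num_timesteps - 1):
  latent_means.append(A @ latent_means[-1] + b)        # m_z[t] = A m_z[t-1] + b
  observation_means.append(H @ latent_means[-1] + c)   # m_x[t] = H m_z[t] + c
latent_means = np.stack(latent_means)            # [num_timesteps, latent_size]
observation_means = np.stack(observation_means)  # [num_timesteps, observation_size]
```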
|
[
"Compute",
"prior",
"means",
"for",
"all",
"variables",
"via",
"dynamic",
"programming",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L981-L1038
|
[
"def",
"_joint_mean",
"(",
"self",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"mean_joint\"",
")",
":",
"# The initial timestep is a special case, since we sample the",
"# latent state from the prior rather than the transition model.",
"with",
"tf",
".",
"control_dependencies",
"(",
"self",
".",
"runtime_assertions",
")",
":",
"# Broadcast to ensure we represent the full batch shape.",
"initial_latent_mean",
"=",
"_broadcast_to_shape",
"(",
"self",
".",
"initial_state_prior",
".",
"mean",
"(",
")",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
",",
"tf",
".",
"concat",
"(",
"[",
"self",
".",
"batch_shape_tensor",
"(",
")",
",",
"[",
"self",
".",
"latent_size",
",",
"1",
"]",
"]",
",",
"axis",
"=",
"0",
")",
")",
"initial_observation_mean",
"=",
"_propagate_mean",
"(",
"initial_latent_mean",
",",
"self",
".",
"get_observation_matrix_for_timestep",
"(",
"self",
".",
"initial_step",
")",
",",
"self",
".",
"get_observation_noise_for_timestep",
"(",
"self",
".",
"initial_step",
")",
")",
"mean_step",
"=",
"build_kalman_mean_step",
"(",
"self",
".",
"get_transition_matrix_for_timestep",
",",
"self",
".",
"get_transition_noise_for_timestep",
",",
"self",
".",
"get_observation_matrix_for_timestep",
",",
"self",
".",
"get_observation_noise_for_timestep",
")",
"# Scan over all timesteps following the initial step.",
"(",
"latent_means",
",",
"observation_means",
")",
"=",
"tf",
".",
"scan",
"(",
"mean_step",
",",
"elems",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"initial_step",
"+",
"1",
",",
"self",
".",
"final_step",
")",
",",
"initializer",
"=",
"(",
"initial_latent_mean",
",",
"initial_observation_mean",
")",
")",
"# Squish the initial step back on top of the other (scanned) timesteps",
"latent_means",
"=",
"tf",
".",
"concat",
"(",
"[",
"initial_latent_mean",
"[",
"tf",
".",
"newaxis",
",",
"...",
"]",
",",
"latent_means",
"]",
",",
"axis",
"=",
"0",
")",
"observation_means",
"=",
"tf",
".",
"concat",
"(",
"[",
"initial_observation_mean",
"[",
"tf",
".",
"newaxis",
",",
"...",
"]",
",",
"observation_means",
"]",
",",
"axis",
"=",
"0",
")",
"# Put dimensions back in order. The samples we've computed have",
"# shape `[num_timesteps, batch_shape, size, 1]`, where `size`",
"# is the dimension of the latent or observation spaces",
"# respectively, but we want to return values with shape",
"# `[batch_shape, num_timesteps, size]`.",
"latent_means",
"=",
"tf",
".",
"squeeze",
"(",
"latent_means",
",",
"-",
"1",
")",
"latent_means",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"latent_means",
",",
"0",
",",
"-",
"2",
")",
"observation_means",
"=",
"tf",
".",
"squeeze",
"(",
"observation_means",
",",
"-",
"1",
")",
"observation_means",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"observation_means",
",",
"0",
",",
"-",
"2",
")",
"return",
"latent_means",
",",
"observation_means"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
LinearGaussianStateSpaceModel._joint_covariances
|
Compute prior covariances for all variables via dynamic programming.
Returns:
latent_covs: Prior covariance matrices of latent states `z_t`, as
a `Tensor` of shape `batch_shape + [num_timesteps,
latent_size, latent_size]`
observation_covs: Prior covariance matrices of observations
`x_t`, as a `Tensor` of shape `batch_shape + [num_timesteps,
observation_size, observation_size]`
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def _joint_covariances(self):
"""Compute prior covariances for all variables via dynamic programming.
Returns:
latent_covs: Prior covariance matrices of latent states `z_t`, as
a `Tensor` of shape `batch_shape + [num_timesteps,
latent_size, latent_size]`
observation_covs: Prior covariance matrices of observations
`x_t`, as a `Tensor` of shape `batch_shape + [num_timesteps,
observation_size, observation_size]`
"""
with tf.name_scope("covariance_joint"):
with tf.control_dependencies(self.runtime_assertions):
initial_latent_cov = _broadcast_to_shape(
self.initial_state_prior.covariance(),
tf.concat([self.batch_shape_tensor(),
[self.latent_size, self.latent_size]], axis=0))
initial_observation_cov = _propagate_cov(
initial_latent_cov,
self.get_observation_matrix_for_timestep(self.initial_step),
self.get_observation_noise_for_timestep(self.initial_step))
cov_step = build_kalman_cov_step(
self.get_transition_matrix_for_timestep,
self.get_transition_noise_for_timestep,
self.get_observation_matrix_for_timestep,
self.get_observation_noise_for_timestep)
# Scan over all timesteps following the initial step.
(latent_covs, observation_covs) = tf.scan(
cov_step,
elems=tf.range(self.initial_step+1, self.final_step),
initializer=(initial_latent_cov, initial_observation_cov))
# Squish the initial step back on top of the other (scanned) timesteps
latent_covs = tf.concat([initial_latent_cov[tf.newaxis, ...],
latent_covs], axis=0)
observation_covs = tf.concat([initial_observation_cov[tf.newaxis, ...],
observation_covs], axis=0)
# Put dimensions back in order. The samples we've computed have
# shape `[num_timesteps, batch_shape, size, size]`, where `size`
# is the dimension of the state or observation spaces
# respectively, but we want to return values with shape
# `[batch_shape, num_timesteps, size, size]`.
latent_covs = distribution_util.move_dimension(latent_covs, 0, -3)
observation_covs = distribution_util.move_dimension(
observation_covs, 0, -3)
return latent_covs, observation_covs
|
def _joint_covariances(self):
"""Compute prior covariances for all variables via dynamic programming.
Returns:
latent_covs: Prior covariance matrices of latent states `z_t`, as
a `Tensor` of shape `batch_shape + [num_timesteps,
latent_size, latent_size]`
observation_covs: Prior covariance matrices of observations
`x_t`, as a `Tensor` of shape `batch_shape + [num_timesteps,
observation_size, observation_size]`
"""
with tf.name_scope("covariance_joint"):
with tf.control_dependencies(self.runtime_assertions):
initial_latent_cov = _broadcast_to_shape(
self.initial_state_prior.covariance(),
tf.concat([self.batch_shape_tensor(),
[self.latent_size, self.latent_size]], axis=0))
initial_observation_cov = _propagate_cov(
initial_latent_cov,
self.get_observation_matrix_for_timestep(self.initial_step),
self.get_observation_noise_for_timestep(self.initial_step))
cov_step = build_kalman_cov_step(
self.get_transition_matrix_for_timestep,
self.get_transition_noise_for_timestep,
self.get_observation_matrix_for_timestep,
self.get_observation_noise_for_timestep)
# Scan over all timesteps following the initial step.
(latent_covs, observation_covs) = tf.scan(
cov_step,
elems=tf.range(self.initial_step+1, self.final_step),
initializer=(initial_latent_cov, initial_observation_cov))
# Squish the initial step back on top of the other (scanned) timesteps
latent_covs = tf.concat([initial_latent_cov[tf.newaxis, ...],
latent_covs], axis=0)
observation_covs = tf.concat([initial_observation_cov[tf.newaxis, ...],
observation_covs], axis=0)
# Put dimensions back in order. The samples we've computed have
# shape `[num_timesteps, batch_shape, size, size]`, where `size`
# is the dimension of the state or observation spaces
# respectively, but we want to return values with shape
# `[batch_shape, num_timesteps, size, size]`.
latent_covs = distribution_util.move_dimension(latent_covs, 0, -3)
observation_covs = distribution_util.move_dimension(
observation_covs, 0, -3)
return latent_covs, observation_covs
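Analogously, the covariance recursion driven by `cov_step`, sketched under the same hypothetical parameters (`Q`, `R` are the noise covariances):
```
import numpy as np

A = np.array([[1., 1.], [0., 1.]])
H = np.array([[1., 0.]])
Q, R = 0.1 * np.eye(2), 0.5 * np.eye(1)   # transition/observation noise covs
P0 = np.eye(2)                            # initial_state_prior covariance
num_timesteps = 5

latent_covs, observation_covs = [P0], [H @ P0 @ H.T + R]
for _ in range(num_timesteps - 1):
  latent_covs.append(A @ latent_covs[-1] @ A.T + Q)        # P[t] = A P A^T + Q
  observation_covs.append(H @ latent_covs[-1] @ H.T + R)   # S[t] = H P H^T + R
```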
|
[
"Compute",
"prior",
"covariances",
"for",
"all",
"variables",
"via",
"dynamic",
"programming",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1040-L1091
|
[
"def",
"_joint_covariances",
"(",
"self",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"covariance_joint\"",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"self",
".",
"runtime_assertions",
")",
":",
"initial_latent_cov",
"=",
"_broadcast_to_shape",
"(",
"self",
".",
"initial_state_prior",
".",
"covariance",
"(",
")",
",",
"tf",
".",
"concat",
"(",
"[",
"self",
".",
"batch_shape_tensor",
"(",
")",
",",
"[",
"self",
".",
"latent_size",
",",
"self",
".",
"latent_size",
"]",
"]",
",",
"axis",
"=",
"0",
")",
")",
"initial_observation_cov",
"=",
"_propagate_cov",
"(",
"initial_latent_cov",
",",
"self",
".",
"get_observation_matrix_for_timestep",
"(",
"self",
".",
"initial_step",
")",
",",
"self",
".",
"get_observation_noise_for_timestep",
"(",
"self",
".",
"initial_step",
")",
")",
"cov_step",
"=",
"build_kalman_cov_step",
"(",
"self",
".",
"get_transition_matrix_for_timestep",
",",
"self",
".",
"get_transition_noise_for_timestep",
",",
"self",
".",
"get_observation_matrix_for_timestep",
",",
"self",
".",
"get_observation_noise_for_timestep",
")",
"# Scan over all timesteps following the initial step.",
"(",
"latent_covs",
",",
"observation_covs",
")",
"=",
"tf",
".",
"scan",
"(",
"cov_step",
",",
"elems",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"initial_step",
"+",
"1",
",",
"self",
".",
"final_step",
")",
",",
"initializer",
"=",
"(",
"initial_latent_cov",
",",
"initial_observation_cov",
")",
")",
"# Squish the initial step back on top of the other (scanned) timesteps",
"latent_covs",
"=",
"tf",
".",
"concat",
"(",
"[",
"initial_latent_cov",
"[",
"tf",
".",
"newaxis",
",",
"...",
"]",
",",
"latent_covs",
"]",
",",
"axis",
"=",
"0",
")",
"observation_covs",
"=",
"tf",
".",
"concat",
"(",
"[",
"initial_observation_cov",
"[",
"tf",
".",
"newaxis",
",",
"...",
"]",
",",
"observation_covs",
"]",
",",
"axis",
"=",
"0",
")",
"# Put dimensions back in order. The samples we've computed have",
"# shape `[num_timesteps, batch_shape, size, size]`, where `size`",
"# is the dimension of the state or observation spaces",
"# respectively, but we want to return values with shape",
"# `[batch_shape, num_timesteps, size, size]`.",
"latent_covs",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"latent_covs",
",",
"0",
",",
"-",
"3",
")",
"observation_covs",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"observation_covs",
",",
"0",
",",
"-",
"3",
")",
"return",
"latent_covs",
",",
"observation_covs"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
LinearGaussianStateSpaceModel.latents_to_observations
|
Push latent means and covariances forward through the observation model.
Args:
latent_means: float `Tensor` of shape `[..., num_timesteps, latent_size]`
latent_covs: float `Tensor` of shape
`[..., num_timesteps, latent_size, latent_size]`.
Returns:
observation_means: float `Tensor` of shape
`[..., num_timesteps, observation_size]`
observation_covs: float `Tensor` of shape
`[..., num_timesteps, observation_size, observation_size]`
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
def latents_to_observations(self, latent_means, latent_covs):
"""Push latent means and covariances forward through the observation model.
Args:
latent_means: float `Tensor` of shape `[..., num_timesteps, latent_size]`
latent_covs: float `Tensor` of shape
`[..., num_timesteps, latent_size, latent_size]`.
Returns:
observation_means: float `Tensor` of shape
`[..., num_timesteps, observation_size]`
observation_covs: float `Tensor` of shape
`[..., num_timesteps, observation_size, observation_size]`
"""
with tf.name_scope("latents_to_observations"):
pushforward_latents_step = build_pushforward_latents_step(
self.get_observation_matrix_for_timestep,
self.get_observation_noise_for_timestep)
latent_means = distribution_util.move_dimension(
latent_means, source_idx=-2, dest_idx=0)
latent_means = latent_means[..., tf.newaxis] # Make matmul happy.
latent_covs = distribution_util.move_dimension(
latent_covs, source_idx=-3, dest_idx=0)
(initial_observation_mean,
initial_observation_cov) = pushforward_latents_step(
_=None, # Loop body ignores previous observations.
latent_t_mean_cov=(self.initial_step,
latent_means[self.initial_step],
latent_covs[self.initial_step]))
      # TODO(davmre) this loop is embarrassingly parallel; replace with `pfor`.
timesteps = tf.range(self.initial_step,
self.initial_step + self.num_timesteps)
observation_means, observation_covs = tf.scan(
pushforward_latents_step,
elems=(timesteps, latent_means, latent_covs),
initializer=(initial_observation_mean, initial_observation_cov),
parallel_iterations=10000)
observation_means = distribution_util.move_dimension(
observation_means[..., 0], source_idx=0, dest_idx=-2)
observation_covs = distribution_util.move_dimension(
observation_covs, source_idx=0, dest_idx=-3)
return observation_means, observation_covs
|
def latents_to_observations(self, latent_means, latent_covs):
"""Push latent means and covariances forward through the observation model.
Args:
latent_means: float `Tensor` of shape `[..., num_timesteps, latent_size]`
latent_covs: float `Tensor` of shape
`[..., num_timesteps, latent_size, latent_size]`.
Returns:
observation_means: float `Tensor` of shape
`[..., num_timesteps, observation_size]`
observation_covs: float `Tensor` of shape
`[..., num_timesteps, observation_size, observation_size]`
"""
with tf.name_scope("latents_to_observations"):
pushforward_latents_step = build_pushforward_latents_step(
self.get_observation_matrix_for_timestep,
self.get_observation_noise_for_timestep)
latent_means = distribution_util.move_dimension(
latent_means, source_idx=-2, dest_idx=0)
latent_means = latent_means[..., tf.newaxis] # Make matmul happy.
latent_covs = distribution_util.move_dimension(
latent_covs, source_idx=-3, dest_idx=0)
(initial_observation_mean,
initial_observation_cov) = pushforward_latents_step(
_=None, # Loop body ignores previous observations.
latent_t_mean_cov=(self.initial_step,
latent_means[self.initial_step],
latent_covs[self.initial_step]))
      # TODO(davmre) this loop is embarrassingly parallel; replace with `pfor`.
timesteps = tf.range(self.initial_step,
self.initial_step + self.num_timesteps)
observation_means, observation_covs = tf.scan(
pushforward_latents_step,
elems=(timesteps, latent_means, latent_covs),
initializer=(initial_observation_mean, initial_observation_cov),
parallel_iterations=10000)
observation_means = distribution_util.move_dimension(
observation_means[..., 0], source_idx=0, dest_idx=-2)
observation_covs = distribution_util.move_dimension(
observation_covs, source_idx=0, dest_idx=-3)
return observation_means, observation_covs
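Continuing the toy-model sketch above (hedged usage, not from the source), pushing smoothed latent marginals through the observation model is one call:
```
obs_means, obs_covs = model.latents_to_observations(smoothed_means, smoothed_covs)
# For the identity observation model: obs_means [10, 2], obs_covs [10, 2, 2],
# with obs_cov[t] = H P[t] H^T + R.
```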
|
[
"Push",
"latent",
"means",
"and",
"covariances",
"forward",
"through",
"the",
"observation",
"model",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1097-L1145
|
[
"def",
"latents_to_observations",
"(",
"self",
",",
"latent_means",
",",
"latent_covs",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"latents_to_observations\"",
")",
":",
"pushforward_latents_step",
"=",
"build_pushforward_latents_step",
"(",
"self",
".",
"get_observation_matrix_for_timestep",
",",
"self",
".",
"get_observation_noise_for_timestep",
")",
"latent_means",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"latent_means",
",",
"source_idx",
"=",
"-",
"2",
",",
"dest_idx",
"=",
"0",
")",
"latent_means",
"=",
"latent_means",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"# Make matmul happy.",
"latent_covs",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"latent_covs",
",",
"source_idx",
"=",
"-",
"3",
",",
"dest_idx",
"=",
"0",
")",
"(",
"initial_observation_mean",
",",
"initial_observation_cov",
")",
"=",
"pushforward_latents_step",
"(",
"_",
"=",
"None",
",",
"# Loop body ignores previous observations.",
"latent_t_mean_cov",
"=",
"(",
"self",
".",
"initial_step",
",",
"latent_means",
"[",
"self",
".",
"initial_step",
"]",
",",
"latent_covs",
"[",
"self",
".",
"initial_step",
"]",
")",
")",
"# TODO(davmre) this loop is embarassingly parallel; replace with `pfor`.",
"timesteps",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"initial_step",
",",
"self",
".",
"initial_step",
"+",
"self",
".",
"num_timesteps",
")",
"observation_means",
",",
"observation_covs",
"=",
"tf",
".",
"scan",
"(",
"pushforward_latents_step",
",",
"elems",
"=",
"(",
"timesteps",
",",
"latent_means",
",",
"latent_covs",
")",
",",
"initializer",
"=",
"(",
"initial_observation_mean",
",",
"initial_observation_cov",
")",
",",
"parallel_iterations",
"=",
"10000",
")",
"observation_means",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"observation_means",
"[",
"...",
",",
"0",
"]",
",",
"source_idx",
"=",
"0",
",",
"dest_idx",
"=",
"-",
"2",
")",
"observation_covs",
"=",
"distribution_util",
".",
"move_dimension",
"(",
"observation_covs",
",",
"source_idx",
"=",
"0",
",",
"dest_idx",
"=",
"-",
"3",
")",
"return",
"observation_means",
",",
"observation_covs"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_bessel_ive
|
Computes I_v(z)*exp(-abs(z)) using a recurrence relation, where z > 0.
|
tensorflow_probability/python/distributions/von_mises_fisher.py
|
def _bessel_ive(v, z, cache=None):
"""Computes I_v(z)*exp(-abs(z)) using a recurrence relation, where z > 0."""
# TODO(b/67497980): Switch to a more numerically faithful implementation.
z = tf.convert_to_tensor(value=z)
  wrap = lambda result: tf.debugging.check_numerics(
      result, 'besseli{}'.format(v))
if float(v) >= 2:
raise ValueError(
'Evaluating bessel_i by recurrence becomes imprecise for large v')
cache = cache or {}
safe_z = tf.where(z > 0, z, tf.ones_like(z))
if v in cache:
return wrap(cache[v])
if v == 0:
cache[v] = tf.math.bessel_i0e(z)
elif v == 1:
cache[v] = tf.math.bessel_i1e(z)
elif v == 0.5:
# sinh(x)*exp(-abs(x)), sinh(x) = (e^x - e^{-x}) / 2
sinhe = lambda x: (tf.exp(x - tf.abs(x)) - tf.exp(-x - tf.abs(x))) / 2
cache[v] = (
np.sqrt(2 / np.pi) * sinhe(z) *
tf.where(z > 0, tf.math.rsqrt(safe_z), tf.ones_like(safe_z)))
elif v == -0.5:
# cosh(x)*exp(-abs(x)), cosh(x) = (e^x + e^{-x}) / 2
coshe = lambda x: (tf.exp(x - tf.abs(x)) + tf.exp(-x - tf.abs(x))) / 2
cache[v] = (
np.sqrt(2 / np.pi) * coshe(z) *
tf.where(z > 0, tf.math.rsqrt(safe_z), tf.ones_like(safe_z)))
if v <= 1:
return wrap(cache[v])
# Recurrence relation:
cache[v] = (_bessel_ive(v - 2, z, cache) -
(2 * (v - 1)) * _bessel_ive(v - 1, z, cache) / z)
return wrap(cache[v])
|
def _bessel_ive(v, z, cache=None):
"""Computes I_v(z)*exp(-abs(z)) using a recurrence relation, where z > 0."""
# TODO(b/67497980): Switch to a more numerically faithful implementation.
z = tf.convert_to_tensor(value=z)
  wrap = lambda result: tf.debugging.check_numerics(
      result, 'besseli{}'.format(v))
if float(v) >= 2:
raise ValueError(
'Evaluating bessel_i by recurrence becomes imprecise for large v')
cache = cache or {}
safe_z = tf.where(z > 0, z, tf.ones_like(z))
if v in cache:
return wrap(cache[v])
if v == 0:
cache[v] = tf.math.bessel_i0e(z)
elif v == 1:
cache[v] = tf.math.bessel_i1e(z)
elif v == 0.5:
# sinh(x)*exp(-abs(x)), sinh(x) = (e^x - e^{-x}) / 2
sinhe = lambda x: (tf.exp(x - tf.abs(x)) - tf.exp(-x - tf.abs(x))) / 2
cache[v] = (
np.sqrt(2 / np.pi) * sinhe(z) *
tf.where(z > 0, tf.math.rsqrt(safe_z), tf.ones_like(safe_z)))
elif v == -0.5:
# cosh(x)*exp(-abs(x)), cosh(x) = (e^x + e^{-x}) / 2
coshe = lambda x: (tf.exp(x - tf.abs(x)) + tf.exp(-x - tf.abs(x))) / 2
cache[v] = (
np.sqrt(2 / np.pi) * coshe(z) *
tf.where(z > 0, tf.math.rsqrt(safe_z), tf.ones_like(safe_z)))
if v <= 1:
return wrap(cache[v])
# Recurrence relation:
cache[v] = (_bessel_ive(v - 2, z, cache) -
(2 * (v - 1)) * _bessel_ive(v - 1, z, cache) / z)
return wrap(cache[v])
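A quick numerical sanity check against SciPy's exponentially scaled `ive` (illustrative only; assumes SciPy and eager execution):
```
import numpy as np
import tensorflow as tf
from scipy import special

z = np.linspace(0.5, 10., 20)
for v in [0., 0.5, 1., 1.5]:   # v = 1.5 exercises the recurrence branch
  ours = _bessel_ive(v, tf.constant(z, dtype=tf.float64))
  np.testing.assert_allclose(ours.numpy(), special.ive(v, z), rtol=1e-6)
```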
|
[
"Computes",
"I_v",
"(",
"z",
")",
"*",
"exp",
"(",
"-",
"abs",
"(",
"z",
"))",
"using",
"a",
"recurrence",
"relation",
"where",
"z",
">",
"0",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/von_mises_fisher.py#L36-L73
|
[
"def",
"_bessel_ive",
"(",
"v",
",",
"z",
",",
"cache",
"=",
"None",
")",
":",
"# TODO(b/67497980): Switch to a more numerically faithful implementation.",
"z",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"z",
")",
"wrap",
"=",
"lambda",
"result",
":",
"tf",
".",
"debugging",
".",
"check_numerics",
"(",
"result",
",",
"'besseli{}'",
".",
"format",
"(",
"v",
")",
")",
"if",
"float",
"(",
"v",
")",
">=",
"2",
":",
"raise",
"ValueError",
"(",
"'Evaluating bessel_i by recurrence becomes imprecise for large v'",
")",
"cache",
"=",
"cache",
"or",
"{",
"}",
"safe_z",
"=",
"tf",
".",
"where",
"(",
"z",
">",
"0",
",",
"z",
",",
"tf",
".",
"ones_like",
"(",
"z",
")",
")",
"if",
"v",
"in",
"cache",
":",
"return",
"wrap",
"(",
"cache",
"[",
"v",
"]",
")",
"if",
"v",
"==",
"0",
":",
"cache",
"[",
"v",
"]",
"=",
"tf",
".",
"math",
".",
"bessel_i0e",
"(",
"z",
")",
"elif",
"v",
"==",
"1",
":",
"cache",
"[",
"v",
"]",
"=",
"tf",
".",
"math",
".",
"bessel_i1e",
"(",
"z",
")",
"elif",
"v",
"==",
"0.5",
":",
"# sinh(x)*exp(-abs(x)), sinh(x) = (e^x - e^{-x}) / 2",
"sinhe",
"=",
"lambda",
"x",
":",
"(",
"tf",
".",
"exp",
"(",
"x",
"-",
"tf",
".",
"abs",
"(",
"x",
")",
")",
"-",
"tf",
".",
"exp",
"(",
"-",
"x",
"-",
"tf",
".",
"abs",
"(",
"x",
")",
")",
")",
"/",
"2",
"cache",
"[",
"v",
"]",
"=",
"(",
"np",
".",
"sqrt",
"(",
"2",
"/",
"np",
".",
"pi",
")",
"*",
"sinhe",
"(",
"z",
")",
"*",
"tf",
".",
"where",
"(",
"z",
">",
"0",
",",
"tf",
".",
"math",
".",
"rsqrt",
"(",
"safe_z",
")",
",",
"tf",
".",
"ones_like",
"(",
"safe_z",
")",
")",
")",
"elif",
"v",
"==",
"-",
"0.5",
":",
"# cosh(x)*exp(-abs(x)), cosh(x) = (e^x + e^{-x}) / 2",
"coshe",
"=",
"lambda",
"x",
":",
"(",
"tf",
".",
"exp",
"(",
"x",
"-",
"tf",
".",
"abs",
"(",
"x",
")",
")",
"+",
"tf",
".",
"exp",
"(",
"-",
"x",
"-",
"tf",
".",
"abs",
"(",
"x",
")",
")",
")",
"/",
"2",
"cache",
"[",
"v",
"]",
"=",
"(",
"np",
".",
"sqrt",
"(",
"2",
"/",
"np",
".",
"pi",
")",
"*",
"coshe",
"(",
"z",
")",
"*",
"tf",
".",
"where",
"(",
"z",
">",
"0",
",",
"tf",
".",
"math",
".",
"rsqrt",
"(",
"safe_z",
")",
",",
"tf",
".",
"ones_like",
"(",
"safe_z",
")",
")",
")",
"if",
"v",
"<=",
"1",
":",
"return",
"wrap",
"(",
"cache",
"[",
"v",
"]",
")",
"# Recurrence relation:",
"cache",
"[",
"v",
"]",
"=",
"(",
"_bessel_ive",
"(",
"v",
"-",
"2",
",",
"z",
",",
"cache",
")",
"-",
"(",
"2",
"*",
"(",
"v",
"-",
"1",
")",
")",
"*",
"_bessel_ive",
"(",
"v",
"-",
"1",
",",
"z",
",",
"cache",
")",
"/",
"z",
")",
"return",
"wrap",
"(",
"cache",
"[",
"v",
"]",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
VonMisesFisher._log_normalization
|
Computes the log-normalizer of the distribution.
|
tensorflow_probability/python/distributions/von_mises_fisher.py
|
def _log_normalization(self):
"""Computes the log-normalizer of the distribution."""
event_dim = tf.compat.dimension_value(self.event_shape[0])
if event_dim is None:
raise ValueError('vMF _log_normalizer currently only supports '
'statically known event shape')
safe_conc = tf.where(self.concentration > 0,
self.concentration,
tf.ones_like(self.concentration))
safe_lognorm = ((event_dim / 2 - 1) * tf.math.log(safe_conc) -
(event_dim / 2) * np.log(2 * np.pi) -
tf.math.log(_bessel_ive(event_dim / 2 - 1, safe_conc)) -
tf.abs(safe_conc))
log_nsphere_surface_area = (
np.log(2.) + (event_dim / 2) * np.log(np.pi) -
tf.math.lgamma(tf.cast(event_dim / 2, self.dtype)))
return tf.where(self.concentration > 0,
-safe_lognorm,
log_nsphere_surface_area * tf.ones_like(safe_lognorm))
|
def _log_normalization(self):
"""Computes the log-normalizer of the distribution."""
event_dim = tf.compat.dimension_value(self.event_shape[0])
if event_dim is None:
raise ValueError('vMF _log_normalizer currently only supports '
'statically known event shape')
safe_conc = tf.where(self.concentration > 0,
self.concentration,
tf.ones_like(self.concentration))
safe_lognorm = ((event_dim / 2 - 1) * tf.math.log(safe_conc) -
(event_dim / 2) * np.log(2 * np.pi) -
tf.math.log(_bessel_ive(event_dim / 2 - 1, safe_conc)) -
tf.abs(safe_conc))
log_nsphere_surface_area = (
np.log(2.) + (event_dim / 2) * np.log(np.pi) -
tf.math.lgamma(tf.cast(event_dim / 2, self.dtype)))
return tf.where(self.concentration > 0,
-safe_lognorm,
log_nsphere_surface_area * tf.ones_like(safe_lognorm))
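For reference, the quantity being computed is the standard vMF normalizer (not quoted from the source): the density on the unit sphere `S^{d-1}` is `p(x) = C_d(kappa) exp(kappa mu^T x)` with
```
C_d(\kappa) = \frac{\kappa^{d/2-1}}{(2\pi)^{d/2}\, I_{d/2-1}(\kappa)},
\qquad
\lim_{\kappa \to 0} -\log C_d(\kappa) = \log \frac{2\pi^{d/2}}{\Gamma(d/2)}
```
so `safe_lognorm` is `log C_d(kappa)` (written via the scaled Bessel `ive` for stability) and the `kappa = 0` branch returns the log surface area of the sphere.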
|
[
"Computes",
"the",
"log",
"-",
"normalizer",
"of",
"the",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/von_mises_fisher.py#L262-L280
|
[
"def",
"_log_normalization",
"(",
"self",
")",
":",
"event_dim",
"=",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"self",
".",
"event_shape",
"[",
"0",
"]",
")",
"if",
"event_dim",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'vMF _log_normalizer currently only supports '",
"'statically known event shape'",
")",
"safe_conc",
"=",
"tf",
".",
"where",
"(",
"self",
".",
"concentration",
">",
"0",
",",
"self",
".",
"concentration",
",",
"tf",
".",
"ones_like",
"(",
"self",
".",
"concentration",
")",
")",
"safe_lognorm",
"=",
"(",
"(",
"event_dim",
"/",
"2",
"-",
"1",
")",
"*",
"tf",
".",
"math",
".",
"log",
"(",
"safe_conc",
")",
"-",
"(",
"event_dim",
"/",
"2",
")",
"*",
"np",
".",
"log",
"(",
"2",
"*",
"np",
".",
"pi",
")",
"-",
"tf",
".",
"math",
".",
"log",
"(",
"_bessel_ive",
"(",
"event_dim",
"/",
"2",
"-",
"1",
",",
"safe_conc",
")",
")",
"-",
"tf",
".",
"abs",
"(",
"safe_conc",
")",
")",
"log_nsphere_surface_area",
"=",
"(",
"np",
".",
"log",
"(",
"2.",
")",
"+",
"(",
"event_dim",
"/",
"2",
")",
"*",
"np",
".",
"log",
"(",
"np",
".",
"pi",
")",
"-",
"tf",
".",
"math",
".",
"lgamma",
"(",
"tf",
".",
"cast",
"(",
"event_dim",
"/",
"2",
",",
"self",
".",
"dtype",
")",
")",
")",
"return",
"tf",
".",
"where",
"(",
"self",
".",
"concentration",
">",
"0",
",",
"-",
"safe_lognorm",
",",
"log_nsphere_surface_area",
"*",
"tf",
".",
"ones_like",
"(",
"safe_lognorm",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
VonMisesFisher._maybe_assert_valid_sample
|
Check samples for proper shape, values, then return tensor version.
|
tensorflow_probability/python/distributions/von_mises_fisher.py
|
def _maybe_assert_valid_sample(self, samples):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args:
return samples
with tf.control_dependencies([
assert_util.assert_near(
1.,
tf.linalg.norm(tensor=samples, axis=-1),
message='samples must be unit length'),
assert_util.assert_equal(
tf.shape(input=samples)[-1:],
self.event_shape_tensor(),
message=('samples must have innermost dimension matching that of '
'`self.mean_direction`')),
]):
return tf.identity(samples)
|
def _maybe_assert_valid_sample(self, samples):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args:
return samples
with tf.control_dependencies([
assert_util.assert_near(
1.,
tf.linalg.norm(tensor=samples, axis=-1),
message='samples must be unit length'),
assert_util.assert_equal(
tf.shape(input=samples)[-1:],
self.event_shape_tensor(),
message=('samples must have innermost dimension matching that of '
'`self.mean_direction`')),
]):
return tf.identity(samples)
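A hedged usage sketch of the resulting validation (hypothetical values; assumes eager execution):
```
import tensorflow_probability as tfp
tfd = tfp.distributions

vmf = tfd.VonMisesFisher(mean_direction=[0., 1.], concentration=1.,
                         validate_args=True)
vmf.log_prob([0.6, 0.8])   # OK: unit-length 2-vector
# vmf.log_prob([3., 4.])   # raises: 'samples must be unit length'
```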
|
[
"Check",
"counts",
"for",
"proper",
"shape",
"values",
"then",
"return",
"tensor",
"version",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/von_mises_fisher.py#L285-L300
|
[
"def",
"_maybe_assert_valid_sample",
"(",
"self",
",",
"samples",
")",
":",
"if",
"not",
"self",
".",
"validate_args",
":",
"return",
"samples",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"assert_util",
".",
"assert_near",
"(",
"1.",
",",
"tf",
".",
"linalg",
".",
"norm",
"(",
"tensor",
"=",
"samples",
",",
"axis",
"=",
"-",
"1",
")",
",",
"message",
"=",
"'samples must be unit length'",
")",
",",
"assert_util",
".",
"assert_equal",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"samples",
")",
"[",
"-",
"1",
":",
"]",
",",
"self",
".",
"event_shape_tensor",
"(",
")",
",",
"message",
"=",
"(",
"'samples must have innermost dimension matching that of '",
"'`self.mean_direction`'",
")",
")",
",",
"]",
")",
":",
"return",
"tf",
".",
"identity",
"(",
"samples",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
VonMisesFisher._mode
|
The mode of the von Mises-Fisher distribution is the mean direction.
|
tensorflow_probability/python/distributions/von_mises_fisher.py
|
def _mode(self):
"""The mode of the von Mises-Fisher distribution is the mean direction."""
return (self.mean_direction +
tf.zeros_like(self.concentration)[..., tf.newaxis])
|
def _mode(self):
"""The mode of the von Mises-Fisher distribution is the mean direction."""
return (self.mean_direction +
tf.zeros_like(self.concentration)[..., tf.newaxis])
|
[
"The",
"mode",
"of",
"the",
"von",
"Mises",
"-",
"Fisher",
"distribution",
"is",
"the",
"mean",
"direction",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/von_mises_fisher.py#L302-L305
|
[
"def",
"_mode",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"mean_direction",
"+",
"tf",
".",
"zeros_like",
"(",
"self",
".",
"concentration",
")",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
VonMisesFisher._rotate
|
Applies a Householder rotation to `samples`.
|
tensorflow_probability/python/distributions/von_mises_fisher.py
|
def _rotate(self, samples):
"""Applies a Householder rotation to `samples`."""
event_dim = (
tf.compat.dimension_value(self.event_shape[0]) or
self._event_shape_tensor()[0])
basis = tf.concat([[1.], tf.zeros([event_dim - 1], dtype=self.dtype)],
axis=0)
u = tf.nn.l2_normalize(basis - self.mean_direction, axis=-1)
return samples - 2 * tf.reduce_sum(
input_tensor=samples * u, axis=-1, keepdims=True) * u
|
def _rotate(self, samples):
"""Applies a Householder rotation to `samples`."""
event_dim = (
tf.compat.dimension_value(self.event_shape[0]) or
self._event_shape_tensor()[0])
basis = tf.concat([[1.], tf.zeros([event_dim - 1], dtype=self.dtype)],
axis=0)
u = tf.nn.l2_normalize(basis - self.mean_direction, axis=-1)
return samples - 2 * tf.reduce_sum(
input_tensor=samples * u, axis=-1, keepdims=True) * u
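The Householder reflection here maps the first basis vector `e1` onto `mean_direction` (both unit-norm). A NumPy sketch of that identity (hypothetical values):
```
import numpy as np

mu = np.array([0.6, 0.8])   # unit-norm mean direction
e1 = np.array([1., 0.])
u = e1 - mu
u /= np.linalg.norm(u)      # reflection axis
reflect = lambda s: s - 2. * np.sum(s * u, axis=-1, keepdims=True) * u
np.testing.assert_allclose(reflect(e1[np.newaxis]), mu[np.newaxis], atol=1e-12)
```
When `mean_direction` equals `e1`, `basis - mean_direction` is the zero vector; `tf.nn.l2_normalize` then returns zeros, so the reflection degrades gracefully to the identity.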
|
[
"Applies",
"a",
"Householder",
"rotation",
"to",
"samples",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/von_mises_fisher.py#L347-L356
|
[
"def",
"_rotate",
"(",
"self",
",",
"samples",
")",
":",
"event_dim",
"=",
"(",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"self",
".",
"event_shape",
"[",
"0",
"]",
")",
"or",
"self",
".",
"_event_shape_tensor",
"(",
")",
"[",
"0",
"]",
")",
"basis",
"=",
"tf",
".",
"concat",
"(",
"[",
"[",
"1.",
"]",
",",
"tf",
".",
"zeros",
"(",
"[",
"event_dim",
"-",
"1",
"]",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
"]",
",",
"axis",
"=",
"0",
")",
",",
"u",
"=",
"tf",
".",
"nn",
".",
"l2_normalize",
"(",
"basis",
"-",
"self",
".",
"mean_direction",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"samples",
"-",
"2",
"*",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"samples",
"*",
"u",
",",
"axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"True",
")",
"*",
"u"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
VonMisesFisher._sample_3d
|
Specialized inversion sampler for 3D.
|
tensorflow_probability/python/distributions/von_mises_fisher.py
|
def _sample_3d(self, n, seed=None):
"""Specialized inversion sampler for 3D."""
seed = seed_stream.SeedStream(seed, salt='von_mises_fisher_3d')
u_shape = tf.concat([[n], self._batch_shape_tensor()], axis=0)
z = tf.random.uniform(u_shape, seed=seed(), dtype=self.dtype)
# TODO(bjp): Higher-order odd dim analytic CDFs are available in [1], could
# be bisected for bounded sampling runtime (i.e. not rejection sampling).
# [1]: Inversion sampler via: https://ieeexplore.ieee.org/document/7347705/
# The inversion is: u = 1 + log(z + (1-z)*exp(-2*kappa)) / kappa
# We must protect against both kappa and z being zero.
safe_conc = tf.where(self.concentration > 0,
self.concentration,
tf.ones_like(self.concentration))
safe_z = tf.where(z > 0, z, tf.ones_like(z))
safe_u = 1 + tf.reduce_logsumexp(
input_tensor=[
tf.math.log(safe_z),
tf.math.log1p(-safe_z) - 2 * safe_conc
],
axis=0) / safe_conc
# Limit of the above expression as kappa->0 is 2*z-1
u = tf.where(self.concentration > tf.zeros_like(safe_u), safe_u,
2 * z - 1)
# Limit of the expression as z->0 is -1.
u = tf.where(tf.equal(z, 0), -tf.ones_like(u), u)
if not self._allow_nan_stats:
u = tf.debugging.check_numerics(u, 'u in _sample_3d')
return u[..., tf.newaxis]
|
def _sample_3d(self, n, seed=None):
"""Specialized inversion sampler for 3D."""
seed = seed_stream.SeedStream(seed, salt='von_mises_fisher_3d')
u_shape = tf.concat([[n], self._batch_shape_tensor()], axis=0)
z = tf.random.uniform(u_shape, seed=seed(), dtype=self.dtype)
# TODO(bjp): Higher-order odd dim analytic CDFs are available in [1], could
# be bisected for bounded sampling runtime (i.e. not rejection sampling).
# [1]: Inversion sampler via: https://ieeexplore.ieee.org/document/7347705/
# The inversion is: u = 1 + log(z + (1-z)*exp(-2*kappa)) / kappa
# We must protect against both kappa and z being zero.
safe_conc = tf.where(self.concentration > 0,
self.concentration,
tf.ones_like(self.concentration))
safe_z = tf.where(z > 0, z, tf.ones_like(z))
safe_u = 1 + tf.reduce_logsumexp(
input_tensor=[
tf.math.log(safe_z),
tf.math.log1p(-safe_z) - 2 * safe_conc
],
axis=0) / safe_conc
# Limit of the above expression as kappa->0 is 2*z-1
u = tf.where(self.concentration > tf.zeros_like(safe_u), safe_u,
2 * z - 1)
# Limit of the expression as z->0 is -1.
u = tf.where(tf.equal(z, 0), -tf.ones_like(u), u)
if not self._allow_nan_stats:
u = tf.debugging.check_numerics(u, 'u in _sample_3d')
return u[..., tf.newaxis]
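A NumPy transcription of the inversion formula from the comments (the `reduce_logsumexp` in the code is a numerically stabler form of the same expression; values hypothetical):
```
import numpy as np

kappa = 2.0
z = np.random.uniform(size=5)
u = 1. + np.log(z + (1. - z) * np.exp(-2. * kappa)) / kappa   # in [-1, 1]
# As kappa -> 0 this tends to 2*z - 1, the uniform case handled by tf.where.
```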
|
[
"Specialized",
"inversion",
"sampler",
"for",
"3D",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/von_mises_fisher.py#L358-L385
|
[
"def",
"_sample_3d",
"(",
"self",
",",
"n",
",",
"seed",
"=",
"None",
")",
":",
"seed",
"=",
"seed_stream",
".",
"SeedStream",
"(",
"seed",
",",
"salt",
"=",
"'von_mises_fisher_3d'",
")",
"u_shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"[",
"n",
"]",
",",
"self",
".",
"_batch_shape_tensor",
"(",
")",
"]",
",",
"axis",
"=",
"0",
")",
"z",
"=",
"tf",
".",
"random",
".",
"uniform",
"(",
"u_shape",
",",
"seed",
"=",
"seed",
"(",
")",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
"# TODO(bjp): Higher-order odd dim analytic CDFs are available in [1], could",
"# be bisected for bounded sampling runtime (i.e. not rejection sampling).",
"# [1]: Inversion sampler via: https://ieeexplore.ieee.org/document/7347705/",
"# The inversion is: u = 1 + log(z + (1-z)*exp(-2*kappa)) / kappa",
"# We must protect against both kappa and z being zero.",
"safe_conc",
"=",
"tf",
".",
"where",
"(",
"self",
".",
"concentration",
">",
"0",
",",
"self",
".",
"concentration",
",",
"tf",
".",
"ones_like",
"(",
"self",
".",
"concentration",
")",
")",
"safe_z",
"=",
"tf",
".",
"where",
"(",
"z",
">",
"0",
",",
"z",
",",
"tf",
".",
"ones_like",
"(",
"z",
")",
")",
"safe_u",
"=",
"1",
"+",
"tf",
".",
"reduce_logsumexp",
"(",
"input_tensor",
"=",
"[",
"tf",
".",
"math",
".",
"log",
"(",
"safe_z",
")",
",",
"tf",
".",
"math",
".",
"log1p",
"(",
"-",
"safe_z",
")",
"-",
"2",
"*",
"safe_conc",
"]",
",",
"axis",
"=",
"0",
")",
"/",
"safe_conc",
"# Limit of the above expression as kappa->0 is 2*z-1",
"u",
"=",
"tf",
".",
"where",
"(",
"self",
".",
"concentration",
">",
"tf",
".",
"zeros_like",
"(",
"safe_u",
")",
",",
"safe_u",
",",
"2",
"*",
"z",
"-",
"1",
")",
"# Limit of the expression as z->0 is -1.",
"u",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"equal",
"(",
"z",
",",
"0",
")",
",",
"-",
"tf",
".",
"ones_like",
"(",
"u",
")",
",",
"u",
")",
"if",
"not",
"self",
".",
"_allow_nan_stats",
":",
"u",
"=",
"tf",
".",
"debugging",
".",
"check_numerics",
"(",
"u",
",",
"'u in _sample_3d'",
")",
"return",
"u",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_copy_fn
|
Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
|
tensorflow_probability/python/distributions/distribution.py
|
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: {}".format(fn))
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
|
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: {}".format(fn))
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
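A small usage sketch (hypothetical function) showing that the copy is independent of the original:
```
def greet(name, punctuation="!"):
  return "hi " + name + punctuation

greet_copy = _copy_fn(greet)
greet_copy.__defaults__ = ("?",)   # mutate only the copy
assert greet("bob") == "hi bob!"
assert greet_copy("bob") == "hi bob?"
```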
|
[
"Create",
"a",
"deep",
"copy",
"of",
"fn",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/distribution.py#L75-L104
|
[
"def",
"_copy_fn",
"(",
"fn",
")",
":",
"if",
"not",
"callable",
"(",
"fn",
")",
":",
"raise",
"TypeError",
"(",
"\"fn is not callable: {}\"",
".",
"format",
"(",
"fn",
")",
")",
"# The blessed way to copy a function. copy.deepcopy fails to create a",
"# non-reference copy. Since:",
"# types.FunctionType == type(lambda: None),",
"# and the docstring for the function type states:",
"#",
"# function(code, globals[, name[, argdefs[, closure]]])",
"#",
"# Create a function object from a code object and a dictionary.",
"# ...",
"#",
"# Here we can use this to create a new function with the old function's",
"# code, globals, closure, etc.",
"return",
"types",
".",
"FunctionType",
"(",
"code",
"=",
"fn",
".",
"__code__",
",",
"globals",
"=",
"fn",
".",
"__globals__",
",",
"name",
"=",
"fn",
".",
"__name__",
",",
"argdefs",
"=",
"fn",
".",
"__defaults__",
",",
"closure",
"=",
"fn",
".",
"__closure__",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_update_docstring
|
Update old_str by inserting append_str just before the "Args:" section.
|
tensorflow_probability/python/distributions/distribution.py
|
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str = old_str or ""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
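A quick sketch of the splicing behavior, assuming `_update_docstring` above is in scope (the sample docstring is made up):

```python
old = "Does a thing.\nArgs:\n  x: an input."
print(_update_docstring(old, "Note:\n  spliced in before Args."))
# Does a thing.
#
#     Note:
#       spliced in before Args.
#
# Args:
#   x: an input.
```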
|
[
"Update",
"old_str",
"by",
"inserting",
"append_str",
"just",
"before",
"the",
"Args",
":",
"section",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/distribution.py#L107-L126
|
[
"def",
"_update_docstring",
"(",
"old_str",
",",
"append_str",
")",
":",
"old_str",
"=",
"old_str",
"or",
"\"\"",
"old_str_lines",
"=",
"old_str",
".",
"split",
"(",
"\"\\n\"",
")",
"# Step 0: Prepend spaces to all lines of append_str. This is",
"# necessary for correct markdown generation.",
"append_str",
"=",
"\"\\n\"",
".",
"join",
"(",
"\" %s\"",
"%",
"line",
"for",
"line",
"in",
"append_str",
".",
"split",
"(",
"\"\\n\"",
")",
")",
"# Step 1: Find mention of \"Args\":",
"has_args_ix",
"=",
"[",
"ix",
"for",
"ix",
",",
"line",
"in",
"enumerate",
"(",
"old_str_lines",
")",
"if",
"line",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"==",
"\"args:\"",
"]",
"if",
"has_args_ix",
":",
"final_args_ix",
"=",
"has_args_ix",
"[",
"-",
"1",
"]",
"return",
"(",
"\"\\n\"",
".",
"join",
"(",
"old_str_lines",
"[",
":",
"final_args_ix",
"]",
")",
"+",
"\"\\n\\n\"",
"+",
"append_str",
"+",
"\"\\n\\n\"",
"+",
"\"\\n\"",
".",
"join",
"(",
"old_str_lines",
"[",
"final_args_ix",
":",
"]",
")",
")",
"else",
":",
"return",
"old_str",
"+",
"\"\\n\\n\"",
"+",
"append_str"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_convert_to_tensor
|
Converts the given `value` to a (structure of) `Tensor`.
This function converts Python objects of various types to a (structure of)
`Tensor` objects. It accepts `Tensor` objects, numpy arrays, Python lists, and
Python scalars.
Args:
value: An object whose structure matches that of `dtype` and/or
`dtype_hint` and for which each leaf has a registered `Tensor` conversion
function.
dtype: Optional (structure of) element type for the returned tensor. If
missing, the type is inferred from the type of `value`.
dtype_hint: Optional (structure of) element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a dtype in
mind when converting to a tensor, so dtype_hint can be used as a soft
preference. If the conversion to `dtype_hint` is not possible, this
argument has no effect.
name: Optional name to use if a new `Tensor` is created.
Returns:
tensor: A (structure of) `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
|
tensorflow_probability/python/distributions/distribution.py
|
def _convert_to_tensor(value, dtype=None, dtype_hint=None, name=None):
"""Converts the given `value` to a (structure of) `Tensor`.
This function converts Python objects of various types to a (structure of)
`Tensor` objects. It accepts `Tensor` objects, numpy arrays, Python lists, and
Python scalars.
Args:
value: An object whose structure matches that of `dtype` and/or
`dtype_hint` and for which each leaf has a registered `Tensor` conversion
function.
dtype: Optional (structure of) element type for the returned tensor. If
missing, the type is inferred from the type of `value`.
dtype_hint: Optional (structure of) element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a dtype in
mind when converting to a tensor, so dtype_hint can be used as a soft
preference. If the conversion to `dtype_hint` is not possible, this
argument has no effect.
name: Optional name to use if a new `Tensor` is created.
Returns:
tensor: A (structure of) `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
if (tf.nest.is_nested(dtype) or
tf.nest.is_nested(dtype_hint)):
if dtype is None:
fn = lambda v, pd: tf.convert_to_tensor(v, dtype_hint=pd, name=name)
return tf.nest.map_structure(fn, value, dtype_hint)
elif dtype_hint is None:
fn = lambda v, d: tf.convert_to_tensor(v, dtype=d, name=name)
return tf.nest.map_structure(fn, value, dtype)  # zip with `dtype`; mapping over the (None) `dtype_hint` here was a bug
else:
fn = lambda v, d, pd: tf.convert_to_tensor( # pylint: disable=g-long-lambda
v, dtype=d, dtype_hint=pd, name=name)
return tf.nest.map_structure(fn, value, dtype, dtype_hint)
return tf.convert_to_tensor(
value=value, dtype=dtype, dtype_hint=dtype_hint, name=name)
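With the mapping fix above, a small sketch of the structured path; this assumes TensorFlow 2.x and `_convert_to_tensor` in scope. Each leaf of `values` is converted with the dtype at the matching position in `dtypes`:

```python
import tensorflow as tf

values = {'x': [1, 2, 3], 'y': 2.5}
dtypes = {'x': tf.int64, 'y': tf.float32}
out = _convert_to_tensor(values, dtype=dtypes)
print(out['x'].dtype, out['y'].dtype)  # <dtype: 'int64'> <dtype: 'float32'>
```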
|
[
"Converts",
"the",
"given",
"value",
"to",
"a",
"(",
"structure",
"of",
")",
"Tensor",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/distribution.py#L129-L170
|
[
"def",
"_convert_to_tensor",
"(",
"value",
",",
"dtype",
"=",
"None",
",",
"dtype_hint",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"(",
"tf",
".",
"nest",
".",
"is_nested",
"(",
"dtype",
")",
"or",
"tf",
".",
"nest",
".",
"is_nested",
"(",
"dtype_hint",
")",
")",
":",
"if",
"dtype",
"is",
"None",
":",
"fn",
"=",
"lambda",
"v",
",",
"pd",
":",
"tf",
".",
"convert_to_tensor",
"(",
"v",
",",
"dtype_hint",
"=",
"pd",
",",
"name",
"=",
"name",
")",
"return",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"fn",
",",
"value",
",",
"dtype_hint",
")",
"elif",
"dtype_hint",
"is",
"None",
":",
"fn",
"=",
"lambda",
"v",
",",
"d",
":",
"tf",
".",
"convert_to_tensor",
"(",
"v",
",",
"dtype",
"=",
"d",
",",
"name",
"=",
"name",
")",
"return",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"fn",
",",
"value",
",",
"dtype_hint",
")",
"else",
":",
"fn",
"=",
"lambda",
"v",
",",
"d",
",",
"pd",
":",
"tf",
".",
"convert_to_tensor",
"(",
"# pylint: disable=g-long-lambda",
"v",
",",
"dtype",
"=",
"d",
",",
"dtype_hint",
"=",
"pd",
",",
"name",
"=",
"name",
")",
"return",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"fn",
",",
"value",
",",
"dtype",
",",
"dtype_hint",
")",
"return",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"value",
",",
"dtype",
"=",
"dtype",
",",
"dtype_hint",
"=",
"dtype_hint",
",",
"name",
"=",
"name",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_remove_dict_keys_with_value
|
Removes `dict` keys which have `self` as value.
|
tensorflow_probability/python/distributions/distribution.py
|
def _remove_dict_keys_with_value(dict_, val):
"""Removes `dict` keys which have have `self` as value."""
return {k: v for k, v in dict_.items() if v is not val}
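Note the filter compares by object *identity* (`is not`), not equality, which is what makes it safe for stripping a `self` reference out of captured kwargs. A tiny sketch:

```python
sentinel = object()
d = {'a': 1, 'b': sentinel, 'self': sentinel}
print(_remove_dict_keys_with_value(d, sentinel))  # {'a': 1}
```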
|
[
"Removes",
"dict",
"keys",
"which",
"have",
"have",
"self",
"as",
"value",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/distribution.py#L173-L175
|
[
"def",
"_remove_dict_keys_with_value",
"(",
"dict_",
",",
"val",
")",
":",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"dict_",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"val",
"}"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_recursively_replace_dict_for_pretty_dict
|
Recursively replace `dict`s with `_PrettyDict`.
|
tensorflow_probability/python/distributions/distribution.py
|
def _recursively_replace_dict_for_pretty_dict(x):
"""Recursively replace `dict`s with `_PrettyDict`."""
# We use "PrettyDict" because collections.OrderedDict repr/str has the word
# "OrderedDict" in it. We only want to print "OrderedDict" if in fact the
# input really is an OrderedDict.
if isinstance(x, dict):
return _PrettyDict({
k: _recursively_replace_dict_for_pretty_dict(v)
for k, v in x.items()})
if (isinstance(x, collections.Sequence) and
not isinstance(x, six.string_types)):
args = (_recursively_replace_dict_for_pretty_dict(x_) for x_ in x)
is_named_tuple = (isinstance(x, tuple) and
hasattr(x, "_asdict") and
hasattr(x, "_fields"))
return type(x)(*args) if is_named_tuple else type(x)(args)
if isinstance(x, collections.Mapping):
return type(x)(**{k: _recursively_replace_dict_for_pretty_dict(v)
for k, v in x.items()})
return x
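For intuition, a sketch with a hypothetical stand-in for `_PrettyDict` (its real definition is not part of this excerpt). The quoted helper uses the legacy `collections.Sequence`/`collections.Mapping` aliases, removed in Python 3.10, so the demo shims them first:

```python
import collections
import collections.abc
import six  # the quoted helper also needs six.string_types

if not hasattr(collections, 'Sequence'):  # Python >= 3.10 compat shim
    collections.Sequence = collections.abc.Sequence
    collections.Mapping = collections.abc.Mapping

class _PrettyDict(dict):
    """Hypothetical stand-in for the real _PrettyDict."""

Point = collections.namedtuple('Point', ['x', 'y'])
out = _recursively_replace_dict_for_pretty_dict(
    {'a': [{'b': 1}, Point(x={'c': 2}, y=3)]})
assert type(out) is _PrettyDict
assert type(out['a'][0]) is _PrettyDict    # dict inside a list
assert type(out['a'][1].x) is _PrettyDict  # dict inside a namedtuple
```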
|
[
"Recursively",
"replace",
"dict",
"s",
"with",
"_PrettyDict",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/distribution.py#L1392-L1411
|
[
"def",
"_recursively_replace_dict_for_pretty_dict",
"(",
"x",
")",
":",
"# We use \"PrettyDict\" because collections.OrderedDict repr/str has the word",
"# \"OrderedDict\" in it. We only want to print \"OrderedDict\" if in fact the",
"# input really is an OrderedDict.",
"if",
"isinstance",
"(",
"x",
",",
"dict",
")",
":",
"return",
"_PrettyDict",
"(",
"{",
"k",
":",
"_recursively_replace_dict_for_pretty_dict",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"x",
".",
"items",
"(",
")",
"}",
")",
"if",
"(",
"isinstance",
"(",
"x",
",",
"collections",
".",
"Sequence",
")",
"and",
"not",
"isinstance",
"(",
"x",
",",
"six",
".",
"string_types",
")",
")",
":",
"args",
"=",
"(",
"_recursively_replace_dict_for_pretty_dict",
"(",
"x_",
")",
"for",
"x_",
"in",
"x",
")",
"is_named_tuple",
"=",
"(",
"isinstance",
"(",
"x",
",",
"tuple",
")",
"and",
"hasattr",
"(",
"x",
",",
"\"_asdict\"",
")",
"and",
"hasattr",
"(",
"x",
",",
"\"_fields\"",
")",
")",
"return",
"type",
"(",
"x",
")",
"(",
"*",
"args",
")",
"if",
"is_named_tuple",
"else",
"type",
"(",
"x",
")",
"(",
"args",
")",
"if",
"isinstance",
"(",
"x",
",",
"collections",
".",
"Mapping",
")",
":",
"return",
"type",
"(",
"x",
")",
"(",
"*",
"*",
"{",
"k",
":",
"_recursively_replace_dict_for_pretty_dict",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"x",
".",
"items",
"(",
")",
"}",
")",
"return",
"x"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
expectation
|
Computes the Monte-Carlo approximation of `E_p[f(X)]`.
This function computes the Monte-Carlo approximation of an expectation, i.e.,
```none
E_p[f(X)] approx= m**-1 sum_j^m f(x_j), x_j ~iid p(X)
```
where:
- `x_j = samples[j, ...]`,
- `log(p(samples)) = log_prob(samples)` and
- `m = prod(shape(samples)[axis])`.
Tricks: Reparameterization and Score-Gradient
When p is "reparameterized", i.e., a diffeomorphic transformation of a
parameterless distribution (e.g.,
`Normal(Y; m, s) <=> Y = sX + m, X ~ Normal(0,1)`), we can swap gradient and
expectation, i.e.,
`grad[ Avg{ s_i : i=1...n } ] = Avg{ grad[s_i] : i=1...n }` where
`S_n = Avg{s_i}` and `s_i = f(x_i), x_i ~ p`.
However, if p is not reparameterized, TensorFlow's gradient will be incorrect
since the chain-rule stops at samples of non-reparameterized distributions.
(The non-differentiated result, `approx_expectation`, is the same regardless
of `use_reparametrization`.) In this circumstance using the Score-Gradient
trick results in an unbiased gradient, i.e.,
```none
grad[ E_p[f(X)] ]
= grad[ int dx p(x) f(x) ]
= int dx grad[ p(x) f(x) ]
= int dx [ p'(x) f(x) + p(x) f'(x) ]
= int dx p(x) [p'(x) / p(x) f(x) + f'(x) ]
= int dx p(x) grad[ f(x) p(x) / stop_grad[p(x)] ]
= E_p[ grad[ f(x) p(x) / stop_grad[p(x)] ] ]
```
When `p` is reparameterized, it is usually preferable to set
`use_reparametrization = True`.
Warning: users are responsible for verifying `p` is a "reparameterized"
distribution.
Example Use:
```python
# Monte-Carlo approximation of a reparameterized distribution, e.g., Normal.
num_draws = int(1e5)
p = tfp.distributions.Normal(loc=0., scale=1.)
q = tfp.distributions.Normal(loc=1., scale=2.)
exact_kl_normal_normal = tfp.distributions.kl_divergence(p, q)
# ==> 0.44314718
approx_kl_normal_normal = tfp.monte_carlo.expectation(
f=lambda x: p.log_prob(x) - q.log_prob(x),
samples=p.sample(num_draws, seed=42),
log_prob=p.log_prob,
use_reparametrization=(p.reparameterization_type
== tfp.distributions.FULLY_REPARAMETERIZED))
# ==> 0.44632751
# Relative Error: <1%
# Monte-Carlo approximation of non-reparameterized distribution,
# e.g., Bernoulli.
num_draws = int(1e5)
p = tfp.distributions.Bernoulli(probs=0.4)
q = tfp.distributions.Bernoulli(probs=0.8)
exact_kl_bernoulli_bernoulli = tfp.distributions.kl_divergence(p, q)
# ==> 0.38190854
approx_kl_bernoulli_bernoulli = tfp.monte_carlo.expectation(
f=lambda x: p.log_prob(x) - q.log_prob(x),
samples=p.sample(num_draws, seed=42),
log_prob=p.log_prob,
use_reparametrization=(p.reparameterization_type
== tfp.distributions.FULLY_REPARAMETERIZED))
# ==> 0.38336259
# Relative Error: <1%
# For comparing the gradients, see `expectation_test.py`.
```
Note: The above example is for illustration only. To compute approximate
KL-divergence, the following is preferred:
```python
approx_kl_p_q = bf.monte_carlo_csiszar_f_divergence(
f=bf.kl_reverse,
p_log_prob=q.log_prob,
q=p,
num_draws=num_draws)
```
Args:
f: Python callable which can return `f(samples)`.
samples: `Tensor` of samples used to form the Monte-Carlo approximation of
`E_p[f(X)]`. A batch of samples should be indexed by `axis` dimensions.
log_prob: Python callable which can return `log_prob(samples)`. Must
correspond to the natural-logarithm of the pdf/pmf of each sample. Only
required/used if `use_reparametrization=False`.
Default value: `None`.
use_reparametrization: Python `bool` indicating that the approximation
should use the fact that the gradient of samples is unbiased. Whether
`True` or `False`, this arg only affects the gradient of the resulting
`approx_expectation`.
Default value: `True`.
axis: The dimensions to average. If `None`, averages all
dimensions.
Default value: `0` (the left-most dimension).
keep_dims: If True, retains averaged dimensions using size `1`.
Default value: `False`.
name: A `name_scope` for operations created by this function.
Default value: `None` (which implies "expectation").
Returns:
approx_expectation: `Tensor` corresponding to the Monte-Carlo approximation
of `E_p[f(X)]`.
Raises:
ValueError: if `f` is not a Python `callable`.
ValueError: if `use_reparametrization=False` and `log_prob` is not a Python
`callable`.
|
tensorflow_probability/python/monte_carlo/expectation.py
|
def expectation(f, samples, log_prob=None, use_reparametrization=True,
axis=0, keep_dims=False, name=None):
"""Computes the Monte-Carlo approximation of `E_p[f(X)]`.
This function computes the Monte-Carlo approximation of an expectation, i.e.,
```none
E_p[f(X)] approx= m**-1 sum_j^m f(x_j), x_j ~iid p(X)
```
where:
- `x_j = samples[j, ...]`,
- `log(p(samples)) = log_prob(samples)` and
- `m = prod(shape(samples)[axis])`.
Tricks: Reparameterization and Score-Gradient
When p is "reparameterized", i.e., a diffeomorphic transformation of a
parameterless distribution (e.g.,
`Normal(Y; m, s) <=> Y = sX + m, X ~ Normal(0,1)`), we can swap gradient and
expectation, i.e.,
`grad[ Avg{ s_i : i=1...n } ] = Avg{ grad[s_i] : i=1...n }` where
`S_n = Avg{s_i}` and `s_i = f(x_i), x_i ~ p`.
However, if p is not reparameterized, TensorFlow's gradient will be incorrect
since the chain-rule stops at samples of non-reparameterized distributions.
(The non-differentiated result, `approx_expectation`, is the same regardless
of `use_reparametrization`.) In this circumstance using the Score-Gradient
trick results in an unbiased gradient, i.e.,
```none
grad[ E_p[f(X)] ]
= grad[ int dx p(x) f(x) ]
= int dx grad[ p(x) f(x) ]
= int dx [ p'(x) f(x) + p(x) f'(x) ]
= int dx p(x) [p'(x) / p(x) f(x) + f'(x) ]
= int dx p(x) grad[ f(x) p(x) / stop_grad[p(x)] ]
= E_p[ grad[ f(x) p(x) / stop_grad[p(x)] ] ]
```
When `p` is reparameterized, it is usually preferable to set
`use_reparametrization = True`.
Warning: users are responsible for verifying `p` is a "reparameterized"
distribution.
Example Use:
```python
# Monte-Carlo approximation of a reparameterized distribution, e.g., Normal.
num_draws = int(1e5)
p = tfp.distributions.Normal(loc=0., scale=1.)
q = tfp.distributions.Normal(loc=1., scale=2.)
exact_kl_normal_normal = tfp.distributions.kl_divergence(p, q)
# ==> 0.44314718
approx_kl_normal_normal = tfp.monte_carlo.expectation(
f=lambda x: p.log_prob(x) - q.log_prob(x),
samples=p.sample(num_draws, seed=42),
log_prob=p.log_prob,
use_reparametrization=(p.reparameterization_type
== tfp.distributions.FULLY_REPARAMETERIZED))
# ==> 0.44632751
# Relative Error: <1%
# Monte-Carlo approximation of non-reparameterized distribution,
# e.g., Bernoulli.
num_draws = int(1e5)
p = tfp.distributions.Bernoulli(probs=0.4)
q = tfp.distributions.Bernoulli(probs=0.8)
exact_kl_bernoulli_bernoulli = tfp.distributions.kl_divergence(p, q)
# ==> 0.38190854
approx_kl_bernoulli_bernoulli = tfp.monte_carlo.expectation(
f=lambda x: p.log_prob(x) - q.log_prob(x),
samples=p.sample(num_draws, seed=42),
log_prob=p.log_prob,
use_reparametrization=(p.reparameterization_type
== tfp.distributions.FULLY_REPARAMETERIZED))
# ==> 0.38336259
# Relative Error: <1%
# For comparing the gradients, see `expectation_test.py`.
```
Note: The above example is for illustration only. To compute approximate
KL-divergence, the following is preferred:
```python
approx_kl_p_q = bf.monte_carlo_csiszar_f_divergence(
f=bf.kl_reverse,
p_log_prob=q.log_prob,
q=p,
num_draws=num_draws)
```
Args:
f: Python callable which can return `f(samples)`.
samples: `Tensor` of samples used to form the Monte-Carlo approximation of
`E_p[f(X)]`. A batch of samples should be indexed by `axis` dimensions.
log_prob: Python callable which can return `log_prob(samples)`. Must
correspond to the natural-logarithm of the pdf/pmf of each sample. Only
required/used if `use_reparametrization=False`.
Default value: `None`.
use_reparametrization: Python `bool` indicating that the approximation
should use the fact that the gradient of samples is unbiased. Whether
`True` or `False`, this arg only affects the gradient of the resulting
`approx_expectation`.
Default value: `True`.
axis: The dimensions to average. If `None`, averages all
dimensions.
Default value: `0` (the left-most dimension).
keep_dims: If True, retains averaged dimensions using size `1`.
Default value: `False`.
name: A `name_scope` for operations created by this function.
Default value: `None` (which implies "expectation").
Returns:
approx_expectation: `Tensor` corresponding to the Monte-Carlo approximation
of `E_p[f(X)]`.
Raises:
ValueError: if `f` is not a Python `callable`.
ValueError: if `use_reparametrization=False` and `log_prob` is not a Python
`callable`.
"""
with tf.compat.v1.name_scope(name, 'expectation', [samples]):
if not callable(f):
raise ValueError('`f` must be a callable function.')
if use_reparametrization:
return tf.reduce_mean(
input_tensor=f(samples), axis=axis, keepdims=keep_dims)
else:
if not callable(log_prob):
raise ValueError('`log_prob` must be a callable function.')
stop = tf.stop_gradient # For readability.
x = stop(samples)
logpx = log_prob(x)
fx = f(x) # Call `f` once in case it has side-effects.
# To achieve this, we use the fact that:
# `h(x) - stop(h(x)) == zeros_like(h(x))`
# but its gradient is grad[h(x)].
#
# This technique was published as:
# Jakob Foerster, Greg Farquhar, Maruan Al-Shedivat, Tim Rocktaeschel,
# Eric P. Xing, Shimon Whiteson (ICML 2018)
# "DiCE: The Infinitely Differentiable Monte-Carlo Estimator"
# https://arxiv.org/abs/1802.05098
#
# Unlike using:
# fx = fx + stop(fx) * (logpx - stop(logpx)),
# DiCE ensures that any order gradients of the objective
# are unbiased gradient estimators.
#
# Note that IEEE754 specifies that `x - x == 0.` and `x + 0. == x`, hence
# this trick loses no precision. For more discussion regarding the
# relevant portions of the IEEE754 standard, see the StackOverflow
# question,
# "Is there a floating point value of x, for which x-x == 0 is false?"
# http://stackoverflow.com/q/2686644
dice = fx * tf.exp(logpx - stop(logpx))
return tf.reduce_mean(input_tensor=dice, axis=axis, keepdims=keep_dims)
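A runnable sketch of the score-gradient path, assuming TF 2.x eager mode, TensorFlow Probability, and the `expectation` function above. Bernoulli samples are not reparameterizable, so the gradient flows through the DiCE surrogate:

```python
import tensorflow as tf
import tensorflow_probability as tfp

logits = tf.Variable(0.3)
with tf.GradientTape() as tape:
    p = tfp.distributions.Bernoulli(logits=logits)
    approx_mean = expectation(
        f=lambda x: tf.cast(x, tf.float32),
        samples=p.sample(100000, seed=42),
        log_prob=p.log_prob,
        use_reparametrization=False)
grad = tape.gradient(approx_mean, logits)
# E[X] = sigmoid(0.3) ~= 0.574, and d E[X] / d logits = sigmoid * (1 - sigmoid)
# ~= 0.245, which `grad` should approximate up to Monte-Carlo noise.
```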
|
[
"Computes",
"the",
"Monte",
"-",
"Carlo",
"approximation",
"of",
"E_p",
"[",
"f",
"(",
"X",
")",
"]",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/monte_carlo/expectation.py#L29-L192
|
[
"def",
"expectation",
"(",
"f",
",",
"samples",
",",
"log_prob",
"=",
"None",
",",
"use_reparametrization",
"=",
"True",
",",
"axis",
"=",
"0",
",",
"keep_dims",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'expectation'",
",",
"[",
"samples",
"]",
")",
":",
"if",
"not",
"callable",
"(",
"f",
")",
":",
"raise",
"ValueError",
"(",
"'`f` must be a callable function.'",
")",
"if",
"use_reparametrization",
":",
"return",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"f",
"(",
"samples",
")",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"keep_dims",
")",
"else",
":",
"if",
"not",
"callable",
"(",
"log_prob",
")",
":",
"raise",
"ValueError",
"(",
"'`log_prob` must be a callable function.'",
")",
"stop",
"=",
"tf",
".",
"stop_gradient",
"# For readability.",
"x",
"=",
"stop",
"(",
"samples",
")",
"logpx",
"=",
"log_prob",
"(",
"x",
")",
"fx",
"=",
"f",
"(",
"x",
")",
"# Call `f` once in case it has side-effects.",
"# To achieve this, we use the fact that:",
"# `h(x) - stop(h(x)) == zeros_like(h(x))`",
"# but its gradient is grad[h(x)].",
"#",
"# This technique was published as:",
"# Jakob Foerster, Greg Farquhar, Maruan Al-Shedivat, Tim Rocktaeschel,",
"# Eric P. Xing, Shimon Whiteson (ICML 2018)",
"# \"DiCE: The Infinitely Differentiable Monte-Carlo Estimator\"",
"# https://arxiv.org/abs/1802.05098",
"#",
"# Unlike using:",
"# fx = fx + stop(fx) * (logpx - stop(logpx)),",
"# DiCE ensures that any order gradients of the objective",
"# are unbiased gradient estimators.",
"#",
"# Note that IEEE754 specifies that `x - x == 0.` and `x + 0. == x`, hence",
"# this trick loses no precision. For more discussion regarding the",
"# relevant portions of the IEEE754 standard, see the StackOverflow",
"# question,",
"# \"Is there a floating point value of x, for which x-x == 0 is false?\"",
"# http://stackoverflow.com/q/2686644",
"dice",
"=",
"fx",
"*",
"tf",
".",
"exp",
"(",
"logpx",
"-",
"stop",
"(",
"logpx",
")",
")",
"return",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"dice",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"keep_dims",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_get_samples
|
Check args and return samples.
|
tensorflow_probability/python/monte_carlo/expectation.py
|
def _get_samples(dist, z, n, seed):
"""Check args and return samples."""
with tf.compat.v1.name_scope('get_samples', values=[z, n]):
if (n is None) == (z is None):
raise ValueError(
'Must specify exactly one of arguments "n" and "z". Found: '
'n = %s, z = %s' % (n, z))
if n is not None:
return dist.sample(n, seed=seed)
else:
return tf.convert_to_tensor(value=z, name='z')
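A sketch of the exactly-one-of contract (assumes TFP and `_get_samples` above):

```python
import tensorflow_probability as tfp

dist = tfp.distributions.Normal(loc=0., scale=1.)
_get_samples(dist, z=None, n=5, seed=17)             # draws 5 samples
_get_samples(dist, z=[0.1, 0.2], n=None, seed=None)  # passes z through
# _get_samples(dist, z=None, n=None, seed=None)      # would raise ValueError
```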
|
[
"Check",
"args",
"and",
"return",
"samples",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/monte_carlo/expectation.py#L205-L215
|
[
"def",
"_get_samples",
"(",
"dist",
",",
"z",
",",
"n",
",",
"seed",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"'get_samples'",
",",
"values",
"=",
"[",
"z",
",",
"n",
"]",
")",
":",
"if",
"(",
"n",
"is",
"None",
")",
"==",
"(",
"z",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"'Must specify exactly one of arguments \"n\" and \"z\". Found: '",
"'n = %s, z = %s'",
"%",
"(",
"n",
",",
"z",
")",
")",
"if",
"n",
"is",
"not",
"None",
":",
"return",
"dist",
".",
"sample",
"(",
"n",
",",
"seed",
"=",
"seed",
")",
"else",
":",
"return",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"z",
",",
"name",
"=",
"'z'",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
is_namedtuple_like
|
Helper which returns `True` if input is `collections.namedtuple`-like.
|
tensorflow_probability/python/mcmc/internal/util.py
|
def is_namedtuple_like(x):
"""Helper which returns `True` if input is `collections.namedtuple`-like."""
try:
for fn in x._fields:
_ = getattr(x, fn)
return True
except AttributeError:
return False
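The check is pure duck typing: anything exposing `_fields` whose entries are gettable attributes passes, which is exactly what MCMC kernel-results tuples look like. For instance:

```python
import collections

Point = collections.namedtuple('Point', ['x', 'y'])
assert is_namedtuple_like(Point(1, 2))
assert not is_namedtuple_like((1, 2))    # plain tuple: no _fields
assert not is_namedtuple_like({'x': 1})  # dict: no _fields
```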
|
[
"Helper",
"which",
"returns",
"True",
"if",
"input",
"is",
"collections",
".",
"namedtuple",
"-",
"like",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L56-L63
|
[
"def",
"is_namedtuple_like",
"(",
"x",
")",
":",
"try",
":",
"for",
"fn",
"in",
"x",
".",
"_fields",
":",
"_",
"=",
"getattr",
"(",
"x",
",",
"fn",
")",
"return",
"True",
"except",
"AttributeError",
":",
"return",
"False"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
make_name
|
Helper which makes a `str` name; useful for tf.compat.v1.name_scope.
|
tensorflow_probability/python/mcmc/internal/util.py
|
def make_name(super_name, default_super_name, sub_name):
"""Helper which makes a `str` name; useful for tf.compat.v1.name_scope."""
name = super_name if super_name is not None else default_super_name
if sub_name is not None:
name += '_' + sub_name
return name
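Behavior sketch (the names are illustrative):

```python
assert make_name('my_kernel', 'hmc', 'one_step') == 'my_kernel_one_step'
assert make_name(None, 'hmc', 'one_step') == 'hmc_one_step'
assert make_name(None, 'hmc', None) == 'hmc'
```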
|
[
"Helper",
"which",
"makes",
"a",
"str",
"name",
";",
"useful",
"for",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L66-L71
|
[
"def",
"make_name",
"(",
"super_name",
",",
"default_super_name",
",",
"sub_name",
")",
":",
"name",
"=",
"super_name",
"if",
"super_name",
"is",
"not",
"None",
"else",
"default_super_name",
"if",
"sub_name",
"is",
"not",
"None",
":",
"name",
"+=",
"'_'",
"+",
"sub_name",
"return",
"name"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_choose_base_case
|
Helper to `choose` which expand_dims `is_accepted` and applies tf.where.
|
tensorflow_probability/python/mcmc/internal/util.py
|
def _choose_base_case(is_accepted,
accepted,
rejected,
name=None):
"""Helper to `choose` which expand_dims `is_accepted` and applies tf.where."""
def _expand_is_accepted_like(x):
"""Helper to expand `is_accepted` like the shape of some input arg."""
with tf.compat.v1.name_scope('expand_is_accepted_like'):
expand_shape = tf.concat([
tf.shape(input=is_accepted),
tf.ones([tf.rank(x) - tf.rank(is_accepted)], dtype=tf.int32),
],
axis=0)
multiples = tf.concat([
tf.ones([tf.rank(is_accepted)], dtype=tf.int32),
tf.shape(input=x)[tf.rank(is_accepted):],
],
axis=0)
m = tf.tile(tf.reshape(is_accepted, expand_shape),
multiples)
m.set_shape(m.shape.merge_with(x.shape))
return m
def _where(accepted, rejected):
if accepted is rejected:
return accepted
accepted = tf.convert_to_tensor(value=accepted, name='accepted')
rejected = tf.convert_to_tensor(value=rejected, name='rejected')
r = tf.where(_expand_is_accepted_like(accepted), accepted, rejected)
r.set_shape(r.shape.merge_with(accepted.shape.merge_with(rejected.shape)))
return r
with tf.compat.v1.name_scope(
name, 'choose', values=[is_accepted, accepted, rejected]):
if not is_list_like(accepted):
return _where(accepted, rejected)
return [(choose(is_accepted, a, r, name=name) if is_namedtuple_like(a)
else _where(a, r))
for a, r in zip(accepted, rejected)]
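The broadcasting is the interesting part: a per-chain `is_accepted` vector is tiled across each state's trailing dims before `tf.where`. A sketch, assuming TF 2.x plus the function above and the module's `is_list_like` helper:

```python
import tensorflow as tf

is_accepted = tf.constant([True, False])  # one flag per chain
proposed = tf.fill([2, 3], 1.)            # two chains, 3-dim states each
current = tf.fill([2, 3], 0.)
print(_choose_base_case(is_accepted, proposed, current))
# [[1. 1. 1.]
#  [0. 0. 0.]]
```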
|
[
"Helper",
"to",
"choose",
"which",
"expand_dims",
"is_accepted",
"and",
"applies",
"tf",
".",
"where",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L74-L111
|
[
"def",
"_choose_base_case",
"(",
"is_accepted",
",",
"accepted",
",",
"rejected",
",",
"name",
"=",
"None",
")",
":",
"def",
"_expand_is_accepted_like",
"(",
"x",
")",
":",
"\"\"\"Helper to expand `is_accepted` like the shape of some input arg.\"\"\"",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"'expand_is_accepted_like'",
")",
":",
"expand_shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"shape",
"(",
"input",
"=",
"is_accepted",
")",
",",
"tf",
".",
"ones",
"(",
"[",
"tf",
".",
"rank",
"(",
"x",
")",
"-",
"tf",
".",
"rank",
"(",
"is_accepted",
")",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"]",
",",
"axis",
"=",
"0",
")",
"multiples",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"ones",
"(",
"[",
"tf",
".",
"rank",
"(",
"is_accepted",
")",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"[",
"tf",
".",
"rank",
"(",
"is_accepted",
")",
":",
"]",
",",
"]",
",",
"axis",
"=",
"0",
")",
"m",
"=",
"tf",
".",
"tile",
"(",
"tf",
".",
"reshape",
"(",
"is_accepted",
",",
"expand_shape",
")",
",",
"multiples",
")",
"m",
".",
"set_shape",
"(",
"m",
".",
"shape",
".",
"merge_with",
"(",
"x",
".",
"shape",
")",
")",
"return",
"m",
"def",
"_where",
"(",
"accepted",
",",
"rejected",
")",
":",
"if",
"accepted",
"is",
"rejected",
":",
"return",
"accepted",
"accepted",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"accepted",
",",
"name",
"=",
"'accepted'",
")",
"rejected",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"rejected",
",",
"name",
"=",
"'rejected'",
")",
"r",
"=",
"tf",
".",
"where",
"(",
"_expand_is_accepted_like",
"(",
"accepted",
")",
",",
"accepted",
",",
"rejected",
")",
"r",
".",
"set_shape",
"(",
"r",
".",
"shape",
".",
"merge_with",
"(",
"accepted",
".",
"shape",
".",
"merge_with",
"(",
"rejected",
".",
"shape",
")",
")",
")",
"return",
"r",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'choose'",
",",
"values",
"=",
"[",
"is_accepted",
",",
"accepted",
",",
"rejected",
"]",
")",
":",
"if",
"not",
"is_list_like",
"(",
"accepted",
")",
":",
"return",
"_where",
"(",
"accepted",
",",
"rejected",
")",
"return",
"[",
"(",
"choose",
"(",
"is_accepted",
",",
"a",
",",
"r",
",",
"name",
"=",
"name",
")",
"if",
"is_namedtuple_like",
"(",
"a",
")",
"else",
"_where",
"(",
"a",
",",
"r",
")",
")",
"for",
"a",
",",
"r",
"in",
"zip",
"(",
"accepted",
",",
"rejected",
")",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
choose
|
Helper which expand_dims `is_accepted` then applies tf.where.
|
tensorflow_probability/python/mcmc/internal/util.py
|
def choose(is_accepted, accepted, rejected, name=None):
"""Helper which expand_dims `is_accepted` then applies tf.where."""
if not is_namedtuple_like(accepted):
return _choose_base_case(is_accepted, accepted, rejected, name=name)
if not isinstance(accepted, type(rejected)):
raise TypeError('Type of `accepted` ({}) must be identical to '
'type of `rejected` ({})'.format(
type(accepted).__name__,
type(rejected).__name__))
return type(accepted)(**dict(
[(fn,
choose(is_accepted,
getattr(accepted, fn),
getattr(rejected, fn),
name=name))
for fn in accepted._fields]))
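For namedtuple-like kernel results, `choose` recurses field by field. A sketch with a hypothetical results type, assuming the helpers above:

```python
import collections
import tensorflow as tf

Results = collections.namedtuple('Results', ['target_log_prob'])
picked = choose(tf.constant([True, False]),
                Results(target_log_prob=tf.constant([1., 2.])),
                Results(target_log_prob=tf.constant([-1., -2.])))
print(picked.target_log_prob)  # [ 1. -2.]
```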
|
[
"Helper",
"which",
"expand_dims",
"is_accepted",
"then",
"applies",
"tf",
".",
"where",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L114-L129
|
[
"def",
"choose",
"(",
"is_accepted",
",",
"accepted",
",",
"rejected",
",",
"name",
"=",
"None",
")",
":",
"if",
"not",
"is_namedtuple_like",
"(",
"accepted",
")",
":",
"return",
"_choose_base_case",
"(",
"is_accepted",
",",
"accepted",
",",
"rejected",
",",
"name",
"=",
"name",
")",
"if",
"not",
"isinstance",
"(",
"accepted",
",",
"type",
"(",
"rejected",
")",
")",
":",
"raise",
"TypeError",
"(",
"'Type of `accepted` ({}) must be identical to '",
"'type of `rejected` ({})'",
".",
"format",
"(",
"type",
"(",
"accepted",
")",
".",
"__name__",
",",
"type",
"(",
"rejected",
")",
".",
"__name__",
")",
")",
"return",
"type",
"(",
"accepted",
")",
"(",
"*",
"*",
"dict",
"(",
"[",
"(",
"fn",
",",
"choose",
"(",
"is_accepted",
",",
"getattr",
"(",
"accepted",
",",
"fn",
")",
",",
"getattr",
"(",
"rejected",
",",
"fn",
")",
",",
"name",
"=",
"name",
")",
")",
"for",
"fn",
"in",
"accepted",
".",
"_fields",
"]",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
safe_sum
|
Elementwise adds list members, replacing non-finite results with alt_value.
Typically the `alt_value` is chosen so the `MetropolisHastings`
`TransitionKernel` always rejects the proposal.
Args:
x: Python `list` of `Tensors` to elementwise add.
alt_value: Python scalar used to replace any elementwise sums which would
otherwise be non-finite.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "safe_sum").
Returns:
safe_sum: `Tensor` representing the elementwise sum of list of `Tensor`s
`x` or `alt_value` where sums are non-finite.
Raises:
TypeError: if `x` is not list-like.
ValueError: if `x` is empty.
|
tensorflow_probability/python/mcmc/internal/util.py
|
def safe_sum(x, alt_value=-np.inf, name=None):
"""Elementwise adds list members, replacing non-finite results with alt_value.
Typically the `alt_value` is chosen so the `MetropolisHastings`
`TransitionKernel` always rejects the proposal.
Args:
x: Python `list` of `Tensors` to elementwise add.
alt_value: Python scalar used to replace any elementwise sums which would
otherwise be non-finite.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "safe_sum").
Returns:
safe_sum: `Tensor` representing the elementwise sum of list of `Tensor`s
`x` or `alt_value` where sums are non-finite.
Raises:
TypeError: if `x` is not list-like.
ValueError: if `x` is empty.
"""
with tf.compat.v1.name_scope(name, 'safe_sum', [x, alt_value]):
if not is_list_like(x):
raise TypeError('Expected list input.')
if not x:
raise ValueError('Input should not be empty.')
in_shape = x[0].shape
x = tf.stack(x, axis=-1)
x = tf.reduce_sum(input_tensor=x, axis=-1)
alt_value = np.array(alt_value, x.dtype.as_numpy_dtype)
alt_fill = tf.fill(tf.shape(input=x), value=alt_value)
x = tf.where(tf.math.is_finite(x), x, alt_fill)
x.set_shape(x.shape.merge_with(in_shape))
return x
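A sketch of the replacement behavior, assuming TF 2.x, NumPy, and `safe_sum` plus the module's `is_list_like` helper above. Any position whose sum is non-finite is swapped for `alt_value`, which MetropolisHastings reads as certain rejection:

```python
import numpy as np
import tensorflow as tf

parts = [tf.constant([1., np.inf]), tf.constant([2., 1.])]
print(safe_sum(parts))  # [  3. -inf]  (the inf sum becomes alt_value)
```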
|
[
"Elementwise",
"adds",
"list",
"members",
"replacing",
"non",
"-",
"finite",
"results",
"with",
"alt_value",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L132-L165
|
[
"def",
"safe_sum",
"(",
"x",
",",
"alt_value",
"=",
"-",
"np",
".",
"inf",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'safe_sum'",
",",
"[",
"x",
",",
"alt_value",
"]",
")",
":",
"if",
"not",
"is_list_like",
"(",
"x",
")",
":",
"raise",
"TypeError",
"(",
"'Expected list input.'",
")",
"if",
"not",
"x",
":",
"raise",
"ValueError",
"(",
"'Input should not be empty.'",
")",
"in_shape",
"=",
"x",
"[",
"0",
"]",
".",
"shape",
"x",
"=",
"tf",
".",
"stack",
"(",
"x",
",",
"axis",
"=",
"-",
"1",
")",
"x",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"x",
",",
"axis",
"=",
"-",
"1",
")",
"alt_value",
"=",
"np",
".",
"array",
"(",
"alt_value",
",",
"x",
".",
"dtype",
".",
"as_numpy_dtype",
")",
"alt_fill",
"=",
"tf",
".",
"fill",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
",",
"value",
"=",
"alt_value",
")",
"x",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"math",
".",
"is_finite",
"(",
"x",
")",
",",
"x",
",",
"alt_fill",
")",
"x",
".",
"set_shape",
"(",
"x",
".",
"shape",
".",
"merge_with",
"(",
"in_shape",
")",
")",
"return",
"x"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|