| partition (stringclasses, 3 values) | func_name (stringlengths 1-134) | docstring (stringlengths 1-46.9k) | path (stringlengths 4-223) | original_string (stringlengths 75-104k) | code (stringlengths 75-104k) | docstring_tokens (listlengths 1-1.97k) | repo (stringlengths 7-55) | language (stringclasses, 1 value) | url (stringlengths 87-315) | code_tokens (listlengths 19-28.4k) | sha (stringlengths 40) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
_log_ndtr_lower
|
Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`.
|
tensorflow_probability/python/internal/special_math.py
|
def _log_ndtr_lower(x, series_order):
"""Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`."""
x_2 = tf.square(x)
# Log of the term multiplying (1 + sum)
log_scale = -0.5 * x_2 - tf.math.log(-x) - 0.5 * np.log(2. * np.pi)
return log_scale + tf.math.log(_log_ndtr_asymptotic_series(x, series_order))
|
def _log_ndtr_lower(x, series_order):
"""Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`."""
x_2 = tf.square(x)
# Log of the term multiplying (1 + sum)
log_scale = -0.5 * x_2 - tf.math.log(-x) - 0.5 * np.log(2. * np.pi)
return log_scale + tf.math.log(_log_ndtr_asymptotic_series(x, series_order))
|
[
"Asymptotic",
"expansion",
"version",
"of",
"Log",
"[",
"cdf",
"(",
"x",
")",
"]",
"appropriate",
"for",
"x<<",
"-",
"1",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/special_math.py#L383-L388
|
[
"def",
"_log_ndtr_lower",
"(",
"x",
",",
"series_order",
")",
":",
"x_2",
"=",
"tf",
".",
"square",
"(",
"x",
")",
"# Log of the term multiplying (1 + sum)",
"log_scale",
"=",
"-",
"0.5",
"*",
"x_2",
"-",
"tf",
".",
"math",
".",
"log",
"(",
"-",
"x",
")",
"-",
"0.5",
"*",
"np",
".",
"log",
"(",
"2.",
"*",
"np",
".",
"pi",
")",
"return",
"log_scale",
"+",
"tf",
".",
"math",
".",
"log",
"(",
"_log_ndtr_asymptotic_series",
"(",
"x",
",",
"series_order",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
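For reference, a minimal NumPy/SciPy sketch (not part of the dataset row above) of the lower-tail expansion that `_log_ndtr_lower` and `_log_ndtr_asymptotic_series` (next row) implement: `log Phi(x) ≈ -x²/2 - log(-x) - log(2*pi)/2 + log(1 - 1/x² + 3/x⁴ - ...)` for `x << -1`. The helper name and test values are illustrative only:

```python
import numpy as np
from scipy.special import log_ndtr  # SciPy reference for comparison


def log_ndtr_lower_np(x, series_order=3):
    """Lower-tail asymptotic expansion of log(Phi(x)), valid for x << -1 (sketch)."""
    x = np.asarray(x, dtype=np.float64)
    x2 = np.square(x)
    # Alternating series 1 - 1/x^2 + 3/x^4 - 15/x^6 + ... with (2n-1)!! numerators.
    series, x2n, sign, double_fact = 1.0, x2, -1.0, 1.0
    for n in range(1, series_order + 1):
        double_fact *= 2 * n - 1
        series = series + sign * double_fact / x2n
        sign, x2n = -sign, x2n * x2
    # Log of the prefactor exp(-x^2 / 2) / (-x * sqrt(2 * pi)).
    log_scale = -0.5 * x2 - np.log(-x) - 0.5 * np.log(2.0 * np.pi)
    return log_scale + np.log(series)


x = np.array([-8.0, -12.0, -20.0])
print(log_ndtr_lower_np(x))  # agrees with scipy.special.log_ndtr(x) to several digits
print(log_ndtr(x))
```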
test
|
_log_ndtr_asymptotic_series
|
Calculates the asymptotic series used in log_ndtr.
|
tensorflow_probability/python/internal/special_math.py
|
def _log_ndtr_asymptotic_series(x, series_order):
"""Calculates the asymptotic series used in log_ndtr."""
npdt = dtype_util.as_numpy_dtype(x.dtype)
if series_order <= 0:
return npdt(1)
x_2 = tf.square(x)
even_sum = tf.zeros_like(x)
odd_sum = tf.zeros_like(x)
x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1.
for n in range(1, series_order + 1):
y = npdt(_double_factorial(2 * n - 1)) / x_2n
if n % 2:
odd_sum += y
else:
even_sum += y
x_2n *= x_2
return 1. + even_sum - odd_sum
|
def _log_ndtr_asymptotic_series(x, series_order):
"""Calculates the asymptotic series used in log_ndtr."""
npdt = dtype_util.as_numpy_dtype(x.dtype)
if series_order <= 0:
return npdt(1)
x_2 = tf.square(x)
even_sum = tf.zeros_like(x)
odd_sum = tf.zeros_like(x)
x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1.
for n in range(1, series_order + 1):
y = npdt(_double_factorial(2 * n - 1)) / x_2n
if n % 2:
odd_sum += y
else:
even_sum += y
x_2n *= x_2
return 1. + even_sum - odd_sum
|
[
"Calculates",
"the",
"asymptotic",
"series",
"used",
"in",
"log_ndtr",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/special_math.py#L391-L407
|
[
"def",
"_log_ndtr_asymptotic_series",
"(",
"x",
",",
"series_order",
")",
":",
"npdt",
"=",
"dtype_util",
".",
"as_numpy_dtype",
"(",
"x",
".",
"dtype",
")",
"if",
"series_order",
"<=",
"0",
":",
"return",
"npdt",
"(",
"1",
")",
"x_2",
"=",
"tf",
".",
"square",
"(",
"x",
")",
"even_sum",
"=",
"tf",
".",
"zeros_like",
"(",
"x",
")",
"odd_sum",
"=",
"tf",
".",
"zeros_like",
"(",
"x",
")",
"x_2n",
"=",
"x_2",
"# Start with x^{2*1} = x^{2*n} with n = 1.",
"for",
"n",
"in",
"range",
"(",
"1",
",",
"series_order",
"+",
"1",
")",
":",
"y",
"=",
"npdt",
"(",
"_double_factorial",
"(",
"2",
"*",
"n",
"-",
"1",
")",
")",
"/",
"x_2n",
"if",
"n",
"%",
"2",
":",
"odd_sum",
"+=",
"y",
"else",
":",
"even_sum",
"+=",
"y",
"x_2n",
"*=",
"x_2",
"return",
"1.",
"+",
"even_sum",
"-",
"odd_sum"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
erfinv
|
The inverse function for erf, the error function.
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="erfinv").
Returns:
x: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
|
tensorflow_probability/python/internal/special_math.py
|
def erfinv(x, name="erfinv"):
"""The inverse function for erf, the error function.
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="erfinv").
Returns:
x: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
"""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name="x")
if dtype_util.as_numpy_dtype(x.dtype) not in [np.float32, np.float64]:
raise TypeError("x.dtype={} is not handled, see docstring for supported "
"types.".format(dtype_util.name(x.dtype)))
return ndtri((x + 1.) / 2.) / np.sqrt(2.)
|
def erfinv(x, name="erfinv"):
"""The inverse function for erf, the error function.
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="erfinv").
Returns:
x: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
"""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name="x")
if dtype_util.as_numpy_dtype(x.dtype) not in [np.float32, np.float64]:
raise TypeError("x.dtype={} is not handled, see docstring for supported "
"types.".format(dtype_util.name(x.dtype)))
return ndtri((x + 1.) / 2.) / np.sqrt(2.)
|
[
"The",
"inverse",
"function",
"for",
"erf",
"the",
"error",
"function",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/special_math.py#L410-L429
|
[
"def",
"erfinv",
"(",
"x",
",",
"name",
"=",
"\"erfinv\"",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"x\"",
")",
"if",
"dtype_util",
".",
"as_numpy_dtype",
"(",
"x",
".",
"dtype",
")",
"not",
"in",
"[",
"np",
".",
"float32",
",",
"np",
".",
"float64",
"]",
":",
"raise",
"TypeError",
"(",
"\"x.dtype={} is not handled, see docstring for supported \"",
"\"types.\"",
".",
"format",
"(",
"dtype_util",
".",
"name",
"(",
"x",
".",
"dtype",
")",
")",
")",
"return",
"ndtri",
"(",
"(",
"x",
"+",
"1.",
")",
"/",
"2.",
")",
"/",
"np",
".",
"sqrt",
"(",
"2.",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
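A quick hedged check (not part of the dataset row above) of the identity `erfinv(x) = ndtri((x + 1) / 2) / sqrt(2)` that the function relies on, using SciPy for both sides:

```python
import numpy as np
from scipy.special import erfinv as scipy_erfinv, ndtri

x = np.array([-0.9, -0.5, 0.0, 0.5, 0.9])
via_ndtri = ndtri((x + 1.0) / 2.0) / np.sqrt(2.0)
print(np.allclose(via_ndtri, scipy_erfinv(x)))  # True: erf(z) = 2 * Phi(z * sqrt(2)) - 1
```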
test
|
log_cdf_laplace
|
Log Laplace distribution function.
This function calculates `Log[L(x)]`, where `L(x)` is the cumulative
distribution function of the Laplace distribution, i.e.
```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```
For numerical accuracy, `L(x)` is computed in different ways depending on `x`,
```
x <= 0:
Log[L(x)] = Log[0.5] + x, which is exact
0 < x:
Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="log_cdf_laplace").
Returns:
`Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
|
tensorflow_probability/python/internal/special_math.py
|
def log_cdf_laplace(x, name="log_cdf_laplace"):
"""Log Laplace distribution function.
This function calculates `Log[L(x)]`, where `L(x)` is the cumulative
distribution function of the Laplace distribution, i.e.
```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```
For numerical accuracy, `L(x)` is computed in different ways depending on `x`,
```
x <= 0:
Log[L(x)] = Log[0.5] + x, which is exact
0 < x:
Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="log_cdf_laplace").
Returns:
`Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
"""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name="x")
# For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.
lower_solution = -np.log(2.) + x
# safe_exp_neg_x = exp{-x} for x > 0, but is
# bounded above by 1, which avoids
# log[1 - 1] = -inf for x = log(1/2), AND
# exp{-x} --> inf, for x << -1
safe_exp_neg_x = tf.exp(-tf.abs(x))
# log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used
# internally by log1p, rather than being done explicitly here.
upper_solution = tf.math.log1p(-0.5 * safe_exp_neg_x)
return tf.where(x < 0., lower_solution, upper_solution)
|
def log_cdf_laplace(x, name="log_cdf_laplace"):
"""Log Laplace distribution function.
This function calculates `Log[L(x)]`, where `L(x)` is the cumulative
distribution function of the Laplace distribution, i.e.
```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```
For numerical accuracy, `L(x)` is computed in different ways depending on `x`,
```
x <= 0:
Log[L(x)] = Log[0.5] + x, which is exact
0 < x:
Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="log_cdf_laplace").
Returns:
`Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
"""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name="x")
# For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.
lower_solution = -np.log(2.) + x
# safe_exp_neg_x = exp{-x} for x > 0, but is
# bounded above by 1, which avoids
# log[1 - 1] = -inf for x = log(1/2), AND
# exp{-x} --> inf, for x << -1
safe_exp_neg_x = tf.exp(-tf.abs(x))
# log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used
# internally by log1p, rather than being done explicitly here.
upper_solution = tf.math.log1p(-0.5 * safe_exp_neg_x)
return tf.where(x < 0., lower_solution, upper_solution)
|
[
"Log",
"Laplace",
"distribution",
"function",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/special_math.py#L437-L482
|
[
"def",
"log_cdf_laplace",
"(",
"x",
",",
"name",
"=",
"\"log_cdf_laplace\"",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"x\"",
")",
"# For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.",
"lower_solution",
"=",
"-",
"np",
".",
"log",
"(",
"2.",
")",
"+",
"x",
"# safe_exp_neg_x = exp{-x} for x > 0, but is",
"# bounded above by 1, which avoids",
"# log[1 - 1] = -inf for x = log(1/2), AND",
"# exp{-x} --> inf, for x << -1",
"safe_exp_neg_x",
"=",
"tf",
".",
"exp",
"(",
"-",
"tf",
".",
"abs",
"(",
"x",
")",
")",
"# log1p(z) = log(1 + z) approx z for |z| << 1. This approxmation is used",
"# internally by log1p, rather than being done explicitly here.",
"upper_solution",
"=",
"tf",
".",
"math",
".",
"log1p",
"(",
"-",
"0.5",
"*",
"safe_exp_neg_x",
")",
"return",
"tf",
".",
"where",
"(",
"x",
"<",
"0.",
",",
"lower_solution",
",",
"upper_solution",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
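A minimal NumPy sketch (illustrative only, not the TFP implementation) of the same two-branch evaluation of the standard Laplace log-CDF, `L(x) = 0.5 * exp(x)` for `x <= 0` and `1 - 0.5 * exp(-x)` for `x > 0`:

```python
import numpy as np


def log_cdf_laplace_np(x):
    """log L(x) for the standard Laplace CDF, split at 0 for stability (sketch)."""
    x = np.asarray(x, dtype=np.float64)
    lower = -np.log(2.0) + x                      # x <= 0: log(0.5 * exp(x)), exact
    upper = np.log1p(-0.5 * np.exp(-np.abs(x)))   # x > 0: log(1 - 0.5 * exp(-x)), via log1p
    return np.where(x < 0.0, lower, upper)


print(log_cdf_laplace_np(np.array([-800.0, -1.0, 0.0, 1.0, 800.0])))
# The lower branch stays exact near -800 even though exp(-800) underflows to 0.
```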
test
|
text_messages_joint_log_prob
|
Joint log probability function.
|
tensorflow_probability/python/mcmc/text_messages_hmc.py
|
def text_messages_joint_log_prob(count_data, lambda_1, lambda_2, tau):
"""Joint log probability function."""
alpha = (1. / tf.reduce_mean(input_tensor=count_data))
rv_lambda = tfd.Exponential(rate=alpha)
rv_tau = tfd.Uniform()
lambda_ = tf.gather(
[lambda_1, lambda_2],
indices=tf.cast(
tau * tf.cast(tf.size(input=count_data), dtype=tf.float32) <= tf.cast(
tf.range(tf.size(input=count_data)), dtype=tf.float32),
dtype=tf.int32))
rv_observation = tfd.Poisson(rate=lambda_)
return (rv_lambda.log_prob(lambda_1) + rv_lambda.log_prob(lambda_2) +
rv_tau.log_prob(tau) +
tf.reduce_sum(input_tensor=rv_observation.log_prob(count_data)))
|
def text_messages_joint_log_prob(count_data, lambda_1, lambda_2, tau):
"""Joint log probability function."""
alpha = (1. / tf.reduce_mean(input_tensor=count_data))
rv_lambda = tfd.Exponential(rate=alpha)
rv_tau = tfd.Uniform()
lambda_ = tf.gather(
[lambda_1, lambda_2],
indices=tf.cast(
tau * tf.cast(tf.size(input=count_data), dtype=tf.float32) <= tf.cast(
tf.range(tf.size(input=count_data)), dtype=tf.float32),
dtype=tf.int32))
rv_observation = tfd.Poisson(rate=lambda_)
return (rv_lambda.log_prob(lambda_1) + rv_lambda.log_prob(lambda_2) +
rv_tau.log_prob(tau) +
tf.reduce_sum(input_tensor=rv_observation.log_prob(count_data)))
|
[
"Joint",
"log",
"probability",
"function",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/text_messages_hmc.py#L44-L61
|
[
"def",
"text_messages_joint_log_prob",
"(",
"count_data",
",",
"lambda_1",
",",
"lambda_2",
",",
"tau",
")",
":",
"alpha",
"=",
"(",
"1.",
"/",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"count_data",
")",
")",
"rv_lambda",
"=",
"tfd",
".",
"Exponential",
"(",
"rate",
"=",
"alpha",
")",
"rv_tau",
"=",
"tfd",
".",
"Uniform",
"(",
")",
"lambda_",
"=",
"tf",
".",
"gather",
"(",
"[",
"lambda_1",
",",
"lambda_2",
"]",
",",
"indices",
"=",
"tf",
".",
"cast",
"(",
"tau",
"*",
"tf",
".",
"cast",
"(",
"tf",
".",
"size",
"(",
"input",
"=",
"count_data",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"<=",
"tf",
".",
"cast",
"(",
"tf",
".",
"range",
"(",
"tf",
".",
"size",
"(",
"input",
"=",
"count_data",
")",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
")",
"rv_observation",
"=",
"tfd",
".",
"Poisson",
"(",
"rate",
"=",
"lambda_",
")",
"return",
"(",
"rv_lambda",
".",
"log_prob",
"(",
"lambda_1",
")",
"+",
"rv_lambda",
".",
"log_prob",
"(",
"lambda_2",
")",
"+",
"rv_tau",
".",
"log_prob",
"(",
"tau",
")",
"+",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"rv_observation",
".",
"log_prob",
"(",
"count_data",
")",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
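The `tf.gather` indexing in the row above encodes a changepoint: observation `i` uses `lambda_1` while `i < tau * N` and `lambda_2` afterwards. A NumPy sketch of just that indexing step, with made-up values for `tau` and the rates:

```python
import numpy as np

num_obs = 10
tau = 0.4                               # changepoint as a fraction of the series
lambda_1, lambda_2 = 15.0, 25.0         # hypothetical rates
idx = (tau * num_obs <= np.arange(num_obs)).astype(np.int32)  # 0 before the changepoint, 1 after
rate = np.array([lambda_1, lambda_2])[idx]
print(rate)  # [15. 15. 15. 15. 25. 25. 25. 25. 25. 25.]
```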
test
|
benchmark_text_messages_hmc
|
Runs HMC on the text-messages unnormalized posterior.
|
tensorflow_probability/python/mcmc/text_messages_hmc.py
|
def benchmark_text_messages_hmc(
num_results=int(3e3),
num_burnin_steps=int(3e3),
num_leapfrog_steps=3):
"""Runs HMC on the text-messages unnormalized posterior."""
if not tf.executing_eagerly():
tf.compat.v1.reset_default_graph()
# Build a static, pretend dataset.
count_data = tf.cast(
tf.concat(
[tfd.Poisson(rate=15.).sample(43),
tfd.Poisson(rate=25.).sample(31)],
axis=0),
dtype=tf.float32)
if tf.executing_eagerly():
count_data = count_data.numpy()
else:
with tf.compat.v1.Session():
count_data = count_data.eval()
# Define a closure over our joint_log_prob.
def unnormalized_log_posterior(lambda1, lambda2, tau):
return text_messages_joint_log_prob(count_data, lambda1, lambda2, tau)
if tf.executing_eagerly():
sample_chain = tf.function(tfp.mcmc.sample_chain)
else:
sample_chain = tfp.mcmc.sample_chain
# Initialize the step_size. (It will be automatically adapted.)
step_size = tf.compat.v2.Variable(
name='step_size',
initial_value=tf.constant(0.05, dtype=tf.float32),
trainable=False)
def computation():
"""The benchmark computation."""
initial_chain_state = [
tf.constant(count_data.mean(), name='init_lambda1'),
tf.constant(count_data.mean(), name='init_lambda2'),
tf.constant(0.5, name='init_tau'),
]
unconstraining_bijectors = [
tfp.bijectors.Exp(), # Maps a positive real to R.
tfp.bijectors.Exp(), # Maps a positive real to R.
tfp.bijectors.Sigmoid(), # Maps [0,1] to R.
]
_, kernel_results = sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=initial_chain_state,
kernel=tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_log_posterior,
num_leapfrog_steps=num_leapfrog_steps,
step_size=step_size,
step_size_update_fn=
tfp.mcmc.make_simple_step_size_update_policy(num_burnin_steps),
state_gradients_are_stopped=True),
bijector=unconstraining_bijectors))
return kernel_results.inner_results.is_accepted
# Let's force evaluation of graph to ensure build time is not part of our time
# trial.
is_accepted_tensor = computation()
if not tf.executing_eagerly():
session = tf.compat.v1.Session()
session.run(tf.compat.v1.global_variables_initializer())
session.run(is_accepted_tensor)
start_time = time.time()
if tf.executing_eagerly():
is_accepted = computation()
else:
is_accepted = session.run(is_accepted_tensor)
wall_time = time.time() - start_time
num_accepted = np.sum(is_accepted)
acceptance_rate = np.float32(num_accepted) / np.float32(num_results)
return dict(
iters=(num_results + num_burnin_steps) * num_leapfrog_steps,
extras={'acceptance_rate': acceptance_rate},
wall_time=wall_time)
|
def benchmark_text_messages_hmc(
num_results=int(3e3),
num_burnin_steps=int(3e3),
num_leapfrog_steps=3):
"""Runs HMC on the text-messages unnormalized posterior."""
if not tf.executing_eagerly():
tf.compat.v1.reset_default_graph()
# Build a static, pretend dataset.
count_data = tf.cast(
tf.concat(
[tfd.Poisson(rate=15.).sample(43),
tfd.Poisson(rate=25.).sample(31)],
axis=0),
dtype=tf.float32)
if tf.executing_eagerly():
count_data = count_data.numpy()
else:
with tf.compat.v1.Session():
count_data = count_data.eval()
# Define a closure over our joint_log_prob.
def unnormalized_log_posterior(lambda1, lambda2, tau):
return text_messages_joint_log_prob(count_data, lambda1, lambda2, tau)
if tf.executing_eagerly():
sample_chain = tf.function(tfp.mcmc.sample_chain)
else:
sample_chain = tfp.mcmc.sample_chain
# Initialize the step_size. (It will be automatically adapted.)
step_size = tf.compat.v2.Variable(
name='step_size',
initial_value=tf.constant(0.05, dtype=tf.float32),
trainable=False)
def computation():
"""The benchmark computation."""
initial_chain_state = [
tf.constant(count_data.mean(), name='init_lambda1'),
tf.constant(count_data.mean(), name='init_lambda2'),
tf.constant(0.5, name='init_tau'),
]
unconstraining_bijectors = [
tfp.bijectors.Exp(), # Maps a positive real to R.
tfp.bijectors.Exp(), # Maps a positive real to R.
tfp.bijectors.Sigmoid(), # Maps [0,1] to R.
]
_, kernel_results = sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=initial_chain_state,
kernel=tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_log_posterior,
num_leapfrog_steps=num_leapfrog_steps,
step_size=step_size,
step_size_update_fn=
tfp.mcmc.make_simple_step_size_update_policy(num_burnin_steps),
state_gradients_are_stopped=True),
bijector=unconstraining_bijectors))
return kernel_results.inner_results.is_accepted
# Let's force evaluation of graph to ensure build time is not part of our time
# trial.
is_accepted_tensor = computation()
if not tf.executing_eagerly():
session = tf.compat.v1.Session()
session.run(tf.compat.v1.global_variables_initializer())
session.run(is_accepted_tensor)
start_time = time.time()
if tf.executing_eagerly():
is_accepted = computation()
else:
is_accepted = session.run(is_accepted_tensor)
wall_time = time.time() - start_time
num_accepted = np.sum(is_accepted)
acceptance_rate = np.float32(num_accepted) / np.float32(num_results)
return dict(
iters=(num_results + num_burnin_steps) * num_leapfrog_steps,
extras={'acceptance_rate': acceptance_rate},
wall_time=wall_time)
|
[
"Runs",
"HMC",
"on",
"the",
"text",
"-",
"messages",
"unnormalized",
"posterior",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/text_messages_hmc.py#L64-L153
|
[
"def",
"benchmark_text_messages_hmc",
"(",
"num_results",
"=",
"int",
"(",
"3e3",
")",
",",
"num_burnin_steps",
"=",
"int",
"(",
"3e3",
")",
",",
"num_leapfrog_steps",
"=",
"3",
")",
":",
"if",
"not",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"tf",
".",
"compat",
".",
"v1",
".",
"reset_default_graph",
"(",
")",
"# Build a static, pretend dataset.",
"count_data",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"concat",
"(",
"[",
"tfd",
".",
"Poisson",
"(",
"rate",
"=",
"15.",
")",
".",
"sample",
"(",
"43",
")",
",",
"tfd",
".",
"Poisson",
"(",
"rate",
"=",
"25.",
")",
".",
"sample",
"(",
"31",
")",
"]",
",",
"axis",
"=",
"0",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"count_data",
"=",
"count_data",
".",
"numpy",
"(",
")",
"else",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"Session",
"(",
")",
":",
"count_data",
"=",
"count_data",
".",
"eval",
"(",
")",
"# Define a closure over our joint_log_prob.",
"def",
"unnormalized_log_posterior",
"(",
"lambda1",
",",
"lambda2",
",",
"tau",
")",
":",
"return",
"text_messages_joint_log_prob",
"(",
"count_data",
",",
"lambda1",
",",
"lambda2",
",",
"tau",
")",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"sample_chain",
"=",
"tf",
".",
"function",
"(",
"tfp",
".",
"mcmc",
".",
"sample_chain",
")",
"else",
":",
"sample_chain",
"=",
"tfp",
".",
"mcmc",
".",
"sample_chain",
"# Initialize the step_size. (It will be automatically adapted.)",
"step_size",
"=",
"tf",
".",
"compat",
".",
"v2",
".",
"Variable",
"(",
"name",
"=",
"'step_size'",
",",
"initial_value",
"=",
"tf",
".",
"constant",
"(",
"0.05",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
",",
"trainable",
"=",
"False",
")",
"def",
"computation",
"(",
")",
":",
"\"\"\"The benchmark computation.\"\"\"",
"initial_chain_state",
"=",
"[",
"tf",
".",
"constant",
"(",
"count_data",
".",
"mean",
"(",
")",
",",
"name",
"=",
"'init_lambda1'",
")",
",",
"tf",
".",
"constant",
"(",
"count_data",
".",
"mean",
"(",
")",
",",
"name",
"=",
"'init_lambda2'",
")",
",",
"tf",
".",
"constant",
"(",
"0.5",
",",
"name",
"=",
"'init_tau'",
")",
",",
"]",
"unconstraining_bijectors",
"=",
"[",
"tfp",
".",
"bijectors",
".",
"Exp",
"(",
")",
",",
"# Maps a positive real to R.",
"tfp",
".",
"bijectors",
".",
"Exp",
"(",
")",
",",
"# Maps a positive real to R.",
"tfp",
".",
"bijectors",
".",
"Sigmoid",
"(",
")",
",",
"# Maps [0,1] to R.",
"]",
"_",
",",
"kernel_results",
"=",
"sample_chain",
"(",
"num_results",
"=",
"num_results",
",",
"num_burnin_steps",
"=",
"num_burnin_steps",
",",
"current_state",
"=",
"initial_chain_state",
",",
"kernel",
"=",
"tfp",
".",
"mcmc",
".",
"TransformedTransitionKernel",
"(",
"inner_kernel",
"=",
"tfp",
".",
"mcmc",
".",
"HamiltonianMonteCarlo",
"(",
"target_log_prob_fn",
"=",
"unnormalized_log_posterior",
",",
"num_leapfrog_steps",
"=",
"num_leapfrog_steps",
",",
"step_size",
"=",
"step_size",
",",
"step_size_update_fn",
"=",
"tfp",
".",
"mcmc",
".",
"make_simple_step_size_update_policy",
"(",
"num_burnin_steps",
")",
",",
"state_gradients_are_stopped",
"=",
"True",
")",
",",
"bijector",
"=",
"unconstraining_bijectors",
")",
")",
"return",
"kernel_results",
".",
"inner_results",
".",
"is_accepted",
"# Let's force evaluation of graph to ensure build time is not part of our time",
"# trial.",
"is_accepted_tensor",
"=",
"computation",
"(",
")",
"if",
"not",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"session",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"Session",
"(",
")",
"session",
".",
"run",
"(",
"tf",
".",
"compat",
".",
"v1",
".",
"global_variables_initializer",
"(",
")",
")",
"session",
".",
"run",
"(",
"is_accepted_tensor",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"is_accepted",
"=",
"computation",
"(",
")",
"else",
":",
"is_accepted",
"=",
"session",
".",
"run",
"(",
"is_accepted_tensor",
")",
"wall_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
"num_accepted",
"=",
"np",
".",
"sum",
"(",
"is_accepted",
")",
"acceptance_rate",
"=",
"np",
".",
"float32",
"(",
"num_accepted",
")",
"/",
"np",
".",
"float32",
"(",
"num_results",
")",
"return",
"dict",
"(",
"iters",
"=",
"(",
"num_results",
"+",
"num_burnin_steps",
")",
"*",
"num_leapfrog_steps",
",",
"extras",
"=",
"{",
"'acceptance_rate'",
":",
"acceptance_rate",
"}",
",",
"wall_time",
"=",
"wall_time",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
GaussianProcess._is_univariate_marginal
|
True if the given index_points would yield a univariate marginal.
Args:
index_points: the set of index set locations at which to compute the
marginal Gaussian distribution. If this set is of size 1, the marginal is
univariate.
Returns:
is_univariate: Boolean indicating whether the marginal is univariate or
multivariate. In the case of dynamic shape in the number of index points,
defaults to "multivariate" since that's the best we can do.
|
tensorflow_probability/python/distributions/gaussian_process.py
|
def _is_univariate_marginal(self, index_points):
"""True if the given index_points would yield a univariate marginal.
Args:
index_points: the set of index set locations at which to compute the
marginal Gaussian distribution. If this set is of size 1, the marginal is
univariate.
Returns:
is_univariate: Boolean indicating whether the marginal is univariate or
multivariate. In the case of dynamic shape in the number of index points,
defaults to "multivariate" since that's the best we can do.
"""
num_index_points = tf.compat.dimension_value(
index_points.shape[-(self.kernel.feature_ndims + 1)])
if num_index_points is None:
warnings.warn(
'Unable to detect statically whether the number of index_points is '
'1. As a result, defaulting to treating the marginal GP at '
'`index_points` as a multivariate Gaussian. This makes some methods, '
'like `cdf` unavailable.')
return num_index_points == 1
|
def _is_univariate_marginal(self, index_points):
"""True if the given index_points would yield a univariate marginal.
Args:
index_points: the set of index set locations at which to compute the
marginal Gaussian distribution. If this set is of size 1, the marginal is
univariate.
Returns:
is_univariate: Boolean indicating whether the marginal is univariate or
multivariate. In the case of dynamic shape in the number of index points,
defaults to "multivariate" since that's the best we can do.
"""
num_index_points = tf.compat.dimension_value(
index_points.shape[-(self.kernel.feature_ndims + 1)])
if num_index_points is None:
warnings.warn(
'Unable to detect statically whether the number of index_points is '
'1. As a result, defaulting to treating the marginal GP at '
'`index_points` as a multivariate Gaussian. This makes some methods, '
'like `cdf` unavailable.')
return num_index_points == 1
|
[
"True",
"if",
"the",
"given",
"index_points",
"would",
"yield",
"a",
"univariate",
"marginal",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/gaussian_process.py#L286-L307
|
[
"def",
"_is_univariate_marginal",
"(",
"self",
",",
"index_points",
")",
":",
"num_index_points",
"=",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"index_points",
".",
"shape",
"[",
"-",
"(",
"self",
".",
"kernel",
".",
"feature_ndims",
"+",
"1",
")",
"]",
")",
"if",
"num_index_points",
"is",
"None",
":",
"warnings",
".",
"warn",
"(",
"'Unable to detect statically whether the number of index_points is '",
"'1. As a result, defaulting to treating the marginal GP at '",
"'`index_points` as a multivariate Gaussian. This makes some methods, '",
"'like `cdf` unavailable.'",
")",
"return",
"num_index_points",
"==",
"1"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
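A small illustration (made-up shapes, not from the dataset) of the axis `_is_univariate_marginal` inspects: with `feature_ndims = 1` and `index_points` of shape `[batch, e, feature_dim]`, the number of index points `e` sits at axis `-(feature_ndims + 1) = -2`:

```python
import numpy as np

feature_ndims = 1
index_points = np.zeros([3, 1, 5])  # [batch=3, e=1 index point, feature_dim=5]
num_index_points = index_points.shape[-(feature_ndims + 1)]
print(num_index_points == 1)        # True -> the marginal would be a univariate Normal
```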
test
|
GaussianProcess.get_marginal_distribution
|
Compute the marginal of this GP over function values at `index_points`.
Args:
index_points: `float` `Tensor` representing finite (batch of) vector(s) of
points in the index set over which the GP is defined. Shape has the form
`[b1, ..., bB, e, f1, ..., fF]` where `F` is the number of feature
dimensions and must equal `kernel.feature_ndims` and `e` is the number
(size) of index points in each batch. Ultimately this distribution
corresponds to an `e`-dimensional multivariate normal. The batch shape
must be broadcastable with `kernel.batch_shape` and any batch dims
yielded by `mean_fn`.
Returns:
marginal: a `Normal` or `MultivariateNormalLinearOperator` distribution,
according to whether `index_points` consists of one or many index
points, respectively.
|
tensorflow_probability/python/distributions/gaussian_process.py
|
def get_marginal_distribution(self, index_points=None):
"""Compute the marginal of this GP over function values at `index_points`.
Args:
index_points: `float` `Tensor` representing finite (batch of) vector(s) of
points in the index set over which the GP is defined. Shape has the form
`[b1, ..., bB, e, f1, ..., fF]` where `F` is the number of feature
dimensions and must equal `kernel.feature_ndims` and `e` is the number
(size) of index points in each batch. Ultimately this distribution
corresponds to an `e`-dimensional multivariate normal. The batch shape
must be broadcastable with `kernel.batch_shape` and any batch dims
yielded by `mean_fn`.
Returns:
marginal: a `Normal` or `MultivariateNormalLinearOperator` distribution,
according to whether `index_points` consists of one or many index
points, respectively.
"""
with self._name_scope('get_marginal_distribution'):
# TODO(cgs): consider caching the result here, keyed on `index_points`.
index_points = self._get_index_points(index_points)
covariance = self._compute_covariance(index_points)
loc = self._mean_fn(index_points)
# If we're sure the number of index points is 1, we can just construct a
# scalar Normal. This has computational benefits and supports things like
# CDF that aren't otherwise straightforward to provide.
if self._is_univariate_marginal(index_points):
scale = tf.sqrt(covariance)
# `loc` has a trailing 1 in the shape; squeeze it.
loc = tf.squeeze(loc, axis=-1)
return normal.Normal(
loc=loc,
scale=scale,
validate_args=self._validate_args,
allow_nan_stats=self._allow_nan_stats,
name='marginal_distribution')
else:
scale = tf.linalg.LinearOperatorLowerTriangular(
tf.linalg.cholesky(_add_diagonal_shift(covariance, self.jitter)),
is_non_singular=True,
name='GaussianProcessScaleLinearOperator')
return mvn_linear_operator.MultivariateNormalLinearOperator(
loc=loc,
scale=scale,
validate_args=self._validate_args,
allow_nan_stats=self._allow_nan_stats,
name='marginal_distribution')
|
def get_marginal_distribution(self, index_points=None):
"""Compute the marginal of this GP over function values at `index_points`.
Args:
index_points: `float` `Tensor` representing finite (batch of) vector(s) of
points in the index set over which the GP is defined. Shape has the form
`[b1, ..., bB, e, f1, ..., fF]` where `F` is the number of feature
dimensions and must equal `kernel.feature_ndims` and `e` is the number
(size) of index points in each batch. Ultimately this distribution
corresponds to an `e`-dimensional multivariate normal. The batch shape
must be broadcastable with `kernel.batch_shape` and any batch dims
yielded by `mean_fn`.
Returns:
marginal: a `Normal` or `MultivariateNormalLinearOperator` distribution,
according to whether `index_points` consists of one or many index
points, respectively.
"""
with self._name_scope('get_marginal_distribution'):
# TODO(cgs): consider caching the result here, keyed on `index_points`.
index_points = self._get_index_points(index_points)
covariance = self._compute_covariance(index_points)
loc = self._mean_fn(index_points)
# If we're sure the number of index points is 1, we can just construct a
# scalar Normal. This has computational benefits and supports things like
# CDF that aren't otherwise straightforward to provide.
if self._is_univariate_marginal(index_points):
scale = tf.sqrt(covariance)
# `loc` has a trailing 1 in the shape; squeeze it.
loc = tf.squeeze(loc, axis=-1)
return normal.Normal(
loc=loc,
scale=scale,
validate_args=self._validate_args,
allow_nan_stats=self._allow_nan_stats,
name='marginal_distribution')
else:
scale = tf.linalg.LinearOperatorLowerTriangular(
tf.linalg.cholesky(_add_diagonal_shift(covariance, self.jitter)),
is_non_singular=True,
name='GaussianProcessScaleLinearOperator')
return mvn_linear_operator.MultivariateNormalLinearOperator(
loc=loc,
scale=scale,
validate_args=self._validate_args,
allow_nan_stats=self._allow_nan_stats,
name='marginal_distribution')
|
[
"Compute",
"the",
"marginal",
"of",
"this",
"GP",
"over",
"function",
"values",
"at",
"index_points",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/gaussian_process.py#L320-L366
|
[
"def",
"get_marginal_distribution",
"(",
"self",
",",
"index_points",
"=",
"None",
")",
":",
"with",
"self",
".",
"_name_scope",
"(",
"'get_marginal_distribution'",
")",
":",
"# TODO(cgs): consider caching the result here, keyed on `index_points`.",
"index_points",
"=",
"self",
".",
"_get_index_points",
"(",
"index_points",
")",
"covariance",
"=",
"self",
".",
"_compute_covariance",
"(",
"index_points",
")",
"loc",
"=",
"self",
".",
"_mean_fn",
"(",
"index_points",
")",
"# If we're sure the number of index points is 1, we can just construct a",
"# scalar Normal. This has computational benefits and supports things like",
"# CDF that aren't otherwise straightforward to provide.",
"if",
"self",
".",
"_is_univariate_marginal",
"(",
"index_points",
")",
":",
"scale",
"=",
"tf",
".",
"sqrt",
"(",
"covariance",
")",
"# `loc` has a trailing 1 in the shape; squeeze it.",
"loc",
"=",
"tf",
".",
"squeeze",
"(",
"loc",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"normal",
".",
"Normal",
"(",
"loc",
"=",
"loc",
",",
"scale",
"=",
"scale",
",",
"validate_args",
"=",
"self",
".",
"_validate_args",
",",
"allow_nan_stats",
"=",
"self",
".",
"_allow_nan_stats",
",",
"name",
"=",
"'marginal_distribution'",
")",
"else",
":",
"scale",
"=",
"tf",
".",
"linalg",
".",
"LinearOperatorLowerTriangular",
"(",
"tf",
".",
"linalg",
".",
"cholesky",
"(",
"_add_diagonal_shift",
"(",
"covariance",
",",
"self",
".",
"jitter",
")",
")",
",",
"is_non_singular",
"=",
"True",
",",
"name",
"=",
"'GaussianProcessScaleLinearOperator'",
")",
"return",
"mvn_linear_operator",
".",
"MultivariateNormalLinearOperator",
"(",
"loc",
"=",
"loc",
",",
"scale",
"=",
"scale",
",",
"validate_args",
"=",
"self",
".",
"_validate_args",
",",
"allow_nan_stats",
"=",
"self",
".",
"_allow_nan_stats",
",",
"name",
"=",
"'marginal_distribution'",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
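A NumPy sketch of the jitter-then-Cholesky step used in the multivariate branch above, assuming (from its name) that `_add_diagonal_shift(covariance, jitter)` adds `jitter` to the covariance diagonal; the matrix below is made up:

```python
import numpy as np


def add_diagonal_shift(matrix, shift):
    # Assumed behavior of the private helper: bump the diagonal for numerical stability.
    return matrix + shift * np.eye(matrix.shape[-1])


cov = np.array([[1.0, 1.0],
                [1.0, 1.0]])        # rank-deficient; a plain Cholesky would fail
scale_tril = np.linalg.cholesky(add_diagonal_shift(cov, 1e-6))
print(scale_tril)                   # lower-triangular scale usable as an MVN parameter
```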
test
|
GaussianProcess._get_index_points
|
Return `index_points` if not None, else `self._index_points`.
Args:
index_points: if given, this is what is returned; else,
`self._index_points`
Returns:
index_points: the given arg, if not None, else the class member
`self._index_points`.
Raises:
ValueError: if `index_points` and `self._index_points` are both `None`.
|
tensorflow_probability/python/distributions/gaussian_process.py
|
def _get_index_points(self, index_points=None):
"""Return `index_points` if not None, else `self._index_points`.
Args:
index_points: if given, this is what is returned; else,
`self._index_points`
Returns:
index_points: the given arg, if not None, else the class member
`self._index_points`.
Raises:
ValueError: if `index_points` and `self._index_points` are both `None`.
"""
if self._index_points is None and index_points is None:
raise ValueError(
'This GaussianProcess instance was not instantiated with a value for '
'index_points. One must therefore be provided when calling sample, '
'log_prob, and other such methods. In particular, one can\'t compute '
'KL divergences to/from an instance of `GaussianProcess` with '
'unspecified `index_points` directly. Instead, use the '
'`get_marginal_distribution` function, which takes `index_points` as '
'an argument and returns a `Normal` or '
'`MultivariateNormalLinearOperator` instance, whose KL can be '
'computed.')
return index_points if index_points is not None else self._index_points
|
def _get_index_points(self, index_points=None):
"""Return `index_points` if not None, else `self._index_points`.
Args:
index_points: if given, this is what is returned; else,
`self._index_points`
Returns:
index_points: the given arg, if not None, else the class member
`self._index_points`.
Raises:
ValueError: if `index_points` and `self._index_points` are both `None`.
"""
if self._index_points is None and index_points is None:
raise ValueError(
'This GaussianProcess instance was not instantiated with a value for '
'index_points. One must therefore be provided when calling sample, '
'log_prob, and other such methods. In particular, one can\'t compute '
'KL divergences to/from an instance of `GaussianProcess` with '
'unspecified `index_points` directly. Instead, use the '
'`get_marginal_distribution` function, which takes `index_points` as '
'an argument and returns a `Normal` or '
'`MultivariateNormalLinearOperator` instance, whose KL can be '
'computed.')
return index_points if index_points is not None else self._index_points
|
[
"Return",
"index_points",
"if",
"not",
"None",
"else",
"self",
".",
"_index_points",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/gaussian_process.py#L388-L413
|
[
"def",
"_get_index_points",
"(",
"self",
",",
"index_points",
"=",
"None",
")",
":",
"if",
"self",
".",
"_index_points",
"is",
"None",
"and",
"index_points",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'This GaussianProcess instance was not instantiated with a value for '",
"'index_points. One must therefore be provided when calling sample, '",
"'log_prob, and other such methods. In particular, one can\\'t compute '",
"'KL divergences to/from an instance of `GaussianProccess` with '",
"'unspecified `index_points` directly. Instead, use the '",
"'`get_marginal_distribution` function, which takes `index_points` as '",
"'an argument and returns a `Normal` or '",
"'`MultivariateNormalLinearOperator` instance, whose KL can be '",
"'computed.'",
")",
"return",
"index_points",
"if",
"index_points",
"is",
"not",
"None",
"else",
"self",
".",
"_index_points"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_logsum_expbig_minus_expsmall
|
Stable evaluation of `Log[exp{big} - exp{small}]`.
To work correctly, we should have the pointwise relation: `small <= big`.
Args:
big: Floating-point `Tensor`
small: Floating-point `Tensor` with same `dtype` as `big` and broadcastable
shape.
Returns:
`Tensor` of same `dtype` of `big` and broadcast shape.
|
tensorflow_probability/python/distributions/quantized_distribution.py
|
def _logsum_expbig_minus_expsmall(big, small):
"""Stable evaluation of `Log[exp{big} - exp{small}]`.
To work correctly, we should have the pointwise relation: `small <= big`.
Args:
big: Floating-point `Tensor`
small: Floating-point `Tensor` with same `dtype` as `big` and broadcastable
shape.
Returns:
`Tensor` of same `dtype` of `big` and broadcast shape.
"""
with tf.name_scope("logsum_expbig_minus_expsmall"):
return tf.math.log1p(-tf.exp(small - big)) + big
|
def _logsum_expbig_minus_expsmall(big, small):
"""Stable evaluation of `Log[exp{big} - exp{small}]`.
To work correctly, we should have the pointwise relation: `small <= big`.
Args:
big: Floating-point `Tensor`
small: Floating-point `Tensor` with same `dtype` as `big` and broadcastable
shape.
Returns:
`Tensor` of same `dtype` of `big` and broadcast shape.
"""
with tf.name_scope("logsum_expbig_minus_expsmall"):
return tf.math.log1p(-tf.exp(small - big)) + big
|
[
"Stable",
"evaluation",
"of",
"Log",
"[",
"exp",
"{",
"big",
"}",
"-",
"exp",
"{",
"small",
"}",
"]",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/quantized_distribution.py#L35-L49
|
[
"def",
"_logsum_expbig_minus_expsmall",
"(",
"big",
",",
"small",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"logsum_expbig_minus_expsmall\"",
")",
":",
"return",
"tf",
".",
"math",
".",
"log1p",
"(",
"-",
"tf",
".",
"exp",
"(",
"small",
"-",
"big",
")",
")",
"+",
"big"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
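A hedged NumPy check (not part of the row above) of the identity behind the helper, `Log[exp(big) - exp(small)] = big + log1p(-exp(small - big))`, which remains finite even when `exp(big)` itself underflows:

```python
import numpy as np


def logsum_expbig_minus_expsmall_np(big, small):
    # Stable log(exp(big) - exp(small)); requires small <= big elementwise.
    return np.log1p(-np.exp(small - big)) + big


big = np.array([0.0, -1000.0])
small = np.array([-1.0, -1001.0])
print(logsum_expbig_minus_expsmall_np(big, small))   # [-0.45867515, -1000.45867515]
# A naive np.log(np.exp(big) - np.exp(small)) returns -inf for the second pair.
```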
test
|
QuantizedDistribution._log_prob_with_logsf_and_logcdf
|
Compute log_prob(y) using log survival_function and cdf together.
|
tensorflow_probability/python/distributions/quantized_distribution.py
|
def _log_prob_with_logsf_and_logcdf(self, y):
"""Compute log_prob(y) using log survival_function and cdf together."""
# There are two options that would be equal if we had infinite precision:
# Log[ sf(y - 1) - sf(y) ]
# = Log[ exp{logsf(y - 1)} - exp{logsf(y)} ]
# Log[ cdf(y) - cdf(y - 1) ]
# = Log[ exp{logcdf(y)} - exp{logcdf(y - 1)} ]
logsf_y = self.log_survival_function(y)
logsf_y_minus_1 = self.log_survival_function(y - 1)
logcdf_y = self.log_cdf(y)
logcdf_y_minus_1 = self.log_cdf(y - 1)
# Important: Here we use select in a way such that no input is inf, this
# prevents the troublesome case where the output of select can be finite,
# but the output of grad(select) will be NaN.
# In either case, we are doing Log[ exp{big} - exp{small} ]
# We want to use the sf items precisely when we are on the right side of the
# median, which occurs when logsf_y < logcdf_y.
big = tf.where(logsf_y < logcdf_y, logsf_y_minus_1, logcdf_y)
small = tf.where(logsf_y < logcdf_y, logsf_y, logcdf_y_minus_1)
return _logsum_expbig_minus_expsmall(big, small)
|
def _log_prob_with_logsf_and_logcdf(self, y):
"""Compute log_prob(y) using log survival_function and cdf together."""
# There are two options that would be equal if we had infinite precision:
# Log[ sf(y - 1) - sf(y) ]
# = Log[ exp{logsf(y - 1)} - exp{logsf(y)} ]
# Log[ cdf(y) - cdf(y - 1) ]
# = Log[ exp{logcdf(y)} - exp{logcdf(y - 1)} ]
logsf_y = self.log_survival_function(y)
logsf_y_minus_1 = self.log_survival_function(y - 1)
logcdf_y = self.log_cdf(y)
logcdf_y_minus_1 = self.log_cdf(y - 1)
# Important: Here we use select in a way such that no input is inf, this
# prevents the troublesome case where the output of select can be finite,
# but the output of grad(select) will be NaN.
# In either case, we are doing Log[ exp{big} - exp{small} ]
# We want to use the sf items precisely when we are on the right side of the
# median, which occurs when logsf_y < logcdf_y.
big = tf.where(logsf_y < logcdf_y, logsf_y_minus_1, logcdf_y)
small = tf.where(logsf_y < logcdf_y, logsf_y, logcdf_y_minus_1)
return _logsum_expbig_minus_expsmall(big, small)
|
[
"Compute",
"log_prob",
"(",
"y",
")",
"using",
"log",
"survival_function",
"and",
"cdf",
"together",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/quantized_distribution.py#L372-L394
|
[
"def",
"_log_prob_with_logsf_and_logcdf",
"(",
"self",
",",
"y",
")",
":",
"# There are two options that would be equal if we had infinite precision:",
"# Log[ sf(y - 1) - sf(y) ]",
"# = Log[ exp{logsf(y - 1)} - exp{logsf(y)} ]",
"# Log[ cdf(y) - cdf(y - 1) ]",
"# = Log[ exp{logcdf(y)} - exp{logcdf(y - 1)} ]",
"logsf_y",
"=",
"self",
".",
"log_survival_function",
"(",
"y",
")",
"logsf_y_minus_1",
"=",
"self",
".",
"log_survival_function",
"(",
"y",
"-",
"1",
")",
"logcdf_y",
"=",
"self",
".",
"log_cdf",
"(",
"y",
")",
"logcdf_y_minus_1",
"=",
"self",
".",
"log_cdf",
"(",
"y",
"-",
"1",
")",
"# Important: Here we use select in a way such that no input is inf, this",
"# prevents the troublesome case where the output of select can be finite,",
"# but the output of grad(select) will be NaN.",
"# In either case, we are doing Log[ exp{big} - exp{small} ]",
"# We want to use the sf items precisely when we are on the right side of the",
"# median, which occurs when logsf_y < logcdf_y.",
"big",
"=",
"tf",
".",
"where",
"(",
"logsf_y",
"<",
"logcdf_y",
",",
"logsf_y_minus_1",
",",
"logcdf_y",
")",
"small",
"=",
"tf",
".",
"where",
"(",
"logsf_y",
"<",
"logcdf_y",
",",
"logsf_y",
",",
"logcdf_y_minus_1",
")",
"return",
"_logsum_expbig_minus_expsmall",
"(",
"big",
",",
"small",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
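A hedged NumPy/SciPy sketch (illustrative, not the TFP code) of the idea in the row above, for a standard normal quantized to integers: `log_prob(y) = Log[cdf(y) - cdf(y - 1)]`, evaluated through the stable `Log[exp(big) - exp(small)]` form and switching to the survival-function pair to the right of the median:

```python
import numpy as np
from scipy.stats import norm


def quantized_normal_log_prob(y):
    logsf_y, logsf_ym1 = norm.logsf(y), norm.logsf(y - 1)
    logcdf_y, logcdf_ym1 = norm.logcdf(y), norm.logcdf(y - 1)
    # Use the survival-function pair when logsf_y < logcdf_y (right of the median),
    # and the CDF pair otherwise, so neither exp() argument is degenerate.
    big = np.where(logsf_y < logcdf_y, logsf_ym1, logcdf_y)
    small = np.where(logsf_y < logcdf_y, logsf_y, logcdf_ym1)
    return np.log1p(-np.exp(small - big)) + big


y = np.array([-10.0, 0.0, 10.0])
print(quantized_normal_log_prob(y))  # finite in both tails, unlike log(cdf(y) - cdf(y - 1))
```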
test
|
make_iaf_stack
|
Creates a stacked IAF bijector.
This bijector operates on vector-valued events.
Args:
total_event_size: Number of dimensions to operate over.
num_hidden_layers: How many hidden layers to use in each IAF.
seed: Random seed for the initializers.
dtype: DType for the variables.
Returns:
bijector: The created bijector.
|
experimental/neutra/neutra_kernel.py
|
def make_iaf_stack(total_event_size,
num_hidden_layers=2,
seed=None,
dtype=tf.float32):
"""Creates an stacked IAF bijector.
This bijector operates on vector-valued events.
Args:
total_event_size: Number of dimensions to operate over.
num_hidden_layers: How many hidden layers to use in each IAF.
seed: Random seed for the initializers.
dtype: DType for the variables.
Returns:
bijector: The created bijector.
"""
seed = tfd.SeedStream(seed, 'make_iaf_stack')
def make_iaf():
"""Create an IAF."""
initializer = tf.compat.v2.keras.initializers.VarianceScaling(
2 * 0.01, seed=seed() % (2**31 - 1))
made = tfb.AutoregressiveLayer(
params=2,
event_shape=[total_event_size],
hidden_units=[total_event_size] * num_hidden_layers,
activation=tf.nn.elu,
kernel_initializer=initializer,
dtype=dtype)
def shift_and_scale(x):
# TODO(siege): Something is losing the static shape.
x.set_shape(
x.shape.merge_with([None] * (x.shape.ndims - 1) + [total_event_size]))
return tf.unstack(made(x), num=2, axis=-1)
return tfb.Invert(tfb.MaskedAutoregressiveFlow(shift_and_scale))
def make_swap():
"""Create an swap."""
permutation = list(reversed(range(total_event_size)))
return tfb.Permute(permutation)
bijector = make_iaf()
bijector = make_swap()(bijector)
bijector = make_iaf()(bijector)
bijector = make_swap()(bijector)
bijector = make_iaf()(bijector)
bijector = make_swap()(bijector)
return bijector
|
def make_iaf_stack(total_event_size,
num_hidden_layers=2,
seed=None,
dtype=tf.float32):
"""Creates an stacked IAF bijector.
This bijector operates on vector-valued events.
Args:
total_event_size: Number of dimensions to operate over.
num_hidden_layers: How many hidden layers to use in each IAF.
seed: Random seed for the initializers.
dtype: DType for the variables.
Returns:
bijector: The created bijector.
"""
seed = tfd.SeedStream(seed, 'make_iaf_stack')
def make_iaf():
"""Create an IAF."""
initializer = tf.compat.v2.keras.initializers.VarianceScaling(
2 * 0.01, seed=seed() % (2**31 - 1))
made = tfb.AutoregressiveLayer(
params=2,
event_shape=[total_event_size],
hidden_units=[total_event_size] * num_hidden_layers,
activation=tf.nn.elu,
kernel_initializer=initializer,
dtype=dtype)
def shift_and_scale(x):
# TODO(siege): Something is losing the static shape.
x.set_shape(
x.shape.merge_with([None] * (x.shape.ndims - 1) + [total_event_size]))
return tf.unstack(made(x), num=2, axis=-1)
return tfb.Invert(tfb.MaskedAutoregressiveFlow(shift_and_scale))
def make_swap():
"""Create an swap."""
permutation = list(reversed(range(total_event_size)))
return tfb.Permute(permutation)
bijector = make_iaf()
bijector = make_swap()(bijector)
bijector = make_iaf()(bijector)
bijector = make_swap()(bijector)
bijector = make_iaf()(bijector)
bijector = make_swap()(bijector)
return bijector
|
[
"Creates",
"an",
"stacked",
"IAF",
"bijector",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/neutra/neutra_kernel.py#L33-L86
|
[
"def",
"make_iaf_stack",
"(",
"total_event_size",
",",
"num_hidden_layers",
"=",
"2",
",",
"seed",
"=",
"None",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
":",
"seed",
"=",
"tfd",
".",
"SeedStream",
"(",
"seed",
",",
"'make_iaf_stack'",
")",
"def",
"make_iaf",
"(",
")",
":",
"\"\"\"Create an IAF.\"\"\"",
"initializer",
"=",
"tf",
".",
"compat",
".",
"v2",
".",
"keras",
".",
"initializers",
".",
"VarianceScaling",
"(",
"2",
"*",
"0.01",
",",
"seed",
"=",
"seed",
"(",
")",
"%",
"(",
"2",
"**",
"31",
"-",
"1",
")",
")",
"made",
"=",
"tfb",
".",
"AutoregressiveLayer",
"(",
"params",
"=",
"2",
",",
"event_shape",
"=",
"[",
"total_event_size",
"]",
",",
"hidden_units",
"=",
"[",
"total_event_size",
"]",
"*",
"num_hidden_layers",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"elu",
",",
"kernel_initializer",
"=",
"initializer",
",",
"dtype",
"=",
"dtype",
")",
"def",
"shift_and_scale",
"(",
"x",
")",
":",
"# TODO(siege): Something is losing the static shape.",
"x",
".",
"set_shape",
"(",
"x",
".",
"shape",
".",
"merge_with",
"(",
"[",
"None",
"]",
"*",
"(",
"x",
".",
"shape",
".",
"ndims",
"-",
"1",
")",
"+",
"[",
"total_event_size",
"]",
")",
")",
"return",
"tf",
".",
"unstack",
"(",
"made",
"(",
"x",
")",
",",
"num",
"=",
"2",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"tfb",
".",
"Invert",
"(",
"tfb",
".",
"MaskedAutoregressiveFlow",
"(",
"shift_and_scale",
")",
")",
"def",
"make_swap",
"(",
")",
":",
"\"\"\"Create an swap.\"\"\"",
"permutation",
"=",
"list",
"(",
"reversed",
"(",
"range",
"(",
"total_event_size",
")",
")",
")",
"return",
"tfb",
".",
"Permute",
"(",
"permutation",
")",
"bijector",
"=",
"make_iaf",
"(",
")",
"bijector",
"=",
"make_swap",
"(",
")",
"(",
"bijector",
")",
"bijector",
"=",
"make_iaf",
"(",
")",
"(",
"bijector",
")",
"bijector",
"=",
"make_swap",
"(",
")",
"(",
"bijector",
")",
"bijector",
"=",
"make_iaf",
"(",
")",
"(",
"bijector",
")",
"bijector",
"=",
"make_swap",
"(",
")",
"(",
"bijector",
")",
"return",
"bijector"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
NeuTra.one_step
|
Runs one iteration of NeuTra.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
previous_kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from previous calls to this function (or from the
`bootstrap_results` function.)
Returns:
next_state: Tensor or Python list of `Tensor`s representing the state(s)
of the Markov chain(s) after taking exactly one step. Has same type and
shape as `current_state`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
|
experimental/neutra/neutra_kernel.py
|
def one_step(self, current_state, previous_kernel_results):
"""Runs one iteration of NeuTra.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
previous_kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from previous calls to this function (or from the
`bootstrap_results` function.)
Returns:
next_state: Tensor or Python list of `Tensor`s representing the state(s)
of the Markov chain(s) after taking exactly one step. Has same type and
shape as `current_state`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
"""
@tfp.mcmc.internal.util.make_innermost_setter
def set_num_leapfrog_steps(kernel_results, num_leapfrog_steps):
return kernel_results._replace(
accepted_results=kernel_results.accepted_results._replace(
num_leapfrog_steps=num_leapfrog_steps))
step_size = previous_kernel_results.new_step_size
previous_kernel_results = set_num_leapfrog_steps(
previous_kernel_results, self._num_leapfrog_steps(step_size))
new_state, kernel_results = self._kernel.one_step(
self._flatten_state(current_state), previous_kernel_results)
return self._unflatten_state(new_state), kernel_results
|
def one_step(self, current_state, previous_kernel_results):
"""Runs one iteration of NeuTra.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
previous_kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from previous calls to this function (or from the
`bootstrap_results` function.)
Returns:
next_state: Tensor or Python list of `Tensor`s representing the state(s)
of the Markov chain(s) after taking exactly one step. Has same type and
shape as `current_state`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
"""
@tfp.mcmc.internal.util.make_innermost_setter
def set_num_leapfrog_steps(kernel_results, num_leapfrog_steps):
return kernel_results._replace(
accepted_results=kernel_results.accepted_results._replace(
num_leapfrog_steps=num_leapfrog_steps))
step_size = previous_kernel_results.new_step_size
previous_kernel_results = set_num_leapfrog_steps(
previous_kernel_results, self._num_leapfrog_steps(step_size))
new_state, kernel_results = self._kernel.one_step(
self._flatten_state(current_state), previous_kernel_results)
return self._unflatten_state(new_state), kernel_results
|
[
"Runs",
"one",
"iteration",
"of",
"NeuTra",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/neutra/neutra_kernel.py#L350-L381
|
[
"def",
"one_step",
"(",
"self",
",",
"current_state",
",",
"previous_kernel_results",
")",
":",
"@",
"tfp",
".",
"mcmc",
".",
"internal",
".",
"util",
".",
"make_innermost_setter",
"def",
"set_num_leapfrog_steps",
"(",
"kernel_results",
",",
"num_leapfrog_steps",
")",
":",
"return",
"kernel_results",
".",
"_replace",
"(",
"accepted_results",
"=",
"kernel_results",
".",
"accepted_results",
".",
"_replace",
"(",
"num_leapfrog_steps",
"=",
"num_leapfrog_steps",
")",
")",
"step_size",
"=",
"previous_kernel_results",
".",
"new_step_size",
"previous_kernel_results",
"=",
"set_num_leapfrog_steps",
"(",
"previous_kernel_results",
",",
"self",
".",
"_num_leapfrog_steps",
"(",
"step_size",
")",
")",
"new_state",
",",
"kernel_results",
"=",
"self",
".",
"_kernel",
".",
"one_step",
"(",
"self",
".",
"_flatten_state",
"(",
"current_state",
")",
",",
"previous_kernel_results",
")",
"return",
"self",
".",
"_unflatten_state",
"(",
"new_state",
")",
",",
"kernel_results"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
NeuTra.bootstrap_results
|
Trains the bijector and creates initial `previous_kernel_results`.
The supplied `state` is only used to determine the number of chains to run
in parallel_iterations.
Args:
state: `Tensor` or Python `list` of `Tensor`s representing the initial
state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*state))`.
Returns:
kernel_results: Instance of
`UncalibratedHamiltonianMonteCarloKernelResults` inside
`MetropolisHastingsResults` inside `TransformedTransitionKernelResults`
inside `SimpleStepSizeAdaptationResults`.
|
experimental/neutra/neutra_kernel.py
|
def bootstrap_results(self, state):
"""Trains the bijector and creates initial `previous_kernel_results`.
The supplied `state` is only used to determine the number of chains to run
in parallel_iterations.
Args:
state: `Tensor` or Python `list` of `Tensor`s representing the initial
state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*state))`.
Returns:
kernel_results: Instance of
`UncalibratedHamiltonianMonteCarloKernelResults` inside
`MetropolisHastingsResults` inside `TransformedTransitionKernelResults`
inside `SimpleStepSizeAdaptationResults`.
"""
def loss():
q = self._flattened_variational_distribution()
# TODO(siege): How to seed this?
samples = q.sample(self.train_batch_size)
return tf.reduce_mean(
input_tensor=q.log_prob(samples) -
self._flattened_target_log_prob(samples),
axis=-1)
lr = tf.convert_to_tensor(value=self.learning_rate, dtype=self._dtype)
dtype = lr.dtype
learning_rate = tf.compat.v2.optimizers.schedules.PiecewiseConstantDecay(
list(self.num_train_steps *
np.array([0.2, 0.8]).astype(dtype.as_numpy_dtype())),
[lr, lr * 0.1, lr * 0.01])
opt = tf.compat.v2.optimizers.Adam(learning_rate)
@tf.function(autograph=False)
def train_step():
with tf.GradientTape() as tape:
loss_val = loss()
vals = tape.watched_variables()
grads = tape.gradient(loss_val, vals)
grads_and_vals = list(zip(grads, vals))
opt.apply_gradients(grads_and_vals)
return loss_val
for step in range(self.num_train_steps):
loss_val = train_step()
tf.debugging.assert_all_finite(
loss_val, 'NeuTra loss is NaN at step {}'.format(step))
if self.train_debug_fn:
# pylint: disable=not-callable
self.train_debug_fn(self, step, loss_val)
state_parts = tf.nest.flatten(state)
flat_state_shapes = tf.nest.flatten(self.state_shape)
batch_shape = tf.shape(input=state_parts[0])[:-flat_state_shapes[0].ndims]
return self._kernel.bootstrap_results(
self._flattened_variational_distribution().sample(
batch_shape, seed=self.seed))
|
def bootstrap_results(self, state):
"""Trains the bijector and creates initial `previous_kernel_results`.
The supplied `state` is only used to determine the number of chains to run
in parallel_iterations
Args:
state: `Tensor` or Python `list` of `Tensor`s representing the initial
state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*state))`.
Returns:
kernel_results: Instance of
`UncalibratedHamiltonianMonteCarloKernelResults` inside
`MetropolisHastingsResults` inside `TransformedTransitionKernelResults`
inside `SimpleStepSizeAdaptationResults`.
"""
def loss():
q = self._flattened_variational_distribution()
# TODO(siege): How to seed this?
samples = q.sample(self.train_batch_size)
return tf.reduce_mean(
input_tensor=q.log_prob(samples) -
self._flattened_target_log_prob(samples),
axis=-1)
lr = tf.convert_to_tensor(value=self.learning_rate, dtype=self._dtype)
dtype = lr.dtype
learning_rate = tf.compat.v2.optimizers.schedules.PiecewiseConstantDecay(
list(self.num_train_steps *
np.array([0.2, 0.8]).astype(dtype.as_numpy_dtype())),
[lr, lr * 0.1, lr * 0.01])
opt = tf.compat.v2.optimizers.Adam(learning_rate)
@tf.function(autograph=False)
def train_step():
with tf.GradientTape() as tape:
loss_val = loss()
vals = tape.watched_variables()
grads = tape.gradient(loss_val, vals)
grads_and_vals = list(zip(grads, vals))
opt.apply_gradients(grads_and_vals)
return loss_val
for step in range(self.num_train_steps):
loss_val = train_step()
tf.debugging.assert_all_finite(
loss_val, 'NeuTra loss is NaN at step {}'.format(step))
if self.train_debug_fn:
# pylint: disable=not-callable
self.train_debug_fn(self, step, loss_val)
state_parts = tf.nest.flatten(state)
flat_state_shapes = tf.nest.flatten(self.state_shape)
batch_shape = tf.shape(input=state_parts[0])[:-flat_state_shapes[0].ndims]
return self._kernel.bootstrap_results(
self._flattened_variational_distribution().sample(
batch_shape, seed=self.seed))
|
[
"Trains",
"the",
"bijector",
"and",
"creates",
"initial",
"previous_kernel_results",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/neutra/neutra_kernel.py#L383-L444
|
[
"def",
"bootstrap_results",
"(",
"self",
",",
"state",
")",
":",
"def",
"loss",
"(",
")",
":",
"q",
"=",
"self",
".",
"_flattened_variational_distribution",
"(",
")",
"# TODO(siege): How to seed this?",
"samples",
"=",
"q",
".",
"sample",
"(",
"self",
".",
"train_batch_size",
")",
"return",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"q",
".",
"log_prob",
"(",
"samples",
")",
"-",
"self",
".",
"_flattened_target_log_prob",
"(",
"samples",
")",
",",
"axis",
"=",
"-",
"1",
")",
"lr",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"self",
".",
"learning_rate",
",",
"dtype",
"=",
"self",
".",
"_dtype",
")",
"dtype",
"=",
"lr",
".",
"dtype",
"learning_rate",
"=",
"tf",
".",
"compat",
".",
"v2",
".",
"optimizers",
".",
"schedules",
".",
"PiecewiseConstantDecay",
"(",
"list",
"(",
"self",
".",
"num_train_steps",
"*",
"np",
".",
"array",
"(",
"[",
"0.2",
",",
"0.8",
"]",
")",
".",
"astype",
"(",
"dtype",
".",
"as_numpy_dtype",
"(",
")",
")",
")",
",",
"[",
"lr",
",",
"lr",
"*",
"0.1",
",",
"lr",
"*",
"0.01",
"]",
")",
"opt",
"=",
"tf",
".",
"compat",
".",
"v2",
".",
"optimizers",
".",
"Adam",
"(",
"learning_rate",
")",
"@",
"tf",
".",
"function",
"(",
"autograph",
"=",
"False",
")",
"def",
"train_step",
"(",
")",
":",
"with",
"tf",
".",
"GradientTape",
"(",
")",
"as",
"tape",
":",
"loss_val",
"=",
"loss",
"(",
")",
"vals",
"=",
"tape",
".",
"watched_variables",
"(",
")",
"grads",
"=",
"tape",
".",
"gradient",
"(",
"loss_val",
",",
"vals",
")",
"grads_and_vals",
"=",
"list",
"(",
"zip",
"(",
"grads",
",",
"vals",
")",
")",
"opt",
".",
"apply_gradients",
"(",
"grads_and_vals",
")",
"return",
"loss_val",
"for",
"step",
"in",
"range",
"(",
"self",
".",
"num_train_steps",
")",
":",
"loss_val",
"=",
"train_step",
"(",
")",
"tf",
".",
"debugging",
".",
"assert_all_finite",
"(",
"loss_val",
",",
"'NeuTra loss is NaN at step {}'",
".",
"format",
"(",
"step",
")",
")",
"if",
"self",
".",
"train_debug_fn",
":",
"# pylint: disable=not-callable",
"self",
".",
"train_debug_fn",
"(",
"self",
",",
"step",
",",
"loss_val",
")",
"state_parts",
"=",
"tf",
".",
"nest",
".",
"flatten",
"(",
"state",
")",
"flat_state_shapes",
"=",
"tf",
".",
"nest",
".",
"flatten",
"(",
"self",
".",
"state_shape",
")",
"batch_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"state_parts",
"[",
"0",
"]",
")",
"[",
":",
"-",
"flat_state_shapes",
"[",
"0",
"]",
".",
"ndims",
"]",
"return",
"self",
".",
"_kernel",
".",
"bootstrap_results",
"(",
"self",
".",
"_flattened_variational_distribution",
"(",
")",
".",
"sample",
"(",
"batch_shape",
",",
"seed",
"=",
"self",
".",
"seed",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
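Note on the record above: the training loop minimizes a Monte-Carlo estimate of the reverse KL, `E_q[log q(z) - log p(z)]`, with Adam and a piecewise-constant learning-rate decay at 20%/80% of the training steps. The standalone sketch below illustrates only that loss/schedule structure, assuming a toy mean-field Normal `q` and a made-up `target_log_prob`; none of these names or constants come from the record itself.

```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

# Trainable variational parameters of a toy mean-field Normal q.
loc = tf.Variable(tf.zeros(3))
log_scale = tf.Variable(tf.zeros(3))

def target_log_prob(z):
  # Placeholder target; the real kernel uses the model's joint log-prob.
  return tfd.MultivariateNormalDiag(loc=tf.ones(3)).log_prob(z)

# Piecewise-constant decay mirroring the 0.2 / 0.8 boundaries in the record.
lr = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
    boundaries=[40, 160], values=[1e-2, 1e-3, 1e-4])
opt = tf.keras.optimizers.Adam(lr)

for _ in range(200):
  with tf.GradientTape() as tape:
    q = tfd.MultivariateNormalDiag(loc=loc, scale_diag=tf.exp(log_scale))
    z = q.sample(64)
    # Reverse-KL-style objective, same shape as the loss() in the record.
    loss = tf.reduce_mean(q.log_prob(z) - target_log_prob(z))
  grads = tape.gradient(loss, [loc, log_scale])
  opt.apply_gradients(zip(grads, [loc, log_scale]))
```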
|
test
|
_WishartLinearOperator.mean_log_det
|
Computes E[log(det(X))] under this Wishart distribution.
|
tensorflow_probability/python/distributions/wishart.py
|
def mean_log_det(self, name="mean_log_det"):
"""Computes E[log(det(X))] under this Wishart distribution."""
with self._name_scope(name):
return (self._multi_digamma(0.5 * self.df, self.dimension) +
self.dimension * math.log(2.) +
2 * self.scale_operator.log_abs_determinant())
|
def mean_log_det(self, name="mean_log_det"):
"""Computes E[log(det(X))] under this Wishart distribution."""
with self._name_scope(name):
return (self._multi_digamma(0.5 * self.df, self.dimension) +
self.dimension * math.log(2.) +
2 * self.scale_operator.log_abs_determinant())
|
[
"Computes",
"E",
"[",
"log",
"(",
"det",
"(",
"X",
"))",
"]",
"under",
"this",
"Wishart",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/wishart.py#L402-L407
|
[
"def",
"mean_log_det",
"(",
"self",
",",
"name",
"=",
"\"mean_log_det\"",
")",
":",
"with",
"self",
".",
"_name_scope",
"(",
"name",
")",
":",
"return",
"(",
"self",
".",
"_multi_digamma",
"(",
"0.5",
"*",
"self",
".",
"df",
",",
"self",
".",
"dimension",
")",
"+",
"self",
".",
"dimension",
"*",
"math",
".",
"log",
"(",
"2.",
")",
"+",
"2",
"*",
"self",
".",
"scale_operator",
".",
"log_abs_determinant",
"(",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
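As a hedged numeric companion to the identity above: `E[log det X]` for a Wishart with `df` degrees of freedom and full scale matrix `V` is the multivariate digamma at `df/2` plus `p * log 2 + log det V`. The record's `2 * log_abs_determinant()` term arises because `scale_operator` is a square-root factor of `V`. The SciPy sketch below uses made-up `df` and `scale` values.

```python
import numpy as np
from scipy.special import digamma

def wishart_mean_log_det(df, scale):
  p = scale.shape[-1]
  # Multivariate digamma: sum of digamma(df/2 - i/2) for i = 0..p-1.
  multi_digamma = sum(digamma(0.5 * df - 0.5 * i) for i in range(p))
  return multi_digamma + p * np.log(2.) + np.log(np.linalg.det(scale))

scale = np.array([[2.0, 0.3],
                  [0.3, 1.0]])
print(wishart_mean_log_det(df=5.0, scale=scale))
```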
|
test
|
_WishartLinearOperator.log_normalization
|
Computes the log normalizing constant, log(Z).
|
tensorflow_probability/python/distributions/wishart.py
|
def log_normalization(self, name="log_normalization"):
"""Computes the log normalizing constant, log(Z)."""
with self._name_scope(name):
return (self.df * self.scale_operator.log_abs_determinant() +
0.5 * self.df * self.dimension * math.log(2.) +
self._multi_lgamma(0.5 * self.df, self.dimension))
|
def log_normalization(self, name="log_normalization"):
"""Computes the log normalizing constant, log(Z)."""
with self._name_scope(name):
return (self.df * self.scale_operator.log_abs_determinant() +
0.5 * self.df * self.dimension * math.log(2.) +
self._multi_lgamma(0.5 * self.df, self.dimension))
|
[
"Computes",
"the",
"log",
"normalizing",
"constant",
"log",
"(",
"Z",
")",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/wishart.py#L409-L414
|
[
"def",
"log_normalization",
"(",
"self",
",",
"name",
"=",
"\"log_normalization\"",
")",
":",
"with",
"self",
".",
"_name_scope",
"(",
"name",
")",
":",
"return",
"(",
"self",
".",
"df",
"*",
"self",
".",
"scale_operator",
".",
"log_abs_determinant",
"(",
")",
"+",
"0.5",
"*",
"self",
".",
"df",
"*",
"self",
".",
"dimension",
"*",
"math",
".",
"log",
"(",
"2.",
")",
"+",
"self",
".",
"_multi_lgamma",
"(",
"0.5",
"*",
"self",
".",
"df",
",",
"self",
".",
"dimension",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
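A similar hedged cross-check for the normalizer: with the full scale matrix `V`, the record's `df * log_abs_determinant()` of the square-root operator equals `0.5 * df * log det V`, and the multivariate log-gamma term is available as SciPy's `multigammaln`. All inputs below are illustrative.

```python
import numpy as np
from scipy.special import multigammaln

def wishart_log_normalization(df, scale):
  p = scale.shape[-1]
  return (0.5 * df * np.log(np.linalg.det(scale)) +
          0.5 * df * p * np.log(2.) +
          multigammaln(0.5 * df, p))

print(wishart_log_normalization(df=5.0, scale=np.eye(3)))
```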
|
test
|
_WishartLinearOperator._multi_gamma_sequence
|
Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p].
|
tensorflow_probability/python/distributions/wishart.py
|
def _multi_gamma_sequence(self, a, p, name="multi_gamma_sequence"):
"""Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p]."""
with self._name_scope(name):
# Linspace only takes scalars, so we'll add in the offset afterwards.
seq = tf.linspace(
tf.constant(0., dtype=self.dtype), 0.5 - 0.5 * p, tf.cast(
p, tf.int32))
return seq + tf.expand_dims(a, [-1])
|
def _multi_gamma_sequence(self, a, p, name="multi_gamma_sequence"):
"""Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p]."""
with self._name_scope(name):
# Linspace only takes scalars, so we'll add in the offset afterwards.
seq = tf.linspace(
tf.constant(0., dtype=self.dtype), 0.5 - 0.5 * p, tf.cast(
p, tf.int32))
return seq + tf.expand_dims(a, [-1])
|
[
"Creates",
"sequence",
"used",
"in",
"multivariate",
"(",
"di",
")",
"gamma",
";",
"shape",
"=",
"shape",
"(",
"a",
")",
"+",
"[",
"p",
"]",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/wishart.py#L416-L423
|
[
"def",
"_multi_gamma_sequence",
"(",
"self",
",",
"a",
",",
"p",
",",
"name",
"=",
"\"multi_gamma_sequence\"",
")",
":",
"with",
"self",
".",
"_name_scope",
"(",
"name",
")",
":",
"# Linspace only takes scalars, so we'll add in the offset afterwards.",
"seq",
"=",
"tf",
".",
"linspace",
"(",
"tf",
".",
"constant",
"(",
"0.",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
",",
"0.5",
"-",
"0.5",
"*",
"p",
",",
"tf",
".",
"cast",
"(",
"p",
",",
"tf",
".",
"int32",
")",
")",
"return",
"seq",
"+",
"tf",
".",
"expand_dims",
"(",
"a",
",",
"[",
"-",
"1",
"]",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
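The sequence being built is simply `a, a - 1/2, ..., a - (p - 1)/2`, broadcast against any batch shape of `a`. A NumPy sketch of the same construction:

```python
import numpy as np

def multi_gamma_sequence(a, p):
  # Offsets 0, -1/2, ..., -(p - 1)/2 added along a new trailing axis of `a`.
  return np.asarray(a)[..., np.newaxis] + np.linspace(0., 0.5 - 0.5 * p, p)

print(multi_gamma_sequence(2.5, 4))  # [2.5 2.  1.5 1. ]
```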
|
test
|
_WishartLinearOperator._multi_lgamma
|
Computes the log multivariate gamma function; log(Gamma_p(a)).
|
tensorflow_probability/python/distributions/wishart.py
|
def _multi_lgamma(self, a, p, name="multi_lgamma"):
"""Computes the log multivariate gamma function; log(Gamma_p(a))."""
with self._name_scope(name):
seq = self._multi_gamma_sequence(a, p)
return (0.25 * p * (p - 1.) * math.log(math.pi) +
tf.reduce_sum(input_tensor=tf.math.lgamma(seq), axis=[-1]))
|
def _multi_lgamma(self, a, p, name="multi_lgamma"):
"""Computes the log multivariate gamma function; log(Gamma_p(a))."""
with self._name_scope(name):
seq = self._multi_gamma_sequence(a, p)
return (0.25 * p * (p - 1.) * math.log(math.pi) +
tf.reduce_sum(input_tensor=tf.math.lgamma(seq), axis=[-1]))
|
[
"Computes",
"the",
"log",
"multivariate",
"gamma",
"function",
";",
"log",
"(",
"Gamma_p",
"(",
"a",
"))",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/wishart.py#L425-L430
|
[
"def",
"_multi_lgamma",
"(",
"self",
",",
"a",
",",
"p",
",",
"name",
"=",
"\"multi_lgamma\"",
")",
":",
"with",
"self",
".",
"_name_scope",
"(",
"name",
")",
":",
"seq",
"=",
"self",
".",
"_multi_gamma_sequence",
"(",
"a",
",",
"p",
")",
"return",
"(",
"0.25",
"*",
"p",
"*",
"(",
"p",
"-",
"1.",
")",
"*",
"math",
".",
"log",
"(",
"math",
".",
"pi",
")",
"+",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"tf",
".",
"math",
".",
"lgamma",
"(",
"seq",
")",
",",
"axis",
"=",
"[",
"-",
"1",
"]",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
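As a sanity check (not part of the record), this closed form should agree with SciPy's `multigammaln` for scalar inputs, since both implement `log Gamma_p(a) = p(p-1)/4 * log(pi) + sum_i log Gamma(a - i/2)`:

```python
import numpy as np
from scipy.special import gammaln, multigammaln

def multi_lgamma(a, p):
  seq = a + np.linspace(0., 0.5 - 0.5 * p, p)
  return 0.25 * p * (p - 1.) * np.log(np.pi) + gammaln(seq).sum()

a, p = 3.7, 4
print(np.isclose(multi_lgamma(a, p), multigammaln(a, p)))  # True
```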
|
test
|
_WishartLinearOperator._multi_digamma
|
Computes the multivariate digamma function; Psi_p(a).
|
tensorflow_probability/python/distributions/wishart.py
|
def _multi_digamma(self, a, p, name="multi_digamma"):
"""Computes the multivariate digamma function; Psi_p(a)."""
with self._name_scope(name):
seq = self._multi_gamma_sequence(a, p)
return tf.reduce_sum(input_tensor=tf.math.digamma(seq), axis=[-1])
|
def _multi_digamma(self, a, p, name="multi_digamma"):
"""Computes the multivariate digamma function; Psi_p(a)."""
with self._name_scope(name):
seq = self._multi_gamma_sequence(a, p)
return tf.reduce_sum(input_tensor=tf.math.digamma(seq), axis=[-1])
|
[
"Computes",
"the",
"multivariate",
"digamma",
"function",
";",
"Psi_p",
"(",
"a",
")",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/wishart.py#L432-L436
|
[
"def",
"_multi_digamma",
"(",
"self",
",",
"a",
",",
"p",
",",
"name",
"=",
"\"multi_digamma\"",
")",
":",
"with",
"self",
".",
"_name_scope",
"(",
"name",
")",
":",
"seq",
"=",
"self",
".",
"_multi_gamma_sequence",
"(",
"a",
",",
"p",
")",
"return",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"tf",
".",
"math",
".",
"digamma",
"(",
"seq",
")",
",",
"axis",
"=",
"[",
"-",
"1",
"]",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
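The multivariate digamma follows the same pattern: the elementwise digamma summed over that sequence. A short NumPy analogue with illustrative inputs:

```python
import numpy as np
from scipy.special import digamma

def multi_digamma(a, p):
  seq = np.asarray(a)[..., np.newaxis] + np.linspace(0., 0.5 - 0.5 * p, p)
  return digamma(seq).sum(axis=-1)

print(multi_digamma(3.0, 2))  # equals digamma(3.0) + digamma(2.5)
```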
|
test
|
_outer_squared_difference
|
Convenience function analogous to tf.squared_difference.
|
tensorflow_probability/python/distributions/mixture_same_family.py
|
def _outer_squared_difference(x, y):
"""Convenience function analogous to tf.squared_difference."""
z = x - y
return z[..., tf.newaxis, :] * z[..., tf.newaxis]
|
def _outer_squared_difference(x, y):
"""Convenience function analogous to tf.squared_difference."""
z = x - y
return z[..., tf.newaxis, :] * z[..., tf.newaxis]
|
[
"Convenience",
"function",
"analogous",
"to",
"tf",
".",
"squared_difference",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/mixture_same_family.py#L536-L539
|
[
"def",
"_outer_squared_difference",
"(",
"x",
",",
"y",
")",
":",
"z",
"=",
"x",
"-",
"y",
"return",
"z",
"[",
"...",
",",
"tf",
".",
"newaxis",
",",
":",
"]",
"*",
"z",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
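For a rank-1 input the expression above is just the outer product `(x - y)(x - y)^T`, broadcast over leading batch dimensions. A quick NumPy check of that reading:

```python
import numpy as np

x = np.array([1., 2., 3.])
y = np.array([0., 1., 1.])
z = x - y
outer = z[..., np.newaxis, :] * z[..., np.newaxis]
print(outer.shape)                         # (3, 3)
print(np.allclose(outer, np.outer(z, z)))  # True
```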
|
test
|
_value_and_batch_jacobian
|
Enables uniform interface to value and batch jacobian calculation.
Works in both eager and graph modes.
Arguments:
f: The scalar function to evaluate.
x: The value at which to compute the value and the batch jacobian.
Returns:
A tuple (f(x), J(x)), where J(x) is the batch jacobian.
|
tensorflow_probability/python/distributions/mixture_same_family.py
|
def _value_and_batch_jacobian(f, x):
"""Enables uniform interface to value and batch jacobian calculation.
Works in both eager and graph modes.
Arguments:
f: The scalar function to evaluate.
x: The value at which to compute the value and the batch jacobian.
Returns:
A tuple (f(x), J(x)), where J(x) is the batch jacobian.
"""
if tf.executing_eagerly():
with tf.GradientTape() as tape:
tape.watch(x)
value = f(x)
batch_jacobian = tape.batch_jacobian(value, x)
else:
value = f(x)
batch_jacobian = gradients.batch_jacobian(value, x)
return value, batch_jacobian
|
def _value_and_batch_jacobian(f, x):
"""Enables uniform interface to value and batch jacobian calculation.
Works in both eager and graph modes.
Arguments:
f: The scalar function to evaluate.
x: The value at which to compute the value and the batch jacobian.
Returns:
A tuple (f(x), J(x)), where J(x) is the batch jacobian.
"""
if tf.executing_eagerly():
with tf.GradientTape() as tape:
tape.watch(x)
value = f(x)
batch_jacobian = tape.batch_jacobian(value, x)
else:
value = f(x)
batch_jacobian = gradients.batch_jacobian(value, x)
return value, batch_jacobian
|
[
"Enables",
"uniform",
"interface",
"to",
"value",
"and",
"batch",
"jacobian",
"calculation",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/mixture_same_family.py#L542-L562
|
[
"def",
"_value_and_batch_jacobian",
"(",
"f",
",",
"x",
")",
":",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"with",
"tf",
".",
"GradientTape",
"(",
")",
"as",
"tape",
":",
"tape",
".",
"watch",
"(",
"x",
")",
"value",
"=",
"f",
"(",
"x",
")",
"batch_jacobian",
"=",
"tape",
".",
"batch_jacobian",
"(",
"value",
",",
"x",
")",
"else",
":",
"value",
"=",
"f",
"(",
"x",
")",
"batch_jacobian",
"=",
"gradients",
".",
"batch_jacobian",
"(",
"value",
",",
"x",
")",
"return",
"value",
",",
"batch_jacobian"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
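A hedged eager-mode sketch of the same pattern, with a toy two-output function standing in for the distributional transform (the function and inputs are made up):

```python
import tensorflow as tf

def f(x):
  # Toy map from R^2 to R^2, one row per batch element.
  return tf.stack([x[..., 0] * x[..., 1], tf.sin(x[..., 0])], axis=-1)

x = tf.constant([[1.0, 2.0],
                 [0.5, -1.0]])
with tf.GradientTape() as tape:
  tape.watch(x)
  value = f(x)
batch_jacobian = tape.batch_jacobian(value, x)
print(value.shape, batch_jacobian.shape)  # (2, 2) (2, 2, 2)
```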
|
test
|
_prevent_2nd_derivative
|
Disables computation of the second derivatives for a tensor.
NB: you need to apply a non-identity function to the output tensor for the
exception to be raised.
Arguments:
x: A tensor.
Returns:
A tensor with the same value and the same derivative as x, but that raises
LookupError when trying to compute the second derivatives.
|
tensorflow_probability/python/distributions/mixture_same_family.py
|
def _prevent_2nd_derivative(x):
"""Disables computation of the second derivatives for a tensor.
NB: you need to apply a non-identity function to the output tensor for the
exception to be raised.
Arguments:
x: A tensor.
Returns:
A tensor with the same value and the same derivative as x, but that raises
LookupError when trying to compute the second derivatives.
"""
def grad(dy):
return array_ops.prevent_gradient(
dy, message="Second derivative is not implemented.")
return tf.identity(x), grad
|
def _prevent_2nd_derivative(x):
"""Disables computation of the second derivatives for a tensor.
NB: you need to apply a non-identity function to the output tensor for the
exception to be raised.
Arguments:
x: A tensor.
Returns:
A tensor with the same value and the same derivative as x, but that raises
LookupError when trying to compute the second derivatives.
"""
def grad(dy):
return array_ops.prevent_gradient(
dy, message="Second derivative is not implemented.")
return tf.identity(x), grad
|
[
"Disables",
"computation",
"of",
"the",
"second",
"derivatives",
"for",
"a",
"tensor",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/mixture_same_family.py#L566-L583
|
[
"def",
"_prevent_2nd_derivative",
"(",
"x",
")",
":",
"def",
"grad",
"(",
"dy",
")",
":",
"return",
"array_ops",
".",
"prevent_gradient",
"(",
"dy",
",",
"message",
"=",
"\"Second derivative is not implemented.\"",
")",
"return",
"tf",
".",
"identity",
"(",
"x",
")",
",",
"grad"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
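The record shows only the inner gradient function; in the source this helper is presumably paired with `tf.custom_gradient` (an identity forward pass whose gradient is routed through a prevent-gradient op). The reconstruction below is an illustrative sketch, not a verbatim copy, and uses the public `tf.raw_ops.PreventGradient` in place of the private `array_ops` call.

```python
import tensorflow as tf

@tf.custom_gradient
def prevent_2nd_derivative(x):
  def grad(dy):
    # Any attempt to differentiate through `dy` again raises LookupError.
    return tf.raw_ops.PreventGradient(
        input=dy, message="Second derivative is not implemented.")
  return tf.identity(x), grad

x = tf.constant(2.0)
with tf.GradientTape() as outer:
  outer.watch(x)
  with tf.GradientTape() as inner:
    inner.watch(x)
    y = tf.square(prevent_2nd_derivative(x))
  dy_dx = inner.gradient(y, x)   # first derivative still works: 4.0
# outer.gradient(dy_dx, x) would raise, which is the intended behavior.
```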
|
test
|
MixtureSameFamily._reparameterize_sample
|
Adds reparameterization (pathwise) gradients to samples of the mixture.
Implicit reparameterization gradients are
dx/dphi = -(d transform(x, phi) / dx)^-1 * d transform(x, phi) / dphi,
where transform(x, phi) is distributional transform that removes all
parameters from samples x.
We implement them by replacing x with
-stop_gradient(d transform(x, phi) / dx)^-1 * transform(x, phi)]
for the backward pass (gradient computation).
The derivative of this quantity w.r.t. phi is then the implicit
reparameterization gradient.
Note that this replaces the gradients w.r.t. both the mixture
distribution parameters and component distributions' parameters.
Limitations:
1. Fundamental: components must be fully reparameterized.
2. Distributional transform is currently only implemented for
factorized components.
3. Distributional transform currently only works for known rank of the
batch tensor.
Arguments:
x: Sample of mixture distribution
Returns:
Tensor with same value as x, but with reparameterization gradients
|
tensorflow_probability/python/distributions/mixture_same_family.py
|
def _reparameterize_sample(self, x):
"""Adds reparameterization (pathwise) gradients to samples of the mixture.
Implicit reparameterization gradients are
dx/dphi = -(d transform(x, phi) / dx)^-1 * d transform(x, phi) / dphi,
where transform(x, phi) is distributional transform that removes all
parameters from samples x.
We implement them by replacing x with
-stop_gradient(d transform(x, phi) / dx)^-1 * transform(x, phi)]
for the backward pass (gradient computation).
The derivative of this quantity w.r.t. phi is then the implicit
reparameterization gradient.
Note that this replaces the gradients w.r.t. both the mixture
distribution parameters and component distributions' parameters.
Limitations:
1. Fundamental: components must be fully reparameterized.
2. Distributional transform is currently only implemented for
factorized components.
3. Distributional transform currently only works for known rank of the
batch tensor.
Arguments:
x: Sample of mixture distribution
Returns:
Tensor with same value as x, but with reparameterization gradients
"""
# Remove the existing gradients of x wrt parameters of the components.
x = tf.stop_gradient(x)
x_2d_shape = [-1, self._event_size] # [S*prod(B), prod(E)]
# Perform distributional transform of x in [S, B, E] shape,
# but have Jacobian of size [S*prod(B), prod(E), prod(E)].
def reshaped_distributional_transform(x_2d):
return tf.reshape(
self._distributional_transform(tf.reshape(x_2d, tf.shape(input=x))),
x_2d_shape)
# transform_2d: [S*prod(B), prod(E)]
# jacobian: [S*prod(B), prod(E), prod(E)]
transform_2d, jacobian = _value_and_batch_jacobian(
reshaped_distributional_transform, tf.reshape(x, x_2d_shape))
# We only provide the first derivative; the second derivative computed by
# autodiff would be incorrect, so we raise an error if it is requested.
transform_2d = _prevent_2nd_derivative(transform_2d)
# Compute [- stop_gradient(jacobian)^-1 * transform] by solving a linear
# system. The Jacobian is lower triangular because the distributional
# transform for i-th event dimension does not depend on the next
# dimensions.
surrogate_x_2d = -tf.linalg.triangular_solve(
tf.stop_gradient(jacobian), tf.expand_dims(transform_2d, axis=-1),
lower=True) # [S*prod(B), prod(E), 1]
surrogate_x = tf.reshape(surrogate_x_2d, tf.shape(input=x))
# Replace gradients of x with gradients of surrogate_x, but keep the value.
return x + (surrogate_x - tf.stop_gradient(surrogate_x))
|
def _reparameterize_sample(self, x):
"""Adds reparameterization (pathwise) gradients to samples of the mixture.
Implicit reparameterization gradients are
dx/dphi = -(d transform(x, phi) / dx)^-1 * d transform(x, phi) / dphi,
where transform(x, phi) is distributional transform that removes all
parameters from samples x.
We implement them by replacing x with
-stop_gradient(d transform(x, phi) / dx)^-1 * transform(x, phi)]
for the backward pass (gradient computation).
The derivative of this quantity w.r.t. phi is then the implicit
reparameterization gradient.
Note that this replaces the gradients w.r.t. both the mixture
distribution parameters and component distributions' parameters.
Limitations:
1. Fundamental: components must be fully reparameterized.
2. Distributional transform is currently only implemented for
factorized components.
3. Distributional transform currently only works for known rank of the
batch tensor.
Arguments:
x: Sample of mixture distribution
Returns:
Tensor with same value as x, but with reparameterization gradients
"""
# Remove the existing gradients of x wrt parameters of the components.
x = tf.stop_gradient(x)
x_2d_shape = [-1, self._event_size] # [S*prod(B), prod(E)]
# Perform distributional transform of x in [S, B, E] shape,
# but have Jacobian of size [S*prod(B), prod(E), prod(E)].
def reshaped_distributional_transform(x_2d):
return tf.reshape(
self._distributional_transform(tf.reshape(x_2d, tf.shape(input=x))),
x_2d_shape)
# transform_2d: [S*prod(B), prod(E)]
# jacobian: [S*prod(B), prod(E), prod(E)]
transform_2d, jacobian = _value_and_batch_jacobian(
reshaped_distributional_transform, tf.reshape(x, x_2d_shape))
# We only provide the first derivative; the second derivative computed by
# autodiff would be incorrect, so we raise an error if it is requested.
transform_2d = _prevent_2nd_derivative(transform_2d)
# Compute [- stop_gradient(jacobian)^-1 * transform] by solving a linear
# system. The Jacobian is lower triangular because the distributional
# transform for i-th event dimension does not depend on the next
# dimensions.
surrogate_x_2d = -tf.linalg.triangular_solve(
tf.stop_gradient(jacobian), tf.expand_dims(transform_2d, axis=-1),
lower=True) # [S*prod(B), prod(E), 1]
surrogate_x = tf.reshape(surrogate_x_2d, tf.shape(input=x))
# Replace gradients of x with gradients of surrogate_x, but keep the value.
return x + (surrogate_x - tf.stop_gradient(surrogate_x))
|
[
"Adds",
"reparameterization",
"(",
"pathwise",
")",
"gradients",
"to",
"samples",
"of",
"the",
"mixture",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/mixture_same_family.py#L407-L467
|
[
"def",
"_reparameterize_sample",
"(",
"self",
",",
"x",
")",
":",
"# Remove the existing gradients of x wrt parameters of the components.",
"x",
"=",
"tf",
".",
"stop_gradient",
"(",
"x",
")",
"x_2d_shape",
"=",
"[",
"-",
"1",
",",
"self",
".",
"_event_size",
"]",
"# [S*prod(B), prod(E)]",
"# Perform distributional transform of x in [S, B, E] shape,",
"# but have Jacobian of size [S*prod(B), prod(E), prod(E)].",
"def",
"reshaped_distributional_transform",
"(",
"x_2d",
")",
":",
"return",
"tf",
".",
"reshape",
"(",
"self",
".",
"_distributional_transform",
"(",
"tf",
".",
"reshape",
"(",
"x_2d",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
")",
")",
",",
"x_2d_shape",
")",
"# transform_2d: [S*prod(B), prod(E)]",
"# jacobian: [S*prod(B), prod(E), prod(E)]",
"transform_2d",
",",
"jacobian",
"=",
"_value_and_batch_jacobian",
"(",
"reshaped_distributional_transform",
",",
"tf",
".",
"reshape",
"(",
"x",
",",
"x_2d_shape",
")",
")",
"# We only provide the first derivative; the second derivative computed by",
"# autodiff would be incorrect, so we raise an error if it is requested.",
"transform_2d",
"=",
"_prevent_2nd_derivative",
"(",
"transform_2d",
")",
"# Compute [- stop_gradient(jacobian)^-1 * transform] by solving a linear",
"# system. The Jacobian is lower triangular because the distributional",
"# transform for i-th event dimension does not depend on the next",
"# dimensions.",
"surrogate_x_2d",
"=",
"-",
"tf",
".",
"linalg",
".",
"triangular_solve",
"(",
"tf",
".",
"stop_gradient",
"(",
"jacobian",
")",
",",
"tf",
".",
"expand_dims",
"(",
"transform_2d",
",",
"axis",
"=",
"-",
"1",
")",
",",
"lower",
"=",
"True",
")",
"# [S*prod(B), prod(E), 1]",
"surrogate_x",
"=",
"tf",
".",
"reshape",
"(",
"surrogate_x_2d",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
")",
"# Replace gradients of x with gradients of surrogate_x, but keep the value.",
"return",
"x",
"+",
"(",
"surrogate_x",
"-",
"tf",
".",
"stop_gradient",
"(",
"surrogate_x",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
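A one-dimensional illustration (not from the record) of the identity the docstring states, `dx/dphi = -(dF/dx)^{-1} dF/dphi`: for a Normal sample written explicitly as `x = mu + eps`, the implicit-gradient formula recovers `dx/dmu = 1`.

```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

mu = tf.constant(0.3)
eps = tf.constant(1.2)   # a fixed standard-normal draw
x = mu + eps             # explicit reparameterization: dx/dmu = 1

with tf.GradientTape(persistent=True) as tape:
  tape.watch([mu, x])
  cdf = tfd.Normal(loc=mu, scale=1.).cdf(x)  # the "distributional transform"
dF_dx = tape.gradient(cdf, x)
dF_dmu = tape.gradient(cdf, mu)
print(-dF_dmu / dF_dx)   # ~1.0, matching the explicit gradient
```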
|
test
|
MixtureSameFamily._distributional_transform
|
Performs distributional transform of the mixture samples.
Distributional transform removes the parameters from samples of a
multivariate distribution by applying conditional CDFs:
(F(x_1), F(x_2 | x_1), ..., F(x_d | x_1, ..., x_d-1))
(the indexing is over the "flattened" event dimensions).
The result is a sample of a product of Uniform[0, 1] distributions.
We assume that the components are factorized, so the conditional CDFs become
F(x_i | x_1, ..., x_i-1) = sum_k w_i^k F_k (x_i),
where w_i^k is the posterior mixture weight: for i > 0
w_i^k = w_k prob_k(x_1, ..., x_i-1) / sum_k' w_k' prob_k'(x_1, ..., x_i-1)
and w_0^k = w_k is the mixture probability of the k-th component.
Arguments:
x: Sample of mixture distribution
Returns:
Result of the distributional transform
|
tensorflow_probability/python/distributions/mixture_same_family.py
|
def _distributional_transform(self, x):
"""Performs distributional transform of the mixture samples.
Distributional transform removes the parameters from samples of a
multivariate distribution by applying conditional CDFs:
(F(x_1), F(x_2 | x_1), ..., F(x_d | x_1, ..., x_d-1))
(the indexing is over the "flattened" event dimensions).
The result is a sample of a product of Uniform[0, 1] distributions.
We assume that the components are factorized, so the conditional CDFs become
F(x_i | x_1, ..., x_i-1) = sum_k w_i^k F_k (x_i),
where w_i^k is the posterior mixture weight: for i > 0
w_i^k = w_k prob_k(x_1, ..., x_i-1) / sum_k' w_k' prob_k'(x_1, ..., x_i-1)
and w_0^k = w_k is the mixture probability of the k-th component.
Arguments:
x: Sample of mixture distribution
Returns:
Result of the distributional transform
"""
if tensorshape_util.rank(x.shape) is None:
# tf.nn.softmax raises an error when applied to inputs of undefined rank.
raise ValueError("Distributional transform does not support inputs of "
"undefined rank.")
# Obtain factorized components distribution and assert that it's
# a scalar distribution.
if isinstance(self._components_distribution, independent.Independent):
univariate_components = self._components_distribution.distribution
else:
univariate_components = self._components_distribution
with tf.control_dependencies([
assert_util.assert_equal(
univariate_components.is_scalar_event(),
True,
message="`univariate_components` must have scalar event")
]):
x_padded = self._pad_sample_dims(x) # [S, B, 1, E]
log_prob_x = univariate_components.log_prob(x_padded) # [S, B, k, E]
cdf_x = univariate_components.cdf(x_padded) # [S, B, k, E]
# log prob_k (x_1, ..., x_i-1)
cumsum_log_prob_x = tf.reshape(
tf.math.cumsum(
# [S*prod(B)*k, prod(E)]
tf.reshape(log_prob_x, [-1, self._event_size]),
exclusive=True,
axis=-1),
tf.shape(input=log_prob_x)) # [S, B, k, E]
logits_mix_prob = distribution_utils.pad_mixture_dimensions(
self.mixture_distribution.logits, self, self.mixture_distribution,
self._event_ndims) # [B, k, 1]
# Logits of the posterior weights: log w_k + log prob_k (x_1, ..., x_i-1)
log_posterior_weights_x = logits_mix_prob + cumsum_log_prob_x
component_axis = tensorshape_util.rank(x.shape) - self._event_ndims
posterior_weights_x = tf.nn.softmax(log_posterior_weights_x,
axis=component_axis)
return tf.reduce_sum(
input_tensor=posterior_weights_x * cdf_x, axis=component_axis)
|
def _distributional_transform(self, x):
"""Performs distributional transform of the mixture samples.
Distributional transform removes the parameters from samples of a
multivariate distribution by applying conditional CDFs:
(F(x_1), F(x_2 | x_1), ..., F(x_d | x_1, ..., x_d-1))
(the indexing is over the "flattened" event dimensions).
The result is a sample of a product of Uniform[0, 1] distributions.
We assume that the components are factorized, so the conditional CDFs become
F(x_i | x_1, ..., x_i-1) = sum_k w_i^k F_k (x_i),
where w_i^k is the posterior mixture weight: for i > 0
w_i^k = w_k prob_k(x_1, ..., x_i-1) / sum_k' w_k' prob_k'(x_1, ..., x_i-1)
and w_0^k = w_k is the mixture probability of the k-th component.
Arguments:
x: Sample of mixture distribution
Returns:
Result of the distributional transform
"""
if tensorshape_util.rank(x.shape) is None:
# tf.nn.softmax raises an error when applied to inputs of undefined rank.
raise ValueError("Distributional transform does not support inputs of "
"undefined rank.")
# Obtain factorized components distribution and assert that it's
# a scalar distribution.
if isinstance(self._components_distribution, independent.Independent):
univariate_components = self._components_distribution.distribution
else:
univariate_components = self._components_distribution
with tf.control_dependencies([
assert_util.assert_equal(
univariate_components.is_scalar_event(),
True,
message="`univariate_components` must have scalar event")
]):
x_padded = self._pad_sample_dims(x) # [S, B, 1, E]
log_prob_x = univariate_components.log_prob(x_padded) # [S, B, k, E]
cdf_x = univariate_components.cdf(x_padded) # [S, B, k, E]
# log prob_k (x_1, ..., x_i-1)
cumsum_log_prob_x = tf.reshape(
tf.math.cumsum(
# [S*prod(B)*k, prod(E)]
tf.reshape(log_prob_x, [-1, self._event_size]),
exclusive=True,
axis=-1),
tf.shape(input=log_prob_x)) # [S, B, k, E]
logits_mix_prob = distribution_utils.pad_mixture_dimensions(
self.mixture_distribution.logits, self, self.mixture_distribution,
self._event_ndims) # [B, k, 1]
# Logits of the posterior weights: log w_k + log prob_k (x_1, ..., x_i-1)
log_posterior_weights_x = logits_mix_prob + cumsum_log_prob_x
component_axis = tensorshape_util.rank(x.shape) - self._event_ndims
posterior_weights_x = tf.nn.softmax(log_posterior_weights_x,
axis=component_axis)
return tf.reduce_sum(
input_tensor=posterior_weights_x * cdf_x, axis=component_axis)
|
[
"Performs",
"distributional",
"transform",
"of",
"the",
"mixture",
"samples",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/mixture_same_family.py#L469-L533
|
[
"def",
"_distributional_transform",
"(",
"self",
",",
"x",
")",
":",
"if",
"tensorshape_util",
".",
"rank",
"(",
"x",
".",
"shape",
")",
"is",
"None",
":",
"# tf.nn.softmax raises an error when applied to inputs of undefined rank.",
"raise",
"ValueError",
"(",
"\"Distributional transform does not support inputs of \"",
"\"undefined rank.\"",
")",
"# Obtain factorized components distribution and assert that it's",
"# a scalar distribution.",
"if",
"isinstance",
"(",
"self",
".",
"_components_distribution",
",",
"independent",
".",
"Independent",
")",
":",
"univariate_components",
"=",
"self",
".",
"_components_distribution",
".",
"distribution",
"else",
":",
"univariate_components",
"=",
"self",
".",
"_components_distribution",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"assert_util",
".",
"assert_equal",
"(",
"univariate_components",
".",
"is_scalar_event",
"(",
")",
",",
"True",
",",
"message",
"=",
"\"`univariate_components` must have scalar event\"",
")",
"]",
")",
":",
"x_padded",
"=",
"self",
".",
"_pad_sample_dims",
"(",
"x",
")",
"# [S, B, 1, E]",
"log_prob_x",
"=",
"univariate_components",
".",
"log_prob",
"(",
"x_padded",
")",
"# [S, B, k, E]",
"cdf_x",
"=",
"univariate_components",
".",
"cdf",
"(",
"x_padded",
")",
"# [S, B, k, E]",
"# log prob_k (x_1, ..., x_i-1)",
"cumsum_log_prob_x",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"math",
".",
"cumsum",
"(",
"# [S*prod(B)*k, prod(E)]",
"tf",
".",
"reshape",
"(",
"log_prob_x",
",",
"[",
"-",
"1",
",",
"self",
".",
"_event_size",
"]",
")",
",",
"exclusive",
"=",
"True",
",",
"axis",
"=",
"-",
"1",
")",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"log_prob_x",
")",
")",
"# [S, B, k, E]",
"logits_mix_prob",
"=",
"distribution_utils",
".",
"pad_mixture_dimensions",
"(",
"self",
".",
"mixture_distribution",
".",
"logits",
",",
"self",
",",
"self",
".",
"mixture_distribution",
",",
"self",
".",
"_event_ndims",
")",
"# [B, k, 1]",
"# Logits of the posterior weights: log w_k + log prob_k (x_1, ..., x_i-1)",
"log_posterior_weights_x",
"=",
"logits_mix_prob",
"+",
"cumsum_log_prob_x",
"component_axis",
"=",
"tensorshape_util",
".",
"rank",
"(",
"x",
".",
"shape",
")",
"-",
"self",
".",
"_event_ndims",
"posterior_weights_x",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"log_posterior_weights_x",
",",
"axis",
"=",
"component_axis",
")",
"return",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"posterior_weights_x",
"*",
"cdf_x",
",",
"axis",
"=",
"component_axis",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
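In the simplest case of a single event dimension, the transform above reduces to the mixture CDF `F(x) = sum_k w_k F_k(x)`, and applying it to the mixture's own samples should yield approximately Uniform[0, 1] values. A hedged sketch of that special case (all parameters are made up):

```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

probs = tf.constant([0.3, 0.7])
components = tfd.Normal(loc=[-2., 3.], scale=[1., 0.5])
mix = tfd.MixtureSameFamily(
    mixture_distribution=tfd.Categorical(probs=probs),
    components_distribution=components)

x = mix.sample(10000)
# Mixture CDF evaluated at the samples: weights times component CDFs.
u = tf.reduce_sum(probs * components.cdf(x[..., tf.newaxis]), axis=-1)
print(tf.reduce_mean(u))  # close to 0.5 if u is ~Uniform[0, 1]
```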
|
test
|
_split_covariance_into_marginals
|
Split a covariance matrix into block-diagonal marginals of given sizes.
|
tensorflow_probability/python/sts/decomposition.py
|
def _split_covariance_into_marginals(covariance, block_sizes):
"""Split a covariance matrix into block-diagonal marginals of given sizes."""
start_dim = 0
marginals = []
for size in block_sizes:
end_dim = start_dim + size
marginals.append(covariance[..., start_dim:end_dim, start_dim:end_dim])
start_dim = end_dim
return marginals
|
def _split_covariance_into_marginals(covariance, block_sizes):
"""Split a covariance matrix into block-diagonal marginals of given sizes."""
start_dim = 0
marginals = []
for size in block_sizes:
end_dim = start_dim + size
marginals.append(covariance[..., start_dim:end_dim, start_dim:end_dim])
start_dim = end_dim
return marginals
|
[
"Split",
"a",
"covariance",
"matrix",
"into",
"block",
"-",
"diagonal",
"marginals",
"of",
"given",
"sizes",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/decomposition.py#L29-L37
|
[
"def",
"_split_covariance_into_marginals",
"(",
"covariance",
",",
"block_sizes",
")",
":",
"start_dim",
"=",
"0",
"marginals",
"=",
"[",
"]",
"for",
"size",
"in",
"block_sizes",
":",
"end_dim",
"=",
"start_dim",
"+",
"size",
"marginals",
".",
"append",
"(",
"covariance",
"[",
"...",
",",
"start_dim",
":",
"end_dim",
",",
"start_dim",
":",
"end_dim",
"]",
")",
"start_dim",
"=",
"end_dim",
"return",
"marginals"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
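A NumPy sketch of the same slicing, showing block sizes of 2 and 3 turning a joint 5x5 covariance into 2x2 and 3x3 marginal blocks:

```python
import numpy as np

cov = np.arange(25.).reshape(5, 5)
block_sizes = [2, 3]
start, marginals = 0, []
for size in block_sizes:
  end = start + size
  marginals.append(cov[..., start:end, start:end])
  start = end
print([m.shape for m in marginals])  # [(2, 2), (3, 3)]
```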
|
test
|
_decompose_from_posterior_marginals
|
Utility method to decompose a joint posterior into components.
Args:
model: `tfp.sts.Sum` instance defining an additive STS model.
posterior_means: float `Tensor` of shape `concat(
[[num_posterior_draws], batch_shape, num_timesteps, latent_size])`
representing the posterior mean over latents in an
`AdditiveStateSpaceModel`.
posterior_covs: float `Tensor` of shape `concat(
[[num_posterior_draws], batch_shape, num_timesteps,
latent_size, latent_size])`
representing the posterior marginal covariances over latents in an
`AdditiveStateSpaceModel`.
parameter_samples: Python `list` of `Tensors` representing posterior
samples of model parameters, with shapes `[concat([
[num_posterior_draws], param.prior.batch_shape,
param.prior.event_shape]) for param in model.parameters]`. This may
optionally also be a map (Python `dict`) of parameter names to
`Tensor` values.
Returns:
component_dists: A `collections.OrderedDict` instance mapping
component StructuralTimeSeries instances (elements of `model.components`)
to `tfd.Distribution` instances representing the posterior marginal
distributions on the process modeled by each component. Each distribution
has batch shape matching that of `posterior_means`/`posterior_covs`, and
event shape of `[num_timesteps]`.
|
tensorflow_probability/python/sts/decomposition.py
|
def _decompose_from_posterior_marginals(
model, posterior_means, posterior_covs, parameter_samples):
"""Utility method to decompose a joint posterior into components.
Args:
model: `tfp.sts.Sum` instance defining an additive STS model.
posterior_means: float `Tensor` of shape `concat(
[[num_posterior_draws], batch_shape, num_timesteps, latent_size])`
representing the posterior mean over latents in an
`AdditiveStateSpaceModel`.
posterior_covs: float `Tensor` of shape `concat(
[[num_posterior_draws], batch_shape, num_timesteps,
latent_size, latent_size])`
representing the posterior marginal covariances over latents in an
`AdditiveStateSpaceModel`.
parameter_samples: Python `list` of `Tensors` representing posterior
samples of model parameters, with shapes `[concat([
[num_posterior_draws], param.prior.batch_shape,
param.prior.event_shape]) for param in model.parameters]`. This may
optionally also be a map (Python `dict`) of parameter names to
`Tensor` values.
Returns:
component_dists: A `collections.OrderedDict` instance mapping
component StructuralTimeSeries instances (elements of `model.components`)
to `tfd.Distribution` instances representing the posterior marginal
distributions on the process modeled by each component. Each distribution
has batch shape matching that of `posterior_means`/`posterior_covs`, and
event shape of `[num_timesteps]`.
"""
try:
model.components
except AttributeError:
raise ValueError('Model decomposed into components must be an instance of'
'`tfp.sts.Sum` (passed model {})'.format(model))
with tf.compat.v1.name_scope('decompose_from_posterior_marginals'):
# Extract the component means/covs from the joint latent posterior.
latent_sizes = [component.latent_size for component in model.components]
component_means = tf.split(posterior_means, latent_sizes, axis=-1)
component_covs = _split_covariance_into_marginals(
posterior_covs, latent_sizes)
# Instantiate per-component state space models, and use them to push the
# posterior means/covs through the observation model for each component.
num_timesteps = dist_util.prefer_static_value(
tf.shape(input=posterior_means))[-2]
component_ssms = model.make_component_state_space_models(
num_timesteps=num_timesteps,
param_vals=parameter_samples)
component_predictive_dists = collections.OrderedDict()
for (component, component_ssm,
component_mean, component_cov) in zip(model.components, component_ssms,
component_means, component_covs):
component_obs_mean, component_obs_cov = (
component_ssm.latents_to_observations(
latent_means=component_mean,
latent_covs=component_cov))
# Using the observation means and covs, build a mixture distribution
# that integrates over the posterior draws.
component_predictive_dists[component] = sts_util.mix_over_posterior_draws(
means=component_obs_mean[..., 0],
variances=component_obs_cov[..., 0, 0])
return component_predictive_dists
|
def _decompose_from_posterior_marginals(
model, posterior_means, posterior_covs, parameter_samples):
"""Utility method to decompose a joint posterior into components.
Args:
model: `tfp.sts.Sum` instance defining an additive STS model.
posterior_means: float `Tensor` of shape `concat(
[[num_posterior_draws], batch_shape, num_timesteps, latent_size])`
representing the posterior mean over latents in an
`AdditiveStateSpaceModel`.
posterior_covs: float `Tensor` of shape `concat(
[[num_posterior_draws], batch_shape, num_timesteps,
latent_size, latent_size])`
representing the posterior marginal covariances over latents in an
`AdditiveStateSpaceModel`.
parameter_samples: Python `list` of `Tensors` representing posterior
samples of model parameters, with shapes `[concat([
[num_posterior_draws], param.prior.batch_shape,
param.prior.event_shape]) for param in model.parameters]`. This may
optionally also be a map (Python `dict`) of parameter names to
`Tensor` values.
Returns:
component_dists: A `collections.OrderedDict` instance mapping
component StructuralTimeSeries instances (elements of `model.components`)
to `tfd.Distribution` instances representing the posterior marginal
distributions on the process modeled by each component. Each distribution
has batch shape matching that of `posterior_means`/`posterior_covs`, and
event shape of `[num_timesteps]`.
"""
try:
model.components
except AttributeError:
raise ValueError('Model decomposed into components must be an instance of'
'`tfp.sts.Sum` (passed model {})'.format(model))
with tf.compat.v1.name_scope('decompose_from_posterior_marginals'):
# Extract the component means/covs from the joint latent posterior.
latent_sizes = [component.latent_size for component in model.components]
component_means = tf.split(posterior_means, latent_sizes, axis=-1)
component_covs = _split_covariance_into_marginals(
posterior_covs, latent_sizes)
# Instantiate per-component state space models, and use them to push the
# posterior means/covs through the observation model for each component.
num_timesteps = dist_util.prefer_static_value(
tf.shape(input=posterior_means))[-2]
component_ssms = model.make_component_state_space_models(
num_timesteps=num_timesteps,
param_vals=parameter_samples)
component_predictive_dists = collections.OrderedDict()
for (component, component_ssm,
component_mean, component_cov) in zip(model.components, component_ssms,
component_means, component_covs):
component_obs_mean, component_obs_cov = (
component_ssm.latents_to_observations(
latent_means=component_mean,
latent_covs=component_cov))
# Using the observation means and covs, build a mixture distribution
# that integrates over the posterior draws.
component_predictive_dists[component] = sts_util.mix_over_posterior_draws(
means=component_obs_mean[..., 0],
variances=component_obs_cov[..., 0, 0])
return component_predictive_dists
|
[
"Utility",
"method",
"to",
"decompose",
"a",
"joint",
"posterior",
"into",
"components",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/decomposition.py#L40-L106
|
[
"def",
"_decompose_from_posterior_marginals",
"(",
"model",
",",
"posterior_means",
",",
"posterior_covs",
",",
"parameter_samples",
")",
":",
"try",
":",
"model",
".",
"components",
"except",
"AttributeError",
":",
"raise",
"ValueError",
"(",
"'Model decomposed into components must be an instance of'",
"'`tfp.sts.Sum` (passed model {})'",
".",
"format",
"(",
"model",
")",
")",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"'decompose_from_posterior_marginals'",
")",
":",
"# Extract the component means/covs from the joint latent posterior.",
"latent_sizes",
"=",
"[",
"component",
".",
"latent_size",
"for",
"component",
"in",
"model",
".",
"components",
"]",
"component_means",
"=",
"tf",
".",
"split",
"(",
"posterior_means",
",",
"latent_sizes",
",",
"axis",
"=",
"-",
"1",
")",
"component_covs",
"=",
"_split_covariance_into_marginals",
"(",
"posterior_covs",
",",
"latent_sizes",
")",
"# Instantiate per-component state space models, and use them to push the",
"# posterior means/covs through the observation model for each component.",
"num_timesteps",
"=",
"dist_util",
".",
"prefer_static_value",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"posterior_means",
")",
")",
"[",
"-",
"2",
"]",
"component_ssms",
"=",
"model",
".",
"make_component_state_space_models",
"(",
"num_timesteps",
"=",
"num_timesteps",
",",
"param_vals",
"=",
"parameter_samples",
")",
"component_predictive_dists",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"(",
"component",
",",
"component_ssm",
",",
"component_mean",
",",
"component_cov",
")",
"in",
"zip",
"(",
"model",
".",
"components",
",",
"component_ssms",
",",
"component_means",
",",
"component_covs",
")",
":",
"component_obs_mean",
",",
"component_obs_cov",
"=",
"(",
"component_ssm",
".",
"latents_to_observations",
"(",
"latent_means",
"=",
"component_mean",
",",
"latent_covs",
"=",
"component_cov",
")",
")",
"# Using the observation means and covs, build a mixture distribution",
"# that integrates over the posterior draws.",
"component_predictive_dists",
"[",
"component",
"]",
"=",
"sts_util",
".",
"mix_over_posterior_draws",
"(",
"means",
"=",
"component_obs_mean",
"[",
"...",
",",
"0",
"]",
",",
"variances",
"=",
"component_obs_cov",
"[",
"...",
",",
"0",
",",
"0",
"]",
")",
"return",
"component_predictive_dists"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
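The key step in the loop above is pushing each component's latent mean and covariance through that component's observation model; for a linear-Gaussian observation matrix `H` this is `H m` and `H P H^T`. The values below are illustrative and not taken from any model:

```python
import numpy as np

H = np.array([[1., 0.]])                    # observe the first latent dim
m = np.array([0.4, -0.1])                   # latent mean for this component
P = np.array([[0.2, 0.05], [0.05, 0.1]])    # latent covariance
obs_mean = H @ m                            # shape (1,)
obs_cov = H @ P @ H.T                       # shape (1, 1)
print(obs_mean[0], obs_cov[0, 0])
```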
|
test
|
decompose_by_component
|
Decompose an observed time series into contributions from each component.
This method decomposes a time series according to the posterior representation
of a structural time series model. In particular, it:
- Computes the posterior marginal mean and covariances over the additive
model's latent space.
- Decomposes the latent posterior into the marginal blocks for each
model component.
- Maps the per-component latent posteriors back through each component's
observation model, to generate the time series modeled by that component.
Args:
model: An instance of `tfp.sts.Sum` representing a structural time series
model.
observed_time_series: `float` `Tensor` of shape
`batch_shape + [num_timesteps, 1]` (omitting the trailing unit dimension
is also supported when `num_timesteps > 1`), specifying an observed time
series. May optionally be an instance of `tfp.sts.MaskedTimeSeries`, which
includes a mask `Tensor` to specify timesteps with missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior
samples of model parameters, with shapes `[concat([
[num_posterior_draws], param.prior.batch_shape,
param.prior.event_shape]) for param in model.parameters]`. This may
optionally also be a map (Python `dict`) of parameter names to
`Tensor` values.
Returns:
component_dists: A `collections.OrderedDict` instance mapping
component StructuralTimeSeries instances (elements of `model.components`)
to `tfd.Distribution` instances representing the posterior marginal
distributions on the process modeled by each component. Each distribution
has batch shape matching that of `posterior_means`/`posterior_covs`, and
event shape of `[num_timesteps]`.
#### Examples
Suppose we've built a model and fit it to data:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
num_steps_forecast = 50
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
To extract the contributions of individual components, pass the time series
and sampled parameters into `decompose_by_component`:
```python
component_dists = decompose_by_component(
model,
observed_time_series=observed_time_series,
parameter_samples=samples)
# Component mean and stddev have shape `[len(observed_time_series)]`.
day_of_week_effect_mean = component_dists[day_of_week].mean()
day_of_week_effect_stddev = component_dists[day_of_week].stddev()
```
Using the component distributions, we can visualize the uncertainty for
each component:
```
from matplotlib import pylab as plt
num_components = len(component_dists)
xs = np.arange(len(observed_time_series))
fig = plt.figure(figsize=(12, 3 * num_components))
for i, (component, component_dist) in enumerate(component_dists.items()):
# If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.
component_mean = component_dist.mean().numpy()
component_stddev = component_dist.stddev().numpy()
ax = fig.add_subplot(num_components, 1, 1 + i)
ax.plot(xs, component_mean, lw=2)
ax.fill_between(xs,
component_mean - 2 * component_stddev,
component_mean + 2 * component_stddev,
alpha=0.5)
ax.set_title(component.name)
```
|
tensorflow_probability/python/sts/decomposition.py
|
def decompose_by_component(model, observed_time_series, parameter_samples):
"""Decompose an observed time series into contributions from each component.
This method decomposes a time series according to the posterior representation
of a structural time series model. In particular, it:
- Computes the posterior marginal mean and covariances over the additive
model's latent space.
- Decomposes the latent posterior into the marginal blocks for each
model component.
- Maps the per-component latent posteriors back through each component's
observation model, to generate the time series modeled by that component.
Args:
model: An instance of `tfp.sts.Sum` representing a structural time series
model.
observed_time_series: `float` `Tensor` of shape
`batch_shape + [num_timesteps, 1]` (omitting the trailing unit dimension
is also supported when `num_timesteps > 1`), specifying an observed time
series. May optionally be an instance of `tfp.sts.MaskedTimeSeries`, which
includes a mask `Tensor` to specify timesteps with missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior
samples of model parameters, with shapes `[concat([
[num_posterior_draws], param.prior.batch_shape,
param.prior.event_shape]) for param in model.parameters]`. This may
optionally also be a map (Python `dict`) of parameter names to
`Tensor` values.
Returns:
component_dists: A `collections.OrderedDict` instance mapping
component StructuralTimeSeries instances (elements of `model.components`)
to `tfd.Distribution` instances representing the posterior marginal
distributions on the process modeled by each component. Each distribution
has batch shape matching that of `posterior_means`/`posterior_covs`, and
event shape of `[num_timesteps]`.
#### Examples
Suppose we've built a model and fit it to data:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
num_steps_forecast = 50
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
To extract the contributions of individual components, pass the time series
and sampled parameters into `decompose_by_component`:
```python
component_dists = decompose_by_component(
model,
observed_time_series=observed_time_series,
parameter_samples=samples)
# Component mean and stddev have shape `[len(observed_time_series)]`.
day_of_week_effect_mean = component_dists[day_of_week].mean()
day_of_week_effect_stddev = component_dists[day_of_week].stddev()
```
Using the component distributions, we can visualize the uncertainty for
each component:
```
from matplotlib import pylab as plt
num_components = len(component_dists)
xs = np.arange(len(observed_time_series))
fig = plt.figure(figsize=(12, 3 * num_components))
for i, (component, component_dist) in enumerate(component_dists.items()):
# If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.
component_mean = component_dist.mean().numpy()
component_stddev = component_dist.stddev().numpy()
ax = fig.add_subplot(num_components, 1, 1 + i)
ax.plot(xs, component_mean, lw=2)
ax.fill_between(xs,
component_mean - 2 * component_stddev,
component_mean + 2 * component_stddev,
alpha=0.5)
ax.set_title(component.name)
```
"""
with tf.compat.v1.name_scope('decompose_by_component',
values=[observed_time_series]):
[
observed_time_series,
is_missing
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
# Run smoothing over the training timesteps to extract the
# posterior on latents.
num_timesteps = dist_util.prefer_static_value(
tf.shape(input=observed_time_series))[-2]
ssm = model.make_state_space_model(num_timesteps=num_timesteps,
param_vals=parameter_samples)
posterior_means, posterior_covs = ssm.posterior_marginals(
observed_time_series, mask=is_missing)
return _decompose_from_posterior_marginals(
model, posterior_means, posterior_covs, parameter_samples)
|
def decompose_by_component(model, observed_time_series, parameter_samples):
"""Decompose an observed time series into contributions from each component.
This method decomposes a time series according to the posterior representation
of a structural time series model. In particular, it:
- Computes the posterior marginal mean and covariances over the additive
model's latent space.
- Decomposes the latent posterior into the marginal blocks for each
model component.
- Maps the per-component latent posteriors back through each component's
observation model, to generate the time series modeled by that component.
Args:
model: An instance of `tfp.sts.Sum` representing a structural time series
model.
observed_time_series: `float` `Tensor` of shape
`batch_shape + [num_timesteps, 1]` (omitting the trailing unit dimension
is also supported when `num_timesteps > 1`), specifying an observed time
series. May optionally be an instance of `tfp.sts.MaskedTimeSeries`, which
includes a mask `Tensor` to specify timesteps with missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior
samples of model parameters, with shapes `[concat([
[num_posterior_draws], param.prior.batch_shape,
param.prior.event_shape]) for param in model.parameters]`. This may
optionally also be a map (Python `dict`) of parameter names to
`Tensor` values.
Returns:
component_dists: A `collections.OrderedDict` instance mapping
component StructuralTimeSeries instances (elements of `model.components`)
to `tfd.Distribution` instances representing the posterior marginal
distributions on the process modeled by each component. Each distribution
has batch shape matching that of `posterior_means`/`posterior_covs`, and
event shape of `[num_timesteps]`.
#### Examples
Suppose we've built a model and fit it to data:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
num_steps_forecast = 50
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
To extract the contributions of individual components, pass the time series
and sampled parameters into `decompose_by_component`:
```python
component_dists = decompose_by_component(
model,
observed_time_series=observed_time_series,
parameter_samples=samples)
# Component mean and stddev have shape `[len(observed_time_series)]`.
day_of_week_effect_mean = component_dists[day_of_week].mean()
day_of_week_effect_stddev = component_dists[day_of_week].stddev()
```
Using the component distributions, we can visualize the uncertainty for
each component:
```
from matplotlib import pylab as plt
num_components = len(component_dists)
xs = np.arange(len(observed_time_series))
fig = plt.figure(figsize=(12, 3 * num_components))
for i, (component, component_dist) in enumerate(component_dists.items()):
# If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.
component_mean = component_dist.mean().numpy()
component_stddev = component_dist.stddev().numpy()
ax = fig.add_subplot(num_components, 1, 1 + i)
ax.plot(xs, component_mean, lw=2)
ax.fill_between(xs,
component_mean - 2 * component_stddev,
component_mean + 2 * component_stddev,
alpha=0.5)
ax.set_title(component.name)
```
"""
with tf.compat.v1.name_scope('decompose_by_component',
values=[observed_time_series]):
[
observed_time_series,
is_missing
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
# Run smoothing over the training timesteps to extract the
# posterior on latents.
num_timesteps = dist_util.prefer_static_value(
tf.shape(input=observed_time_series))[-2]
ssm = model.make_state_space_model(num_timesteps=num_timesteps,
param_vals=parameter_samples)
posterior_means, posterior_covs = ssm.posterior_marginals(
observed_time_series, mask=is_missing)
return _decompose_from_posterior_marginals(
model, posterior_means, posterior_covs, parameter_samples)
|
[
"Decompose",
"an",
"observed",
"time",
"series",
"into",
"contributions",
"from",
"each",
"component",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/decomposition.py#L109-L219
|
[
"def",
"decompose_by_component",
"(",
"model",
",",
"observed_time_series",
",",
"parameter_samples",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"'decompose_by_component'",
",",
"values",
"=",
"[",
"observed_time_series",
"]",
")",
":",
"[",
"observed_time_series",
",",
"is_missing",
"]",
"=",
"sts_util",
".",
"canonicalize_observed_time_series_with_mask",
"(",
"observed_time_series",
")",
"# Run smoothing over the training timesteps to extract the",
"# posterior on latents.",
"num_timesteps",
"=",
"dist_util",
".",
"prefer_static_value",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"observed_time_series",
")",
")",
"[",
"-",
"2",
"]",
"ssm",
"=",
"model",
".",
"make_state_space_model",
"(",
"num_timesteps",
"=",
"num_timesteps",
",",
"param_vals",
"=",
"parameter_samples",
")",
"posterior_means",
",",
"posterior_covs",
"=",
"ssm",
".",
"posterior_marginals",
"(",
"observed_time_series",
",",
"mask",
"=",
"is_missing",
")",
"return",
"_decompose_from_posterior_marginals",
"(",
"model",
",",
"posterior_means",
",",
"posterior_covs",
",",
"parameter_samples",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
decompose_forecast_by_component
|
Decompose a forecast distribution into contributions from each component.
Args:
model: An instance of `tfp.sts.Sum` representing a structural time series
model.
forecast_dist: A `Distribution` instance returned by `tfp.sts.forecast()`.
(specifically, must be a `tfd.MixtureSameFamily` over a
`tfd.LinearGaussianStateSpaceModel` parameterized by posterior samples).
parameter_samples: Python `list` of `Tensors` representing posterior samples
of model parameters, with shapes `[concat([[num_posterior_draws],
param.prior.batch_shape, param.prior.event_shape]) for param in
model.parameters]`. This may optionally also be a map (Python `dict`) of
parameter names to `Tensor` values.
Returns:
component_forecasts: A `collections.OrderedDict` instance mapping
component StructuralTimeSeries instances (elements of `model.components`)
to `tfd.Distribution` instances representing the marginal forecast for
each component. Each distribution has batch and event shape matching
`forecast_dist` (specifically, the event shape is
`[num_steps_forecast]`).
#### Examples
Suppose we've built a model, fit it to data, and constructed a forecast
distribution:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
num_steps_forecast = 50
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
forecast_dist = tfp.sts.forecast(model, observed_time_series,
parameter_samples=samples,
num_steps_forecast=num_steps_forecast)
```
To extract the forecast for individual components, pass the forecast
distribution into `decompose_forecast_by_component`:
```python
component_forecasts = decompose_forecast_by_component(
model, forecast_dist, samples)
# Component mean and stddev have shape `[num_steps_forecast]`.
day_of_week_effect_mean = component_forecasts[day_of_week].mean()
day_of_week_effect_stddev = component_forecasts[day_of_week].stddev()
```
Using the component forecasts, we can visualize the uncertainty for each
component:
```
from matplotlib import pylab as plt
num_components = len(component_forecasts)
xs = np.arange(num_steps_forecast)
fig = plt.figure(figsize=(12, 3 * num_components))
for i, (component, component_dist) in enumerate(component_forecasts.items()):
# If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.
component_mean = component_dist.mean().numpy()
component_stddev = component_dist.stddev().numpy()
ax = fig.add_subplot(num_components, 1, 1 + i)
ax.plot(xs, component_mean, lw=2)
ax.fill_between(xs,
component_mean - 2 * component_stddev,
component_mean + 2 * component_stddev,
alpha=0.5)
ax.set_title(component.name)
```
|
tensorflow_probability/python/sts/decomposition.py
|
def decompose_forecast_by_component(model, forecast_dist, parameter_samples):
"""Decompose a forecast distribution into contributions from each component.
Args:
model: An instance of `tfp.sts.Sum` representing a structural time series
model.
forecast_dist: A `Distribution` instance returned by `tfp.sts.forecast()`.
(specifically, must be a `tfd.MixtureSameFamily` over a
`tfd.LinearGaussianStateSpaceModel` parameterized by posterior samples).
parameter_samples: Python `list` of `Tensors` representing posterior samples
of model parameters, with shapes `[concat([[num_posterior_draws],
param.prior.batch_shape, param.prior.event_shape]) for param in
model.parameters]`. This may optionally also be a map (Python `dict`) of
parameter names to `Tensor` values.
Returns:
component_forecasts: A `collections.OrderedDict` instance mapping
component StructuralTimeSeries instances (elements of `model.components`)
to `tfd.Distribution` instances representing the marginal forecast for
each component. Each distribution has batch and event shape matching
`forecast_dist` (specifically, the event shape is
`[num_steps_forecast]`).
#### Examples
Suppose we've built a model, fit it to data, and constructed a forecast
distribution:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
num_steps_forecast = 50
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
forecast_dist = tfp.sts.forecast(model, observed_time_series,
parameter_samples=samples,
num_steps_forecast=num_steps_forecast)
```
To extract the forecast for individual components, pass the forecast
  distribution into `decompose_forecast_by_component`:
```python
component_forecasts = decompose_forecast_by_component(
model, forecast_dist, samples)
# Component mean and stddev have shape `[num_steps_forecast]`.
  day_of_week_effect_mean = component_forecasts[day_of_week].mean()
  day_of_week_effect_stddev = component_forecasts[day_of_week].stddev()
```
Using the component forecasts, we can visualize the uncertainty for each
component:
```
from matplotlib import pylab as plt
num_components = len(component_forecasts)
xs = np.arange(num_steps_forecast)
fig = plt.figure(figsize=(12, 3 * num_components))
for i, (component, component_dist) in enumerate(component_forecasts.items()):
# If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.
component_mean = component_dist.mean().numpy()
component_stddev = component_dist.stddev().numpy()
ax = fig.add_subplot(num_components, 1, 1 + i)
ax.plot(xs, component_mean, lw=2)
ax.fill_between(xs,
component_mean - 2 * component_stddev,
component_mean + 2 * component_stddev,
alpha=0.5)
ax.set_title(component.name)
```
"""
with tf.compat.v1.name_scope('decompose_forecast_by_component'):
try:
forecast_lgssm = forecast_dist.components_distribution
forecast_latent_mean, _ = forecast_lgssm._joint_mean() # pylint: disable=protected-access
forecast_latent_covs, _ = forecast_lgssm._joint_covariances() # pylint: disable=protected-access
except AttributeError as e:
raise ValueError(
          'Forecast distribution must be a MixtureSameFamily of '
          'LinearGaussianStateSpaceModel distributions, such as returned by '
'`tfp.sts.forecast()`. (saw exception: {})'.format(e))
# Since `parameter_samples` will have sample shape `[num_posterior_draws]`,
# we need to move the `num_posterior_draws` dimension of the forecast
# moments from the trailing batch dimension, where it's currently put by
# `sts.forecast`, back to the leading (sample shape) dimension.
forecast_latent_mean = dist_util.move_dimension(
forecast_latent_mean, source_idx=-3, dest_idx=0)
forecast_latent_covs = dist_util.move_dimension(
forecast_latent_covs, source_idx=-4, dest_idx=0)
return _decompose_from_posterior_marginals(
model, forecast_latent_mean, forecast_latent_covs, parameter_samples)
|
def decompose_forecast_by_component(model, forecast_dist, parameter_samples):
"""Decompose a forecast distribution into contributions from each component.
Args:
model: An instance of `tfp.sts.Sum` representing a structural time series
model.
forecast_dist: A `Distribution` instance returned by `tfp.sts.forecast()`.
(specifically, must be a `tfd.MixtureSameFamily` over a
`tfd.LinearGaussianStateSpaceModel` parameterized by posterior samples).
parameter_samples: Python `list` of `Tensors` representing posterior samples
of model parameters, with shapes `[concat([[num_posterior_draws],
param.prior.batch_shape, param.prior.event_shape]) for param in
model.parameters]`. This may optionally also be a map (Python `dict`) of
parameter names to `Tensor` values.
Returns:
component_forecasts: A `collections.OrderedDict` instance mapping
component StructuralTimeSeries instances (elements of `model.components`)
to `tfd.Distribution` instances representing the marginal forecast for
each component. Each distribution has batch and event shape matching
`forecast_dist` (specifically, the event shape is
`[num_steps_forecast]`).
#### Examples
Suppose we've built a model, fit it to data, and constructed a forecast
distribution:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
num_steps_forecast = 50
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
forecast_dist = tfp.sts.forecast(model, observed_time_series,
parameter_samples=samples,
num_steps_forecast=num_steps_forecast)
```
To extract the forecast for individual components, pass the forecast
  distribution into `decompose_forecast_by_component`:
```python
component_forecasts = decompose_forecast_by_component(
model, forecast_dist, samples)
# Component mean and stddev have shape `[num_steps_forecast]`.
  day_of_week_effect_mean = component_forecasts[day_of_week].mean()
  day_of_week_effect_stddev = component_forecasts[day_of_week].stddev()
```
Using the component forecasts, we can visualize the uncertainty for each
component:
```
from matplotlib import pylab as plt
num_components = len(component_forecasts)
xs = np.arange(num_steps_forecast)
fig = plt.figure(figsize=(12, 3 * num_components))
for i, (component, component_dist) in enumerate(component_forecasts.items()):
# If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.
component_mean = component_dist.mean().numpy()
component_stddev = component_dist.stddev().numpy()
ax = fig.add_subplot(num_components, 1, 1 + i)
ax.plot(xs, component_mean, lw=2)
ax.fill_between(xs,
component_mean - 2 * component_stddev,
component_mean + 2 * component_stddev,
alpha=0.5)
ax.set_title(component.name)
```
"""
with tf.compat.v1.name_scope('decompose_forecast_by_component'):
try:
forecast_lgssm = forecast_dist.components_distribution
forecast_latent_mean, _ = forecast_lgssm._joint_mean() # pylint: disable=protected-access
forecast_latent_covs, _ = forecast_lgssm._joint_covariances() # pylint: disable=protected-access
except AttributeError as e:
raise ValueError(
          'Forecast distribution must be a MixtureSameFamily of '
          'LinearGaussianStateSpaceModel distributions, such as returned by '
'`tfp.sts.forecast()`. (saw exception: {})'.format(e))
# Since `parameter_samples` will have sample shape `[num_posterior_draws]`,
# we need to move the `num_posterior_draws` dimension of the forecast
# moments from the trailing batch dimension, where it's currently put by
# `sts.forecast`, back to the leading (sample shape) dimension.
forecast_latent_mean = dist_util.move_dimension(
forecast_latent_mean, source_idx=-3, dest_idx=0)
forecast_latent_covs = dist_util.move_dimension(
forecast_latent_covs, source_idx=-4, dest_idx=0)
return _decompose_from_posterior_marginals(
model, forecast_latent_mean, forecast_latent_covs, parameter_samples)
|
[
"Decompose",
"a",
"forecast",
"distribution",
"into",
"contributions",
"from",
"each",
"component",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/decomposition.py#L222-L325
|
[
"def",
"decompose_forecast_by_component",
"(",
"model",
",",
"forecast_dist",
",",
"parameter_samples",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"'decompose_forecast_by_component'",
")",
":",
"try",
":",
"forecast_lgssm",
"=",
"forecast_dist",
".",
"components_distribution",
"forecast_latent_mean",
",",
"_",
"=",
"forecast_lgssm",
".",
"_joint_mean",
"(",
")",
"# pylint: disable=protected-access",
"forecast_latent_covs",
",",
"_",
"=",
"forecast_lgssm",
".",
"_joint_covariances",
"(",
")",
"# pylint: disable=protected-access",
"except",
"AttributeError",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"'Forecast distribution must be a MixtureSameFamily of'",
"'LinearGaussianStateSpaceModel distributions, such as returned by'",
"'`tfp.sts.forecast()`. (saw exception: {})'",
".",
"format",
"(",
"e",
")",
")",
"# Since `parameter_samples` will have sample shape `[num_posterior_draws]`,",
"# we need to move the `num_posterior_draws` dimension of the forecast",
"# moments from the trailing batch dimension, where it's currently put by",
"# `sts.forecast`, back to the leading (sample shape) dimension.",
"forecast_latent_mean",
"=",
"dist_util",
".",
"move_dimension",
"(",
"forecast_latent_mean",
",",
"source_idx",
"=",
"-",
"3",
",",
"dest_idx",
"=",
"0",
")",
"forecast_latent_covs",
"=",
"dist_util",
".",
"move_dimension",
"(",
"forecast_latent_covs",
",",
"source_idx",
"=",
"-",
"4",
",",
"dest_idx",
"=",
"0",
")",
"return",
"_decompose_from_posterior_marginals",
"(",
"model",
",",
"forecast_latent_mean",
",",
"forecast_latent_covs",
",",
"parameter_samples",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
dense_to_sparse
|
Converts dense `Tensor` to `SparseTensor`, dropping `ignore_value` cells.
Args:
x: A `Tensor`.
ignore_value: Entries in `x` equal to this value will be
    absent from the returned `SparseTensor`. If `None`, the default value of
    `x`'s dtype will be used (e.g. '' for `str`, 0 for `int`).
name: Python `str` prefix for ops created by this function.
Returns:
sparse_x: A `tf.SparseTensor` with the same shape as `x`.
Raises:
ValueError: when `x`'s rank is `None`.
|
tensorflow_probability/python/math/sparse.py
|
def dense_to_sparse(x, ignore_value=None, name=None):
"""Converts dense `Tensor` to `SparseTensor`, dropping `ignore_value` cells.
Args:
x: A `Tensor`.
ignore_value: Entries in `x` equal to this value will be
      absent from the returned `SparseTensor`. If `None`, the default value of
      `x`'s dtype will be used (e.g. '' for `str`, 0 for `int`).
name: Python `str` prefix for ops created by this function.
Returns:
sparse_x: A `tf.SparseTensor` with the same shape as `x`.
Raises:
ValueError: when `x`'s rank is `None`.
"""
# Copied (with modifications) from:
# tensorflow/contrib/layers/python/ops/sparse_ops.py.
with tf.compat.v1.name_scope(name, 'dense_to_sparse', [x, ignore_value]):
x = tf.convert_to_tensor(value=x, name='x')
if ignore_value is None:
if x.dtype.base_dtype == tf.string:
# Exception due to TF strings are converted to numpy objects by default.
ignore_value = ''
else:
ignore_value = x.dtype.as_numpy_dtype(0)
ignore_value = tf.cast(ignore_value, x.dtype, name='ignore_value')
indices = tf.where(tf.not_equal(x, ignore_value), name='indices')
return tf.SparseTensor(
indices=indices,
values=tf.gather_nd(x, indices, name='values'),
dense_shape=tf.shape(input=x, out_type=tf.int64, name='dense_shape'))
|
def dense_to_sparse(x, ignore_value=None, name=None):
"""Converts dense `Tensor` to `SparseTensor`, dropping `ignore_value` cells.
Args:
x: A `Tensor`.
ignore_value: Entries in `x` equal to this value will be
      absent from the returned `SparseTensor`. If `None`, the default value of
      `x`'s dtype will be used (e.g. '' for `str`, 0 for `int`).
name: Python `str` prefix for ops created by this function.
Returns:
sparse_x: A `tf.SparseTensor` with the same shape as `x`.
Raises:
ValueError: when `x`'s rank is `None`.
"""
# Copied (with modifications) from:
# tensorflow/contrib/layers/python/ops/sparse_ops.py.
with tf.compat.v1.name_scope(name, 'dense_to_sparse', [x, ignore_value]):
x = tf.convert_to_tensor(value=x, name='x')
if ignore_value is None:
if x.dtype.base_dtype == tf.string:
# Exception due to TF strings are converted to numpy objects by default.
ignore_value = ''
else:
ignore_value = x.dtype.as_numpy_dtype(0)
ignore_value = tf.cast(ignore_value, x.dtype, name='ignore_value')
indices = tf.where(tf.not_equal(x, ignore_value), name='indices')
return tf.SparseTensor(
indices=indices,
values=tf.gather_nd(x, indices, name='values'),
dense_shape=tf.shape(input=x, out_type=tf.int64, name='dense_shape'))
|
[
"Converts",
"dense",
"Tensor",
"to",
"SparseTensor",
"dropping",
"ignore_value",
"cells",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/sparse.py#L30-L61
|
[
"def",
"dense_to_sparse",
"(",
"x",
",",
"ignore_value",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"# Copied (with modifications) from:",
"# tensorflow/contrib/layers/python/ops/sparse_ops.py.",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'dense_to_sparse'",
",",
"[",
"x",
",",
"ignore_value",
"]",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"'x'",
")",
"if",
"ignore_value",
"is",
"None",
":",
"if",
"x",
".",
"dtype",
".",
"base_dtype",
"==",
"tf",
".",
"string",
":",
"# Exception due to TF strings are converted to numpy objects by default.",
"ignore_value",
"=",
"''",
"else",
":",
"ignore_value",
"=",
"x",
".",
"dtype",
".",
"as_numpy_dtype",
"(",
"0",
")",
"ignore_value",
"=",
"tf",
".",
"cast",
"(",
"ignore_value",
",",
"x",
".",
"dtype",
",",
"name",
"=",
"'ignore_value'",
")",
"indices",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"not_equal",
"(",
"x",
",",
"ignore_value",
")",
",",
"name",
"=",
"'indices'",
")",
"return",
"tf",
".",
"SparseTensor",
"(",
"indices",
"=",
"indices",
",",
"values",
"=",
"tf",
".",
"gather_nd",
"(",
"x",
",",
"indices",
",",
"name",
"=",
"'values'",
")",
",",
"dense_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
",",
"out_type",
"=",
"tf",
".",
"int64",
",",
"name",
"=",
"'dense_shape'",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
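A quick usage sketch for `dense_to_sparse` above. It is illustrative only: it assumes the function is exported as `tfp.math.dense_to_sparse` and that eager execution is enabled, and the commented outputs are expected values rather than captured output.
```python
import tensorflow as tf
import tensorflow_probability as tfp

x = tf.constant([[1., 0., 2.],
                 [0., 0., 3.]])
# With no ignore_value given, 0 (the default for a float dtype) is dropped.
sp = tfp.math.dense_to_sparse(x)
print(sp.indices.numpy())      # [[0 0] [0 2] [1 2]]
print(sp.values.numpy())       # [1. 2. 3.]
print(sp.dense_shape.numpy())  # [2 3]
```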
test
|
_operator
|
Defers an operator overload to `attr`.
Args:
attr: Operator attribute to use.
Returns:
Function calling operator attribute.
|
tensorflow_probability/python/edward2/random_variable.py
|
def _operator(attr):
"""Defers an operator overload to `attr`.
Args:
attr: Operator attribute to use.
Returns:
Function calling operator attribute.
"""
@functools.wraps(attr)
def func(a, *args):
return attr(a.value, *args)
return func
|
def _operator(attr):
"""Defers an operator overload to `attr`.
Args:
attr: Operator attribute to use.
Returns:
Function calling operator attribute.
"""
@functools.wraps(attr)
def func(a, *args):
return attr(a.value, *args)
return func
|
[
"Defers",
"an",
"operator",
"overload",
"to",
"attr",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/random_variable.py#L32-L44
|
[
"def",
"_operator",
"(",
"attr",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"attr",
")",
"def",
"func",
"(",
"a",
",",
"*",
"args",
")",
":",
"return",
"attr",
"(",
"a",
".",
"value",
",",
"*",
"args",
")",
"return",
"func"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
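A minimal, self-contained sketch of the deferral pattern used by `_operator` above. The `Boxed` class and the use of `operator.add` are stand-ins introduced for illustration; Edward2 applies the same trick to `tf.Tensor` operator methods.
```python
import functools
import operator


class Boxed(object):
  # Toy wrapper holding a `value`, standing in for a random variable.

  def __init__(self, value):
    self.value = value


def _operator(attr):
  # Forward the operator to the wrapped `value`, as in the function above.
  @functools.wraps(attr)
  def func(a, *args):
    return attr(a.value, *args)
  return func


# Defer `+` on the wrapper to `+` on the underlying value.
Boxed.__add__ = _operator(operator.add)
print(Boxed(2.0) + 3.0)  # 5.0
```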
test
|
_numpy_text
|
Human-readable representation of a tensor's numpy value.
|
tensorflow_probability/python/edward2/random_variable.py
|
def _numpy_text(tensor, is_repr=False):
"""Human-readable representation of a tensor's numpy value."""
if tensor.dtype.is_numpy_compatible:
text = repr(tensor.numpy()) if is_repr else str(tensor.numpy())
else:
text = "<unprintable>"
if "\n" in text:
text = "\n" + text
return text
|
def _numpy_text(tensor, is_repr=False):
"""Human-readable representation of a tensor's numpy value."""
if tensor.dtype.is_numpy_compatible:
text = repr(tensor.numpy()) if is_repr else str(tensor.numpy())
else:
text = "<unprintable>"
if "\n" in text:
text = "\n" + text
return text
|
[
"Human",
"-",
"readable",
"representation",
"of",
"a",
"tensor",
"s",
"numpy",
"value",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/random_variable.py#L287-L295
|
[
"def",
"_numpy_text",
"(",
"tensor",
",",
"is_repr",
"=",
"False",
")",
":",
"if",
"tensor",
".",
"dtype",
".",
"is_numpy_compatible",
":",
"text",
"=",
"repr",
"(",
"tensor",
".",
"numpy",
"(",
")",
")",
"if",
"is_repr",
"else",
"str",
"(",
"tensor",
".",
"numpy",
"(",
")",
")",
"else",
":",
"text",
"=",
"\"<unprintable>\"",
"if",
"\"\\n\"",
"in",
"text",
":",
"text",
"=",
"\"\\n\"",
"+",
"text",
"return",
"text"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
RandomVariable.sample_shape
|
Sample shape of random variable as a `TensorShape`.
|
tensorflow_probability/python/edward2/random_variable.py
|
def sample_shape(self):
"""Sample shape of random variable as a `TensorShape`."""
if isinstance(self._sample_shape, tf.Tensor):
return tf.TensorShape(tf.get_static_value(self._sample_shape))
return tf.TensorShape(self._sample_shape)
|
def sample_shape(self):
"""Sample shape of random variable as a `TensorShape`."""
if isinstance(self._sample_shape, tf.Tensor):
return tf.TensorShape(tf.get_static_value(self._sample_shape))
return tf.TensorShape(self._sample_shape)
|
[
"Sample",
"shape",
"of",
"random",
"variable",
"as",
"a",
"TensorShape",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/random_variable.py#L133-L137
|
[
"def",
"sample_shape",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"_sample_shape",
",",
"tf",
".",
"Tensor",
")",
":",
"return",
"tf",
".",
"TensorShape",
"(",
"tf",
".",
"get_static_value",
"(",
"self",
".",
"_sample_shape",
")",
")",
"return",
"tf",
".",
"TensorShape",
"(",
"self",
".",
"_sample_shape",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
RandomVariable.sample_shape_tensor
|
Sample shape of random variable as a 1-D `Tensor`.
Args:
name: name to give to the op
Returns:
sample_shape: `Tensor`.
|
tensorflow_probability/python/edward2/random_variable.py
|
def sample_shape_tensor(self, name="sample_shape_tensor"):
"""Sample shape of random variable as a 1-D `Tensor`.
Args:
name: name to give to the op
Returns:
sample_shape: `Tensor`.
"""
with tf.compat.v1.name_scope(name):
if isinstance(self._sample_shape, tf.Tensor):
return self._sample_shape
return tf.convert_to_tensor(
value=self.sample_shape.as_list(), dtype=tf.int32)
|
def sample_shape_tensor(self, name="sample_shape_tensor"):
"""Sample shape of random variable as a 1-D `Tensor`.
Args:
name: name to give to the op
Returns:
sample_shape: `Tensor`.
"""
with tf.compat.v1.name_scope(name):
if isinstance(self._sample_shape, tf.Tensor):
return self._sample_shape
return tf.convert_to_tensor(
value=self.sample_shape.as_list(), dtype=tf.int32)
|
[
"Sample",
"shape",
"of",
"random",
"variable",
"as",
"a",
"1",
"-",
"D",
"Tensor",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/random_variable.py#L139-L152
|
[
"def",
"sample_shape_tensor",
"(",
"self",
",",
"name",
"=",
"\"sample_shape_tensor\"",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"_sample_shape",
",",
"tf",
".",
"Tensor",
")",
":",
"return",
"self",
".",
"_sample_shape",
"return",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"self",
".",
"sample_shape",
".",
"as_list",
"(",
")",
",",
"dtype",
"=",
"tf",
".",
"int32",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
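To make the two shape accessors above concrete: `sample_shape` is the static `TensorShape`, while `sample_shape_tensor()` returns its 1-D `Tensor` counterpart. A hedged sketch, assuming the `tfp.edward2` namespace and eager execution (commented values are expected, not captured, output):
```python
from tensorflow_probability import edward2 as ed

x = ed.Normal(loc=0., scale=1., sample_shape=[3, 2])
print(x.sample_shape)                   # static shape, e.g. TensorShape([3, 2])
print(x.sample_shape_tensor().numpy())  # dynamic counterpart, [3 2]
print(x.value.shape)                    # the drawn sample has shape (3, 2)
```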
test
|
RandomVariable.value
|
Get tensor that the random variable corresponds to.
|
tensorflow_probability/python/edward2/random_variable.py
|
def value(self):
"""Get tensor that the random variable corresponds to."""
if self._value is None:
try:
self._value = self.distribution.sample(self.sample_shape_tensor())
except NotImplementedError:
raise NotImplementedError(
"sample is not implemented for {0}. You must either pass in the "
"value argument or implement sample for {0}."
.format(self.distribution.__class__.__name__))
return self._value
|
def value(self):
"""Get tensor that the random variable corresponds to."""
if self._value is None:
try:
self._value = self.distribution.sample(self.sample_shape_tensor())
except NotImplementedError:
raise NotImplementedError(
"sample is not implemented for {0}. You must either pass in the "
"value argument or implement sample for {0}."
.format(self.distribution.__class__.__name__))
return self._value
|
[
"Get",
"tensor",
"that",
"the",
"random",
"variable",
"corresponds",
"to",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/random_variable.py#L160-L170
|
[
"def",
"value",
"(",
"self",
")",
":",
"if",
"self",
".",
"_value",
"is",
"None",
":",
"try",
":",
"self",
".",
"_value",
"=",
"self",
".",
"distribution",
".",
"sample",
"(",
"self",
".",
"sample_shape_tensor",
"(",
")",
")",
"except",
"NotImplementedError",
":",
"raise",
"NotImplementedError",
"(",
"\"sample is not implemented for {0}. You must either pass in the \"",
"\"value argument or implement sample for {0}.\"",
".",
"format",
"(",
"self",
".",
"distribution",
".",
"__class__",
".",
"__name__",
")",
")",
"return",
"self",
".",
"_value"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
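The `value` property above is lazy: `distribution.sample()` is only called when no explicit `value` was supplied at construction. A small sketch under the same assumptions as before (`tfp.edward2` namespace, eager execution):
```python
from tensorflow_probability import edward2 as ed

x = ed.Normal(loc=0., scale=1., value=2.)  # explicit value, nothing is sampled
print(float(x.value))                      # 2.0

y = ed.Normal(loc=0., scale=1.)            # no value given, so one is drawn
print(y.value.shape)                       # () -- a scalar sample
```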
test
|
RandomVariable.eval
|
In a session, computes and returns the value of this random variable.
This is not a graph construction method; it does not add ops to the graph.
This convenience method requires a session where the graph
containing this variable has been launched. If no session is
passed, the default session is used.
Args:
session: tf.BaseSession.
The `tf.Session` to use to evaluate this random variable. If
none, the default session is used.
feed_dict: dict.
A dictionary that maps `tf.Tensor` objects to feed values. See
`tf.Session.run()` for a description of the valid feed values.
Returns:
Value of the random variable.
#### Examples
```python
x = Normal(0.0, 1.0)
with tf.Session() as sess:
# Usage passing the session explicitly.
print(x.eval(sess))
# Usage with the default session. The 'with' block
# above makes 'sess' the default session.
print(x.eval())
```
|
tensorflow_probability/python/edward2/random_variable.py
|
def eval(self, session=None, feed_dict=None):
"""In a session, computes and returns the value of this random variable.
    This is not a graph construction method; it does not add ops to the graph.
This convenience method requires a session where the graph
containing this variable has been launched. If no session is
passed, the default session is used.
Args:
session: tf.BaseSession.
The `tf.Session` to use to evaluate this random variable. If
none, the default session is used.
feed_dict: dict.
A dictionary that maps `tf.Tensor` objects to feed values. See
`tf.Session.run()` for a description of the valid feed values.
Returns:
Value of the random variable.
#### Examples
```python
x = Normal(0.0, 1.0)
with tf.Session() as sess:
# Usage passing the session explicitly.
print(x.eval(sess))
# Usage with the default session. The 'with' block
# above makes 'sess' the default session.
print(x.eval())
```
"""
return self.value.eval(session=session, feed_dict=feed_dict)
|
def eval(self, session=None, feed_dict=None):
"""In a session, computes and returns the value of this random variable.
    This is not a graph construction method; it does not add ops to the graph.
This convenience method requires a session where the graph
containing this variable has been launched. If no session is
passed, the default session is used.
Args:
session: tf.BaseSession.
The `tf.Session` to use to evaluate this random variable. If
none, the default session is used.
feed_dict: dict.
A dictionary that maps `tf.Tensor` objects to feed values. See
`tf.Session.run()` for a description of the valid feed values.
Returns:
Value of the random variable.
#### Examples
```python
x = Normal(0.0, 1.0)
with tf.Session() as sess:
# Usage passing the session explicitly.
print(x.eval(sess))
# Usage with the default session. The 'with' block
# above makes 'sess' the default session.
print(x.eval())
```
"""
return self.value.eval(session=session, feed_dict=feed_dict)
|
[
"In",
"a",
"session",
"computes",
"and",
"returns",
"the",
"value",
"of",
"this",
"random",
"variable",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/random_variable.py#L236-L268
|
[
"def",
"eval",
"(",
"self",
",",
"session",
"=",
"None",
",",
"feed_dict",
"=",
"None",
")",
":",
"return",
"self",
".",
"value",
".",
"eval",
"(",
"session",
"=",
"session",
",",
"feed_dict",
"=",
"feed_dict",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
RandomVariable.numpy
|
Value as NumPy array, only available for TF Eager.
|
tensorflow_probability/python/edward2/random_variable.py
|
def numpy(self):
"""Value as NumPy array, only available for TF Eager."""
if not isinstance(self.value, ops.EagerTensor):
      raise NotImplementedError("value argument must be an EagerTensor.")
return self.value.numpy()
|
def numpy(self):
"""Value as NumPy array, only available for TF Eager."""
if not isinstance(self.value, ops.EagerTensor):
      raise NotImplementedError("value argument must be an EagerTensor.")
return self.value.numpy()
|
[
"Value",
"as",
"NumPy",
"array",
"only",
"available",
"for",
"TF",
"Eager",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/random_variable.py#L270-L275
|
[
"def",
"numpy",
"(",
"self",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"value",
",",
"ops",
".",
"EagerTensor",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"value argument must be a EagerTensor.\"",
")",
"return",
"self",
".",
"value",
".",
"numpy",
"(",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
normal_conjugates_known_scale_posterior
|
Posterior Normal distribution with conjugate prior on the mean.
This model assumes that `n` observations (with sum `s`) come from a
Normal with unknown mean `loc` (described by the Normal `prior`)
and known variance `scale**2`. The "known scale posterior" is
the distribution of the unknown `loc`.
Accepts a prior Normal distribution object, having parameters
`loc0` and `scale0`, as well as known `scale` values of the predictive
distribution(s) (also assumed Normal),
and statistical estimates `s` (the sum(s) of the observations) and
`n` (the number(s) of observations).
Returns a posterior (also Normal) distribution object, with parameters
`(loc', scale'**2)`, where:
```
mu ~ N(mu', sigma'**2)
sigma'**2 = 1/(1/sigma0**2 + n/sigma**2),
mu' = (mu0/sigma0**2 + s/sigma**2) * sigma'**2.
```
Distribution parameters from `prior`, as well as `scale`, `s`, and `n`,
will broadcast in the case of multidimensional sets of parameters.
Args:
prior: `Normal` object of type `dtype`:
the prior distribution having parameters `(loc0, scale0)`.
scale: tensor of type `dtype`, taking values `scale > 0`.
The known stddev parameter(s).
s: Tensor of type `dtype`. The sum(s) of observations.
n: Tensor of type `int`. The number(s) of observations.
Returns:
A new Normal posterior distribution object for the unknown observation
mean `loc`.
Raises:
TypeError: if dtype of `s` does not match `dtype`, or `prior` is not a
Normal object.
|
tensorflow_probability/python/distributions/normal_conjugate_posteriors.py
|
def normal_conjugates_known_scale_posterior(prior, scale, s, n):
"""Posterior Normal distribution with conjugate prior on the mean.
This model assumes that `n` observations (with sum `s`) come from a
Normal with unknown mean `loc` (described by the Normal `prior`)
and known variance `scale**2`. The "known scale posterior" is
the distribution of the unknown `loc`.
Accepts a prior Normal distribution object, having parameters
`loc0` and `scale0`, as well as known `scale` values of the predictive
distribution(s) (also assumed Normal),
and statistical estimates `s` (the sum(s) of the observations) and
`n` (the number(s) of observations).
Returns a posterior (also Normal) distribution object, with parameters
`(loc', scale'**2)`, where:
```
mu ~ N(mu', sigma'**2)
sigma'**2 = 1/(1/sigma0**2 + n/sigma**2),
mu' = (mu0/sigma0**2 + s/sigma**2) * sigma'**2.
```
  Distribution parameters from `prior`, as well as `scale`, `s`, and `n`,
will broadcast in the case of multidimensional sets of parameters.
Args:
prior: `Normal` object of type `dtype`:
the prior distribution having parameters `(loc0, scale0)`.
scale: tensor of type `dtype`, taking values `scale > 0`.
The known stddev parameter(s).
s: Tensor of type `dtype`. The sum(s) of observations.
n: Tensor of type `int`. The number(s) of observations.
Returns:
A new Normal posterior distribution object for the unknown observation
mean `loc`.
Raises:
TypeError: if dtype of `s` does not match `dtype`, or `prior` is not a
Normal object.
"""
if not isinstance(prior, normal.Normal):
raise TypeError("Expected prior to be an instance of type Normal")
if s.dtype != prior.dtype:
raise TypeError(
"Observation sum s.dtype does not match prior dtype: %s vs. %s"
% (s.dtype, prior.dtype))
n = tf.cast(n, prior.dtype)
scale0_2 = tf.square(prior.scale)
scale_2 = tf.square(scale)
scalep_2 = 1.0/(1/scale0_2 + n/scale_2)
return normal.Normal(
loc=(prior.loc / scale0_2 + s / scale_2) * scalep_2,
scale=tf.sqrt(scalep_2))
|
def normal_conjugates_known_scale_posterior(prior, scale, s, n):
"""Posterior Normal distribution with conjugate prior on the mean.
This model assumes that `n` observations (with sum `s`) come from a
Normal with unknown mean `loc` (described by the Normal `prior`)
and known variance `scale**2`. The "known scale posterior" is
the distribution of the unknown `loc`.
Accepts a prior Normal distribution object, having parameters
`loc0` and `scale0`, as well as known `scale` values of the predictive
distribution(s) (also assumed Normal),
and statistical estimates `s` (the sum(s) of the observations) and
`n` (the number(s) of observations).
Returns a posterior (also Normal) distribution object, with parameters
`(loc', scale'**2)`, where:
```
mu ~ N(mu', sigma'**2)
sigma'**2 = 1/(1/sigma0**2 + n/sigma**2),
mu' = (mu0/sigma0**2 + s/sigma**2) * sigma'**2.
```
  Distribution parameters from `prior`, as well as `scale`, `s`, and `n`,
will broadcast in the case of multidimensional sets of parameters.
Args:
prior: `Normal` object of type `dtype`:
the prior distribution having parameters `(loc0, scale0)`.
scale: tensor of type `dtype`, taking values `scale > 0`.
The known stddev parameter(s).
s: Tensor of type `dtype`. The sum(s) of observations.
n: Tensor of type `int`. The number(s) of observations.
Returns:
A new Normal posterior distribution object for the unknown observation
mean `loc`.
Raises:
TypeError: if dtype of `s` does not match `dtype`, or `prior` is not a
Normal object.
"""
if not isinstance(prior, normal.Normal):
raise TypeError("Expected prior to be an instance of type Normal")
if s.dtype != prior.dtype:
raise TypeError(
"Observation sum s.dtype does not match prior dtype: %s vs. %s"
% (s.dtype, prior.dtype))
n = tf.cast(n, prior.dtype)
scale0_2 = tf.square(prior.scale)
scale_2 = tf.square(scale)
scalep_2 = 1.0/(1/scale0_2 + n/scale_2)
return normal.Normal(
loc=(prior.loc / scale0_2 + s / scale_2) * scalep_2,
scale=tf.sqrt(scalep_2))
|
[
"Posterior",
"Normal",
"distribution",
"with",
"conjugate",
"prior",
"on",
"the",
"mean",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/normal_conjugate_posteriors.py#L25-L81
|
[
"def",
"normal_conjugates_known_scale_posterior",
"(",
"prior",
",",
"scale",
",",
"s",
",",
"n",
")",
":",
"if",
"not",
"isinstance",
"(",
"prior",
",",
"normal",
".",
"Normal",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected prior to be an instance of type Normal\"",
")",
"if",
"s",
".",
"dtype",
"!=",
"prior",
".",
"dtype",
":",
"raise",
"TypeError",
"(",
"\"Observation sum s.dtype does not match prior dtype: %s vs. %s\"",
"%",
"(",
"s",
".",
"dtype",
",",
"prior",
".",
"dtype",
")",
")",
"n",
"=",
"tf",
".",
"cast",
"(",
"n",
",",
"prior",
".",
"dtype",
")",
"scale0_2",
"=",
"tf",
".",
"square",
"(",
"prior",
".",
"scale",
")",
"scale_2",
"=",
"tf",
".",
"square",
"(",
"scale",
")",
"scalep_2",
"=",
"1.0",
"/",
"(",
"1",
"/",
"scale0_2",
"+",
"n",
"/",
"scale_2",
")",
"return",
"normal",
".",
"Normal",
"(",
"loc",
"=",
"(",
"prior",
".",
"loc",
"/",
"scale0_2",
"+",
"s",
"/",
"scale_2",
")",
"*",
"scalep_2",
",",
"scale",
"=",
"tf",
".",
"sqrt",
"(",
"scalep_2",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
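A worked numeric check of the conjugate update above, with values chosen to keep the arithmetic obvious. The `tfd.normal_conjugates_known_scale_posterior` export path and eager execution are assumptions of this sketch.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

prior = tfd.Normal(loc=0., scale=1.)  # mu0 = 0, sigma0 = 1
posterior = tfd.normal_conjugates_known_scale_posterior(
    prior, scale=tf.constant(1.), s=tf.constant(8.), n=4)

# sigma'**2 = 1 / (1/1 + 4/1) = 0.2
# mu'       = (0/1 + 8/1) * 0.2 = 1.6
print(posterior.loc.numpy())    # 1.6
print(posterior.scale.numpy())  # ~0.4472 = sqrt(0.2)
```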
test
|
real_nvp_default_template
|
Build a scale-and-shift function using a multi-layer neural network.
This will be wrapped in a make_template to ensure the variables are only
created once. It takes the `d`-dimensional input x[0:d] and returns the `D-d`
dimensional outputs `loc` ("mu") and `log_scale` ("alpha").
The default template does not support conditioning and will raise an
exception if `condition_kwargs` are passed to it. To use conditioning in
the RealNVP bijector, implement a conditioned shift/scale template that
handles the `condition_kwargs`.
Arguments:
  hidden_layers: Python `list`-like of non-negative integer scalars
    indicating the number of units in each hidden layer. Default: `[512, 512]`.
shift_only: Python `bool` indicating if only the `shift` term shall be
computed (i.e. NICE bijector). Default: `False`.
activation: Activation function (callable). Explicitly setting to `None`
implies a linear activation.
name: A name for ops managed by this function. Default:
"real_nvp_default_template".
*args: `tf.layers.dense` arguments.
**kwargs: `tf.layers.dense` keyword arguments.
Returns:
shift: `Float`-like `Tensor` of shift terms ("mu" in
[Papamakarios et al. (2016)][1]).
log_scale: `Float`-like `Tensor` of log(scale) terms ("alpha" in
[Papamakarios et al. (2016)][1]).
Raises:
NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
graph execution, or if `condition_kwargs` is not empty.
#### References
[1]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
Autoregressive Flow for Density Estimation. In _Neural Information
Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
|
tensorflow_probability/python/bijectors/real_nvp.py
|
def real_nvp_default_template(hidden_layers,
shift_only=False,
activation=tf.nn.relu,
name=None,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs):
"""Build a scale-and-shift function using a multi-layer neural network.
This will be wrapped in a make_template to ensure the variables are only
created once. It takes the `d`-dimensional input x[0:d] and returns the `D-d`
dimensional outputs `loc` ("mu") and `log_scale` ("alpha").
The default template does not support conditioning and will raise an
exception if `condition_kwargs` are passed to it. To use conditioning in
  the RealNVP bijector, implement a conditioned shift/scale template that
handles the `condition_kwargs`.
Arguments:
    hidden_layers: Python `list`-like of non-negative integer scalars
      indicating the number of units in each hidden layer. Default: `[512, 512]`.
shift_only: Python `bool` indicating if only the `shift` term shall be
computed (i.e. NICE bijector). Default: `False`.
activation: Activation function (callable). Explicitly setting to `None`
implies a linear activation.
name: A name for ops managed by this function. Default:
"real_nvp_default_template".
*args: `tf.layers.dense` arguments.
**kwargs: `tf.layers.dense` keyword arguments.
Returns:
shift: `Float`-like `Tensor` of shift terms ("mu" in
[Papamakarios et al. (2016)][1]).
log_scale: `Float`-like `Tensor` of log(scale) terms ("alpha" in
[Papamakarios et al. (2016)][1]).
Raises:
NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
graph execution, or if `condition_kwargs` is not empty.
#### References
[1]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
Autoregressive Flow for Density Estimation. In _Neural Information
Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
"""
with tf.compat.v2.name_scope(name or "real_nvp_default_template"):
def _fn(x, output_units, **condition_kwargs):
"""Fully connected MLP parameterized via `real_nvp_template`."""
if condition_kwargs:
raise NotImplementedError(
"Conditioning not implemented in the default template.")
if tensorshape_util.rank(x.shape) == 1:
x = x[tf.newaxis, ...]
reshape_output = lambda x: x[0]
else:
reshape_output = lambda x: x
for units in hidden_layers:
x = tf.compat.v1.layers.dense(
inputs=x,
units=units,
activation=activation,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs)
x = tf.compat.v1.layers.dense(
inputs=x,
units=(1 if shift_only else 2) * output_units,
activation=None,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs)
if shift_only:
return reshape_output(x), None
shift, log_scale = tf.split(x, 2, axis=-1)
return reshape_output(shift), reshape_output(log_scale)
return tf.compat.v1.make_template("real_nvp_default_template", _fn)
|
def real_nvp_default_template(hidden_layers,
shift_only=False,
activation=tf.nn.relu,
name=None,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs):
"""Build a scale-and-shift function using a multi-layer neural network.
This will be wrapped in a make_template to ensure the variables are only
created once. It takes the `d`-dimensional input x[0:d] and returns the `D-d`
dimensional outputs `loc` ("mu") and `log_scale` ("alpha").
The default template does not support conditioning and will raise an
exception if `condition_kwargs` are passed to it. To use conditioning in
  the RealNVP bijector, implement a conditioned shift/scale template that
handles the `condition_kwargs`.
Arguments:
    hidden_layers: Python `list`-like of non-negative integer scalars
      indicating the number of units in each hidden layer. Default: `[512, 512]`.
shift_only: Python `bool` indicating if only the `shift` term shall be
computed (i.e. NICE bijector). Default: `False`.
activation: Activation function (callable). Explicitly setting to `None`
implies a linear activation.
name: A name for ops managed by this function. Default:
"real_nvp_default_template".
*args: `tf.layers.dense` arguments.
**kwargs: `tf.layers.dense` keyword arguments.
Returns:
shift: `Float`-like `Tensor` of shift terms ("mu" in
[Papamakarios et al. (2016)][1]).
log_scale: `Float`-like `Tensor` of log(scale) terms ("alpha" in
[Papamakarios et al. (2016)][1]).
Raises:
NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
graph execution, or if `condition_kwargs` is not empty.
#### References
[1]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
Autoregressive Flow for Density Estimation. In _Neural Information
Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
"""
with tf.compat.v2.name_scope(name or "real_nvp_default_template"):
def _fn(x, output_units, **condition_kwargs):
"""Fully connected MLP parameterized via `real_nvp_template`."""
if condition_kwargs:
raise NotImplementedError(
"Conditioning not implemented in the default template.")
if tensorshape_util.rank(x.shape) == 1:
x = x[tf.newaxis, ...]
reshape_output = lambda x: x[0]
else:
reshape_output = lambda x: x
for units in hidden_layers:
x = tf.compat.v1.layers.dense(
inputs=x,
units=units,
activation=activation,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs)
x = tf.compat.v1.layers.dense(
inputs=x,
units=(1 if shift_only else 2) * output_units,
activation=None,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs)
if shift_only:
return reshape_output(x), None
shift, log_scale = tf.split(x, 2, axis=-1)
return reshape_output(shift), reshape_output(log_scale)
return tf.compat.v1.make_template("real_nvp_default_template", _fn)
|
[
"Build",
"a",
"scale",
"-",
"and",
"-",
"shift",
"function",
"using",
"a",
"multi",
"-",
"layer",
"neural",
"network",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/real_nvp.py#L228-L305
|
[
"def",
"real_nvp_default_template",
"(",
"hidden_layers",
",",
"shift_only",
"=",
"False",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"name",
"=",
"None",
",",
"*",
"args",
",",
"# pylint: disable=keyword-arg-before-vararg",
"*",
"*",
"kwargs",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v2",
".",
"name_scope",
"(",
"name",
"or",
"\"real_nvp_default_template\"",
")",
":",
"def",
"_fn",
"(",
"x",
",",
"output_units",
",",
"*",
"*",
"condition_kwargs",
")",
":",
"\"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"",
"if",
"condition_kwargs",
":",
"raise",
"NotImplementedError",
"(",
"\"Conditioning not implemented in the default template.\"",
")",
"if",
"tensorshape_util",
".",
"rank",
"(",
"x",
".",
"shape",
")",
"==",
"1",
":",
"x",
"=",
"x",
"[",
"tf",
".",
"newaxis",
",",
"...",
"]",
"reshape_output",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
"else",
":",
"reshape_output",
"=",
"lambda",
"x",
":",
"x",
"for",
"units",
"in",
"hidden_layers",
":",
"x",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"layers",
".",
"dense",
"(",
"inputs",
"=",
"x",
",",
"units",
"=",
"units",
",",
"activation",
"=",
"activation",
",",
"*",
"args",
",",
"# pylint: disable=keyword-arg-before-vararg",
"*",
"*",
"kwargs",
")",
"x",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"layers",
".",
"dense",
"(",
"inputs",
"=",
"x",
",",
"units",
"=",
"(",
"1",
"if",
"shift_only",
"else",
"2",
")",
"*",
"output_units",
",",
"activation",
"=",
"None",
",",
"*",
"args",
",",
"# pylint: disable=keyword-arg-before-vararg",
"*",
"*",
"kwargs",
")",
"if",
"shift_only",
":",
"return",
"reshape_output",
"(",
"x",
")",
",",
"None",
"shift",
",",
"log_scale",
"=",
"tf",
".",
"split",
"(",
"x",
",",
"2",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"reshape_output",
"(",
"shift",
")",
",",
"reshape_output",
"(",
"log_scale",
")",
"return",
"tf",
".",
"compat",
".",
"v1",
".",
"make_template",
"(",
"\"real_nvp_default_template\"",
",",
"_fn",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
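For context, the template above is typically passed to the `RealNVP` bijector as its `shift_and_log_scale_fn`. The sketch below mirrors the usual TFP pattern but is not taken from this file; it is TF1 graph-mode style, and variable initialization and session handling are omitted.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfb = tfp.bijectors
tfd = tfp.distributions

nvp = tfd.TransformedDistribution(
    distribution=tfd.MultivariateNormalDiag(loc=[0., 0., 0., 0.]),
    bijector=tfb.RealNVP(
        num_masked=2,
        shift_and_log_scale_fn=tfb.real_nvp_default_template(
            hidden_layers=[256, 256])))

x = nvp.sample(3)           # Tensor of shape [3, 4]
log_prob = nvp.log_prob(x)  # Tensor of shape [3]
```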
test
|
_uniform_unit_norm
|
Returns a batch of points chosen uniformly from the unit hypersphere.
|
tensorflow_probability/python/distributions/lkj.py
|
def _uniform_unit_norm(dimension, shape, dtype, seed):
"""Returns a batch of points chosen uniformly from the unit hypersphere."""
# This works because the Gaussian distribution is spherically symmetric.
# raw shape: shape + [dimension]
raw = normal.Normal(
loc=dtype_util.as_numpy_dtype(dtype)(0),
scale=dtype_util.as_numpy_dtype(dtype)(1)).sample(
tf.concat([shape, [dimension]], axis=0), seed=seed())
unit_norm = raw / tf.norm(tensor=raw, ord=2, axis=-1)[..., tf.newaxis]
return unit_norm
|
def _uniform_unit_norm(dimension, shape, dtype, seed):
"""Returns a batch of points chosen uniformly from the unit hypersphere."""
# This works because the Gaussian distribution is spherically symmetric.
# raw shape: shape + [dimension]
raw = normal.Normal(
loc=dtype_util.as_numpy_dtype(dtype)(0),
scale=dtype_util.as_numpy_dtype(dtype)(1)).sample(
tf.concat([shape, [dimension]], axis=0), seed=seed())
unit_norm = raw / tf.norm(tensor=raw, ord=2, axis=-1)[..., tf.newaxis]
return unit_norm
|
[
"Returns",
"a",
"batch",
"of",
"points",
"chosen",
"uniformly",
"from",
"the",
"unit",
"hypersphere",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/lkj.py#L47-L56
|
[
"def",
"_uniform_unit_norm",
"(",
"dimension",
",",
"shape",
",",
"dtype",
",",
"seed",
")",
":",
"# This works because the Gaussian distribution is spherically symmetric.",
"# raw shape: shape + [dimension]",
"raw",
"=",
"normal",
".",
"Normal",
"(",
"loc",
"=",
"dtype_util",
".",
"as_numpy_dtype",
"(",
"dtype",
")",
"(",
"0",
")",
",",
"scale",
"=",
"dtype_util",
".",
"as_numpy_dtype",
"(",
"dtype",
")",
"(",
"1",
")",
")",
".",
"sample",
"(",
"tf",
".",
"concat",
"(",
"[",
"shape",
",",
"[",
"dimension",
"]",
"]",
",",
"axis",
"=",
"0",
")",
",",
"seed",
"=",
"seed",
"(",
")",
")",
"unit_norm",
"=",
"raw",
"/",
"tf",
".",
"norm",
"(",
"tensor",
"=",
"raw",
",",
"ord",
"=",
"2",
",",
"axis",
"=",
"-",
"1",
")",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"return",
"unit_norm"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
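The comment in `_uniform_unit_norm` relies on the spherical symmetry of the Gaussian: normalizing i.i.d. normal draws yields points uniformly distributed on the unit hypersphere. A pure-NumPy illustration of that trick (NumPy is used here only to keep the sketch self-contained):
```python
import numpy as np

rng = np.random.RandomState(0)
raw = rng.normal(size=(1000, 3))                        # i.i.d. Gaussian draws
unit = raw / np.linalg.norm(raw, axis=-1, keepdims=True)
print(np.allclose(np.linalg.norm(unit, axis=-1), 1.0))  # True: all points lie on the sphere
print(unit.mean(axis=0))                                # roughly [0, 0, 0] by symmetry
```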
test
|
_replicate
|
Replicate the input tensor n times along a new (major) dimension.
|
tensorflow_probability/python/distributions/lkj.py
|
def _replicate(n, tensor):
"""Replicate the input tensor n times along a new (major) dimension."""
# TODO(axch) Does this already exist somewhere? Should it get contributed?
multiples = tf.concat([[n], tf.ones_like(tensor.shape)], axis=0)
return tf.tile(tf.expand_dims(tensor, axis=0), multiples)
|
def _replicate(n, tensor):
"""Replicate the input tensor n times along a new (major) dimension."""
# TODO(axch) Does this already exist somewhere? Should it get contributed?
multiples = tf.concat([[n], tf.ones_like(tensor.shape)], axis=0)
return tf.tile(tf.expand_dims(tensor, axis=0), multiples)
|
[
"Replicate",
"the",
"input",
"tensor",
"n",
"times",
"along",
"a",
"new",
"(",
"major",
")",
"dimension",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/lkj.py#L59-L63
|
[
"def",
"_replicate",
"(",
"n",
",",
"tensor",
")",
":",
"# TODO(axch) Does this already exist somewhere? Should it get contributed?",
"multiples",
"=",
"tf",
".",
"concat",
"(",
"[",
"[",
"n",
"]",
",",
"tf",
".",
"ones_like",
"(",
"tensor",
".",
"shape",
")",
"]",
",",
"axis",
"=",
"0",
")",
"return",
"tf",
".",
"tile",
"(",
"tf",
".",
"expand_dims",
"(",
"tensor",
",",
"axis",
"=",
"0",
")",
",",
"multiples",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
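A small check of what `_replicate` computes: `n` stacked copies of the input along a new leading dimension. The explicit `multiples` list below spells out what the `tf.concat` in the function builds; eager execution is assumed for the prints.
```python
import tensorflow as tf

t = tf.constant([[1., 2.],
                 [3., 4.]])                                 # shape [2, 2]
replicated = tf.tile(tf.expand_dims(t, axis=0), [3, 1, 1])  # same effect as _replicate(3, t)
print(replicated.shape)       # (3, 2, 2): three copies along a new major dimension
print(replicated[0].numpy())  # [[1. 2.] [3. 4.]]
```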
test
|
LKJ._sample_n
|
Returns a Tensor of samples from an LKJ distribution.
Args:
num_samples: Python `int`. The number of samples to draw.
seed: Python integer seed for RNG
name: Python `str` name prefixed to Ops created by this function.
Returns:
samples: A Tensor of correlation matrices with shape `[n, B, D, D]`,
where `B` is the shape of the `concentration` parameter, and `D`
is the `dimension`.
Raises:
ValueError: If `dimension` is negative.
|
tensorflow_probability/python/distributions/lkj.py
|
def _sample_n(self, num_samples, seed=None, name=None):
"""Returns a Tensor of samples from an LKJ distribution.
Args:
num_samples: Python `int`. The number of samples to draw.
seed: Python integer seed for RNG
name: Python `str` name prefixed to Ops created by this function.
Returns:
samples: A Tensor of correlation matrices with shape `[n, B, D, D]`,
where `B` is the shape of the `concentration` parameter, and `D`
is the `dimension`.
Raises:
ValueError: If `dimension` is negative.
"""
if self.dimension < 0:
raise ValueError(
'Cannot sample negative-dimension correlation matrices.')
# Notation below: B is the batch shape, i.e., tf.shape(concentration)
seed = seed_stream.SeedStream(seed, 'sample_lkj')
    with tf.name_scope(name or 'sample_lkj'):
if not dtype_util.is_floating(self.concentration.dtype):
raise TypeError(
'The concentration argument should have floating type, not '
'{}'.format(dtype_util.name(self.concentration.dtype)))
concentration = _replicate(num_samples, self.concentration)
concentration_shape = tf.shape(input=concentration)
if self.dimension <= 1:
# For any dimension <= 1, there is only one possible correlation matrix.
shape = tf.concat([
concentration_shape, [self.dimension, self.dimension]], axis=0)
return tf.ones(shape=shape, dtype=self.concentration.dtype)
beta_conc = concentration + (self.dimension - 2.) / 2.
beta_dist = beta.Beta(concentration1=beta_conc, concentration0=beta_conc)
# Note that the sampler below deviates from [1], by doing the sampling in
# cholesky space. This does not change the fundamental logic of the
# sampler, but does speed up the sampling.
# This is the correlation coefficient between the first two dimensions.
# This is also `r` in reference [1].
corr12 = 2. * beta_dist.sample(seed=seed()) - 1.
# Below we construct the Cholesky of the initial 2x2 correlation matrix,
# which is of the form:
# [[1, 0], [r, sqrt(1 - r**2)]], where r is the correlation between the
# first two dimensions.
# This is the top-left corner of the cholesky of the final sample.
first_row = tf.concat([
tf.ones_like(corr12)[..., tf.newaxis],
tf.zeros_like(corr12)[..., tf.newaxis]], axis=-1)
second_row = tf.concat([
corr12[..., tf.newaxis],
tf.sqrt(1 - corr12**2)[..., tf.newaxis]], axis=-1)
chol_result = tf.concat([
first_row[..., tf.newaxis, :],
second_row[..., tf.newaxis, :]], axis=-2)
for n in range(2, self.dimension):
# Loop invariant: on entry, result has shape B + [n, n]
beta_conc -= 0.5
# norm is y in reference [1].
norm = beta.Beta(
concentration1=n/2.,
concentration0=beta_conc
).sample(seed=seed())
# distance shape: B + [1] for broadcast
distance = tf.sqrt(norm)[..., tf.newaxis]
# direction is u in reference [1].
# direction shape: B + [n]
direction = _uniform_unit_norm(
n, concentration_shape, self.concentration.dtype, seed)
# raw_correlation is w in reference [1].
raw_correlation = distance * direction # shape: B + [n]
# This is the next row in the cholesky of the result,
# which differs from the construction in reference [1].
# In the reference, the new row `z` = chol_result @ raw_correlation^T
# = C @ raw_correlation^T (where as short hand we use C = chol_result).
# We prove that the below equation is the right row to add to the
# cholesky, by showing equality with reference [1].
# Let S be the sample constructed so far, and let `z` be as in
# reference [1]. Then at this iteration, the new sample S' will be
# [[S z^T]
# [z 1]]
# In our case we have the cholesky decomposition factor C, so
# we want our new row x (same size as z) to satisfy:
# [[S z^T] [[C 0] [[C^T x^T] [[CC^T Cx^T]
# [z 1]] = [x k]] [0 k]] = [xC^t xx^T + k**2]]
# Since C @ raw_correlation^T = z = C @ x^T, and C is invertible,
# we have that x = raw_correlation. Also 1 = xx^T + k**2, so k
# = sqrt(1 - xx^T) = sqrt(1 - |raw_correlation|**2) = sqrt(1 -
# distance**2).
new_row = tf.concat(
[raw_correlation, tf.sqrt(1. - norm[..., tf.newaxis])], axis=-1)
# Finally add this new row, by growing the cholesky of the result.
chol_result = tf.concat([
chol_result,
tf.zeros_like(chol_result[..., 0][..., tf.newaxis])], axis=-1)
chol_result = tf.concat(
[chol_result, new_row[..., tf.newaxis, :]], axis=-2)
if self.input_output_cholesky:
return chol_result
result = tf.matmul(chol_result, chol_result, transpose_b=True)
# The diagonal for a correlation matrix should always be ones. Due to
# numerical instability the matmul might not achieve that, so manually set
# these to ones.
result = tf.linalg.set_diag(
result,
tf.ones(shape=tf.shape(input=result)[:-1], dtype=result.dtype))
# This sampling algorithm can produce near-PSD matrices on which standard
# algorithms such as `tf.cholesky` or `tf.linalg.self_adjoint_eigvals`
# fail. Specifically, as documented in b/116828694, around 2% of trials
# of 900,000 5x5 matrices (distributed according to 9 different
# concentration parameter values) contained at least one matrix on which
# the Cholesky decomposition failed.
return result
|
def _sample_n(self, num_samples, seed=None, name=None):
"""Returns a Tensor of samples from an LKJ distribution.
Args:
num_samples: Python `int`. The number of samples to draw.
seed: Python integer seed for RNG
name: Python `str` name prefixed to Ops created by this function.
Returns:
samples: A Tensor of correlation matrices with shape `[n, B, D, D]`,
where `B` is the shape of the `concentration` parameter, and `D`
is the `dimension`.
Raises:
ValueError: If `dimension` is negative.
"""
if self.dimension < 0:
raise ValueError(
'Cannot sample negative-dimension correlation matrices.')
# Notation below: B is the batch shape, i.e., tf.shape(concentration)
seed = seed_stream.SeedStream(seed, 'sample_lkj')
with tf.name_scope(name or 'sample_lkj'):
if not dtype_util.is_floating(self.concentration.dtype):
raise TypeError(
'The concentration argument should have floating type, not '
'{}'.format(dtype_util.name(self.concentration.dtype)))
concentration = _replicate(num_samples, self.concentration)
concentration_shape = tf.shape(input=concentration)
if self.dimension <= 1:
# For any dimension <= 1, there is only one possible correlation matrix.
shape = tf.concat([
concentration_shape, [self.dimension, self.dimension]], axis=0)
return tf.ones(shape=shape, dtype=self.concentration.dtype)
beta_conc = concentration + (self.dimension - 2.) / 2.
beta_dist = beta.Beta(concentration1=beta_conc, concentration0=beta_conc)
# Note that the sampler below deviates from [1], by doing the sampling in
# cholesky space. This does not change the fundamental logic of the
# sampler, but does speed up the sampling.
# This is the correlation coefficient between the first two dimensions.
# This is also `r` in reference [1].
corr12 = 2. * beta_dist.sample(seed=seed()) - 1.
# Below we construct the Cholesky of the initial 2x2 correlation matrix,
# which is of the form:
# [[1, 0], [r, sqrt(1 - r**2)]], where r is the correlation between the
# first two dimensions.
# This is the top-left corner of the cholesky of the final sample.
first_row = tf.concat([
tf.ones_like(corr12)[..., tf.newaxis],
tf.zeros_like(corr12)[..., tf.newaxis]], axis=-1)
second_row = tf.concat([
corr12[..., tf.newaxis],
tf.sqrt(1 - corr12**2)[..., tf.newaxis]], axis=-1)
chol_result = tf.concat([
first_row[..., tf.newaxis, :],
second_row[..., tf.newaxis, :]], axis=-2)
for n in range(2, self.dimension):
# Loop invariant: on entry, result has shape B + [n, n]
beta_conc -= 0.5
# norm is y in reference [1].
norm = beta.Beta(
concentration1=n/2.,
concentration0=beta_conc
).sample(seed=seed())
# distance shape: B + [1] for broadcast
distance = tf.sqrt(norm)[..., tf.newaxis]
# direction is u in reference [1].
# direction shape: B + [n]
direction = _uniform_unit_norm(
n, concentration_shape, self.concentration.dtype, seed)
# raw_correlation is w in reference [1].
raw_correlation = distance * direction # shape: B + [n]
# This is the next row in the cholesky of the result,
# which differs from the construction in reference [1].
# In the reference, the new row `z` = chol_result @ raw_correlation^T
# = C @ raw_correlation^T (where as short hand we use C = chol_result).
# We prove that the below equation is the right row to add to the
# cholesky, by showing equality with reference [1].
# Let S be the sample constructed so far, and let `z` be as in
# reference [1]. Then at this iteration, the new sample S' will be
# [[S z^T]
# [z 1]]
# In our case we have the cholesky decomposition factor C, so
# we want our new row x (same size as z) to satisfy:
# [[S z^T] [[C 0] [[C^T x^T] [[CC^T Cx^T]
# [z 1]] = [x k]] [0 k]] = [xC^t xx^T + k**2]]
# Since C @ raw_correlation^T = z = C @ x^T, and C is invertible,
# we have that x = raw_correlation. Also 1 = xx^T + k**2, so k
# = sqrt(1 - xx^T) = sqrt(1 - |raw_correlation|**2) = sqrt(1 -
# distance**2).
new_row = tf.concat(
[raw_correlation, tf.sqrt(1. - norm[..., tf.newaxis])], axis=-1)
# Finally add this new row, by growing the cholesky of the result.
chol_result = tf.concat([
chol_result,
tf.zeros_like(chol_result[..., 0][..., tf.newaxis])], axis=-1)
chol_result = tf.concat(
[chol_result, new_row[..., tf.newaxis, :]], axis=-2)
if self.input_output_cholesky:
return chol_result
result = tf.matmul(chol_result, chol_result, transpose_b=True)
# The diagonal for a correlation matrix should always be ones. Due to
# numerical instability the matmul might not achieve that, so manually set
# these to ones.
result = tf.linalg.set_diag(
result,
tf.ones(shape=tf.shape(input=result)[:-1], dtype=result.dtype))
# This sampling algorithm can produce near-PSD matrices on which standard
# algorithms such as `tf.cholesky` or `tf.linalg.self_adjoint_eigvals`
# fail. Specifically, as documented in b/116828694, around 2% of trials
# of 900,000 5x5 matrices (distributed according to 9 different
# concentration parameter values) contained at least one matrix on which
# the Cholesky decomposition failed.
return result
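As a usage illustration for the sampler above, the sketch below draws correlation matrices through the public `tfd.LKJ` distribution and checks the two properties the code works to guarantee (unit diagonal, symmetry). The constructor arguments are assumed from the docstring; treat this as a hedged sketch rather than part of the record.

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# Assumed public entry point for the `_sample_n` implementation above.
lkj = tfd.LKJ(dimension=3, concentration=1.5)
samples = lkj.sample(4, seed=42)  # expected shape: [4, 3, 3]

# Each draw should be a valid correlation matrix: ones on the diagonal
# (set explicitly by the sampler) and symmetric up to numerical noise.
unit_diag = tf.linalg.diag_part(samples)
sym_err = tf.reduce_max(
    tf.abs(samples - tf.linalg.matrix_transpose(samples)))
```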
|
[
"Returns",
"a",
"Tensor",
"of",
"samples",
"from",
"an",
"LKJ",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/lkj.py#L190-L313
|
[
"def",
"_sample_n",
"(",
"self",
",",
"num_samples",
",",
"seed",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"self",
".",
"dimension",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'Cannot sample negative-dimension correlation matrices.'",
")",
"# Notation below: B is the batch shape, i.e., tf.shape(concentration)",
"seed",
"=",
"seed_stream",
".",
"SeedStream",
"(",
"seed",
",",
"'sample_lkj'",
")",
"with",
"tf",
".",
"name_scope",
"(",
"'sample_lkj'",
"or",
"name",
")",
":",
"if",
"not",
"dtype_util",
".",
"is_floating",
"(",
"self",
".",
"concentration",
".",
"dtype",
")",
":",
"raise",
"TypeError",
"(",
"'The concentration argument should have floating type, not '",
"'{}'",
".",
"format",
"(",
"dtype_util",
".",
"name",
"(",
"self",
".",
"concentration",
".",
"dtype",
")",
")",
")",
"concentration",
"=",
"_replicate",
"(",
"num_samples",
",",
"self",
".",
"concentration",
")",
"concentration_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"concentration",
")",
"if",
"self",
".",
"dimension",
"<=",
"1",
":",
"# For any dimension <= 1, there is only one possible correlation matrix.",
"shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"concentration_shape",
",",
"[",
"self",
".",
"dimension",
",",
"self",
".",
"dimension",
"]",
"]",
",",
"axis",
"=",
"0",
")",
"return",
"tf",
".",
"ones",
"(",
"shape",
"=",
"shape",
",",
"dtype",
"=",
"self",
".",
"concentration",
".",
"dtype",
")",
"beta_conc",
"=",
"concentration",
"+",
"(",
"self",
".",
"dimension",
"-",
"2.",
")",
"/",
"2.",
"beta_dist",
"=",
"beta",
".",
"Beta",
"(",
"concentration1",
"=",
"beta_conc",
",",
"concentration0",
"=",
"beta_conc",
")",
"# Note that the sampler below deviates from [1], by doing the sampling in",
"# cholesky space. This does not change the fundamental logic of the",
"# sampler, but does speed up the sampling.",
"# This is the correlation coefficient between the first two dimensions.",
"# This is also `r` in reference [1].",
"corr12",
"=",
"2.",
"*",
"beta_dist",
".",
"sample",
"(",
"seed",
"=",
"seed",
"(",
")",
")",
"-",
"1.",
"# Below we construct the Cholesky of the initial 2x2 correlation matrix,",
"# which is of the form:",
"# [[1, 0], [r, sqrt(1 - r**2)]], where r is the correlation between the",
"# first two dimensions.",
"# This is the top-left corner of the cholesky of the final sample.",
"first_row",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"ones_like",
"(",
"corr12",
")",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
",",
"tf",
".",
"zeros_like",
"(",
"corr12",
")",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"second_row",
"=",
"tf",
".",
"concat",
"(",
"[",
"corr12",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
",",
"tf",
".",
"sqrt",
"(",
"1",
"-",
"corr12",
"**",
"2",
")",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"chol_result",
"=",
"tf",
".",
"concat",
"(",
"[",
"first_row",
"[",
"...",
",",
"tf",
".",
"newaxis",
",",
":",
"]",
",",
"second_row",
"[",
"...",
",",
"tf",
".",
"newaxis",
",",
":",
"]",
"]",
",",
"axis",
"=",
"-",
"2",
")",
"for",
"n",
"in",
"range",
"(",
"2",
",",
"self",
".",
"dimension",
")",
":",
"# Loop invariant: on entry, result has shape B + [n, n]",
"beta_conc",
"-=",
"0.5",
"# norm is y in reference [1].",
"norm",
"=",
"beta",
".",
"Beta",
"(",
"concentration1",
"=",
"n",
"/",
"2.",
",",
"concentration0",
"=",
"beta_conc",
")",
".",
"sample",
"(",
"seed",
"=",
"seed",
"(",
")",
")",
"# distance shape: B + [1] for broadcast",
"distance",
"=",
"tf",
".",
"sqrt",
"(",
"norm",
")",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"# direction is u in reference [1].",
"# direction shape: B + [n]",
"direction",
"=",
"_uniform_unit_norm",
"(",
"n",
",",
"concentration_shape",
",",
"self",
".",
"concentration",
".",
"dtype",
",",
"seed",
")",
"# raw_correlation is w in reference [1].",
"raw_correlation",
"=",
"distance",
"*",
"direction",
"# shape: B + [n]",
"# This is the next row in the cholesky of the result,",
"# which differs from the construction in reference [1].",
"# In the reference, the new row `z` = chol_result @ raw_correlation^T",
"# = C @ raw_correlation^T (where as short hand we use C = chol_result).",
"# We prove that the below equation is the right row to add to the",
"# cholesky, by showing equality with reference [1].",
"# Let S be the sample constructed so far, and let `z` be as in",
"# reference [1]. Then at this iteration, the new sample S' will be",
"# [[S z^T]",
"# [z 1]]",
"# In our case we have the cholesky decomposition factor C, so",
"# we want our new row x (same size as z) to satisfy:",
"# [[S z^T] [[C 0] [[C^T x^T] [[CC^T Cx^T]",
"# [z 1]] = [x k]] [0 k]] = [xC^t xx^T + k**2]]",
"# Since C @ raw_correlation^T = z = C @ x^T, and C is invertible,",
"# we have that x = raw_correlation. Also 1 = xx^T + k**2, so k",
"# = sqrt(1 - xx^T) = sqrt(1 - |raw_correlation|**2) = sqrt(1 -",
"# distance**2).",
"new_row",
"=",
"tf",
".",
"concat",
"(",
"[",
"raw_correlation",
",",
"tf",
".",
"sqrt",
"(",
"1.",
"-",
"norm",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
")",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"# Finally add this new row, by growing the cholesky of the result.",
"chol_result",
"=",
"tf",
".",
"concat",
"(",
"[",
"chol_result",
",",
"tf",
".",
"zeros_like",
"(",
"chol_result",
"[",
"...",
",",
"0",
"]",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
")",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"chol_result",
"=",
"tf",
".",
"concat",
"(",
"[",
"chol_result",
",",
"new_row",
"[",
"...",
",",
"tf",
".",
"newaxis",
",",
":",
"]",
"]",
",",
"axis",
"=",
"-",
"2",
")",
"if",
"self",
".",
"input_output_cholesky",
":",
"return",
"chol_result",
"result",
"=",
"tf",
".",
"matmul",
"(",
"chol_result",
",",
"chol_result",
",",
"transpose_b",
"=",
"True",
")",
"# The diagonal for a correlation matrix should always be ones. Due to",
"# numerical instability the matmul might not achieve that, so manually set",
"# these to ones.",
"result",
"=",
"tf",
".",
"linalg",
".",
"set_diag",
"(",
"result",
",",
"tf",
".",
"ones",
"(",
"shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"result",
")",
"[",
":",
"-",
"1",
"]",
",",
"dtype",
"=",
"result",
".",
"dtype",
")",
")",
"# This sampling algorithm can produce near-PSD matrices on which standard",
"# algorithms such as `tf.cholesky` or `tf.linalg.self_adjoint_eigvals`",
"# fail. Specifically, as documented in b/116828694, around 2% of trials",
"# of 900,000 5x5 matrices (distributed according to 9 different",
"# concentration parameter values) contained at least one matrix on which",
"# the Cholesky decomposition failed.",
"return",
"result"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
LKJ._log_unnorm_prob
|
Returns the unnormalized log density of an LKJ distribution.
Args:
x: `float` or `double` `Tensor` of correlation matrices. The shape of `x`
must be `B + [D, D]`, where `B` broadcasts with the shape of
`concentration`.
name: Python `str` name prefixed to Ops created by this function.
Returns:
log_p: A Tensor of the unnormalized log density of each matrix element of
`x`, with respect to an LKJ distribution with parameter the
corresponding element of `concentration`.
|
tensorflow_probability/python/distributions/lkj.py
|
def _log_unnorm_prob(self, x, name=None):
"""Returns the unnormalized log density of an LKJ distribution.
Args:
x: `float` or `double` `Tensor` of correlation matrices. The shape of `x`
must be `B + [D, D]`, where `B` broadcasts with the shape of
`concentration`.
name: Python `str` name prefixed to Ops created by this function.
Returns:
log_p: A Tensor of the unnormalized log density of each matrix element of
`x`, with respect to an LKJ distribution with parameter the
corresponding element of `concentration`.
"""
with tf.name_scope(name or 'log_unnorm_prob_lkj'):
x = tf.convert_to_tensor(value=x, name='x')
# The density is det(matrix) ** (concentration - 1).
# Computing the determinant with `logdet` is usually fine, since
# correlation matrices are Hermitian and PSD. But in some cases, for a
# PSD matrix whose eigenvalues are close to zero, `logdet` raises an error
# complaining that it is not PSD. The root cause is the computation of the
# cholesky decomposition in `logdet`. Hence, we use the less efficient but
# more robust `slogdet` which does not use `cholesky`.
#
# An alternative would have been to check allow_nan_stats and use
# eigenvalues = tf.linalg.self_adjoint_eigvals(x)
# psd_mask = tf.cast(
# tf.reduce_min(eigenvalues, axis=-1) >= 0, dtype=x.dtype)
# tf.where(psd_mask, answer, float('-inf'))
# to emit probability 0 for inputs that are not PSD, without ever raising
# an error. More care must be taken, as due to numerical stability issues,
# self_adjoint_eigvals can return slightly negative eigenvalues even for
# a PSD matrix.
if self.input_output_cholesky:
logdet = 2.0 * tf.reduce_sum(
input_tensor=tf.math.log(tf.linalg.diag_part(x)), axis=[-1])
else:
_, logdet = tf.linalg.slogdet(x)
answer = (self.concentration - 1.) * logdet
return answer
|
def _log_unnorm_prob(self, x, name=None):
"""Returns the unnormalized log density of an LKJ distribution.
Args:
x: `float` or `double` `Tensor` of correlation matrices. The shape of `x`
must be `B + [D, D]`, where `B` broadcasts with the shape of
`concentration`.
name: Python `str` name prefixed to Ops created by this function.
Returns:
log_p: A Tensor of the unnormalized log density of each matrix element of
`x`, with respect to an LKJ distribution with parameter the
corresponding element of `concentration`.
"""
with tf.name_scope(name or 'log_unnorm_prob_lkj'):
x = tf.convert_to_tensor(value=x, name='x')
# The density is det(matrix) ** (concentration - 1).
# Computing the determinant with `logdet` is usually fine, since
# correlation matrices are Hermitian and PSD. But in some cases, for a
# PSD matrix whose eigenvalues are close to zero, `logdet` raises an error
# complaining that it is not PSD. The root cause is the computation of the
# cholesky decomposition in `logdet`. Hence, we use the less efficient but
# more robust `slogdet` which does not use `cholesky`.
#
# An alternative would have been to check allow_nan_stats and use
# eigenvalues = tf.linalg.self_adjoint_eigvals(x)
# psd_mask = tf.cast(
# tf.reduce_min(eigenvalues, axis=-1) >= 0, dtype=x.dtype)
# tf.where(psd_mask, answer, float('-inf'))
# to emit probability 0 for inputs that are not PSD, without ever raising
# an error. More care must be taken, as due to numerical stability issues,
# self_adjoint_eigvals can return slightly negative eigenvalues even for
# a PSD matrix.
if self.input_output_cholesky:
logdet = 2.0 * tf.reduce_sum(
input_tensor=tf.math.log(tf.linalg.diag_part(x)), axis=[-1])
else:
_, logdet = tf.linalg.slogdet(x)
answer = (self.concentration - 1.) * logdet
return answer
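To make the density relation concrete, the following hedged sketch evaluates the same quantity for a single hand-written 3x3 correlation matrix; the matrix and concentration value are made up for illustration.

```python
import numpy as np
import tensorflow as tf

concentration = 2.0
x = np.array([[1.0, 0.3, 0.1],
              [0.3, 1.0, 0.2],
              [0.1, 0.2, 1.0]])

# As in the code above, `slogdet` avoids the Cholesky factorization that
# `logdet` would attempt, which can fail on nearly-singular PSD inputs.
_, logdet = tf.linalg.slogdet(tf.constant(x))
log_unnorm = (concentration - 1.) * logdet  # log of det(x)**(concentration - 1)
```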
|
[
"Returns",
"the",
"unnormalized",
"log",
"density",
"of",
"an",
"LKJ",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/lkj.py#L371-L410
|
[
"def",
"_log_unnorm_prob",
"(",
"self",
",",
"x",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"'log_unnorm_prob_lkj'",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"'x'",
")",
"# The density is det(matrix) ** (concentration - 1).",
"# Computing the determinant with `logdet` is usually fine, since",
"# correlation matrices are Hermitian and PSD. But in some cases, for a",
"# PSD matrix whose eigenvalues are close to zero, `logdet` raises an error",
"# complaining that it is not PSD. The root cause is the computation of the",
"# cholesky decomposition in `logdet`. Hence, we use the less efficient but",
"# more robust `slogdet` which does not use `cholesky`.",
"#",
"# An alternative would have been to check allow_nan_stats and use",
"# eigenvalues = tf.linalg.self_adjoint_eigvals(x)",
"# psd_mask = tf.cast(",
"# tf.reduce_min(eigenvalues, axis=-1) >= 0, dtype=x.dtype)",
"# tf.where(psd_mask, answer, float('-inf'))",
"# to emit probability 0 for inputs that are not PSD, without ever raising",
"# an error. More care must be taken, as due to numerical stability issues,",
"# self_adjoint_eigvals can return slightly negative eigenvalues even for",
"# a PSD matrix.",
"if",
"self",
".",
"input_output_cholesky",
":",
"logdet",
"=",
"2.0",
"*",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"tf",
".",
"math",
".",
"log",
"(",
"tf",
".",
"linalg",
".",
"diag_part",
"(",
"x",
")",
")",
",",
"axis",
"=",
"[",
"-",
"1",
"]",
")",
"else",
":",
"_",
",",
"logdet",
"=",
"tf",
".",
"linalg",
".",
"slogdet",
"(",
"x",
")",
"answer",
"=",
"(",
"self",
".",
"concentration",
"-",
"1.",
")",
"*",
"logdet",
"return",
"answer"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
LKJ._log_normalization
|
Returns the log normalization of an LKJ distribution.
Args:
name: Python `str` name prefixed to Ops created by this function.
Returns:
log_z: A Tensor of the same shape and dtype as `concentration`, containing
the corresponding log normalizers.
|
tensorflow_probability/python/distributions/lkj.py
|
def _log_normalization(self, name='log_normalization'):
"""Returns the log normalization of an LKJ distribution.
Args:
name: Python `str` name prefixed to Ops created by this function.
Returns:
log_z: A Tensor of the same shape and dtype as `concentration`, containing
the corresponding log normalizers.
"""
# The formula is from D. Lewandowski et al [1], p. 1999, from the
# proof that eqs 16 and 17 are equivalent.
with tf.name_scope(name or 'log_normalization_lkj'):
logpi = np.log(np.pi)
ans = tf.zeros_like(self.concentration)
for k in range(1, self.dimension):
ans += logpi * (k / 2.)
ans += tf.math.lgamma(self.concentration +
(self.dimension - 1 - k) / 2.)
ans -= tf.math.lgamma(self.concentration + (self.dimension - 1) / 2.)
return ans
|
def _log_normalization(self, name='log_normalization'):
"""Returns the log normalization of an LKJ distribution.
Args:
name: Python `str` name prefixed to Ops created by this function.
Returns:
log_z: A Tensor of the same shape and dtype as `concentration`, containing
the corresponding log normalizers.
"""
# The formula is from D. Lewandowski et al [1], p. 1999, from the
# proof that eqs 16 and 17 are equivalent.
with tf.name_scope(name or 'log_normalization_lkj'):
logpi = np.log(np.pi)
ans = tf.zeros_like(self.concentration)
for k in range(1, self.dimension):
ans += logpi * (k / 2.)
ans += tf.math.lgamma(self.concentration +
(self.dimension - 1 - k) / 2.)
ans -= tf.math.lgamma(self.concentration + (self.dimension - 1) / 2.)
return ans
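The normalizer is a finite sum of lgamma terms, so it is easy to transcribe outside TensorFlow. The sketch below is a plain NumPy/SciPy version for scalar `concentration`, included only to make the formula above concrete.

```python
import numpy as np
from scipy.special import gammaln

def lkj_log_normalization(concentration, dimension):
  """NumPy transcription of the loop above (scalar concentration)."""
  ans = 0.0
  for k in range(1, dimension):
    ans += np.log(np.pi) * (k / 2.)
    ans += gammaln(concentration + (dimension - 1 - k) / 2.)
    ans -= gammaln(concentration + (dimension - 1) / 2.)
  return ans

log_z = lkj_log_normalization(2.0, 4)  # normalizer for a 4x4 LKJ, concentration 2
```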
|
[
"Returns",
"the",
"log",
"normalization",
"of",
"an",
"LKJ",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/lkj.py#L412-L432
|
[
"def",
"_log_normalization",
"(",
"self",
",",
"name",
"=",
"'log_normalization'",
")",
":",
"# The formula is from D. Lewandowski et al [1], p. 1999, from the",
"# proof that eqs 16 and 17 are equivalent.",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"'log_normalization_lkj'",
")",
":",
"logpi",
"=",
"np",
".",
"log",
"(",
"np",
".",
"pi",
")",
"ans",
"=",
"tf",
".",
"zeros_like",
"(",
"self",
".",
"concentration",
")",
"for",
"k",
"in",
"range",
"(",
"1",
",",
"self",
".",
"dimension",
")",
":",
"ans",
"+=",
"logpi",
"*",
"(",
"k",
"/",
"2.",
")",
"ans",
"+=",
"tf",
".",
"math",
".",
"lgamma",
"(",
"self",
".",
"concentration",
"+",
"(",
"self",
".",
"dimension",
"-",
"1",
"-",
"k",
")",
"/",
"2.",
")",
"ans",
"-=",
"tf",
".",
"math",
".",
"lgamma",
"(",
"self",
".",
"concentration",
"+",
"(",
"self",
".",
"dimension",
"-",
"1",
")",
"/",
"2.",
")",
"return",
"ans"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
common_dtype
|
Returns explicit dtype from `args_list` if exists, else preferred_dtype.
|
tensorflow_probability/python/internal/backend/numpy/internal/utils.py
|
def common_dtype(args_list, preferred_dtype=None):
"""Returns explict dtype from `args_list` if exists, else preferred_dtype."""
dtype = None
preferred_dtype = (None if preferred_dtype is None
else tf.as_dtype(preferred_dtype))
for a in tf.nest.flatten(args_list):
if hasattr(a, 'dtype'):
dt = tf.as_dtype(a.dtype)
else:
continue
if dtype is None:
dtype = dt
elif dtype != dt:
raise TypeError('Found incompatible dtypes, {} and {}.'.format(dtype, dt))
if dtype is None and preferred_dtype is None:
return None
return (preferred_dtype if dtype is None else dtype).as_numpy_dtype
|
def common_dtype(args_list, preferred_dtype=None):
"""Returns explict dtype from `args_list` if exists, else preferred_dtype."""
dtype = None
preferred_dtype = (None if preferred_dtype is None
else tf.as_dtype(preferred_dtype))
for a in tf.nest.flatten(args_list):
if hasattr(a, 'dtype'):
dt = tf.as_dtype(a.dtype)
else:
continue
if dtype is None:
dtype = dt
elif dtype != dt:
raise TypeError('Found incompatible dtypes, {} and {}.'.format(dtype, dt))
if dtype is None and preferred_dtype is None:
return None
return (preferred_dtype if dtype is None else dtype).as_numpy_dtype
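The resolution rule is: take the single explicit dtype found among the arguments, otherwise fall back to `preferred_dtype`, and reject mixtures. The sketch below restates that rule against plain NumPy objects; it is an illustration of the behaviour, not a call into the backend module itself.

```python
import numpy as np

def resolve_dtype(args, preferred=None):
  """Hedged re-statement of the dtype-resolution rule above."""
  found = None
  for a in args:
    dt = getattr(a, 'dtype', None)
    if dt is None:
      continue  # plain Python numbers carry no explicit dtype
    if found is None:
      found = dt
    elif found != dt:
      raise TypeError('Found incompatible dtypes, {} and {}.'.format(found, dt))
  return preferred if found is None else found

resolve_dtype([np.float32(1.), 2, 3])            # -> float32
resolve_dtype([1, 2, 3], preferred=np.float64)   # -> float64
resolve_dtype([1, 2, 3])                         # -> None
```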
|
[
"Returns",
"explict",
"dtype",
"from",
"args_list",
"if",
"exists",
"else",
"preferred_dtype",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/backend/numpy/internal/utils.py#L58-L74
|
[
"def",
"common_dtype",
"(",
"args_list",
",",
"preferred_dtype",
"=",
"None",
")",
":",
"dtype",
"=",
"None",
"preferred_dtype",
"=",
"(",
"None",
"if",
"preferred_dtype",
"is",
"None",
"else",
"tf",
".",
"as_dtype",
"(",
"preferred_dtype",
")",
")",
"for",
"a",
"in",
"tf",
".",
"nest",
".",
"flatten",
"(",
"args_list",
")",
":",
"if",
"hasattr",
"(",
"a",
",",
"'dtype'",
")",
":",
"dt",
"=",
"tf",
".",
"as_dtype",
"(",
"a",
".",
"dtype",
")",
"else",
":",
"continue",
"if",
"dtype",
"is",
"None",
":",
"dtype",
"=",
"dt",
"elif",
"dtype",
"!=",
"dt",
":",
"raise",
"TypeError",
"(",
"'Found incompatible dtypes, {} and {}.'",
".",
"format",
"(",
"dtype",
",",
"dt",
")",
")",
"if",
"dtype",
"is",
"None",
"and",
"preferred_dtype",
"is",
"None",
":",
"return",
"None",
"return",
"(",
"preferred_dtype",
"if",
"dtype",
"is",
"None",
"else",
"dtype",
")",
".",
"as_numpy_dtype"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_make_summary_statistic
|
Factory for implementing summary statistics, eg, mean, stddev, mode.
|
tensorflow_probability/python/distributions/sample.py
|
def _make_summary_statistic(attr):
"""Factory for implementing summary statistics, eg, mean, stddev, mode."""
def _fn(self, **kwargs):
"""Implements summary statistic, eg, mean, stddev, mode."""
x = getattr(self.distribution, attr)(**kwargs)
shape = prefer_static.concat([
self.distribution.batch_shape_tensor(),
prefer_static.ones(prefer_static.rank_from_shape(self.sample_shape),
dtype=self.sample_shape.dtype),
self.distribution.event_shape_tensor(),
], axis=0)
x = tf.reshape(x, shape=shape)
shape = prefer_static.concat([
self.distribution.batch_shape_tensor(),
self.sample_shape,
self.distribution.event_shape_tensor(),
], axis=0)
return tf.broadcast_to(x, shape)
return _fn
|
def _make_summary_statistic(attr):
"""Factory for implementing summary statistics, eg, mean, stddev, mode."""
def _fn(self, **kwargs):
"""Implements summary statistic, eg, mean, stddev, mode."""
x = getattr(self.distribution, attr)(**kwargs)
shape = prefer_static.concat([
self.distribution.batch_shape_tensor(),
prefer_static.ones(prefer_static.rank_from_shape(self.sample_shape),
dtype=self.sample_shape.dtype),
self.distribution.event_shape_tensor(),
], axis=0)
x = tf.reshape(x, shape=shape)
shape = prefer_static.concat([
self.distribution.batch_shape_tensor(),
self.sample_shape,
self.distribution.event_shape_tensor(),
], axis=0)
return tf.broadcast_to(x, shape)
return _fn
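This factory backs the `mean`, `stddev`, and `mode` methods of the `Sample` meta-distribution: the base statistic is computed once and then broadcast across `sample_shape`. A hedged usage sketch (shapes stated as expectations, not verified here):

```python
import tensorflow_probability as tfp

tfd = tfp.distributions

# Batch of two Normals, replicated 3 times by Sample.
dist = tfd.Sample(tfd.Normal(loc=[0., 1.], scale=1.), sample_shape=[3])

dist.mean().shape    # expected [2, 3]: batch shape [2] broadcast over sample_shape [3]
dist.stddev().shape  # expected [2, 3], every column identical
```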
|
[
"Factory",
"for",
"implementing",
"summary",
"statistics",
"eg",
"mean",
"stddev",
"mode",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/sample.py#L34-L52
|
[
"def",
"_make_summary_statistic",
"(",
"attr",
")",
":",
"def",
"_fn",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Implements summary statistic, eg, mean, stddev, mode.\"\"\"",
"x",
"=",
"getattr",
"(",
"self",
".",
"distribution",
",",
"attr",
")",
"(",
"*",
"*",
"kwargs",
")",
"shape",
"=",
"prefer_static",
".",
"concat",
"(",
"[",
"self",
".",
"distribution",
".",
"batch_shape_tensor",
"(",
")",
",",
"prefer_static",
".",
"ones",
"(",
"prefer_static",
".",
"rank_from_shape",
"(",
"self",
".",
"sample_shape",
")",
",",
"dtype",
"=",
"self",
".",
"sample_shape",
".",
"dtype",
")",
",",
"self",
".",
"distribution",
".",
"event_shape_tensor",
"(",
")",
",",
"]",
",",
"axis",
"=",
"0",
")",
"x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"shape",
"=",
"shape",
")",
"shape",
"=",
"prefer_static",
".",
"concat",
"(",
"[",
"self",
".",
"distribution",
".",
"batch_shape_tensor",
"(",
")",
",",
"self",
".",
"sample_shape",
",",
"self",
".",
"distribution",
".",
"event_shape_tensor",
"(",
")",
",",
"]",
",",
"axis",
"=",
"0",
")",
"return",
"tf",
".",
"broadcast_to",
"(",
"x",
",",
"shape",
")",
"return",
"_fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_kl_sample
|
Batched KL divergence `KL(a || b)` for Sample distributions.
We can leverage the fact that:
```
KL(Sample(a) || Sample(b)) = sum(KL(a || b))
```
where the sum is over the `sample_shape` dims.
Args:
a: Instance of `Sample` distribution.
b: Instance of `Sample` distribution.
name: (optional) name to use for created ops.
Default value: `"kl_sample"`.
Returns:
kldiv: Batchwise `KL(a || b)`.
Raises:
ValueError: If the `sample_shape` of `a` and `b` don't match.
|
tensorflow_probability/python/distributions/sample.py
|
def _kl_sample(a, b, name='kl_sample'):
"""Batched KL divergence `KL(a || b)` for Sample distributions.
We can leverage the fact that:
```
KL(Sample(a) || Sample(b)) = sum(KL(a || b))
```
where the sum is over the `sample_shape` dims.
Args:
a: Instance of `Sample` distribution.
b: Instance of `Sample` distribution.
name: (optional) name to use for created ops.
Default value: `"kl_sample"`.
Returns:
kldiv: Batchwise `KL(a || b)`.
Raises:
ValueError: If the `sample_shape` of `a` and `b` don't match.
"""
assertions = []
a_ss = tf.get_static_value(a.sample_shape)
b_ss = tf.get_static_value(b.sample_shape)
msg = '`a.sample_shape` must be identical to `b.sample_shape`.'
if a_ss is not None and b_ss is not None:
if not np.array_equal(a_ss, b_ss):
raise ValueError(msg)
elif a.validate_args or b.validate_args:
assertions.append(assert_util.assert_equal(
a.sample_shape, b.sample_shape, message=msg))
with tf.control_dependencies(assertions):
kl = kullback_leibler.kl_divergence(
a.distribution, b.distribution, name=name)
n = prefer_static.reduce_prod(a.sample_shape)
return tf.cast(x=n, dtype=kl.dtype) * kl
|
def _kl_sample(a, b, name='kl_sample'):
"""Batched KL divergence `KL(a || b)` for Sample distributions.
We can leverage the fact that:
```
KL(Sample(a) || Sample(b)) = sum(KL(a || b))
```
where the sum is over the `sample_shape` dims.
Args:
a: Instance of `Sample` distribution.
b: Instance of `Sample` distribution.
name: (optional) name to use for created ops.
Default value: `"kl_sample"`.
Returns:
kldiv: Batchwise `KL(a || b)`.
Raises:
ValueError: If the `sample_shape` of `a` and `b` don't match.
"""
assertions = []
a_ss = tf.get_static_value(a.sample_shape)
b_ss = tf.get_static_value(b.sample_shape)
msg = '`a.sample_shape` must be identical to `b.sample_shape`.'
if a_ss is not None and b_ss is not None:
if not np.array_equal(a_ss, b_ss):
raise ValueError(msg)
elif a.validate_args or b.validate_args:
assertions.append(assert_util.assert_equal(
a.sample_shape, b.sample_shape, message=msg))
with tf.control_dependencies(assertions):
kl = kullback_leibler.kl_divergence(
a.distribution, b.distribution, name=name)
n = prefer_static.reduce_prod(a.sample_shape)
return tf.cast(x=n, dtype=kl.dtype) * kl
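A hedged sketch of the identity this registration relies on, `KL(Sample(a) || Sample(b)) = prod(sample_shape) * KL(a || b)`, using two wrapped Normals:

```python
import tensorflow_probability as tfp

tfd = tfp.distributions

a = tfd.Sample(tfd.Normal(0., 1.), sample_shape=[2, 3])
b = tfd.Sample(tfd.Normal(1., 2.), sample_shape=[2, 3])

kl_wrapped = tfd.kl_divergence(a, b)
kl_component = tfd.kl_divergence(tfd.Normal(0., 1.), tfd.Normal(1., 2.))
# kl_wrapped should equal 6. * kl_component, since prod([2, 3]) = 6.
```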
|
[
"Batched",
"KL",
"divergence",
"KL",
"(",
"a",
"||",
"b",
")",
"for",
"Sample",
"distributions",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/sample.py#L241-L278
|
[
"def",
"_kl_sample",
"(",
"a",
",",
"b",
",",
"name",
"=",
"'kl_sample'",
")",
":",
"assertions",
"=",
"[",
"]",
"a_ss",
"=",
"tf",
".",
"get_static_value",
"(",
"a",
".",
"sample_shape",
")",
"b_ss",
"=",
"tf",
".",
"get_static_value",
"(",
"b",
".",
"sample_shape",
")",
"msg",
"=",
"'`a.sample_shape` must be identical to `b.sample_shape`.'",
"if",
"a_ss",
"is",
"not",
"None",
"and",
"b_ss",
"is",
"not",
"None",
":",
"if",
"not",
"np",
".",
"array_equal",
"(",
"a_ss",
",",
"b_ss",
")",
":",
"raise",
"ValueError",
"(",
"msg",
")",
"elif",
"a",
".",
"validate_args",
"or",
"b",
".",
"validate_args",
":",
"assertions",
".",
"append",
"(",
"assert_util",
".",
"assert_equal",
"(",
"a",
".",
"sample_shape",
",",
"b",
".",
"sample_shape",
",",
"message",
"=",
"msg",
")",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"assertions",
")",
":",
"kl",
"=",
"kullback_leibler",
".",
"kl_divergence",
"(",
"a",
".",
"distribution",
",",
"b",
".",
"distribution",
",",
"name",
"=",
"name",
")",
"n",
"=",
"prefer_static",
".",
"reduce_prod",
"(",
"a",
".",
"sample_shape",
")",
"return",
"tf",
".",
"cast",
"(",
"x",
"=",
"n",
",",
"dtype",
"=",
"kl",
".",
"dtype",
")",
"*",
"kl"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_broadcast_to
|
Helper to broadcast a tensor using a list of target tensors.
|
tensorflow_probability/python/distributions/triangular.py
|
def _broadcast_to(tensor_to_broadcast, target_tensors):
"""Helper to broadcast a tensor using a list of target tensors."""
output = tensor_to_broadcast
for tensor in target_tensors:
output += tf.zeros_like(tensor)
return output
|
def _broadcast_to(tensor_to_broadcast, target_tensors):
"""Helper to broadcast a tensor using a list of target tensors."""
output = tensor_to_broadcast
for tensor in target_tensors:
output += tf.zeros_like(tensor)
return output
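The trick is simply that adding a zero tensor broadcasts the result to the combined shape. A tiny sketch:

```python
import tensorflow as tf

x = tf.constant([1., 2., 3.])                    # shape [3]
targets = [tf.zeros([2, 1]), tf.zeros([4, 1, 1])]

out = x
for t in targets:
  out += tf.zeros_like(t)
# out has shape [4, 2, 3]: the values of x, repeated along the new dimensions.
```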
|
[
"Helper",
"to",
"broadcast",
"a",
"tensor",
"using",
"a",
"list",
"of",
"target",
"tensors",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/triangular.py#L33-L38
|
[
"def",
"_broadcast_to",
"(",
"tensor_to_broadcast",
",",
"target_tensors",
")",
":",
"output",
"=",
"tensor_to_broadcast",
"for",
"tensor",
"in",
"target_tensors",
":",
"output",
"+=",
"tf",
".",
"zeros_like",
"(",
"tensor",
")",
"return",
"output"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
Triangular._pdf_at_peak
|
Pdf evaluated at the peak.
|
tensorflow_probability/python/distributions/triangular.py
|
def _pdf_at_peak(self):
"""Pdf evaluated at the peak."""
return (self.peak - self.low) / (self.high - self.low)
|
def _pdf_at_peak(self):
"""Pdf evaluated at the peak."""
return (self.peak - self.low) / (self.high - self.low)
|
[
"Pdf",
"evaluated",
"at",
"the",
"peak",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/triangular.py#L186-L188
|
[
"def",
"_pdf_at_peak",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"peak",
"-",
"self",
".",
"low",
")",
"/",
"(",
"self",
".",
"high",
"-",
"self",
".",
"low",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
effective_sample_size
|
Estimate a lower bound on effective sample size for each independent chain.
Roughly speaking, "effective sample size" (ESS) is the size of an iid sample
with the same variance as `state`.
More precisely, given a stationary sequence of possibly correlated random
variables `X_1, X_2,...,X_N`, each identically distributed, ESS is the number
such that
```Variance{ N**-1 * Sum{X_i} } = ESS**-1 * Variance{ X_1 }.```
If the sequence is uncorrelated, `ESS = N`. In general, one should expect
`ESS <= N`, with more highly correlated sequences having smaller `ESS`.
Args:
states: `Tensor` or list of `Tensor` objects. Dimension zero should index
identically distributed states.
filter_threshold: `Tensor` or list of `Tensor` objects.
Must broadcast with `state`. The auto-correlation sequence is truncated
after the first appearance of a term less than `filter_threshold`.
Setting to `None` means we use no threshold filter. Since `|R_k| <= 1`,
setting to any number less than `-1` has the same effect.
filter_beyond_lag: `Tensor` or list of `Tensor` objects. Must be
`int`-like and scalar valued. The auto-correlation sequence is truncated
to this length. Setting to `None` means we do not filter based on number
of lags.
name: `String` name to prepend to created ops.
Returns:
ess: `Tensor` or list of `Tensor` objects. The effective sample size of
each component of `states`. Shape will be `states.shape[1:]`.
Raises:
ValueError: If `states` and `filter_threshold` or `states` and
`filter_beyond_lag` are both lists with different lengths.
#### Examples
We use ESS to estimate standard error.
```
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])
# Get 1000 states from one chain.
states, _ = tfp.mcmc.sample_chain(
num_burnin_steps=200,
num_results=1000,
current_state=tf.constant([0., 0.]),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target.log_prob,
step_size=0.05,
num_leapfrog_steps=20))
states.shape
==> (1000, 2)
ess = effective_sample_size(states)
==> Shape (2,) Tensor
mean, variance = tf.nn.moments(states, axes=[0])
standard_error = tf.sqrt(variance / ess)
```
Some math shows that, with `R_k` the auto-correlation sequence,
`R_k := Covariance{X_1, X_{1+k}} / Variance{X_1}`, we have
```ESS(N) = N / [ 1 + 2 * ( (N - 1) / N * R_1 + ... + 1 / N * R_{N-1} ) ]```
This function estimates the above by first estimating the auto-correlation.
Since `R_k` must be estimated using only `N - k` samples, it becomes
progressively noisier for larger `k`. For this reason, the summation over
`R_k` should be truncated at some number `filter_beyond_lag < N`. Since many
MCMC methods generate chains where `R_k > 0`, a reasonable criterion is to
truncate at the first index where the estimated auto-correlation becomes
negative.
The arguments `filter_beyond_lag`, `filter_threshold` are filters intended to
remove noisy tail terms from `R_k`. They combine in an "OR" manner meaning
terms are removed if they were to be filtered under the `filter_beyond_lag` OR
`filter_threshold` criteria.
|
tensorflow_probability/python/mcmc/diagnostic.py
|
def effective_sample_size(states,
filter_threshold=0.,
filter_beyond_lag=None,
name=None):
"""Estimate a lower bound on effective sample size for each independent chain.
Roughly speaking, "effective sample size" (ESS) is the size of an iid sample
with the same variance as `state`.
More precisely, given a stationary sequence of possibly correlated random
variables `X_1, X_2,...,X_N`, each identically distributed, ESS is the number
such that
```Variance{ N**-1 * Sum{X_i} } = ESS**-1 * Variance{ X_1 }.```
If the sequence is uncorrelated, `ESS = N`. In general, one should expect
`ESS <= N`, with more highly correlated sequences having smaller `ESS`.
Args:
states: `Tensor` or list of `Tensor` objects. Dimension zero should index
identically distributed states.
filter_threshold: `Tensor` or list of `Tensor` objects.
Must broadcast with `state`. The auto-correlation sequence is truncated
after the first appearance of a term less than `filter_threshold`.
Setting to `None` means we use no threshold filter. Since `|R_k| <= 1`,
setting to any number less than `-1` has the same effect.
filter_beyond_lag: `Tensor` or list of `Tensor` objects. Must be
`int`-like and scalar valued. The auto-correlation sequence is truncated
to this length. Setting to `None` means we do not filter based on number
of lags.
name: `String` name to prepend to created ops.
Returns:
ess: `Tensor` or list of `Tensor` objects. The effective sample size of
each component of `states`. Shape will be `states.shape[1:]`.
Raises:
ValueError: If `states` and `filter_threshold` or `states` and
`filter_beyond_lag` are both lists with different lengths.
#### Examples
We use ESS to estimate standard error.
```
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])
# Get 1000 states from one chain.
states, _ = tfp.mcmc.sample_chain(
num_burnin_steps=200,
num_results=1000,
current_state=tf.constant([0., 0.]),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target.log_prob,
step_size=0.05,
num_leapfrog_steps=20))
states.shape
==> (1000, 2)
ess = effective_sample_size(states)
==> Shape (2,) Tensor
mean, variance = tf.nn.moments(states, axes=[0])
standard_error = tf.sqrt(variance / ess)
```
Some math shows that, with `R_k` the auto-correlation sequence,
`R_k := Covariance{X_1, X_{1+k}} / Variance{X_1}`, we have
```ESS(N) = N / [ 1 + 2 * ( (N - 1) / N * R_1 + ... + 1 / N * R_{N-1} ) ]```
This function estimates the above by first estimating the auto-correlation.
Since `R_k` must be estimated using only `N - k` samples, it becomes
progressively noisier for larger `k`. For this reason, the summation over
`R_k` should be truncated at some number `filter_beyond_lag < N`. Since many
MCMC methods generate chains where `R_k > 0`, a reasonable criterion is to
truncate at the first index where the estimated auto-correlation becomes
negative.
The arguments `filter_beyond_lag`, `filter_threshold` are filters intended to
remove noisy tail terms from `R_k`. They combine in an "OR" manner meaning
terms are removed if they were to be filtered under the `filter_beyond_lag` OR
`filter_threshold` criteria.
"""
states_was_list = _is_list_like(states)
# Convert all args to lists.
if not states_was_list:
states = [states]
filter_beyond_lag = _broadcast_maybelist_arg(states, filter_beyond_lag,
'filter_beyond_lag')
filter_threshold = _broadcast_maybelist_arg(states, filter_threshold,
'filter_threshold')
# Process items, one at a time.
with tf.compat.v1.name_scope(name, 'effective_sample_size'):
ess_list = [
_effective_sample_size_single_state(s, ml, mlt)
for (s, ml, mlt) in zip(states, filter_beyond_lag, filter_threshold)
]
if states_was_list:
return ess_list
return ess_list[0]
|
def effective_sample_size(states,
filter_threshold=0.,
filter_beyond_lag=None,
name=None):
"""Estimate a lower bound on effective sample size for each independent chain.
Roughly speaking, "effective sample size" (ESS) is the size of an iid sample
with the same variance as `state`.
More precisely, given a stationary sequence of possibly correlated random
variables `X_1, X_2,...,X_N`, each identically distributed, ESS is the number
such that
```Variance{ N**-1 * Sum{X_i} } = ESS**-1 * Variance{ X_1 }.```
If the sequence is uncorrelated, `ESS = N`. In general, one should expect
`ESS <= N`, with more highly correlated sequences having smaller `ESS`.
Args:
states: `Tensor` or list of `Tensor` objects. Dimension zero should index
identically distributed states.
filter_threshold: `Tensor` or list of `Tensor` objects.
Must broadcast with `state`. The auto-correlation sequence is truncated
after the first appearance of a term less than `filter_threshold`.
Setting to `None` means we use no threshold filter. Since `|R_k| <= 1`,
setting to any number less than `-1` has the same effect.
filter_beyond_lag: `Tensor` or list of `Tensor` objects. Must be
`int`-like and scalar valued. The auto-correlation sequence is truncated
to this length. Setting to `None` means we do not filter based on number
of lags.
name: `String` name to prepend to created ops.
Returns:
ess: `Tensor` or list of `Tensor` objects. The effective sample size of
each component of `states`. Shape will be `states.shape[1:]`.
Raises:
ValueError: If `states` and `filter_threshold` or `states` and
`filter_beyond_lag` are both lists with different lengths.
#### Examples
We use ESS to estimate standard error.
```
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])
# Get 1000 states from one chain.
states, _ = tfp.mcmc.sample_chain(
num_burnin_steps=200,
num_results=1000,
current_state=tf.constant([0., 0.]),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target.log_prob,
step_size=0.05,
num_leapfrog_steps=20))
states.shape
==> (1000, 2)
ess = effective_sample_size(states)
==> Shape (2,) Tensor
mean, variance = tf.nn.moments(states, axes=[0])
standard_error = tf.sqrt(variance / ess)
```
Some math shows that, with `R_k` the auto-correlation sequence,
`R_k := Covariance{X_1, X_{1+k}} / Variance{X_1}`, we have
```ESS(N) = N / [ 1 + 2 * ( (N - 1) / N * R_1 + ... + 1 / N * R_{N-1} ) ]```
This function estimates the above by first estimating the auto-correlation.
Since `R_k` must be estimated using only `N - k` samples, it becomes
progressively noisier for larger `k`. For this reason, the summation over
`R_k` should be truncated at some number `filter_beyond_lag < N`. Since many
MCMC methods generate chains where `R_k > 0`, a reasonable criterion is to
truncate at the first index where the estimated auto-correlation becomes
negative.
The arguments `filter_beyond_lag`, `filter_threshold` are filters intended to
remove noisy tail terms from `R_k`. They combine in an "OR" manner meaning
terms are removed if they were to be filtered under the `filter_beyond_lag` OR
`filter_threshold` criteria.
"""
states_was_list = _is_list_like(states)
# Convert all args to lists.
if not states_was_list:
states = [states]
filter_beyond_lag = _broadcast_maybelist_arg(states, filter_beyond_lag,
'filter_beyond_lag')
filter_threshold = _broadcast_maybelist_arg(states, filter_threshold,
'filter_threshold')
# Process items, one at a time.
with tf.compat.v1.name_scope(name, 'effective_sample_size'):
ess_list = [
_effective_sample_size_single_state(s, ml, mlt)
for (s, ml, mlt) in zip(states, filter_beyond_lag, filter_threshold)
]
if states_was_list:
return ess_list
return ess_list[0]
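A hedged plain-NumPy sketch of the same estimator on a synthetic AR(1) chain, with the simplest possible truncation (stop at the first negative autocorrelation) and no broadcasting; the constants are illustrative only:

```python
import numpy as np

rng = np.random.RandomState(0)
n, rho = 2000, 0.7
x = np.zeros(n)
for t in range(1, n):          # AR(1) chain with lag-1 correlation rho
  x[t] = rho * x[t - 1] + rng.randn()

xc = x - x.mean()
acf = np.correlate(xc, xc, mode='full')[n - 1:] / np.dot(xc, xc)  # R_0 .. R_{n-1}
m = np.argmax(acf < 0.)        # truncate at the first negative term
k = np.arange(1, m)
ess = n / (1. + 2. * np.sum((n - k) / n * acf[k]))
# For AR(1), the asymptotic answer is roughly n * (1 - rho) / (1 + rho) ~= 350 here.
```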
|
[
"Estimate",
"a",
"lower",
"bound",
"on",
"effective",
"sample",
"size",
"for",
"each",
"independent",
"chain",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/diagnostic.py#L35-L143
|
[
"def",
"effective_sample_size",
"(",
"states",
",",
"filter_threshold",
"=",
"0.",
",",
"filter_beyond_lag",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"states_was_list",
"=",
"_is_list_like",
"(",
"states",
")",
"# Convert all args to lists.",
"if",
"not",
"states_was_list",
":",
"states",
"=",
"[",
"states",
"]",
"filter_beyond_lag",
"=",
"_broadcast_maybelist_arg",
"(",
"states",
",",
"filter_beyond_lag",
",",
"'filter_beyond_lag'",
")",
"filter_threshold",
"=",
"_broadcast_maybelist_arg",
"(",
"states",
",",
"filter_threshold",
",",
"'filter_threshold'",
")",
"# Process items, one at a time.",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'effective_sample_size'",
")",
":",
"ess_list",
"=",
"[",
"_effective_sample_size_single_state",
"(",
"s",
",",
"ml",
",",
"mlt",
")",
"for",
"(",
"s",
",",
"ml",
",",
"mlt",
")",
"in",
"zip",
"(",
"states",
",",
"filter_beyond_lag",
",",
"filter_threshold",
")",
"]",
"if",
"states_was_list",
":",
"return",
"ess_list",
"return",
"ess_list",
"[",
"0",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_effective_sample_size_single_state
|
ESS computation for one single Tensor argument.
|
tensorflow_probability/python/mcmc/diagnostic.py
|
def _effective_sample_size_single_state(states, filter_beyond_lag,
filter_threshold):
"""ESS computation for one single Tensor argument."""
with tf.compat.v1.name_scope(
'effective_sample_size_single_state',
values=[states, filter_beyond_lag, filter_threshold]):
states = tf.convert_to_tensor(value=states, name='states')
dt = states.dtype
# filter_beyond_lag == None ==> auto_corr is the full sequence.
auto_corr = stats.auto_correlation(
states, axis=0, max_lags=filter_beyond_lag)
if filter_threshold is not None:
filter_threshold = tf.convert_to_tensor(
value=filter_threshold, dtype=dt, name='filter_threshold')
# Get a binary mask to zero out values of auto_corr below the threshold.
# mask[i, ...] = 1 if auto_corr[j, ...] > threshold for all j <= i,
# mask[i, ...] = 0, otherwise.
# So, along dimension zero, the mask will look like [1, 1, ..., 0, 0,...]
# Building step by step,
# Assume auto_corr = [1, 0.5, 0.0, 0.3], and filter_threshold = 0.2.
# Step 1: mask = [False, False, True, False]
mask = auto_corr < filter_threshold
# Step 2: mask = [0, 0, 1, 0]
mask = tf.cast(mask, dtype=dt)
# Step 3: mask = [0, 0, 1, 1]
mask = tf.cumsum(mask, axis=0)
# Step 4: mask = [1, 1, 0, 0]
mask = tf.maximum(1. - mask, 0.)
auto_corr *= mask
# With R[k] := auto_corr[k, ...],
# ESS = N / {1 + 2 * Sum_{k=1}^N (N - k) / N * R[k]}
# = N / {-1 + 2 * Sum_{k=0}^N (N - k) / N * R[k]} (since R[0] = 1)
# approx N / {-1 + 2 * Sum_{k=0}^M (N - k) / N * R[k]}
# where M is the filter_beyond_lag truncation point chosen above.
# Get the factor (N - k) / N, and give it shape [M, 1,...,1], having total
# ndims the same as auto_corr
n = _axis_size(states, axis=0)
k = tf.range(0., _axis_size(auto_corr, axis=0))
nk_factor = (n - k) / n
if auto_corr.shape.ndims is not None:
new_shape = [-1] + [1] * (auto_corr.shape.ndims - 1)
else:
new_shape = tf.concat(
([-1],
tf.ones([tf.rank(auto_corr) - 1], dtype=tf.int32)),
axis=0)
nk_factor = tf.reshape(nk_factor, new_shape)
return n / (-1 +
2 * tf.reduce_sum(input_tensor=nk_factor * auto_corr, axis=0))
|
def _effective_sample_size_single_state(states, filter_beyond_lag,
filter_threshold):
"""ESS computation for one single Tensor argument."""
with tf.compat.v1.name_scope(
'effective_sample_size_single_state',
values=[states, filter_beyond_lag, filter_threshold]):
states = tf.convert_to_tensor(value=states, name='states')
dt = states.dtype
# filter_beyond_lag == None ==> auto_corr is the full sequence.
auto_corr = stats.auto_correlation(
states, axis=0, max_lags=filter_beyond_lag)
if filter_threshold is not None:
filter_threshold = tf.convert_to_tensor(
value=filter_threshold, dtype=dt, name='filter_threshold')
# Get a binary mask to zero out values of auto_corr below the threshold.
# mask[i, ...] = 1 if auto_corr[j, ...] > threshold for all j <= i,
# mask[i, ...] = 0, otherwise.
# So, along dimension zero, the mask will look like [1, 1, ..., 0, 0,...]
# Building step by step,
# Assume auto_corr = [1, 0.5, 0.0, 0.3], and filter_threshold = 0.2.
# Step 1: mask = [False, False, True, False]
mask = auto_corr < filter_threshold
# Step 2: mask = [0, 0, 1, 0]
mask = tf.cast(mask, dtype=dt)
# Step 3: mask = [0, 0, 1, 1]
mask = tf.cumsum(mask, axis=0)
# Step 4: mask = [1, 1, 0, 0]
mask = tf.maximum(1. - mask, 0.)
auto_corr *= mask
# With R[k] := auto_corr[k, ...],
# ESS = N / {1 + 2 * Sum_{k=1}^N (N - k) / N * R[k]}
# = N / {-1 + 2 * Sum_{k=0}^N (N - k) / N * R[k]} (since R[0] = 1)
# approx N / {-1 + 2 * Sum_{k=0}^M (N - k) / N * R[k]}
# where M is the filter_beyond_lag truncation point chosen above.
# Get the factor (N - k) / N, and give it shape [M, 1,...,1], having total
# ndims the same as auto_corr
n = _axis_size(states, axis=0)
k = tf.range(0., _axis_size(auto_corr, axis=0))
nk_factor = (n - k) / n
if auto_corr.shape.ndims is not None:
new_shape = [-1] + [1] * (auto_corr.shape.ndims - 1)
else:
new_shape = tf.concat(
([-1],
tf.ones([tf.rank(auto_corr) - 1], dtype=tf.int32)),
axis=0)
nk_factor = tf.reshape(nk_factor, new_shape)
return n / (-1 +
2 * tf.reduce_sum(input_tensor=nk_factor * auto_corr, axis=0))
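The mask construction is the subtle part, so here are the four steps replayed in NumPy on the example from the comments (`auto_corr = [1, 0.5, 0.0, 0.3]`, threshold 0.2):

```python
import numpy as np

auto_corr = np.array([1., 0.5, 0.0, 0.3])
threshold = 0.2

mask = auto_corr < threshold        # [False, False, True, False]
mask = mask.astype(np.float64)      # [0., 0., 1., 0.]
mask = np.cumsum(mask)              # [0., 0., 1., 1.]
mask = np.maximum(1. - mask, 0.)    # [1., 1., 0., 0.]

# Every lag at or after the first sub-threshold autocorrelation is zeroed,
# even though the later value 0.3 is itself above the threshold.
filtered = auto_corr * mask         # [1., 0.5, 0., 0.]
```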
|
[
"ESS",
"computation",
"for",
"one",
"single",
"Tensor",
"argument",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/diagnostic.py#L146-L200
|
[
"def",
"_effective_sample_size_single_state",
"(",
"states",
",",
"filter_beyond_lag",
",",
"filter_threshold",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"'effective_sample_size_single_state'",
",",
"values",
"=",
"[",
"states",
",",
"filter_beyond_lag",
",",
"filter_threshold",
"]",
")",
":",
"states",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"states",
",",
"name",
"=",
"'states'",
")",
"dt",
"=",
"states",
".",
"dtype",
"# filter_beyond_lag == None ==> auto_corr is the full sequence.",
"auto_corr",
"=",
"stats",
".",
"auto_correlation",
"(",
"states",
",",
"axis",
"=",
"0",
",",
"max_lags",
"=",
"filter_beyond_lag",
")",
"if",
"filter_threshold",
"is",
"not",
"None",
":",
"filter_threshold",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"filter_threshold",
",",
"dtype",
"=",
"dt",
",",
"name",
"=",
"'filter_threshold'",
")",
"# Get a binary mask to zero out values of auto_corr below the threshold.",
"# mask[i, ...] = 1 if auto_corr[j, ...] > threshold for all j <= i,",
"# mask[i, ...] = 0, otherwise.",
"# So, along dimension zero, the mask will look like [1, 1, ..., 0, 0,...]",
"# Building step by step,",
"# Assume auto_corr = [1, 0.5, 0.0, 0.3], and filter_threshold = 0.2.",
"# Step 1: mask = [False, False, True, False]",
"mask",
"=",
"auto_corr",
"<",
"filter_threshold",
"# Step 2: mask = [0, 0, 1, 1]",
"mask",
"=",
"tf",
".",
"cast",
"(",
"mask",
",",
"dtype",
"=",
"dt",
")",
"# Step 3: mask = [0, 0, 1, 2]",
"mask",
"=",
"tf",
".",
"cumsum",
"(",
"mask",
",",
"axis",
"=",
"0",
")",
"# Step 4: mask = [1, 1, 0, 0]",
"mask",
"=",
"tf",
".",
"maximum",
"(",
"1.",
"-",
"mask",
",",
"0.",
")",
"auto_corr",
"*=",
"mask",
"# With R[k] := auto_corr[k, ...],",
"# ESS = N / {1 + 2 * Sum_{k=1}^N (N - k) / N * R[k]}",
"# = N / {-1 + 2 * Sum_{k=0}^N (N - k) / N * R[k]} (since R[0] = 1)",
"# approx N / {-1 + 2 * Sum_{k=0}^M (N - k) / N * R[k]}",
"# where M is the filter_beyond_lag truncation point chosen above.",
"# Get the factor (N - k) / N, and give it shape [M, 1,...,1], having total",
"# ndims the same as auto_corr",
"n",
"=",
"_axis_size",
"(",
"states",
",",
"axis",
"=",
"0",
")",
"k",
"=",
"tf",
".",
"range",
"(",
"0.",
",",
"_axis_size",
"(",
"auto_corr",
",",
"axis",
"=",
"0",
")",
")",
"nk_factor",
"=",
"(",
"n",
"-",
"k",
")",
"/",
"n",
"if",
"auto_corr",
".",
"shape",
".",
"ndims",
"is",
"not",
"None",
":",
"new_shape",
"=",
"[",
"-",
"1",
"]",
"+",
"[",
"1",
"]",
"*",
"(",
"auto_corr",
".",
"shape",
".",
"ndims",
"-",
"1",
")",
"else",
":",
"new_shape",
"=",
"tf",
".",
"concat",
"(",
"(",
"[",
"-",
"1",
"]",
",",
"tf",
".",
"ones",
"(",
"[",
"tf",
".",
"rank",
"(",
"auto_corr",
")",
"-",
"1",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
")",
",",
"axis",
"=",
"0",
")",
"nk_factor",
"=",
"tf",
".",
"reshape",
"(",
"nk_factor",
",",
"new_shape",
")",
"return",
"n",
"/",
"(",
"-",
"1",
"+",
"2",
"*",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"nk_factor",
"*",
"auto_corr",
",",
"axis",
"=",
"0",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
potential_scale_reduction
|
Gelman and Rubin (1992)'s potential scale reduction for chain convergence.
Given `N > 1` states from each of `C > 1` independent chains, the potential
scale reduction factor, commonly referred to as R-hat, measures convergence of
the chains (to the same target) by testing for equality of means.
Specifically, R-hat measures the degree to which variance (of the means)
between chains exceeds what one would expect if the chains were identically
distributed. See [Gelman and Rubin (1992)][1]; [Brooks and Gelman (1998)][2].
Some guidelines:
* The initial state of the chains should be drawn from a distribution
overdispersed with respect to the target.
* If all chains converge to the target, then as `N --> infinity`, R-hat --> 1.
Before that, R-hat > 1 (except in pathological cases, e.g. if the chain
paths were identical).
* The above holds for any number of chains `C > 1`. Increasing `C`
improves the effectiveness of the diagnostic.
* Sometimes, R-hat < 1.2 is used to indicate approximate convergence, but of
course this is problem dependent. See [Brooks and Gelman (1998)][2].
* R-hat only measures non-convergence of the mean. If higher moments, or
other statistics are desired, a different diagnostic should be used. See
[Brooks and Gelman (1998)][2].
Args:
chains_states: `Tensor` or Python `list` of `Tensor`s representing the
state(s) of a Markov Chain at each result step. The `ith` state is
assumed to have shape `[Ni, Ci1, Ci2,...,CiD] + A`.
Dimension `0` indexes the `Ni > 1` result steps of the Markov Chain.
Dimensions `1` through `D` index the `Ci1 x ... x CiD` independent
chains to be tested for convergence to the same target.
The remaining dimensions, `A`, can have any shape (even empty).
independent_chain_ndims: Integer type `Tensor` with value `>= 1` giving the
number of dimensions, from `dim = 1` to `dim = D`,
holding independent chain results to be tested for convergence.
name: `String` name to prepend to created ops. Default:
`potential_scale_reduction`.
Returns:
`Tensor` or Python `list` of `Tensor`s representing the R-hat statistic for
the state(s). Same `dtype` as `state`, and shape equal to
`state.shape[1 + independent_chain_ndims:]`.
Raises:
ValueError: If `independent_chain_ndims < 1`.
#### Examples
Diagnosing convergence by monitoring 10 chains that each attempt to
sample from a 2-variate normal.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])
# Get 10 (2x) overdispersed initial states.
initial_state = target.sample(10) * 2.
==> (10, 2)
# Get 1000 samples from the 10 independent chains.
chains_states, _ = tfp.mcmc.sample_chain(
num_burnin_steps=200,
num_results=1000,
current_state=initial_state,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target.log_prob,
step_size=0.05,
num_leapfrog_steps=20))
chains_states.shape
==> (1000, 10, 2)
rhat = tfp.mcmc.diagnostic.potential_scale_reduction(
chains_states, independent_chain_ndims=1)
# The second dimension needed a longer burn-in.
rhat.eval()
==> [1.05, 1.3]
```
To see why R-hat is reasonable, let `X` be a random variable drawn uniformly
from the combined states (combined over all chains). Then, in the limit
`N, C --> infinity`, with `E`, `Var` denoting expectation and variance,
```R-hat = ( E[Var[X | chain]] + Var[E[X | chain]] ) / E[Var[X | chain]].```
Using the law of total variance, the numerator is the variance of the combined
states, and the denominator is the total variance minus the variance of the
individual chain means. If the chains are all drawing from the same
distribution, they will have the same mean, and thus the ratio should be one.
#### References
[1]: Andrew Gelman and Donald B. Rubin. Inference from Iterative Simulation
Using Multiple Sequences. _Statistical Science_, 7(4):457-472, 1992.
[2]: Stephen P. Brooks and Andrew Gelman. General Methods for Monitoring
Convergence of Iterative Simulations. _Journal of Computational and
Graphical Statistics_, 7(4), 1998.
|
tensorflow_probability/python/mcmc/diagnostic.py
|
def potential_scale_reduction(chains_states,
independent_chain_ndims=1,
name=None):
"""Gelman and Rubin (1992)'s potential scale reduction for chain convergence.
Given `N > 1` states from each of `C > 1` independent chains, the potential
scale reduction factor, commonly referred to as R-hat, measures convergence of
the chains (to the same target) by testing for equality of means.
Specifically, R-hat measures the degree to which variance (of the means)
between chains exceeds what one would expect if the chains were identically
distributed. See [Gelman and Rubin (1992)][1]; [Brooks and Gelman (1998)][2].
Some guidelines:
* The initial state of the chains should be drawn from a distribution
overdispersed with respect to the target.
* If all chains converge to the target, then as `N --> infinity`, R-hat --> 1.
Before that, R-hat > 1 (except in pathological cases, e.g. if the chain
paths were identical).
* The above holds for any number of chains `C > 1`. Increasing `C`
improves effectiveness of the diagnostic.
* Sometimes, R-hat < 1.2 is used to indicate approximate convergence, but of
course this is problem dependent. See [Brooks and Gelman (1998)][2].
* R-hat only measures non-convergence of the mean. If higher moments, or
other statistics are desired, a different diagnostic should be used. See
[Brooks and Gelman (1998)][2].
Args:
chains_states: `Tensor` or Python `list` of `Tensor`s representing the
state(s) of a Markov Chain at each result step. The `ith` state is
assumed to have shape `[Ni, Ci1, Ci2,...,CiD] + A`.
Dimension `0` indexes the `Ni > 1` result steps of the Markov Chain.
Dimensions `1` through `D` index the `Ci1 x ... x CiD` independent
chains to be tested for convergence to the same target.
The remaining dimensions, `A`, can have any shape (even empty).
independent_chain_ndims: Integer type `Tensor` with value `>= 1` giving the
number of dimensions, from `dim = 1` to `dim = D`,
holding independent chain results to be tested for convergence.
name: `String` name to prepend to created tf ops. Default:
`potential_scale_reduction`.
Returns:
`Tensor` or Python `list` of `Tensor`s representing the R-hat statistic for
the state(s). Same `dtype` as `state`, and shape equal to
`state.shape[1 + independent_chain_ndims:]`.
Raises:
ValueError: If `independent_chain_ndims < 1`.
#### Examples
Diagnosing convergence by monitoring 10 chains that each attempt to
sample from a 2-variate normal.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])
# Get 10 (2x) overdispersed initial states.
initial_state = target.sample(10) * 2.
==> (10, 2)
# Get 1000 samples from the 10 independent chains.
chains_states, _ = tfp.mcmc.sample_chain(
num_burnin_steps=200,
num_results=1000,
current_state=initial_state,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target.log_prob,
step_size=0.05,
num_leapfrog_steps=20))
chains_states.shape
==> (1000, 10, 2)
rhat = tfp.mcmc.diagnostic.potential_scale_reduction(
chains_states, independent_chain_ndims=1)
# The second dimension needed a longer burn-in.
rhat.eval()
==> [1.05, 1.3]
```
To see why R-hat is reasonable, let `X` be a random variable drawn uniformly
from the combined states (combined over all chains). Then, in the limit
`N, C --> infinity`, with `E`, `Var` denoting expectation and variance,
```R-hat = ( E[Var[X | chain]] + Var[E[X | chain]] ) / E[Var[X | chain]].```
Using the law of total variance, the numerator is the variance of the combined
states, and the denominator is the total variance minus the variance of the
individual chain means. If the chains are all drawing from the same
distribution, they will have the same mean, and thus the ratio should be one.
#### References
[1]: Andrew Gelman and Donald B. Rubin. Inference from Iterative Simulation
Using Multiple Sequences. _Statistical Science_, 7(4):457-472, 1992.
[2]: Stephen P. Brooks and Andrew Gelman. General Methods for Monitoring
Convergence of Iterative Simulations. _Journal of Computational and
Graphical Statistics_, 7(4), 1998.
"""
chains_states_was_list = _is_list_like(chains_states)
if not chains_states_was_list:
chains_states = [chains_states]
# tf.get_static_value returns None iff a constant value (as a numpy
# array) is not efficiently computable. Therefore, we try constant_value then
# check for None.
icn_const_ = tf.get_static_value(
tf.convert_to_tensor(value=independent_chain_ndims))
if icn_const_ is not None:
independent_chain_ndims = icn_const_
if icn_const_ < 1:
raise ValueError(
'Argument `independent_chain_ndims` must be `>= 1`, found: {}'.format(
independent_chain_ndims))
with tf.compat.v1.name_scope(name, 'potential_scale_reduction'):
rhat_list = [
_potential_scale_reduction_single_state(s, independent_chain_ndims)
for s in chains_states
]
if chains_states_was_list:
return rhat_list
return rhat_list[0]
|
def potential_scale_reduction(chains_states,
independent_chain_ndims=1,
name=None):
"""Gelman and Rubin (1992)'s potential scale reduction for chain convergence.
Given `N > 1` states from each of `C > 1` independent chains, the potential
scale reduction factor, commonly referred to as R-hat, measures convergence of
the chains (to the same target) by testing for equality of means.
Specifically, R-hat measures the degree to which variance (of the means)
between chains exceeds what one would expect if the chains were identically
distributed. See [Gelman and Rubin (1992)][1]; [Brooks and Gelman (1998)][2].
Some guidelines:
* The initial state of the chains should be drawn from a distribution
overdispersed with respect to the target.
* If all chains converge to the target, then as `N --> infinity`, R-hat --> 1.
Before that, R-hat > 1 (except in pathological cases, e.g. if the chain
paths were identical).
* The above holds for any number of chains `C > 1`. Increasing `C`
improves effectiveness of the diagnostic.
* Sometimes, R-hat < 1.2 is used to indicate approximate convergence, but of
course this is problem dependent. See [Brooks and Gelman (1998)][2].
* R-hat only measures non-convergence of the mean. If higher moments, or
other statistics are desired, a different diagnostic should be used. See
[Brooks and Gelman (1998)][2].
Args:
chains_states: `Tensor` or Python `list` of `Tensor`s representing the
state(s) of a Markov Chain at each result step. The `ith` state is
assumed to have shape `[Ni, Ci1, Ci2,...,CiD] + A`.
Dimension `0` indexes the `Ni > 1` result steps of the Markov Chain.
Dimensions `1` through `D` index the `Ci1 x ... x CiD` independent
chains to be tested for convergence to the same target.
The remaining dimensions, `A`, can have any shape (even empty).
independent_chain_ndims: Integer type `Tensor` with value `>= 1` giving the
number of dimensions, from `dim = 1` to `dim = D`,
holding independent chain results to be tested for convergence.
name: `String` name to prepend to created tf ops. Default:
`potential_scale_reduction`.
Returns:
`Tensor` or Python `list` of `Tensor`s representing the R-hat statistic for
the state(s). Same `dtype` as `state`, and shape equal to
`state.shape[1 + independent_chain_ndims:]`.
Raises:
ValueError: If `independent_chain_ndims < 1`.
#### Examples
Diagnosing convergence by monitoring 10 chains that each attempt to
sample from a 2-variate normal.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])
# Get 10 (2x) overdispersed initial states.
initial_state = target.sample(10) * 2.
==> (10, 2)
# Get 1000 samples from the 10 independent chains.
chains_states, _ = tfp.mcmc.sample_chain(
num_burnin_steps=200,
num_results=1000,
current_state=initial_state,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target.log_prob,
step_size=0.05,
num_leapfrog_steps=20))
chains_states.shape
==> (1000, 10, 2)
rhat = tfp.mcmc.diagnostic.potential_scale_reduction(
chains_states, independent_chain_ndims=1)
# The second dimension needed a longer burn-in.
rhat.eval()
==> [1.05, 1.3]
```
To see why R-hat is reasonable, let `X` be a random variable drawn uniformly
from the combined states (combined over all chains). Then, in the limit
`N, C --> infinity`, with `E`, `Var` denoting expectation and variance,
```R-hat = ( E[Var[X | chain]] + Var[E[X | chain]] ) / E[Var[X | chain]].```
Using the law of total variance, the numerator is the variance of the combined
states, and the denominator is the total variance minus the variance of the
individual chain means. If the chains are all drawing from the same
distribution, they will have the same mean, and thus the ratio should be one.
#### References
[1]: Andrew Gelman and Donald B. Rubin. Inference from Iterative Simulation
Using Multiple Sequences. _Statistical Science_, 7(4):457-472, 1992.
[2]: Stephen P. Brooks and Andrew Gelman. General Methods for Monitoring
Convergence of Iterative Simulations. _Journal of Computational and
Graphical Statistics_, 7(4), 1998.
"""
chains_states_was_list = _is_list_like(chains_states)
if not chains_states_was_list:
chains_states = [chains_states]
# tf.get_static_value returns None iff a constant value (as a numpy
# array) is not efficiently computable. Therefore, we try constant_value then
# check for None.
icn_const_ = tf.get_static_value(
tf.convert_to_tensor(value=independent_chain_ndims))
if icn_const_ is not None:
independent_chain_ndims = icn_const_
if icn_const_ < 1:
raise ValueError(
'Argument `independent_chain_ndims` must be `>= 1`, found: {}'.format(
independent_chain_ndims))
with tf.compat.v1.name_scope(name, 'potential_scale_reduction'):
rhat_list = [
_potential_scale_reduction_single_state(s, independent_chain_ndims)
for s in chains_states
]
if chains_states_was_list:
return rhat_list
return rhat_list[0]
|
[
"Gelman",
"and",
"Rubin",
"(",
"1992",
")",
"s",
"potential",
"scale",
"reduction",
"for",
"chain",
"convergence",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/diagnostic.py#L203-L332
|
[
"def",
"potential_scale_reduction",
"(",
"chains_states",
",",
"independent_chain_ndims",
"=",
"1",
",",
"name",
"=",
"None",
")",
":",
"chains_states_was_list",
"=",
"_is_list_like",
"(",
"chains_states",
")",
"if",
"not",
"chains_states_was_list",
":",
"chains_states",
"=",
"[",
"chains_states",
"]",
"# tf.get_static_value returns None iff a constant value (as a numpy",
"# array) is not efficiently computable. Therefore, we try constant_value then",
"# check for None.",
"icn_const_",
"=",
"tf",
".",
"get_static_value",
"(",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"independent_chain_ndims",
")",
")",
"if",
"icn_const_",
"is",
"not",
"None",
":",
"independent_chain_ndims",
"=",
"icn_const_",
"if",
"icn_const_",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'Argument `independent_chain_ndims` must be `>= 1`, found: {}'",
".",
"format",
"(",
"independent_chain_ndims",
")",
")",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'potential_scale_reduction'",
")",
":",
"rhat_list",
"=",
"[",
"_potential_scale_reduction_single_state",
"(",
"s",
",",
"independent_chain_ndims",
")",
"for",
"s",
"in",
"chains_states",
"]",
"if",
"chains_states_was_list",
":",
"return",
"rhat_list",
"return",
"rhat_list",
"[",
"0",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
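To make the law-of-total-variance reading of R-hat in the docstring above concrete, here is a small NumPy sketch. It implements only the limiting form `(E[Var[X | chain]] + Var[E[X | chain]]) / E[Var[X | chain]]`, not the finite-sample correction used by the library function, and the array shapes and seed are illustrative.

```python
import numpy as np

def rhat_simple(chains):
  """chains: array of shape [n_samples, n_chains]."""
  w = np.mean(np.var(chains, axis=0, ddof=0))         # E[Var[X | chain]]
  b_div_n = np.var(np.mean(chains, axis=0), ddof=1)   # Var[E[X | chain]]
  return (w + b_div_n) / w

rng = np.random.default_rng(0)
mixed = rng.normal(0., 1., size=(1000, 10))   # All chains target the same distribution.
unmixed = mixed + np.arange(10.)              # Chains centered at different means.
print(rhat_simple(mixed))                     # ~1.0
print(rhat_simple(unmixed))                   # Much greater than 1.
```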
|
test
|
_potential_scale_reduction_single_state
|
potential_scale_reduction for one single state `Tensor`.
|
tensorflow_probability/python/mcmc/diagnostic.py
|
def _potential_scale_reduction_single_state(state, independent_chain_ndims):
"""potential_scale_reduction for one single state `Tensor`."""
with tf.compat.v1.name_scope(
'potential_scale_reduction_single_state',
values=[state, independent_chain_ndims]):
# We assume exactly one leading dimension indexes e.g. correlated samples
# from each Markov chain.
state = tf.convert_to_tensor(value=state, name='state')
sample_ndims = 1
sample_axis = tf.range(0, sample_ndims)
chain_axis = tf.range(sample_ndims,
sample_ndims + independent_chain_ndims)
sample_and_chain_axis = tf.range(
0, sample_ndims + independent_chain_ndims)
n = _axis_size(state, sample_axis)
m = _axis_size(state, chain_axis)
# In the language of Brooks and Gelman (1998),
# B / n is the between chain variance, the variance of the chain means.
# W is the within sequence variance, the mean of the chain variances.
b_div_n = _reduce_variance(
tf.reduce_mean(input_tensor=state, axis=sample_axis, keepdims=True),
sample_and_chain_axis,
biased=False)
w = tf.reduce_mean(
input_tensor=_reduce_variance(
state, sample_axis, keepdims=True, biased=True),
axis=sample_and_chain_axis)
# sigma^2_+ is an estimate of the true variance, which would be unbiased if
# each chain was drawn from the target. c.f. "law of total variance."
sigma_2_plus = w + b_div_n
return ((m + 1.) / m) * sigma_2_plus / w - (n - 1.) / (m * n)
|
def _potential_scale_reduction_single_state(state, independent_chain_ndims):
"""potential_scale_reduction for one single state `Tensor`."""
with tf.compat.v1.name_scope(
'potential_scale_reduction_single_state',
values=[state, independent_chain_ndims]):
# We assume exactly one leading dimension indexes e.g. correlated samples
# from each Markov chain.
state = tf.convert_to_tensor(value=state, name='state')
sample_ndims = 1
sample_axis = tf.range(0, sample_ndims)
chain_axis = tf.range(sample_ndims,
sample_ndims + independent_chain_ndims)
sample_and_chain_axis = tf.range(
0, sample_ndims + independent_chain_ndims)
n = _axis_size(state, sample_axis)
m = _axis_size(state, chain_axis)
# In the language of Brooks and Gelman (1998),
# B / n is the between chain variance, the variance of the chain means.
# W is the within sequence variance, the mean of the chain variances.
b_div_n = _reduce_variance(
tf.reduce_mean(input_tensor=state, axis=sample_axis, keepdims=True),
sample_and_chain_axis,
biased=False)
w = tf.reduce_mean(
input_tensor=_reduce_variance(
state, sample_axis, keepdims=True, biased=True),
axis=sample_and_chain_axis)
# sigma^2_+ is an estimate of the true variance, which would be unbiased if
# each chain was drawn from the target. c.f. "law of total variance."
sigma_2_plus = w + b_div_n
return ((m + 1.) / m) * sigma_2_plus / w - (n - 1.) / (m * n)
|
[
"potential_scale_reduction",
"for",
"one",
"single",
"state",
"Tensor",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/diagnostic.py#L335-L370
|
[
"def",
"_potential_scale_reduction_single_state",
"(",
"state",
",",
"independent_chain_ndims",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"'potential_scale_reduction_single_state'",
",",
"values",
"=",
"[",
"state",
",",
"independent_chain_ndims",
"]",
")",
":",
"# We assume exactly one leading dimension indexes e.g. correlated samples",
"# from each Markov chain.",
"state",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"state",
",",
"name",
"=",
"'state'",
")",
"sample_ndims",
"=",
"1",
"sample_axis",
"=",
"tf",
".",
"range",
"(",
"0",
",",
"sample_ndims",
")",
"chain_axis",
"=",
"tf",
".",
"range",
"(",
"sample_ndims",
",",
"sample_ndims",
"+",
"independent_chain_ndims",
")",
"sample_and_chain_axis",
"=",
"tf",
".",
"range",
"(",
"0",
",",
"sample_ndims",
"+",
"independent_chain_ndims",
")",
"n",
"=",
"_axis_size",
"(",
"state",
",",
"sample_axis",
")",
"m",
"=",
"_axis_size",
"(",
"state",
",",
"chain_axis",
")",
"# In the language of Brooks and Gelman (1998),",
"# B / n is the between chain variance, the variance of the chain means.",
"# W is the within sequence variance, the mean of the chain variances.",
"b_div_n",
"=",
"_reduce_variance",
"(",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"state",
",",
"axis",
"=",
"sample_axis",
",",
"keepdims",
"=",
"True",
")",
",",
"sample_and_chain_axis",
",",
"biased",
"=",
"False",
")",
"w",
"=",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"_reduce_variance",
"(",
"state",
",",
"sample_axis",
",",
"keepdims",
"=",
"True",
",",
"biased",
"=",
"True",
")",
",",
"axis",
"=",
"sample_and_chain_axis",
")",
"# sigma^2_+ is an estimate of the true variance, which would be unbiased if",
"# each chain was drawn from the target. c.f. \"law of total variance.\"",
"sigma_2_plus",
"=",
"w",
"+",
"b_div_n",
"return",
"(",
"(",
"m",
"+",
"1.",
")",
"/",
"m",
")",
"*",
"sigma_2_plus",
"/",
"w",
"-",
"(",
"n",
"-",
"1.",
")",
"/",
"(",
"m",
"*",
"n",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
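For reference, a plain-NumPy transcription of the return expression in `_potential_scale_reduction_single_state` above, restricted to the two-dimensional `[n_samples, n_chains]` case. This is a sketch for checking the algebra by hand, not a substitute for the TensorFlow implementation.

```python
import numpy as np

def rhat_single_state(state):
  """state: array of shape [n_samples, n_chains]."""
  n, m = state.shape
  chain_means = state.mean(axis=0)
  b_div_n = chain_means.var(ddof=1)        # Between-chain variance of the chain means.
  w = state.var(axis=0, ddof=0).mean()     # Mean of the (biased) within-chain variances.
  sigma_2_plus = w + b_div_n               # "sigma^2_+" from the comment above.
  return ((m + 1.) / m) * sigma_2_plus / w - (n - 1.) / (m * n)

x = np.random.default_rng(1).normal(size=(500, 4))
print(rhat_single_state(x))                # Close to 1 for well-mixed chains.
```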
|
test
|
_axis_size
|
Get number of elements of `x` in `axis`, as type `x.dtype`.
|
tensorflow_probability/python/mcmc/diagnostic.py
|
def _axis_size(x, axis=None):
"""Get number of elements of `x` in `axis`, as type `x.dtype`."""
if axis is None:
return tf.cast(tf.size(input=x), x.dtype)
return tf.cast(
tf.reduce_prod(input_tensor=tf.gather(tf.shape(input=x), axis)), x.dtype)
|
def _axis_size(x, axis=None):
"""Get number of elements of `x` in `axis`, as type `x.dtype`."""
if axis is None:
return tf.cast(tf.size(input=x), x.dtype)
return tf.cast(
tf.reduce_prod(input_tensor=tf.gather(tf.shape(input=x), axis)), x.dtype)
|
[
"Get",
"number",
"of",
"elements",
"of",
"x",
"in",
"axis",
"as",
"type",
"x",
".",
"dtype",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/diagnostic.py#L388-L393
|
[
"def",
"_axis_size",
"(",
"x",
",",
"axis",
"=",
"None",
")",
":",
"if",
"axis",
"is",
"None",
":",
"return",
"tf",
".",
"cast",
"(",
"tf",
".",
"size",
"(",
"input",
"=",
"x",
")",
",",
"x",
".",
"dtype",
")",
"return",
"tf",
".",
"cast",
"(",
"tf",
".",
"reduce_prod",
"(",
"input_tensor",
"=",
"tf",
".",
"gather",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
",",
"axis",
")",
")",
",",
"x",
".",
"dtype",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_broadcast_maybelist_arg
|
Broadcast a listable secondary_arg to that of states.
|
tensorflow_probability/python/mcmc/diagnostic.py
|
def _broadcast_maybelist_arg(states, secondary_arg, name):
"""Broadcast a listable secondary_arg to that of states."""
if _is_list_like(secondary_arg):
if len(secondary_arg) != len(states):
      raise ValueError('Argument `{}` was a list of different length ({}) than '
                       '`states` ({})'.format(name, len(secondary_arg),
                                              len(states)))
else:
secondary_arg = [secondary_arg] * len(states)
return secondary_arg
|
def _broadcast_maybelist_arg(states, secondary_arg, name):
"""Broadcast a listable secondary_arg to that of states."""
if _is_list_like(secondary_arg):
if len(secondary_arg) != len(states):
      raise ValueError('Argument `{}` was a list of different length ({}) than '
                       '`states` ({})'.format(name, len(secondary_arg),
                                              len(states)))
else:
secondary_arg = [secondary_arg] * len(states)
return secondary_arg
|
[
"Broadcast",
"a",
"listable",
"secondary_arg",
"to",
"that",
"of",
"states",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/diagnostic.py#L401-L410
|
[
"def",
"_broadcast_maybelist_arg",
"(",
"states",
",",
"secondary_arg",
",",
"name",
")",
":",
"if",
"_is_list_like",
"(",
"secondary_arg",
")",
":",
"if",
"len",
"(",
"secondary_arg",
")",
"!=",
"len",
"(",
"states",
")",
":",
"raise",
"ValueError",
"(",
"'Argument `%s` was a list of different length ({}) than '",
"'`states` ({})'",
".",
"format",
"(",
"name",
",",
"len",
"(",
"states",
")",
")",
")",
"else",
":",
"secondary_arg",
"=",
"[",
"secondary_arg",
"]",
"*",
"len",
"(",
"states",
")",
"return",
"secondary_arg"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
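A quick standalone illustration of the broadcasting rule above. The `_is_list_like` stand-in is an assumption (the real helper is defined elsewhere in the module); the rest mirrors the logic of the function.

```python
def _is_list_like(x):
  return isinstance(x, (list, tuple))

def broadcast_maybelist_arg(states, secondary_arg, name):
  if _is_list_like(secondary_arg):
    if len(secondary_arg) != len(states):
      raise ValueError('Argument `{}` was a list of different length ({}) than '
                       '`states` ({})'.format(name, len(secondary_arg), len(states)))
  else:
    secondary_arg = [secondary_arg] * len(states)
  return secondary_arg

print(broadcast_maybelist_arg(['s0', 's1', 's2'], 0.5, 'step_size'))   # [0.5, 0.5, 0.5]
print(broadcast_maybelist_arg(['s0', 's1'], [0.1, 0.2], 'step_size'))  # [0.1, 0.2]
```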
|
test
|
quadrature_scheme_lognormal_gauss_hermite
|
Use Gauss-Hermite quadrature to form quadrature on positive-reals.
Note: for a given `quadrature_size`, this method is generally less accurate
than `quadrature_scheme_lognormal_quantiles`.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associated with each `grid` value.
|
tensorflow_probability/python/distributions/poisson_lognormal.py
|
def quadrature_scheme_lognormal_gauss_hermite(
loc, scale, quadrature_size,
validate_args=False, name=None): # pylint: disable=unused-argument
"""Use Gauss-Hermite quadrature to form quadrature on positive-reals.
Note: for a given `quadrature_size`, this method is generally less accurate
than `quadrature_scheme_lognormal_quantiles`.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associated with each `grid` value.
"""
with tf.name_scope(
name or "vector_diffeomixture_quadrature_gauss_hermite"):
grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)
npdt = dtype_util.as_numpy_dtype(loc.dtype)
grid = grid.astype(npdt)
probs = probs.astype(npdt)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
probs = tf.convert_to_tensor(value=probs, name="probs", dtype=loc.dtype)
# The following maps the broadcast of `loc` and `scale` to each grid
# point, i.e., we are creating several log-rates that correspond to the
# different Gauss-Hermite quadrature points and (possible) batches of
# `loc` and `scale`.
grid = (loc[..., tf.newaxis] + np.sqrt(2.) * scale[..., tf.newaxis] * grid)
return grid, probs
|
def quadrature_scheme_lognormal_gauss_hermite(
loc, scale, quadrature_size,
validate_args=False, name=None): # pylint: disable=unused-argument
"""Use Gauss-Hermite quadrature to form quadrature on positive-reals.
Note: for a given `quadrature_size`, this method is generally less accurate
than `quadrature_scheme_lognormal_quantiles`.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associated with each `grid` value.
"""
with tf.name_scope(
name or "vector_diffeomixture_quadrature_gauss_hermite"):
grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)
npdt = dtype_util.as_numpy_dtype(loc.dtype)
grid = grid.astype(npdt)
probs = probs.astype(npdt)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
probs = tf.convert_to_tensor(value=probs, name="probs", dtype=loc.dtype)
# The following maps the broadcast of `loc` and `scale` to each grid
# point, i.e., we are creating several log-rates that correspond to the
# different Gauss-Hermite quadrature points and (possible) batches of
# `loc` and `scale`.
grid = (loc[..., tf.newaxis] + np.sqrt(2.) * scale[..., tf.newaxis] * grid)
return grid, probs
|
[
"Use",
"Gauss",
"-",
"Hermite",
"quadrature",
"to",
"form",
"quadrature",
"on",
"positive",
"-",
"reals",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/poisson_lognormal.py#L45-L85
|
[
"def",
"quadrature_scheme_lognormal_gauss_hermite",
"(",
"loc",
",",
"scale",
",",
"quadrature_size",
",",
"validate_args",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"# pylint: disable=unused-argument",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"vector_diffeomixture_quadrature_gauss_hermite\"",
")",
":",
"grid",
",",
"probs",
"=",
"np",
".",
"polynomial",
".",
"hermite",
".",
"hermgauss",
"(",
"deg",
"=",
"quadrature_size",
")",
"npdt",
"=",
"dtype_util",
".",
"as_numpy_dtype",
"(",
"loc",
".",
"dtype",
")",
"grid",
"=",
"grid",
".",
"astype",
"(",
"npdt",
")",
"probs",
"=",
"probs",
".",
"astype",
"(",
"npdt",
")",
"probs",
"/=",
"np",
".",
"linalg",
".",
"norm",
"(",
"probs",
",",
"ord",
"=",
"1",
",",
"keepdims",
"=",
"True",
")",
"probs",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"probs",
",",
"name",
"=",
"\"probs\"",
",",
"dtype",
"=",
"loc",
".",
"dtype",
")",
"# The following maps the broadcast of `loc` and `scale` to each grid",
"# point, i.e., we are creating several log-rates that correspond to the",
"# different Gauss-Hermite quadrature points and (possible) batches of",
"# `loc` and `scale`.",
"grid",
"=",
"(",
"loc",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"+",
"np",
".",
"sqrt",
"(",
"2.",
")",
"*",
"scale",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"*",
"grid",
")",
"return",
"grid",
",",
"probs"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
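As a sanity check of the Gauss-Hermite recipe above, a NumPy-only sketch for scalar `loc` and `scale`: the resulting `grid`/`probs` pair should approximate `E[exp(X)]` for `X ~ Normal(loc, scale)`, whose closed form is `exp(loc + scale**2 / 2)`. The specific values of `loc`, `scale`, and `quadrature_size` are illustrative.

```python
import numpy as np

def lognormal_gauss_hermite(loc, scale, quadrature_size):
  x, w = np.polynomial.hermite.hermgauss(quadrature_size)
  probs = w / np.linalg.norm(w, ord=1)      # Normalize weights to sum to 1.
  grid = loc + np.sqrt(2.) * scale * x      # Same change of variables as above.
  return grid, probs

loc, scale = 0.3, 0.5
grid, probs = lognormal_gauss_hermite(loc, scale, quadrature_size=20)
print(np.sum(probs * np.exp(grid)))         # Quadrature estimate of E[exp(X)].
print(np.exp(loc + 0.5 * scale ** 2))       # Closed form; the two agree closely.
```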
|
test
|
quadrature_scheme_lognormal_quantiles
|
Use LogNormal quantiles to form quadrature on positive-reals.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associated with each `grid` value.
|
tensorflow_probability/python/distributions/poisson_lognormal.py
|
def quadrature_scheme_lognormal_quantiles(
loc, scale, quadrature_size,
validate_args=False, name=None):
"""Use LogNormal quantiles to form quadrature on positive-reals.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associated with each `grid` value.
"""
with tf.name_scope(name or "quadrature_scheme_lognormal_quantiles"):
# Create a LogNormal distribution.
dist = transformed_distribution.TransformedDistribution(
distribution=normal.Normal(loc=loc, scale=scale),
bijector=exp_bijector.Exp(),
validate_args=validate_args)
batch_ndims = tensorshape_util.rank(dist.batch_shape)
if batch_ndims is None:
batch_ndims = tf.shape(input=dist.batch_shape_tensor())[0]
def _compute_quantiles():
"""Helper to build quantiles."""
# Omit {0, 1} since they might lead to Inf/NaN.
zero = tf.zeros([], dtype=dist.dtype)
edges = tf.linspace(zero, 1., quadrature_size + 3)[1:-1]
      # Expand edges so it broadcasts across batch dims.
edges = tf.reshape(
edges,
shape=tf.concat(
[[-1], tf.ones([batch_ndims], dtype=tf.int32)], axis=0))
quantiles = dist.quantile(edges)
# Cyclically permute left by one.
perm = tf.concat([tf.range(1, 1 + batch_ndims), [0]], axis=0)
quantiles = tf.transpose(a=quantiles, perm=perm)
return quantiles
quantiles = _compute_quantiles()
# Compute grid as quantile midpoints.
grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
# Set shape hints.
new_shape = tensorshape_util.concatenate(dist.batch_shape,
[quadrature_size])
tensorshape_util.set_shape(grid, new_shape)
# By construction probs is constant, i.e., `1 / quadrature_size`. This is
# important, because non-constant probs leads to non-reparameterizable
# samples.
probs = tf.fill(
dims=[quadrature_size], value=1. / tf.cast(quadrature_size, dist.dtype))
return grid, probs
|
def quadrature_scheme_lognormal_quantiles(
loc, scale, quadrature_size,
validate_args=False, name=None):
"""Use LogNormal quantiles to form quadrature on positive-reals.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associated with each `grid` value.
"""
with tf.name_scope(name or "quadrature_scheme_lognormal_quantiles"):
# Create a LogNormal distribution.
dist = transformed_distribution.TransformedDistribution(
distribution=normal.Normal(loc=loc, scale=scale),
bijector=exp_bijector.Exp(),
validate_args=validate_args)
batch_ndims = tensorshape_util.rank(dist.batch_shape)
if batch_ndims is None:
batch_ndims = tf.shape(input=dist.batch_shape_tensor())[0]
def _compute_quantiles():
"""Helper to build quantiles."""
# Omit {0, 1} since they might lead to Inf/NaN.
zero = tf.zeros([], dtype=dist.dtype)
edges = tf.linspace(zero, 1., quadrature_size + 3)[1:-1]
      # Expand edges so it broadcasts across batch dims.
edges = tf.reshape(
edges,
shape=tf.concat(
[[-1], tf.ones([batch_ndims], dtype=tf.int32)], axis=0))
quantiles = dist.quantile(edges)
# Cyclically permute left by one.
perm = tf.concat([tf.range(1, 1 + batch_ndims), [0]], axis=0)
quantiles = tf.transpose(a=quantiles, perm=perm)
return quantiles
quantiles = _compute_quantiles()
# Compute grid as quantile midpoints.
grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
# Set shape hints.
new_shape = tensorshape_util.concatenate(dist.batch_shape,
[quadrature_size])
tensorshape_util.set_shape(grid, new_shape)
# By construction probs is constant, i.e., `1 / quadrature_size`. This is
# important, because non-constant probs leads to non-reparameterizable
# samples.
probs = tf.fill(
dims=[quadrature_size], value=1. / tf.cast(quadrature_size, dist.dtype))
return grid, probs
|
[
"Use",
"LogNormal",
"quantiles",
"to",
"form",
"quadrature",
"on",
"positive",
"-",
"reals",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/poisson_lognormal.py#L88-L152
|
[
"def",
"quadrature_scheme_lognormal_quantiles",
"(",
"loc",
",",
"scale",
",",
"quadrature_size",
",",
"validate_args",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"quadrature_scheme_lognormal_quantiles\"",
")",
":",
"# Create a LogNormal distribution.",
"dist",
"=",
"transformed_distribution",
".",
"TransformedDistribution",
"(",
"distribution",
"=",
"normal",
".",
"Normal",
"(",
"loc",
"=",
"loc",
",",
"scale",
"=",
"scale",
")",
",",
"bijector",
"=",
"exp_bijector",
".",
"Exp",
"(",
")",
",",
"validate_args",
"=",
"validate_args",
")",
"batch_ndims",
"=",
"tensorshape_util",
".",
"rank",
"(",
"dist",
".",
"batch_shape",
")",
"if",
"batch_ndims",
"is",
"None",
":",
"batch_ndims",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"dist",
".",
"batch_shape_tensor",
"(",
")",
")",
"[",
"0",
"]",
"def",
"_compute_quantiles",
"(",
")",
":",
"\"\"\"Helper to build quantiles.\"\"\"",
"# Omit {0, 1} since they might lead to Inf/NaN.",
"zero",
"=",
"tf",
".",
"zeros",
"(",
"[",
"]",
",",
"dtype",
"=",
"dist",
".",
"dtype",
")",
"edges",
"=",
"tf",
".",
"linspace",
"(",
"zero",
",",
"1.",
",",
"quadrature_size",
"+",
"3",
")",
"[",
"1",
":",
"-",
"1",
"]",
"# Expand edges so its broadcast across batch dims.",
"edges",
"=",
"tf",
".",
"reshape",
"(",
"edges",
",",
"shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"[",
"-",
"1",
"]",
",",
"tf",
".",
"ones",
"(",
"[",
"batch_ndims",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"]",
",",
"axis",
"=",
"0",
")",
")",
"quantiles",
"=",
"dist",
".",
"quantile",
"(",
"edges",
")",
"# Cyclically permute left by one.",
"perm",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"range",
"(",
"1",
",",
"1",
"+",
"batch_ndims",
")",
",",
"[",
"0",
"]",
"]",
",",
"axis",
"=",
"0",
")",
"quantiles",
"=",
"tf",
".",
"transpose",
"(",
"a",
"=",
"quantiles",
",",
"perm",
"=",
"perm",
")",
"return",
"quantiles",
"quantiles",
"=",
"_compute_quantiles",
"(",
")",
"# Compute grid as quantile midpoints.",
"grid",
"=",
"(",
"quantiles",
"[",
"...",
",",
":",
"-",
"1",
"]",
"+",
"quantiles",
"[",
"...",
",",
"1",
":",
"]",
")",
"/",
"2.",
"# Set shape hints.",
"new_shape",
"=",
"tensorshape_util",
".",
"concatenate",
"(",
"dist",
".",
"batch_shape",
",",
"[",
"quadrature_size",
"]",
")",
"tensorshape_util",
".",
"set_shape",
"(",
"grid",
",",
"new_shape",
")",
"# By construction probs is constant, i.e., `1 / quadrature_size`. This is",
"# important, because non-constant probs leads to non-reparameterizable",
"# samples.",
"probs",
"=",
"tf",
".",
"fill",
"(",
"dims",
"=",
"[",
"quadrature_size",
"]",
",",
"value",
"=",
"1.",
"/",
"tf",
".",
"cast",
"(",
"quadrature_size",
",",
"dist",
".",
"dtype",
")",
")",
"return",
"grid",
",",
"probs"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
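And a sketch of the quantile-midpoint counterpart, with `scipy.stats.lognorm` standing in for the `Exp(Normal)` transformed distribution built above (the use of SciPy and the parameter values are assumptions made only for this illustration). With uniform `probs`, the grid mean gives a rough, tail-truncated approximation of the LogNormal mean.

```python
import numpy as np
from scipy import stats

def lognormal_quantiles(loc, scale, quadrature_size):
  dist = stats.lognorm(s=scale, scale=np.exp(loc))    # LogNormal(loc, scale).
  # Interior probability levels; {0, 1} are omitted to avoid infinite quantiles.
  edges = np.linspace(0., 1., quadrature_size + 3)[1:-1]
  quantiles = dist.ppf(edges)
  grid = (quantiles[:-1] + quantiles[1:]) / 2.        # Quantile midpoints.
  probs = np.full(quadrature_size, 1. / quadrature_size)
  return grid, probs

grid, probs = lognormal_quantiles(0.3, 0.5, quadrature_size=200)
print(np.sum(probs * grid))                 # Rough approximation of E[LogNormal].
print(np.exp(0.3 + 0.5 ** 2 / 2.))          # Exact mean, for comparison.
```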
|
test
|
_Mapping.merge
|
Returns new _Mapping with args merged with self.
Args:
x: `Tensor` or None. Input to forward; output of inverse.
y: `Tensor` or None. Input to inverse; output of forward.
ildj: `Tensor`. This is the (un-reduce_sum'ed) inverse log det jacobian.
kwargs: Python dictionary. Extra args supplied to forward/inverse/etc
functions.
mapping: Instance of _Mapping to merge. Can only be specified if no other
arg is specified.
Returns:
mapping: New instance of `_Mapping` which has inputs merged with self.
Raises:
ValueError: if mapping and any other arg is not `None`.
|
tensorflow_probability/python/bijectors/bijector.py
|
def merge(self, x=None, y=None, ildj=None, kwargs=None, mapping=None):
"""Returns new _Mapping with args merged with self.
Args:
x: `Tensor` or None. Input to forward; output of inverse.
y: `Tensor` or None. Input to inverse; output of forward.
ildj: `Tensor`. This is the (un-reduce_sum'ed) inverse log det jacobian.
kwargs: Python dictionary. Extra args supplied to forward/inverse/etc
functions.
mapping: Instance of _Mapping to merge. Can only be specified if no other
arg is specified.
Returns:
mapping: New instance of `_Mapping` which has inputs merged with self.
Raises:
ValueError: if mapping and any other arg is not `None`.
"""
if mapping is None:
mapping = _Mapping(x=x, y=y, ildj=ildj, kwargs=kwargs)
elif any(arg is not None for arg in [x, y, ildj, kwargs]):
raise ValueError("Cannot simultaneously specify mapping and individual "
"arguments.")
return _Mapping(
x=self._merge(self.x, mapping.x),
y=self._merge(self.y, mapping.y),
ildj=self._merge(self.ildj, mapping.ildj),
kwargs=self._merge(self.kwargs, mapping.kwargs, use_equals=True))
|
def merge(self, x=None, y=None, ildj=None, kwargs=None, mapping=None):
"""Returns new _Mapping with args merged with self.
Args:
x: `Tensor` or None. Input to forward; output of inverse.
y: `Tensor` or None. Input to inverse; output of forward.
ildj: `Tensor`. This is the (un-reduce_sum'ed) inverse log det jacobian.
kwargs: Python dictionary. Extra args supplied to forward/inverse/etc
functions.
mapping: Instance of _Mapping to merge. Can only be specified if no other
arg is specified.
Returns:
mapping: New instance of `_Mapping` which has inputs merged with self.
Raises:
ValueError: if mapping and any other arg is not `None`.
"""
if mapping is None:
mapping = _Mapping(x=x, y=y, ildj=ildj, kwargs=kwargs)
elif any(arg is not None for arg in [x, y, ildj, kwargs]):
raise ValueError("Cannot simultaneously specify mapping and individual "
"arguments.")
return _Mapping(
x=self._merge(self.x, mapping.x),
y=self._merge(self.y, mapping.y),
ildj=self._merge(self.ildj, mapping.ildj),
kwargs=self._merge(self.kwargs, mapping.kwargs, use_equals=True))
|
[
"Returns",
"new",
"_Mapping",
"with",
"args",
"merged",
"with",
"self",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/bijector.py#L74-L102
|
[
"def",
"merge",
"(",
"self",
",",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"ildj",
"=",
"None",
",",
"kwargs",
"=",
"None",
",",
"mapping",
"=",
"None",
")",
":",
"if",
"mapping",
"is",
"None",
":",
"mapping",
"=",
"_Mapping",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"ildj",
"=",
"ildj",
",",
"kwargs",
"=",
"kwargs",
")",
"elif",
"any",
"(",
"arg",
"is",
"not",
"None",
"for",
"arg",
"in",
"[",
"x",
",",
"y",
",",
"ildj",
",",
"kwargs",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Cannot simultaneously specify mapping and individual \"",
"\"arguments.\"",
")",
"return",
"_Mapping",
"(",
"x",
"=",
"self",
".",
"_merge",
"(",
"self",
".",
"x",
",",
"mapping",
".",
"x",
")",
",",
"y",
"=",
"self",
".",
"_merge",
"(",
"self",
".",
"y",
",",
"mapping",
".",
"y",
")",
",",
"ildj",
"=",
"self",
".",
"_merge",
"(",
"self",
".",
"ildj",
",",
"mapping",
".",
"ildj",
")",
",",
"kwargs",
"=",
"self",
".",
"_merge",
"(",
"self",
".",
"kwargs",
",",
"mapping",
".",
"kwargs",
",",
"use_equals",
"=",
"True",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
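A condensed, standalone sketch of the merge semantics described above, using a plain namedtuple as a stand-in for `_Mapping`. The collision check that the real `_merge` performs on incompatible non-`None` values is omitted here for brevity.

```python
import collections

class Mapping(collections.namedtuple('Mapping', ['x', 'y', 'ildj', 'kwargs'])):

  def __new__(cls, x=None, y=None, ildj=None, kwargs=None):
    return super(Mapping, cls).__new__(cls, x, y, ildj, kwargs)

  def merge(self, x=None, y=None, ildj=None, kwargs=None, mapping=None):
    if mapping is None:
      mapping = Mapping(x=x, y=y, ildj=ildj, kwargs=kwargs)
    elif any(arg is not None for arg in [x, y, ildj, kwargs]):
      raise ValueError('Cannot simultaneously specify mapping and individual '
                       'arguments.')
    keep = lambda old, new: new if old is None else old   # No collision check here.
    return Mapping(x=keep(self.x, mapping.x), y=keep(self.y, mapping.y),
                   ildj=keep(self.ildj, mapping.ildj),
                   kwargs=keep(self.kwargs, mapping.kwargs))

m = Mapping(x=1.5)    # Cache the forward input first ...
m = m.merge(y=2.7)    # ... then attach the forward output once it is known.
print(m)              # Mapping(x=1.5, y=2.7, ildj=None, kwargs=None)
```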
|
test
|
_Mapping.remove
|
To support weak referencing, removes cache key from the cache value.
|
tensorflow_probability/python/bijectors/bijector.py
|
def remove(self, field):
"""To support weak referencing, removes cache key from the cache value."""
return _Mapping(
x=None if field == "x" else self.x,
y=None if field == "y" else self.y,
ildj=self.ildj,
kwargs=self.kwargs)
|
def remove(self, field):
"""To support weak referencing, removes cache key from the cache value."""
return _Mapping(
x=None if field == "x" else self.x,
y=None if field == "y" else self.y,
ildj=self.ildj,
kwargs=self.kwargs)
|
[
"To",
"support",
"weak",
"referencing",
"removes",
"cache",
"key",
"from",
"the",
"cache",
"value",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/bijector.py#L104-L110
|
[
"def",
"remove",
"(",
"self",
",",
"field",
")",
":",
"return",
"_Mapping",
"(",
"x",
"=",
"None",
"if",
"field",
"==",
"\"x\"",
"else",
"self",
".",
"x",
",",
"y",
"=",
"None",
"if",
"field",
"==",
"\"y\"",
"else",
"self",
".",
"y",
",",
"ildj",
"=",
"self",
".",
"ildj",
",",
"kwargs",
"=",
"self",
".",
"kwargs",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_Mapping._merge
|
Helper to merge which handles merging one value.
|
tensorflow_probability/python/bijectors/bijector.py
|
def _merge(self, old, new, use_equals=False):
"""Helper to merge which handles merging one value."""
if old is None:
return new
if new is None:
return old
if (old == new) if use_equals else (old is new):
return old
raise ValueError("Incompatible values: %s != %s" % (old, new))
|
def _merge(self, old, new, use_equals=False):
"""Helper to merge which handles merging one value."""
if old is None:
return new
if new is None:
return old
if (old == new) if use_equals else (old is new):
return old
raise ValueError("Incompatible values: %s != %s" % (old, new))
|
[
"Helper",
"to",
"merge",
"which",
"handles",
"merging",
"one",
"value",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/bijector.py#L112-L120
|
[
"def",
"_merge",
"(",
"self",
",",
"old",
",",
"new",
",",
"use_equals",
"=",
"False",
")",
":",
"if",
"old",
"is",
"None",
":",
"return",
"new",
"if",
"new",
"is",
"None",
":",
"return",
"old",
"if",
"(",
"old",
"==",
"new",
")",
"if",
"use_equals",
"else",
"(",
"old",
"is",
"new",
")",
":",
"return",
"old",
"raise",
"ValueError",
"(",
"\"Incompatible values: %s != %s\"",
"%",
"(",
"old",
",",
"new",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_Mapping._deep_tuple
|
Converts nested `tuple`, `list`, or `dict` to nested `tuple`.
|
tensorflow_probability/python/bijectors/bijector.py
|
def _deep_tuple(self, x):
"""Converts nested `tuple`, `list`, or `dict` to nested `tuple`."""
if isinstance(x, dict):
return self._deep_tuple(tuple(sorted(x.items())))
elif isinstance(x, (list, tuple)):
return tuple(map(self._deep_tuple, x))
return x
|
def _deep_tuple(self, x):
"""Converts nested `tuple`, `list`, or `dict` to nested `tuple`."""
if isinstance(x, dict):
return self._deep_tuple(tuple(sorted(x.items())))
elif isinstance(x, (list, tuple)):
return tuple(map(self._deep_tuple, x))
return x
|
[
"Converts",
"nested",
"tuple",
"list",
"or",
"dict",
"to",
"nested",
"tuple",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/bijector.py#L122-L129
|
[
"def",
"_deep_tuple",
"(",
"self",
",",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"dict",
")",
":",
"return",
"self",
".",
"_deep_tuple",
"(",
"tuple",
"(",
"sorted",
"(",
"x",
".",
"items",
"(",
")",
")",
")",
")",
"elif",
"isinstance",
"(",
"x",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"tuple",
"(",
"map",
"(",
"self",
".",
"_deep_tuple",
",",
"x",
")",
")",
"return",
"x"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
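Why `_deep_tuple` is useful: nested kwargs have to become hashable before they can participate in a cache key. A tiny standalone sketch of that conversion (the example dictionary is made up):

```python
def deep_tuple(x):
  """Converts nested tuple, list, or dict into a nested, hashable tuple."""
  if isinstance(x, dict):
    return deep_tuple(tuple(sorted(x.items())))
  elif isinstance(x, (list, tuple)):
    return tuple(map(deep_tuple, x))
  return x

kwargs = {'event_ndims': 1, 'extra': [1, 2, {'nested': True}]}
key = deep_tuple(kwargs)
print(key)                       # (('event_ndims', 1), ('extra', (1, 2, (('nested', True),))))
cache = {key: 'cached result'}   # The tuple form can now be used as a dict key.
```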
|
test
|
_left_doubling_increments
|
Computes the doubling increments for the left end point.
The doubling procedure expands an initial interval to find a superset of the
true slice. At each doubling iteration, the interval width is doubled to
either the left or the right hand side with equal probability.
If, initially, the left end point is at `L(0)` and the width of the
interval is `w(0)`, then the left end point and the width at the
k-th iteration (denoted L(k) and w(k) respectively) are given by the following
recursions:
```none
w(k) = 2 * w(k-1)
L(k) = L(k-1) - w(k-1) * X_k, X_k ~ Bernoulli(0.5)
or, L(0) - L(k) = w(0) Sum(2^i * X(i+1), 0 <= i < k)
```
This function computes the sequence of `L(0)-L(k)` and `w(k)` for k between 0
and `max_doublings` independently for each chain.
Args:
batch_shape: Positive int32 `tf.Tensor`. The batch shape.
max_doublings: Scalar positive int32 `tf.Tensor`. The maximum number of
doublings to consider.
step_size: A real `tf.Tensor` with shape compatible with [num_chains].
The size of the initial interval.
seed: (Optional) positive int. The random seed. If None, no seed is set.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'find_slice_bounds').
Returns:
left_increments: A tensor of shape (max_doublings+1, batch_shape). The
relative position of the left end point after the doublings.
widths: A tensor of shape (max_doublings+1, ones_like(batch_shape)). The
widths of the intervals at each stage of the doubling.
|
tensorflow_probability/python/mcmc/internal/slice_sampler_utils.py
|
def _left_doubling_increments(batch_shape, max_doublings, step_size, seed=None,
name=None):
"""Computes the doubling increments for the left end point.
The doubling procedure expands an initial interval to find a superset of the
true slice. At each doubling iteration, the interval width is doubled to
either the left or the right hand side with equal probability.
If, initially, the left end point is at `L(0)` and the width of the
interval is `w(0)`, then the left end point and the width at the
k-th iteration (denoted L(k) and w(k) respectively) are given by the following
recursions:
```none
w(k) = 2 * w(k-1)
L(k) = L(k-1) - w(k-1) * X_k, X_k ~ Bernoulli(0.5)
or, L(0) - L(k) = w(0) Sum(2^i * X(i+1), 0 <= i < k)
```
This function computes the sequence of `L(0)-L(k)` and `w(k)` for k between 0
and `max_doublings` independently for each chain.
Args:
batch_shape: Positive int32 `tf.Tensor`. The batch shape.
max_doublings: Scalar positive int32 `tf.Tensor`. The maximum number of
doublings to consider.
step_size: A real `tf.Tensor` with shape compatible with [num_chains].
The size of the initial interval.
seed: (Optional) positive int. The random seed. If None, no seed is set.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'find_slice_bounds').
Returns:
left_increments: A tensor of shape (max_doublings+1, batch_shape). The
relative position of the left end point after the doublings.
widths: A tensor of shape (max_doublings+1, ones_like(batch_shape)). The
widths of the intervals at each stage of the doubling.
"""
with tf.compat.v1.name_scope(name, 'left_doubling_increments',
[batch_shape, max_doublings, step_size]):
step_size = tf.convert_to_tensor(value=step_size)
dtype = step_size.dtype.base_dtype
# Output shape of the left increments tensor.
output_shape = tf.concat(([max_doublings + 1], batch_shape), axis=0)
# A sample realization of X_k.
expand_left = distributions.Bernoulli(0.5, dtype=dtype).sample(
sample_shape=output_shape, seed=seed)
# The widths of the successive intervals. Starts with 1.0 and ends with
# 2^max_doublings.
width_multipliers = tf.cast(2 ** tf.range(0, max_doublings+1), dtype=dtype)
# Output shape of the `widths` tensor.
widths_shape = tf.concat(([max_doublings + 1],
tf.ones_like(batch_shape)), axis=0)
width_multipliers = tf.reshape(width_multipliers, shape=widths_shape)
# Widths shape is [max_doublings + 1, 1, 1, 1...].
widths = width_multipliers * step_size
# Take the cumulative sum of the left side increments in slice width to give
    # the resulting distance from the initial lower bound.
left_increments = tf.cumsum(widths * expand_left, exclusive=True, axis=0)
return left_increments, widths
|
def _left_doubling_increments(batch_shape, max_doublings, step_size, seed=None,
name=None):
"""Computes the doubling increments for the left end point.
The doubling procedure expands an initial interval to find a superset of the
true slice. At each doubling iteration, the interval width is doubled to
either the left or the right hand side with equal probability.
If, initially, the left end point is at `L(0)` and the width of the
interval is `w(0)`, then the left end point and the width at the
k-th iteration (denoted L(k) and w(k) respectively) are given by the following
recursions:
```none
w(k) = 2 * w(k-1)
L(k) = L(k-1) - w(k-1) * X_k, X_k ~ Bernoulli(0.5)
or, L(0) - L(k) = w(0) Sum(2^i * X(i+1), 0 <= i < k)
```
This function computes the sequence of `L(0)-L(k)` and `w(k)` for k between 0
and `max_doublings` independently for each chain.
Args:
batch_shape: Positive int32 `tf.Tensor`. The batch shape.
max_doublings: Scalar positive int32 `tf.Tensor`. The maximum number of
doublings to consider.
step_size: A real `tf.Tensor` with shape compatible with [num_chains].
The size of the initial interval.
seed: (Optional) positive int. The random seed. If None, no seed is set.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'find_slice_bounds').
Returns:
left_increments: A tensor of shape (max_doublings+1, batch_shape). The
relative position of the left end point after the doublings.
widths: A tensor of shape (max_doublings+1, ones_like(batch_shape)). The
widths of the intervals at each stage of the doubling.
"""
with tf.compat.v1.name_scope(name, 'left_doubling_increments',
[batch_shape, max_doublings, step_size]):
step_size = tf.convert_to_tensor(value=step_size)
dtype = step_size.dtype.base_dtype
# Output shape of the left increments tensor.
output_shape = tf.concat(([max_doublings + 1], batch_shape), axis=0)
# A sample realization of X_k.
expand_left = distributions.Bernoulli(0.5, dtype=dtype).sample(
sample_shape=output_shape, seed=seed)
# The widths of the successive intervals. Starts with 1.0 and ends with
# 2^max_doublings.
width_multipliers = tf.cast(2 ** tf.range(0, max_doublings+1), dtype=dtype)
# Output shape of the `widths` tensor.
widths_shape = tf.concat(([max_doublings + 1],
tf.ones_like(batch_shape)), axis=0)
width_multipliers = tf.reshape(width_multipliers, shape=widths_shape)
# Widths shape is [max_doublings + 1, 1, 1, 1...].
widths = width_multipliers * step_size
# Take the cumulative sum of the left side increments in slice width to give
    # the resulting distance from the initial lower bound.
left_increments = tf.cumsum(widths * expand_left, exclusive=True, axis=0)
return left_increments, widths
|
[
"Computes",
"the",
"doubling",
"increments",
"for",
"the",
"left",
"end",
"point",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/slice_sampler_utils.py#L26-L87
|
[
"def",
"_left_doubling_increments",
"(",
"batch_shape",
",",
"max_doublings",
",",
"step_size",
",",
"seed",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'left_doubling_increments'",
",",
"[",
"batch_shape",
",",
"max_doublings",
",",
"step_size",
"]",
")",
":",
"step_size",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"step_size",
")",
"dtype",
"=",
"step_size",
".",
"dtype",
".",
"base_dtype",
"# Output shape of the left increments tensor.",
"output_shape",
"=",
"tf",
".",
"concat",
"(",
"(",
"[",
"max_doublings",
"+",
"1",
"]",
",",
"batch_shape",
")",
",",
"axis",
"=",
"0",
")",
"# A sample realization of X_k.",
"expand_left",
"=",
"distributions",
".",
"Bernoulli",
"(",
"0.5",
",",
"dtype",
"=",
"dtype",
")",
".",
"sample",
"(",
"sample_shape",
"=",
"output_shape",
",",
"seed",
"=",
"seed",
")",
"# The widths of the successive intervals. Starts with 1.0 and ends with",
"# 2^max_doublings.",
"width_multipliers",
"=",
"tf",
".",
"cast",
"(",
"2",
"**",
"tf",
".",
"range",
"(",
"0",
",",
"max_doublings",
"+",
"1",
")",
",",
"dtype",
"=",
"dtype",
")",
"# Output shape of the `widths` tensor.",
"widths_shape",
"=",
"tf",
".",
"concat",
"(",
"(",
"[",
"max_doublings",
"+",
"1",
"]",
",",
"tf",
".",
"ones_like",
"(",
"batch_shape",
")",
")",
",",
"axis",
"=",
"0",
")",
"width_multipliers",
"=",
"tf",
".",
"reshape",
"(",
"width_multipliers",
",",
"shape",
"=",
"widths_shape",
")",
"# Widths shape is [max_doublings + 1, 1, 1, 1...].",
"widths",
"=",
"width_multipliers",
"*",
"step_size",
"# Take the cumulative sum of the left side increments in slice width to give",
"# the resulting distance from the inital lower bound.",
"left_increments",
"=",
"tf",
".",
"cumsum",
"(",
"widths",
"*",
"expand_left",
",",
"exclusive",
"=",
"True",
",",
"axis",
"=",
"0",
")",
"return",
"left_increments",
",",
"widths"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
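A NumPy sketch of the recursion in the docstring above: the exclusive cumulative sum that turns the Bernoulli draws `X_k` into the left-endpoint offsets `L(0) - L(k)`. This is a single-chain illustration with an arbitrary seed, not the batched TensorFlow version.

```python
import numpy as np

def left_doubling_increments(max_doublings, step_size, rng):
  # X_k ~ Bernoulli(0.5): whether the k-th doubling extends the interval leftwards.
  expand_left = rng.integers(0, 2, size=max_doublings + 1)
  # w(k) = step_size * 2**k for k = 0..max_doublings.
  widths = step_size * 2. ** np.arange(max_doublings + 1)
  # L(0) - L(k) = sum_{i < k} w(i) * X_{i+1}: an exclusive cumulative sum.
  left_increments = np.cumsum(widths * expand_left) - widths * expand_left
  return left_increments, widths

rng = np.random.default_rng(42)
incs, widths = left_doubling_increments(max_doublings=5, step_size=1.0, rng=rng)
print(widths)   # [ 1.  2.  4.  8. 16. 32.]
print(incs)     # Non-decreasing offsets of the left endpoint from its starting value.
```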
|
test
|
_find_best_interval_idx
|
Finds the index of the optimal set of bounds for each chain.
For each chain, finds the smallest set of bounds for which both edges lie
outside the slice. This is equivalent to the point at which a for loop
implementation (P715 of Neal (2003)) of the algorithm would terminate.
Performs the following calculation, where i is the number of doublings that
have been performed and k is the max number of doublings:
(2 * k - i) * flag + i
The argmax of the above returns the earliest index where the bounds were
outside the slice and if there is no such point, the widest bounds.
Args:
x: A tensor of shape (max_doublings+1, batch_shape). Type int32, with value
0 or 1. Indicates if this set of bounds is outside the slice.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'find_slice_bounds').
Returns:
indices: A tensor of shape batch_shape. Type int32, with the index of the
first set of bounds outside the slice and if there are none, the index of
the widest set.
|
tensorflow_probability/python/mcmc/internal/slice_sampler_utils.py
|
def _find_best_interval_idx(x, name=None):
"""Finds the index of the optimal set of bounds for each chain.
For each chain, finds the smallest set of bounds for which both edges lie
outside the slice. This is equivalent to the point at which a for loop
implementation (P715 of Neal (2003)) of the algorithm would terminate.
Performs the following calculation, where i is the number of doublings that
have been performed and k is the max number of doublings:
(2 * k - i) * flag + i
The argmax of the above returns the earliest index where the bounds were
outside the slice and if there is no such point, the widest bounds.
Args:
x: A tensor of shape (max_doublings+1, batch_shape). Type int32, with value
0 or 1. Indicates if this set of bounds is outside the slice.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'find_slice_bounds').
Returns:
indices: A tensor of shape batch_shape. Type int32, with the index of the
first set of bounds outside the slice and if there are none, the index of
the widest set.
"""
with tf.compat.v1.name_scope(name, 'find_best_interval_idx', [x]):
# Returns max_doublings + 1. Positive int32.
k = tf.shape(input=x)[0]
dtype = x.dtype.base_dtype
# Factors by which to multiply the flag. Corresponds to (2 * k - i) above.
mults = tf.range(2 * k, k, -1, dtype=dtype)[:, tf.newaxis]
# Factors by which to shift the flag. Corresponds to i above. Ensures the
# widest bounds are selected if there are no bounds outside the slice.
shifts = tf.range(k, dtype=dtype)[:, tf.newaxis]
indices = tf.argmax(input=mults * x + shifts, axis=0, output_type=dtype)
return indices
|
def _find_best_interval_idx(x, name=None):
"""Finds the index of the optimal set of bounds for each chain.
For each chain, finds the smallest set of bounds for which both edges lie
outside the slice. This is equivalent to the point at which a for loop
implementation (P715 of Neal (2003)) of the algorithm would terminate.
Performs the following calculation, where i is the number of doublings that
have been performed and k is the max number of doublings:
(2 * k - i) * flag + i
The argmax of the above returns the earliest index where the bounds were
outside the slice and if there is no such point, the widest bounds.
Args:
x: A tensor of shape (max_doublings+1, batch_shape). Type int32, with value
0 or 1. Indicates if this set of bounds is outside the slice.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'find_slice_bounds').
Returns:
indices: A tensor of shape batch_shape. Type int32, with the index of the
first set of bounds outside the slice and if there are none, the index of
the widest set.
"""
with tf.compat.v1.name_scope(name, 'find_best_interval_idx', [x]):
# Returns max_doublings + 1. Positive int32.
k = tf.shape(input=x)[0]
dtype = x.dtype.base_dtype
# Factors by which to multiply the flag. Corresponds to (2 * k - i) above.
mults = tf.range(2 * k, k, -1, dtype=dtype)[:, tf.newaxis]
# Factors by which to shift the flag. Corresponds to i above. Ensures the
# widest bounds are selected if there are no bounds outside the slice.
shifts = tf.range(k, dtype=dtype)[:, tf.newaxis]
indices = tf.argmax(input=mults * x + shifts, axis=0, output_type=dtype)
return indices
|
[
"Finds",
"the",
"index",
"of",
"the",
"optimal",
"set",
"of",
"bounds",
"for",
"each",
"chain",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/slice_sampler_utils.py#L90-L126
|
[
"def",
"_find_best_interval_idx",
"(",
"x",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'find_best_interval_idx'",
",",
"[",
"x",
"]",
")",
":",
"# Returns max_doublings + 1. Positive int32.",
"k",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"[",
"0",
"]",
"dtype",
"=",
"x",
".",
"dtype",
".",
"base_dtype",
"# Factors by which to multiply the flag. Corresponds to (2 * k - i) above.",
"mults",
"=",
"tf",
".",
"range",
"(",
"2",
"*",
"k",
",",
"k",
",",
"-",
"1",
",",
"dtype",
"=",
"dtype",
")",
"[",
":",
",",
"tf",
".",
"newaxis",
"]",
"# Factors by which to shift the flag. Corresponds to i above. Ensures the",
"# widest bounds are selected if there are no bounds outside the slice.",
"shifts",
"=",
"tf",
".",
"range",
"(",
"k",
",",
"dtype",
"=",
"dtype",
")",
"[",
":",
",",
"tf",
".",
"newaxis",
"]",
"indices",
"=",
"tf",
".",
"argmax",
"(",
"input",
"=",
"mults",
"*",
"x",
"+",
"shifts",
",",
"axis",
"=",
"0",
",",
"output_type",
"=",
"dtype",
")",
"return",
"indices"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
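The `(2 * k - i) * flag + i` selection rule documented above is easy to sanity-check outside TensorFlow. In the NumPy snippet below (an illustrative re-derivation, not the library code), every row whose flag is 1 scores exactly `2 * k`, while a failing row scores only its index `i <= k - 1`; `argmax` therefore returns the first flagged row (NumPy breaks ties toward the lowest index) or, when no flag is set, the last and widest row.

```python
import numpy as np

def find_best_interval_idx_np(flags):
    """flags: int array, shape (k, batch); 1 means both bounds lie outside the slice."""
    k = flags.shape[0]
    mults = np.arange(2 * k, k, -1)[:, None]   # 2k, 2k-1, ..., k+1
    shifts = np.arange(k)[:, None]             # 0, 1, ..., k-1
    return np.argmax(mults * flags + shifts, axis=0)

flags = np.array([[0, 0],
                  [1, 0],
                  [1, 0]])                     # k = 3, two chains
print(find_best_interval_idx_np(flags))       # [1 2] -> first success, else widest
```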
test
|
slice_bounds_by_doubling
|
Returns the bounds of the slice at each stage of doubling procedure.
Precomputes the x coordinates of the left (L) and right (R) endpoints of the
interval `I` produced in the "doubling" algorithm [Neal 2003][1] P713. Note
that we simultaneously compute all possible doubling values for each chain,
for the reason that at small-medium densities, the gains from parallel
evaluation might cause a speed-up, but this will be benchmarked against the
while loop implementation.
Args:
x_initial: `tf.Tensor` of any shape and any real dtype consumable by
`target_log_prob`. The initial points.
target_log_prob: A callable taking a `tf.Tensor` of shape and dtype as
`x_initial` and returning a tensor of the same shape. The log density of
the target distribution.
log_slice_heights: `tf.Tensor` with the same shape as `x_initial` and the
same dtype as returned by `target_log_prob`. The log of the height of the
slice for each chain. The values must be bounded above by
`target_log_prob(x_initial)`.
max_doublings: Scalar positive int32 `tf.Tensor`. The maximum number of
doublings to consider.
step_size: `tf.Tensor` with same dtype as and shape compatible with
`x_initial`. The size of the initial interval.
seed: (Optional) positive int. The random seed. If None, no seed is set.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'find_slice_bounds').
Returns:
upper_bounds: A tensor of same shape and dtype as `x_initial`. Slice upper
bounds for each chain.
lower_bounds: A tensor of same shape and dtype as `x_initial`. Slice lower
bounds for each chain.
both_ok: A tensor with the same shape as `x_initial` and boolean dtype. Indicates if both
the chosen upper and lower bound lie outside of the slice.
#### References
[1]: Radford M. Neal. Slice Sampling. The Annals of Statistics. 2003, Vol 31,
No. 3 , 705-767.
https://projecteuclid.org/download/pdf_1/euclid.aos/1056562461
|
tensorflow_probability/python/mcmc/internal/slice_sampler_utils.py
|
def slice_bounds_by_doubling(x_initial,
target_log_prob,
log_slice_heights,
max_doublings,
step_size,
seed=None,
name=None):
"""Returns the bounds of the slice at each stage of doubling procedure.
Precomputes the x coordinates of the left (L) and right (R) endpoints of the
interval `I` produced in the "doubling" algorithm [Neal 2003][1] P713. Note
that we simultaneously compute all possible doubling values for each chain,
for the reason that at small-medium densities, the gains from parallel
evaluation might cause a speed-up, but this will be benchmarked against the
while loop implementation.
Args:
x_initial: `tf.Tensor` of any shape and any real dtype consumable by
`target_log_prob`. The initial points.
target_log_prob: A callable taking a `tf.Tensor` of shape and dtype as
`x_initial` and returning a tensor of the same shape. The log density of
the target distribution.
log_slice_heights: `tf.Tensor` with the same shape as `x_initial` and the
same dtype as returned by `target_log_prob`. The log of the height of the
slice for each chain. The values must be bounded above by
`target_log_prob(x_initial)`.
max_doublings: Scalar positive int32 `tf.Tensor`. The maximum number of
doublings to consider.
step_size: `tf.Tensor` with same dtype as and shape compatible with
`x_initial`. The size of the initial interval.
seed: (Optional) positive int. The random seed. If None, no seed is set.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'find_slice_bounds').
Returns:
upper_bounds: A tensor of same shape and dtype as `x_initial`. Slice upper
bounds for each chain.
lower_bounds: A tensor of same shape and dtype as `x_initial`. Slice lower
bounds for each chain.
    both_ok: A tensor with the same shape as `x_initial` and boolean dtype. Indicates if both
the chosen upper and lower bound lie outside of the slice.
#### References
[1]: Radford M. Neal. Slice Sampling. The Annals of Statistics. 2003, Vol 31,
No. 3 , 705-767.
https://projecteuclid.org/download/pdf_1/euclid.aos/1056562461
"""
with tf.compat.v1.name_scope(
name, 'slice_bounds_by_doubling',
[x_initial, log_slice_heights, max_doublings, step_size]):
seed_gen = distributions.SeedStream(seed, salt='slice_bounds_by_doubling')
x_initial = tf.convert_to_tensor(value=x_initial)
batch_shape = tf.shape(input=x_initial)
dtype = step_size.dtype.base_dtype
left_endpoints = x_initial + step_size * tf.random.uniform(
batch_shape, minval=-1.0, maxval=0.0, dtype=dtype, seed=seed_gen())
# Compute the increments by which we need to step the upper and lower bounds
# part of the doubling procedure.
left_increments, widths = _left_doubling_increments(
batch_shape, max_doublings, step_size, seed=seed_gen())
# The left and right end points. Shape (max_doublings+1,) + batch_shape.
left_endpoints -= left_increments
right_endpoints = left_endpoints + widths
# Test if these end points lie outside of the slice.
# Checks if the end points of the slice are outside the graph of the pdf.
left_ep_values = tf.map_fn(target_log_prob, left_endpoints)
right_ep_values = tf.map_fn(target_log_prob, right_endpoints)
left_ok = left_ep_values < log_slice_heights
right_ok = right_ep_values < log_slice_heights
both_ok = left_ok & right_ok
both_ok_f = tf.reshape(both_ok, [max_doublings + 1, -1])
best_interval_idx = _find_best_interval_idx(
tf.cast(both_ok_f, dtype=tf.int32))
# Formats the above index as required to use with gather_nd.
point_index_gather = tf.stack(
[best_interval_idx,
tf.range(tf.size(input=best_interval_idx))],
axis=1,
name='point_index_gather')
left_ep_f = tf.reshape(left_endpoints, [max_doublings + 1, -1])
right_ep_f = tf.reshape(right_endpoints, [max_doublings + 1, -1])
    # The x values of the upper and lower bounds of the slices for each chain.
lower_bounds = tf.reshape(tf.gather_nd(left_ep_f, point_index_gather),
batch_shape)
upper_bounds = tf.reshape(tf.gather_nd(right_ep_f, point_index_gather),
batch_shape)
both_ok = tf.reduce_any(input_tensor=both_ok, axis=0)
return upper_bounds, lower_bounds, both_ok
|
def slice_bounds_by_doubling(x_initial,
target_log_prob,
log_slice_heights,
max_doublings,
step_size,
seed=None,
name=None):
"""Returns the bounds of the slice at each stage of doubling procedure.
Precomputes the x coordinates of the left (L) and right (R) endpoints of the
interval `I` produced in the "doubling" algorithm [Neal 2003][1] P713. Note
that we simultaneously compute all possible doubling values for each chain,
for the reason that at small-medium densities, the gains from parallel
evaluation might cause a speed-up, but this will be benchmarked against the
while loop implementation.
Args:
x_initial: `tf.Tensor` of any shape and any real dtype consumable by
`target_log_prob`. The initial points.
target_log_prob: A callable taking a `tf.Tensor` of shape and dtype as
`x_initial` and returning a tensor of the same shape. The log density of
the target distribution.
log_slice_heights: `tf.Tensor` with the same shape as `x_initial` and the
same dtype as returned by `target_log_prob`. The log of the height of the
slice for each chain. The values must be bounded above by
`target_log_prob(x_initial)`.
max_doublings: Scalar positive int32 `tf.Tensor`. The maximum number of
doublings to consider.
step_size: `tf.Tensor` with same dtype as and shape compatible with
`x_initial`. The size of the initial interval.
seed: (Optional) positive int. The random seed. If None, no seed is set.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'find_slice_bounds').
Returns:
upper_bounds: A tensor of same shape and dtype as `x_initial`. Slice upper
bounds for each chain.
lower_bounds: A tensor of same shape and dtype as `x_initial`. Slice lower
bounds for each chain.
    both_ok: A tensor with the same shape as `x_initial` and boolean dtype. Indicates if both
the chosen upper and lower bound lie outside of the slice.
#### References
[1]: Radford M. Neal. Slice Sampling. The Annals of Statistics. 2003, Vol 31,
No. 3 , 705-767.
https://projecteuclid.org/download/pdf_1/euclid.aos/1056562461
"""
with tf.compat.v1.name_scope(
name, 'slice_bounds_by_doubling',
[x_initial, log_slice_heights, max_doublings, step_size]):
seed_gen = distributions.SeedStream(seed, salt='slice_bounds_by_doubling')
x_initial = tf.convert_to_tensor(value=x_initial)
batch_shape = tf.shape(input=x_initial)
dtype = step_size.dtype.base_dtype
left_endpoints = x_initial + step_size * tf.random.uniform(
batch_shape, minval=-1.0, maxval=0.0, dtype=dtype, seed=seed_gen())
# Compute the increments by which we need to step the upper and lower bounds
# part of the doubling procedure.
left_increments, widths = _left_doubling_increments(
batch_shape, max_doublings, step_size, seed=seed_gen())
# The left and right end points. Shape (max_doublings+1,) + batch_shape.
left_endpoints -= left_increments
right_endpoints = left_endpoints + widths
# Test if these end points lie outside of the slice.
# Checks if the end points of the slice are outside the graph of the pdf.
left_ep_values = tf.map_fn(target_log_prob, left_endpoints)
right_ep_values = tf.map_fn(target_log_prob, right_endpoints)
left_ok = left_ep_values < log_slice_heights
right_ok = right_ep_values < log_slice_heights
both_ok = left_ok & right_ok
both_ok_f = tf.reshape(both_ok, [max_doublings + 1, -1])
best_interval_idx = _find_best_interval_idx(
tf.cast(both_ok_f, dtype=tf.int32))
# Formats the above index as required to use with gather_nd.
point_index_gather = tf.stack(
[best_interval_idx,
tf.range(tf.size(input=best_interval_idx))],
axis=1,
name='point_index_gather')
left_ep_f = tf.reshape(left_endpoints, [max_doublings + 1, -1])
right_ep_f = tf.reshape(right_endpoints, [max_doublings + 1, -1])
    # The x values of the upper and lower bounds of the slices for each chain.
lower_bounds = tf.reshape(tf.gather_nd(left_ep_f, point_index_gather),
batch_shape)
upper_bounds = tf.reshape(tf.gather_nd(right_ep_f, point_index_gather),
batch_shape)
both_ok = tf.reduce_any(input_tensor=both_ok, axis=0)
return upper_bounds, lower_bounds, both_ok
|
[
"Returns",
"the",
"bounds",
"of",
"the",
"slice",
"at",
"each",
"stage",
"of",
"doubling",
"procedure",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/slice_sampler_utils.py#L129-L222
|
[
"def",
"slice_bounds_by_doubling",
"(",
"x_initial",
",",
"target_log_prob",
",",
"log_slice_heights",
",",
"max_doublings",
",",
"step_size",
",",
"seed",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'slice_bounds_by_doubling'",
",",
"[",
"x_initial",
",",
"log_slice_heights",
",",
"max_doublings",
",",
"step_size",
"]",
")",
":",
"seed_gen",
"=",
"distributions",
".",
"SeedStream",
"(",
"seed",
",",
"salt",
"=",
"'slice_bounds_by_doubling'",
")",
"x_initial",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x_initial",
")",
"batch_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"x_initial",
")",
"dtype",
"=",
"step_size",
".",
"dtype",
".",
"base_dtype",
"left_endpoints",
"=",
"x_initial",
"+",
"step_size",
"*",
"tf",
".",
"random",
".",
"uniform",
"(",
"batch_shape",
",",
"minval",
"=",
"-",
"1.0",
",",
"maxval",
"=",
"0.0",
",",
"dtype",
"=",
"dtype",
",",
"seed",
"=",
"seed_gen",
"(",
")",
")",
"# Compute the increments by which we need to step the upper and lower bounds",
"# part of the doubling procedure.",
"left_increments",
",",
"widths",
"=",
"_left_doubling_increments",
"(",
"batch_shape",
",",
"max_doublings",
",",
"step_size",
",",
"seed",
"=",
"seed_gen",
"(",
")",
")",
"# The left and right end points. Shape (max_doublings+1,) + batch_shape.",
"left_endpoints",
"-=",
"left_increments",
"right_endpoints",
"=",
"left_endpoints",
"+",
"widths",
"# Test if these end points lie outside of the slice.",
"# Checks if the end points of the slice are outside the graph of the pdf.",
"left_ep_values",
"=",
"tf",
".",
"map_fn",
"(",
"target_log_prob",
",",
"left_endpoints",
")",
"right_ep_values",
"=",
"tf",
".",
"map_fn",
"(",
"target_log_prob",
",",
"right_endpoints",
")",
"left_ok",
"=",
"left_ep_values",
"<",
"log_slice_heights",
"right_ok",
"=",
"right_ep_values",
"<",
"log_slice_heights",
"both_ok",
"=",
"left_ok",
"&",
"right_ok",
"both_ok_f",
"=",
"tf",
".",
"reshape",
"(",
"both_ok",
",",
"[",
"max_doublings",
"+",
"1",
",",
"-",
"1",
"]",
")",
"best_interval_idx",
"=",
"_find_best_interval_idx",
"(",
"tf",
".",
"cast",
"(",
"both_ok_f",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
")",
"# Formats the above index as required to use with gather_nd.",
"point_index_gather",
"=",
"tf",
".",
"stack",
"(",
"[",
"best_interval_idx",
",",
"tf",
".",
"range",
"(",
"tf",
".",
"size",
"(",
"input",
"=",
"best_interval_idx",
")",
")",
"]",
",",
"axis",
"=",
"1",
",",
"name",
"=",
"'point_index_gather'",
")",
"left_ep_f",
"=",
"tf",
".",
"reshape",
"(",
"left_endpoints",
",",
"[",
"max_doublings",
"+",
"1",
",",
"-",
"1",
"]",
")",
"right_ep_f",
"=",
"tf",
".",
"reshape",
"(",
"right_endpoints",
",",
"[",
"max_doublings",
"+",
"1",
",",
"-",
"1",
"]",
")",
"# The x values of the uppper and lower bounds of the slices for each chain.",
"lower_bounds",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"gather_nd",
"(",
"left_ep_f",
",",
"point_index_gather",
")",
",",
"batch_shape",
")",
"upper_bounds",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"gather_nd",
"(",
"right_ep_f",
",",
"point_index_gather",
")",
",",
"batch_shape",
")",
"both_ok",
"=",
"tf",
".",
"reduce_any",
"(",
"input_tensor",
"=",
"both_ok",
",",
"axis",
"=",
"0",
")",
"return",
"upper_bounds",
",",
"lower_bounds",
",",
"both_ok"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
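For contrast with the vectorized interval search above, the stepwise doubling procedure of Neal (2003, Fig. 4) for a single chain fits in a few lines of plain Python. This is only a reference sketch with made-up names and a standard-normal toy target, not the while-loop benchmark implementation the docstring alludes to.

```python
import math
import random

def doubling_bounds_1d(x0, log_prob, log_slice_height, step_size, max_doublings,
                       rng=random):
    """Single-chain doubling search for slice bounds (Neal 2003, Fig. 4)."""
    left = x0 - step_size * rng.random()
    right = left + step_size
    k = max_doublings
    while k > 0 and (log_slice_height < log_prob(left) or
                     log_slice_height < log_prob(right)):
        if rng.random() < 0.5:
            left -= right - left     # double the interval to the left
        else:
            right += right - left    # double the interval to the right
        k -= 1
    return left, right

log_prob = lambda x: -0.5 * x * x - 0.5 * math.log(2.0 * math.pi)  # standard normal
height = log_prob(0.3) - 0.7  # a slice height below log_prob(x0)
print(doubling_bounds_1d(0.3, log_prob, height, step_size=0.5, max_doublings=10))
```

With `max_doublings` exhausted the bracket may still fail to cover the slice, which is why the batched version above also reports `both_ok`.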
test
|
_sample_with_shrinkage
|
Samples from the slice by applying shrinkage for rejected points.
Implements the one dimensional slice sampling algorithm of Neal (2003), with a
doubling algorithm (Neal 2003 P715 Fig. 4), which doubles the size of the
interval at each iteration and shrinkage (Neal 2003 P716 Fig. 5), which
reduces the width of the slice when a selected point is rejected, by setting
the relevant bound to that value. Randomly sampled points are checked for
two criteria: that they lie within the slice and that they pass the
acceptability check (Neal 2003 P717 Fig. 6), which tests that the new state
could have generated the previous one.
Args:
x_initial: A tensor of any shape. The initial positions of the chains. This
function assumes that all the dimensions of `x_initial` are batch
dimensions (i.e. the event shape is `[]`).
target_log_prob: Callable accepting a tensor like `x_initial` and returning
a tensor containing the log density at that point of the same shape.
log_slice_heights: Tensor of the same shape and dtype as the return value
of `target_log_prob` when applied to `x_initial`. The log of the height of
the chosen slice.
step_size: A tensor of shape and dtype compatible with `x_initial`. The min
interval size in the doubling algorithm.
lower_bounds: Tensor of same shape and dtype as `x_initial`. Slice lower
bounds for each chain.
upper_bounds: Tensor of same shape and dtype as `x_initial`. Slice upper
bounds for each chain.
seed: (Optional) positive int. The random seed. If None, no seed is set.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'find_slice_bounds').
Returns:
x_proposed: A tensor of the same shape and dtype as `x_initial`. The next
proposed state of the chain.
|
tensorflow_probability/python/mcmc/internal/slice_sampler_utils.py
|
def _sample_with_shrinkage(x_initial, target_log_prob, log_slice_heights,
step_size, lower_bounds, upper_bounds, seed=None,
name=None):
"""Samples from the slice by applying shrinkage for rejected points.
Implements the one dimensional slice sampling algorithm of Neal (2003), with a
doubling algorithm (Neal 2003 P715 Fig. 4), which doubles the size of the
interval at each iteration and shrinkage (Neal 2003 P716 Fig. 5), which
reduces the width of the slice when a selected point is rejected, by setting
  the relevant bound to that value. Randomly sampled points are checked for
two criteria: that they lie within the slice and that they pass the
acceptability check (Neal 2003 P717 Fig. 6), which tests that the new state
could have generated the previous one.
Args:
x_initial: A tensor of any shape. The initial positions of the chains. This
function assumes that all the dimensions of `x_initial` are batch
dimensions (i.e. the event shape is `[]`).
target_log_prob: Callable accepting a tensor like `x_initial` and returning
a tensor containing the log density at that point of the same shape.
log_slice_heights: Tensor of the same shape and dtype as the return value
of `target_log_prob` when applied to `x_initial`. The log of the height of
the chosen slice.
step_size: A tensor of shape and dtype compatible with `x_initial`. The min
interval size in the doubling algorithm.
lower_bounds: Tensor of same shape and dtype as `x_initial`. Slice lower
bounds for each chain.
upper_bounds: Tensor of same shape and dtype as `x_initial`. Slice upper
bounds for each chain.
seed: (Optional) positive int. The random seed. If None, no seed is set.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'find_slice_bounds').
Returns:
x_proposed: A tensor of the same shape and dtype as `x_initial`. The next
proposed state of the chain.
"""
with tf.compat.v1.name_scope(
name, 'sample_with_shrinkage',
[x_initial, log_slice_heights, step_size, lower_bounds, upper_bounds]):
seed_gen = distributions.SeedStream(seed, salt='_sample_with_shrinkage')
# Keeps track of whether an acceptable sample has been found for the chain.
found = tf.zeros_like(x_initial, dtype=tf.bool)
cond = lambda found, *ignored_args: ~tf.reduce_all(input_tensor=found)
x_next = tf.identity(x_initial)
x_initial_shape = tf.shape(input=x_initial)
x_initial_dtype = x_initial.dtype.base_dtype
def _body(found, left, right, x_next):
"""Iterates until every chain has found a suitable next state."""
proportions = tf.random.uniform(
x_initial_shape, dtype=x_initial_dtype, seed=seed_gen())
x_proposed = tf.where(~found, left + proportions * (right - left), x_next)
accept_res = _test_acceptance(x_initial, target_log_prob=target_log_prob,
decided=found,
log_slice_heights=log_slice_heights,
x_proposed=x_proposed, step_size=step_size,
lower_bounds=left, upper_bounds=right)
boundary_test = log_slice_heights < target_log_prob(x_proposed)
can_accept = boundary_test & accept_res
next_found = found | can_accept
# Note that it might seem that we are moving the left and right end points
# even if the point has been accepted (which is contrary to the stated
# algorithm in Neal). However, this does not matter because the endpoints
# for points that have been already accepted are not used again so it
# doesn't matter what we do with them.
next_left = tf.where(x_proposed < x_initial, x_proposed, left)
next_right = tf.where(x_proposed >= x_initial, x_proposed, right)
return next_found, next_left, next_right, x_proposed
return tf.while_loop(
cond=cond,
body=_body,
loop_vars=(found, lower_bounds, upper_bounds, x_next))[-1]
|
def _sample_with_shrinkage(x_initial, target_log_prob, log_slice_heights,
step_size, lower_bounds, upper_bounds, seed=None,
name=None):
"""Samples from the slice by applying shrinkage for rejected points.
Implements the one dimensional slice sampling algorithm of Neal (2003), with a
doubling algorithm (Neal 2003 P715 Fig. 4), which doubles the size of the
interval at each iteration and shrinkage (Neal 2003 P716 Fig. 5), which
reduces the width of the slice when a selected point is rejected, by setting
  the relevant bound to that value. Randomly sampled points are checked for
two criteria: that they lie within the slice and that they pass the
acceptability check (Neal 2003 P717 Fig. 6), which tests that the new state
could have generated the previous one.
Args:
x_initial: A tensor of any shape. The initial positions of the chains. This
function assumes that all the dimensions of `x_initial` are batch
dimensions (i.e. the event shape is `[]`).
target_log_prob: Callable accepting a tensor like `x_initial` and returning
a tensor containing the log density at that point of the same shape.
log_slice_heights: Tensor of the same shape and dtype as the return value
of `target_log_prob` when applied to `x_initial`. The log of the height of
the chosen slice.
step_size: A tensor of shape and dtype compatible with `x_initial`. The min
interval size in the doubling algorithm.
lower_bounds: Tensor of same shape and dtype as `x_initial`. Slice lower
bounds for each chain.
upper_bounds: Tensor of same shape and dtype as `x_initial`. Slice upper
bounds for each chain.
seed: (Optional) positive int. The random seed. If None, no seed is set.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'find_slice_bounds').
Returns:
x_proposed: A tensor of the same shape and dtype as `x_initial`. The next
proposed state of the chain.
"""
with tf.compat.v1.name_scope(
name, 'sample_with_shrinkage',
[x_initial, log_slice_heights, step_size, lower_bounds, upper_bounds]):
seed_gen = distributions.SeedStream(seed, salt='_sample_with_shrinkage')
# Keeps track of whether an acceptable sample has been found for the chain.
found = tf.zeros_like(x_initial, dtype=tf.bool)
cond = lambda found, *ignored_args: ~tf.reduce_all(input_tensor=found)
x_next = tf.identity(x_initial)
x_initial_shape = tf.shape(input=x_initial)
x_initial_dtype = x_initial.dtype.base_dtype
def _body(found, left, right, x_next):
"""Iterates until every chain has found a suitable next state."""
proportions = tf.random.uniform(
x_initial_shape, dtype=x_initial_dtype, seed=seed_gen())
x_proposed = tf.where(~found, left + proportions * (right - left), x_next)
accept_res = _test_acceptance(x_initial, target_log_prob=target_log_prob,
decided=found,
log_slice_heights=log_slice_heights,
x_proposed=x_proposed, step_size=step_size,
lower_bounds=left, upper_bounds=right)
boundary_test = log_slice_heights < target_log_prob(x_proposed)
can_accept = boundary_test & accept_res
next_found = found | can_accept
# Note that it might seem that we are moving the left and right end points
# even if the point has been accepted (which is contrary to the stated
# algorithm in Neal). However, this does not matter because the endpoints
# for points that have been already accepted are not used again so it
# doesn't matter what we do with them.
next_left = tf.where(x_proposed < x_initial, x_proposed, left)
next_right = tf.where(x_proposed >= x_initial, x_proposed, right)
return next_found, next_left, next_right, x_proposed
return tf.while_loop(
cond=cond,
body=_body,
loop_vars=(found, lower_bounds, upper_bounds, x_next))[-1]
|
[
"Samples",
"from",
"the",
"slice",
"by",
"applying",
"shrinkage",
"for",
"rejected",
"points",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/slice_sampler_utils.py#L304-L376
|
[
"def",
"_sample_with_shrinkage",
"(",
"x_initial",
",",
"target_log_prob",
",",
"log_slice_heights",
",",
"step_size",
",",
"lower_bounds",
",",
"upper_bounds",
",",
"seed",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'sample_with_shrinkage'",
",",
"[",
"x_initial",
",",
"log_slice_heights",
",",
"step_size",
",",
"lower_bounds",
",",
"upper_bounds",
"]",
")",
":",
"seed_gen",
"=",
"distributions",
".",
"SeedStream",
"(",
"seed",
",",
"salt",
"=",
"'_sample_with_shrinkage'",
")",
"# Keeps track of whether an acceptable sample has been found for the chain.",
"found",
"=",
"tf",
".",
"zeros_like",
"(",
"x_initial",
",",
"dtype",
"=",
"tf",
".",
"bool",
")",
"cond",
"=",
"lambda",
"found",
",",
"*",
"ignored_args",
":",
"~",
"tf",
".",
"reduce_all",
"(",
"input_tensor",
"=",
"found",
")",
"x_next",
"=",
"tf",
".",
"identity",
"(",
"x_initial",
")",
"x_initial_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"x_initial",
")",
"x_initial_dtype",
"=",
"x_initial",
".",
"dtype",
".",
"base_dtype",
"def",
"_body",
"(",
"found",
",",
"left",
",",
"right",
",",
"x_next",
")",
":",
"\"\"\"Iterates until every chain has found a suitable next state.\"\"\"",
"proportions",
"=",
"tf",
".",
"random",
".",
"uniform",
"(",
"x_initial_shape",
",",
"dtype",
"=",
"x_initial_dtype",
",",
"seed",
"=",
"seed_gen",
"(",
")",
")",
"x_proposed",
"=",
"tf",
".",
"where",
"(",
"~",
"found",
",",
"left",
"+",
"proportions",
"*",
"(",
"right",
"-",
"left",
")",
",",
"x_next",
")",
"accept_res",
"=",
"_test_acceptance",
"(",
"x_initial",
",",
"target_log_prob",
"=",
"target_log_prob",
",",
"decided",
"=",
"found",
",",
"log_slice_heights",
"=",
"log_slice_heights",
",",
"x_proposed",
"=",
"x_proposed",
",",
"step_size",
"=",
"step_size",
",",
"lower_bounds",
"=",
"left",
",",
"upper_bounds",
"=",
"right",
")",
"boundary_test",
"=",
"log_slice_heights",
"<",
"target_log_prob",
"(",
"x_proposed",
")",
"can_accept",
"=",
"boundary_test",
"&",
"accept_res",
"next_found",
"=",
"found",
"|",
"can_accept",
"# Note that it might seem that we are moving the left and right end points",
"# even if the point has been accepted (which is contrary to the stated",
"# algorithm in Neal). However, this does not matter because the endpoints",
"# for points that have been already accepted are not used again so it",
"# doesn't matter what we do with them.",
"next_left",
"=",
"tf",
".",
"where",
"(",
"x_proposed",
"<",
"x_initial",
",",
"x_proposed",
",",
"left",
")",
"next_right",
"=",
"tf",
".",
"where",
"(",
"x_proposed",
">=",
"x_initial",
",",
"x_proposed",
",",
"right",
")",
"return",
"next_found",
",",
"next_left",
",",
"next_right",
",",
"x_proposed",
"return",
"tf",
".",
"while_loop",
"(",
"cond",
"=",
"cond",
",",
"body",
"=",
"_body",
",",
"loop_vars",
"=",
"(",
"found",
",",
"lower_bounds",
",",
"upper_bounds",
",",
"x_next",
")",
")",
"[",
"-",
"1",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
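The shrinkage step above is the batched form of a simple reject-and-shrink loop (Neal 2003, Fig. 5). A single-chain sketch follows; the `accept` callable stands in for the doubling acceptability test (`_test_acceptance`) and defaults to always-true here, an assumption made purely to keep the example self-contained.

```python
import random

def shrink_and_sample_1d(x0, log_prob, log_slice_height, left, right,
                         rng=random, accept=lambda x: True):
    """Draw uniformly from (left, right); shrink the bracket toward x0 on rejection."""
    while True:
        x1 = left + rng.random() * (right - left)
        if log_slice_height < log_prob(x1) and accept(x1):
            return x1
        # The rejected point becomes the new endpoint on its side of x0.
        if x1 < x0:
            left = x1
        else:
            right = x1

log_prob = lambda x: -0.5 * x * x
height = log_prob(1.0) - 0.4  # height below log_prob(1.0), so x0 lies inside the slice
print(shrink_and_sample_1d(1.0, log_prob, height, left=-3.0, right=3.0))
```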
test
|
slice_sampler_one_dim
|
For a given x position in each Markov chain, returns the next x.
Applies the one dimensional slice sampling algorithm as defined in Neal (2003)
to an input tensor x of shape (num_chains,) where num_chains is the number of
simulataneous Markov chains, and returns the next tensor x of shape
(num_chains,) when these chains are evolved by the slice sampling algorithm.
Args:
target_log_prob: Callable accepting a tensor like `x_initial` and returning
a tensor containing the log density at that point of the same shape.
x_initial: A tensor of any shape. The initial positions of the chains. This
function assumes that all the dimensions of `x_initial` are batch
dimensions (i.e. the event shape is `[]`).
step_size: A tensor of shape and dtype compatible with `x_initial`. The min
interval size in the doubling algorithm.
max_doublings: Scalar tensor of dtype `tf.int32`. The maximum number of
doublings to try to find the slice bounds.
seed: (Optional) positive int. The random seed. If None, no seed is set.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'find_slice_bounds').
Returns:
retval: A tensor of the same shape and dtype as `x_initial`. The next state
of the Markov chain.
next_target_log_prob: The target log density evaluated at `retval`.
bounds_satisfied: A tensor of bool dtype and shape batch dimensions.
upper_bounds: Tensor of the same shape and dtype as `x_initial`. The upper
bounds for the slice found.
lower_bounds: Tensor of the same shape and dtype as `x_initial`. The lower
bounds for the slice found.
|
tensorflow_probability/python/mcmc/internal/slice_sampler_utils.py
|
def slice_sampler_one_dim(target_log_prob, x_initial, step_size=0.01,
max_doublings=30, seed=None, name=None):
"""For a given x position in each Markov chain, returns the next x.
Applies the one dimensional slice sampling algorithm as defined in Neal (2003)
to an input tensor x of shape (num_chains,) where num_chains is the number of
  simultaneous Markov chains, and returns the next tensor x of shape
(num_chains,) when these chains are evolved by the slice sampling algorithm.
Args:
target_log_prob: Callable accepting a tensor like `x_initial` and returning
a tensor containing the log density at that point of the same shape.
x_initial: A tensor of any shape. The initial positions of the chains. This
function assumes that all the dimensions of `x_initial` are batch
dimensions (i.e. the event shape is `[]`).
step_size: A tensor of shape and dtype compatible with `x_initial`. The min
interval size in the doubling algorithm.
max_doublings: Scalar tensor of dtype `tf.int32`. The maximum number of
doublings to try to find the slice bounds.
seed: (Optional) positive int. The random seed. If None, no seed is set.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'find_slice_bounds').
Returns:
retval: A tensor of the same shape and dtype as `x_initial`. The next state
of the Markov chain.
next_target_log_prob: The target log density evaluated at `retval`.
bounds_satisfied: A tensor of bool dtype and shape batch dimensions.
upper_bounds: Tensor of the same shape and dtype as `x_initial`. The upper
bounds for the slice found.
lower_bounds: Tensor of the same shape and dtype as `x_initial`. The lower
bounds for the slice found.
"""
with tf.compat.v1.name_scope(name, 'slice_sampler_one_dim',
[x_initial, step_size, max_doublings]):
x_initial = tf.convert_to_tensor(value=x_initial)
# Obtain the input dtype of the array.
dtype = x_initial.dtype.base_dtype
# Select the height of the slice. Tensor of shape x_initial.shape.
log_slice_heights = target_log_prob(x_initial) - tf.random.gamma(
tf.shape(input=x_initial), alpha=1, dtype=dtype, seed=seed)
# Given the above x and slice heights, compute the bounds of the slice for
# each chain.
upper_bounds, lower_bounds, bounds_satisfied = slice_bounds_by_doubling(
x_initial, target_log_prob, log_slice_heights, max_doublings, step_size,
seed=seed)
retval = _sample_with_shrinkage(x_initial, target_log_prob=target_log_prob,
log_slice_heights=log_slice_heights,
step_size=step_size,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds, seed=seed)
return (retval, target_log_prob(retval), bounds_satisfied,
upper_bounds, lower_bounds)
|
def slice_sampler_one_dim(target_log_prob, x_initial, step_size=0.01,
max_doublings=30, seed=None, name=None):
"""For a given x position in each Markov chain, returns the next x.
Applies the one dimensional slice sampling algorithm as defined in Neal (2003)
to an input tensor x of shape (num_chains,) where num_chains is the number of
  simultaneous Markov chains, and returns the next tensor x of shape
(num_chains,) when these chains are evolved by the slice sampling algorithm.
Args:
target_log_prob: Callable accepting a tensor like `x_initial` and returning
a tensor containing the log density at that point of the same shape.
x_initial: A tensor of any shape. The initial positions of the chains. This
function assumes that all the dimensions of `x_initial` are batch
dimensions (i.e. the event shape is `[]`).
step_size: A tensor of shape and dtype compatible with `x_initial`. The min
interval size in the doubling algorithm.
max_doublings: Scalar tensor of dtype `tf.int32`. The maximum number of
doublings to try to find the slice bounds.
seed: (Optional) positive int. The random seed. If None, no seed is set.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'find_slice_bounds').
Returns:
retval: A tensor of the same shape and dtype as `x_initial`. The next state
of the Markov chain.
next_target_log_prob: The target log density evaluated at `retval`.
bounds_satisfied: A tensor of bool dtype and shape batch dimensions.
upper_bounds: Tensor of the same shape and dtype as `x_initial`. The upper
bounds for the slice found.
lower_bounds: Tensor of the same shape and dtype as `x_initial`. The lower
bounds for the slice found.
"""
with tf.compat.v1.name_scope(name, 'slice_sampler_one_dim',
[x_initial, step_size, max_doublings]):
x_initial = tf.convert_to_tensor(value=x_initial)
# Obtain the input dtype of the array.
dtype = x_initial.dtype.base_dtype
# Select the height of the slice. Tensor of shape x_initial.shape.
log_slice_heights = target_log_prob(x_initial) - tf.random.gamma(
tf.shape(input=x_initial), alpha=1, dtype=dtype, seed=seed)
# Given the above x and slice heights, compute the bounds of the slice for
# each chain.
upper_bounds, lower_bounds, bounds_satisfied = slice_bounds_by_doubling(
x_initial, target_log_prob, log_slice_heights, max_doublings, step_size,
seed=seed)
retval = _sample_with_shrinkage(x_initial, target_log_prob=target_log_prob,
log_slice_heights=log_slice_heights,
step_size=step_size,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds, seed=seed)
return (retval, target_log_prob(retval), bounds_satisfied,
upper_bounds, lower_bounds)
|
[
"For",
"a",
"given",
"x",
"position",
"in",
"each",
"Markov",
"chain",
"returns",
"the",
"next",
"x",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/slice_sampler_utils.py#L379-L431
|
[
"def",
"slice_sampler_one_dim",
"(",
"target_log_prob",
",",
"x_initial",
",",
"step_size",
"=",
"0.01",
",",
"max_doublings",
"=",
"30",
",",
"seed",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'slice_sampler_one_dim'",
",",
"[",
"x_initial",
",",
"step_size",
",",
"max_doublings",
"]",
")",
":",
"x_initial",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x_initial",
")",
"# Obtain the input dtype of the array.",
"dtype",
"=",
"x_initial",
".",
"dtype",
".",
"base_dtype",
"# Select the height of the slice. Tensor of shape x_initial.shape.",
"log_slice_heights",
"=",
"target_log_prob",
"(",
"x_initial",
")",
"-",
"tf",
".",
"random",
".",
"gamma",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"x_initial",
")",
",",
"alpha",
"=",
"1",
",",
"dtype",
"=",
"dtype",
",",
"seed",
"=",
"seed",
")",
"# Given the above x and slice heights, compute the bounds of the slice for",
"# each chain.",
"upper_bounds",
",",
"lower_bounds",
",",
"bounds_satisfied",
"=",
"slice_bounds_by_doubling",
"(",
"x_initial",
",",
"target_log_prob",
",",
"log_slice_heights",
",",
"max_doublings",
",",
"step_size",
",",
"seed",
"=",
"seed",
")",
"retval",
"=",
"_sample_with_shrinkage",
"(",
"x_initial",
",",
"target_log_prob",
"=",
"target_log_prob",
",",
"log_slice_heights",
"=",
"log_slice_heights",
",",
"step_size",
"=",
"step_size",
",",
"lower_bounds",
"=",
"lower_bounds",
",",
"upper_bounds",
"=",
"upper_bounds",
",",
"seed",
"=",
"seed",
")",
"return",
"(",
"retval",
",",
"target_log_prob",
"(",
"retval",
")",
",",
"bounds_satisfied",
",",
"upper_bounds",
",",
"lower_bounds",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
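One detail worth noting in `slice_sampler_one_dim` above: the slice height is drawn as `target_log_prob(x_initial)` minus a `Gamma(alpha=1)` variate, i.e. minus an Exponential(1), which is the log-space form of choosing a height `u * f(x0)` with `u ~ Uniform(0, 1)`. The NumPy check below uses illustrative names only.

```python
import numpy as np

rng = np.random.default_rng(42)
log_f_x0 = -1.3          # stand-in for target_log_prob(x_initial) at one point
n = 200_000

# Exponential(1) == Gamma(shape=1, scale=1); -log(1 - U) for U ~ Uniform[0, 1)
# has the same distribution, so both constructions give the same slice heights.
heights_gamma = log_f_x0 - rng.gamma(1.0, 1.0, size=n)
heights_unif = log_f_x0 + np.log(1.0 - rng.uniform(size=n))

print(heights_gamma.mean(), heights_unif.mean())  # both close to log_f_x0 - 1 = -2.3
```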
test
|
sample_annealed_importance_chain
|
Runs annealed importance sampling (AIS) to estimate normalizing constants.
This function uses an MCMC transition operator (e.g., Hamiltonian Monte Carlo)
to sample from a series of distributions that slowly interpolates between
an initial "proposal" distribution:
`exp(proposal_log_prob_fn(x) - proposal_log_normalizer)`
and the target distribution:
`exp(target_log_prob_fn(x) - target_log_normalizer)`,
accumulating importance weights along the way. The product of these
importance weights gives an unbiased estimate of the ratio of the
normalizing constants of the initial distribution and the target
distribution:
`E[exp(ais_weights)] = exp(target_log_normalizer - proposal_log_normalizer)`.
Note: When running in graph mode, `proposal_log_prob_fn` and
`target_log_prob_fn` are called exactly three times (although this may be
reduced to two times in the future).
Args:
num_steps: Integer number of Markov chain updates to run. More
iterations means more expense, but smoother annealing between q
and p, which in turn means exponentially lower variance for the
normalizing constant estimator.
proposal_log_prob_fn: Python callable that returns the log density of the
initial distribution.
target_log_prob_fn: Python callable which takes an argument like
`current_state` (or `*current_state` if it's a list) and returns its
(possibly unnormalized) log-density under the target distribution.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
make_kernel_fn: Python `callable` which returns a `TransitionKernel`-like
object. Must take one argument representing the `TransitionKernel`'s
`target_log_prob_fn`. The `target_log_prob_fn` argument represents the
`TransitionKernel`'s target log distribution. Note:
`sample_annealed_importance_chain` creates a new `target_log_prob_fn`
which is an interpolation between the supplied `target_log_prob_fn` and
`proposal_log_prob_fn`; it is this interpolated function which is used as
an argument to `make_kernel_fn`.
parallel_iterations: The number of iterations allowed to run in parallel.
It must be a positive integer. See `tf.while_loop` for more details.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "sample_annealed_importance_chain").
Returns:
next_state: `Tensor` or Python list of `Tensor`s representing the
state(s) of the Markov chain(s) at the final iteration. Has same shape as
input `current_state`.
ais_weights: Tensor with the estimated weight(s). Has shape matching
`target_log_prob_fn(current_state)`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
#### Examples
##### Estimate the normalizing constant of a log-gamma distribution.
```python
tfd = tfp.distributions
# Run 100 AIS chains in parallel
num_chains = 100
dims = 20
dtype = np.float32
proposal = tfd.MultivariateNormalDiag(
loc=tf.zeros([dims], dtype=dtype))
target = tfd.TransformedDistribution(
distribution=tfd.Gamma(concentration=dtype(2),
rate=dtype(3)),
bijector=tfp.bijectors.Invert(tfp.bijectors.Exp()),
event_shape=[dims])
chains_state, ais_weights, kernels_results = (
tfp.mcmc.sample_annealed_importance_chain(
num_steps=1000,
proposal_log_prob_fn=proposal.log_prob,
target_log_prob_fn=target.log_prob,
current_state=proposal.sample(num_chains),
make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tlp_fn,
step_size=0.2,
num_leapfrog_steps=2)))
log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)
- np.log(num_chains))
log_true_normalizer = tf.lgamma(2.) - 2. * tf.log(3.)
```
##### Estimate marginal likelihood of a Bayesian regression model.
```python
tfd = tfp.distributions
def make_prior(dims, dtype):
return tfd.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype))
def make_likelihood(weights, x):
return tfd.MultivariateNormalDiag(
loc=tf.tensordot(weights, x, axes=[[0], [-1]]))
# Run 100 AIS chains in parallel
num_chains = 100
dims = 10
dtype = np.float32
# Make training data.
x = np.random.randn(num_chains, dims).astype(dtype)
true_weights = np.random.randn(dims).astype(dtype)
y = np.dot(x, true_weights) + np.random.randn(num_chains)
# Setup model.
prior = make_prior(dims, dtype)
def target_log_prob_fn(weights):
return prior.log_prob(weights) + make_likelihood(weights, x).log_prob(y)
proposal = tfd.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype))
weight_samples, ais_weights, kernel_results = (
tfp.mcmc.sample_annealed_importance_chain(
num_steps=1000,
proposal_log_prob_fn=proposal.log_prob,
target_log_prob_fn=target_log_prob_fn,
current_state=tf.zeros([num_chains, dims], dtype),
make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tlp_fn,
step_size=0.1,
num_leapfrog_steps=2)))
log_normalizer_estimate = (tf.reduce_logsumexp(ais_weights)
- np.log(num_chains))
```
|
tensorflow_probability/python/mcmc/sample_annealed_importance.py
|
def sample_annealed_importance_chain(
num_steps,
proposal_log_prob_fn,
target_log_prob_fn,
current_state,
make_kernel_fn,
parallel_iterations=10,
name=None):
"""Runs annealed importance sampling (AIS) to estimate normalizing constants.
This function uses an MCMC transition operator (e.g., Hamiltonian Monte Carlo)
to sample from a series of distributions that slowly interpolates between
an initial "proposal" distribution:
`exp(proposal_log_prob_fn(x) - proposal_log_normalizer)`
and the target distribution:
`exp(target_log_prob_fn(x) - target_log_normalizer)`,
accumulating importance weights along the way. The product of these
importance weights gives an unbiased estimate of the ratio of the
normalizing constants of the initial distribution and the target
distribution:
`E[exp(ais_weights)] = exp(target_log_normalizer - proposal_log_normalizer)`.
Note: When running in graph mode, `proposal_log_prob_fn` and
`target_log_prob_fn` are called exactly three times (although this may be
reduced to two times in the future).
Args:
num_steps: Integer number of Markov chain updates to run. More
iterations means more expense, but smoother annealing between q
and p, which in turn means exponentially lower variance for the
normalizing constant estimator.
proposal_log_prob_fn: Python callable that returns the log density of the
initial distribution.
target_log_prob_fn: Python callable which takes an argument like
`current_state` (or `*current_state` if it's a list) and returns its
(possibly unnormalized) log-density under the target distribution.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
make_kernel_fn: Python `callable` which returns a `TransitionKernel`-like
object. Must take one argument representing the `TransitionKernel`'s
`target_log_prob_fn`. The `target_log_prob_fn` argument represents the
`TransitionKernel`'s target log distribution. Note:
`sample_annealed_importance_chain` creates a new `target_log_prob_fn`
which is an interpolation between the supplied `target_log_prob_fn` and
`proposal_log_prob_fn`; it is this interpolated function which is used as
an argument to `make_kernel_fn`.
parallel_iterations: The number of iterations allowed to run in parallel.
It must be a positive integer. See `tf.while_loop` for more details.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "sample_annealed_importance_chain").
Returns:
next_state: `Tensor` or Python list of `Tensor`s representing the
state(s) of the Markov chain(s) at the final iteration. Has same shape as
input `current_state`.
ais_weights: Tensor with the estimated weight(s). Has shape matching
`target_log_prob_fn(current_state)`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
#### Examples
##### Estimate the normalizing constant of a log-gamma distribution.
```python
tfd = tfp.distributions
# Run 100 AIS chains in parallel
num_chains = 100
dims = 20
dtype = np.float32
  proposal = tfd.MultivariateNormalDiag(
loc=tf.zeros([dims], dtype=dtype))
target = tfd.TransformedDistribution(
distribution=tfd.Gamma(concentration=dtype(2),
rate=dtype(3)),
bijector=tfp.bijectors.Invert(tfp.bijectors.Exp()),
event_shape=[dims])
chains_state, ais_weights, kernels_results = (
tfp.mcmc.sample_annealed_importance_chain(
num_steps=1000,
proposal_log_prob_fn=proposal.log_prob,
target_log_prob_fn=target.log_prob,
current_state=proposal.sample(num_chains),
make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tlp_fn,
step_size=0.2,
num_leapfrog_steps=2)))
log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)
- np.log(num_chains))
log_true_normalizer = tf.lgamma(2.) - 2. * tf.log(3.)
```
##### Estimate marginal likelihood of a Bayesian regression model.
```python
tfd = tfp.distributions
def make_prior(dims, dtype):
return tfd.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype))
def make_likelihood(weights, x):
return tfd.MultivariateNormalDiag(
loc=tf.tensordot(weights, x, axes=[[0], [-1]]))
# Run 100 AIS chains in parallel
num_chains = 100
dims = 10
dtype = np.float32
# Make training data.
x = np.random.randn(num_chains, dims).astype(dtype)
true_weights = np.random.randn(dims).astype(dtype)
y = np.dot(x, true_weights) + np.random.randn(num_chains)
# Setup model.
prior = make_prior(dims, dtype)
def target_log_prob_fn(weights):
return prior.log_prob(weights) + make_likelihood(weights, x).log_prob(y)
proposal = tfd.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype))
weight_samples, ais_weights, kernel_results = (
tfp.mcmc.sample_annealed_importance_chain(
num_steps=1000,
proposal_log_prob_fn=proposal.log_prob,
        target_log_prob_fn=target_log_prob_fn,
current_state=tf.zeros([num_chains, dims], dtype),
make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tlp_fn,
step_size=0.1,
num_leapfrog_steps=2)))
log_normalizer_estimate = (tf.reduce_logsumexp(ais_weights)
- np.log(num_chains))
```
"""
with tf.compat.v1.name_scope(name, "sample_annealed_importance_chain",
[num_steps, current_state]):
num_steps = tf.convert_to_tensor(
value=num_steps, dtype=tf.int32, name="num_steps")
if mcmc_util.is_list_like(current_state):
current_state = [
tf.convert_to_tensor(value=s, name="current_state")
for s in current_state
]
else:
current_state = tf.convert_to_tensor(
value=current_state, name="current_state")
def _make_convex_combined_log_prob_fn(iter_):
def _fn(*args):
p = tf.identity(proposal_log_prob_fn(*args), name="proposal_log_prob")
t = tf.identity(target_log_prob_fn(*args), name="target_log_prob")
dtype = p.dtype.base_dtype
beta = tf.cast(iter_ + 1, dtype) / tf.cast(num_steps, dtype)
return tf.identity(beta * t + (1. - beta) * p,
name="convex_combined_log_prob")
return _fn
def _loop_body(iter_, ais_weights, current_state, kernel_results):
"""Closure which implements `tf.while_loop` body."""
x = (current_state if mcmc_util.is_list_like(current_state)
else [current_state])
proposal_log_prob = proposal_log_prob_fn(*x)
target_log_prob = target_log_prob_fn(*x)
ais_weights += ((target_log_prob - proposal_log_prob) /
tf.cast(num_steps, ais_weights.dtype))
kernel = make_kernel_fn(_make_convex_combined_log_prob_fn(iter_))
next_state, inner_results = kernel.one_step(
current_state, kernel_results.inner_results)
kernel_results = AISResults(
proposal_log_prob=proposal_log_prob,
target_log_prob=target_log_prob,
inner_results=inner_results,
)
return [iter_ + 1, ais_weights, next_state, kernel_results]
def _bootstrap_results(init_state):
"""Creates first version of `previous_kernel_results`."""
kernel = make_kernel_fn(_make_convex_combined_log_prob_fn(iter_=0))
inner_results = kernel.bootstrap_results(init_state)
convex_combined_log_prob = inner_results.accepted_results.target_log_prob
dtype = convex_combined_log_prob.dtype.as_numpy_dtype
shape = tf.shape(input=convex_combined_log_prob)
proposal_log_prob = tf.fill(shape, dtype(np.nan),
name="bootstrap_proposal_log_prob")
target_log_prob = tf.fill(shape, dtype(np.nan),
name="target_target_log_prob")
return AISResults(
proposal_log_prob=proposal_log_prob,
target_log_prob=target_log_prob,
inner_results=inner_results,
)
previous_kernel_results = _bootstrap_results(current_state)
inner_results = previous_kernel_results.inner_results
ais_weights = tf.zeros(
shape=tf.broadcast_dynamic_shape(
tf.shape(input=inner_results.proposed_results.target_log_prob),
tf.shape(input=inner_results.accepted_results.target_log_prob)),
dtype=inner_results.proposed_results.target_log_prob.dtype.base_dtype)
[_, ais_weights, current_state, kernel_results] = tf.while_loop(
cond=lambda iter_, *args: iter_ < num_steps,
body=_loop_body,
loop_vars=[
np.int32(0), # iter_
ais_weights,
current_state,
previous_kernel_results,
],
parallel_iterations=parallel_iterations)
return [current_state, ais_weights, kernel_results]
|
def sample_annealed_importance_chain(
num_steps,
proposal_log_prob_fn,
target_log_prob_fn,
current_state,
make_kernel_fn,
parallel_iterations=10,
name=None):
"""Runs annealed importance sampling (AIS) to estimate normalizing constants.
This function uses an MCMC transition operator (e.g., Hamiltonian Monte Carlo)
to sample from a series of distributions that slowly interpolates between
an initial "proposal" distribution:
`exp(proposal_log_prob_fn(x) - proposal_log_normalizer)`
and the target distribution:
`exp(target_log_prob_fn(x) - target_log_normalizer)`,
accumulating importance weights along the way. The product of these
importance weights gives an unbiased estimate of the ratio of the
normalizing constants of the initial distribution and the target
distribution:
`E[exp(ais_weights)] = exp(target_log_normalizer - proposal_log_normalizer)`.
Note: When running in graph mode, `proposal_log_prob_fn` and
`target_log_prob_fn` are called exactly three times (although this may be
reduced to two times in the future).
Args:
num_steps: Integer number of Markov chain updates to run. More
iterations means more expense, but smoother annealing between q
and p, which in turn means exponentially lower variance for the
normalizing constant estimator.
proposal_log_prob_fn: Python callable that returns the log density of the
initial distribution.
target_log_prob_fn: Python callable which takes an argument like
`current_state` (or `*current_state` if it's a list) and returns its
(possibly unnormalized) log-density under the target distribution.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
make_kernel_fn: Python `callable` which returns a `TransitionKernel`-like
object. Must take one argument representing the `TransitionKernel`'s
`target_log_prob_fn`. The `target_log_prob_fn` argument represents the
`TransitionKernel`'s target log distribution. Note:
`sample_annealed_importance_chain` creates a new `target_log_prob_fn`
which is an interpolation between the supplied `target_log_prob_fn` and
`proposal_log_prob_fn`; it is this interpolated function which is used as
an argument to `make_kernel_fn`.
parallel_iterations: The number of iterations allowed to run in parallel.
It must be a positive integer. See `tf.while_loop` for more details.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "sample_annealed_importance_chain").
Returns:
next_state: `Tensor` or Python list of `Tensor`s representing the
state(s) of the Markov chain(s) at the final iteration. Has same shape as
input `current_state`.
ais_weights: Tensor with the estimated weight(s). Has shape matching
`target_log_prob_fn(current_state)`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
#### Examples
##### Estimate the normalizing constant of a log-gamma distribution.
```python
tfd = tfp.distributions
# Run 100 AIS chains in parallel
num_chains = 100
dims = 20
dtype = np.float32
  proposal = tfd.MultivariateNormalDiag(
loc=tf.zeros([dims], dtype=dtype))
target = tfd.TransformedDistribution(
distribution=tfd.Gamma(concentration=dtype(2),
rate=dtype(3)),
bijector=tfp.bijectors.Invert(tfp.bijectors.Exp()),
event_shape=[dims])
chains_state, ais_weights, kernels_results = (
tfp.mcmc.sample_annealed_importance_chain(
num_steps=1000,
proposal_log_prob_fn=proposal.log_prob,
target_log_prob_fn=target.log_prob,
current_state=proposal.sample(num_chains),
make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tlp_fn,
step_size=0.2,
num_leapfrog_steps=2)))
log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)
- np.log(num_chains))
log_true_normalizer = tf.lgamma(2.) - 2. * tf.log(3.)
```
##### Estimate marginal likelihood of a Bayesian regression model.
```python
tfd = tfp.distributions
def make_prior(dims, dtype):
return tfd.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype))
def make_likelihood(weights, x):
return tfd.MultivariateNormalDiag(
loc=tf.tensordot(weights, x, axes=[[0], [-1]]))
# Run 100 AIS chains in parallel
num_chains = 100
dims = 10
dtype = np.float32
# Make training data.
x = np.random.randn(num_chains, dims).astype(dtype)
true_weights = np.random.randn(dims).astype(dtype)
y = np.dot(x, true_weights) + np.random.randn(num_chains)
# Setup model.
prior = make_prior(dims, dtype)
def target_log_prob_fn(weights):
return prior.log_prob(weights) + make_likelihood(weights, x).log_prob(y)
proposal = tfd.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype))
weight_samples, ais_weights, kernel_results = (
tfp.mcmc.sample_annealed_importance_chain(
num_steps=1000,
proposal_log_prob_fn=proposal.log_prob,
        target_log_prob_fn=target_log_prob_fn,
current_state=tf.zeros([num_chains, dims], dtype),
make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tlp_fn,
step_size=0.1,
num_leapfrog_steps=2)))
log_normalizer_estimate = (tf.reduce_logsumexp(ais_weights)
- np.log(num_chains))
```
"""
with tf.compat.v1.name_scope(name, "sample_annealed_importance_chain",
[num_steps, current_state]):
num_steps = tf.convert_to_tensor(
value=num_steps, dtype=tf.int32, name="num_steps")
if mcmc_util.is_list_like(current_state):
current_state = [
tf.convert_to_tensor(value=s, name="current_state")
for s in current_state
]
else:
current_state = tf.convert_to_tensor(
value=current_state, name="current_state")
def _make_convex_combined_log_prob_fn(iter_):
def _fn(*args):
p = tf.identity(proposal_log_prob_fn(*args), name="proposal_log_prob")
t = tf.identity(target_log_prob_fn(*args), name="target_log_prob")
dtype = p.dtype.base_dtype
beta = tf.cast(iter_ + 1, dtype) / tf.cast(num_steps, dtype)
return tf.identity(beta * t + (1. - beta) * p,
name="convex_combined_log_prob")
return _fn
def _loop_body(iter_, ais_weights, current_state, kernel_results):
"""Closure which implements `tf.while_loop` body."""
x = (current_state if mcmc_util.is_list_like(current_state)
else [current_state])
proposal_log_prob = proposal_log_prob_fn(*x)
target_log_prob = target_log_prob_fn(*x)
ais_weights += ((target_log_prob - proposal_log_prob) /
tf.cast(num_steps, ais_weights.dtype))
kernel = make_kernel_fn(_make_convex_combined_log_prob_fn(iter_))
next_state, inner_results = kernel.one_step(
current_state, kernel_results.inner_results)
kernel_results = AISResults(
proposal_log_prob=proposal_log_prob,
target_log_prob=target_log_prob,
inner_results=inner_results,
)
return [iter_ + 1, ais_weights, next_state, kernel_results]
def _bootstrap_results(init_state):
"""Creates first version of `previous_kernel_results`."""
kernel = make_kernel_fn(_make_convex_combined_log_prob_fn(iter_=0))
inner_results = kernel.bootstrap_results(init_state)
convex_combined_log_prob = inner_results.accepted_results.target_log_prob
dtype = convex_combined_log_prob.dtype.as_numpy_dtype
shape = tf.shape(input=convex_combined_log_prob)
proposal_log_prob = tf.fill(shape, dtype(np.nan),
name="bootstrap_proposal_log_prob")
target_log_prob = tf.fill(shape, dtype(np.nan),
name="target_target_log_prob")
return AISResults(
proposal_log_prob=proposal_log_prob,
target_log_prob=target_log_prob,
inner_results=inner_results,
)
previous_kernel_results = _bootstrap_results(current_state)
inner_results = previous_kernel_results.inner_results
ais_weights = tf.zeros(
shape=tf.broadcast_dynamic_shape(
tf.shape(input=inner_results.proposed_results.target_log_prob),
tf.shape(input=inner_results.accepted_results.target_log_prob)),
dtype=inner_results.proposed_results.target_log_prob.dtype.base_dtype)
[_, ais_weights, current_state, kernel_results] = tf.while_loop(
cond=lambda iter_, *args: iter_ < num_steps,
body=_loop_body,
loop_vars=[
np.int32(0), # iter_
ais_weights,
current_state,
previous_kernel_results,
],
parallel_iterations=parallel_iterations)
return [current_state, ais_weights, kernel_results]
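Supplementary note (not part of the source): `make_kernel_fn` accepts any transition-kernel factory, not just HMC. A minimal hedged sketch swapping in `tfp.mcmc.RandomWalkMetropolis`, reusing the `proposal`, `target`, and `num_chains` names from the log-gamma example above, might look like:
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

chains_state, ais_weights, kernel_results = (
    tfp.mcmc.sample_annealed_importance_chain(
        num_steps=1000,
        proposal_log_prob_fn=proposal.log_prob,
        target_log_prob_fn=target.log_prob,
        current_state=proposal.sample(num_chains),
        # Each annealing step receives a fresh random-walk kernel targeting
        # the convex-combined log density that AIS builds internally.
        make_kernel_fn=lambda tlp_fn: tfp.mcmc.RandomWalkMetropolis(
            target_log_prob_fn=tlp_fn)))

log_normalizer_estimate = (tf.reduce_logsumexp(ais_weights)
                           - np.log(num_chains))
```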
|
[
"Runs",
"annealed",
"importance",
"sampling",
"(",
"AIS",
")",
"to",
"estimate",
"normalizing",
"constants",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/sample_annealed_importance.py#L43-L272
|
[
"def",
"sample_annealed_importance_chain",
"(",
"num_steps",
",",
"proposal_log_prob_fn",
",",
"target_log_prob_fn",
",",
"current_state",
",",
"make_kernel_fn",
",",
"parallel_iterations",
"=",
"10",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"\"sample_annealed_importance_chain\"",
",",
"[",
"num_steps",
",",
"current_state",
"]",
")",
":",
"num_steps",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"num_steps",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"\"num_steps\"",
")",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"current_state",
")",
":",
"current_state",
"=",
"[",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"s",
",",
"name",
"=",
"\"current_state\"",
")",
"for",
"s",
"in",
"current_state",
"]",
"else",
":",
"current_state",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"current_state",
",",
"name",
"=",
"\"current_state\"",
")",
"def",
"_make_convex_combined_log_prob_fn",
"(",
"iter_",
")",
":",
"def",
"_fn",
"(",
"*",
"args",
")",
":",
"p",
"=",
"tf",
".",
"identity",
"(",
"proposal_log_prob_fn",
"(",
"*",
"args",
")",
",",
"name",
"=",
"\"proposal_log_prob\"",
")",
"t",
"=",
"tf",
".",
"identity",
"(",
"target_log_prob_fn",
"(",
"*",
"args",
")",
",",
"name",
"=",
"\"target_log_prob\"",
")",
"dtype",
"=",
"p",
".",
"dtype",
".",
"base_dtype",
"beta",
"=",
"tf",
".",
"cast",
"(",
"iter_",
"+",
"1",
",",
"dtype",
")",
"/",
"tf",
".",
"cast",
"(",
"num_steps",
",",
"dtype",
")",
"return",
"tf",
".",
"identity",
"(",
"beta",
"*",
"t",
"+",
"(",
"1.",
"-",
"beta",
")",
"*",
"p",
",",
"name",
"=",
"\"convex_combined_log_prob\"",
")",
"return",
"_fn",
"def",
"_loop_body",
"(",
"iter_",
",",
"ais_weights",
",",
"current_state",
",",
"kernel_results",
")",
":",
"\"\"\"Closure which implements `tf.while_loop` body.\"\"\"",
"x",
"=",
"(",
"current_state",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"current_state",
")",
"else",
"[",
"current_state",
"]",
")",
"proposal_log_prob",
"=",
"proposal_log_prob_fn",
"(",
"*",
"x",
")",
"target_log_prob",
"=",
"target_log_prob_fn",
"(",
"*",
"x",
")",
"ais_weights",
"+=",
"(",
"(",
"target_log_prob",
"-",
"proposal_log_prob",
")",
"/",
"tf",
".",
"cast",
"(",
"num_steps",
",",
"ais_weights",
".",
"dtype",
")",
")",
"kernel",
"=",
"make_kernel_fn",
"(",
"_make_convex_combined_log_prob_fn",
"(",
"iter_",
")",
")",
"next_state",
",",
"inner_results",
"=",
"kernel",
".",
"one_step",
"(",
"current_state",
",",
"kernel_results",
".",
"inner_results",
")",
"kernel_results",
"=",
"AISResults",
"(",
"proposal_log_prob",
"=",
"proposal_log_prob",
",",
"target_log_prob",
"=",
"target_log_prob",
",",
"inner_results",
"=",
"inner_results",
",",
")",
"return",
"[",
"iter_",
"+",
"1",
",",
"ais_weights",
",",
"next_state",
",",
"kernel_results",
"]",
"def",
"_bootstrap_results",
"(",
"init_state",
")",
":",
"\"\"\"Creates first version of `previous_kernel_results`.\"\"\"",
"kernel",
"=",
"make_kernel_fn",
"(",
"_make_convex_combined_log_prob_fn",
"(",
"iter_",
"=",
"0",
")",
")",
"inner_results",
"=",
"kernel",
".",
"bootstrap_results",
"(",
"init_state",
")",
"convex_combined_log_prob",
"=",
"inner_results",
".",
"accepted_results",
".",
"target_log_prob",
"dtype",
"=",
"convex_combined_log_prob",
".",
"dtype",
".",
"as_numpy_dtype",
"shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"convex_combined_log_prob",
")",
"proposal_log_prob",
"=",
"tf",
".",
"fill",
"(",
"shape",
",",
"dtype",
"(",
"np",
".",
"nan",
")",
",",
"name",
"=",
"\"bootstrap_proposal_log_prob\"",
")",
"target_log_prob",
"=",
"tf",
".",
"fill",
"(",
"shape",
",",
"dtype",
"(",
"np",
".",
"nan",
")",
",",
"name",
"=",
"\"target_target_log_prob\"",
")",
"return",
"AISResults",
"(",
"proposal_log_prob",
"=",
"proposal_log_prob",
",",
"target_log_prob",
"=",
"target_log_prob",
",",
"inner_results",
"=",
"inner_results",
",",
")",
"previous_kernel_results",
"=",
"_bootstrap_results",
"(",
"current_state",
")",
"inner_results",
"=",
"previous_kernel_results",
".",
"inner_results",
"ais_weights",
"=",
"tf",
".",
"zeros",
"(",
"shape",
"=",
"tf",
".",
"broadcast_dynamic_shape",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"inner_results",
".",
"proposed_results",
".",
"target_log_prob",
")",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"inner_results",
".",
"accepted_results",
".",
"target_log_prob",
")",
")",
",",
"dtype",
"=",
"inner_results",
".",
"proposed_results",
".",
"target_log_prob",
".",
"dtype",
".",
"base_dtype",
")",
"[",
"_",
",",
"ais_weights",
",",
"current_state",
",",
"kernel_results",
"]",
"=",
"tf",
".",
"while_loop",
"(",
"cond",
"=",
"lambda",
"iter_",
",",
"*",
"args",
":",
"iter_",
"<",
"num_steps",
",",
"body",
"=",
"_loop_body",
",",
"loop_vars",
"=",
"[",
"np",
".",
"int32",
"(",
"0",
")",
",",
"# iter_",
"ais_weights",
",",
"current_state",
",",
"previous_kernel_results",
",",
"]",
",",
"parallel_iterations",
"=",
"parallel_iterations",
")",
"return",
"[",
"current_state",
",",
"ais_weights",
",",
"kernel_results",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
make_value_setter
|
Creates a value-setting interceptor.
This function creates an interceptor that sets values of Edward2 random
variable objects. This is useful for a range of tasks, including conditioning
on observed data, sampling from posterior predictive distributions, and as a
building block of inference primitives such as computing log joint
probabilities (see examples below).
Args:
**model_kwargs: dict of str to Tensor. Keys are the names of random
variables in the model to which this interceptor is being applied. Values
are Tensors to set their value to. Variables not included in this dict
will not be set and will maintain their existing value semantics (by
default, a sample from the parent-conditional distribution).
Returns:
set_values: function that sets the value of intercepted ops.
#### Examples
Consider for illustration a model with latent `z` and
observed `x`, and a corresponding trainable posterior model:
```python
num_observations = 10
def model():
z = ed.Normal(loc=0, scale=1., name='z') # log rate
x = ed.Poisson(rate=tf.exp(z) * tf.ones(num_observations), name='x')
return x
def variational_model():
return ed.Normal(loc=tf.Variable(0.),
scale=tf.nn.softplus(tf.Variable(-4.)),
name='z') # for simplicity, match name of the model RV.
```
We can use a value-setting interceptor to condition the model on observed
data. This approach is slightly more cumbersome than that of partially
evaluating the complete log-joint function, but has the potential advantage
that it returns a new model callable, which may be used to sample downstream
variables, passed into additional transformations, etc.
```python
x_observed = np.array([6, 3, 1, 8, 7, 0, 6, 4, 7, 5])
def observed_model():
with ed.interception(make_value_setter(x=x_observed)):
model()
observed_log_joint_fn = ed.make_log_joint_fn(observed_model)
# After fixing 'x', the observed log joint is now only a function of 'z'.
# This enables us to define a variational lower bound,
# `E_q[ log p(x, z) - log q(z)]`, simply by evaluating the observed and
# variational log joints at variational samples.
variational_log_joint_fn = ed.make_log_joint_fn(variational_model)
with ed.tape() as variational_sample: # Sample trace from variational model.
variational_model()
elbo_loss = -(observed_log_joint_fn(**variational_sample) -
variational_log_joint_fn(**variational_sample))
```
After performing inference by minimizing the variational loss, a value-setting
interceptor enables simulation from the posterior predictive distribution:
```python
with ed.tape() as posterior_samples: # tape is a map {rv.name : rv}
variational_model()
with ed.interception(ed.make_value_setter(**posterior_samples)):
x = model()
# x is a sample from p(X | Z = z') where z' ~ q(z) (the variational model)
```
As another example, using a value setter inside of `ed.tape` enables
computing the log joint probability, by setting all variables to
posterior values and then accumulating the log probs of those values under
the induced parent-conditional distributions. This is one way that we could
have implemented `ed.make_log_joint_fn`:
```python
def make_log_joint_fn_demo(model):
def log_joint_fn(**model_kwargs):
with ed.tape() as model_tape:
with ed.make_value_setter(**model_kwargs):
model()
# accumulate sum_i log p(X_i = x_i | X_{:i-1} = x_{:i-1})
log_prob = 0.
for rv in model_tape.values():
log_prob += tf.reduce_sum(rv.log_prob(rv.value))
return log_prob
return log_joint_fn
```
|
tensorflow_probability/python/edward2/program_transformations.py
|
def make_value_setter(**model_kwargs):
"""Creates a value-setting interceptor.
This function creates an interceptor that sets values of Edward2 random
variable objects. This is useful for a range of tasks, including conditioning
on observed data, sampling from posterior predictive distributions, and as a
building block of inference primitives such as computing log joint
probabilities (see examples below).
Args:
**model_kwargs: dict of str to Tensor. Keys are the names of random
variables in the model to which this interceptor is being applied. Values
are Tensors to set their value to. Variables not included in this dict
will not be set and will maintain their existing value semantics (by
default, a sample from the parent-conditional distribution).
Returns:
set_values: function that sets the value of intercepted ops.
#### Examples
Consider for illustration a model with latent `z` and
observed `x`, and a corresponding trainable posterior model:
```python
num_observations = 10
def model():
z = ed.Normal(loc=0, scale=1., name='z') # log rate
x = ed.Poisson(rate=tf.exp(z) * tf.ones(num_observations), name='x')
return x
def variational_model():
return ed.Normal(loc=tf.Variable(0.),
scale=tf.nn.softplus(tf.Variable(-4.)),
name='z') # for simplicity, match name of the model RV.
```
We can use a value-setting interceptor to condition the model on observed
data. This approach is slightly more cumbersome than that of partially
evaluating the complete log-joint function, but has the potential advantage
that it returns a new model callable, which may be used to sample downstream
variables, passed into additional transformations, etc.
```python
x_observed = np.array([6, 3, 1, 8, 7, 0, 6, 4, 7, 5])
def observed_model():
with ed.interception(make_value_setter(x=x_observed)):
model()
observed_log_joint_fn = ed.make_log_joint_fn(observed_model)
# After fixing 'x', the observed log joint is now only a function of 'z'.
# This enables us to define a variational lower bound,
# `E_q[ log p(x, z) - log q(z)]`, simply by evaluating the observed and
# variational log joints at variational samples.
variational_log_joint_fn = ed.make_log_joint_fn(variational_model)
with ed.tape() as variational_sample: # Sample trace from variational model.
variational_model()
elbo_loss = -(observed_log_joint_fn(**variational_sample) -
variational_log_joint_fn(**variational_sample))
```
After performing inference by minimizing the variational loss, a value-setting
interceptor enables simulation from the posterior predictive distribution:
```python
with ed.tape() as posterior_samples: # tape is a map {rv.name : rv}
variational_model()
with ed.interception(ed.make_value_setter(**posterior_samples)):
x = model()
# x is a sample from p(X | Z = z') where z' ~ q(z) (the variational model)
```
As another example, using a value setter inside of `ed.tape` enables
computing the log joint probability, by setting all variables to
posterior values and then accumulating the log probs of those values under
the induced parent-conditional distributions. This is one way that we could
have implemented `ed.make_log_joint_fn`:
```python
def make_log_joint_fn_demo(model):
def log_joint_fn(**model_kwargs):
with ed.tape() as model_tape:
with ed.make_value_setter(**model_kwargs):
model()
# accumulate sum_i log p(X_i = x_i | X_{:i-1} = x_{:i-1})
log_prob = 0.
for rv in model_tape.values():
log_prob += tf.reduce_sum(rv.log_prob(rv.value))
return log_prob
return log_joint_fn
```
"""
def set_values(f, *args, **kwargs):
"""Sets random variable values to its aligned value."""
name = kwargs.get("name")
if name in model_kwargs:
kwargs["value"] = model_kwargs[name]
return interceptable(f)(*args, **kwargs)
return set_values
|
def make_value_setter(**model_kwargs):
"""Creates a value-setting interceptor.
This function creates an interceptor that sets values of Edward2 random
variable objects. This is useful for a range of tasks, including conditioning
on observed data, sampling from posterior predictive distributions, and as a
building block of inference primitives such as computing log joint
probabilities (see examples below).
Args:
**model_kwargs: dict of str to Tensor. Keys are the names of random
variables in the model to which this interceptor is being applied. Values
are Tensors to set their value to. Variables not included in this dict
will not be set and will maintain their existing value semantics (by
default, a sample from the parent-conditional distribution).
Returns:
set_values: function that sets the value of intercepted ops.
#### Examples
Consider for illustration a model with latent `z` and
observed `x`, and a corresponding trainable posterior model:
```python
num_observations = 10
def model():
z = ed.Normal(loc=0, scale=1., name='z') # log rate
x = ed.Poisson(rate=tf.exp(z) * tf.ones(num_observations), name='x')
return x
def variational_model():
return ed.Normal(loc=tf.Variable(0.),
scale=tf.nn.softplus(tf.Variable(-4.)),
name='z') # for simplicity, match name of the model RV.
```
We can use a value-setting interceptor to condition the model on observed
data. This approach is slightly more cumbersome than that of partially
evaluating the complete log-joint function, but has the potential advantage
that it returns a new model callable, which may be used to sample downstream
variables, passed into additional transformations, etc.
```python
x_observed = np.array([6, 3, 1, 8, 7, 0, 6, 4, 7, 5])
def observed_model():
with ed.interception(make_value_setter(x=x_observed)):
model()
observed_log_joint_fn = ed.make_log_joint_fn(observed_model)
# After fixing 'x', the observed log joint is now only a function of 'z'.
# This enables us to define a variational lower bound,
# `E_q[ log p(x, z) - log q(z)]`, simply by evaluating the observed and
# variational log joints at variational samples.
variational_log_joint_fn = ed.make_log_joint_fn(variational_model)
with ed.tape() as variational_sample: # Sample trace from variational model.
variational_model()
elbo_loss = -(observed_log_joint_fn(**variational_sample) -
variational_log_joint_fn(**variational_sample))
```
After performing inference by minimizing the variational loss, a value-setting
interceptor enables simulation from the posterior predictive distribution:
```python
with ed.tape() as posterior_samples: # tape is a map {rv.name : rv}
variational_model()
with ed.interception(ed.make_value_setter(**posterior_samples)):
x = model()
# x is a sample from p(X | Z = z') where z' ~ q(z) (the variational model)
```
As another example, using a value setter inside of `ed.tape` enables
computing the log joint probability, by setting all variables to
posterior values and then accumulating the log probs of those values under
the induced parent-conditional distributions. This is one way that we could
have implemented `ed.make_log_joint_fn`:
```python
def make_log_joint_fn_demo(model):
def log_joint_fn(**model_kwargs):
with ed.tape() as model_tape:
with ed.make_value_setter(**model_kwargs):
model()
# accumulate sum_i log p(X_i = x_i | X_{:i-1} = x_{:i-1})
log_prob = 0.
for rv in model_tape.values():
log_prob += tf.reduce_sum(rv.log_prob(rv.value))
return log_prob
return log_joint_fn
```
"""
def set_values(f, *args, **kwargs):
"""Sets random variable values to its aligned value."""
name = kwargs.get("name")
if name in model_kwargs:
kwargs["value"] = model_kwargs[name]
return interceptable(f)(*args, **kwargs)
return set_values
|
[
"Creates",
"a",
"value",
"-",
"setting",
"interceptor",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/program_transformations.py#L34-L135
|
[
"def",
"make_value_setter",
"(",
"*",
"*",
"model_kwargs",
")",
":",
"def",
"set_values",
"(",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Sets random variable values to its aligned value.\"\"\"",
"name",
"=",
"kwargs",
".",
"get",
"(",
"\"name\"",
")",
"if",
"name",
"in",
"model_kwargs",
":",
"kwargs",
"[",
"\"value\"",
"]",
"=",
"model_kwargs",
"[",
"name",
"]",
"return",
"interceptable",
"(",
"f",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"set_values"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
make_log_joint_fn
|
Takes Edward probabilistic program and returns its log joint function.
Args:
model: Python callable which executes the generative process of a
computable probability distribution using `ed.RandomVariable`s.
Returns:
A log-joint probability function. Its inputs are `model`'s original inputs
and random variables which appear during the program execution. Its output
is a scalar tf.Tensor.
#### Examples
Below we define Bayesian logistic regression as an Edward program,
representing the model's generative process. We apply `make_log_joint_fn` in
order to represent the model in terms of its joint probability function.
```python
from tensorflow_probability import edward2 as ed
def logistic_regression(features):
coeffs = ed.Normal(loc=0., scale=1.,
sample_shape=features.shape[1], name="coeffs")
outcomes = ed.Bernoulli(logits=tf.tensordot(features, coeffs, [[1], [0]]),
name="outcomes")
return outcomes
log_joint = ed.make_log_joint_fn(logistic_regression)
features = tf.random_normal([3, 2])
coeffs_value = tf.random_normal([2])
outcomes_value = tf.round(tf.random_uniform([3]))
output = log_joint(features, coeffs=coeffs_value, outcomes=outcomes_value)
```
|
tensorflow_probability/python/edward2/program_transformations.py
|
def make_log_joint_fn(model):
"""Takes Edward probabilistic program and returns its log joint function.
Args:
model: Python callable which executes the generative process of a
computable probability distribution using `ed.RandomVariable`s.
Returns:
A log-joint probability function. Its inputs are `model`'s original inputs
and random variables which appear during the program execution. Its output
is a scalar tf.Tensor.
#### Examples
Below we define Bayesian logistic regression as an Edward program,
representing the model's generative process. We apply `make_log_joint_fn` in
order to represent the model in terms of its joint probability function.
```python
from tensorflow_probability import edward2 as ed
def logistic_regression(features):
coeffs = ed.Normal(loc=0., scale=1.,
sample_shape=features.shape[1], name="coeffs")
outcomes = ed.Bernoulli(logits=tf.tensordot(features, coeffs, [[1], [0]]),
name="outcomes")
return outcomes
log_joint = ed.make_log_joint_fn(logistic_regression)
features = tf.random_normal([3, 2])
coeffs_value = tf.random_normal([2])
outcomes_value = tf.round(tf.random_uniform([3]))
output = log_joint(features, coeffs=coeffs_value, outcomes=outcomes_value)
```
"""
def log_joint_fn(*args, **kwargs):
"""Log-probability of inputs according to a joint probability distribution.
Args:
*args: Positional arguments. They are the model's original inputs and can
alternatively be specified as part of `kwargs`.
**kwargs: Keyword arguments, where for each key-value pair `k` and `v`,
`v` is passed as a `value` to the random variable(s) whose keyword
argument `name` during construction is equal to `k`.
Returns:
Scalar tf.Tensor, which represents the model's log-probability summed
over all Edward random variables and their dimensions.
Raises:
TypeError: If a random variable in the model has no specified value in
`**kwargs`.
"""
log_probs = []
def interceptor(rv_constructor, *rv_args, **rv_kwargs):
"""Overrides a random variable's `value` and accumulates its log-prob."""
# Set value to keyword argument indexed by `name` (an input tensor).
rv_name = rv_kwargs.get("name")
if rv_name is None:
raise KeyError("Random variable constructor {} has no name "
"in its arguments.".format(rv_constructor.__name__))
# If no value is explicitly passed in for an RV, default to the value
# from the RV constructor. This may have been set explicitly by the user
# or forwarded from a lower-level interceptor.
previously_specified_value = rv_kwargs.get("value")
value = kwargs.get(rv_name, previously_specified_value)
if value is None:
raise LookupError("Keyword argument specifying value for {} is "
"missing.".format(rv_name))
rv_kwargs["value"] = value
rv = rv_constructor(*rv_args, **rv_kwargs)
log_prob = tf.reduce_sum(input_tensor=rv.distribution.log_prob(rv.value))
log_probs.append(log_prob)
return rv
model_kwargs = _get_function_inputs(model, kwargs)
with interception(interceptor):
model(*args, **model_kwargs)
log_prob = sum(log_probs)
return log_prob
return log_joint_fn
|
def make_log_joint_fn(model):
"""Takes Edward probabilistic program and returns its log joint function.
Args:
model: Python callable which executes the generative process of a
computable probability distribution using `ed.RandomVariable`s.
Returns:
A log-joint probability function. Its inputs are `model`'s original inputs
and random variables which appear during the program execution. Its output
is a scalar tf.Tensor.
#### Examples
Below we define Bayesian logistic regression as an Edward program,
representing the model's generative process. We apply `make_log_joint_fn` in
order to represent the model in terms of its joint probability function.
```python
from tensorflow_probability import edward2 as ed
def logistic_regression(features):
coeffs = ed.Normal(loc=0., scale=1.,
sample_shape=features.shape[1], name="coeffs")
outcomes = ed.Bernoulli(logits=tf.tensordot(features, coeffs, [[1], [0]]),
name="outcomes")
return outcomes
log_joint = ed.make_log_joint_fn(logistic_regression)
features = tf.random_normal([3, 2])
coeffs_value = tf.random_normal([2])
outcomes_value = tf.round(tf.random_uniform([3]))
output = log_joint(features, coeffs=coeffs_value, outcomes=outcomes_value)
```
"""
def log_joint_fn(*args, **kwargs):
"""Log-probability of inputs according to a joint probability distribution.
Args:
*args: Positional arguments. They are the model's original inputs and can
alternatively be specified as part of `kwargs`.
**kwargs: Keyword arguments, where for each key-value pair `k` and `v`,
`v` is passed as a `value` to the random variable(s) whose keyword
argument `name` during construction is equal to `k`.
Returns:
Scalar tf.Tensor, which represents the model's log-probability summed
over all Edward random variables and their dimensions.
Raises:
TypeError: If a random variable in the model has no specified value in
`**kwargs`.
"""
log_probs = []
def interceptor(rv_constructor, *rv_args, **rv_kwargs):
"""Overrides a random variable's `value` and accumulates its log-prob."""
# Set value to keyword argument indexed by `name` (an input tensor).
rv_name = rv_kwargs.get("name")
if rv_name is None:
raise KeyError("Random variable constructor {} has no name "
"in its arguments.".format(rv_constructor.__name__))
# If no value is explicitly passed in for an RV, default to the value
# from the RV constructor. This may have been set explicitly by the user
# or forwarded from a lower-level interceptor.
previously_specified_value = rv_kwargs.get("value")
value = kwargs.get(rv_name, previously_specified_value)
if value is None:
raise LookupError("Keyword argument specifying value for {} is "
"missing.".format(rv_name))
rv_kwargs["value"] = value
rv = rv_constructor(*rv_args, **rv_kwargs)
log_prob = tf.reduce_sum(input_tensor=rv.distribution.log_prob(rv.value))
log_probs.append(log_prob)
return rv
model_kwargs = _get_function_inputs(model, kwargs)
with interception(interceptor):
model(*args, **model_kwargs)
log_prob = sum(log_probs)
return log_prob
return log_joint_fn
|
[
"Takes",
"Edward",
"probabilistic",
"program",
"and",
"returns",
"its",
"log",
"joint",
"function",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/program_transformations.py#L138-L223
|
[
"def",
"make_log_joint_fn",
"(",
"model",
")",
":",
"def",
"log_joint_fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Log-probability of inputs according to a joint probability distribution.\n\n Args:\n *args: Positional arguments. They are the model's original inputs and can\n alternatively be specified as part of `kwargs`.\n **kwargs: Keyword arguments, where for each key-value pair `k` and `v`,\n `v` is passed as a `value` to the random variable(s) whose keyword\n argument `name` during construction is equal to `k`.\n\n Returns:\n Scalar tf.Tensor, which represents the model's log-probability summed\n over all Edward random variables and their dimensions.\n\n Raises:\n TypeError: If a random variable in the model has no specified value in\n `**kwargs`.\n \"\"\"",
"log_probs",
"=",
"[",
"]",
"def",
"interceptor",
"(",
"rv_constructor",
",",
"*",
"rv_args",
",",
"*",
"*",
"rv_kwargs",
")",
":",
"\"\"\"Overrides a random variable's `value` and accumulates its log-prob.\"\"\"",
"# Set value to keyword argument indexed by `name` (an input tensor).",
"rv_name",
"=",
"rv_kwargs",
".",
"get",
"(",
"\"name\"",
")",
"if",
"rv_name",
"is",
"None",
":",
"raise",
"KeyError",
"(",
"\"Random variable constructor {} has no name \"",
"\"in its arguments.\"",
".",
"format",
"(",
"rv_constructor",
".",
"__name__",
")",
")",
"# If no value is explicitly passed in for an RV, default to the value",
"# from the RV constructor. This may have been set explicitly by the user",
"# or forwarded from a lower-level interceptor.",
"previously_specified_value",
"=",
"rv_kwargs",
".",
"get",
"(",
"\"value\"",
")",
"value",
"=",
"kwargs",
".",
"get",
"(",
"rv_name",
",",
"previously_specified_value",
")",
"if",
"value",
"is",
"None",
":",
"raise",
"LookupError",
"(",
"\"Keyword argument specifying value for {} is \"",
"\"missing.\"",
".",
"format",
"(",
"rv_name",
")",
")",
"rv_kwargs",
"[",
"\"value\"",
"]",
"=",
"value",
"rv",
"=",
"rv_constructor",
"(",
"*",
"rv_args",
",",
"*",
"*",
"rv_kwargs",
")",
"log_prob",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"rv",
".",
"distribution",
".",
"log_prob",
"(",
"rv",
".",
"value",
")",
")",
"log_probs",
".",
"append",
"(",
"log_prob",
")",
"return",
"rv",
"model_kwargs",
"=",
"_get_function_inputs",
"(",
"model",
",",
"kwargs",
")",
"with",
"interception",
"(",
"interceptor",
")",
":",
"model",
"(",
"*",
"args",
",",
"*",
"*",
"model_kwargs",
")",
"log_prob",
"=",
"sum",
"(",
"log_probs",
")",
"return",
"log_prob",
"return",
"log_joint_fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_get_function_inputs
|
Filters inputs to be compatible with function `f`'s signature.
Args:
f: Function according to whose input signature we filter arguments.
src_kwargs: Keyword arguments to filter according to `f`.
Returns:
kwargs: Dict of key-value pairs in `src_kwargs` which exist in `f`'s
signature.
|
tensorflow_probability/python/edward2/program_transformations.py
|
def _get_function_inputs(f, src_kwargs):
"""Filters inputs to be compatible with function `f`'s signature.
Args:
f: Function according to whose input signature we filter arguments.
src_kwargs: Keyword arguments to filter according to `f`.
Returns:
kwargs: Dict of key-value pairs in `src_kwargs` which exist in `f`'s
signature.
"""
if hasattr(f, "_func"): # functions returned by tf.make_template
f = f._func # pylint: disable=protected-access
try: # getargspec was deprecated in Python 3.6
argspec = inspect.getfullargspec(f)
except AttributeError:
argspec = inspect.getargspec(f)
fkwargs = {k: v for k, v in six.iteritems(src_kwargs) if k in argspec.args}
return fkwargs
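Supplementary note (not part of the source): the docstring has no example, so here is a hedged, self-contained sketch of the same signature-filtering idea; the helper name, model signature, and keyword values are made up for illustration:
```python
import inspect

def filter_kwargs_by_signature(f, src_kwargs):
  # Same idea as `_get_function_inputs`: keep only keys that `f` accepts.
  argspec = inspect.getfullargspec(f)
  return {k: v for k, v in src_kwargs.items() if k in argspec.args}

def model(features, coeffs):  # hypothetical model signature
  return features, coeffs

kwargs = {'coeffs': [1., 2.], 'outcomes': [0, 1, 1]}
print(filter_kwargs_by_signature(model, kwargs))  # {'coeffs': [1.0, 2.0]}
```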
|
def _get_function_inputs(f, src_kwargs):
"""Filters inputs to be compatible with function `f`'s signature.
Args:
f: Function according to whose input signature we filter arguments.
src_kwargs: Keyword arguments to filter according to `f`.
Returns:
kwargs: Dict of key-value pairs in `src_kwargs` which exist in `f`'s
signature.
"""
if hasattr(f, "_func"): # functions returned by tf.make_template
f = f._func # pylint: disable=protected-access
try: # getargspec was deprecated in Python 3.6
argspec = inspect.getfullargspec(f)
except AttributeError:
argspec = inspect.getargspec(f)
fkwargs = {k: v for k, v in six.iteritems(src_kwargs) if k in argspec.args}
return fkwargs
|
[
"Filters",
"inputs",
"to",
"be",
"compatible",
"with",
"function",
"f",
"s",
"signature",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/program_transformations.py#L226-L246
|
[
"def",
"_get_function_inputs",
"(",
"f",
",",
"src_kwargs",
")",
":",
"if",
"hasattr",
"(",
"f",
",",
"\"_func\"",
")",
":",
"# functions returned by tf.make_template",
"f",
"=",
"f",
".",
"_func",
"# pylint: disable=protected-access",
"try",
":",
"# getargspec was deprecated in Python 3.6",
"argspec",
"=",
"inspect",
".",
"getfullargspec",
"(",
"f",
")",
"except",
"AttributeError",
":",
"argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"f",
")",
"fkwargs",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"src_kwargs",
")",
"if",
"k",
"in",
"argspec",
".",
"args",
"}",
"return",
"fkwargs"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_vggconv_block
|
Network block for VGG.
|
tensorflow_probability/examples/models/bayesian_vgg.py
|
def _vggconv_block(x, filters, kernel, stride, kernel_posterior_fn):
"""Network block for VGG."""
out = tfp.layers.Convolution2DFlipout(
filters,
kernel,
padding='same',
kernel_posterior_fn=kernel_posterior_fn)(x)
out = tf.keras.layers.BatchNormalization()(out)
out = tf.keras.layers.Activation('relu')(out)
out = tfp.layers.Convolution2DFlipout(
filters,
kernel,
padding='same',
kernel_posterior_fn=kernel_posterior_fn)(out)
out = tf.keras.layers.BatchNormalization()(out)
out = tf.keras.layers.Activation('relu')(out)
out = tf.keras.layers.MaxPooling2D(
pool_size=(2, 2), strides=stride)(out)
return out
|
def _vggconv_block(x, filters, kernel, stride, kernel_posterior_fn):
"""Network block for VGG."""
out = tfp.layers.Convolution2DFlipout(
filters,
kernel,
padding='same',
kernel_posterior_fn=kernel_posterior_fn)(x)
out = tf.keras.layers.BatchNormalization()(out)
out = tf.keras.layers.Activation('relu')(out)
out = tfp.layers.Convolution2DFlipout(
filters,
kernel,
padding='same',
kernel_posterior_fn=kernel_posterior_fn)(out)
out = tf.keras.layers.BatchNormalization()(out)
out = tf.keras.layers.Activation('relu')(out)
out = tf.keras.layers.MaxPooling2D(
pool_size=(2, 2), strides=stride)(out)
return out
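Supplementary note (not part of the source): a hedged sketch of stacking this block into a small Bayesian convolutional classifier; the input shape, filter counts, and the `default_mean_field_normal_fn` posterior are illustrative assumptions, not values taken from the example script:
```python
import tensorflow as tf
import tensorflow_probability as tfp

posterior_fn = tfp.layers.default_mean_field_normal_fn()

inputs = tf.keras.layers.Input(shape=(32, 32, 3))
net = _vggconv_block(inputs, filters=64, kernel=3, stride=2,
                     kernel_posterior_fn=posterior_fn)
net = _vggconv_block(net, filters=128, kernel=3, stride=2,
                     kernel_posterior_fn=posterior_fn)
net = tf.keras.layers.Flatten()(net)
logits = tfp.layers.DenseFlipout(10)(net)  # 10-way classifier head
model = tf.keras.Model(inputs=inputs, outputs=logits)
```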
|
[
"Network",
"block",
"for",
"VGG",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/models/bayesian_vgg.py#L85-L105
|
[
"def",
"_vggconv_block",
"(",
"x",
",",
"filters",
",",
"kernel",
",",
"stride",
",",
"kernel_posterior_fn",
")",
":",
"out",
"=",
"tfp",
".",
"layers",
".",
"Convolution2DFlipout",
"(",
"filters",
",",
"kernel",
",",
"padding",
"=",
"'same'",
",",
"kernel_posterior_fn",
"=",
"kernel_posterior_fn",
")",
"(",
"x",
")",
"out",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"BatchNormalization",
"(",
")",
"(",
"out",
")",
"out",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"Activation",
"(",
"'relu'",
")",
"(",
"out",
")",
"out",
"=",
"tfp",
".",
"layers",
".",
"Convolution2DFlipout",
"(",
"filters",
",",
"kernel",
",",
"padding",
"=",
"'same'",
",",
"kernel_posterior_fn",
"=",
"kernel_posterior_fn",
")",
"(",
"out",
")",
"out",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"BatchNormalization",
"(",
")",
"(",
"out",
")",
"out",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"Activation",
"(",
"'relu'",
")",
"(",
"out",
")",
"out",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"MaxPooling2D",
"(",
"pool_size",
"=",
"(",
"2",
",",
"2",
")",
",",
"strides",
"=",
"stride",
")",
"(",
"out",
")",
"return",
"out"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_DenseVariational.compute_output_shape
|
Computes the output shape of the layer.
Args:
input_shape: Shape tuple (tuple of integers) or list of shape tuples
(one per output tensor of the layer). Shape tuples can include None for
free dimensions, instead of an integer.
Returns:
output_shape: A tuple representing the output shape.
Raises:
ValueError: If innermost dimension of `input_shape` is not defined.
|
tensorflow_probability/python/layers/dense_variational.py
|
def compute_output_shape(self, input_shape):
"""Computes the output shape of the layer.
Args:
input_shape: Shape tuple (tuple of integers) or list of shape tuples
(one per output tensor of the layer). Shape tuples can include None for
free dimensions, instead of an integer.
Returns:
output_shape: A tuple representing the output shape.
Raises:
ValueError: If innermost dimension of `input_shape` is not defined.
"""
input_shape = tf.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if tf.compat.dimension_value(input_shape[-1]) is None:
raise ValueError(
'The innermost dimension of `input_shape` must be defined, '
'but saw: {}'.format(input_shape))
return input_shape[:-1].concatenate(self.units)
|
def compute_output_shape(self, input_shape):
"""Computes the output shape of the layer.
Args:
input_shape: Shape tuple (tuple of integers) or list of shape tuples
(one per output tensor of the layer). Shape tuples can include None for
free dimensions, instead of an integer.
Returns:
output_shape: A tuple representing the output shape.
Raises:
ValueError: If innermost dimension of `input_shape` is not defined.
"""
input_shape = tf.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if tf.compat.dimension_value(input_shape[-1]) is None:
raise ValueError(
'The innermost dimension of `input_shape` must be defined, '
'but saw: {}'.format(input_shape))
return input_shape[:-1].concatenate(self.units)
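Supplementary note (not part of the source): a hedged illustration of the shape arithmetic above; the batch dimension and `units=10` are made-up values:
```python
import tensorflow as tf

input_shape = tf.TensorShape([None, 5])          # unknown batch, 5 features
units = 10
output_shape = input_shape[:-1].concatenate(units)
print(output_shape)  # (None, 10): batch dims kept, last dim replaced by units
```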
|
[
"Computes",
"the",
"output",
"shape",
"of",
"the",
"layer",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/dense_variational.py#L193-L213
|
[
"def",
"compute_output_shape",
"(",
"self",
",",
"input_shape",
")",
":",
"input_shape",
"=",
"tf",
".",
"TensorShape",
"(",
"input_shape",
")",
"input_shape",
"=",
"input_shape",
".",
"with_rank_at_least",
"(",
"2",
")",
"if",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"input_shape",
"[",
"-",
"1",
"]",
")",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'The innermost dimension of `input_shape` must be defined, '",
"'but saw: {}'",
".",
"format",
"(",
"input_shape",
")",
")",
"return",
"input_shape",
"[",
":",
"-",
"1",
"]",
".",
"concatenate",
"(",
"self",
".",
"units",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_DenseVariational.get_config
|
Returns the config of the layer.
A layer config is a Python dictionary (serializable) containing the
configuration of a layer. The same layer can be reinstantiated later
(without its trained weights) from this configuration.
Returns:
config: A Python dictionary of class keyword arguments and their
serialized values.
|
tensorflow_probability/python/layers/dense_variational.py
|
def get_config(self):
"""Returns the config of the layer.
A layer config is a Python dictionary (serializable) containing the
configuration of a layer. The same layer can be reinstantiated later
(without its trained weights) from this configuration.
Returns:
config: A Python dictionary of class keyword arguments and their
serialized values.
"""
config = {
'units': self.units,
'activation': (tf.keras.activations.serialize(self.activation)
if self.activation else None),
'activity_regularizer':
tf.keras.initializers.serialize(self.activity_regularizer),
}
function_keys = [
'kernel_posterior_fn',
'kernel_posterior_tensor_fn',
'kernel_prior_fn',
'kernel_divergence_fn',
'bias_posterior_fn',
'bias_posterior_tensor_fn',
'bias_prior_fn',
'bias_divergence_fn',
]
for function_key in function_keys:
function = getattr(self, function_key)
if function is None:
function_name = None
function_type = None
else:
function_name, function_type = tfp_layers_util.serialize_function(
function)
config[function_key] = function_name
config[function_key + '_type'] = function_type
base_config = super(_DenseVariational, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
def get_config(self):
"""Returns the config of the layer.
A layer config is a Python dictionary (serializable) containing the
configuration of a layer. The same layer can be reinstantiated later
(without its trained weights) from this configuration.
Returns:
config: A Python dictionary of class keyword arguments and their
serialized values.
"""
config = {
'units': self.units,
'activation': (tf.keras.activations.serialize(self.activation)
if self.activation else None),
'activity_regularizer':
tf.keras.initializers.serialize(self.activity_regularizer),
}
function_keys = [
'kernel_posterior_fn',
'kernel_posterior_tensor_fn',
'kernel_prior_fn',
'kernel_divergence_fn',
'bias_posterior_fn',
'bias_posterior_tensor_fn',
'bias_prior_fn',
'bias_divergence_fn',
]
for function_key in function_keys:
function = getattr(self, function_key)
if function is None:
function_name = None
function_type = None
else:
function_name, function_type = tfp_layers_util.serialize_function(
function)
config[function_key] = function_name
config[function_key + '_type'] = function_type
base_config = super(_DenseVariational, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
[
"Returns",
"the",
"config",
"of",
"the",
"layer",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/dense_variational.py#L215-L254
|
[
"def",
"get_config",
"(",
"self",
")",
":",
"config",
"=",
"{",
"'units'",
":",
"self",
".",
"units",
",",
"'activation'",
":",
"(",
"tf",
".",
"keras",
".",
"activations",
".",
"serialize",
"(",
"self",
".",
"activation",
")",
"if",
"self",
".",
"activation",
"else",
"None",
")",
",",
"'activity_regularizer'",
":",
"tf",
".",
"keras",
".",
"initializers",
".",
"serialize",
"(",
"self",
".",
"activity_regularizer",
")",
",",
"}",
"function_keys",
"=",
"[",
"'kernel_posterior_fn'",
",",
"'kernel_posterior_tensor_fn'",
",",
"'kernel_prior_fn'",
",",
"'kernel_divergence_fn'",
",",
"'bias_posterior_fn'",
",",
"'bias_posterior_tensor_fn'",
",",
"'bias_prior_fn'",
",",
"'bias_divergence_fn'",
",",
"]",
"for",
"function_key",
"in",
"function_keys",
":",
"function",
"=",
"getattr",
"(",
"self",
",",
"function_key",
")",
"if",
"function",
"is",
"None",
":",
"function_name",
"=",
"None",
"function_type",
"=",
"None",
"else",
":",
"function_name",
",",
"function_type",
"=",
"tfp_layers_util",
".",
"serialize_function",
"(",
"function",
")",
"config",
"[",
"function_key",
"]",
"=",
"function_name",
"config",
"[",
"function_key",
"+",
"'_type'",
"]",
"=",
"function_type",
"base_config",
"=",
"super",
"(",
"_DenseVariational",
",",
"self",
")",
".",
"get_config",
"(",
")",
"return",
"dict",
"(",
"list",
"(",
"base_config",
".",
"items",
"(",
")",
")",
"+",
"list",
"(",
"config",
".",
"items",
"(",
")",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
kernel
|
Simulates a No-U-Turn Sampler (NUTS) trajectory.
Args:
target_log_prob_fn: Python callable which takes an argument like
`*current_state` and returns its (possibly unnormalized) log-density under
the target distribution.
current_state: List of `Tensor`s representing the states to simulate from.
step_size: List of `Tensor`s representing the step sizes for the leapfrog
integrator. Must have same shape as `current_state`.
seed: Integer to seed the random number generator.
current_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at the `current_state`.
current_grads_target_log_prob: List of `Tensor`s representing gradient of
`current_target_log_prob` with respect to `current_state`. Must have same
shape as `current_state`.
name: A name for the operation.
Returns:
next_state: List of `Tensor`s representing the next states of the NUTS
trajectory. Has same shape as `current_state`.
next_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at `next_state`.
next_grads_target_log_prob: List of `Tensor`s representing the gradient of
`next_target_log_prob` with respect to `next_state`.
Raises:
NotImplementedError: If the execution mode is not eager.
|
experimental/no_u_turn_sampler/nuts.py
|
def kernel(target_log_prob_fn,
current_state,
step_size,
seed=None,
current_target_log_prob=None,
current_grads_target_log_prob=None,
name=None):
"""Simulates a No-U-Turn Sampler (NUTS) trajectory.
Args:
target_log_prob_fn: Python callable which takes an argument like
`*current_state` and returns its (possibly unnormalized) log-density under
the target distribution.
current_state: List of `Tensor`s representing the states to simulate from.
step_size: List of `Tensor`s representing the step sizes for the leapfrog
integrator. Must have same shape as `current_state`.
seed: Integer to seed the random number generator.
current_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at the `current_state`.
current_grads_target_log_prob: List of `Tensor`s representing gradient of
`current_target_log_prob` with respect to `current_state`. Must have same
shape as `current_state`.
name: A name for the operation.
Returns:
next_state: List of `Tensor`s representing the next states of the NUTS
trajectory. Has same shape as `current_state`.
next_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at `next_state`.
next_grads_target_log_prob: List of `Tensor`s representing the gradient of
`next_target_log_prob` with respect to `next_state`.
Raises:
NotImplementedError: If the execution mode is not eager.
"""
if not tf.executing_eagerly():
raise NotImplementedError("`kernel` is only available in Eager mode.")
with tf.compat.v1.name_scope(
name,
default_name="nuts_kernel",
values=[
current_state, step_size, seed, current_target_log_prob,
current_grads_target_log_prob
]):
with tf.compat.v1.name_scope("initialize"):
current_state = [tf.convert_to_tensor(value=s) for s in current_state]
step_size = [tf.convert_to_tensor(value=s) for s in step_size]
value_and_gradients_fn = lambda *args: tfp.math.value_and_gradient( # pylint: disable=g-long-lambda
target_log_prob_fn, args)
value_and_gradients_fn = _embed_no_none_gradient_check(
value_and_gradients_fn)
if (current_target_log_prob is None or
current_grads_target_log_prob is None):
(current_target_log_prob,
current_grads_target_log_prob) = value_and_gradients_fn(*current_state)
seed_stream = tfd.SeedStream(seed, "nuts_kernel")
current_momentum = []
for state_tensor in current_state:
momentum_tensor = tf.random.normal(
shape=tf.shape(input=state_tensor),
dtype=state_tensor.dtype,
seed=seed_stream())
current_momentum.append(momentum_tensor)
# Draw a slice variable u ~ Uniform(0, p(initial state, initial
# momentum)) and compute log u. For numerical stability, we perform this
# in log space where log u = log (u' * p(...)) = log u' + log
# p(...) and u' ~ Uniform(0, 1).
log_slice_sample = tf.math.log(tf.random.uniform([], seed=seed_stream()))
log_slice_sample += _log_joint(current_target_log_prob,
current_momentum)
# Initialize loop variables. It comprises a collection of information
# about a "reverse" state, a collection of information about a "forward"
# state, a collection of information about the next state,
# the trajectory's tree depth, the number of candidate states, and
# whether to continue the trajectory.
reverse_state = current_state
reverse_target_log_prob = current_target_log_prob
reverse_grads_target_log_prob = current_grads_target_log_prob
reverse_momentum = current_momentum
forward_state = current_state
forward_target_log_prob = current_target_log_prob
forward_grads_target_log_prob = current_grads_target_log_prob
forward_momentum = current_momentum
next_state = current_state
next_target_log_prob = current_target_log_prob
next_grads_target_log_prob = current_grads_target_log_prob
depth = 0
num_states = 1
continue_trajectory = True
while continue_trajectory:
# Grow the No-U-Turn Sampler trajectory by choosing a random direction and
# simulating Hamiltonian dynamics in that direction. This extends either
# the forward or reverse state.
direction = tfp.math.random_rademacher([], seed=seed_stream())
if direction < 0:
[
reverse_state,
reverse_target_log_prob,
reverse_grads_target_log_prob,
reverse_momentum,
_,
_,
_,
_,
next_state_in_subtree,
next_target_log_prob_in_subtree,
next_grads_target_log_prob_in_subtree,
num_states_in_subtree,
continue_trajectory,
] = _build_tree(
value_and_gradients_fn=value_and_gradients_fn,
current_state=reverse_state,
current_target_log_prob=reverse_target_log_prob,
current_grads_target_log_prob=reverse_grads_target_log_prob,
current_momentum=reverse_momentum,
direction=direction,
depth=depth,
step_size=step_size,
log_slice_sample=log_slice_sample,
seed=seed_stream())
else:
[
_,
_,
_,
_,
forward_state,
forward_target_log_prob,
forward_grads_target_log_prob,
forward_momentum,
next_state_in_subtree,
next_target_log_prob_in_subtree,
next_grads_target_log_prob_in_subtree,
num_states_in_subtree,
continue_trajectory,
] = _build_tree(
value_and_gradients_fn=value_and_gradients_fn,
current_state=forward_state,
current_target_log_prob=forward_target_log_prob,
current_grads_target_log_prob=forward_grads_target_log_prob,
current_momentum=forward_momentum,
direction=direction,
depth=depth,
step_size=step_size,
log_slice_sample=log_slice_sample,
seed=seed_stream())
if continue_trajectory:
# If the built tree did not terminate, accept the tree's next state
# with a certain probability.
accept_state_in_subtree = _random_bernoulli(
[],
probs=tf.minimum(1., num_states_in_subtree / num_states),
dtype=tf.bool,
seed=seed_stream())
if accept_state_in_subtree:
next_state = next_state_in_subtree
next_target_log_prob = next_target_log_prob_in_subtree
next_grads_target_log_prob = next_grads_target_log_prob_in_subtree
# Continue the NUTS trajectory if the tree-building did not terminate, and
# if the reverse-most and forward-most states do not exhibit a U-turn.
has_no_u_turn = tf.logical_and(
_has_no_u_turn(forward_state, reverse_state, forward_momentum),
_has_no_u_turn(forward_state, reverse_state, reverse_momentum))
continue_trajectory = continue_trajectory and has_no_u_turn
num_states += num_states_in_subtree
depth += 1
return next_state, next_target_log_prob, next_grads_target_log_prob
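Supplementary note (not part of the source): a minimal, hedged eager-mode sketch of driving the `kernel` function defined above; the standard-normal target, scalar initial state, and step size are illustrative choices only:
```python
import tensorflow as tf
import tensorflow_probability as tfp

tf.compat.v1.enable_eager_execution()  # `kernel` requires eager execution
tfd = tfp.distributions

target = tfd.Normal(loc=0., scale=1.)
state = [tf.constant(1.0)]             # single-chain, scalar state

for _ in range(100):
  # Feed each returned state back in as the next starting point.
  state, _, _ = kernel(
      target_log_prob_fn=lambda x: target.log_prob(x),
      current_state=state,
      step_size=[tf.constant(0.3)])
```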
|
def kernel(target_log_prob_fn,
current_state,
step_size,
seed=None,
current_target_log_prob=None,
current_grads_target_log_prob=None,
name=None):
"""Simulates a No-U-Turn Sampler (NUTS) trajectory.
Args:
target_log_prob_fn: Python callable which takes an argument like
`*current_state` and returns its (possibly unnormalized) log-density under
the target distribution.
current_state: List of `Tensor`s representing the states to simulate from.
step_size: List of `Tensor`s representing the step sizes for the leapfrog
integrator. Must have same shape as `current_state`.
seed: Integer to seed the random number generator.
current_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at the `current_state`.
current_grads_target_log_prob: List of `Tensor`s representing gradient of
`current_target_log_prob` with respect to `current_state`. Must have same
shape as `current_state`.
name: A name for the operation.
Returns:
next_state: List of `Tensor`s representing the next states of the NUTS
trajectory. Has same shape as `current_state`.
next_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at `next_state`.
next_grads_target_log_prob: List of `Tensor`s representing the gradient of
`next_target_log_prob` with respect to `next_state`.
Raises:
NotImplementedError: If the execution mode is not eager.
"""
if not tf.executing_eagerly():
raise NotImplementedError("`kernel` is only available in Eager mode.")
with tf.compat.v1.name_scope(
name,
default_name="nuts_kernel",
values=[
current_state, step_size, seed, current_target_log_prob,
current_grads_target_log_prob
]):
with tf.compat.v1.name_scope("initialize"):
current_state = [tf.convert_to_tensor(value=s) for s in current_state]
step_size = [tf.convert_to_tensor(value=s) for s in step_size]
value_and_gradients_fn = lambda *args: tfp.math.value_and_gradient( # pylint: disable=g-long-lambda
target_log_prob_fn, args)
value_and_gradients_fn = _embed_no_none_gradient_check(
value_and_gradients_fn)
if (current_target_log_prob is None or
current_grads_target_log_prob is None):
(current_target_log_prob,
current_grads_target_log_prob) = value_and_gradients_fn(*current_state)
seed_stream = tfd.SeedStream(seed, "nuts_kernel")
current_momentum = []
for state_tensor in current_state:
momentum_tensor = tf.random.normal(
shape=tf.shape(input=state_tensor),
dtype=state_tensor.dtype,
seed=seed_stream())
current_momentum.append(momentum_tensor)
# Draw a slice variable u ~ Uniform(0, p(initial state, initial
# momentum)) and compute log u. For numerical stability, we perform this
# in log space where log u = log (u' * p(...)) = log u' + log
# p(...) and u' ~ Uniform(0, 1).
log_slice_sample = tf.math.log(tf.random.uniform([], seed=seed_stream()))
log_slice_sample += _log_joint(current_target_log_prob,
current_momentum)
# Initialize loop variables. It comprises a collection of information
# about a "reverse" state, a collection of information about a "forward"
# state, a collection of information about the next state,
# the trajectory's tree depth, the number of candidate states, and
# whether to continue the trajectory.
reverse_state = current_state
reverse_target_log_prob = current_target_log_prob
reverse_grads_target_log_prob = current_grads_target_log_prob
reverse_momentum = current_momentum
forward_state = current_state
forward_target_log_prob = current_target_log_prob
forward_grads_target_log_prob = current_grads_target_log_prob
forward_momentum = current_momentum
next_state = current_state
next_target_log_prob = current_target_log_prob
next_grads_target_log_prob = current_grads_target_log_prob
depth = 0
num_states = 1
continue_trajectory = True
while continue_trajectory:
# Grow the No-U-Turn Sampler trajectory by choosing a random direction and
# simulating Hamiltonian dynamics in that direction. This extends either
# the forward or reverse state.
direction = tfp.math.random_rademacher([], seed=seed_stream())
if direction < 0:
[
reverse_state,
reverse_target_log_prob,
reverse_grads_target_log_prob,
reverse_momentum,
_,
_,
_,
_,
next_state_in_subtree,
next_target_log_prob_in_subtree,
next_grads_target_log_prob_in_subtree,
num_states_in_subtree,
continue_trajectory,
] = _build_tree(
value_and_gradients_fn=value_and_gradients_fn,
current_state=reverse_state,
current_target_log_prob=reverse_target_log_prob,
current_grads_target_log_prob=reverse_grads_target_log_prob,
current_momentum=reverse_momentum,
direction=direction,
depth=depth,
step_size=step_size,
log_slice_sample=log_slice_sample,
seed=seed_stream())
else:
[
_,
_,
_,
_,
forward_state,
forward_target_log_prob,
forward_grads_target_log_prob,
forward_momentum,
next_state_in_subtree,
next_target_log_prob_in_subtree,
next_grads_target_log_prob_in_subtree,
num_states_in_subtree,
continue_trajectory,
] = _build_tree(
value_and_gradients_fn=value_and_gradients_fn,
current_state=forward_state,
current_target_log_prob=forward_target_log_prob,
current_grads_target_log_prob=forward_grads_target_log_prob,
current_momentum=forward_momentum,
direction=direction,
depth=depth,
step_size=step_size,
log_slice_sample=log_slice_sample,
seed=seed_stream())
if continue_trajectory:
# If the built tree did not terminate, accept the tree's next state
# with a certain probability.
accept_state_in_subtree = _random_bernoulli(
[],
probs=tf.minimum(1., num_states_in_subtree / num_states),
dtype=tf.bool,
seed=seed_stream())
if accept_state_in_subtree:
next_state = next_state_in_subtree
next_target_log_prob = next_target_log_prob_in_subtree
next_grads_target_log_prob = next_grads_target_log_prob_in_subtree
# Continue the NUTS trajectory if the tree-building did not terminate, and
# if the reverse-most and forward-most states do not exhibit a U-turn.
has_no_u_turn = tf.logical_and(
_has_no_u_turn(forward_state, reverse_state, forward_momentum),
_has_no_u_turn(forward_state, reverse_state, reverse_momentum))
continue_trajectory = continue_trajectory and has_no_u_turn
num_states += num_states_in_subtree
depth += 1
return next_state, next_target_log_prob, next_grads_target_log_prob
|
[
"Simulates",
"a",
"No",
"-",
"U",
"-",
"Turn",
"Sampler",
"(",
"NUTS",
")",
"trajectory",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/no_u_turn_sampler/nuts.py#L48-L222
|
[
"def",
"kernel",
"(",
"target_log_prob_fn",
",",
"current_state",
",",
"step_size",
",",
"seed",
"=",
"None",
",",
"current_target_log_prob",
"=",
"None",
",",
"current_grads_target_log_prob",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"not",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"`kernel` is only available in Eager mode.\"",
")",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"default_name",
"=",
"\"nuts_kernel\"",
",",
"values",
"=",
"[",
"current_state",
",",
"step_size",
",",
"seed",
",",
"current_target_log_prob",
",",
"current_grads_target_log_prob",
"]",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"\"initialize\"",
")",
":",
"current_state",
"=",
"[",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"s",
")",
"for",
"s",
"in",
"current_state",
"]",
"step_size",
"=",
"[",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"s",
")",
"for",
"s",
"in",
"step_size",
"]",
"value_and_gradients_fn",
"=",
"lambda",
"*",
"args",
":",
"tfp",
".",
"math",
".",
"value_and_gradient",
"(",
"# pylint: disable=g-long-lambda",
"target_log_prob_fn",
",",
"args",
")",
"value_and_gradients_fn",
"=",
"_embed_no_none_gradient_check",
"(",
"value_and_gradients_fn",
")",
"if",
"(",
"current_target_log_prob",
"is",
"None",
"or",
"current_grads_target_log_prob",
"is",
"None",
")",
":",
"(",
"current_target_log_prob",
",",
"current_grads_target_log_prob",
")",
"=",
"value_and_gradients_fn",
"(",
"*",
"current_state",
")",
"seed_stream",
"=",
"tfd",
".",
"SeedStream",
"(",
"seed",
",",
"\"nuts_kernel\"",
")",
"current_momentum",
"=",
"[",
"]",
"for",
"state_tensor",
"in",
"current_state",
":",
"momentum_tensor",
"=",
"tf",
".",
"random",
".",
"normal",
"(",
"shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"state_tensor",
")",
",",
"dtype",
"=",
"state_tensor",
".",
"dtype",
",",
"seed",
"=",
"seed_stream",
"(",
")",
")",
"current_momentum",
".",
"append",
"(",
"momentum_tensor",
")",
"# Draw a slice variable u ~ Uniform(0, p(initial state, initial",
"# momentum)) and compute log u. For numerical stability, we perform this",
"# in log space where log u = log (u' * p(...)) = log u' + log",
"# p(...) and u' ~ Uniform(0, 1).",
"log_slice_sample",
"=",
"tf",
".",
"math",
".",
"log",
"(",
"tf",
".",
"random",
".",
"uniform",
"(",
"[",
"]",
",",
"seed",
"=",
"seed_stream",
"(",
")",
")",
")",
"log_slice_sample",
"+=",
"_log_joint",
"(",
"current_target_log_prob",
",",
"current_momentum",
")",
"# Initialize loop variables. It comprises a collection of information",
"# about a \"reverse\" state, a collection of information about a \"forward\"",
"# state, a collection of information about the next state,",
"# the trajectory's tree depth, the number of candidate states, and",
"# whether to continue the trajectory.",
"reverse_state",
"=",
"current_state",
"reverse_target_log_prob",
"=",
"current_target_log_prob",
"reverse_grads_target_log_prob",
"=",
"current_grads_target_log_prob",
"reverse_momentum",
"=",
"current_momentum",
"forward_state",
"=",
"current_state",
"forward_target_log_prob",
"=",
"current_target_log_prob",
"forward_grads_target_log_prob",
"=",
"current_grads_target_log_prob",
"forward_momentum",
"=",
"current_momentum",
"next_state",
"=",
"current_state",
"next_target_log_prob",
"=",
"current_target_log_prob",
"next_grads_target_log_prob",
"=",
"current_grads_target_log_prob",
"depth",
"=",
"0",
"num_states",
"=",
"1",
"continue_trajectory",
"=",
"True",
"while",
"continue_trajectory",
":",
"# Grow the No-U-Turn Sampler trajectory by choosing a random direction and",
"# simulating Hamiltonian dynamics in that direction. This extends either",
"# the forward or reverse state.",
"direction",
"=",
"tfp",
".",
"math",
".",
"random_rademacher",
"(",
"[",
"]",
",",
"seed",
"=",
"seed_stream",
"(",
")",
")",
"if",
"direction",
"<",
"0",
":",
"[",
"reverse_state",
",",
"reverse_target_log_prob",
",",
"reverse_grads_target_log_prob",
",",
"reverse_momentum",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
",",
"next_state_in_subtree",
",",
"next_target_log_prob_in_subtree",
",",
"next_grads_target_log_prob_in_subtree",
",",
"num_states_in_subtree",
",",
"continue_trajectory",
",",
"]",
"=",
"_build_tree",
"(",
"value_and_gradients_fn",
"=",
"value_and_gradients_fn",
",",
"current_state",
"=",
"reverse_state",
",",
"current_target_log_prob",
"=",
"reverse_target_log_prob",
",",
"current_grads_target_log_prob",
"=",
"reverse_grads_target_log_prob",
",",
"current_momentum",
"=",
"reverse_momentum",
",",
"direction",
"=",
"direction",
",",
"depth",
"=",
"depth",
",",
"step_size",
"=",
"step_size",
",",
"log_slice_sample",
"=",
"log_slice_sample",
",",
"seed",
"=",
"seed_stream",
"(",
")",
")",
"else",
":",
"[",
"_",
",",
"_",
",",
"_",
",",
"_",
",",
"forward_state",
",",
"forward_target_log_prob",
",",
"forward_grads_target_log_prob",
",",
"forward_momentum",
",",
"next_state_in_subtree",
",",
"next_target_log_prob_in_subtree",
",",
"next_grads_target_log_prob_in_subtree",
",",
"num_states_in_subtree",
",",
"continue_trajectory",
",",
"]",
"=",
"_build_tree",
"(",
"value_and_gradients_fn",
"=",
"value_and_gradients_fn",
",",
"current_state",
"=",
"forward_state",
",",
"current_target_log_prob",
"=",
"forward_target_log_prob",
",",
"current_grads_target_log_prob",
"=",
"forward_grads_target_log_prob",
",",
"current_momentum",
"=",
"forward_momentum",
",",
"direction",
"=",
"direction",
",",
"depth",
"=",
"depth",
",",
"step_size",
"=",
"step_size",
",",
"log_slice_sample",
"=",
"log_slice_sample",
",",
"seed",
"=",
"seed_stream",
"(",
")",
")",
"if",
"continue_trajectory",
":",
"# If the built tree did not terminate, accept the tree's next state",
"# with a certain probability.",
"accept_state_in_subtree",
"=",
"_random_bernoulli",
"(",
"[",
"]",
",",
"probs",
"=",
"tf",
".",
"minimum",
"(",
"1.",
",",
"num_states_in_subtree",
"/",
"num_states",
")",
",",
"dtype",
"=",
"tf",
".",
"bool",
",",
"seed",
"=",
"seed_stream",
"(",
")",
")",
"if",
"accept_state_in_subtree",
":",
"next_state",
"=",
"next_state_in_subtree",
"next_target_log_prob",
"=",
"next_target_log_prob_in_subtree",
"next_grads_target_log_prob",
"=",
"next_grads_target_log_prob_in_subtree",
"# Continue the NUTS trajectory if the tree-building did not terminate, and",
"# if the reverse-most and forward-most states do not exhibit a U-turn.",
"has_no_u_turn",
"=",
"tf",
".",
"logical_and",
"(",
"_has_no_u_turn",
"(",
"forward_state",
",",
"reverse_state",
",",
"forward_momentum",
")",
",",
"_has_no_u_turn",
"(",
"forward_state",
",",
"reverse_state",
",",
"reverse_momentum",
")",
")",
"continue_trajectory",
"=",
"continue_trajectory",
"and",
"has_no_u_turn",
"num_states",
"+=",
"num_states_in_subtree",
"depth",
"+=",
"1",
"return",
"next_state",
",",
"next_target_log_prob",
",",
"next_grads_target_log_prob"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
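A minimal usage sketch for the `kernel` function above. This is a hedged illustration only: it assumes TF2 eager execution (the kernel raises otherwise) and that the module at `experimental/no_u_turn_sampler/nuts.py` is importable from the repo root as shown; the import path and the seed handling are assumptions, not part of the source.

# Hedged sketch: import path and target function are illustrative assumptions.
import tensorflow as tf
from experimental.no_u_turn_sampler import nuts  # assumed import path

def target_log_prob_fn(x):
  # Standard bivariate normal log-density, up to an additive constant.
  return -0.5 * tf.reduce_sum(x ** 2.)

state = [tf.zeros([2])]
step_size = [tf.constant(0.3)]
for i in range(5):
  # Each call returns the next state, its log-prob, and its gradients.
  state, target_log_prob, grads = nuts.kernel(
      target_log_prob_fn=target_log_prob_fn,
      current_state=state,
      step_size=step_size,
      seed=i)
  print(state[0].numpy())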
test
|
_build_tree
|
Builds a tree at a given tree depth and at a given state.
The `current` state is immediately adjacent to, but outside of,
the subtrajectory spanned by the returned `forward` and `reverse` states.
Args:
value_and_gradients_fn: Python callable which takes an argument like
`*current_state` and returns a tuple of its (possibly unnormalized)
log-density under the target distribution and its gradient with respect to
each state.
current_state: List of `Tensor`s representing the current states of the
NUTS trajectory.
current_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at the `current_state`.
current_grads_target_log_prob: List of `Tensor`s representing gradient of
`current_target_log_prob` with respect to `current_state`. Must have same
shape as `current_state`.
current_momentum: List of `Tensor`s representing the momentums of
`current_state`. Must have same shape as `current_state`.
direction: int that is either -1 or 1. It determines whether to perform
leapfrog integration backwards (reverse) or forward in time respectively.
depth: non-negative int that indicates how deep of a tree to build.
Each call to `_build_tree` takes `2**depth` leapfrog steps.
step_size: List of `Tensor`s representing the step sizes for the leapfrog
integrator. Must have same shape as `current_state`.
log_slice_sample: The log of an auxiliary slice variable. It is used
together with `max_simulation_error` to avoid simulating trajectories with
too much numerical error.
max_simulation_error: Maximum simulation error to tolerate before
terminating the trajectory. Simulation error is the
`log_slice_sample` minus the log-joint probability at the simulated state.
seed: Integer to seed the random number generator.
Returns:
reverse_state: List of `Tensor`s representing the "reverse" states of the
NUTS trajectory. Has same shape as `current_state`.
reverse_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at the `reverse_state`.
reverse_grads_target_log_prob: List of `Tensor`s representing gradient of
`reverse_target_log_prob` with respect to `reverse_state`. Has same shape
as `reverse_state`.
reverse_momentum: List of `Tensor`s representing the momentums of
`reverse_state`. Has same shape as `reverse_state`.
forward_state: List of `Tensor`s representing the "forward" states of the
NUTS trajectory. Has same shape as `current_state`.
forward_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at the `forward_state`.
forward_grads_target_log_prob: List of `Tensor`s representing gradient of
`forward_target_log_prob` with respect to `forward_state`. Has same shape
as `forward_state`.
forward_momentum: List of `Tensor`s representing the momentums of
`forward_state`. Has same shape as `forward_state`.
next_state: List of `Tensor`s representing the next states of the NUTS
trajectory. Has same shape as `current_state`.
next_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at `next_state`.
next_grads_target_log_prob: List of `Tensor`s representing the gradient of
`next_target_log_prob` with respect to `next_state`.
num_states: Number of acceptable candidate states in the subtree. A state is
acceptable if it is "in the slice", that is, if its log-joint probability
with its momentum is greater than `log_slice_sample`.
continue_trajectory: bool determining whether to continue the simulation
trajectory. The trajectory is continued if no U-turns are encountered
within the built subtree, and if the log-probability accumulation due to
integration error does not exceed `max_simulation_error`.
|
experimental/no_u_turn_sampler/nuts.py
|
def _build_tree(value_and_gradients_fn,
current_state,
current_target_log_prob,
current_grads_target_log_prob,
current_momentum,
direction,
depth,
step_size,
log_slice_sample,
max_simulation_error=1000.,
seed=None):
"""Builds a tree at a given tree depth and at a given state.
The `current` state is immediately adjacent to, but outside of,
the subtrajectory spanned by the returned `forward` and `reverse` states.
Args:
value_and_gradients_fn: Python callable which takes an argument like
`*current_state` and returns a tuple of its (possibly unnormalized)
log-density under the target distribution and its gradient with respect to
each state.
current_state: List of `Tensor`s representing the current states of the
NUTS trajectory.
current_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at the `current_state`.
current_grads_target_log_prob: List of `Tensor`s representing gradient of
`current_target_log_prob` with respect to `current_state`. Must have same
shape as `current_state`.
current_momentum: List of `Tensor`s representing the momentums of
`current_state`. Must have same shape as `current_state`.
direction: int that is either -1 or 1. It determines whether to perform
leapfrog integration backwards (reverse) or forward in time respectively.
depth: non-negative int that indicates how deep of a tree to build.
Each call to `_build_tree` takes `2**depth` leapfrog steps.
step_size: List of `Tensor`s representing the step sizes for the leapfrog
integrator. Must have same shape as `current_state`.
log_slice_sample: The log of an auxiliary slice variable. It is used
together with `max_simulation_error` to avoid simulating trajectories with
too much numerical error.
max_simulation_error: Maximum simulation error to tolerate before
terminating the trajectory. Simulation error is the
`log_slice_sample` minus the log-joint probability at the simulated state.
seed: Integer to seed the random number generator.
Returns:
reverse_state: List of `Tensor`s representing the "reverse" states of the
NUTS trajectory. Has same shape as `current_state`.
reverse_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at the `reverse_state`.
reverse_grads_target_log_prob: List of `Tensor`s representing gradient of
`reverse_target_log_prob` with respect to `reverse_state`. Has same shape
as `reverse_state`.
reverse_momentum: List of `Tensor`s representing the momentums of
`reverse_state`. Has same shape as `reverse_state`.
forward_state: List of `Tensor`s representing the "forward" states of the
NUTS trajectory. Has same shape as `current_state`.
forward_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at the `forward_state`.
forward_grads_target_log_prob: List of `Tensor`s representing gradient of
`forward_target_log_prob` with respect to `forward_state`. Has same shape
as `forward_state`.
forward_momentum: List of `Tensor`s representing the momentums of
`forward_state`. Has same shape as `forward_state`.
next_state: List of `Tensor`s representing the next states of the NUTS
trajectory. Has same shape as `current_state`.
next_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at `next_state`.
next_grads_target_log_prob: List of `Tensor`s representing the gradient of
`next_target_log_prob` with respect to `next_state`.
num_states: Number of acceptable candidate states in the subtree. A state is
acceptable if it is "in the slice", that is, if its log-joint probability
with its momentum is greater than `log_slice_sample`.
continue_trajectory: bool determining whether to continue the simulation
trajectory. The trajectory is continued if no U-turns are encountered
within the built subtree, and if the log-probability accumulation due to
integration error does not exceed `max_simulation_error`.
"""
if depth == 0: # base case
# Take a leapfrog step. Terminate the tree-building if the simulation
# error from the leapfrog integrator is too large. States discovered by
# continuing the simulation are likely to have very low probability.
[
next_state,
next_target_log_prob,
next_grads_target_log_prob,
next_momentum,
] = _leapfrog(
value_and_gradients_fn=value_and_gradients_fn,
current_state=current_state,
current_grads_target_log_prob=current_grads_target_log_prob,
current_momentum=current_momentum,
step_size=direction * step_size)
next_log_joint = _log_joint(next_target_log_prob, next_momentum)
num_states = tf.cast(next_log_joint > log_slice_sample, dtype=tf.int32)
continue_trajectory = (next_log_joint >
log_slice_sample - max_simulation_error)
return [
next_state,
next_target_log_prob,
next_grads_target_log_prob,
next_momentum,
next_state,
next_target_log_prob,
next_grads_target_log_prob,
next_momentum,
next_state,
next_target_log_prob,
next_grads_target_log_prob,
num_states,
continue_trajectory,
]
# Build a tree at the current state.
seed_stream = tfd.SeedStream(seed, "build_tree")
[
reverse_state,
reverse_target_log_prob,
reverse_grads_target_log_prob,
reverse_momentum,
forward_state,
forward_target_log_prob,
forward_grads_target_log_prob,
forward_momentum,
next_state,
next_target_log_prob,
next_grads_target_log_prob,
num_states,
continue_trajectory,
] = _build_tree(value_and_gradients_fn=value_and_gradients_fn,
current_state=current_state,
current_target_log_prob=current_target_log_prob,
current_grads_target_log_prob=current_grads_target_log_prob,
current_momentum=current_momentum,
direction=direction,
depth=depth - 1,
step_size=step_size,
log_slice_sample=log_slice_sample,
seed=seed_stream())
if continue_trajectory:
# If the just-built subtree did not terminate, build a second subtree at
# the forward or reverse state, as appropriate.
if direction < 0:
[
reverse_state,
reverse_target_log_prob,
reverse_grads_target_log_prob,
reverse_momentum,
_,
_,
_,
_,
far_state,
far_target_log_prob,
far_grads_target_log_prob,
far_num_states,
far_continue_trajectory,
] = _build_tree(
value_and_gradients_fn=value_and_gradients_fn,
current_state=reverse_state,
current_target_log_prob=reverse_target_log_prob,
current_grads_target_log_prob=reverse_grads_target_log_prob,
current_momentum=reverse_momentum,
direction=direction,
depth=depth - 1,
step_size=step_size,
log_slice_sample=log_slice_sample,
seed=seed_stream())
else:
[
_,
_,
_,
_,
forward_state,
forward_target_log_prob,
forward_grads_target_log_prob,
forward_momentum,
far_state,
far_target_log_prob,
far_grads_target_log_prob,
far_num_states,
far_continue_trajectory,
] = _build_tree(
value_and_gradients_fn=value_and_gradients_fn,
current_state=forward_state,
current_target_log_prob=forward_target_log_prob,
current_grads_target_log_prob=forward_grads_target_log_prob,
current_momentum=forward_momentum,
direction=direction,
depth=depth - 1,
step_size=step_size,
log_slice_sample=log_slice_sample,
seed=seed_stream())
# Propose either `next_state` (which came from the first subtree and so is
# nearby) or the new forward/reverse state (which came from the second
# subtree and so is far away).
num_states += far_num_states
accept_far_state = _random_bernoulli(
[],
probs=far_num_states / num_states,
dtype=tf.bool,
seed=seed_stream())
if accept_far_state:
next_state = far_state
next_target_log_prob = far_target_log_prob
next_grads_target_log_prob = far_grads_target_log_prob
# Continue the NUTS trajectory if the far subtree did not terminate either,
# and if the reverse-most and forward-most states do not exhibit a U-turn.
has_no_u_turn = tf.logical_and(
_has_no_u_turn(forward_state, reverse_state, forward_momentum),
_has_no_u_turn(forward_state, reverse_state, reverse_momentum))
continue_trajectory = far_continue_trajectory and has_no_u_turn
return [
reverse_state,
reverse_target_log_prob,
reverse_grads_target_log_prob,
reverse_momentum,
forward_state,
forward_target_log_prob,
forward_grads_target_log_prob,
forward_momentum,
next_state,
next_target_log_prob,
next_grads_target_log_prob,
num_states,
continue_trajectory,
]
|
def _build_tree(value_and_gradients_fn,
current_state,
current_target_log_prob,
current_grads_target_log_prob,
current_momentum,
direction,
depth,
step_size,
log_slice_sample,
max_simulation_error=1000.,
seed=None):
"""Builds a tree at a given tree depth and at a given state.
The `current` state is immediately adjacent to, but outside of,
the subtrajectory spanned by the returned `forward` and `reverse` states.
Args:
value_and_gradients_fn: Python callable which takes an argument like
`*current_state` and returns a tuple of its (possibly unnormalized)
log-density under the target distribution and its gradient with respect to
each state.
current_state: List of `Tensor`s representing the current states of the
NUTS trajectory.
current_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at the `current_state`.
current_grads_target_log_prob: List of `Tensor`s representing gradient of
`current_target_log_prob` with respect to `current_state`. Must have same
shape as `current_state`.
current_momentum: List of `Tensor`s representing the momentums of
`current_state`. Must have same shape as `current_state`.
direction: int that is either -1 or 1. It determines whether to perform
leapfrog integration backwards (reverse) or forward in time respectively.
depth: non-negative int that indicates how deep of a tree to build.
Each call to `_build_tree` takes `2**depth` leapfrog steps.
step_size: List of `Tensor`s representing the step sizes for the leapfrog
integrator. Must have same shape as `current_state`.
log_slice_sample: The log of an auxiliary slice variable. It is used
together with `max_simulation_error` to avoid simulating trajectories with
too much numerical error.
max_simulation_error: Maximum simulation error to tolerate before
terminating the trajectory. Simulation error is the
`log_slice_sample` minus the log-joint probability at the simulated state.
seed: Integer to seed the random number generator.
Returns:
reverse_state: List of `Tensor`s representing the "reverse" states of the
NUTS trajectory. Has same shape as `current_state`.
reverse_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at the `reverse_state`.
reverse_grads_target_log_prob: List of `Tensor`s representing gradient of
`reverse_target_log_prob` with respect to `reverse_state`. Has same shape
as `reverse_state`.
reverse_momentum: List of `Tensor`s representing the momentums of
`reverse_state`. Has same shape as `reverse_state`.
forward_state: List of `Tensor`s representing the "forward" states of the
NUTS trajectory. Has same shape as `current_state`.
forward_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at the `forward_state`.
forward_grads_target_log_prob: List of `Tensor`s representing gradient of
`forward_target_log_prob` with respect to `forward_state`. Has same shape
as `forward_state`.
forward_momentum: List of `Tensor`s representing the momentums of
`forward_state`. Has same shape as `forward_state`.
next_state: List of `Tensor`s representing the next states of the NUTS
trajectory. Has same shape as `current_state`.
next_target_log_prob: Scalar `Tensor` representing the value of
`target_log_prob_fn` at `next_state`.
next_grads_target_log_prob: List of `Tensor`s representing the gradient of
`next_target_log_prob` with respect to `next_state`.
num_states: Number of acceptable candidate states in the subtree. A state is
acceptable if it is "in the slice", that is, if its log-joint probability
with its momentum is greater than `log_slice_sample`.
continue_trajectory: bool determining whether to continue the simulation
trajectory. The trajectory is continued if no U-turns are encountered
within the built subtree, and if the log-probability accumulation due to
integration error does not exceed `max_simulation_error`.
"""
if depth == 0: # base case
# Take a leapfrog step. Terminate the tree-building if the simulation
# error from the leapfrog integrator is too large. States discovered by
# continuing the simulation are likely to have very low probability.
[
next_state,
next_target_log_prob,
next_grads_target_log_prob,
next_momentum,
] = _leapfrog(
value_and_gradients_fn=value_and_gradients_fn,
current_state=current_state,
current_grads_target_log_prob=current_grads_target_log_prob,
current_momentum=current_momentum,
step_size=direction * step_size)
next_log_joint = _log_joint(next_target_log_prob, next_momentum)
num_states = tf.cast(next_log_joint > log_slice_sample, dtype=tf.int32)
continue_trajectory = (next_log_joint >
log_slice_sample - max_simulation_error)
return [
next_state,
next_target_log_prob,
next_grads_target_log_prob,
next_momentum,
next_state,
next_target_log_prob,
next_grads_target_log_prob,
next_momentum,
next_state,
next_target_log_prob,
next_grads_target_log_prob,
num_states,
continue_trajectory,
]
# Build a tree at the current state.
seed_stream = tfd.SeedStream(seed, "build_tree")
[
reverse_state,
reverse_target_log_prob,
reverse_grads_target_log_prob,
reverse_momentum,
forward_state,
forward_target_log_prob,
forward_grads_target_log_prob,
forward_momentum,
next_state,
next_target_log_prob,
next_grads_target_log_prob,
num_states,
continue_trajectory,
] = _build_tree(value_and_gradients_fn=value_and_gradients_fn,
current_state=current_state,
current_target_log_prob=current_target_log_prob,
current_grads_target_log_prob=current_grads_target_log_prob,
current_momentum=current_momentum,
direction=direction,
depth=depth - 1,
step_size=step_size,
log_slice_sample=log_slice_sample,
seed=seed_stream())
if continue_trajectory:
# If the just-built subtree did not terminate, build a second subtree at
# the forward or reverse state, as appropriate.
if direction < 0:
[
reverse_state,
reverse_target_log_prob,
reverse_grads_target_log_prob,
reverse_momentum,
_,
_,
_,
_,
far_state,
far_target_log_prob,
far_grads_target_log_prob,
far_num_states,
far_continue_trajectory,
] = _build_tree(
value_and_gradients_fn=value_and_gradients_fn,
current_state=reverse_state,
current_target_log_prob=reverse_target_log_prob,
current_grads_target_log_prob=reverse_grads_target_log_prob,
current_momentum=reverse_momentum,
direction=direction,
depth=depth - 1,
step_size=step_size,
log_slice_sample=log_slice_sample,
seed=seed_stream())
else:
[
_,
_,
_,
_,
forward_state,
forward_target_log_prob,
forward_grads_target_log_prob,
forward_momentum,
far_state,
far_target_log_prob,
far_grads_target_log_prob,
far_num_states,
far_continue_trajectory,
] = _build_tree(
value_and_gradients_fn=value_and_gradients_fn,
current_state=forward_state,
current_target_log_prob=forward_target_log_prob,
current_grads_target_log_prob=forward_grads_target_log_prob,
current_momentum=forward_momentum,
direction=direction,
depth=depth - 1,
step_size=step_size,
log_slice_sample=log_slice_sample,
seed=seed_stream())
# Propose either `next_state` (which came from the first subtree and so is
# nearby) or the new forward/reverse state (which came from the second
# subtree and so is far away).
num_states += far_num_states
accept_far_state = _random_bernoulli(
[],
probs=far_num_states / num_states,
dtype=tf.bool,
seed=seed_stream())
if accept_far_state:
next_state = far_state
next_target_log_prob = far_target_log_prob
next_grads_target_log_prob = far_grads_target_log_prob
# Continue the NUTS trajectory if the far subtree did not terminate either,
# and if the reverse-most and forward-most states do not exhibit a U-turn.
has_no_u_turn = tf.logical_and(
_has_no_u_turn(forward_state, reverse_state, forward_momentum),
_has_no_u_turn(forward_state, reverse_state, reverse_momentum))
continue_trajectory = far_continue_trajectory and has_no_u_turn
return [
reverse_state,
reverse_target_log_prob,
reverse_grads_target_log_prob,
reverse_momentum,
forward_state,
forward_target_log_prob,
forward_grads_target_log_prob,
forward_momentum,
next_state,
next_target_log_prob,
next_grads_target_log_prob,
num_states,
continue_trajectory,
]
|
[
"Builds",
"a",
"tree",
"at",
"a",
"given",
"tree",
"depth",
"and",
"at",
"a",
"given",
"state",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/no_u_turn_sampler/nuts.py#L225-L454
|
[
"def",
"_build_tree",
"(",
"value_and_gradients_fn",
",",
"current_state",
",",
"current_target_log_prob",
",",
"current_grads_target_log_prob",
",",
"current_momentum",
",",
"direction",
",",
"depth",
",",
"step_size",
",",
"log_slice_sample",
",",
"max_simulation_error",
"=",
"1000.",
",",
"seed",
"=",
"None",
")",
":",
"if",
"depth",
"==",
"0",
":",
"# base case",
"# Take a leapfrog step. Terminate the tree-building if the simulation",
"# error from the leapfrog integrator is too large. States discovered by",
"# continuing the simulation are likely to have very low probability.",
"[",
"next_state",
",",
"next_target_log_prob",
",",
"next_grads_target_log_prob",
",",
"next_momentum",
",",
"]",
"=",
"_leapfrog",
"(",
"value_and_gradients_fn",
"=",
"value_and_gradients_fn",
",",
"current_state",
"=",
"current_state",
",",
"current_grads_target_log_prob",
"=",
"current_grads_target_log_prob",
",",
"current_momentum",
"=",
"current_momentum",
",",
"step_size",
"=",
"direction",
"*",
"step_size",
")",
"next_log_joint",
"=",
"_log_joint",
"(",
"next_target_log_prob",
",",
"next_momentum",
")",
"num_states",
"=",
"tf",
".",
"cast",
"(",
"next_log_joint",
">",
"log_slice_sample",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"continue_trajectory",
"=",
"(",
"next_log_joint",
">",
"log_slice_sample",
"-",
"max_simulation_error",
")",
"return",
"[",
"next_state",
",",
"next_target_log_prob",
",",
"next_grads_target_log_prob",
",",
"next_momentum",
",",
"next_state",
",",
"next_target_log_prob",
",",
"next_grads_target_log_prob",
",",
"next_momentum",
",",
"next_state",
",",
"next_target_log_prob",
",",
"next_grads_target_log_prob",
",",
"num_states",
",",
"continue_trajectory",
",",
"]",
"# Build a tree at the current state.",
"seed_stream",
"=",
"tfd",
".",
"SeedStream",
"(",
"seed",
",",
"\"build_tree\"",
")",
"[",
"reverse_state",
",",
"reverse_target_log_prob",
",",
"reverse_grads_target_log_prob",
",",
"reverse_momentum",
",",
"forward_state",
",",
"forward_target_log_prob",
",",
"forward_grads_target_log_prob",
",",
"forward_momentum",
",",
"next_state",
",",
"next_target_log_prob",
",",
"next_grads_target_log_prob",
",",
"num_states",
",",
"continue_trajectory",
",",
"]",
"=",
"_build_tree",
"(",
"value_and_gradients_fn",
"=",
"value_and_gradients_fn",
",",
"current_state",
"=",
"current_state",
",",
"current_target_log_prob",
"=",
"current_target_log_prob",
",",
"current_grads_target_log_prob",
"=",
"current_grads_target_log_prob",
",",
"current_momentum",
"=",
"current_momentum",
",",
"direction",
"=",
"direction",
",",
"depth",
"=",
"depth",
"-",
"1",
",",
"step_size",
"=",
"step_size",
",",
"log_slice_sample",
"=",
"log_slice_sample",
",",
"seed",
"=",
"seed_stream",
"(",
")",
")",
"if",
"continue_trajectory",
":",
"# If the just-built subtree did not terminate, build a second subtree at",
"# the forward or reverse state, as appropriate.",
"if",
"direction",
"<",
"0",
":",
"[",
"reverse_state",
",",
"reverse_target_log_prob",
",",
"reverse_grads_target_log_prob",
",",
"reverse_momentum",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
",",
"far_state",
",",
"far_target_log_prob",
",",
"far_grads_target_log_prob",
",",
"far_num_states",
",",
"far_continue_trajectory",
",",
"]",
"=",
"_build_tree",
"(",
"value_and_gradients_fn",
"=",
"value_and_gradients_fn",
",",
"current_state",
"=",
"reverse_state",
",",
"current_target_log_prob",
"=",
"reverse_target_log_prob",
",",
"current_grads_target_log_prob",
"=",
"reverse_grads_target_log_prob",
",",
"current_momentum",
"=",
"reverse_momentum",
",",
"direction",
"=",
"direction",
",",
"depth",
"=",
"depth",
"-",
"1",
",",
"step_size",
"=",
"step_size",
",",
"log_slice_sample",
"=",
"log_slice_sample",
",",
"seed",
"=",
"seed_stream",
"(",
")",
")",
"else",
":",
"[",
"_",
",",
"_",
",",
"_",
",",
"_",
",",
"forward_state",
",",
"forward_target_log_prob",
",",
"forward_grads_target_log_prob",
",",
"forward_momentum",
",",
"far_state",
",",
"far_target_log_prob",
",",
"far_grads_target_log_prob",
",",
"far_num_states",
",",
"far_continue_trajectory",
",",
"]",
"=",
"_build_tree",
"(",
"value_and_gradients_fn",
"=",
"value_and_gradients_fn",
",",
"current_state",
"=",
"forward_state",
",",
"current_target_log_prob",
"=",
"forward_target_log_prob",
",",
"current_grads_target_log_prob",
"=",
"forward_grads_target_log_prob",
",",
"current_momentum",
"=",
"forward_momentum",
",",
"direction",
"=",
"direction",
",",
"depth",
"=",
"depth",
"-",
"1",
",",
"step_size",
"=",
"step_size",
",",
"log_slice_sample",
"=",
"log_slice_sample",
",",
"seed",
"=",
"seed_stream",
"(",
")",
")",
"# Propose either `next_state` (which came from the first subtree and so is",
"# nearby) or the new forward/reverse state (which came from the second",
"# subtree and so is far away).",
"num_states",
"+=",
"far_num_states",
"accept_far_state",
"=",
"_random_bernoulli",
"(",
"[",
"]",
",",
"probs",
"=",
"far_num_states",
"/",
"num_states",
",",
"dtype",
"=",
"tf",
".",
"bool",
",",
"seed",
"=",
"seed_stream",
"(",
")",
")",
"if",
"accept_far_state",
":",
"next_state",
"=",
"far_state",
"next_target_log_prob",
"=",
"far_target_log_prob",
"next_grads_target_log_prob",
"=",
"far_grads_target_log_prob",
"# Continue the NUTS trajectory if the far subtree did not terminate either,",
"# and if the reverse-most and forward-most states do not exhibit a U-turn.",
"has_no_u_turn",
"=",
"tf",
".",
"logical_and",
"(",
"_has_no_u_turn",
"(",
"forward_state",
",",
"reverse_state",
",",
"forward_momentum",
")",
",",
"_has_no_u_turn",
"(",
"forward_state",
",",
"reverse_state",
",",
"reverse_momentum",
")",
")",
"continue_trajectory",
"=",
"far_continue_trajectory",
"and",
"has_no_u_turn",
"return",
"[",
"reverse_state",
",",
"reverse_target_log_prob",
",",
"reverse_grads_target_log_prob",
",",
"reverse_momentum",
",",
"forward_state",
",",
"forward_target_log_prob",
",",
"forward_grads_target_log_prob",
",",
"forward_momentum",
",",
"next_state",
",",
"next_target_log_prob",
",",
"next_grads_target_log_prob",
",",
"num_states",
",",
"continue_trajectory",
",",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
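A tiny pure-Python sketch (no TensorFlow; the function name is illustrative) of the recursive doubling performed by `_build_tree` above: a tree built at a given `depth` takes `2**depth` leapfrog steps, because every non-base call builds two subtrees of depth one less.

def leapfrog_steps(depth):
  # Base case: depth 0 takes exactly one leapfrog step.
  if depth == 0:
    return 1
  # Otherwise: build a first subtree, then (if it did not terminate) a second.
  return leapfrog_steps(depth - 1) + leapfrog_steps(depth - 1)

assert [leapfrog_steps(d) for d in range(5)] == [1, 2, 4, 8, 16]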
test
|
_embed_no_none_gradient_check
|
Wraps value and gradients function to assist with None gradients.
|
experimental/no_u_turn_sampler/nuts.py
|
def _embed_no_none_gradient_check(value_and_gradients_fn):
"""Wraps value and gradients function to assist with None gradients."""
@functools.wraps(value_and_gradients_fn)
def func_wrapped(*args, **kwargs):
"""Wrapped function which checks for None gradients."""
value, grads = value_and_gradients_fn(*args, **kwargs)
if any(grad is None for grad in grads):
raise ValueError("Gradient is None for a state.")
return value, grads
return func_wrapped
|
def _embed_no_none_gradient_check(value_and_gradients_fn):
"""Wraps value and gradients function to assist with None gradients."""
@functools.wraps(value_and_gradients_fn)
def func_wrapped(*args, **kwargs):
"""Wrapped function which checks for None gradients."""
value, grads = value_and_gradients_fn(*args, **kwargs)
if any(grad is None for grad in grads):
raise ValueError("Gradient is None for a state.")
return value, grads
return func_wrapped
|
[
"Wraps",
"value",
"and",
"gradients",
"function",
"to",
"assist",
"with",
"None",
"gradients",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/no_u_turn_sampler/nuts.py#L457-L466
|
[
"def",
"_embed_no_none_gradient_check",
"(",
"value_and_gradients_fn",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"value_and_gradients_fn",
")",
"def",
"func_wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Wrapped function which checks for None gradients.\"\"\"",
"value",
",",
"grads",
"=",
"value_and_gradients_fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"any",
"(",
"grad",
"is",
"None",
"for",
"grad",
"in",
"grads",
")",
":",
"raise",
"ValueError",
"(",
"\"Gradient is None for a state.\"",
")",
"return",
"value",
",",
"grads",
"return",
"func_wrapped"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
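A stand-alone sketch (assuming TF2 eager; the gradient tape here is illustrative, not the wrapped `value_and_gradients_fn` used above) of the failure mode the wrapper guards against: a state tensor that never enters the target log-probability yields a `None` gradient, which the check converts into an explicit `ValueError` instead of a confusing failure downstream.

import tensorflow as tf

def value_and_gradients_fn(x, y):
  with tf.GradientTape() as tape:
    tape.watch([x, y])
    value = tf.reduce_sum(x ** 2.)  # `y` never enters the computation
  return value, tape.gradient(value, [x, y])

value, grads = value_and_gradients_fn(tf.constant([1., 2.]), tf.constant(3.))
print(grads[1])  # None -- the wrapped check would raise ValueError for this state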
test
|
_has_no_u_turn
|
If two given states and momentum do not exhibit a U-turn pattern.
|
experimental/no_u_turn_sampler/nuts.py
|
def _has_no_u_turn(state_one, state_two, momentum):
"""If two given states and momentum do not exhibit a U-turn pattern."""
dot_product = sum([
tf.reduce_sum(input_tensor=(s1 - s2) * m)
for s1, s2, m in zip(state_one, state_two, momentum)
])
return dot_product > 0
|
def _has_no_u_turn(state_one, state_two, momentum):
"""If two given states and momentum do not exhibit a U-turn pattern."""
dot_product = sum([
tf.reduce_sum(input_tensor=(s1 - s2) * m)
for s1, s2, m in zip(state_one, state_two, momentum)
])
return dot_product > 0
|
[
"If",
"two",
"given",
"states",
"and",
"momentum",
"do",
"not",
"exhibit",
"a",
"U",
"-",
"turn",
"pattern",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/no_u_turn_sampler/nuts.py#L469-L475
|
[
"def",
"_has_no_u_turn",
"(",
"state_one",
",",
"state_two",
",",
"momentum",
")",
":",
"dot_product",
"=",
"sum",
"(",
"[",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"(",
"s1",
"-",
"s2",
")",
"*",
"m",
")",
"for",
"s1",
",",
"s2",
",",
"m",
"in",
"zip",
"(",
"state_one",
",",
"state_two",
",",
"momentum",
")",
"]",
")",
"return",
"dot_product",
">",
"0"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
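A numeric sketch of the criterion in `_has_no_u_turn`: the trajectory keeps expanding while the momentum at each end still points away from the opposite end, i.e. while the dot product of the state difference and the momentum stays positive. The helper is reproduced inline so the snippet runs on its own (TF2 eager assumed).

import tensorflow as tf

def has_no_u_turn(state_one, state_two, momentum):
  dot_product = sum(
      tf.reduce_sum((s1 - s2) * m)
      for s1, s2, m in zip(state_one, state_two, momentum))
  return dot_product > 0

forward_state = [tf.constant([2., 0.])]
reverse_state = [tf.constant([0., 0.])]
print(has_no_u_turn(forward_state, reverse_state,
                    [tf.constant([1., 0.])]))   # True: still moving apart
print(has_no_u_turn(forward_state, reverse_state,
                    [tf.constant([-1., 0.])]))  # False: turning back (a U-turn)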
test
|
_leapfrog
|
Runs one step of leapfrog integration.
|
experimental/no_u_turn_sampler/nuts.py
|
def _leapfrog(value_and_gradients_fn,
current_state,
current_grads_target_log_prob,
current_momentum,
step_size):
"""Runs one step of leapfrog integration."""
mid_momentum = [
m + 0.5 * step * g for m, step, g in
zip(current_momentum, step_size, current_grads_target_log_prob)]
next_state = [
s + step * m for s, step, m in
zip(current_state, step_size, mid_momentum)]
next_target_log_prob, next_grads_target_log_prob = value_and_gradients_fn(
*next_state)
next_momentum = [
m + 0.5 * step * g for m, step, g in
zip(mid_momentum, step_size, next_grads_target_log_prob)]
return [
next_state,
next_target_log_prob,
next_grads_target_log_prob,
next_momentum,
]
|
def _leapfrog(value_and_gradients_fn,
current_state,
current_grads_target_log_prob,
current_momentum,
step_size):
"""Runs one step of leapfrog integration."""
mid_momentum = [
m + 0.5 * step * g for m, step, g in
zip(current_momentum, step_size, current_grads_target_log_prob)]
next_state = [
s + step * m for s, step, m in
zip(current_state, step_size, mid_momentum)]
next_target_log_prob, next_grads_target_log_prob = value_and_gradients_fn(
*next_state)
next_momentum = [
m + 0.5 * step * g for m, step, g in
zip(mid_momentum, step_size, next_grads_target_log_prob)]
return [
next_state,
next_target_log_prob,
next_grads_target_log_prob,
next_momentum,
]
|
[
"Runs",
"one",
"step",
"of",
"leapfrog",
"integration",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/no_u_turn_sampler/nuts.py#L478-L500
|
[
"def",
"_leapfrog",
"(",
"value_and_gradients_fn",
",",
"current_state",
",",
"current_grads_target_log_prob",
",",
"current_momentum",
",",
"step_size",
")",
":",
"mid_momentum",
"=",
"[",
"m",
"+",
"0.5",
"*",
"step",
"*",
"g",
"for",
"m",
",",
"step",
",",
"g",
"in",
"zip",
"(",
"current_momentum",
",",
"step_size",
",",
"current_grads_target_log_prob",
")",
"]",
"next_state",
"=",
"[",
"s",
"+",
"step",
"*",
"m",
"for",
"s",
",",
"step",
",",
"m",
"in",
"zip",
"(",
"current_state",
",",
"step_size",
",",
"mid_momentum",
")",
"]",
"next_target_log_prob",
",",
"next_grads_target_log_prob",
"=",
"value_and_gradients_fn",
"(",
"*",
"next_state",
")",
"next_momentum",
"=",
"[",
"m",
"+",
"0.5",
"*",
"step",
"*",
"g",
"for",
"m",
",",
"step",
",",
"g",
"in",
"zip",
"(",
"mid_momentum",
",",
"step_size",
",",
"next_grads_target_log_prob",
")",
"]",
"return",
"[",
"next_state",
",",
"next_target_log_prob",
",",
"next_grads_target_log_prob",
",",
"next_momentum",
",",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
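A self-contained sketch of one leapfrog step on a standard normal target, mirroring `_leapfrog` above but written without the private helpers (TF2 eager assumed; the hand-coded gradient `-x` is specific to this toy target).

import tensorflow as tf

def value_and_gradients_fn(x):
  # Standard normal log-density (up to a constant) and its gradient.
  return -0.5 * tf.reduce_sum(x ** 2.), [-x]

current_state = [tf.constant([1.0])]
current_momentum = [tf.constant([0.5])]
step_size = [tf.constant(0.1)]

# Half step for momentum, full step for state, half step for momentum.
_, current_grads = value_and_gradients_fn(*current_state)
mid_momentum = [m + 0.5 * h * g
                for m, h, g in zip(current_momentum, step_size, current_grads)]
next_state = [s + h * m for s, h, m in zip(current_state, step_size, mid_momentum)]
next_target_log_prob, next_grads = value_and_gradients_fn(*next_state)
next_momentum = [m + 0.5 * h * g
                 for m, h, g in zip(mid_momentum, step_size, next_grads)]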
test
|
_log_joint
|
Log-joint probability given a state's log-probability and momentum.
|
experimental/no_u_turn_sampler/nuts.py
|
def _log_joint(current_target_log_prob, current_momentum):
"""Log-joint probability given a state's log-probability and momentum."""
momentum_log_prob = -sum(
[tf.reduce_sum(input_tensor=0.5 * (m**2.)) for m in current_momentum])
return current_target_log_prob + momentum_log_prob
|
def _log_joint(current_target_log_prob, current_momentum):
"""Log-joint probability given a state's log-probability and momentum."""
momentum_log_prob = -sum(
[tf.reduce_sum(input_tensor=0.5 * (m**2.)) for m in current_momentum])
return current_target_log_prob + momentum_log_prob
|
[
"Log",
"-",
"joint",
"probability",
"given",
"a",
"state",
"s",
"log",
"-",
"probability",
"and",
"momentum",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/no_u_turn_sampler/nuts.py#L503-L507
|
[
"def",
"_log_joint",
"(",
"current_target_log_prob",
",",
"current_momentum",
")",
":",
"momentum_log_prob",
"=",
"-",
"sum",
"(",
"[",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"0.5",
"*",
"(",
"m",
"**",
"2.",
")",
")",
"for",
"m",
"in",
"current_momentum",
"]",
")",
"return",
"current_target_log_prob",
"+",
"momentum_log_prob"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
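A small numeric sketch of the Hamiltonian computed by `_log_joint`: the log-joint is the target log-probability minus the quadratic kinetic energy of the momentum (TF2 eager assumed; the numbers are made up).

import tensorflow as tf

current_target_log_prob = tf.constant(-1.5)
current_momentum = [tf.constant([0.3, -0.4])]

kinetic_energy = sum(tf.reduce_sum(0.5 * m ** 2.) for m in current_momentum)
log_joint = current_target_log_prob - kinetic_energy
print(log_joint.numpy())  # -1.625 == -1.5 - 0.5 * (0.3**2 + 0.4**2)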
test
|
_random_bernoulli
|
Returns samples from a Bernoulli distribution.
|
experimental/no_u_turn_sampler/nuts.py
|
def _random_bernoulli(shape, probs, dtype=tf.int32, seed=None, name=None):
"""Returns samples from a Bernoulli distribution."""
with tf.compat.v1.name_scope(name, "random_bernoulli", [shape, probs]):
probs = tf.convert_to_tensor(value=probs)
random_uniform = tf.random.uniform(shape, dtype=probs.dtype, seed=seed)
return tf.cast(tf.less(random_uniform, probs), dtype)
|
def _random_bernoulli(shape, probs, dtype=tf.int32, seed=None, name=None):
"""Returns samples from a Bernoulli distribution."""
with tf.compat.v1.name_scope(name, "random_bernoulli", [shape, probs]):
probs = tf.convert_to_tensor(value=probs)
random_uniform = tf.random.uniform(shape, dtype=probs.dtype, seed=seed)
return tf.cast(tf.less(random_uniform, probs), dtype)
|
[
"Returns",
"samples",
"from",
"a",
"Bernoulli",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/no_u_turn_sampler/nuts.py#L510-L515
|
[
"def",
"_random_bernoulli",
"(",
"shape",
",",
"probs",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"seed",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"\"random_bernoulli\"",
",",
"[",
"shape",
",",
"probs",
"]",
")",
":",
"probs",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"probs",
")",
"random_uniform",
"=",
"tf",
".",
"random",
".",
"uniform",
"(",
"shape",
",",
"dtype",
"=",
"probs",
".",
"dtype",
",",
"seed",
"=",
"seed",
")",
"return",
"tf",
".",
"cast",
"(",
"tf",
".",
"less",
"(",
"random_uniform",
",",
"probs",
")",
",",
"dtype",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
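A usage sketch of `_random_bernoulli` in the role it plays above: a scalar accept/reject draw whose probability is the ratio of candidate-state counts. The helper is reproduced inline (without the name scope) so the snippet runs on its own; the counts are made up (TF2 eager assumed).

import tensorflow as tf

def random_bernoulli(shape, probs, dtype=tf.int32, seed=None):
  probs = tf.convert_to_tensor(value=probs)
  random_uniform = tf.random.uniform(shape, dtype=probs.dtype, seed=seed)
  return tf.cast(tf.less(random_uniform, probs), dtype)

num_states_in_subtree, num_states = 3., 5.
accept = random_bernoulli(
    [], probs=tf.minimum(1., num_states_in_subtree / num_states),
    dtype=tf.bool, seed=42)
print(bool(accept))  # True with probability 0.6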
test
|
default_loc_scale_fn
|
Makes closure which creates `loc`, `scale` params from `tf.get_variable`.
This function produces a closure which produces `loc`, `scale` using
`tf.get_variable`. The closure accepts the following arguments:
dtype: Type of parameter's event.
shape: Python `list`-like representing the parameter's event shape.
name: Python `str` name prepended to any created (or existing)
`tf.Variable`s.
trainable: Python `bool` indicating all created `tf.Variable`s should be
added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
add_variable_fn: `tf.get_variable`-like `callable` used to create (or
access existing) `tf.Variable`s.
Args:
is_singular: Python `bool` indicating if `scale is None`. Default: `False`.
loc_initializer: Initializer function for the `loc` parameters.
The default is `tf.random_normal_initializer(mean=0., stddev=0.1)`.
untransformed_scale_initializer: Initializer function for the `scale`
parameters. Default value: `tf.random_normal_initializer(mean=-3.,
stddev=0.1)`. This implies the softplus transformed result is initialized
near `0`. It allows a `Normal` distribution with `scale` parameter set to
this value to approximately act like a point mass.
loc_regularizer: Regularizer function for the `loc` parameters.
The default (`None`) is to use the `tf.get_variable` default.
untransformed_scale_regularizer: Regularizer function for the `scale`
parameters. The default (`None`) is to use the `tf.get_variable` default.
loc_constraint: An optional projection function to be applied to the
loc after being updated by an `Optimizer`. The function must take as input
the unprojected variable and must return the projected variable (which
must have the same shape). Constraints are not safe to use when doing
asynchronous distributed training.
The default (`None`) is to use the `tf.get_variable` default.
untransformed_scale_constraint: An optional projection function to be
applied to the `scale` parameters after being updated by an `Optimizer`
(e.g. used to implement norm constraints or value constraints). The
function must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are not
safe to use when doing asynchronous distributed training. The default
(`None`) is to use the `tf.get_variable` default.
Returns:
default_loc_scale_fn: Python `callable` which instantiates `loc`, `scale`
parameters from args: `dtype, shape, name, trainable, add_variable_fn`.
|
tensorflow_probability/python/layers/util.py
|
def default_loc_scale_fn(
is_singular=False,
loc_initializer=tf.compat.v1.initializers.random_normal(stddev=0.1),
untransformed_scale_initializer=tf.compat.v1.initializers.random_normal(
mean=-3., stddev=0.1),
loc_regularizer=None,
untransformed_scale_regularizer=None,
loc_constraint=None,
untransformed_scale_constraint=None):
"""Makes closure which creates `loc`, `scale` params from `tf.get_variable`.
This function produces a closure which produces `loc`, `scale` using
`tf.get_variable`. The closure accepts the following arguments:
dtype: Type of parameter's event.
shape: Python `list`-like representing the parameter's event shape.
name: Python `str` name prepended to any created (or existing)
`tf.Variable`s.
trainable: Python `bool` indicating all created `tf.Variable`s should be
added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
add_variable_fn: `tf.get_variable`-like `callable` used to create (or
access existing) `tf.Variable`s.
Args:
is_singular: Python `bool` indicating if `scale is None`. Default: `False`.
loc_initializer: Initializer function for the `loc` parameters.
The default is `tf.random_normal_initializer(mean=0., stddev=0.1)`.
untransformed_scale_initializer: Initializer function for the `scale`
parameters. Default value: `tf.random_normal_initializer(mean=-3.,
stddev=0.1)`. This implies the softplus transformed result is initialized
near `0`. It allows a `Normal` distribution with `scale` parameter set to
this value to approximately act like a point mass.
loc_regularizer: Regularizer function for the `loc` parameters.
The default (`None`) is to use the `tf.get_variable` default.
untransformed_scale_regularizer: Regularizer function for the `scale`
parameters. The default (`None`) is to use the `tf.get_variable` default.
loc_constraint: An optional projection function to be applied to the
loc after being updated by an `Optimizer`. The function must take as input
the unprojected variable and must return the projected variable (which
must have the same shape). Constraints are not safe to use when doing
asynchronous distributed training.
The default (`None`) is to use the `tf.get_variable` default.
untransformed_scale_constraint: An optional projection function to be
applied to the `scale` parameters after being updated by an `Optimizer`
(e.g. used to implement norm constraints or value constraints). The
function must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are not
safe to use when doing asynchronous distributed training. The default
(`None`) is to use the `tf.get_variable` default.
Returns:
default_loc_scale_fn: Python `callable` which instantiates `loc`, `scale`
parameters from args: `dtype, shape, name, trainable, add_variable_fn`.
"""
def _fn(dtype, shape, name, trainable, add_variable_fn):
"""Creates `loc`, `scale` parameters."""
loc = add_variable_fn(
name=name + '_loc',
shape=shape,
initializer=loc_initializer,
regularizer=loc_regularizer,
constraint=loc_constraint,
dtype=dtype,
trainable=trainable)
if is_singular:
return loc, None
untransformed_scale = add_variable_fn(
name=name + '_untransformed_scale',
shape=shape,
initializer=untransformed_scale_initializer,
regularizer=untransformed_scale_regularizer,
constraint=untransformed_scale_constraint,
dtype=dtype,
trainable=trainable)
scale = (np.finfo(dtype.as_numpy_dtype).eps +
tf.nn.softplus(untransformed_scale))
return loc, scale
return _fn
|
def default_loc_scale_fn(
is_singular=False,
loc_initializer=tf.compat.v1.initializers.random_normal(stddev=0.1),
untransformed_scale_initializer=tf.compat.v1.initializers.random_normal(
mean=-3., stddev=0.1),
loc_regularizer=None,
untransformed_scale_regularizer=None,
loc_constraint=None,
untransformed_scale_constraint=None):
"""Makes closure which creates `loc`, `scale` params from `tf.get_variable`.
This function produces a closure which produces `loc`, `scale` using
`tf.get_variable`. The closure accepts the following arguments:
dtype: Type of parameter's event.
shape: Python `list`-like representing the parameter's event shape.
name: Python `str` name prepended to any created (or existing)
`tf.Variable`s.
trainable: Python `bool` indicating all created `tf.Variable`s should be
added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
add_variable_fn: `tf.get_variable`-like `callable` used to create (or
access existing) `tf.Variable`s.
Args:
is_singular: Python `bool` indicating if `scale is None`. Default: `False`.
loc_initializer: Initializer function for the `loc` parameters.
The default is `tf.random_normal_initializer(mean=0., stddev=0.1)`.
untransformed_scale_initializer: Initializer function for the `scale`
parameters. Default value: `tf.random_normal_initializer(mean=-3.,
stddev=0.1)`. This implies the softplus transformed result is initialized
near `0`. It allows a `Normal` distribution with `scale` parameter set to
this value to approximately act like a point mass.
loc_regularizer: Regularizer function for the `loc` parameters.
The default (`None`) is to use the `tf.get_variable` default.
untransformed_scale_regularizer: Regularizer function for the `scale`
parameters. The default (`None`) is to use the `tf.get_variable` default.
loc_constraint: An optional projection function to be applied to the
loc after being updated by an `Optimizer`. The function must take as input
the unprojected variable and must return the projected variable (which
must have the same shape). Constraints are not safe to use when doing
asynchronous distributed training.
The default (`None`) is to use the `tf.get_variable` default.
untransformed_scale_constraint: An optional projection function to be
applied to the `scale` parameters after being updated by an `Optimizer`
(e.g. used to implement norm constraints or value constraints). The
function must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are not
safe to use when doing asynchronous distributed training. The default
(`None`) is to use the `tf.get_variable` default.
Returns:
default_loc_scale_fn: Python `callable` which instantiates `loc`, `scale`
parameters from args: `dtype, shape, name, trainable, add_variable_fn`.
"""
def _fn(dtype, shape, name, trainable, add_variable_fn):
"""Creates `loc`, `scale` parameters."""
loc = add_variable_fn(
name=name + '_loc',
shape=shape,
initializer=loc_initializer,
regularizer=loc_regularizer,
constraint=loc_constraint,
dtype=dtype,
trainable=trainable)
if is_singular:
return loc, None
untransformed_scale = add_variable_fn(
name=name + '_untransformed_scale',
shape=shape,
initializer=untransformed_scale_initializer,
regularizer=untransformed_scale_regularizer,
constraint=untransformed_scale_constraint,
dtype=dtype,
trainable=trainable)
scale = (np.finfo(dtype.as_numpy_dtype).eps +
tf.nn.softplus(untransformed_scale))
return loc, scale
return _fn
|
[
"Makes",
"closure",
"which",
"creates",
"loc",
"scale",
"params",
"from",
"tf",
".",
"get_variable",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/util.py#L39-L116
|
[
"def",
"default_loc_scale_fn",
"(",
"is_singular",
"=",
"False",
",",
"loc_initializer",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"initializers",
".",
"random_normal",
"(",
"stddev",
"=",
"0.1",
")",
",",
"untransformed_scale_initializer",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"initializers",
".",
"random_normal",
"(",
"mean",
"=",
"-",
"3.",
",",
"stddev",
"=",
"0.1",
")",
",",
"loc_regularizer",
"=",
"None",
",",
"untransformed_scale_regularizer",
"=",
"None",
",",
"loc_constraint",
"=",
"None",
",",
"untransformed_scale_constraint",
"=",
"None",
")",
":",
"def",
"_fn",
"(",
"dtype",
",",
"shape",
",",
"name",
",",
"trainable",
",",
"add_variable_fn",
")",
":",
"\"\"\"Creates `loc`, `scale` parameters.\"\"\"",
"loc",
"=",
"add_variable_fn",
"(",
"name",
"=",
"name",
"+",
"'_loc'",
",",
"shape",
"=",
"shape",
",",
"initializer",
"=",
"loc_initializer",
",",
"regularizer",
"=",
"loc_regularizer",
",",
"constraint",
"=",
"loc_constraint",
",",
"dtype",
"=",
"dtype",
",",
"trainable",
"=",
"trainable",
")",
"if",
"is_singular",
":",
"return",
"loc",
",",
"None",
"untransformed_scale",
"=",
"add_variable_fn",
"(",
"name",
"=",
"name",
"+",
"'_untransformed_scale'",
",",
"shape",
"=",
"shape",
",",
"initializer",
"=",
"untransformed_scale_initializer",
",",
"regularizer",
"=",
"untransformed_scale_regularizer",
",",
"constraint",
"=",
"untransformed_scale_constraint",
",",
"dtype",
"=",
"dtype",
",",
"trainable",
"=",
"trainable",
")",
"scale",
"=",
"(",
"np",
".",
"finfo",
"(",
"dtype",
".",
"as_numpy_dtype",
")",
".",
"eps",
"+",
"tf",
".",
"nn",
".",
"softplus",
"(",
"untransformed_scale",
")",
")",
"return",
"loc",
",",
"scale",
"return",
"_fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
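A hedged usage sketch for `default_loc_scale_fn` (TF2 eager assumed). The custom `add_variable_fn` below is an illustrative stand-in for `tf.compat.v1.get_variable` that ignores regularizers and constraints; the import path follows the `path` field above.

import tensorflow as tf
from tensorflow_probability.python.layers import util as tfp_layers_util

def add_variable_fn(name, shape, dtype, initializer, regularizer,
                    constraint, trainable):
  # Minimal stand-in for `tf.get_variable`; regularizer/constraint unused here.
  return tf.Variable(initializer(shape, dtype=dtype), name=name,
                     trainable=trainable)

loc_scale_fn = tfp_layers_util.default_loc_scale_fn()
loc, scale = loc_scale_fn(tf.float32, [3], 'kernel', True, add_variable_fn)
print(loc.shape, scale.shape)  # (3,) (3,); scale = eps + softplus(untransformed)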
test
|
default_mean_field_normal_fn
|
Creates a function to build Normal distributions with trainable params.
This function produces a closure which produces `tfd.Normal`
parameterized by a `loc` and `scale` each created using `tf.get_variable`.
Args:
is_singular: Python `bool` if `True`, forces the special case limit of
`scale->0`, i.e., a `Deterministic` distribution.
loc_initializer: Initializer function for the `loc` parameters.
The default is `tf.random_normal_initializer(mean=0., stddev=0.1)`.
untransformed_scale_initializer: Initializer function for the `scale`
parameters. Default value: `tf.random_normal_initializer(mean=-3.,
stddev=0.1)`. This implies the softplus transformed result is initialized
near `0`. It allows a `Normal` distribution with `scale` parameter set to
this value to approximately act like a point mass.
loc_regularizer: Regularizer function for the `loc` parameters.
untransformed_scale_regularizer: Regularizer function for the `scale`
parameters.
loc_constraint: An optional projection function to be applied to the
loc after being updated by an `Optimizer`. The function must take as input
the unprojected variable and must return the projected variable (which
must have the same shape). Constraints are not safe to use when doing
asynchronous distributed training.
untransformed_scale_constraint: An optional projection function to be
applied to the `scale` parameters after being updated by an `Optimizer`
(e.g. used to implement norm constraints or value constraints). The
function must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are not
safe to use when doing asynchronous distributed training.
Returns:
make_normal_fn: Python `callable` which creates a `tfd.Normal`
from args: `dtype, shape, name, trainable, add_variable_fn`.
|
tensorflow_probability/python/layers/util.py
|
def default_mean_field_normal_fn(
is_singular=False,
loc_initializer=tf.compat.v1.initializers.random_normal(stddev=0.1),
untransformed_scale_initializer=tf.compat.v1.initializers.random_normal(
mean=-3., stddev=0.1),
loc_regularizer=None,
untransformed_scale_regularizer=None,
loc_constraint=None,
untransformed_scale_constraint=None):
"""Creates a function to build Normal distributions with trainable params.
This function produces a closure which produces `tfd.Normal`
parameterized by a `loc` and `scale` each created using `tf.get_variable`.
Args:
is_singular: Python `bool` if `True`, forces the special case limit of
`scale->0`, i.e., a `Deterministic` distribution.
loc_initializer: Initializer function for the `loc` parameters.
The default is `tf.random_normal_initializer(mean=0., stddev=0.1)`.
untransformed_scale_initializer: Initializer function for the `scale`
parameters. Default value: `tf.random_normal_initializer(mean=-3.,
stddev=0.1)`. This implies the softplus transformed result is initialized
near `0`. It allows a `Normal` distribution with `scale` parameter set to
this value to approximately act like a point mass.
loc_regularizer: Regularizer function for the `loc` parameters.
untransformed_scale_regularizer: Regularizer function for the `scale`
parameters.
loc_constraint: An optional projection function to be applied to the
loc after being updated by an `Optimizer`. The function must take as input
the unprojected variable and must return the projected variable (which
must have the same shape). Constraints are not safe to use when doing
asynchronous distributed training.
untransformed_scale_constraint: An optional projection function to be
applied to the `scale` parameters after being updated by an `Optimizer`
(e.g. used to implement norm constraints or value constraints). The
function must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are not
safe to use when doing asynchronous distributed training.
Returns:
make_normal_fn: Python `callable` which creates a `tfd.Normal`
from args: `dtype, shape, name, trainable, add_variable_fn`.
"""
loc_scale_fn = default_loc_scale_fn(
is_singular=is_singular,
loc_initializer=loc_initializer,
untransformed_scale_initializer=untransformed_scale_initializer,
loc_regularizer=loc_regularizer,
untransformed_scale_regularizer=untransformed_scale_regularizer,
loc_constraint=loc_constraint,
untransformed_scale_constraint=untransformed_scale_constraint)
def _fn(dtype, shape, name, trainable, add_variable_fn):
"""Creates multivariate `Deterministic` or `Normal` distribution.
Args:
dtype: Type of parameter's event.
shape: Python `list`-like representing the parameter's event shape.
name: Python `str` name prepended to any created (or existing)
`tf.Variable`s.
trainable: Python `bool` indicating all created `tf.Variable`s should be
added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
add_variable_fn: `tf.get_variable`-like `callable` used to create (or
access existing) `tf.Variable`s.
Returns:
Multivariate `Deterministic` or `Normal` distribution.
"""
loc, scale = loc_scale_fn(dtype, shape, name, trainable, add_variable_fn)
if scale is None:
dist = tfd.Deterministic(loc=loc)
else:
dist = tfd.Normal(loc=loc, scale=scale)
batch_ndims = tf.size(input=dist.batch_shape_tensor())
return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)
return _fn
|
def default_mean_field_normal_fn(
is_singular=False,
loc_initializer=tf.compat.v1.initializers.random_normal(stddev=0.1),
untransformed_scale_initializer=tf.compat.v1.initializers.random_normal(
mean=-3., stddev=0.1),
loc_regularizer=None,
untransformed_scale_regularizer=None,
loc_constraint=None,
untransformed_scale_constraint=None):
"""Creates a function to build Normal distributions with trainable params.
This function produces a closure which produces `tfd.Normal`
  parameterized by a `loc` and `scale` each created using `tf.get_variable`.
Args:
is_singular: Python `bool` if `True`, forces the special case limit of
`scale->0`, i.e., a `Deterministic` distribution.
loc_initializer: Initializer function for the `loc` parameters.
The default is `tf.random_normal_initializer(mean=0., stddev=0.1)`.
untransformed_scale_initializer: Initializer function for the `scale`
parameters. Default value: `tf.random_normal_initializer(mean=-3.,
stddev=0.1)`. This implies the softplus transformed result is initialized
near `0`. It allows a `Normal` distribution with `scale` parameter set to
this value to approximately act like a point mass.
loc_regularizer: Regularizer function for the `loc` parameters.
untransformed_scale_regularizer: Regularizer function for the `scale`
parameters.
loc_constraint: An optional projection function to be applied to the
loc after being updated by an `Optimizer`. The function must take as input
the unprojected variable and must return the projected variable (which
must have the same shape). Constraints are not safe to use when doing
asynchronous distributed training.
untransformed_scale_constraint: An optional projection function to be
applied to the `scale` parameters after being updated by an `Optimizer`
(e.g. used to implement norm constraints or value constraints). The
function must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are not
safe to use when doing asynchronous distributed training.
Returns:
make_normal_fn: Python `callable` which creates a `tfd.Normal`
      using the args: `dtype, shape, name, trainable, add_variable_fn`.
"""
loc_scale_fn = default_loc_scale_fn(
is_singular=is_singular,
loc_initializer=loc_initializer,
untransformed_scale_initializer=untransformed_scale_initializer,
loc_regularizer=loc_regularizer,
untransformed_scale_regularizer=untransformed_scale_regularizer,
loc_constraint=loc_constraint,
untransformed_scale_constraint=untransformed_scale_constraint)
def _fn(dtype, shape, name, trainable, add_variable_fn):
"""Creates multivariate `Deterministic` or `Normal` distribution.
Args:
dtype: Type of parameter's event.
shape: Python `list`-like representing the parameter's event shape.
name: Python `str` name prepended to any created (or existing)
`tf.Variable`s.
trainable: Python `bool` indicating all created `tf.Variable`s should be
added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
add_variable_fn: `tf.get_variable`-like `callable` used to create (or
access existing) `tf.Variable`s.
Returns:
Multivariate `Deterministic` or `Normal` distribution.
"""
loc, scale = loc_scale_fn(dtype, shape, name, trainable, add_variable_fn)
if scale is None:
dist = tfd.Deterministic(loc=loc)
else:
dist = tfd.Normal(loc=loc, scale=scale)
batch_ndims = tf.size(input=dist.batch_shape_tensor())
return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)
return _fn
|
[
"Creates",
"a",
"function",
"to",
"build",
"Normal",
"distributions",
"with",
"trainable",
"params",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/util.py#L119-L193
|
[
"def",
"default_mean_field_normal_fn",
"(",
"is_singular",
"=",
"False",
",",
"loc_initializer",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"initializers",
".",
"random_normal",
"(",
"stddev",
"=",
"0.1",
")",
",",
"untransformed_scale_initializer",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"initializers",
".",
"random_normal",
"(",
"mean",
"=",
"-",
"3.",
",",
"stddev",
"=",
"0.1",
")",
",",
"loc_regularizer",
"=",
"None",
",",
"untransformed_scale_regularizer",
"=",
"None",
",",
"loc_constraint",
"=",
"None",
",",
"untransformed_scale_constraint",
"=",
"None",
")",
":",
"loc_scale_fn",
"=",
"default_loc_scale_fn",
"(",
"is_singular",
"=",
"is_singular",
",",
"loc_initializer",
"=",
"loc_initializer",
",",
"untransformed_scale_initializer",
"=",
"untransformed_scale_initializer",
",",
"loc_regularizer",
"=",
"loc_regularizer",
",",
"untransformed_scale_regularizer",
"=",
"untransformed_scale_regularizer",
",",
"loc_constraint",
"=",
"loc_constraint",
",",
"untransformed_scale_constraint",
"=",
"untransformed_scale_constraint",
")",
"def",
"_fn",
"(",
"dtype",
",",
"shape",
",",
"name",
",",
"trainable",
",",
"add_variable_fn",
")",
":",
"\"\"\"Creates multivariate `Deterministic` or `Normal` distribution.\n\n Args:\n dtype: Type of parameter's event.\n shape: Python `list`-like representing the parameter's event shape.\n name: Python `str` name prepended to any created (or existing)\n `tf.Variable`s.\n trainable: Python `bool` indicating all created `tf.Variable`s should be\n added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.\n add_variable_fn: `tf.get_variable`-like `callable` used to create (or\n access existing) `tf.Variable`s.\n\n Returns:\n Multivariate `Deterministic` or `Normal` distribution.\n \"\"\"",
"loc",
",",
"scale",
"=",
"loc_scale_fn",
"(",
"dtype",
",",
"shape",
",",
"name",
",",
"trainable",
",",
"add_variable_fn",
")",
"if",
"scale",
"is",
"None",
":",
"dist",
"=",
"tfd",
".",
"Deterministic",
"(",
"loc",
"=",
"loc",
")",
"else",
":",
"dist",
"=",
"tfd",
".",
"Normal",
"(",
"loc",
"=",
"loc",
",",
"scale",
"=",
"scale",
")",
"batch_ndims",
"=",
"tf",
".",
"size",
"(",
"input",
"=",
"dist",
".",
"batch_shape_tensor",
"(",
")",
")",
"return",
"tfd",
".",
"Independent",
"(",
"dist",
",",
"reinterpreted_batch_ndims",
"=",
"batch_ndims",
")",
"return",
"_fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
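A minimal usage sketch for `default_mean_field_normal_fn` above. It assumes TF1-style graph mode (matching the `tf.compat.v1` initializers in the listing) and that the function is exported as `tfp.layers.default_mean_field_normal_fn`:
```python
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp

tf.disable_v2_behavior()  # the listing targets TF1-style variable creation

# Build the posterior-generating closure, then materialize one mean-field
# posterior for a [5]-shaped parameter.
make_posterior = tfp.layers.default_mean_field_normal_fn()
posterior = make_posterior(tf.float32, [5], 'kernel_posterior',
                           True, tf.get_variable)
# `posterior` is a tfd.Independent wrapping a tfd.Normal whose loc and
# softplus-transformed scale are trainable variables under 'kernel_posterior'.
sample = posterior.sample()  # shape [5]
```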
test
|
default_multivariate_normal_fn
|
Creates multivariate standard `Normal` distribution.
Args:
dtype: Type of parameter's event.
shape: Python `list`-like representing the parameter's event shape.
name: Python `str` name prepended to any created (or existing)
`tf.Variable`s.
trainable: Python `bool` indicating all created `tf.Variable`s should be
added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
add_variable_fn: `tf.get_variable`-like `callable` used to create (or
access existing) `tf.Variable`s.
Returns:
Multivariate standard `Normal` distribution.
|
tensorflow_probability/python/layers/util.py
|
def default_multivariate_normal_fn(dtype, shape, name, trainable,
add_variable_fn):
"""Creates multivariate standard `Normal` distribution.
Args:
dtype: Type of parameter's event.
shape: Python `list`-like representing the parameter's event shape.
name: Python `str` name prepended to any created (or existing)
`tf.Variable`s.
trainable: Python `bool` indicating all created `tf.Variable`s should be
added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
add_variable_fn: `tf.get_variable`-like `callable` used to create (or
access existing) `tf.Variable`s.
Returns:
Multivariate standard `Normal` distribution.
"""
del name, trainable, add_variable_fn # unused
dist = tfd.Normal(loc=tf.zeros(shape, dtype), scale=dtype.as_numpy_dtype(1))
batch_ndims = tf.size(input=dist.batch_shape_tensor())
return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)
|
def default_multivariate_normal_fn(dtype, shape, name, trainable,
add_variable_fn):
"""Creates multivariate standard `Normal` distribution.
Args:
dtype: Type of parameter's event.
shape: Python `list`-like representing the parameter's event shape.
name: Python `str` name prepended to any created (or existing)
`tf.Variable`s.
trainable: Python `bool` indicating all created `tf.Variable`s should be
added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
add_variable_fn: `tf.get_variable`-like `callable` used to create (or
access existing) `tf.Variable`s.
Returns:
Multivariate standard `Normal` distribution.
"""
del name, trainable, add_variable_fn # unused
dist = tfd.Normal(loc=tf.zeros(shape, dtype), scale=dtype.as_numpy_dtype(1))
batch_ndims = tf.size(input=dist.batch_shape_tensor())
return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)
|
[
"Creates",
"multivariate",
"standard",
"Normal",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/util.py#L196-L216
|
[
"def",
"default_multivariate_normal_fn",
"(",
"dtype",
",",
"shape",
",",
"name",
",",
"trainable",
",",
"add_variable_fn",
")",
":",
"del",
"name",
",",
"trainable",
",",
"add_variable_fn",
"# unused",
"dist",
"=",
"tfd",
".",
"Normal",
"(",
"loc",
"=",
"tf",
".",
"zeros",
"(",
"shape",
",",
"dtype",
")",
",",
"scale",
"=",
"dtype",
".",
"as_numpy_dtype",
"(",
"1",
")",
")",
"batch_ndims",
"=",
"tf",
".",
"size",
"(",
"input",
"=",
"dist",
".",
"batch_shape_tensor",
"(",
")",
")",
"return",
"tfd",
".",
"Independent",
"(",
"dist",
",",
"reinterpreted_batch_ndims",
"=",
"batch_ndims",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
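A usage sketch for `default_multivariate_normal_fn` above; it is typically passed as the `kernel_prior_fn` of the Bayesian `tfp.layers`, but it can also be called directly (the export name `tfp.layers.default_multivariate_normal_fn` is assumed):
```python
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp

# name/trainable/add_variable_fn are ignored, so no variables are created:
# the prior is a fixed standard normal over a length-3 event.
prior = tfp.layers.default_multivariate_normal_fn(
    tf.float32, [3], 'kernel_prior', True, tf.get_variable)
prior.event_shape              # [3]
prior.log_prob(tf.zeros([3]))  # scalar log-density under N(0, I)
```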
test
|
deserialize_function
|
Deserializes the Keras-serialized function.
(De)serializing Python functions from/to bytecode is unsafe. Therefore we
also use the function's type as an anonymous function ('lambda') or named
function in the Python environment ('function'). In the latter case, this lets
us use the Python scope to obtain the function rather than reload it from
bytecode. (Note that both cases are brittle!)
Keras-deserialized functions do not perform lexical scoping. Any modules that
the function requires must be imported within the function itself.
This serialization mimics the implementation in `tf.keras.layers.Lambda`.
Args:
serial: Serialized Keras object: typically a dict, string, or bytecode.
function_type: Python string denoting 'function' or 'lambda'.
Returns:
function: Function the serialized Keras object represents.
#### Examples
```python
serial, function_type = serialize_function(lambda x: x)
function = deserialize_function(serial, function_type)
assert function(2.3) == 2.3 # function is identity
```
|
tensorflow_probability/python/layers/util.py
|
def deserialize_function(serial, function_type):
"""Deserializes the Keras-serialized function.
(De)serializing Python functions from/to bytecode is unsafe. Therefore we
also use the function's type as an anonymous function ('lambda') or named
function in the Python environment ('function'). In the latter case, this lets
us use the Python scope to obtain the function rather than reload it from
bytecode. (Note that both cases are brittle!)
Keras-deserialized functions do not perform lexical scoping. Any modules that
the function requires must be imported within the function itself.
  This serialization mimics the implementation in `tf.keras.layers.Lambda`.
Args:
serial: Serialized Keras object: typically a dict, string, or bytecode.
function_type: Python string denoting 'function' or 'lambda'.
Returns:
function: Function the serialized Keras object represents.
#### Examples
```python
serial, function_type = serialize_function(lambda x: x)
function = deserialize_function(serial, function_type)
assert function(2.3) == 2.3 # function is identity
```
"""
if function_type == 'function':
# Simple lookup in custom objects
function = tf.keras.utils.deserialize_keras_object(serial)
elif function_type == 'lambda':
# Unsafe deserialization from bytecode
function = generic_utils.func_load(serial)
else:
raise TypeError('Unknown function type:', function_type)
return function
|
def deserialize_function(serial, function_type):
"""Deserializes the Keras-serialized function.
(De)serializing Python functions from/to bytecode is unsafe. Therefore we
also use the function's type as an anonymous function ('lambda') or named
function in the Python environment ('function'). In the latter case, this lets
us use the Python scope to obtain the function rather than reload it from
bytecode. (Note that both cases are brittle!)
Keras-deserialized functions do not perform lexical scoping. Any modules that
the function requires must be imported within the function itself.
  This serialization mimics the implementation in `tf.keras.layers.Lambda`.
Args:
serial: Serialized Keras object: typically a dict, string, or bytecode.
function_type: Python string denoting 'function' or 'lambda'.
Returns:
function: Function the serialized Keras object represents.
#### Examples
```python
serial, function_type = serialize_function(lambda x: x)
function = deserialize_function(serial, function_type)
assert function(2.3) == 2.3 # function is identity
```
"""
if function_type == 'function':
# Simple lookup in custom objects
function = tf.keras.utils.deserialize_keras_object(serial)
elif function_type == 'lambda':
# Unsafe deserialization from bytecode
function = generic_utils.func_load(serial)
else:
raise TypeError('Unknown function type:', function_type)
return function
|
[
"Deserializes",
"the",
"Keras",
"-",
"serialized",
"function",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/util.py#L219-L257
|
[
"def",
"deserialize_function",
"(",
"serial",
",",
"function_type",
")",
":",
"if",
"function_type",
"==",
"'function'",
":",
"# Simple lookup in custom objects",
"function",
"=",
"tf",
".",
"keras",
".",
"utils",
".",
"deserialize_keras_object",
"(",
"serial",
")",
"elif",
"function_type",
"==",
"'lambda'",
":",
"# Unsafe deserialization from bytecode",
"function",
"=",
"generic_utils",
".",
"func_load",
"(",
"serial",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Unknown function type:'",
",",
"function_type",
")",
"return",
"function"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
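A round-trip sketch for `deserialize_function`, exercising only the bytecode ('lambda') branch shown in the listing; it assumes `serialize_function` and `deserialize_function` from the records here are in scope:
```python
# Serialize, then reload the same callable from its bytecode representation.
serial, function_type = serialize_function(lambda x: x + 1.)
fn = deserialize_function(serial, function_type)
assert fn(1.) == 2.  # round trip through the unsafe 'lambda' branch
```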
test
|
serialize_function
|
Serializes function for Keras.
(De)serializing Python functions from/to bytecode is unsafe. Therefore we
return the function's type as an anonymous function ('lambda') or named
function in the Python environment ('function'). In the latter case, this lets
us use the Python scope to obtain the function rather than reload it from
bytecode. (Note that both cases are brittle!)
This serialization mimics the implementation in `tf.keras.layers.Lambda`.
Args:
func: Python function to serialize.
Returns:
(serial, function_type): Serialized object, which is a tuple of its
bytecode (if function is anonymous) or name (if function is named), and its
function type.
|
tensorflow_probability/python/layers/util.py
|
def serialize_function(func):
"""Serializes function for Keras.
(De)serializing Python functions from/to bytecode is unsafe. Therefore we
return the function's type as an anonymous function ('lambda') or named
function in the Python environment ('function'). In the latter case, this lets
us use the Python scope to obtain the function rather than reload it from
bytecode. (Note that both cases are brittle!)
  This serialization mimics the implementation in `tf.keras.layers.Lambda`.
Args:
func: Python function to serialize.
Returns:
(serial, function_type): Serialized object, which is a tuple of its
bytecode (if function is anonymous) or name (if function is named), and its
function type.
"""
if isinstance(func, types.LambdaType):
return generic_utils.func_dump(func), 'lambda'
return func.__name__, 'function'
|
def serialize_function(func):
"""Serializes function for Keras.
(De)serializing Python functions from/to bytecode is unsafe. Therefore we
return the function's type as an anonymous function ('lambda') or named
function in the Python environment ('function'). In the latter case, this lets
us use the Python scope to obtain the function rather than reload it from
bytecode. (Note that both cases are brittle!)
  This serialization mimics the implementation in `tf.keras.layers.Lambda`.
Args:
func: Python function to serialize.
Returns:
(serial, function_type): Serialized object, which is a tuple of its
bytecode (if function is anonymous) or name (if function is named), and its
function type.
"""
if isinstance(func, types.LambdaType):
return generic_utils.func_dump(func), 'lambda'
return func.__name__, 'function'
|
[
"Serializes",
"function",
"for",
"Keras",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/util.py#L260-L281
|
[
"def",
"serialize_function",
"(",
"func",
")",
":",
"if",
"isinstance",
"(",
"func",
",",
"types",
".",
"LambdaType",
")",
":",
"return",
"generic_utils",
".",
"func_dump",
"(",
"func",
")",
",",
"'lambda'",
"return",
"func",
".",
"__name__",
",",
"'function'"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
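A sketch of what `serialize_function` returns, assuming the function from the listing is in scope. Note that in CPython `types.LambdaType` is the same object as `types.FunctionType`, so ordinary `def` functions also take the bytecode branch:
```python
serial, function_type = serialize_function(lambda x: 2. * x)
# function_type == 'lambda'; `serial` is the tuple produced by Keras'
# func_dump (roughly: marshaled code, defaults, closure), which is why
# deserializing it later is flagged as unsafe.
```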
test
|
broadcast_structure
|
Broadcasts `from_structure` to `to_structure`.
This is useful for downstream usage of `zip` or `tf.nest.map_structure`.
If `from_structure` is a singleton, it is tiled to match the structure of
`to_structure`. Note that the elements in `from_structure` are not copied if
this tiling occurs.
Args:
to_structure: A structure.
from_structure: A structure.
Returns:
new_from_structure: Same structure as `to_structure`.
#### Example:
```python
a_structure = ['a', 'b', 'c']
b_structure = broadcast_structure(a_structure, 'd')
# -> ['d', 'd', 'd']
c_structure = tf.nest.map_structure(
lambda a, b: a + b, a_structure, b_structure)
# -> ['ad', 'bd', 'cd']
```
|
tensorflow_probability/python/internal/nest_util.py
|
def broadcast_structure(to_structure, from_structure):
"""Broadcasts `from_structure` to `to_structure`.
This is useful for downstream usage of `zip` or `tf.nest.map_structure`.
If `from_structure` is a singleton, it is tiled to match the structure of
`to_structure`. Note that the elements in `from_structure` are not copied if
this tiling occurs.
Args:
to_structure: A structure.
from_structure: A structure.
Returns:
new_from_structure: Same structure as `to_structure`.
#### Example:
```python
a_structure = ['a', 'b', 'c']
b_structure = broadcast_structure(a_structure, 'd')
# -> ['d', 'd', 'd']
c_structure = tf.nest.map_structure(
lambda a, b: a + b, a_structure, b_structure)
# -> ['ad', 'bd', 'cd']
```
"""
from_parts = tf.nest.flatten(from_structure)
if len(from_parts) == 1:
from_structure = tf.nest.map_structure(lambda _: from_parts[0],
to_structure)
return from_structure
|
def broadcast_structure(to_structure, from_structure):
"""Broadcasts `from_structure` to `to_structure`.
This is useful for downstream usage of `zip` or `tf.nest.map_structure`.
If `from_structure` is a singleton, it is tiled to match the structure of
`to_structure`. Note that the elements in `from_structure` are not copied if
this tiling occurs.
Args:
to_structure: A structure.
from_structure: A structure.
Returns:
new_from_structure: Same structure as `to_structure`.
#### Example:
```python
a_structure = ['a', 'b', 'c']
b_structure = broadcast_structure(a_structure, 'd')
# -> ['d', 'd', 'd']
c_structure = tf.nest.map_structure(
lambda a, b: a + b, a_structure, b_structure)
# -> ['ad', 'bd', 'cd']
```
"""
from_parts = tf.nest.flatten(from_structure)
if len(from_parts) == 1:
from_structure = tf.nest.map_structure(lambda _: from_parts[0],
to_structure)
return from_structure
|
[
"Broadcasts",
"from_structure",
"to",
"to_structure",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/nest_util.py#L36-L67
|
[
"def",
"broadcast_structure",
"(",
"to_structure",
",",
"from_structure",
")",
":",
"from_parts",
"=",
"tf",
".",
"nest",
".",
"flatten",
"(",
"from_structure",
")",
"if",
"len",
"(",
"from_parts",
")",
"==",
"1",
":",
"from_structure",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"lambda",
"_",
":",
"from_parts",
"[",
"0",
"]",
",",
"to_structure",
")",
"return",
"from_structure"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
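A sketch complementing the list example in the docstring with a dict structure and a non-singleton input (module path taken from the record above):
```python
from tensorflow_probability.python.internal import nest_util

# A singleton broadcasts against an arbitrary (here: dict) structure.
nest_util.broadcast_structure({'a': 0, 'b': 1}, 3.)
# -> {'a': 3.0, 'b': 3.0}

# A non-singleton is returned unchanged; no broadcasting happens.
nest_util.broadcast_structure(['x', 'y'], [1, 2])
# -> [1, 2]
```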
test
|
expand_as_args
|
Returns `True` if `args` should be expanded as `*args`.
|
tensorflow_probability/python/internal/nest_util.py
|
def expand_as_args(args):
"""Returns `True` if `args` should be expanded as `*args`."""
return (isinstance(args, collections.Sequence) and
not _is_namedtuple(args) and not _force_leaf(args))
|
def expand_as_args(args):
"""Returns `True` if `args` should be expanded as `*args`."""
return (isinstance(args, collections.Sequence) and
not _is_namedtuple(args) and not _force_leaf(args))
|
[
"Returns",
"True",
"if",
"args",
"should",
"be",
"expanded",
"as",
"*",
"args",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/nest_util.py#L76-L79
|
[
"def",
"expand_as_args",
"(",
"args",
")",
":",
"return",
"(",
"isinstance",
"(",
"args",
",",
"collections",
".",
"Sequence",
")",
"and",
"not",
"_is_namedtuple",
"(",
"args",
")",
"and",
"not",
"_force_leaf",
"(",
"args",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
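A few illustrative calls; note the listing uses `collections.Sequence`, which exists only on Python < 3.10 (module path taken from the record above):
```python
import collections
from tensorflow_probability.python.internal import nest_util

Point = collections.namedtuple('Point', ['x', 'y'])

nest_util.expand_as_args([1, 2])        # True  -> would be passed as *args
nest_util.expand_as_args((1, 2))        # True  -> plain tuples expand too
nest_util.expand_as_args(Point(1, 2))   # False -> namedtuples stay a single leaf
nest_util.expand_as_args({'a': 1})      # False -> dicts take the **kwargs path
```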
test
|
_nested_convert_to_tensor
|
Eagerly converts struct to Tensor, recursing upon failure.
|
tensorflow_probability/python/internal/nest_util.py
|
def _nested_convert_to_tensor(struct, dtype=None, name=None):
"""Eagerly converts struct to Tensor, recursing upon failure."""
if dtype is not None or not tf.nest.is_nested(struct):
return tf.convert_to_tensor(struct, dtype=dtype)
if _maybe_convertible_to_tensor(struct):
try:
# Try converting the structure wholesale.
return tf.convert_to_tensor(value=struct, name=name)
except (ValueError, TypeError):
# Unfortunately Eager/Graph mode don't agree on the error type.
pass
# Try converting all of its children.
shallow_struct = _get_shallow_structure(struct)
return nest.map_structure_up_to(
shallow_struct, lambda s: _nested_convert_to_tensor(s, name=name), struct)
|
def _nested_convert_to_tensor(struct, dtype=None, name=None):
"""Eagerly converts struct to Tensor, recursing upon failure."""
if dtype is not None or not tf.nest.is_nested(struct):
return tf.convert_to_tensor(struct, dtype=dtype)
if _maybe_convertible_to_tensor(struct):
try:
# Try converting the structure wholesale.
return tf.convert_to_tensor(value=struct, name=name)
except (ValueError, TypeError):
# Unfortunately Eager/Graph mode don't agree on the error type.
pass
# Try converting all of its children.
shallow_struct = _get_shallow_structure(struct)
return nest.map_structure_up_to(
shallow_struct, lambda s: _nested_convert_to_tensor(s, name=name), struct)
|
[
"Eagerly",
"converts",
"struct",
"to",
"Tensor",
"recursing",
"upon",
"failure",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/nest_util.py#L98-L113
|
[
"def",
"_nested_convert_to_tensor",
"(",
"struct",
",",
"dtype",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"dtype",
"is",
"not",
"None",
"or",
"not",
"tf",
".",
"nest",
".",
"is_nested",
"(",
"struct",
")",
":",
"return",
"tf",
".",
"convert_to_tensor",
"(",
"struct",
",",
"dtype",
"=",
"dtype",
")",
"if",
"_maybe_convertible_to_tensor",
"(",
"struct",
")",
":",
"try",
":",
"# Try converting the structure wholesale.",
"return",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"struct",
",",
"name",
"=",
"name",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"# Unfortunately Eager/Graph mode don't agree on the error type.",
"pass",
"# Try converting all of its children.",
"shallow_struct",
"=",
"_get_shallow_structure",
"(",
"struct",
")",
"return",
"nest",
".",
"map_structure_up_to",
"(",
"shallow_struct",
",",
"lambda",
"s",
":",
"_nested_convert_to_tensor",
"(",
"s",
",",
"name",
"=",
"name",
")",
",",
"struct",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
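A sketch of the recurse-on-failure behavior of this private helper, consistent with the conversion table documented for `convert_args_to_tensor` below:
```python
import tensorflow as tf
from tensorflow_probability.python.internal import nest_util

# Rectangular nested lists convert wholesale ...
nest_util._nested_convert_to_tensor([[1], [2]])
# -> a single tf.Tensor of shape [2, 1]

# ... while ragged ones fail wholesale conversion and recurse into children.
nest_util._nested_convert_to_tensor([1, [2, 3]])
# -> [tf.Tensor(1), tf.Tensor([2, 3])]
```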
test
|
convert_args_to_tensor
|
Converts `args` to `Tensor`s.
Use this when it is necessary to convert user-provided arguments that will
then be passed to user-provided callables.
When `dtype` is `None` this function behaves as follows:
1A. If the top-level structure is a `list`/`tuple` but not a `namedtuple`,
then it is left as is and only its elements are converted to `Tensor`s.
2A. The sub-structures are converted to `Tensor`s eagerly. E.g. if `args` is
`{'arg': [[1], [2]]}` it is converted to
`{'arg': tf.constant([[1], [2]])}`. If the conversion fails, it will
attempt to recurse into its children.
When `dtype` is specified, it acts as both a structural and numeric type
constraint. `dtype` can be a single `DType`, `None` or a nested collection
thereof. The conversion rule becomes as follows:
1B. The return value of this function will have the same structure as `dtype`.
2B. If the leaf of `dtype` is a concrete `DType`, then the corresponding
sub-structure in `args` is converted to a `Tensor`.
3B. If the leaf of `dtype` is `None`, then the corresponding sub-structure is
converted eagerly as described in the rule 2A above.
Args:
args: Arguments to convert to `Tensor`s.
dtype: Optional structure/numeric type constraint.
name: Optional name-scope to use.
Returns:
args: Converted `args`.
#### Examples.
This table shows some useful conversion cases. `T` means `Tensor`, `NT` means
`namedtuple` and `CNT` means a `namedtuple` with a `Tensor`-conversion
function registered.
| args | dtype | output |
|:------------:|:----------:|:------------------:|
| `{"a": 1}` | `None` | `{"a": T(1)}` |
| `T(1)` | `None` | `T(1)` |
| `[1]` | `None` | `[T(1)]` |
| `[1]` | `tf.int32` | `T([1])` |
| `[[T(1)]]` | `None` | `[T([1])]` |
| `[[T(1)]]` | `[[None]]` | `[[T(1)]]` |
| `NT(1, 2)` | `None` | `NT(T(1), T(2))` |
| `NT(1, 2)` | `tf.int32` | `T([1, 2])` |
| `CNT(1, 2)` | `None` | `T(...)` |
| `[[1, [2]]]` | `None` | `[[T(1), T([2])]]` |
|
tensorflow_probability/python/internal/nest_util.py
|
def convert_args_to_tensor(args, dtype=None, name=None):
"""Converts `args` to `Tensor`s.
Use this when it is necessary to convert user-provided arguments that will
then be passed to user-provided callables.
When `dtype` is `None` this function behaves as follows:
1A. If the top-level structure is a `list`/`tuple` but not a `namedtuple`,
then it is left as is and only its elements are converted to `Tensor`s.
2A. The sub-structures are converted to `Tensor`s eagerly. E.g. if `args` is
`{'arg': [[1], [2]]}` it is converted to
`{'arg': tf.constant([[1], [2]])}`. If the conversion fails, it will
attempt to recurse into its children.
When `dtype` is specified, it acts as both a structural and numeric type
constraint. `dtype` can be a single `DType`, `None` or a nested collection
thereof. The conversion rule becomes as follows:
1B. The return value of this function will have the same structure as `dtype`.
2B. If the leaf of `dtype` is a concrete `DType`, then the corresponding
sub-structure in `args` is converted to a `Tensor`.
3B. If the leaf of `dtype` is `None`, then the corresponding sub-structure is
converted eagerly as described in the rule 2A above.
Args:
args: Arguments to convert to `Tensor`s.
dtype: Optional structure/numeric type constraint.
name: Optional name-scope to use.
Returns:
args: Converted `args`.
#### Examples.
This table shows some useful conversion cases. `T` means `Tensor`, `NT` means
`namedtuple` and `CNT` means a `namedtuple` with a `Tensor`-conversion
function registered.
| args | dtype | output |
|:------------:|:----------:|:------------------:|
| `{"a": 1}` | `None` | `{"a": T(1)}` |
| `T(1)` | `None` | `T(1)` |
| `[1]` | `None` | `[T(1)]` |
| `[1]` | `tf.int32` | `T([1])` |
| `[[T(1)]]` | `None` | `[T([1])]` |
| `[[T(1)]]` | `[[None]]` | `[[T(1)]]` |
| `NT(1, 2)` | `None` | `NT(T(1), T(2))` |
| `NT(1, 2)` | `tf.int32` | `T([1, 2])` |
| `CNT(1, 2)` | `None` | `T(...)` |
| `[[1, [2]]]` | `None` | `[[T(1), T([2])]]` |
"""
if dtype is None:
if expand_as_args(args) or _expand_as_kwargs(args):
shallow_args = _get_shallow_structure(args)
return nest.map_structure_up_to(
shallow_args, lambda s: _nested_convert_to_tensor(s, name=name), args)
else:
return _nested_convert_to_tensor(args, name=name)
else:
return nest.map_structure_up_to(
dtype, lambda s, dtype: _nested_convert_to_tensor(s, dtype, name), args,
dtype)
|
def convert_args_to_tensor(args, dtype=None, name=None):
"""Converts `args` to `Tensor`s.
Use this when it is necessary to convert user-provided arguments that will
then be passed to user-provided callables.
When `dtype` is `None` this function behaves as follows:
1A. If the top-level structure is a `list`/`tuple` but not a `namedtuple`,
then it is left as is and only its elements are converted to `Tensor`s.
2A. The sub-structures are converted to `Tensor`s eagerly. E.g. if `args` is
`{'arg': [[1], [2]]}` it is converted to
`{'arg': tf.constant([[1], [2]])}`. If the conversion fails, it will
attempt to recurse into its children.
When `dtype` is specified, it acts as both a structural and numeric type
constraint. `dtype` can be a single `DType`, `None` or a nested collection
thereof. The conversion rule becomes as follows:
1B. The return value of this function will have the same structure as `dtype`.
2B. If the leaf of `dtype` is a concrete `DType`, then the corresponding
sub-structure in `args` is converted to a `Tensor`.
3B. If the leaf of `dtype` is `None`, then the corresponding sub-structure is
converted eagerly as described in the rule 2A above.
Args:
args: Arguments to convert to `Tensor`s.
dtype: Optional structure/numeric type constraint.
name: Optional name-scope to use.
Returns:
args: Converted `args`.
#### Examples.
This table shows some useful conversion cases. `T` means `Tensor`, `NT` means
`namedtuple` and `CNT` means a `namedtuple` with a `Tensor`-conversion
function registered.
| args | dtype | output |
|:------------:|:----------:|:------------------:|
| `{"a": 1}` | `None` | `{"a": T(1)}` |
| `T(1)` | `None` | `T(1)` |
| `[1]` | `None` | `[T(1)]` |
| `[1]` | `tf.int32` | `T([1])` |
| `[[T(1)]]` | `None` | `[T([1])]` |
| `[[T(1)]]` | `[[None]]` | `[[T(1)]]` |
| `NT(1, 2)` | `None` | `NT(T(1), T(2))` |
| `NT(1, 2)` | `tf.int32` | `T([1, 2])` |
| `CNT(1, 2)` | `None` | `T(...)` |
| `[[1, [2]]]` | `None` | `[[T(1), T([2])]]` |
"""
if dtype is None:
if expand_as_args(args) or _expand_as_kwargs(args):
shallow_args = _get_shallow_structure(args)
return nest.map_structure_up_to(
shallow_args, lambda s: _nested_convert_to_tensor(s, name=name), args)
else:
return _nested_convert_to_tensor(args, name=name)
else:
return nest.map_structure_up_to(
dtype, lambda s, dtype: _nested_convert_to_tensor(s, dtype, name), args,
dtype)
|
[
"Converts",
"args",
"to",
"Tensor",
"s",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/nest_util.py#L116-L182
|
[
"def",
"convert_args_to_tensor",
"(",
"args",
",",
"dtype",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"dtype",
"is",
"None",
":",
"if",
"expand_as_args",
"(",
"args",
")",
"or",
"_expand_as_kwargs",
"(",
"args",
")",
":",
"shallow_args",
"=",
"_get_shallow_structure",
"(",
"args",
")",
"return",
"nest",
".",
"map_structure_up_to",
"(",
"shallow_args",
",",
"lambda",
"s",
":",
"_nested_convert_to_tensor",
"(",
"s",
",",
"name",
"=",
"name",
")",
",",
"args",
")",
"else",
":",
"return",
"_nested_convert_to_tensor",
"(",
"args",
",",
"name",
"=",
"name",
")",
"else",
":",
"return",
"nest",
".",
"map_structure_up_to",
"(",
"dtype",
",",
"lambda",
"s",
",",
"dtype",
":",
"_nested_convert_to_tensor",
"(",
"s",
",",
"dtype",
",",
"name",
")",
",",
"args",
",",
"dtype",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
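Two calls matching rows of the conversion table above (module path taken from the record):
```python
import tensorflow as tf
from tensorflow_probability.python.internal import nest_util

# Rule 2A: dict values are converted eagerly, the dict itself is preserved.
nest_util.convert_args_to_tensor({'arg': [[1], [2]]})
# -> {'arg': tf.constant([[1], [2]])}

# Rules 1B/2B: a concrete dtype collapses the structure into one Tensor.
nest_util.convert_args_to_tensor([1, 2], dtype=tf.int32)
# -> tf.constant([1, 2], dtype=tf.int32)
```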
test
|
call_fn
|
Calls `fn` with `args`, possibly expanding `args`.
Use this function when calling a user-provided callable using user-provided
arguments.
The expansion rules are as follows:
`fn(*args)` if `args` is a `list` or a `tuple`, but not a `namedtuple`.
`fn(**args)` if `args` is a `dict`.
`fn(args)` otherwise.
Args:
  fn: A callable that takes `args` as its argument(s).
args: Arguments to `fn`.
Returns:
result: Return value of `fn`.
|
tensorflow_probability/python/internal/nest_util.py
|
def call_fn(fn, args):
"""Calls `fn` with `args`, possibly expanding `args`.
Use this function when calling a user-provided callable using user-provided
arguments.
The expansion rules are as follows:
`fn(*args)` if `args` is a `list` or a `tuple`, but not a `namedtuple`.
`fn(**args)` if `args` is a `dict`.
`fn(args)` otherwise.
Args:
    fn: A callable that takes `args` as its argument(s).
args: Arguments to `fn`.
Returns:
result: Return value of `fn`.
"""
if expand_as_args(args):
return fn(*args)
elif _expand_as_kwargs(args):
return fn(**args)
else:
return fn(args)
|
def call_fn(fn, args):
"""Calls `fn` with `args`, possibly expanding `args`.
Use this function when calling a user-provided callable using user-provided
arguments.
The expansion rules are as follows:
`fn(*args)` if `args` is a `list` or a `tuple`, but not a `namedtuple`.
`fn(**args)` if `args` is a `dict`.
`fn(args)` otherwise.
Args:
    fn: A callable that takes `args` as its argument(s).
args: Arguments to `fn`.
Returns:
result: Return value of `fn`.
"""
if expand_as_args(args):
return fn(*args)
elif _expand_as_kwargs(args):
return fn(**args)
else:
return fn(args)
|
[
"Calls",
"fn",
"with",
"args",
"possibly",
"expanding",
"args",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/nest_util.py#L185-L210
|
[
"def",
"call_fn",
"(",
"fn",
",",
"args",
")",
":",
"if",
"expand_as_args",
"(",
"args",
")",
":",
"return",
"fn",
"(",
"*",
"args",
")",
"elif",
"_expand_as_kwargs",
"(",
"args",
")",
":",
"return",
"fn",
"(",
"*",
"*",
"args",
")",
"else",
":",
"return",
"fn",
"(",
"args",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
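The three expansion rules in action (module path taken from the record above):
```python
from tensorflow_probability.python.internal import nest_util

nest_util.call_fn(lambda a, b: a + b, [1, 2])            # -> 3   via fn(*args)
nest_util.call_fn(lambda a, b: a + b, {'a': 1, 'b': 2})  # -> 3   via fn(**args)
nest_util.call_fn(lambda x: x * 10, 4)                   # -> 40  via fn(args)
```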
test
|
_wrap_method
|
Replaces member function's first arg, `self`, to `self._value()`.
This function is used by `_get_tensor_like_attributes` to take existing
`Tensor` member functions and make them operate on `self._value()`, i.e., the
concretization of a `Distribution`.
Args:
cls: The `class` from which we will look up the `attr`.
attr: Python `str` representing the `attr` to inject a new notion of `self`.
Returns:
dependency_injected_function: Python `callable` (or `property`)
corresponding to `cls.attr` with `self` replaced as `self._value()`.
|
tensorflow_probability/python/layers/internal/distribution_tensor_coercible.py
|
def _wrap_method(cls, attr):
"""Replaces member function's first arg, `self`, to `self._value()`.
This function is used by `_get_tensor_like_attributes` to take existing
`Tensor` member functions and make them operate on `self._value()`, i.e., the
concretization of a `Distribution`.
Args:
cls: The `class` from which we will look up the `attr`.
attr: Python `str` representing the `attr` to inject a new notion of `self`.
Returns:
dependency_injected_function: Python `callable` (or `property`)
corresponding to `cls.attr` with `self` replaced as `self._value()`.
"""
fn = getattr(cls, attr)
is_property = isinstance(fn, property)
if is_property:
fn = fn.fget
@functools.wraps(fn)
def wrapped(self, *args, **kwargs):
return fn(self._value(), *args, **kwargs) # pylint: disable=protected-access
return property(wrapped) if is_property else wrapped
|
def _wrap_method(cls, attr):
"""Replaces member function's first arg, `self`, to `self._value()`.
This function is used by `_get_tensor_like_attributes` to take existing
`Tensor` member functions and make them operate on `self._value()`, i.e., the
concretization of a `Distribution`.
Args:
cls: The `class` from which we will look up the `attr`.
attr: Python `str` representing the `attr` to inject a new notion of `self`.
Returns:
dependency_injected_function: Python `callable` (or `property`)
corresponding to `cls.attr` with `self` replaced as `self._value()`.
"""
fn = getattr(cls, attr)
is_property = isinstance(fn, property)
if is_property:
fn = fn.fget
@functools.wraps(fn)
def wrapped(self, *args, **kwargs):
return fn(self._value(), *args, **kwargs) # pylint: disable=protected-access
return property(wrapped) if is_property else wrapped
|
[
"Replaces",
"member",
"function",
"s",
"first",
"arg",
"self",
"to",
"self",
".",
"_value",
"()",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/internal/distribution_tensor_coercible.py#L32-L54
|
[
"def",
"_wrap_method",
"(",
"cls",
",",
"attr",
")",
":",
"fn",
"=",
"getattr",
"(",
"cls",
",",
"attr",
")",
"is_property",
"=",
"isinstance",
"(",
"fn",
",",
"property",
")",
"if",
"is_property",
":",
"fn",
"=",
"fn",
".",
"fget",
"@",
"functools",
".",
"wraps",
"(",
"fn",
")",
"def",
"wrapped",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"fn",
"(",
"self",
".",
"_value",
"(",
")",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# pylint: disable=protected-access",
"return",
"property",
"(",
"wrapped",
")",
"if",
"is_property",
"else",
"wrapped"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
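A toy sketch of what the wrapper produces, using a stand-in class whose `_value()` plays the role of the concretized distribution; the class and attribute choices here are hypothetical:
```python
import tensorflow as tf
from tensorflow_probability.python.layers.internal import (
    distribution_tensor_coercible as dtc)

class FakeCoercible(object):
  """Stand-in whose `_value()` plays the role of the concretized Tensor."""

  def _value(self):
    return tf.constant([1., 2.])

  # `shape` is a property on tf.Tensor, so the wrapper returns a property.
  shape = dtc._wrap_method(tf.Tensor, 'shape')
  # `__add__` is a plain method, so the wrapper returns a function.
  __add__ = dtc._wrap_method(tf.Tensor, '__add__')

x = FakeCoercible()
x.shape   # TensorShape([2]) -- read off x._value()
x + 1.    # tf.Tensor([2., 3.]) -- tf.Tensor.__add__(x._value(), 1.)
```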
test
|
_get_tensor_like_attributes
|
Returns `Tensor` attributes related to shape and Python builtins.
|
tensorflow_probability/python/layers/internal/distribution_tensor_coercible.py
|
def _get_tensor_like_attributes():
"""Returns `Tensor` attributes related to shape and Python builtins."""
# Enable "Tensor semantics" for distributions.
# See tensorflow/python/framework/ops.py `class Tensor` for details.
attrs = dict()
# Setup overloadable operators and white-listed members / properties.
attrs.update((attr, _wrap_method(tf.Tensor, attr))
for attr in tf.Tensor.OVERLOADABLE_OPERATORS.union({'__iter__'}))
# Copy some members straight-through.
attrs.update((attr, getattr(tf.Tensor, attr))
for attr in {'__nonzero__', '__bool__', '__array_priority__'})
return attrs
|
def _get_tensor_like_attributes():
"""Returns `Tensor` attributes related to shape and Python builtins."""
# Enable "Tensor semantics" for distributions.
# See tensorflow/python/framework/ops.py `class Tensor` for details.
attrs = dict()
# Setup overloadable operators and white-listed members / properties.
attrs.update((attr, _wrap_method(tf.Tensor, attr))
for attr in tf.Tensor.OVERLOADABLE_OPERATORS.union({'__iter__'}))
# Copy some members straight-through.
attrs.update((attr, getattr(tf.Tensor, attr))
for attr in {'__nonzero__', '__bool__', '__array_priority__'})
return attrs
|
[
"Returns",
"Tensor",
"attributes",
"related",
"to",
"shape",
"and",
"Python",
"builtins",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/internal/distribution_tensor_coercible.py#L57-L68
|
[
"def",
"_get_tensor_like_attributes",
"(",
")",
":",
"# Enable \"Tensor semantics\" for distributions.",
"# See tensorflow/python/framework/ops.py `class Tensor` for details.",
"attrs",
"=",
"dict",
"(",
")",
"# Setup overloadable operators and white-listed members / properties.",
"attrs",
".",
"update",
"(",
"(",
"attr",
",",
"_wrap_method",
"(",
"tf",
".",
"Tensor",
",",
"attr",
")",
")",
"for",
"attr",
"in",
"tf",
".",
"Tensor",
".",
"OVERLOADABLE_OPERATORS",
".",
"union",
"(",
"{",
"'__iter__'",
"}",
")",
")",
"# Copy some members straight-through.",
"attrs",
".",
"update",
"(",
"(",
"attr",
",",
"getattr",
"(",
"tf",
".",
"Tensor",
",",
"attr",
")",
")",
"for",
"attr",
"in",
"{",
"'__nonzero__'",
",",
"'__bool__'",
",",
"'__array_priority__'",
"}",
")",
"return",
"attrs"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
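A sketch of what the returned attribute map contains (module path taken from the record above):
```python
from tensorflow_probability.python.layers.internal import (
    distribution_tensor_coercible as dtc)

attrs = dtc._get_tensor_like_attributes()
'__add__' in attrs and '__iter__' in attrs   # True: wrapped to act on _value()
'__array_priority__' in attrs                # True: copied verbatim from tf.Tensor
# In the module these attrs are injected into a dynamically built subclass so
# a Distribution instance can stand in for a Tensor in arithmetic expressions.
```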
test
|
_value
|
Get the value returned by `tf.convert_to_tensor(distribution)`.
Note: this function may mutate the distribution instance state by caching
the concretized `Tensor` value.
Args:
dtype: Must return a `Tensor` with the given `dtype` if specified.
name: If the conversion function creates a new `Tensor`, it should use the
given `name` if specified.
  as_ref: If `as_ref` is true, the function must return a `Tensor` reference,
such as a `Variable`.
Returns:
concretized_distribution_value: `Tensor` identical to
`tf.convert_to_tensor(distribution)`.
#### Examples
```python
tfd = tfp.distributions
x = tfd.Normal(0.5, 1).set_tensor_conversion(tfd.Distribution.mean)
x._value()
# ==> tf.convert_to_tensor(x) ==> 0.5
x._value() + 2
# ==> tf.convert_to_tensor(x) + 2. ==> 2.5
x + 2
# ==> tf.convert_to_tensor(x) + 2. ==> 2.5
```
|
tensorflow_probability/python/layers/internal/distribution_tensor_coercible.py
|
def _value(self, dtype=None, name=None, as_ref=False): # pylint: disable=g-doc-args
"""Get the value returned by `tf.convert_to_tensor(distribution)`.
Note: this function may mutate the distribution instance state by caching
the concretized `Tensor` value.
Args:
dtype: Must return a `Tensor` with the given `dtype` if specified.
name: If the conversion function creates a new `Tensor`, it should use the
given `name` if specified.
    as_ref: If `as_ref` is true, the function must return a `Tensor` reference,
such as a `Variable`.
Returns:
concretized_distribution_value: `Tensor` identical to
`tf.convert_to_tensor(distribution)`.
#### Examples
```python
tfd = tfp.distributions
x = tfd.Normal(0.5, 1).set_tensor_conversion(tfd.Distribution.mean)
x._value()
# ==> tf.convert_to_tensor(x) ==> 0.5
x._value() + 2
# ==> tf.convert_to_tensor(x) + 2. ==> 2.5
x + 2
# ==> tf.convert_to_tensor(x) + 2. ==> 2.5
```
"""
# pylint: disable=protected-access
if as_ref:
raise NotImplementedError(
'Cannot convert a `Distribution` to a reference '
'(e.g., `tf.Variable`).')
if self._concrete_value is None:
if self._convert_to_tensor_fn is None:
raise NotImplementedError(
'Failed to convert object of type {} to Tensor. Contents: {}. '
'Call `distribution.set_tensor_conversion(lambda self: ...)` to '
'enable `tf.convert_to_tensor` capability. For example: '
'`x = tfd.Normal(0,1).set_tensor_conversion(tfd.Distribution.mean)`'
' results in `tf.convert_to_tensor(x)` being identical to '
'`x.mean()`.'.format(type(self), self))
with self._name_scope('value'):
self._concrete_value = (self._convert_to_tensor_fn(self)
if callable(self._convert_to_tensor_fn)
else self._convert_to_tensor_fn)
if not tf.is_tensor(self._concrete_value):
self._concrete_value = tfd._convert_to_tensor(
value=self._concrete_value,
name=name or 'concrete_value',
dtype=dtype,
dtype_hint=self.dtype)
return self._concrete_value
|
def _value(self, dtype=None, name=None, as_ref=False): # pylint: disable=g-doc-args
"""Get the value returned by `tf.convert_to_tensor(distribution)`.
Note: this function may mutate the distribution instance state by caching
the concretized `Tensor` value.
Args:
dtype: Must return a `Tensor` with the given `dtype` if specified.
name: If the conversion function creates a new `Tensor`, it should use the
given `name` if specified.
    as_ref: If `as_ref` is true, the function must return a `Tensor` reference,
such as a `Variable`.
Returns:
concretized_distribution_value: `Tensor` identical to
`tf.convert_to_tensor(distribution)`.
#### Examples
```python
tfd = tfp.distributions
x = tfd.Normal(0.5, 1).set_tensor_conversion(tfd.Distribution.mean)
x._value()
# ==> tf.convert_to_tensor(x) ==> 0.5
x._value() + 2
# ==> tf.convert_to_tensor(x) + 2. ==> 2.5
x + 2
# ==> tf.convert_to_tensor(x) + 2. ==> 2.5
```
"""
# pylint: disable=protected-access
if as_ref:
raise NotImplementedError(
'Cannot convert a `Distribution` to a reference '
'(e.g., `tf.Variable`).')
if self._concrete_value is None:
if self._convert_to_tensor_fn is None:
raise NotImplementedError(
'Failed to convert object of type {} to Tensor. Contents: {}. '
'Call `distribution.set_tensor_conversion(lambda self: ...)` to '
'enable `tf.convert_to_tensor` capability. For example: '
'`x = tfd.Normal(0,1).set_tensor_conversion(tfd.Distribution.mean)`'
' results in `tf.convert_to_tensor(x)` being identical to '
'`x.mean()`.'.format(type(self), self))
with self._name_scope('value'):
self._concrete_value = (self._convert_to_tensor_fn(self)
if callable(self._convert_to_tensor_fn)
else self._convert_to_tensor_fn)
if not tf.is_tensor(self._concrete_value):
self._concrete_value = tfd._convert_to_tensor(
value=self._concrete_value,
name=name or 'concrete_value',
dtype=dtype,
dtype_hint=self.dtype)
return self._concrete_value
|
[
"Get",
"the",
"value",
"returned",
"by",
"tf",
".",
"convert_to_tensor",
"(",
"distribution",
")",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/internal/distribution_tensor_coercible.py#L71-L128
|
[
"def",
"_value",
"(",
"self",
",",
"dtype",
"=",
"None",
",",
"name",
"=",
"None",
",",
"as_ref",
"=",
"False",
")",
":",
"# pylint: disable=g-doc-args",
"# pylint: disable=protected-access",
"if",
"as_ref",
":",
"raise",
"NotImplementedError",
"(",
"'Cannot convert a `Distribution` to a reference '",
"'(e.g., `tf.Variable`).'",
")",
"if",
"self",
".",
"_concrete_value",
"is",
"None",
":",
"if",
"self",
".",
"_convert_to_tensor_fn",
"is",
"None",
":",
"raise",
"NotImplementedError",
"(",
"'Failed to convert object of type {} to Tensor. Contents: {}. '",
"'Call `distribution.set_tensor_conversion(lambda self: ...)` to '",
"'enable `tf.convert_to_tensor` capability. For example: '",
"'`x = tfd.Normal(0,1).set_tensor_conversion(tfd.Distribution.mean)`'",
"' results in `tf.convert_to_tensor(x)` being identical to '",
"'`x.mean()`.'",
".",
"format",
"(",
"type",
"(",
"self",
")",
",",
"self",
")",
")",
"with",
"self",
".",
"_name_scope",
"(",
"'value'",
")",
":",
"self",
".",
"_concrete_value",
"=",
"(",
"self",
".",
"_convert_to_tensor_fn",
"(",
"self",
")",
"if",
"callable",
"(",
"self",
".",
"_convert_to_tensor_fn",
")",
"else",
"self",
".",
"_convert_to_tensor_fn",
")",
"if",
"not",
"tf",
".",
"is_tensor",
"(",
"self",
".",
"_concrete_value",
")",
":",
"self",
".",
"_concrete_value",
"=",
"tfd",
".",
"_convert_to_tensor",
"(",
"value",
"=",
"self",
".",
"_concrete_value",
",",
"name",
"=",
"name",
"or",
"'concrete_value'",
",",
"dtype",
"=",
"dtype",
",",
"dtype_hint",
"=",
"self",
".",
"dtype",
")",
"return",
"self",
".",
"_concrete_value"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
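A caching sketch mirroring the docstring example above; the `set_tensor_conversion` hook is the one the module's own error message refers to, so this is only a sketch of the intended usage, not a guaranteed public API:
```python
import tensorflow_probability as tfp

tfd = tfp.distributions

x = tfd.Normal(0., 1.).set_tensor_conversion(tfd.Distribution.sample)
first = x._value()
second = x._value()
first is second   # True: the concretized sample is cached on the instance
```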
test
|
make_encoder
|
Creates the encoder function.
Args:
activation: Activation function in hidden layers.
latent_size: The dimensionality of the encoding.
base_depth: The lowest depth for a layer.
Returns:
encoder: A `callable` mapping a `Tensor` of images to a
`tfd.Distribution` instance over encodings.
|
tensorflow_probability/examples/vae.py
|
def make_encoder(activation, latent_size, base_depth):
"""Creates the encoder function.
Args:
activation: Activation function in hidden layers.
latent_size: The dimensionality of the encoding.
base_depth: The lowest depth for a layer.
Returns:
encoder: A `callable` mapping a `Tensor` of images to a
`tfd.Distribution` instance over encodings.
"""
conv = functools.partial(
tf.keras.layers.Conv2D, padding="SAME", activation=activation)
encoder_net = tf.keras.Sequential([
conv(base_depth, 5, 1),
conv(base_depth, 5, 2),
conv(2 * base_depth, 5, 1),
conv(2 * base_depth, 5, 2),
conv(4 * latent_size, 7, padding="VALID"),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(2 * latent_size, activation=None),
])
def encoder(images):
images = 2 * tf.cast(images, dtype=tf.float32) - 1
net = encoder_net(images)
return tfd.MultivariateNormalDiag(
loc=net[..., :latent_size],
scale_diag=tf.nn.softplus(net[..., latent_size:] +
_softplus_inverse(1.0)),
name="code")
return encoder
|
def make_encoder(activation, latent_size, base_depth):
"""Creates the encoder function.
Args:
activation: Activation function in hidden layers.
latent_size: The dimensionality of the encoding.
base_depth: The lowest depth for a layer.
Returns:
encoder: A `callable` mapping a `Tensor` of images to a
`tfd.Distribution` instance over encodings.
"""
conv = functools.partial(
tf.keras.layers.Conv2D, padding="SAME", activation=activation)
encoder_net = tf.keras.Sequential([
conv(base_depth, 5, 1),
conv(base_depth, 5, 2),
conv(2 * base_depth, 5, 1),
conv(2 * base_depth, 5, 2),
conv(4 * latent_size, 7, padding="VALID"),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(2 * latent_size, activation=None),
])
def encoder(images):
images = 2 * tf.cast(images, dtype=tf.float32) - 1
net = encoder_net(images)
return tfd.MultivariateNormalDiag(
loc=net[..., :latent_size],
scale_diag=tf.nn.softplus(net[..., latent_size:] +
_softplus_inverse(1.0)),
name="code")
return encoder
|
[
"Creates",
"the",
"encoder",
"function",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/vae.py#L191-L225
|
[
"def",
"make_encoder",
"(",
"activation",
",",
"latent_size",
",",
"base_depth",
")",
":",
"conv",
"=",
"functools",
".",
"partial",
"(",
"tf",
".",
"keras",
".",
"layers",
".",
"Conv2D",
",",
"padding",
"=",
"\"SAME\"",
",",
"activation",
"=",
"activation",
")",
"encoder_net",
"=",
"tf",
".",
"keras",
".",
"Sequential",
"(",
"[",
"conv",
"(",
"base_depth",
",",
"5",
",",
"1",
")",
",",
"conv",
"(",
"base_depth",
",",
"5",
",",
"2",
")",
",",
"conv",
"(",
"2",
"*",
"base_depth",
",",
"5",
",",
"1",
")",
",",
"conv",
"(",
"2",
"*",
"base_depth",
",",
"5",
",",
"2",
")",
",",
"conv",
"(",
"4",
"*",
"latent_size",
",",
"7",
",",
"padding",
"=",
"\"VALID\"",
")",
",",
"tf",
".",
"keras",
".",
"layers",
".",
"Flatten",
"(",
")",
",",
"tf",
".",
"keras",
".",
"layers",
".",
"Dense",
"(",
"2",
"*",
"latent_size",
",",
"activation",
"=",
"None",
")",
",",
"]",
")",
"def",
"encoder",
"(",
"images",
")",
":",
"images",
"=",
"2",
"*",
"tf",
".",
"cast",
"(",
"images",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"-",
"1",
"net",
"=",
"encoder_net",
"(",
"images",
")",
"return",
"tfd",
".",
"MultivariateNormalDiag",
"(",
"loc",
"=",
"net",
"[",
"...",
",",
":",
"latent_size",
"]",
",",
"scale_diag",
"=",
"tf",
".",
"nn",
".",
"softplus",
"(",
"net",
"[",
"...",
",",
"latent_size",
":",
"]",
"+",
"_softplus_inverse",
"(",
"1.0",
")",
")",
",",
"name",
"=",
"\"code\"",
")",
"return",
"encoder"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
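A usage sketch assuming `make_encoder` from the listing is in scope and MNIST-sized inputs (28x28x1), as in the surrounding example script:
```python
import tensorflow as tf

encoder = make_encoder(tf.nn.leaky_relu, latent_size=16, base_depth=32)
images = tf.random.uniform([8, 28, 28, 1])   # batch of 8 fake images
approx_posterior = encoder(images)           # tfd.MultivariateNormalDiag
approx_posterior.sample().shape              # [8, 16]
```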
test
|
make_decoder
|
Creates the decoder function.
Args:
activation: Activation function in hidden layers.
latent_size: Dimensionality of the encoding.
output_shape: The output image shape.
base_depth: Smallest depth for a layer.
Returns:
decoder: A `callable` mapping a `Tensor` of encodings to a
`tfd.Distribution` instance over images.
|
tensorflow_probability/examples/vae.py
|
def make_decoder(activation, latent_size, output_shape, base_depth):
"""Creates the decoder function.
Args:
activation: Activation function in hidden layers.
latent_size: Dimensionality of the encoding.
output_shape: The output image shape.
base_depth: Smallest depth for a layer.
Returns:
decoder: A `callable` mapping a `Tensor` of encodings to a
`tfd.Distribution` instance over images.
"""
deconv = functools.partial(
tf.keras.layers.Conv2DTranspose, padding="SAME", activation=activation)
conv = functools.partial(
tf.keras.layers.Conv2D, padding="SAME", activation=activation)
decoder_net = tf.keras.Sequential([
deconv(2 * base_depth, 7, padding="VALID"),
deconv(2 * base_depth, 5),
deconv(2 * base_depth, 5, 2),
deconv(base_depth, 5),
deconv(base_depth, 5, 2),
deconv(base_depth, 5),
conv(output_shape[-1], 5, activation=None),
])
def decoder(codes):
original_shape = tf.shape(input=codes)
# Collapse the sample and batch dimension and convert to rank-4 tensor for
# use with a convolutional decoder network.
codes = tf.reshape(codes, (-1, 1, 1, latent_size))
logits = decoder_net(codes)
logits = tf.reshape(
logits, shape=tf.concat([original_shape[:-1], output_shape], axis=0))
return tfd.Independent(tfd.Bernoulli(logits=logits),
reinterpreted_batch_ndims=len(output_shape),
name="image")
return decoder
|
def make_decoder(activation, latent_size, output_shape, base_depth):
"""Creates the decoder function.
Args:
activation: Activation function in hidden layers.
latent_size: Dimensionality of the encoding.
output_shape: The output image shape.
base_depth: Smallest depth for a layer.
Returns:
decoder: A `callable` mapping a `Tensor` of encodings to a
`tfd.Distribution` instance over images.
"""
deconv = functools.partial(
tf.keras.layers.Conv2DTranspose, padding="SAME", activation=activation)
conv = functools.partial(
tf.keras.layers.Conv2D, padding="SAME", activation=activation)
decoder_net = tf.keras.Sequential([
deconv(2 * base_depth, 7, padding="VALID"),
deconv(2 * base_depth, 5),
deconv(2 * base_depth, 5, 2),
deconv(base_depth, 5),
deconv(base_depth, 5, 2),
deconv(base_depth, 5),
conv(output_shape[-1], 5, activation=None),
])
def decoder(codes):
original_shape = tf.shape(input=codes)
# Collapse the sample and batch dimension and convert to rank-4 tensor for
# use with a convolutional decoder network.
codes = tf.reshape(codes, (-1, 1, 1, latent_size))
logits = decoder_net(codes)
logits = tf.reshape(
logits, shape=tf.concat([original_shape[:-1], output_shape], axis=0))
return tfd.Independent(tfd.Bernoulli(logits=logits),
reinterpreted_batch_ndims=len(output_shape),
name="image")
return decoder
|
[
"Creates",
"the",
"decoder",
"function",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/vae.py#L228-L268
|
[
"def",
"make_decoder",
"(",
"activation",
",",
"latent_size",
",",
"output_shape",
",",
"base_depth",
")",
":",
"deconv",
"=",
"functools",
".",
"partial",
"(",
"tf",
".",
"keras",
".",
"layers",
".",
"Conv2DTranspose",
",",
"padding",
"=",
"\"SAME\"",
",",
"activation",
"=",
"activation",
")",
"conv",
"=",
"functools",
".",
"partial",
"(",
"tf",
".",
"keras",
".",
"layers",
".",
"Conv2D",
",",
"padding",
"=",
"\"SAME\"",
",",
"activation",
"=",
"activation",
")",
"decoder_net",
"=",
"tf",
".",
"keras",
".",
"Sequential",
"(",
"[",
"deconv",
"(",
"2",
"*",
"base_depth",
",",
"7",
",",
"padding",
"=",
"\"VALID\"",
")",
",",
"deconv",
"(",
"2",
"*",
"base_depth",
",",
"5",
")",
",",
"deconv",
"(",
"2",
"*",
"base_depth",
",",
"5",
",",
"2",
")",
",",
"deconv",
"(",
"base_depth",
",",
"5",
")",
",",
"deconv",
"(",
"base_depth",
",",
"5",
",",
"2",
")",
",",
"deconv",
"(",
"base_depth",
",",
"5",
")",
",",
"conv",
"(",
"output_shape",
"[",
"-",
"1",
"]",
",",
"5",
",",
"activation",
"=",
"None",
")",
",",
"]",
")",
"def",
"decoder",
"(",
"codes",
")",
":",
"original_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"codes",
")",
"# Collapse the sample and batch dimension and convert to rank-4 tensor for",
"# use with a convolutional decoder network.",
"codes",
"=",
"tf",
".",
"reshape",
"(",
"codes",
",",
"(",
"-",
"1",
",",
"1",
",",
"1",
",",
"latent_size",
")",
")",
"logits",
"=",
"decoder_net",
"(",
"codes",
")",
"logits",
"=",
"tf",
".",
"reshape",
"(",
"logits",
",",
"shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"original_shape",
"[",
":",
"-",
"1",
"]",
",",
"output_shape",
"]",
",",
"axis",
"=",
"0",
")",
")",
"return",
"tfd",
".",
"Independent",
"(",
"tfd",
".",
"Bernoulli",
"(",
"logits",
"=",
"logits",
")",
",",
"reinterpreted_batch_ndims",
"=",
"len",
"(",
"output_shape",
")",
",",
"name",
"=",
"\"image\"",
")",
"return",
"decoder"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
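A minimal usage sketch for the make_decoder record above, assuming an MNIST-style output shape and illustrative sizes (these values are assumptions, not taken from the record):
import tensorflow as tf
decoder = make_decoder(activation=tf.nn.leaky_relu, latent_size=16,
                       output_shape=[28, 28, 1], base_depth=32)
codes = tf.random.normal([4, 10, 16])   # [n_samples, batch, latent_size]
image_dist = decoder(codes)             # tfd.Independent(tfd.Bernoulli) over images
print(image_dist.sample().shape)        # (4, 10, 28, 28, 1)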
test
|
make_mixture_prior
|
Creates the mixture of Gaussians prior distribution.
Args:
latent_size: The dimensionality of the latent representation.
mixture_components: Number of elements of the mixture.
Returns:
random_prior: A `tfd.Distribution` instance representing the distribution
over encodings in the absence of any evidence.
|
tensorflow_probability/examples/vae.py
|
def make_mixture_prior(latent_size, mixture_components):
"""Creates the mixture of Gaussians prior distribution.
Args:
latent_size: The dimensionality of the latent representation.
mixture_components: Number of elements of the mixture.
Returns:
random_prior: A `tfd.Distribution` instance representing the distribution
over encodings in the absence of any evidence.
"""
if mixture_components == 1:
# See the module docstring for why we don't learn the parameters here.
return tfd.MultivariateNormalDiag(
loc=tf.zeros([latent_size]),
scale_identity_multiplier=1.0)
loc = tf.compat.v1.get_variable(
name="loc", shape=[mixture_components, latent_size])
raw_scale_diag = tf.compat.v1.get_variable(
name="raw_scale_diag", shape=[mixture_components, latent_size])
mixture_logits = tf.compat.v1.get_variable(
name="mixture_logits", shape=[mixture_components])
return tfd.MixtureSameFamily(
components_distribution=tfd.MultivariateNormalDiag(
loc=loc,
scale_diag=tf.nn.softplus(raw_scale_diag)),
mixture_distribution=tfd.Categorical(logits=mixture_logits),
name="prior")
|
def make_mixture_prior(latent_size, mixture_components):
"""Creates the mixture of Gaussians prior distribution.
Args:
latent_size: The dimensionality of the latent representation.
mixture_components: Number of elements of the mixture.
Returns:
random_prior: A `tfd.Distribution` instance representing the distribution
over encodings in the absence of any evidence.
"""
if mixture_components == 1:
# See the module docstring for why we don't learn the parameters here.
return tfd.MultivariateNormalDiag(
loc=tf.zeros([latent_size]),
scale_identity_multiplier=1.0)
loc = tf.compat.v1.get_variable(
name="loc", shape=[mixture_components, latent_size])
raw_scale_diag = tf.compat.v1.get_variable(
name="raw_scale_diag", shape=[mixture_components, latent_size])
mixture_logits = tf.compat.v1.get_variable(
name="mixture_logits", shape=[mixture_components])
return tfd.MixtureSameFamily(
components_distribution=tfd.MultivariateNormalDiag(
loc=loc,
scale_diag=tf.nn.softplus(raw_scale_diag)),
mixture_distribution=tfd.Categorical(logits=mixture_logits),
name="prior")
|
[
"Creates",
"the",
"mixture",
"of",
"Gaussians",
"prior",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/vae.py#L271-L300
|
[
"def",
"make_mixture_prior",
"(",
"latent_size",
",",
"mixture_components",
")",
":",
"if",
"mixture_components",
"==",
"1",
":",
"# See the module docstring for why we don't learn the parameters here.",
"return",
"tfd",
".",
"MultivariateNormalDiag",
"(",
"loc",
"=",
"tf",
".",
"zeros",
"(",
"[",
"latent_size",
"]",
")",
",",
"scale_identity_multiplier",
"=",
"1.0",
")",
"loc",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"get_variable",
"(",
"name",
"=",
"\"loc\"",
",",
"shape",
"=",
"[",
"mixture_components",
",",
"latent_size",
"]",
")",
"raw_scale_diag",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"get_variable",
"(",
"name",
"=",
"\"raw_scale_diag\"",
",",
"shape",
"=",
"[",
"mixture_components",
",",
"latent_size",
"]",
")",
"mixture_logits",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"get_variable",
"(",
"name",
"=",
"\"mixture_logits\"",
",",
"shape",
"=",
"[",
"mixture_components",
"]",
")",
"return",
"tfd",
".",
"MixtureSameFamily",
"(",
"components_distribution",
"=",
"tfd",
".",
"MultivariateNormalDiag",
"(",
"loc",
"=",
"loc",
",",
"scale_diag",
"=",
"tf",
".",
"nn",
".",
"softplus",
"(",
"raw_scale_diag",
")",
")",
",",
"mixture_distribution",
"=",
"tfd",
".",
"Categorical",
"(",
"logits",
"=",
"mixture_logits",
")",
",",
"name",
"=",
"\"prior\"",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
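A minimal eager-mode sketch of the same mixture-of-Gaussians prior, using tf.Variable in place of tf.compat.v1.get_variable so it runs outside an Estimator graph; the sizes below are illustrative assumptions:
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

latent_size, mixture_components = 16, 100
loc = tf.Variable(tf.random.normal([mixture_components, latent_size]), name="loc")
raw_scale_diag = tf.Variable(tf.random.normal([mixture_components, latent_size]),
                             name="raw_scale_diag")
mixture_logits = tf.Variable(tf.zeros([mixture_components]), name="mixture_logits")

prior = tfd.MixtureSameFamily(
    components_distribution=tfd.MultivariateNormalDiag(
        loc=loc, scale_diag=tf.nn.softplus(raw_scale_diag)),
    mixture_distribution=tfd.Categorical(logits=mixture_logits),
    name="prior")
z = prior.sample(7)        # shape: [7, latent_size]
log_p = prior.log_prob(z)  # shape: [7]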
test
|
pack_images
|
Helper utility to make a field of images.
|
tensorflow_probability/examples/vae.py
|
def pack_images(images, rows, cols):
"""Helper utility to make a field of images."""
shape = tf.shape(input=images)
width = shape[-3]
height = shape[-2]
depth = shape[-1]
images = tf.reshape(images, (-1, width, height, depth))
batch = tf.shape(input=images)[0]
rows = tf.minimum(rows, batch)
cols = tf.minimum(batch // rows, cols)
images = images[:rows * cols]
images = tf.reshape(images, (rows, cols, width, height, depth))
images = tf.transpose(a=images, perm=[0, 2, 1, 3, 4])
images = tf.reshape(images, [1, rows * width, cols * height, depth])
return images
|
def pack_images(images, rows, cols):
"""Helper utility to make a field of images."""
shape = tf.shape(input=images)
width = shape[-3]
height = shape[-2]
depth = shape[-1]
images = tf.reshape(images, (-1, width, height, depth))
batch = tf.shape(input=images)[0]
rows = tf.minimum(rows, batch)
cols = tf.minimum(batch // rows, cols)
images = images[:rows * cols]
images = tf.reshape(images, (rows, cols, width, height, depth))
images = tf.transpose(a=images, perm=[0, 2, 1, 3, 4])
images = tf.reshape(images, [1, rows * width, cols * height, depth])
return images
|
[
"Helper",
"utility",
"to",
"make",
"a",
"field",
"of",
"images",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/vae.py#L303-L317
|
[
"def",
"pack_images",
"(",
"images",
",",
"rows",
",",
"cols",
")",
":",
"shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"images",
")",
"width",
"=",
"shape",
"[",
"-",
"3",
"]",
"height",
"=",
"shape",
"[",
"-",
"2",
"]",
"depth",
"=",
"shape",
"[",
"-",
"1",
"]",
"images",
"=",
"tf",
".",
"reshape",
"(",
"images",
",",
"(",
"-",
"1",
",",
"width",
",",
"height",
",",
"depth",
")",
")",
"batch",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"images",
")",
"[",
"0",
"]",
"rows",
"=",
"tf",
".",
"minimum",
"(",
"rows",
",",
"batch",
")",
"cols",
"=",
"tf",
".",
"minimum",
"(",
"batch",
"//",
"rows",
",",
"cols",
")",
"images",
"=",
"images",
"[",
":",
"rows",
"*",
"cols",
"]",
"images",
"=",
"tf",
".",
"reshape",
"(",
"images",
",",
"(",
"rows",
",",
"cols",
",",
"width",
",",
"height",
",",
"depth",
")",
")",
"images",
"=",
"tf",
".",
"transpose",
"(",
"a",
"=",
"images",
",",
"perm",
"=",
"[",
"0",
",",
"2",
",",
"1",
",",
"3",
",",
"4",
"]",
")",
"images",
"=",
"tf",
".",
"reshape",
"(",
"images",
",",
"[",
"1",
",",
"rows",
"*",
"width",
",",
"cols",
"*",
"height",
",",
"depth",
"]",
")",
"return",
"images"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
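A quick shape check for the pack_images helper above, assuming a batch of MNIST-sized images:
import tensorflow as tf
images = tf.random.uniform([12, 28, 28, 1])
grid = pack_images(images, rows=3, cols=4)  # tiles 12 images into a 3x4 grid
print(grid.shape)                           # (1, 84, 112, 1) == (1, rows*width, cols*height, depth)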
test
|
model_fn
|
Builds the model function for use in an estimator.
Arguments:
features: The input features for the estimator.
labels: The labels, unused here.
mode: Signifies whether it is train or test or predict.
params: Some hyperparameters as a dictionary.
config: The RunConfig, unused here.
Returns:
EstimatorSpec: A tf.estimator.EstimatorSpec instance.
|
tensorflow_probability/examples/vae.py
|
def model_fn(features, labels, mode, params, config):
"""Builds the model function for use in an estimator.
Arguments:
features: The input features for the estimator.
labels: The labels, unused here.
mode: Signifies whether it is train or test or predict.
params: Some hyperparameters as a dictionary.
config: The RunConfig, unused here.
Returns:
EstimatorSpec: A tf.estimator.EstimatorSpec instance.
"""
del labels, config
if params["analytic_kl"] and params["mixture_components"] != 1:
raise NotImplementedError(
"Using `analytic_kl` is only supported when `mixture_components = 1` "
"since there's no closed form otherwise.")
encoder = make_encoder(params["activation"],
params["latent_size"],
params["base_depth"])
decoder = make_decoder(params["activation"],
params["latent_size"],
IMAGE_SHAPE,
params["base_depth"])
latent_prior = make_mixture_prior(params["latent_size"],
params["mixture_components"])
image_tile_summary(
"input", tf.cast(features, dtype=tf.float32), rows=1, cols=16)
approx_posterior = encoder(features)
approx_posterior_sample = approx_posterior.sample(params["n_samples"])
decoder_likelihood = decoder(approx_posterior_sample)
image_tile_summary(
"recon/sample",
tf.cast(decoder_likelihood.sample()[:3, :16], dtype=tf.float32),
rows=3,
cols=16)
image_tile_summary(
"recon/mean",
decoder_likelihood.mean()[:3, :16],
rows=3,
cols=16)
# `distortion` is just the negative log likelihood.
distortion = -decoder_likelihood.log_prob(features)
avg_distortion = tf.reduce_mean(input_tensor=distortion)
tf.compat.v1.summary.scalar("distortion", avg_distortion)
if params["analytic_kl"]:
rate = tfd.kl_divergence(approx_posterior, latent_prior)
else:
rate = (approx_posterior.log_prob(approx_posterior_sample)
- latent_prior.log_prob(approx_posterior_sample))
avg_rate = tf.reduce_mean(input_tensor=rate)
tf.compat.v1.summary.scalar("rate", avg_rate)
elbo_local = -(rate + distortion)
elbo = tf.reduce_mean(input_tensor=elbo_local)
loss = -elbo
tf.compat.v1.summary.scalar("elbo", elbo)
importance_weighted_elbo = tf.reduce_mean(
input_tensor=tf.reduce_logsumexp(input_tensor=elbo_local, axis=0) -
tf.math.log(tf.cast(params["n_samples"], dtype=tf.float32)))
tf.compat.v1.summary.scalar("elbo/importance_weighted",
importance_weighted_elbo)
# Decode samples from the prior for visualization.
random_image = decoder(latent_prior.sample(16))
image_tile_summary(
"random/sample",
tf.cast(random_image.sample(), dtype=tf.float32),
rows=4,
cols=4)
image_tile_summary("random/mean", random_image.mean(), rows=4, cols=4)
# Perform variational inference by minimizing the -ELBO.
global_step = tf.compat.v1.train.get_or_create_global_step()
learning_rate = tf.compat.v1.train.cosine_decay(
params["learning_rate"], global_step, params["max_steps"])
tf.compat.v1.summary.scalar("learning_rate", learning_rate)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops={
"elbo":
tf.compat.v1.metrics.mean(elbo),
"elbo/importance_weighted":
tf.compat.v1.metrics.mean(importance_weighted_elbo),
"rate":
tf.compat.v1.metrics.mean(avg_rate),
"distortion":
tf.compat.v1.metrics.mean(avg_distortion),
},
)
|
def model_fn(features, labels, mode, params, config):
"""Builds the model function for use in an estimator.
Arguments:
features: The input features for the estimator.
labels: The labels, unused here.
mode: Signifies whether it is train or test or predict.
params: Some hyperparameters as a dictionary.
config: The RunConfig, unused here.
Returns:
EstimatorSpec: A tf.estimator.EstimatorSpec instance.
"""
del labels, config
if params["analytic_kl"] and params["mixture_components"] != 1:
raise NotImplementedError(
"Using `analytic_kl` is only supported when `mixture_components = 1` "
"since there's no closed form otherwise.")
encoder = make_encoder(params["activation"],
params["latent_size"],
params["base_depth"])
decoder = make_decoder(params["activation"],
params["latent_size"],
IMAGE_SHAPE,
params["base_depth"])
latent_prior = make_mixture_prior(params["latent_size"],
params["mixture_components"])
image_tile_summary(
"input", tf.cast(features, dtype=tf.float32), rows=1, cols=16)
approx_posterior = encoder(features)
approx_posterior_sample = approx_posterior.sample(params["n_samples"])
decoder_likelihood = decoder(approx_posterior_sample)
image_tile_summary(
"recon/sample",
tf.cast(decoder_likelihood.sample()[:3, :16], dtype=tf.float32),
rows=3,
cols=16)
image_tile_summary(
"recon/mean",
decoder_likelihood.mean()[:3, :16],
rows=3,
cols=16)
# `distortion` is just the negative log likelihood.
distortion = -decoder_likelihood.log_prob(features)
avg_distortion = tf.reduce_mean(input_tensor=distortion)
tf.compat.v1.summary.scalar("distortion", avg_distortion)
if params["analytic_kl"]:
rate = tfd.kl_divergence(approx_posterior, latent_prior)
else:
rate = (approx_posterior.log_prob(approx_posterior_sample)
- latent_prior.log_prob(approx_posterior_sample))
avg_rate = tf.reduce_mean(input_tensor=rate)
tf.compat.v1.summary.scalar("rate", avg_rate)
elbo_local = -(rate + distortion)
elbo = tf.reduce_mean(input_tensor=elbo_local)
loss = -elbo
tf.compat.v1.summary.scalar("elbo", elbo)
importance_weighted_elbo = tf.reduce_mean(
input_tensor=tf.reduce_logsumexp(input_tensor=elbo_local, axis=0) -
tf.math.log(tf.cast(params["n_samples"], dtype=tf.float32)))
tf.compat.v1.summary.scalar("elbo/importance_weighted",
importance_weighted_elbo)
# Decode samples from the prior for visualization.
random_image = decoder(latent_prior.sample(16))
image_tile_summary(
"random/sample",
tf.cast(random_image.sample(), dtype=tf.float32),
rows=4,
cols=4)
image_tile_summary("random/mean", random_image.mean(), rows=4, cols=4)
# Perform variational inference by minimizing the -ELBO.
global_step = tf.compat.v1.train.get_or_create_global_step()
learning_rate = tf.compat.v1.train.cosine_decay(
params["learning_rate"], global_step, params["max_steps"])
tf.compat.v1.summary.scalar("learning_rate", learning_rate)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops={
"elbo":
tf.compat.v1.metrics.mean(elbo),
"elbo/importance_weighted":
tf.compat.v1.metrics.mean(importance_weighted_elbo),
"rate":
tf.compat.v1.metrics.mean(avg_rate),
"distortion":
tf.compat.v1.metrics.mean(avg_distortion),
},
)
|
[
"Builds",
"the",
"model",
"function",
"for",
"use",
"in",
"an",
"estimator",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/vae.py#L325-L428
|
[
"def",
"model_fn",
"(",
"features",
",",
"labels",
",",
"mode",
",",
"params",
",",
"config",
")",
":",
"del",
"labels",
",",
"config",
"if",
"params",
"[",
"\"analytic_kl\"",
"]",
"and",
"params",
"[",
"\"mixture_components\"",
"]",
"!=",
"1",
":",
"raise",
"NotImplementedError",
"(",
"\"Using `analytic_kl` is only supported when `mixture_components = 1` \"",
"\"since there's no closed form otherwise.\"",
")",
"encoder",
"=",
"make_encoder",
"(",
"params",
"[",
"\"activation\"",
"]",
",",
"params",
"[",
"\"latent_size\"",
"]",
",",
"params",
"[",
"\"base_depth\"",
"]",
")",
"decoder",
"=",
"make_decoder",
"(",
"params",
"[",
"\"activation\"",
"]",
",",
"params",
"[",
"\"latent_size\"",
"]",
",",
"IMAGE_SHAPE",
",",
"params",
"[",
"\"base_depth\"",
"]",
")",
"latent_prior",
"=",
"make_mixture_prior",
"(",
"params",
"[",
"\"latent_size\"",
"]",
",",
"params",
"[",
"\"mixture_components\"",
"]",
")",
"image_tile_summary",
"(",
"\"input\"",
",",
"tf",
".",
"cast",
"(",
"features",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
",",
"rows",
"=",
"1",
",",
"cols",
"=",
"16",
")",
"approx_posterior",
"=",
"encoder",
"(",
"features",
")",
"approx_posterior_sample",
"=",
"approx_posterior",
".",
"sample",
"(",
"params",
"[",
"\"n_samples\"",
"]",
")",
"decoder_likelihood",
"=",
"decoder",
"(",
"approx_posterior_sample",
")",
"image_tile_summary",
"(",
"\"recon/sample\"",
",",
"tf",
".",
"cast",
"(",
"decoder_likelihood",
".",
"sample",
"(",
")",
"[",
":",
"3",
",",
":",
"16",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
",",
"rows",
"=",
"3",
",",
"cols",
"=",
"16",
")",
"image_tile_summary",
"(",
"\"recon/mean\"",
",",
"decoder_likelihood",
".",
"mean",
"(",
")",
"[",
":",
"3",
",",
":",
"16",
"]",
",",
"rows",
"=",
"3",
",",
"cols",
"=",
"16",
")",
"# `distortion` is just the negative log likelihood.",
"distortion",
"=",
"-",
"decoder_likelihood",
".",
"log_prob",
"(",
"features",
")",
"avg_distortion",
"=",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"distortion",
")",
"tf",
".",
"compat",
".",
"v1",
".",
"summary",
".",
"scalar",
"(",
"\"distortion\"",
",",
"avg_distortion",
")",
"if",
"params",
"[",
"\"analytic_kl\"",
"]",
":",
"rate",
"=",
"tfd",
".",
"kl_divergence",
"(",
"approx_posterior",
",",
"latent_prior",
")",
"else",
":",
"rate",
"=",
"(",
"approx_posterior",
".",
"log_prob",
"(",
"approx_posterior_sample",
")",
"-",
"latent_prior",
".",
"log_prob",
"(",
"approx_posterior_sample",
")",
")",
"avg_rate",
"=",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"rate",
")",
"tf",
".",
"compat",
".",
"v1",
".",
"summary",
".",
"scalar",
"(",
"\"rate\"",
",",
"avg_rate",
")",
"elbo_local",
"=",
"-",
"(",
"rate",
"+",
"distortion",
")",
"elbo",
"=",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"elbo_local",
")",
"loss",
"=",
"-",
"elbo",
"tf",
".",
"compat",
".",
"v1",
".",
"summary",
".",
"scalar",
"(",
"\"elbo\"",
",",
"elbo",
")",
"importance_weighted_elbo",
"=",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"tf",
".",
"reduce_logsumexp",
"(",
"input_tensor",
"=",
"elbo_local",
",",
"axis",
"=",
"0",
")",
"-",
"tf",
".",
"math",
".",
"log",
"(",
"tf",
".",
"cast",
"(",
"params",
"[",
"\"n_samples\"",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
")",
")",
"tf",
".",
"compat",
".",
"v1",
".",
"summary",
".",
"scalar",
"(",
"\"elbo/importance_weighted\"",
",",
"importance_weighted_elbo",
")",
"# Decode samples from the prior for visualization.",
"random_image",
"=",
"decoder",
"(",
"latent_prior",
".",
"sample",
"(",
"16",
")",
")",
"image_tile_summary",
"(",
"\"random/sample\"",
",",
"tf",
".",
"cast",
"(",
"random_image",
".",
"sample",
"(",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
",",
"rows",
"=",
"4",
",",
"cols",
"=",
"4",
")",
"image_tile_summary",
"(",
"\"random/mean\"",
",",
"random_image",
".",
"mean",
"(",
")",
",",
"rows",
"=",
"4",
",",
"cols",
"=",
"4",
")",
"# Perform variational inference by minimizing the -ELBO.",
"global_step",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"train",
".",
"get_or_create_global_step",
"(",
")",
"learning_rate",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"train",
".",
"cosine_decay",
"(",
"params",
"[",
"\"learning_rate\"",
"]",
",",
"global_step",
",",
"params",
"[",
"\"max_steps\"",
"]",
")",
"tf",
".",
"compat",
".",
"v1",
".",
"summary",
".",
"scalar",
"(",
"\"learning_rate\"",
",",
"learning_rate",
")",
"optimizer",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"train",
".",
"AdamOptimizer",
"(",
"learning_rate",
")",
"train_op",
"=",
"optimizer",
".",
"minimize",
"(",
"loss",
",",
"global_step",
"=",
"global_step",
")",
"return",
"tf",
".",
"estimator",
".",
"EstimatorSpec",
"(",
"mode",
"=",
"mode",
",",
"loss",
"=",
"loss",
",",
"train_op",
"=",
"train_op",
",",
"eval_metric_ops",
"=",
"{",
"\"elbo\"",
":",
"tf",
".",
"compat",
".",
"v1",
".",
"metrics",
".",
"mean",
"(",
"elbo",
")",
",",
"\"elbo/importance_weighted\"",
":",
"tf",
".",
"compat",
".",
"v1",
".",
"metrics",
".",
"mean",
"(",
"importance_weighted_elbo",
")",
",",
"\"rate\"",
":",
"tf",
".",
"compat",
".",
"v1",
".",
"metrics",
".",
"mean",
"(",
"avg_rate",
")",
",",
"\"distortion\"",
":",
"tf",
".",
"compat",
".",
"v1",
".",
"metrics",
".",
"mean",
"(",
"avg_distortion",
")",
",",
"}",
",",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
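A hedged sketch of wiring the model_fn above into a TF1-style Estimator; the hyperparameters and model_dir are illustrative assumptions, and the other helpers from tensorflow_probability/examples/vae.py (make_encoder, image_tile_summary, IMAGE_SHAPE) are assumed to be in scope:
import tensorflow as tf
params = {
    "activation": tf.nn.leaky_relu,
    "latent_size": 16,
    "base_depth": 32,
    "mixture_components": 100,
    "analytic_kl": False,
    "n_samples": 16,
    "learning_rate": 1e-3,
    "max_steps": 5001,
}
estimator = tf.estimator.Estimator(model_fn, params=params, model_dir="/tmp/vae")
# estimator.train(train_input_fn, max_steps=params["max_steps"])  # train_input_fn from build_input_fns below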
test
|
download
|
Downloads a file.
|
tensorflow_probability/examples/vae.py
|
def download(directory, filename):
"""Downloads a file."""
filepath = os.path.join(directory, filename)
if tf.io.gfile.exists(filepath):
return filepath
if not tf.io.gfile.exists(directory):
tf.io.gfile.makedirs(directory)
url = os.path.join(ROOT_PATH, filename)
print("Downloading %s to %s" % (url, filepath))
urllib.request.urlretrieve(url, filepath)
return filepath
|
def download(directory, filename):
"""Downloads a file."""
filepath = os.path.join(directory, filename)
if tf.io.gfile.exists(filepath):
return filepath
if not tf.io.gfile.exists(directory):
tf.io.gfile.makedirs(directory)
url = os.path.join(ROOT_PATH, filename)
print("Downloading %s to %s" % (url, filepath))
urllib.request.urlretrieve(url, filepath)
return filepath
|
[
"Downloads",
"a",
"file",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/vae.py#L435-L445
|
[
"def",
"download",
"(",
"directory",
",",
"filename",
")",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
")",
"if",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"filepath",
")",
":",
"return",
"filepath",
"if",
"not",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"directory",
")",
":",
"tf",
".",
"io",
".",
"gfile",
".",
"makedirs",
"(",
"directory",
")",
"url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"ROOT_PATH",
",",
"filename",
")",
"print",
"(",
"\"Downloading %s to %s\"",
"%",
"(",
"url",
",",
"filepath",
")",
")",
"urllib",
".",
"request",
".",
"urlretrieve",
"(",
"url",
",",
"filepath",
")",
"return",
"filepath"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
build_fake_input_fns
|
Builds fake MNIST-style data for unit testing.
|
tensorflow_probability/examples/vae.py
|
def build_fake_input_fns(batch_size):
"""Builds fake MNIST-style data for unit testing."""
random_sample = np.random.rand(batch_size, *IMAGE_SHAPE).astype("float32")
def train_input_fn():
dataset = tf.data.Dataset.from_tensor_slices(
random_sample).map(lambda row: (row, 0)).batch(batch_size).repeat()
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
def eval_input_fn():
dataset = tf.data.Dataset.from_tensor_slices(
random_sample).map(lambda row: (row, 0)).batch(batch_size)
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
return train_input_fn, eval_input_fn
|
def build_fake_input_fns(batch_size):
"""Builds fake MNIST-style data for unit testing."""
random_sample = np.random.rand(batch_size, *IMAGE_SHAPE).astype("float32")
def train_input_fn():
dataset = tf.data.Dataset.from_tensor_slices(
random_sample).map(lambda row: (row, 0)).batch(batch_size).repeat()
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
def eval_input_fn():
dataset = tf.data.Dataset.from_tensor_slices(
random_sample).map(lambda row: (row, 0)).batch(batch_size)
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
return train_input_fn, eval_input_fn
|
[
"Builds",
"fake",
"MNIST",
"-",
"style",
"data",
"for",
"unit",
"testing",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/vae.py#L462-L476
|
[
"def",
"build_fake_input_fns",
"(",
"batch_size",
")",
":",
"random_sample",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"batch_size",
",",
"*",
"IMAGE_SHAPE",
")",
".",
"astype",
"(",
"\"float32\"",
")",
"def",
"train_input_fn",
"(",
")",
":",
"dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"random_sample",
")",
".",
"map",
"(",
"lambda",
"row",
":",
"(",
"row",
",",
"0",
")",
")",
".",
"batch",
"(",
"batch_size",
")",
".",
"repeat",
"(",
")",
"return",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
".",
"make_one_shot_iterator",
"(",
"dataset",
")",
".",
"get_next",
"(",
")",
"def",
"eval_input_fn",
"(",
")",
":",
"dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"random_sample",
")",
".",
"map",
"(",
"lambda",
"row",
":",
"(",
"row",
",",
"0",
")",
")",
".",
"batch",
"(",
"batch_size",
")",
"return",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
".",
"make_one_shot_iterator",
"(",
"dataset",
")",
".",
"get_next",
"(",
")",
"return",
"train_input_fn",
",",
"eval_input_fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
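A graph-mode sketch exercising build_fake_input_fns; the batch size is an assumption and the module-level IMAGE_SHAPE (and numpy import) from vae.py are assumed to be in scope:
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
train_input_fn, eval_input_fn = build_fake_input_fns(batch_size=8)
features, labels = train_input_fn()
with tf.compat.v1.Session() as sess:
    images, _ = sess.run([features, labels])
    print(images.shape)  # (8, 28, 28, 1) when IMAGE_SHAPE == [28, 28, 1]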
test
|
build_input_fns
|
Builds an Iterator switching between train and heldout data.
|
tensorflow_probability/examples/vae.py
|
def build_input_fns(data_dir, batch_size):
"""Builds an Iterator switching between train and heldout data."""
# Build an iterator over training batches.
def train_input_fn():
dataset = static_mnist_dataset(data_dir, "train")
dataset = dataset.shuffle(50000).repeat().batch(batch_size)
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
# Build an iterator over the heldout set.
def eval_input_fn():
eval_dataset = static_mnist_dataset(data_dir, "valid")
eval_dataset = eval_dataset.batch(batch_size)
return tf.compat.v1.data.make_one_shot_iterator(eval_dataset).get_next()
return train_input_fn, eval_input_fn
|
def build_input_fns(data_dir, batch_size):
"""Builds an Iterator switching between train and heldout data."""
# Build an iterator over training batches.
def train_input_fn():
dataset = static_mnist_dataset(data_dir, "train")
dataset = dataset.shuffle(50000).repeat().batch(batch_size)
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
# Build an iterator over the heldout set.
def eval_input_fn():
eval_dataset = static_mnist_dataset(data_dir, "valid")
eval_dataset = eval_dataset.batch(batch_size)
return tf.compat.v1.data.make_one_shot_iterator(eval_dataset).get_next()
return train_input_fn, eval_input_fn
|
[
"Builds",
"an",
"Iterator",
"switching",
"between",
"train",
"and",
"heldout",
"data",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/vae.py#L479-L494
|
[
"def",
"build_input_fns",
"(",
"data_dir",
",",
"batch_size",
")",
":",
"# Build an iterator over training batches.",
"def",
"train_input_fn",
"(",
")",
":",
"dataset",
"=",
"static_mnist_dataset",
"(",
"data_dir",
",",
"\"train\"",
")",
"dataset",
"=",
"dataset",
".",
"shuffle",
"(",
"50000",
")",
".",
"repeat",
"(",
")",
".",
"batch",
"(",
"batch_size",
")",
"return",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
".",
"make_one_shot_iterator",
"(",
"dataset",
")",
".",
"get_next",
"(",
")",
"# Build an iterator over the heldout set.",
"def",
"eval_input_fn",
"(",
")",
":",
"eval_dataset",
"=",
"static_mnist_dataset",
"(",
"data_dir",
",",
"\"valid\"",
")",
"eval_dataset",
"=",
"eval_dataset",
".",
"batch",
"(",
"batch_size",
")",
"return",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
".",
"make_one_shot_iterator",
"(",
"eval_dataset",
")",
".",
"get_next",
"(",
")",
"return",
"train_input_fn",
",",
"eval_input_fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|