Dataset columns:
- partition: string (3 classes)
- func_name: string (length 1 to 134)
- docstring: string (length 1 to 46.9k)
- path: string (length 4 to 223)
- original_string: string (length 75 to 104k)
- code: string (length 75 to 104k)
- docstring_tokens: list (length 1 to 1.97k)
- repo: string (length 7 to 55)
- language: string (1 class)
- url: string (length 87 to 315)
- code_tokens: list (length 19 to 28.4k)
- sha: string (length 40)

Each record below lists these fields in order, one field per line.
test
triangular
The Triangular Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Triangular Csiszar-function is: ```none f(u) = (u - 1)**2 / (1 + u) ``` This Csiszar-function induces a symmetric f-Divergence, i.e., `D_f[p, q] = D_f[q, p]`. Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. name: Python `str` name prefixed to Ops created by this function. Returns: triangular_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`.
tensorflow_probability/python/vi/csiszar_divergence.py
def triangular(logu, name=None): """The Triangular Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Triangular Csiszar-function is: ```none f(u) = (u - 1)**2 / (1 + u) ``` This Csiszar-function induces a symmetric f-Divergence, i.e., `D_f[p, q] = D_f[q, p]`. Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. name: Python `str` name prefixed to Ops created by this function. Returns: triangular_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "triangular", [logu]): logu = tf.convert_to_tensor(value=logu, name="logu") return pearson(logu) / (1. + tf.exp(logu))
def triangular(logu, name=None): """The Triangular Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Triangular Csiszar-function is: ```none f(u) = (u - 1)**2 / (1 + u) ``` This Csiszar-function induces a symmetric f-Divergence, i.e., `D_f[p, q] = D_f[q, p]`. Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. name: Python `str` name prefixed to Ops created by this function. Returns: triangular_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "triangular", [logu]): logu = tf.convert_to_tensor(value=logu, name="logu") return pearson(logu) / (1. + tf.exp(logu))
[ "The", "Triangular", "Csiszar", "-", "function", "in", "log", "-", "space", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L427-L459
[ "def", "triangular", "(", "logu", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "\"triangular\"", ",", "[", "logu", "]", ")", ":", "logu", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "logu", ",", "name", "=", "\"logu\"", ")", "return", "pearson", "(", "logu", ")", "/", "(", "1.", "+", "tf", ".", "exp", "(", "logu", ")", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
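The log-space form used by `triangular` above follows from `expm1(logu) = u - 1`. A minimal NumPy sketch (illustrative only, not the TFP implementation; `pearson_np` and `triangular_np` are ad-hoc names) checking it against the direct formula `f(u) = (u - 1)**2 / (1 + u)`:

```python
import numpy as np

def pearson_np(logu):
    # Pearson Csiszar-function: f(u) = (u - 1)**2, with u = exp(logu).
    return np.square(np.expm1(logu))

def triangular_np(logu):
    # Triangular Csiszar-function: f(u) = (u - 1)**2 / (1 + u).
    return pearson_np(logu) / (1. + np.exp(logu))

logu = np.linspace(-3., 3., 7)
u = np.exp(logu)
# The log-space evaluation agrees with the direct formula.
assert np.allclose(triangular_np(logu), (u - 1.)**2 / (1. + u))
```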
test
t_power
The T-Power Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` When `self_normalized = True` the T-Power Csiszar-function is: ```none f(u) = s [ u**t - 1 - t(u - 1) ] s = { -1 0 < t < 1 { +1 otherwise ``` When `self_normalized = False` the `- t(u - 1)` term is omitted. This is similar to the `amari_alpha` Csiszar-function, with the associated divergence being the same up to factors depending only on `t`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. t: `Tensor` of same `dtype` as `logu` and broadcastable shape. self_normalized: Python `bool` indicating whether `f'(u=1)=0`. name: Python `str` name prefixed to Ops created by this function. Returns: t_power_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`.
tensorflow_probability/python/vi/csiszar_divergence.py
def t_power(logu, t, self_normalized=False, name=None): """The T-Power Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` When `self_normalized = True` the T-Power Csiszar-function is: ```none f(u) = s [ u**t - 1 - t(u - 1) ] s = { -1 0 < t < 1 { +1 otherwise ``` When `self_normalized = False` the `- t(u - 1)` term is omitted. This is similar to the `amari_alpha` Csiszar-function, with the associated divergence being the same up to factors depending only on `t`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. t: `Tensor` of same `dtype` as `logu` and broadcastable shape. self_normalized: Python `bool` indicating whether `f'(u=1)=0`. name: Python `str` name prefixed to Ops created by this function. Returns: t_power_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "t_power", [logu, t]): logu = tf.convert_to_tensor(value=logu, name="logu") t = tf.convert_to_tensor(value=t, dtype=logu.dtype.base_dtype, name="t") fu = tf.math.expm1(t * logu) if self_normalized: fu -= t * tf.math.expm1(logu) fu *= tf.where(tf.logical_and(0. < t, t < 1.), -tf.ones_like(t), tf.ones_like(t)) return fu
def t_power(logu, t, self_normalized=False, name=None): """The T-Power Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` When `self_normalized = True` the T-Power Csiszar-function is: ```none f(u) = s [ u**t - 1 - t(u - 1) ] s = { -1 0 < t < 1 { +1 otherwise ``` When `self_normalized = False` the `- t(u - 1)` term is omitted. This is similar to the `amari_alpha` Csiszar-function, with the associated divergence being the same up to factors depending only on `t`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. t: `Tensor` of same `dtype` as `logu` and broadcastable shape. self_normalized: Python `bool` indicating whether `f'(u=1)=0`. name: Python `str` name prefixed to Ops created by this function. Returns: t_power_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "t_power", [logu, t]): logu = tf.convert_to_tensor(value=logu, name="logu") t = tf.convert_to_tensor(value=t, dtype=logu.dtype.base_dtype, name="t") fu = tf.math.expm1(t * logu) if self_normalized: fu -= t * tf.math.expm1(logu) fu *= tf.where(tf.logical_and(0. < t, t < 1.), -tf.ones_like(t), tf.ones_like(t)) return fu
[ "The", "T", "-", "Power", "Csiszar", "-", "function", "in", "log", "-", "space", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L462-L503
[ "def", "t_power", "(", "logu", ",", "t", ",", "self_normalized", "=", "False", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "\"t_power\"", ",", "[", "logu", ",", "t", "]", ")", ":", "logu", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "logu", ",", "name", "=", "\"logu\"", ")", "t", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "t", ",", "dtype", "=", "logu", ".", "dtype", ".", "base_dtype", ",", "name", "=", "\"t\"", ")", "fu", "=", "tf", ".", "math", ".", "expm1", "(", "t", "*", "logu", ")", "if", "self_normalized", ":", "fu", "-=", "t", "*", "tf", ".", "math", ".", "expm1", "(", "logu", ")", "fu", "*=", "tf", ".", "where", "(", "tf", ".", "logical_and", "(", "0.", "<", "t", ",", "t", "<", "1.", ")", ",", "-", "tf", ".", "ones_like", "(", "t", ")", ",", "tf", ".", "ones_like", "(", "t", ")", ")", "return", "fu" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
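A minimal NumPy sketch (illustrative only; `t_power_np` is an ad-hoc name, not the TFP API) of the same expm1-based evaluation, including the sign flip that keeps `f` convex for `0 < t < 1`:

```python
import numpy as np

def t_power_np(logu, t, self_normalized=False):
    fu = np.expm1(t * logu)              # u**t - 1
    if self_normalized:
        fu -= t * np.expm1(logu)         # subtract t * (u - 1) so that f'(u=1) = 0
    # Flip the sign when 0 < t < 1 so the function stays convex.
    return np.where((t > 0.) & (t < 1.), -fu, fu)

logu = np.array([-1., 0., 1.])
print(t_power_np(logu, t=0.5, self_normalized=True))  # middle entry is f(u=1) = 0
```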
test
log1p_abs
The log1p-abs Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Log1p-Abs Csiszar-function is: ```none f(u) = u**(sign(u-1)) - 1 ``` This function is so-named because it was invented from the following recipe. Choose a convex function g such that g(0)=0 and solve for f: ```none log(1 + f(u)) = g(log(u)). <=> f(u) = exp(g(log(u))) - 1 ``` That is, the graph is identically `g` when y-axis is `log1p`-domain and x-axis is `log`-domain. Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. name: Python `str` name prefixed to Ops created by this function. Returns: log1p_abs_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`.
tensorflow_probability/python/vi/csiszar_divergence.py
def log1p_abs(logu, name=None): """The log1p-abs Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Log1p-Abs Csiszar-function is: ```none f(u) = u**(sign(u-1)) - 1 ``` This function is so-named because it was invented from the following recipe. Choose a convex function g such that g(0)=0 and solve for f: ```none log(1 + f(u)) = g(log(u)). <=> f(u) = exp(g(log(u))) - 1 ``` That is, the graph is identically `g` when y-axis is `log1p`-domain and x-axis is `log`-domain. Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. name: Python `str` name prefixed to Ops created by this function. Returns: log1p_abs_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "log1p_abs", [logu]): logu = tf.convert_to_tensor(value=logu, name="logu") return tf.math.expm1(tf.abs(logu))
def log1p_abs(logu, name=None): """The log1p-abs Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Log1p-Abs Csiszar-function is: ```none f(u) = u**(sign(u-1)) - 1 ``` This function is so-named because it was invented from the following recipe. Choose a convex function g such that g(0)=0 and solve for f: ```none log(1 + f(u)) = g(log(u)). <=> f(u) = exp(g(log(u))) - 1 ``` That is, the graph is identically `g` when y-axis is `log1p`-domain and x-axis is `log`-domain. Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. name: Python `str` name prefixed to Ops created by this function. Returns: log1p_abs_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "log1p_abs", [logu]): logu = tf.convert_to_tensor(value=logu, name="logu") return tf.math.expm1(tf.abs(logu))
[ "The", "log1p", "-", "abs", "Csiszar", "-", "function", "in", "log", "-", "space", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L506-L547
[ "def", "log1p_abs", "(", "logu", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "\"log1p_abs\"", ",", "[", "logu", "]", ")", ":", "logu", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "logu", ",", "name", "=", "\"logu\"", ")", "return", "tf", ".", "math", ".", "expm1", "(", "tf", ".", "abs", "(", "logu", ")", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
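A minimal NumPy check (illustrative assumption, not the TFP code) that `expm1(|logu|)` matches the stated form `f(u) = u**sign(u - 1) - 1`:

```python
import numpy as np

logu = np.array([-2., -0.5, 0., 0.5, 2.])
u = np.exp(logu)
direct = u ** np.sign(u - 1.) - 1.      # u - 1 for u > 1, 1/u - 1 for u < 1
log_space = np.expm1(np.abs(logu))      # the log-space evaluation used above
assert np.allclose(direct, log_space)
```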
test
jeffreys
The Jeffreys Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Jeffreys Csiszar-function is: ```none f(u) = 0.5 ( u log(u) - log(u) ) = 0.5 kl_forward + 0.5 kl_reverse = symmetrized_csiszar_function(kl_reverse) = symmetrized_csiszar_function(kl_forward) ``` This Csiszar-function induces a symmetric f-Divergence, i.e., `D_f[p, q] = D_f[q, p]`. Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. name: Python `str` name prefixed to Ops created by this function. Returns: jeffreys_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`.
tensorflow_probability/python/vi/csiszar_divergence.py
def jeffreys(logu, name=None): """The Jeffreys Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Jeffreys Csiszar-function is: ```none f(u) = 0.5 ( u log(u) - log(u) ) = 0.5 kl_forward + 0.5 kl_reverse = symmetrized_csiszar_function(kl_reverse) = symmetrized_csiszar_function(kl_forward) ``` This Csiszar-function induces a symmetric f-Divergence, i.e., `D_f[p, q] = D_f[q, p]`. Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. name: Python `str` name prefixed to Ops created by this function. Returns: jeffreys_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "jeffreys", [logu]): logu = tf.convert_to_tensor(value=logu, name="logu") return 0.5 * tf.math.expm1(logu) * logu
def jeffreys(logu, name=None): """The Jeffreys Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Jeffreys Csiszar-function is: ```none f(u) = 0.5 ( u log(u) - log(u) ) = 0.5 kl_forward + 0.5 kl_reverse = symmetrized_csiszar_function(kl_reverse) = symmetrized_csiszar_function(kl_forward) ``` This Csiszar-function induces a symmetric f-Divergence, i.e., `D_f[p, q] = D_f[q, p]`. Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. name: Python `str` name prefixed to Ops created by this function. Returns: jeffreys_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "jeffreys", [logu]): logu = tf.convert_to_tensor(value=logu, name="logu") return 0.5 * tf.math.expm1(logu) * logu
[ "The", "Jeffreys", "Csiszar", "-", "function", "in", "log", "-", "space", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L550-L585
[ "def", "jeffreys", "(", "logu", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "\"jeffreys\"", ",", "[", "logu", "]", ")", ":", "logu", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "logu", ",", "name", "=", "\"logu\"", ")", "return", "0.5", "*", "tf", ".", "math", ".", "expm1", "(", "logu", ")", "*", "logu" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
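A minimal NumPy sketch (illustrative; `jeffreys_np` is an ad-hoc name) of the Jeffreys form `0.5 * (u - 1) * log(u)`, together with the self-duality `f(u) = u * f(1/u)` that makes the induced divergence symmetric:

```python
import numpy as np

def jeffreys_np(logu):
    # f(u) = 0.5 * (u * log(u) - log(u)) = 0.5 * (u - 1) * log(u)
    return 0.5 * np.expm1(logu) * logu

logu = np.array([-1., 0.25, 1.])
u = np.exp(logu)
assert np.allclose(jeffreys_np(logu), 0.5 * (u * logu - logu))
# Self-duality, i.e. f(u) == u * f(1/u), is what makes D_f symmetric.
assert np.allclose(jeffreys_np(logu), u * jeffreys_np(-logu))
```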
test
modified_gan
The Modified-GAN Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` When `self_normalized = True` the modified-GAN (Generative/Adversarial Network) Csiszar-function is: ```none f(u) = log(1 + u) - log(u) + 0.5 (u - 1) ``` When `self_normalized = False` the `0.5 (u - 1)` is omitted. The unmodified GAN Csiszar-function is identical to Jensen-Shannon (with `self_normalized = False`). Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even when `p, q` are unnormalized measures. name: Python `str` name prefixed to Ops created by this function. Returns: chi_square_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`.
tensorflow_probability/python/vi/csiszar_divergence.py
def modified_gan(logu, self_normalized=False, name=None): """The Modified-GAN Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` When `self_normalized = True` the modified-GAN (Generative/Adversarial Network) Csiszar-function is: ```none f(u) = log(1 + u) - log(u) + 0.5 (u - 1) ``` When `self_normalized = False` the `0.5 (u - 1)` is omitted. The unmodified GAN Csiszar-function is identical to Jensen-Shannon (with `self_normalized = False`). Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even when `p, q` are unnormalized measures. name: Python `str` name prefixed to Ops created by this function. Returns: chi_square_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "chi_square", [logu]): logu = tf.convert_to_tensor(value=logu, name="logu") y = tf.nn.softplus(logu) - logu if self_normalized: y += 0.5 * tf.math.expm1(logu) return y
def modified_gan(logu, self_normalized=False, name=None): """The Modified-GAN Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` When `self_normalized = True` the modified-GAN (Generative/Adversarial Network) Csiszar-function is: ```none f(u) = log(1 + u) - log(u) + 0.5 (u - 1) ``` When `self_normalized = False` the `0.5 (u - 1)` is omitted. The unmodified GAN Csiszar-function is identical to Jensen-Shannon (with `self_normalized = False`). Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even when `p, q` are unnormalized measures. name: Python `str` name prefixed to Ops created by this function. Returns: chi_square_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "chi_square", [logu]): logu = tf.convert_to_tensor(value=logu, name="logu") y = tf.nn.softplus(logu) - logu if self_normalized: y += 0.5 * tf.math.expm1(logu) return y
[ "The", "Modified", "-", "GAN", "Csiszar", "-", "function", "in", "log", "-", "space", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L620-L661
[ "def", "modified_gan", "(", "logu", ",", "self_normalized", "=", "False", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "\"chi_square\"", ",", "[", "logu", "]", ")", ":", "logu", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "logu", ",", "name", "=", "\"logu\"", ")", "y", "=", "tf", ".", "nn", ".", "softplus", "(", "logu", ")", "-", "logu", "if", "self_normalized", ":", "y", "+=", "0.5", "*", "tf", ".", "math", ".", "expm1", "(", "logu", ")", "return", "y" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
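A minimal NumPy sketch (illustrative assumption; `softplus_np` and `modified_gan_np` are ad-hoc names) of the softplus-based evaluation `log(1 + u) - log(u)`, plus the optional `0.5 (u - 1)` term:

```python
import numpy as np

def softplus_np(x):
    # Numerically stable log(1 + exp(x)).
    return np.logaddexp(0., x)

def modified_gan_np(logu, self_normalized=False):
    y = softplus_np(logu) - logu         # log(1 + u) - log(u)
    if self_normalized:
        y += 0.5 * np.expm1(logu)        # + 0.5 * (u - 1)
    return y

logu = np.array([-2., 0., 2.])
print(modified_gan_np(logu, self_normalized=True))
```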
test
dual_csiszar_function
Calculates the dual Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Csiszar-dual is defined as: ```none f^*(u) = u f(1 / u) ``` where `f` is some other Csiszar-function. For example, the dual of `kl_reverse` is `kl_forward`, i.e., ```none f(u) = -log(u) f^*(u) = u f(1 / u) = -u log(1 / u) = u log(u) ``` The dual of the dual is the original function: ```none f^**(u) = {u f(1/u)}^*(u) = u (1/u) f(1/(1/u)) = f(u) ``` Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. csiszar_function: Python `callable` representing a Csiszar-function over log-domain. name: Python `str` name prefixed to Ops created by this function. Returns: dual_f_of_u: `float`-like `Tensor` of the result of calculating the dual of `f` at `u = exp(logu)`.
tensorflow_probability/python/vi/csiszar_divergence.py
def dual_csiszar_function(logu, csiszar_function, name=None): """Calculates the dual Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Csiszar-dual is defined as: ```none f^*(u) = u f(1 / u) ``` where `f` is some other Csiszar-function. For example, the dual of `kl_reverse` is `kl_forward`, i.e., ```none f(u) = -log(u) f^*(u) = u f(1 / u) = -u log(1 / u) = u log(u) ``` The dual of the dual is the original function: ```none f^**(u) = {u f(1/u)}^*(u) = u (1/u) f(1/(1/u)) = f(u) ``` Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. csiszar_function: Python `callable` representing a Csiszar-function over log-domain. name: Python `str` name prefixed to Ops created by this function. Returns: dual_f_of_u: `float`-like `Tensor` of the result of calculating the dual of `f` at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "dual_csiszar_function", [logu]): return tf.exp(logu) * csiszar_function(-logu)
def dual_csiszar_function(logu, csiszar_function, name=None): """Calculates the dual Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Csiszar-dual is defined as: ```none f^*(u) = u f(1 / u) ``` where `f` is some other Csiszar-function. For example, the dual of `kl_reverse` is `kl_forward`, i.e., ```none f(u) = -log(u) f^*(u) = u f(1 / u) = -u log(1 / u) = u log(u) ``` The dual of the dual is the original function: ```none f^**(u) = {u f(1/u)}^*(u) = u (1/u) f(1/(1/u)) = f(u) ``` Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. csiszar_function: Python `callable` representing a Csiszar-function over log-domain. name: Python `str` name prefixed to Ops created by this function. Returns: dual_f_of_u: `float`-like `Tensor` of the result of calculating the dual of `f` at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "dual_csiszar_function", [logu]): return tf.exp(logu) * csiszar_function(-logu)
[ "Calculates", "the", "dual", "Csiszar", "-", "function", "in", "log", "-", "space", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L664-L709
[ "def", "dual_csiszar_function", "(", "logu", ",", "csiszar_function", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "\"dual_csiszar_function\"", ",", "[", "logu", "]", ")", ":", "return", "tf", ".", "exp", "(", "logu", ")", "*", "csiszar_function", "(", "-", "logu", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
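A minimal NumPy sketch (illustrative; the lambdas are ad-hoc stand-ins for `kl_reverse` and `kl_forward`) of the log-space dual `u * f(1/u) = exp(logu) * f(-logu)`, checking the `kl_reverse` to `kl_forward` example and the involution property from the docstring:

```python
import numpy as np

def dual_np(logu, f):
    return np.exp(logu) * f(-logu)                  # u * f(1 / u)

kl_reverse = lambda logu: -logu                     # f(u) = -log(u)
kl_forward = lambda logu: np.exp(logu) * logu       # f(u) = u * log(u)

logu = np.array([-1., 0.5, 2.])
assert np.allclose(dual_np(logu, kl_reverse), kl_forward(logu))
# Taking the dual twice recovers the original Csiszar-function.
assert np.allclose(dual_np(logu, lambda lu: dual_np(lu, kl_reverse)),
                   kl_reverse(logu))
```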
test
symmetrized_csiszar_function
Symmetrizes a Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The symmetrized Csiszar-function is defined as: ```none f_g(u) = 0.5 g(u) + 0.5 u g (1 / u) ``` where `g` is some other Csiszar-function. We say the function is "symmetrized" because: ```none D_{f_g}[p, q] = D_{f_g}[q, p] ``` for all `p << >> q` (i.e., `support(p) = support(q)`). There exists alternatives for symmetrizing a Csiszar-function. For example, ```none f_g(u) = max(f(u), f^*(u)), ``` where `f^*` is the dual Csiszar-function, also implies a symmetric f-Divergence. Example: When either of the following functions are symmetrized, we obtain the Jensen-Shannon Csiszar-function, i.e., ```none g(u) = -log(u) - (1 + u) log((1 + u) / 2) + u - 1 h(u) = log(4) + 2 u log(u / (1 + u)) ``` implies, ```none f_g(u) = f_h(u) = u log(u) - (1 + u) log((1 + u) / 2) = jensen_shannon(log(u)). ``` Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. csiszar_function: Python `callable` representing a Csiszar-function over log-domain. name: Python `str` name prefixed to Ops created by this function. Returns: symmetrized_g_of_u: `float`-like `Tensor` of the result of applying the symmetrization of `g` evaluated at `u = exp(logu)`.
tensorflow_probability/python/vi/csiszar_divergence.py
def symmetrized_csiszar_function(logu, csiszar_function, name=None): """Symmetrizes a Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The symmetrized Csiszar-function is defined as: ```none f_g(u) = 0.5 g(u) + 0.5 u g (1 / u) ``` where `g` is some other Csiszar-function. We say the function is "symmetrized" because: ```none D_{f_g}[p, q] = D_{f_g}[q, p] ``` for all `p << >> q` (i.e., `support(p) = support(q)`). There exists alternatives for symmetrizing a Csiszar-function. For example, ```none f_g(u) = max(f(u), f^*(u)), ``` where `f^*` is the dual Csiszar-function, also implies a symmetric f-Divergence. Example: When either of the following functions are symmetrized, we obtain the Jensen-Shannon Csiszar-function, i.e., ```none g(u) = -log(u) - (1 + u) log((1 + u) / 2) + u - 1 h(u) = log(4) + 2 u log(u / (1 + u)) ``` implies, ```none f_g(u) = f_h(u) = u log(u) - (1 + u) log((1 + u) / 2) = jensen_shannon(log(u)). ``` Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. csiszar_function: Python `callable` representing a Csiszar-function over log-domain. name: Python `str` name prefixed to Ops created by this function. Returns: symmetrized_g_of_u: `float`-like `Tensor` of the result of applying the symmetrization of `g` evaluated at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "symmetrized_csiszar_function", [logu]): logu = tf.convert_to_tensor(value=logu, name="logu") return 0.5 * (csiszar_function(logu) + dual_csiszar_function(logu, csiszar_function))
def symmetrized_csiszar_function(logu, csiszar_function, name=None): """Symmetrizes a Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The symmetrized Csiszar-function is defined as: ```none f_g(u) = 0.5 g(u) + 0.5 u g (1 / u) ``` where `g` is some other Csiszar-function. We say the function is "symmetrized" because: ```none D_{f_g}[p, q] = D_{f_g}[q, p] ``` for all `p << >> q` (i.e., `support(p) = support(q)`). There exists alternatives for symmetrizing a Csiszar-function. For example, ```none f_g(u) = max(f(u), f^*(u)), ``` where `f^*` is the dual Csiszar-function, also implies a symmetric f-Divergence. Example: When either of the following functions are symmetrized, we obtain the Jensen-Shannon Csiszar-function, i.e., ```none g(u) = -log(u) - (1 + u) log((1 + u) / 2) + u - 1 h(u) = log(4) + 2 u log(u / (1 + u)) ``` implies, ```none f_g(u) = f_h(u) = u log(u) - (1 + u) log((1 + u) / 2) = jensen_shannon(log(u)). ``` Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. csiszar_function: Python `callable` representing a Csiszar-function over log-domain. name: Python `str` name prefixed to Ops created by this function. Returns: symmetrized_g_of_u: `float`-like `Tensor` of the result of applying the symmetrization of `g` evaluated at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "symmetrized_csiszar_function", [logu]): logu = tf.convert_to_tensor(value=logu, name="logu") return 0.5 * (csiszar_function(logu) + dual_csiszar_function(logu, csiszar_function))
[ "Symmetrizes", "a", "Csiszar", "-", "function", "in", "log", "-", "space", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L712-L780
[ "def", "symmetrized_csiszar_function", "(", "logu", ",", "csiszar_function", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "\"symmetrized_csiszar_function\"", ",", "[", "logu", "]", ")", ":", "logu", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "logu", ",", "name", "=", "\"logu\"", ")", "return", "0.5", "*", "(", "csiszar_function", "(", "logu", ")", "+", "dual_csiszar_function", "(", "logu", ",", "csiszar_function", ")", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
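A minimal NumPy sketch (illustrative; ad-hoc names) of the symmetrization `0.5 g(u) + 0.5 u g(1/u)`, verifying the claim in the `jeffreys` docstring above that symmetrizing `kl_reverse` yields the Jeffreys Csiszar-function:

```python
import numpy as np

def symmetrized_np(logu, g):
    dual = np.exp(logu) * g(-logu)        # u * g(1 / u)
    return 0.5 * (g(logu) + dual)

kl_reverse = lambda logu: -logu                          # g(u) = -log(u)
jeffreys_np = lambda logu: 0.5 * np.expm1(logu) * logu   # 0.5 * (u - 1) * log(u)

logu = np.array([-1.5, 0., 0.7])
assert np.allclose(symmetrized_np(logu, kl_reverse), jeffreys_np(logu))
```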
test
monte_carlo_csiszar_f_divergence
Monte-Carlo approximation of the Csiszar f-Divergence. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Csiszar f-Divergence for Csiszar-function f is given by: ```none D_f[p(X), q(X)] := E_{q(X)}[ f( p(X) / q(X) ) ] ~= m**-1 sum_j^m f( p(x_j) / q(x_j) ), where x_j ~iid q(X) ``` Tricks: Reparameterization and Score-Gradient When q is "reparameterized", i.e., a diffeomorphic transformation of a parameterless distribution (e.g., `Normal(Y; m, s) <=> Y = sX + m, X ~ Normal(0,1)`), we can swap gradient and expectation, i.e., `grad[Avg{ s_i : i=1...n }] = Avg{ grad[s_i] : i=1...n }` where `S_n=Avg{s_i}` and `s_i = f(x_i), x_i ~iid q(X)`. However, if q is not reparameterized, TensorFlow's gradient will be incorrect since the chain-rule stops at samples of unreparameterized distributions. In this circumstance using the Score-Gradient trick results in an unbiased gradient, i.e., ```none grad[ E_q[f(X)] ] = grad[ int dx q(x) f(x) ] = int dx grad[ q(x) f(x) ] = int dx [ q'(x) f(x) + q(x) f'(x) ] = int dx q(x) [q'(x) / q(x) f(x) + f'(x) ] = int dx q(x) grad[ f(x) q(x) / stop_grad[q(x)] ] = E_q[ grad[ f(x) q(x) / stop_grad[q(x)] ] ] ``` Unless `q.reparameterization_type != tfd.FULLY_REPARAMETERIZED` it is usually preferable to set `use_reparametrization = True`. Example Application: The Csiszar f-Divergence is a useful framework for variational inference. I.e., observe that, ```none f(p(x)) = f( E_{q(Z | x)}[ p(x, Z) / q(Z | x) ] ) <= E_{q(Z | x)}[ f( p(x, Z) / q(Z | x) ) ] := D_f[p(x, Z), q(Z | x)] ``` The inequality follows from the fact that the "perspective" of `f`, i.e., `(s, t) |-> t f(s / t))`, is convex in `(s, t)` when `s/t in domain(f)` and `t` is a real. Since the above framework includes the popular Evidence Lower BOund (ELBO) as a special case, i.e., `f(u) = -log(u)`, we call this framework "Evidence Divergence Bound Optimization" (EDBO). Args: f: Python `callable` representing a Csiszar-function in log-space, i.e., takes `p_log_prob(q_samples) - q.log_prob(q_samples)`. p_log_prob: Python `callable` taking (a batch of) samples from `q` and returning the natural-log of the probability under distribution `p`. (In variational inference `p` is the joint distribution.) q: `tf.Distribution`-like instance; must implement: `reparameterization_type`, `sample(n, seed)`, and `log_prob(x)`. (In variational inference `q` is the approximate posterior distribution.) num_draws: Integer scalar number of draws used to approximate the f-Divergence expectation. use_reparametrization: Python `bool`. When `None` (the default), automatically set to: `q.reparameterization_type == tfd.FULLY_REPARAMETERIZED`. When `True` uses the standard Monte-Carlo average. When `False` uses the score-gradient trick. (See above for details.) When `False`, consider using `csiszar_vimco`. seed: Python `int` seed for `q.sample`. name: Python `str` name prefixed to Ops created by this function. Returns: monte_carlo_csiszar_f_divergence: `float`-like `Tensor` Monte Carlo approximation of the Csiszar f-Divergence. Raises: ValueError: if `q` is not a reparameterized distribution and `use_reparametrization = True`. A distribution `q` is said to be "reparameterized" when its samples are generated by transforming the samples of another distribution which does not depend on the parameterization of `q`. This property ensures the gradient (with respect to parameters) is valid. TypeError: if `p_log_prob` is not a Python `callable`.
tensorflow_probability/python/vi/csiszar_divergence.py
def monte_carlo_csiszar_f_divergence( f, p_log_prob, q, num_draws, use_reparametrization=None, seed=None, name=None): """Monte-Carlo approximation of the Csiszar f-Divergence. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Csiszar f-Divergence for Csiszar-function f is given by: ```none D_f[p(X), q(X)] := E_{q(X)}[ f( p(X) / q(X) ) ] ~= m**-1 sum_j^m f( p(x_j) / q(x_j) ), where x_j ~iid q(X) ``` Tricks: Reparameterization and Score-Gradient When q is "reparameterized", i.e., a diffeomorphic transformation of a parameterless distribution (e.g., `Normal(Y; m, s) <=> Y = sX + m, X ~ Normal(0,1)`), we can swap gradient and expectation, i.e., `grad[Avg{ s_i : i=1...n }] = Avg{ grad[s_i] : i=1...n }` where `S_n=Avg{s_i}` and `s_i = f(x_i), x_i ~iid q(X)`. However, if q is not reparameterized, TensorFlow's gradient will be incorrect since the chain-rule stops at samples of unreparameterized distributions. In this circumstance using the Score-Gradient trick results in an unbiased gradient, i.e., ```none grad[ E_q[f(X)] ] = grad[ int dx q(x) f(x) ] = int dx grad[ q(x) f(x) ] = int dx [ q'(x) f(x) + q(x) f'(x) ] = int dx q(x) [q'(x) / q(x) f(x) + f'(x) ] = int dx q(x) grad[ f(x) q(x) / stop_grad[q(x)] ] = E_q[ grad[ f(x) q(x) / stop_grad[q(x)] ] ] ``` Unless `q.reparameterization_type != tfd.FULLY_REPARAMETERIZED` it is usually preferable to set `use_reparametrization = True`. Example Application: The Csiszar f-Divergence is a useful framework for variational inference. I.e., observe that, ```none f(p(x)) = f( E_{q(Z | x)}[ p(x, Z) / q(Z | x) ] ) <= E_{q(Z | x)}[ f( p(x, Z) / q(Z | x) ) ] := D_f[p(x, Z), q(Z | x)] ``` The inequality follows from the fact that the "perspective" of `f`, i.e., `(s, t) |-> t f(s / t))`, is convex in `(s, t)` when `s/t in domain(f)` and `t` is a real. Since the above framework includes the popular Evidence Lower BOund (ELBO) as a special case, i.e., `f(u) = -log(u)`, we call this framework "Evidence Divergence Bound Optimization" (EDBO). Args: f: Python `callable` representing a Csiszar-function in log-space, i.e., takes `p_log_prob(q_samples) - q.log_prob(q_samples)`. p_log_prob: Python `callable` taking (a batch of) samples from `q` and returning the natural-log of the probability under distribution `p`. (In variational inference `p` is the joint distribution.) q: `tf.Distribution`-like instance; must implement: `reparameterization_type`, `sample(n, seed)`, and `log_prob(x)`. (In variational inference `q` is the approximate posterior distribution.) num_draws: Integer scalar number of draws used to approximate the f-Divergence expectation. use_reparametrization: Python `bool`. When `None` (the default), automatically set to: `q.reparameterization_type == tfd.FULLY_REPARAMETERIZED`. When `True` uses the standard Monte-Carlo average. When `False` uses the score-gradient trick. (See above for details.) When `False`, consider using `csiszar_vimco`. seed: Python `int` seed for `q.sample`. name: Python `str` name prefixed to Ops created by this function. Returns: monte_carlo_csiszar_f_divergence: `float`-like `Tensor` Monte Carlo approximation of the Csiszar f-Divergence. Raises: ValueError: if `q` is not a reparameterized distribution and `use_reparametrization = True`. A distribution `q` is said to be "reparameterized" when its samples are generated by transforming the samples of another distribution which does not depend on the parameterization of `q`. This property ensures the gradient (with respect to parameters) is valid. 
TypeError: if `p_log_prob` is not a Python `callable`. """ reparameterization_types = tf.nest.flatten(q.reparameterization_type) with tf.compat.v1.name_scope(name, "monte_carlo_csiszar_f_divergence", [num_draws]): if use_reparametrization is None: use_reparametrization = all( reparameterization_type == tfd.FULLY_REPARAMETERIZED for reparameterization_type in reparameterization_types) elif (use_reparametrization and any(reparameterization_type != tfd.FULLY_REPARAMETERIZED for reparameterization_type in reparameterization_types)): # TODO(jvdillon): Consider only raising an exception if the gradient is # requested. raise ValueError( "Distribution `q` must be reparameterized, i.e., a diffeomorphic " "transformation of a parameterless distribution. (Otherwise this " "function has a biased gradient.)") if not callable(p_log_prob): raise TypeError("`p_log_prob` must be a Python `callable` function.") return monte_carlo.expectation( f=lambda q_samples: f(p_log_prob(q_samples) - q.log_prob(q_samples)), samples=q.sample(num_draws, seed=seed), log_prob=q.log_prob, # Only used if use_reparametrization=False. use_reparametrization=use_reparametrization)
def monte_carlo_csiszar_f_divergence( f, p_log_prob, q, num_draws, use_reparametrization=None, seed=None, name=None): """Monte-Carlo approximation of the Csiszar f-Divergence. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Csiszar f-Divergence for Csiszar-function f is given by: ```none D_f[p(X), q(X)] := E_{q(X)}[ f( p(X) / q(X) ) ] ~= m**-1 sum_j^m f( p(x_j) / q(x_j) ), where x_j ~iid q(X) ``` Tricks: Reparameterization and Score-Gradient When q is "reparameterized", i.e., a diffeomorphic transformation of a parameterless distribution (e.g., `Normal(Y; m, s) <=> Y = sX + m, X ~ Normal(0,1)`), we can swap gradient and expectation, i.e., `grad[Avg{ s_i : i=1...n }] = Avg{ grad[s_i] : i=1...n }` where `S_n=Avg{s_i}` and `s_i = f(x_i), x_i ~iid q(X)`. However, if q is not reparameterized, TensorFlow's gradient will be incorrect since the chain-rule stops at samples of unreparameterized distributions. In this circumstance using the Score-Gradient trick results in an unbiased gradient, i.e., ```none grad[ E_q[f(X)] ] = grad[ int dx q(x) f(x) ] = int dx grad[ q(x) f(x) ] = int dx [ q'(x) f(x) + q(x) f'(x) ] = int dx q(x) [q'(x) / q(x) f(x) + f'(x) ] = int dx q(x) grad[ f(x) q(x) / stop_grad[q(x)] ] = E_q[ grad[ f(x) q(x) / stop_grad[q(x)] ] ] ``` Unless `q.reparameterization_type != tfd.FULLY_REPARAMETERIZED` it is usually preferable to set `use_reparametrization = True`. Example Application: The Csiszar f-Divergence is a useful framework for variational inference. I.e., observe that, ```none f(p(x)) = f( E_{q(Z | x)}[ p(x, Z) / q(Z | x) ] ) <= E_{q(Z | x)}[ f( p(x, Z) / q(Z | x) ) ] := D_f[p(x, Z), q(Z | x)] ``` The inequality follows from the fact that the "perspective" of `f`, i.e., `(s, t) |-> t f(s / t))`, is convex in `(s, t)` when `s/t in domain(f)` and `t` is a real. Since the above framework includes the popular Evidence Lower BOund (ELBO) as a special case, i.e., `f(u) = -log(u)`, we call this framework "Evidence Divergence Bound Optimization" (EDBO). Args: f: Python `callable` representing a Csiszar-function in log-space, i.e., takes `p_log_prob(q_samples) - q.log_prob(q_samples)`. p_log_prob: Python `callable` taking (a batch of) samples from `q` and returning the natural-log of the probability under distribution `p`. (In variational inference `p` is the joint distribution.) q: `tf.Distribution`-like instance; must implement: `reparameterization_type`, `sample(n, seed)`, and `log_prob(x)`. (In variational inference `q` is the approximate posterior distribution.) num_draws: Integer scalar number of draws used to approximate the f-Divergence expectation. use_reparametrization: Python `bool`. When `None` (the default), automatically set to: `q.reparameterization_type == tfd.FULLY_REPARAMETERIZED`. When `True` uses the standard Monte-Carlo average. When `False` uses the score-gradient trick. (See above for details.) When `False`, consider using `csiszar_vimco`. seed: Python `int` seed for `q.sample`. name: Python `str` name prefixed to Ops created by this function. Returns: monte_carlo_csiszar_f_divergence: `float`-like `Tensor` Monte Carlo approximation of the Csiszar f-Divergence. Raises: ValueError: if `q` is not a reparameterized distribution and `use_reparametrization = True`. A distribution `q` is said to be "reparameterized" when its samples are generated by transforming the samples of another distribution which does not depend on the parameterization of `q`. This property ensures the gradient (with respect to parameters) is valid. 
TypeError: if `p_log_prob` is not a Python `callable`. """ reparameterization_types = tf.nest.flatten(q.reparameterization_type) with tf.compat.v1.name_scope(name, "monte_carlo_csiszar_f_divergence", [num_draws]): if use_reparametrization is None: use_reparametrization = all( reparameterization_type == tfd.FULLY_REPARAMETERIZED for reparameterization_type in reparameterization_types) elif (use_reparametrization and any(reparameterization_type != tfd.FULLY_REPARAMETERIZED for reparameterization_type in reparameterization_types)): # TODO(jvdillon): Consider only raising an exception if the gradient is # requested. raise ValueError( "Distribution `q` must be reparameterized, i.e., a diffeomorphic " "transformation of a parameterless distribution. (Otherwise this " "function has a biased gradient.)") if not callable(p_log_prob): raise TypeError("`p_log_prob` must be a Python `callable` function.") return monte_carlo.expectation( f=lambda q_samples: f(p_log_prob(q_samples) - q.log_prob(q_samples)), samples=q.sample(num_draws, seed=seed), log_prob=q.log_prob, # Only used if use_reparametrization=False. use_reparametrization=use_reparametrization)
[ "Monte", "-", "Carlo", "approximation", "of", "the", "Csiszar", "f", "-", "Divergence", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L783-L906
[ "def", "monte_carlo_csiszar_f_divergence", "(", "f", ",", "p_log_prob", ",", "q", ",", "num_draws", ",", "use_reparametrization", "=", "None", ",", "seed", "=", "None", ",", "name", "=", "None", ")", ":", "reparameterization_types", "=", "tf", ".", "nest", ".", "flatten", "(", "q", ".", "reparameterization_type", ")", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "\"monte_carlo_csiszar_f_divergence\"", ",", "[", "num_draws", "]", ")", ":", "if", "use_reparametrization", "is", "None", ":", "use_reparametrization", "=", "all", "(", "reparameterization_type", "==", "tfd", ".", "FULLY_REPARAMETERIZED", "for", "reparameterization_type", "in", "reparameterization_types", ")", "elif", "(", "use_reparametrization", "and", "any", "(", "reparameterization_type", "!=", "tfd", ".", "FULLY_REPARAMETERIZED", "for", "reparameterization_type", "in", "reparameterization_types", ")", ")", ":", "# TODO(jvdillon): Consider only raising an exception if the gradient is", "# requested.", "raise", "ValueError", "(", "\"Distribution `q` must be reparameterized, i.e., a diffeomorphic \"", "\"transformation of a parameterless distribution. (Otherwise this \"", "\"function has a biased gradient.)\"", ")", "if", "not", "callable", "(", "p_log_prob", ")", ":", "raise", "TypeError", "(", "\"`p_log_prob` must be a Python `callable` function.\"", ")", "return", "monte_carlo", ".", "expectation", "(", "f", "=", "lambda", "q_samples", ":", "f", "(", "p_log_prob", "(", "q_samples", ")", "-", "q", ".", "log_prob", "(", "q_samples", ")", ")", ",", "samples", "=", "q", ".", "sample", "(", "num_draws", ",", "seed", "=", "seed", ")", ",", "log_prob", "=", "q", ".", "log_prob", ",", "# Only used if use_reparametrization=False.", "use_reparametrization", "=", "use_reparametrization", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
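A minimal, self-contained NumPy sketch (an assumption for illustration; it does not use the TFP API and ignores the reparameterization and score-gradient machinery) of the plain Monte-Carlo estimate `D_f[p, q] ~= (1/m) sum_j f(log p(x_j) - log q(x_j))` with `f = kl_reverse`, for which the estimate should approach the analytic `KL[q || p]`:

```python
import numpy as np

def normal_log_prob(x, mean, std):
    return -0.5 * np.log(2. * np.pi * std**2) - 0.5 * ((x - mean) / std)**2

kl_reverse = lambda logu: -logu            # f(u) = -log(u)

rng = np.random.default_rng(0)
q_mean, q_std = 0., 1.                     # sampling distribution q
p_mean, p_std = 1., 1.                     # target distribution p
x = rng.normal(q_mean, q_std, size=100000)              # x_j ~iid q
logu = normal_log_prob(x, p_mean, p_std) - normal_log_prob(x, q_mean, q_std)
estimate = kl_reverse(logu).mean()
exact = 0.5 * (p_mean - q_mean)**2 / p_std**2            # KL[q || p], equal stds
print(estimate, exact)                     # the two should be close (~0.5)
```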
test
csiszar_vimco
Use VIMCO to lower the variance of gradient[csiszar_function(Avg(logu))]. This function generalizes VIMCO [(Mnih and Rezende, 2016)][1] to Csiszar f-Divergences. Note: if `q.reparameterization_type = tfd.FULLY_REPARAMETERIZED`, consider using `monte_carlo_csiszar_f_divergence`. The VIMCO loss is: ```none vimco = f(Avg{logu[i] : i=0,...,m-1}) where, logu[i] = log( p(x, h[i]) / q(h[i] | x) ) h[i] iid~ q(H | x) ``` Interestingly, the VIMCO gradient is not the naive gradient of `vimco`. Rather, it is characterized by: ```none grad[vimco] - variance_reducing_term where, variance_reducing_term = Sum{ grad[log q(h[i] | x)] * (vimco - f(log Avg{h[j;i] : j=0,...,m-1})) : i=0, ..., m-1 } h[j;i] = { u[j] j!=i { GeometricAverage{ u[k] : k!=i} j==i ``` (We omitted `stop_gradient` for brevity. See implementation for more details.) The `Avg{h[j;i] : j}` term is a kind of "swap-out average" where the `i`-th element has been replaced by the leave-`i`-out Geometric-average. This implementation prefers numerical precision over efficiency, i.e., `O(num_draws * num_batch_draws * prod(batch_shape) * prod(event_shape))`. (The constant may be fairly large, perhaps around 12.) Args: f: Python `callable` representing a Csiszar-function in log-space. p_log_prob: Python `callable` representing the natural-log of the probability under distribution `p`. (In variational inference `p` is the joint distribution.) q: `tf.Distribution`-like instance; must implement: `sample(n, seed)`, and `log_prob(x)`. (In variational inference `q` is the approximate posterior distribution.) num_draws: Integer scalar number of draws used to approximate the f-Divergence expectation. num_batch_draws: Integer scalar number of draws used to approximate the f-Divergence expectation. seed: Python `int` seed for `q.sample`. name: Python `str` name prefixed to Ops created by this function. Returns: vimco: The Csiszar f-Divergence generalized VIMCO objective. Raises: ValueError: if `num_draws < 2`. #### References [1]: Andriy Mnih and Danilo Rezende. Variational Inference for Monte Carlo objectives. In _International Conference on Machine Learning_, 2016. https://arxiv.org/abs/1602.06725
tensorflow_probability/python/vi/csiszar_divergence.py
def csiszar_vimco(f, p_log_prob, q, num_draws, num_batch_draws=1, seed=None, name=None): """Use VIMCO to lower the variance of gradient[csiszar_function(Avg(logu))]. This function generalizes VIMCO [(Mnih and Rezende, 2016)][1] to Csiszar f-Divergences. Note: if `q.reparameterization_type = tfd.FULLY_REPARAMETERIZED`, consider using `monte_carlo_csiszar_f_divergence`. The VIMCO loss is: ```none vimco = f(Avg{logu[i] : i=0,...,m-1}) where, logu[i] = log( p(x, h[i]) / q(h[i] | x) ) h[i] iid~ q(H | x) ``` Interestingly, the VIMCO gradient is not the naive gradient of `vimco`. Rather, it is characterized by: ```none grad[vimco] - variance_reducing_term where, variance_reducing_term = Sum{ grad[log q(h[i] | x)] * (vimco - f(log Avg{h[j;i] : j=0,...,m-1})) : i=0, ..., m-1 } h[j;i] = { u[j] j!=i { GeometricAverage{ u[k] : k!=i} j==i ``` (We omitted `stop_gradient` for brevity. See implementation for more details.) The `Avg{h[j;i] : j}` term is a kind of "swap-out average" where the `i`-th element has been replaced by the leave-`i`-out Geometric-average. This implementation prefers numerical precision over efficiency, i.e., `O(num_draws * num_batch_draws * prod(batch_shape) * prod(event_shape))`. (The constant may be fairly large, perhaps around 12.) Args: f: Python `callable` representing a Csiszar-function in log-space. p_log_prob: Python `callable` representing the natural-log of the probability under distribution `p`. (In variational inference `p` is the joint distribution.) q: `tf.Distribution`-like instance; must implement: `sample(n, seed)`, and `log_prob(x)`. (In variational inference `q` is the approximate posterior distribution.) num_draws: Integer scalar number of draws used to approximate the f-Divergence expectation. num_batch_draws: Integer scalar number of draws used to approximate the f-Divergence expectation. seed: Python `int` seed for `q.sample`. name: Python `str` name prefixed to Ops created by this function. Returns: vimco: The Csiszar f-Divergence generalized VIMCO objective. Raises: ValueError: if `num_draws < 2`. #### References [1]: Andriy Mnih and Danilo Rezende. Variational Inference for Monte Carlo objectives. In _International Conference on Machine Learning_, 2016. https://arxiv.org/abs/1602.06725 """ with tf.compat.v1.name_scope(name, "csiszar_vimco", [num_draws, num_batch_draws]): if num_draws < 2: raise ValueError("Must specify num_draws > 1.") stop = tf.stop_gradient # For readability. x = stop(q.sample(sample_shape=[num_draws, num_batch_draws], seed=seed)) logqx = q.log_prob(x) logu = p_log_prob(x) - logqx f_log_avg_u, f_log_sooavg_u = [f(r) for r in csiszar_vimco_helper(logu)] dotprod = tf.reduce_sum( input_tensor=logqx * stop(f_log_avg_u - f_log_sooavg_u), axis=0) # Sum over iid samples. # We now rewrite f_log_avg_u so that: # `grad[f_log_avg_u] := grad[f_log_avg_u + dotprod]`. # To achieve this, we use a trick that # `f(x) - stop(f(x)) == zeros_like(f(x))` # but its gradient is grad[f(x)]. # Note that IEEE754 specifies that `x - x == 0.` and `x + 0. == x`, hence # this trick loses no precision. For more discussion regarding the relevant # portions of the IEEE754 standard, see the StackOverflow question, # "Is there a floating point value of x, for which x-x == 0 is false?" # http://stackoverflow.com/q/2686644 f_log_avg_u += dotprod - stop(dotprod) # Add zeros_like(dot_prod). return tf.reduce_mean(input_tensor=f_log_avg_u, axis=0)
def csiszar_vimco(f, p_log_prob, q, num_draws, num_batch_draws=1, seed=None, name=None): """Use VIMCO to lower the variance of gradient[csiszar_function(Avg(logu))]. This function generalizes VIMCO [(Mnih and Rezende, 2016)][1] to Csiszar f-Divergences. Note: if `q.reparameterization_type = tfd.FULLY_REPARAMETERIZED`, consider using `monte_carlo_csiszar_f_divergence`. The VIMCO loss is: ```none vimco = f(Avg{logu[i] : i=0,...,m-1}) where, logu[i] = log( p(x, h[i]) / q(h[i] | x) ) h[i] iid~ q(H | x) ``` Interestingly, the VIMCO gradient is not the naive gradient of `vimco`. Rather, it is characterized by: ```none grad[vimco] - variance_reducing_term where, variance_reducing_term = Sum{ grad[log q(h[i] | x)] * (vimco - f(log Avg{h[j;i] : j=0,...,m-1})) : i=0, ..., m-1 } h[j;i] = { u[j] j!=i { GeometricAverage{ u[k] : k!=i} j==i ``` (We omitted `stop_gradient` for brevity. See implementation for more details.) The `Avg{h[j;i] : j}` term is a kind of "swap-out average" where the `i`-th element has been replaced by the leave-`i`-out Geometric-average. This implementation prefers numerical precision over efficiency, i.e., `O(num_draws * num_batch_draws * prod(batch_shape) * prod(event_shape))`. (The constant may be fairly large, perhaps around 12.) Args: f: Python `callable` representing a Csiszar-function in log-space. p_log_prob: Python `callable` representing the natural-log of the probability under distribution `p`. (In variational inference `p` is the joint distribution.) q: `tf.Distribution`-like instance; must implement: `sample(n, seed)`, and `log_prob(x)`. (In variational inference `q` is the approximate posterior distribution.) num_draws: Integer scalar number of draws used to approximate the f-Divergence expectation. num_batch_draws: Integer scalar number of draws used to approximate the f-Divergence expectation. seed: Python `int` seed for `q.sample`. name: Python `str` name prefixed to Ops created by this function. Returns: vimco: The Csiszar f-Divergence generalized VIMCO objective. Raises: ValueError: if `num_draws < 2`. #### References [1]: Andriy Mnih and Danilo Rezende. Variational Inference for Monte Carlo objectives. In _International Conference on Machine Learning_, 2016. https://arxiv.org/abs/1602.06725 """ with tf.compat.v1.name_scope(name, "csiszar_vimco", [num_draws, num_batch_draws]): if num_draws < 2: raise ValueError("Must specify num_draws > 1.") stop = tf.stop_gradient # For readability. x = stop(q.sample(sample_shape=[num_draws, num_batch_draws], seed=seed)) logqx = q.log_prob(x) logu = p_log_prob(x) - logqx f_log_avg_u, f_log_sooavg_u = [f(r) for r in csiszar_vimco_helper(logu)] dotprod = tf.reduce_sum( input_tensor=logqx * stop(f_log_avg_u - f_log_sooavg_u), axis=0) # Sum over iid samples. # We now rewrite f_log_avg_u so that: # `grad[f_log_avg_u] := grad[f_log_avg_u + dotprod]`. # To achieve this, we use a trick that # `f(x) - stop(f(x)) == zeros_like(f(x))` # but its gradient is grad[f(x)]. # Note that IEEE754 specifies that `x - x == 0.` and `x + 0. == x`, hence # this trick loses no precision. For more discussion regarding the relevant # portions of the IEEE754 standard, see the StackOverflow question, # "Is there a floating point value of x, for which x-x == 0 is false?" # http://stackoverflow.com/q/2686644 f_log_avg_u += dotprod - stop(dotprod) # Add zeros_like(dot_prod). return tf.reduce_mean(input_tensor=f_log_avg_u, axis=0)
[ "Use", "VIMCO", "to", "lower", "the", "variance", "of", "gradient", "[", "csiszar_function", "(", "Avg", "(", "logu", "))", "]", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L909-L1006
[ "def", "csiszar_vimco", "(", "f", ",", "p_log_prob", ",", "q", ",", "num_draws", ",", "num_batch_draws", "=", "1", ",", "seed", "=", "None", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "\"csiszar_vimco\"", ",", "[", "num_draws", ",", "num_batch_draws", "]", ")", ":", "if", "num_draws", "<", "2", ":", "raise", "ValueError", "(", "\"Must specify num_draws > 1.\"", ")", "stop", "=", "tf", ".", "stop_gradient", "# For readability.", "x", "=", "stop", "(", "q", ".", "sample", "(", "sample_shape", "=", "[", "num_draws", ",", "num_batch_draws", "]", ",", "seed", "=", "seed", ")", ")", "logqx", "=", "q", ".", "log_prob", "(", "x", ")", "logu", "=", "p_log_prob", "(", "x", ")", "-", "logqx", "f_log_avg_u", ",", "f_log_sooavg_u", "=", "[", "f", "(", "r", ")", "for", "r", "in", "csiszar_vimco_helper", "(", "logu", ")", "]", "dotprod", "=", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "logqx", "*", "stop", "(", "f_log_avg_u", "-", "f_log_sooavg_u", ")", ",", "axis", "=", "0", ")", "# Sum over iid samples.", "# We now rewrite f_log_avg_u so that:", "# `grad[f_log_avg_u] := grad[f_log_avg_u + dotprod]`.", "# To achieve this, we use a trick that", "# `f(x) - stop(f(x)) == zeros_like(f(x))`", "# but its gradient is grad[f(x)].", "# Note that IEEE754 specifies that `x - x == 0.` and `x + 0. == x`, hence", "# this trick loses no precision. For more discussion regarding the relevant", "# portions of the IEEE754 standard, see the StackOverflow question,", "# \"Is there a floating point value of x, for which x-x == 0 is false?\"", "# http://stackoverflow.com/q/2686644", "f_log_avg_u", "+=", "dotprod", "-", "stop", "(", "dotprod", ")", "# Add zeros_like(dot_prod).", "return", "tf", ".", "reduce_mean", "(", "input_tensor", "=", "f_log_avg_u", ",", "axis", "=", "0", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
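A minimal NumPy sketch (illustrative assumption) of only the forward value `f(log Avg{u[i]})`; the stop-gradient and `dotprod` surgery above, which is the whole point of VIMCO's variance reduction, has no NumPy analogue and is omitted. With `f = kl_reverse` this value is the negative of the multi-sample importance-weighted bound:

```python
import numpy as np

kl_reverse = lambda logu: -logu            # f(u) = -log(u)

m = 16
rng = np.random.default_rng(0)
logu = rng.normal(size=m)                  # stand-in for log p(x, h[i]) - log q(h[i] | x)

# Stable log of the arithmetic mean of u = exp(logu), shifting by the max
# as csiszar_vimco_helper does.
log_max = logu.max()
log_avg_u = np.log(np.mean(np.exp(logu - log_max))) + log_max

vimco_value = kl_reverse(log_avg_u)
print(vimco_value)
```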
test
csiszar_vimco_helper
Helper to `csiszar_vimco`; computes `log_avg_u`, `log_sooavg_u`. `axis = 0` of `logu` is presumed to correspond to iid samples from `q`, i.e., ```none logu[j] = log(u[j]) u[j] = p(x, h[j]) / q(h[j] | x) h[j] iid~ q(H | x) ``` Args: logu: Floating-type `Tensor` representing `log(p(x, h) / q(h | x))`. name: Python `str` name prefixed to Ops created by this function. Returns: log_avg_u: `logu.dtype` `Tensor` corresponding to the natural-log of the average of `u`. The sum of the gradient of `log_avg_u` is `1`. log_sooavg_u: `logu.dtype` `Tensor` characterized by the natural-log of the average of `u`` except that the average swaps-out `u[i]` for the leave-`i`-out Geometric-average. The mean of the gradient of `log_sooavg_u` is `1`. Mathematically `log_sooavg_u` is, ```none log_sooavg_u[i] = log(Avg{h[j ; i] : j=0, ..., m-1}) h[j ; i] = { u[j] j!=i { GeometricAverage{u[k] : k != i} j==i ```
tensorflow_probability/python/vi/csiszar_divergence.py
def csiszar_vimco_helper(logu, name=None): """Helper to `csiszar_vimco`; computes `log_avg_u`, `log_sooavg_u`. `axis = 0` of `logu` is presumed to correspond to iid samples from `q`, i.e., ```none logu[j] = log(u[j]) u[j] = p(x, h[j]) / q(h[j] | x) h[j] iid~ q(H | x) ``` Args: logu: Floating-type `Tensor` representing `log(p(x, h) / q(h | x))`. name: Python `str` name prefixed to Ops created by this function. Returns: log_avg_u: `logu.dtype` `Tensor` corresponding to the natural-log of the average of `u`. The sum of the gradient of `log_avg_u` is `1`. log_sooavg_u: `logu.dtype` `Tensor` characterized by the natural-log of the average of `u`` except that the average swaps-out `u[i]` for the leave-`i`-out Geometric-average. The mean of the gradient of `log_sooavg_u` is `1`. Mathematically `log_sooavg_u` is, ```none log_sooavg_u[i] = log(Avg{h[j ; i] : j=0, ..., m-1}) h[j ; i] = { u[j] j!=i { GeometricAverage{u[k] : k != i} j==i ``` """ with tf.compat.v1.name_scope(name, "csiszar_vimco_helper", [logu]): logu = tf.convert_to_tensor(value=logu, name="logu") n = tf.compat.dimension_value(logu.shape.with_rank_at_least(1)[0]) if n is None: n = tf.shape(input=logu)[0] log_n = tf.math.log(tf.cast(n, dtype=logu.dtype)) nm1 = tf.cast(n - 1, dtype=logu.dtype) else: log_n = np.log(n).astype(logu.dtype.as_numpy_dtype) nm1 = np.asarray(n - 1, dtype=logu.dtype.as_numpy_dtype) # Throughout we reduce across axis=0 since this is presumed to be iid # samples. log_max_u = tf.reduce_max(input_tensor=logu, axis=0) log_sum_u_minus_log_max_u = tf.reduce_logsumexp( input_tensor=logu - log_max_u, axis=0) # log_loosum_u[i] = # = logsumexp(logu[j] : j != i) # = log( exp(logsumexp(logu)) - exp(logu[i]) ) # = log( exp(logsumexp(logu - logu[i])) exp(logu[i]) - exp(logu[i])) # = logu[i] + log(exp(logsumexp(logu - logu[i])) - 1) # = logu[i] + log(exp(logsumexp(logu) - logu[i]) - 1) # = logu[i] + softplus_inverse(logsumexp(logu) - logu[i]) d = log_sum_u_minus_log_max_u + (log_max_u - logu) # We use `d != 0` rather than `d > 0.` because `d < 0.` should never # happens; if it does we want to complain loudly (which `softplus_inverse` # will). d_ok = tf.not_equal(d, 0.) safe_d = tf.where(d_ok, d, tf.ones_like(d)) d_ok_result = logu + tfd.softplus_inverse(safe_d) inf = np.array(np.inf, dtype=logu.dtype.as_numpy_dtype) # When not(d_ok) and is_positive_and_largest then we manually compute the # log_loosum_u. (We can efficiently do this for any one point but not all, # hence we still need the above calculation.) This is good because when # this condition is met, we cannot use the above calculation; its -inf. # We now compute the log-leave-out-max-sum, replicate it to every # point and make sure to select it only when we need to. 
is_positive_and_largest = tf.logical_and( logu > 0., tf.equal(logu, log_max_u[tf.newaxis, ...])) log_lomsum_u = tf.reduce_logsumexp( input_tensor=tf.where(is_positive_and_largest, tf.fill(tf.shape(input=logu), -inf), logu), axis=0, keepdims=True) log_lomsum_u = tf.tile( log_lomsum_u, multiples=1 + tf.pad(tensor=[n - 1], paddings=[[0, tf.rank(logu) - 1]])) d_not_ok_result = tf.where(is_positive_and_largest, log_lomsum_u, tf.fill(tf.shape(input=d), -inf)) log_loosum_u = tf.where(d_ok, d_ok_result, d_not_ok_result) # The swap-one-out-sum ("soosum") is n different sums, each of which # replaces the i-th item with the i-th-left-out average, i.e., # soo_sum_u[i] = [exp(logu) - exp(logu[i])] + exp(mean(logu[!=i])) # = exp(log_loosum_u[i]) + exp(looavg_logu[i]) looavg_logu = (tf.reduce_sum(input_tensor=logu, axis=0) - logu) / nm1 log_soosum_u = tf.reduce_logsumexp( input_tensor=tf.stack([log_loosum_u, looavg_logu]), axis=0) log_avg_u = log_sum_u_minus_log_max_u + log_max_u - log_n log_sooavg_u = log_soosum_u - log_n log_avg_u.set_shape(logu.shape.with_rank_at_least(1)[1:]) log_sooavg_u.set_shape(logu.shape) return log_avg_u, log_sooavg_u
def csiszar_vimco_helper(logu, name=None): """Helper to `csiszar_vimco`; computes `log_avg_u`, `log_sooavg_u`. `axis = 0` of `logu` is presumed to correspond to iid samples from `q`, i.e., ```none logu[j] = log(u[j]) u[j] = p(x, h[j]) / q(h[j] | x) h[j] iid~ q(H | x) ``` Args: logu: Floating-type `Tensor` representing `log(p(x, h) / q(h | x))`. name: Python `str` name prefixed to Ops created by this function. Returns: log_avg_u: `logu.dtype` `Tensor` corresponding to the natural-log of the average of `u`. The sum of the gradient of `log_avg_u` is `1`. log_sooavg_u: `logu.dtype` `Tensor` characterized by the natural-log of the average of `u`` except that the average swaps-out `u[i]` for the leave-`i`-out Geometric-average. The mean of the gradient of `log_sooavg_u` is `1`. Mathematically `log_sooavg_u` is, ```none log_sooavg_u[i] = log(Avg{h[j ; i] : j=0, ..., m-1}) h[j ; i] = { u[j] j!=i { GeometricAverage{u[k] : k != i} j==i ``` """ with tf.compat.v1.name_scope(name, "csiszar_vimco_helper", [logu]): logu = tf.convert_to_tensor(value=logu, name="logu") n = tf.compat.dimension_value(logu.shape.with_rank_at_least(1)[0]) if n is None: n = tf.shape(input=logu)[0] log_n = tf.math.log(tf.cast(n, dtype=logu.dtype)) nm1 = tf.cast(n - 1, dtype=logu.dtype) else: log_n = np.log(n).astype(logu.dtype.as_numpy_dtype) nm1 = np.asarray(n - 1, dtype=logu.dtype.as_numpy_dtype) # Throughout we reduce across axis=0 since this is presumed to be iid # samples. log_max_u = tf.reduce_max(input_tensor=logu, axis=0) log_sum_u_minus_log_max_u = tf.reduce_logsumexp( input_tensor=logu - log_max_u, axis=0) # log_loosum_u[i] = # = logsumexp(logu[j] : j != i) # = log( exp(logsumexp(logu)) - exp(logu[i]) ) # = log( exp(logsumexp(logu - logu[i])) exp(logu[i]) - exp(logu[i])) # = logu[i] + log(exp(logsumexp(logu - logu[i])) - 1) # = logu[i] + log(exp(logsumexp(logu) - logu[i]) - 1) # = logu[i] + softplus_inverse(logsumexp(logu) - logu[i]) d = log_sum_u_minus_log_max_u + (log_max_u - logu) # We use `d != 0` rather than `d > 0.` because `d < 0.` should never # happens; if it does we want to complain loudly (which `softplus_inverse` # will). d_ok = tf.not_equal(d, 0.) safe_d = tf.where(d_ok, d, tf.ones_like(d)) d_ok_result = logu + tfd.softplus_inverse(safe_d) inf = np.array(np.inf, dtype=logu.dtype.as_numpy_dtype) # When not(d_ok) and is_positive_and_largest then we manually compute the # log_loosum_u. (We can efficiently do this for any one point but not all, # hence we still need the above calculation.) This is good because when # this condition is met, we cannot use the above calculation; its -inf. # We now compute the log-leave-out-max-sum, replicate it to every # point and make sure to select it only when we need to. 
is_positive_and_largest = tf.logical_and( logu > 0., tf.equal(logu, log_max_u[tf.newaxis, ...])) log_lomsum_u = tf.reduce_logsumexp( input_tensor=tf.where(is_positive_and_largest, tf.fill(tf.shape(input=logu), -inf), logu), axis=0, keepdims=True) log_lomsum_u = tf.tile( log_lomsum_u, multiples=1 + tf.pad(tensor=[n - 1], paddings=[[0, tf.rank(logu) - 1]])) d_not_ok_result = tf.where(is_positive_and_largest, log_lomsum_u, tf.fill(tf.shape(input=d), -inf)) log_loosum_u = tf.where(d_ok, d_ok_result, d_not_ok_result) # The swap-one-out-sum ("soosum") is n different sums, each of which # replaces the i-th item with the i-th-left-out average, i.e., # soo_sum_u[i] = [exp(logu) - exp(logu[i])] + exp(mean(logu[!=i])) # = exp(log_loosum_u[i]) + exp(looavg_logu[i]) looavg_logu = (tf.reduce_sum(input_tensor=logu, axis=0) - logu) / nm1 log_soosum_u = tf.reduce_logsumexp( input_tensor=tf.stack([log_loosum_u, looavg_logu]), axis=0) log_avg_u = log_sum_u_minus_log_max_u + log_max_u - log_n log_sooavg_u = log_soosum_u - log_n log_avg_u.set_shape(logu.shape.with_rank_at_least(1)[1:]) log_sooavg_u.set_shape(logu.shape) return log_avg_u, log_sooavg_u
[ "Helper", "to", "csiszar_vimco", ";", "computes", "log_avg_u", "log_sooavg_u", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L1009-L1111
[ "def", "csiszar_vimco_helper", "(", "logu", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "\"csiszar_vimco_helper\"", ",", "[", "logu", "]", ")", ":", "logu", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "logu", ",", "name", "=", "\"logu\"", ")", "n", "=", "tf", ".", "compat", ".", "dimension_value", "(", "logu", ".", "shape", ".", "with_rank_at_least", "(", "1", ")", "[", "0", "]", ")", "if", "n", "is", "None", ":", "n", "=", "tf", ".", "shape", "(", "input", "=", "logu", ")", "[", "0", "]", "log_n", "=", "tf", ".", "math", ".", "log", "(", "tf", ".", "cast", "(", "n", ",", "dtype", "=", "logu", ".", "dtype", ")", ")", "nm1", "=", "tf", ".", "cast", "(", "n", "-", "1", ",", "dtype", "=", "logu", ".", "dtype", ")", "else", ":", "log_n", "=", "np", ".", "log", "(", "n", ")", ".", "astype", "(", "logu", ".", "dtype", ".", "as_numpy_dtype", ")", "nm1", "=", "np", ".", "asarray", "(", "n", "-", "1", ",", "dtype", "=", "logu", ".", "dtype", ".", "as_numpy_dtype", ")", "# Throughout we reduce across axis=0 since this is presumed to be iid", "# samples.", "log_max_u", "=", "tf", ".", "reduce_max", "(", "input_tensor", "=", "logu", ",", "axis", "=", "0", ")", "log_sum_u_minus_log_max_u", "=", "tf", ".", "reduce_logsumexp", "(", "input_tensor", "=", "logu", "-", "log_max_u", ",", "axis", "=", "0", ")", "# log_loosum_u[i] =", "# = logsumexp(logu[j] : j != i)", "# = log( exp(logsumexp(logu)) - exp(logu[i]) )", "# = log( exp(logsumexp(logu - logu[i])) exp(logu[i]) - exp(logu[i]))", "# = logu[i] + log(exp(logsumexp(logu - logu[i])) - 1)", "# = logu[i] + log(exp(logsumexp(logu) - logu[i]) - 1)", "# = logu[i] + softplus_inverse(logsumexp(logu) - logu[i])", "d", "=", "log_sum_u_minus_log_max_u", "+", "(", "log_max_u", "-", "logu", ")", "# We use `d != 0` rather than `d > 0.` because `d < 0.` should never", "# happens; if it does we want to complain loudly (which `softplus_inverse`", "# will).", "d_ok", "=", "tf", ".", "not_equal", "(", "d", ",", "0.", ")", "safe_d", "=", "tf", ".", "where", "(", "d_ok", ",", "d", ",", "tf", ".", "ones_like", "(", "d", ")", ")", "d_ok_result", "=", "logu", "+", "tfd", ".", "softplus_inverse", "(", "safe_d", ")", "inf", "=", "np", ".", "array", "(", "np", ".", "inf", ",", "dtype", "=", "logu", ".", "dtype", ".", "as_numpy_dtype", ")", "# When not(d_ok) and is_positive_and_largest then we manually compute the", "# log_loosum_u. (We can efficiently do this for any one point but not all,", "# hence we still need the above calculation.) 
This is good because when", "# this condition is met, we cannot use the above calculation; its -inf.", "# We now compute the log-leave-out-max-sum, replicate it to every", "# point and make sure to select it only when we need to.", "is_positive_and_largest", "=", "tf", ".", "logical_and", "(", "logu", ">", "0.", ",", "tf", ".", "equal", "(", "logu", ",", "log_max_u", "[", "tf", ".", "newaxis", ",", "...", "]", ")", ")", "log_lomsum_u", "=", "tf", ".", "reduce_logsumexp", "(", "input_tensor", "=", "tf", ".", "where", "(", "is_positive_and_largest", ",", "tf", ".", "fill", "(", "tf", ".", "shape", "(", "input", "=", "logu", ")", ",", "-", "inf", ")", ",", "logu", ")", ",", "axis", "=", "0", ",", "keepdims", "=", "True", ")", "log_lomsum_u", "=", "tf", ".", "tile", "(", "log_lomsum_u", ",", "multiples", "=", "1", "+", "tf", ".", "pad", "(", "tensor", "=", "[", "n", "-", "1", "]", ",", "paddings", "=", "[", "[", "0", ",", "tf", ".", "rank", "(", "logu", ")", "-", "1", "]", "]", ")", ")", "d_not_ok_result", "=", "tf", ".", "where", "(", "is_positive_and_largest", ",", "log_lomsum_u", ",", "tf", ".", "fill", "(", "tf", ".", "shape", "(", "input", "=", "d", ")", ",", "-", "inf", ")", ")", "log_loosum_u", "=", "tf", ".", "where", "(", "d_ok", ",", "d_ok_result", ",", "d_not_ok_result", ")", "# The swap-one-out-sum (\"soosum\") is n different sums, each of which", "# replaces the i-th item with the i-th-left-out average, i.e.,", "# soo_sum_u[i] = [exp(logu) - exp(logu[i])] + exp(mean(logu[!=i]))", "# = exp(log_loosum_u[i]) + exp(looavg_logu[i])", "looavg_logu", "=", "(", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "logu", ",", "axis", "=", "0", ")", "-", "logu", ")", "/", "nm1", "log_soosum_u", "=", "tf", ".", "reduce_logsumexp", "(", "input_tensor", "=", "tf", ".", "stack", "(", "[", "log_loosum_u", ",", "looavg_logu", "]", ")", ",", "axis", "=", "0", ")", "log_avg_u", "=", "log_sum_u_minus_log_max_u", "+", "log_max_u", "-", "log_n", "log_sooavg_u", "=", "log_soosum_u", "-", "log_n", "log_avg_u", ".", "set_shape", "(", "logu", ".", "shape", ".", "with_rank_at_least", "(", "1", ")", "[", "1", ":", "]", ")", "log_sooavg_u", ".", "set_shape", "(", "logu", ".", "shape", ")", "return", "log_avg_u", ",", "log_sooavg_u" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
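A naive NumPy sketch of the two quantities documented in the record above. This is not the TFP implementation: the function name `naive_vimco_helper` is illustrative only, and it works directly in probability space, skipping the log-space stability tricks (max-subtraction, `softplus_inverse`) that the real code needs when `|logu| >> 0`.

```python
import numpy as np

def naive_vimco_helper(logu):
  """Naive reference for `log_avg_u`, `log_sooavg_u`; assumes moderate |logu|."""
  u = np.exp(logu)                       # shape [m, ...]; axis 0 = iid samples
  m = u.shape[0]
  log_avg_u = np.log(u.mean(axis=0))
  log_sooavg_u = np.empty_like(logu)
  for i in range(m):
    loo_logu = np.delete(logu, i, axis=0)       # leave-`i`-out samples
    geo_avg = np.exp(loo_logu.mean(axis=0))     # their geometric average
    swapped = u.copy()
    swapped[i] = geo_avg                        # swap u[i] for that average
    log_sooavg_u[i] = np.log(swapped.mean(axis=0))
  return log_avg_u, log_sooavg_u

logu = np.random.randn(5, 3)             # 5 samples, batch of 3
log_avg_u, log_sooavg_u = naive_vimco_helper(logu)
print(log_avg_u.shape, log_sooavg_u.shape)      # (3,) (5, 3)
```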
test
_interp_regular_1d_grid_impl
1-D interpolation that works with/without batching.
tensorflow_probability/python/math/interpolation.py
def _interp_regular_1d_grid_impl(x, x_ref_min, x_ref_max, y_ref, axis=-1, batch_y_ref=False, fill_value='constant_extension', fill_value_below=None, fill_value_above=None, grid_regularizing_transform=None, name=None): """1-D interpolation that works with/without batching.""" # To understand the implemention differences between the batch/no-batch # versions of this function, you should probably understand the difference # between tf.gather and tf.batch_gather. In particular, we do *not* make the # no-batch version a special case of the batch version, because that would # an inefficient use of batch_gather with unnecessarily broadcast args. with tf.compat.v1.name_scope( name, values=[ x, x_ref_min, x_ref_max, y_ref, axis, fill_value, fill_value_below, fill_value_above ]): # Arg checking. allowed_fv_st = ('constant_extension', 'extrapolate') for fv in (fill_value, fill_value_below, fill_value_above): if isinstance(fv, str) and fv not in allowed_fv_st: raise ValueError( 'A fill value ({}) was not an allowed string ({})'.format( fv, allowed_fv_st)) # Separate value fills for below/above incurs extra cost, so keep track of # whether this is needed. need_separate_fills = ( fill_value_above is not None or fill_value_below is not None or fill_value == 'extrapolate' # always requries separate below/above ) if need_separate_fills and fill_value_above is None: fill_value_above = fill_value if need_separate_fills and fill_value_below is None: fill_value_below = fill_value dtype = dtype_util.common_dtype([x, x_ref_min, x_ref_max, y_ref], preferred_dtype=tf.float32) x = tf.convert_to_tensor(value=x, name='x', dtype=dtype) x_ref_min = tf.convert_to_tensor( value=x_ref_min, name='x_ref_min', dtype=dtype) x_ref_max = tf.convert_to_tensor( value=x_ref_max, name='x_ref_max', dtype=dtype) if not batch_y_ref: _assert_ndims_statically(x_ref_min, expect_ndims=0) _assert_ndims_statically(x_ref_max, expect_ndims=0) y_ref = tf.convert_to_tensor(value=y_ref, name='y_ref', dtype=dtype) if batch_y_ref: # If we're batching, # x.shape ~ [A1,...,AN, D], x_ref_min/max.shape ~ [A1,...,AN] # So to add together we'll append a singleton. # If not batching, x_ref_min/max are scalar, so this isn't an issue, # moreover, if not batching, x can be scalar, and expanding x_ref_min/max # would cause a bad expansion of x when added to x (confused yet?). x_ref_min = x_ref_min[..., tf.newaxis] x_ref_max = x_ref_max[..., tf.newaxis] axis = tf.convert_to_tensor(value=axis, name='axis', dtype=tf.int32) axis = distribution_util.make_non_negative_axis(axis, tf.rank(y_ref)) _assert_ndims_statically(axis, expect_ndims=0) ny = tf.cast(tf.shape(input=y_ref)[axis], dtype) # Map [x_ref_min, x_ref_max] to [0, ny - 1]. # This is the (fractional) index of x. if grid_regularizing_transform is None: g = lambda x: x else: g = grid_regularizing_transform fractional_idx = ((g(x) - g(x_ref_min)) / (g(x_ref_max) - g(x_ref_min))) x_idx_unclipped = fractional_idx * (ny - 1) # Wherever x is NaN, x_idx_unclipped will be NaN as well. # Keep track of the nan indices here (so we can impute NaN later). # Also eliminate any NaN indices, since there is not NaN in 32bit. nan_idx = tf.math.is_nan(x_idx_unclipped) x_idx_unclipped = tf.where(nan_idx, tf.zeros_like(x_idx_unclipped), x_idx_unclipped) x_idx = tf.clip_by_value(x_idx_unclipped, tf.zeros((), dtype=dtype), ny - 1) # Get the index above and below x_idx. # Naively we could set idx_below = floor(x_idx), idx_above = ceil(x_idx), # however, this results in idx_below == idx_above whenever x is on a grid. 
# This in turn results in y_ref_below == y_ref_above, and then the gradient # at this point is zero. So here we "jitter" one of idx_below, idx_above, # so that they are at different values. This jittering does not affect the # interpolated value, but does make the gradient nonzero (unless of course # the y_ref values are the same). idx_below = tf.floor(x_idx) idx_above = tf.minimum(idx_below + 1, ny - 1) idx_below = tf.maximum(idx_above - 1, 0) # These are the values of y_ref corresponding to above/below indices. idx_below_int32 = tf.cast(idx_below, dtype=tf.int32) idx_above_int32 = tf.cast(idx_above, dtype=tf.int32) if batch_y_ref: # If y_ref.shape ~ [A1,...,AN, C, B1,...,BN], # and x.shape, x_ref_min/max.shape ~ [A1,...,AN, D] # Then y_ref_below.shape ~ [A1,...,AN, D, B1,...,BN] y_ref_below = _batch_gather_with_broadcast(y_ref, idx_below_int32, axis) y_ref_above = _batch_gather_with_broadcast(y_ref, idx_above_int32, axis) else: # Here, y_ref_below.shape = # y_ref.shape[:axis] + x.shape + y_ref.shape[axis + 1:] y_ref_below = tf.gather(y_ref, idx_below_int32, axis=axis) y_ref_above = tf.gather(y_ref, idx_above_int32, axis=axis) # Use t to get a convex combination of the below/above values. t = x_idx - idx_below # x, and tensors shaped like x, need to be added to, and selected with # (using tf.where) the output y. This requires appending singletons. # Make functions appropriate for batch/no-batch. if batch_y_ref: # In the non-batch case, the output shape is going to be # y_ref.shape[:axis] + x.shape + y_ref.shape[axis+1:] expand_x_fn = _make_expand_x_fn_for_batch_interpolation(y_ref, axis) else: # In the batch case, the output shape is going to be # Broadcast(y_ref.shape[:axis], x.shape[:-1]) + # x.shape[-1:] + y_ref.shape[axis+1:] expand_x_fn = _make_expand_x_fn_for_non_batch_interpolation(y_ref, axis) t = expand_x_fn(t) nan_idx = expand_x_fn(nan_idx, broadcast=True) x_idx_unclipped = expand_x_fn(x_idx_unclipped, broadcast=True) y = t * y_ref_above + (1 - t) * y_ref_below # Now begins a long excursion to fill values outside [x_min, x_max]. # Re-insert NaN wherever x was NaN. y = tf.where(nan_idx, tf.fill(tf.shape(input=y), tf.constant(np.nan, y.dtype)), y) if not need_separate_fills: if fill_value == 'constant_extension': pass # Already handled by clipping x_idx_unclipped. else: y = tf.where((x_idx_unclipped < 0) | (x_idx_unclipped > ny - 1), fill_value + tf.zeros_like(y), y) else: # Fill values below x_ref_min <==> x_idx_unclipped < 0. if fill_value_below == 'constant_extension': pass # Already handled by the clipping that created x_idx_unclipped. elif fill_value_below == 'extrapolate': if batch_y_ref: # For every batch member, gather the first two elements of y across # `axis`. y_0 = tf.gather(y_ref, [0], axis=axis) y_1 = tf.gather(y_ref, [1], axis=axis) else: # If not batching, we want to gather the first two elements, just like # above. However, these results need to be replicated for every # member of x. An easy way to do that is to gather using # indices = zeros/ones(x.shape). y_0 = tf.gather( y_ref, tf.zeros(tf.shape(input=x), dtype=tf.int32), axis=axis) y_1 = tf.gather( y_ref, tf.ones(tf.shape(input=x), dtype=tf.int32), axis=axis) x_delta = (x_ref_max - x_ref_min) / (ny - 1) x_factor = expand_x_fn((x - x_ref_min) / x_delta, broadcast=True) y = tf.where(x_idx_unclipped < 0, y_0 + x_factor * (y_1 - y_0), y) else: y = tf.where(x_idx_unclipped < 0, fill_value_below + tf.zeros_like(y), y) # Fill values above x_ref_min <==> x_idx_unclipped > ny - 1. 
if fill_value_above == 'constant_extension': pass # Already handled by the clipping that created x_idx_unclipped. elif fill_value_above == 'extrapolate': ny_int32 = tf.shape(input=y_ref)[axis] if batch_y_ref: y_n1 = tf.gather(y_ref, [tf.shape(input=y_ref)[axis] - 1], axis=axis) y_n2 = tf.gather(y_ref, [tf.shape(input=y_ref)[axis] - 2], axis=axis) else: y_n1 = tf.gather( y_ref, tf.fill(tf.shape(input=x), ny_int32 - 1), axis=axis) y_n2 = tf.gather( y_ref, tf.fill(tf.shape(input=x), ny_int32 - 2), axis=axis) x_delta = (x_ref_max - x_ref_min) / (ny - 1) x_factor = expand_x_fn((x - x_ref_max) / x_delta, broadcast=True) y = tf.where(x_idx_unclipped > ny - 1, y_n1 + x_factor * (y_n1 - y_n2), y) else: y = tf.where(x_idx_unclipped > ny - 1, fill_value_above + tf.zeros_like(y), y) return y
def _interp_regular_1d_grid_impl(x, x_ref_min, x_ref_max, y_ref, axis=-1, batch_y_ref=False, fill_value='constant_extension', fill_value_below=None, fill_value_above=None, grid_regularizing_transform=None, name=None): """1-D interpolation that works with/without batching.""" # To understand the implemention differences between the batch/no-batch # versions of this function, you should probably understand the difference # between tf.gather and tf.batch_gather. In particular, we do *not* make the # no-batch version a special case of the batch version, because that would # an inefficient use of batch_gather with unnecessarily broadcast args. with tf.compat.v1.name_scope( name, values=[ x, x_ref_min, x_ref_max, y_ref, axis, fill_value, fill_value_below, fill_value_above ]): # Arg checking. allowed_fv_st = ('constant_extension', 'extrapolate') for fv in (fill_value, fill_value_below, fill_value_above): if isinstance(fv, str) and fv not in allowed_fv_st: raise ValueError( 'A fill value ({}) was not an allowed string ({})'.format( fv, allowed_fv_st)) # Separate value fills for below/above incurs extra cost, so keep track of # whether this is needed. need_separate_fills = ( fill_value_above is not None or fill_value_below is not None or fill_value == 'extrapolate' # always requries separate below/above ) if need_separate_fills and fill_value_above is None: fill_value_above = fill_value if need_separate_fills and fill_value_below is None: fill_value_below = fill_value dtype = dtype_util.common_dtype([x, x_ref_min, x_ref_max, y_ref], preferred_dtype=tf.float32) x = tf.convert_to_tensor(value=x, name='x', dtype=dtype) x_ref_min = tf.convert_to_tensor( value=x_ref_min, name='x_ref_min', dtype=dtype) x_ref_max = tf.convert_to_tensor( value=x_ref_max, name='x_ref_max', dtype=dtype) if not batch_y_ref: _assert_ndims_statically(x_ref_min, expect_ndims=0) _assert_ndims_statically(x_ref_max, expect_ndims=0) y_ref = tf.convert_to_tensor(value=y_ref, name='y_ref', dtype=dtype) if batch_y_ref: # If we're batching, # x.shape ~ [A1,...,AN, D], x_ref_min/max.shape ~ [A1,...,AN] # So to add together we'll append a singleton. # If not batching, x_ref_min/max are scalar, so this isn't an issue, # moreover, if not batching, x can be scalar, and expanding x_ref_min/max # would cause a bad expansion of x when added to x (confused yet?). x_ref_min = x_ref_min[..., tf.newaxis] x_ref_max = x_ref_max[..., tf.newaxis] axis = tf.convert_to_tensor(value=axis, name='axis', dtype=tf.int32) axis = distribution_util.make_non_negative_axis(axis, tf.rank(y_ref)) _assert_ndims_statically(axis, expect_ndims=0) ny = tf.cast(tf.shape(input=y_ref)[axis], dtype) # Map [x_ref_min, x_ref_max] to [0, ny - 1]. # This is the (fractional) index of x. if grid_regularizing_transform is None: g = lambda x: x else: g = grid_regularizing_transform fractional_idx = ((g(x) - g(x_ref_min)) / (g(x_ref_max) - g(x_ref_min))) x_idx_unclipped = fractional_idx * (ny - 1) # Wherever x is NaN, x_idx_unclipped will be NaN as well. # Keep track of the nan indices here (so we can impute NaN later). # Also eliminate any NaN indices, since there is not NaN in 32bit. nan_idx = tf.math.is_nan(x_idx_unclipped) x_idx_unclipped = tf.where(nan_idx, tf.zeros_like(x_idx_unclipped), x_idx_unclipped) x_idx = tf.clip_by_value(x_idx_unclipped, tf.zeros((), dtype=dtype), ny - 1) # Get the index above and below x_idx. # Naively we could set idx_below = floor(x_idx), idx_above = ceil(x_idx), # however, this results in idx_below == idx_above whenever x is on a grid. 
# This in turn results in y_ref_below == y_ref_above, and then the gradient # at this point is zero. So here we "jitter" one of idx_below, idx_above, # so that they are at different values. This jittering does not affect the # interpolated value, but does make the gradient nonzero (unless of course # the y_ref values are the same). idx_below = tf.floor(x_idx) idx_above = tf.minimum(idx_below + 1, ny - 1) idx_below = tf.maximum(idx_above - 1, 0) # These are the values of y_ref corresponding to above/below indices. idx_below_int32 = tf.cast(idx_below, dtype=tf.int32) idx_above_int32 = tf.cast(idx_above, dtype=tf.int32) if batch_y_ref: # If y_ref.shape ~ [A1,...,AN, C, B1,...,BN], # and x.shape, x_ref_min/max.shape ~ [A1,...,AN, D] # Then y_ref_below.shape ~ [A1,...,AN, D, B1,...,BN] y_ref_below = _batch_gather_with_broadcast(y_ref, idx_below_int32, axis) y_ref_above = _batch_gather_with_broadcast(y_ref, idx_above_int32, axis) else: # Here, y_ref_below.shape = # y_ref.shape[:axis] + x.shape + y_ref.shape[axis + 1:] y_ref_below = tf.gather(y_ref, idx_below_int32, axis=axis) y_ref_above = tf.gather(y_ref, idx_above_int32, axis=axis) # Use t to get a convex combination of the below/above values. t = x_idx - idx_below # x, and tensors shaped like x, need to be added to, and selected with # (using tf.where) the output y. This requires appending singletons. # Make functions appropriate for batch/no-batch. if batch_y_ref: # In the non-batch case, the output shape is going to be # y_ref.shape[:axis] + x.shape + y_ref.shape[axis+1:] expand_x_fn = _make_expand_x_fn_for_batch_interpolation(y_ref, axis) else: # In the batch case, the output shape is going to be # Broadcast(y_ref.shape[:axis], x.shape[:-1]) + # x.shape[-1:] + y_ref.shape[axis+1:] expand_x_fn = _make_expand_x_fn_for_non_batch_interpolation(y_ref, axis) t = expand_x_fn(t) nan_idx = expand_x_fn(nan_idx, broadcast=True) x_idx_unclipped = expand_x_fn(x_idx_unclipped, broadcast=True) y = t * y_ref_above + (1 - t) * y_ref_below # Now begins a long excursion to fill values outside [x_min, x_max]. # Re-insert NaN wherever x was NaN. y = tf.where(nan_idx, tf.fill(tf.shape(input=y), tf.constant(np.nan, y.dtype)), y) if not need_separate_fills: if fill_value == 'constant_extension': pass # Already handled by clipping x_idx_unclipped. else: y = tf.where((x_idx_unclipped < 0) | (x_idx_unclipped > ny - 1), fill_value + tf.zeros_like(y), y) else: # Fill values below x_ref_min <==> x_idx_unclipped < 0. if fill_value_below == 'constant_extension': pass # Already handled by the clipping that created x_idx_unclipped. elif fill_value_below == 'extrapolate': if batch_y_ref: # For every batch member, gather the first two elements of y across # `axis`. y_0 = tf.gather(y_ref, [0], axis=axis) y_1 = tf.gather(y_ref, [1], axis=axis) else: # If not batching, we want to gather the first two elements, just like # above. However, these results need to be replicated for every # member of x. An easy way to do that is to gather using # indices = zeros/ones(x.shape). y_0 = tf.gather( y_ref, tf.zeros(tf.shape(input=x), dtype=tf.int32), axis=axis) y_1 = tf.gather( y_ref, tf.ones(tf.shape(input=x), dtype=tf.int32), axis=axis) x_delta = (x_ref_max - x_ref_min) / (ny - 1) x_factor = expand_x_fn((x - x_ref_min) / x_delta, broadcast=True) y = tf.where(x_idx_unclipped < 0, y_0 + x_factor * (y_1 - y_0), y) else: y = tf.where(x_idx_unclipped < 0, fill_value_below + tf.zeros_like(y), y) # Fill values above x_ref_min <==> x_idx_unclipped > ny - 1. 
if fill_value_above == 'constant_extension': pass # Already handled by the clipping that created x_idx_unclipped. elif fill_value_above == 'extrapolate': ny_int32 = tf.shape(input=y_ref)[axis] if batch_y_ref: y_n1 = tf.gather(y_ref, [tf.shape(input=y_ref)[axis] - 1], axis=axis) y_n2 = tf.gather(y_ref, [tf.shape(input=y_ref)[axis] - 2], axis=axis) else: y_n1 = tf.gather( y_ref, tf.fill(tf.shape(input=x), ny_int32 - 1), axis=axis) y_n2 = tf.gather( y_ref, tf.fill(tf.shape(input=x), ny_int32 - 2), axis=axis) x_delta = (x_ref_max - x_ref_min) / (ny - 1) x_factor = expand_x_fn((x - x_ref_max) / x_delta, broadcast=True) y = tf.where(x_idx_unclipped > ny - 1, y_n1 + x_factor * (y_n1 - y_n2), y) else: y = tf.where(x_idx_unclipped > ny - 1, fill_value_above + tf.zeros_like(y), y) return y
[ "1", "-", "D", "interpolation", "that", "works", "with", "/", "without", "batching", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/interpolation.py#L38-L237
[ "def", "_interp_regular_1d_grid_impl", "(", "x", ",", "x_ref_min", ",", "x_ref_max", ",", "y_ref", ",", "axis", "=", "-", "1", ",", "batch_y_ref", "=", "False", ",", "fill_value", "=", "'constant_extension'", ",", "fill_value_below", "=", "None", ",", "fill_value_above", "=", "None", ",", "grid_regularizing_transform", "=", "None", ",", "name", "=", "None", ")", ":", "# To understand the implemention differences between the batch/no-batch", "# versions of this function, you should probably understand the difference", "# between tf.gather and tf.batch_gather. In particular, we do *not* make the", "# no-batch version a special case of the batch version, because that would", "# an inefficient use of batch_gather with unnecessarily broadcast args.", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "values", "=", "[", "x", ",", "x_ref_min", ",", "x_ref_max", ",", "y_ref", ",", "axis", ",", "fill_value", ",", "fill_value_below", ",", "fill_value_above", "]", ")", ":", "# Arg checking.", "allowed_fv_st", "=", "(", "'constant_extension'", ",", "'extrapolate'", ")", "for", "fv", "in", "(", "fill_value", ",", "fill_value_below", ",", "fill_value_above", ")", ":", "if", "isinstance", "(", "fv", ",", "str", ")", "and", "fv", "not", "in", "allowed_fv_st", ":", "raise", "ValueError", "(", "'A fill value ({}) was not an allowed string ({})'", ".", "format", "(", "fv", ",", "allowed_fv_st", ")", ")", "# Separate value fills for below/above incurs extra cost, so keep track of", "# whether this is needed.", "need_separate_fills", "=", "(", "fill_value_above", "is", "not", "None", "or", "fill_value_below", "is", "not", "None", "or", "fill_value", "==", "'extrapolate'", "# always requries separate below/above", ")", "if", "need_separate_fills", "and", "fill_value_above", "is", "None", ":", "fill_value_above", "=", "fill_value", "if", "need_separate_fills", "and", "fill_value_below", "is", "None", ":", "fill_value_below", "=", "fill_value", "dtype", "=", "dtype_util", ".", "common_dtype", "(", "[", "x", ",", "x_ref_min", ",", "x_ref_max", ",", "y_ref", "]", ",", "preferred_dtype", "=", "tf", ".", "float32", ")", "x", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x", ",", "name", "=", "'x'", ",", "dtype", "=", "dtype", ")", "x_ref_min", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x_ref_min", ",", "name", "=", "'x_ref_min'", ",", "dtype", "=", "dtype", ")", "x_ref_max", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x_ref_max", ",", "name", "=", "'x_ref_max'", ",", "dtype", "=", "dtype", ")", "if", "not", "batch_y_ref", ":", "_assert_ndims_statically", "(", "x_ref_min", ",", "expect_ndims", "=", "0", ")", "_assert_ndims_statically", "(", "x_ref_max", ",", "expect_ndims", "=", "0", ")", "y_ref", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "y_ref", ",", "name", "=", "'y_ref'", ",", "dtype", "=", "dtype", ")", "if", "batch_y_ref", ":", "# If we're batching,", "# x.shape ~ [A1,...,AN, D], x_ref_min/max.shape ~ [A1,...,AN]", "# So to add together we'll append a singleton.", "# If not batching, x_ref_min/max are scalar, so this isn't an issue,", "# moreover, if not batching, x can be scalar, and expanding x_ref_min/max", "# would cause a bad expansion of x when added to x (confused yet?).", "x_ref_min", "=", "x_ref_min", "[", "...", ",", "tf", ".", "newaxis", "]", "x_ref_max", "=", "x_ref_max", "[", "...", ",", "tf", ".", "newaxis", "]", "axis", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "axis", ",", "name", "=", "'axis'", ",", 
"dtype", "=", "tf", ".", "int32", ")", "axis", "=", "distribution_util", ".", "make_non_negative_axis", "(", "axis", ",", "tf", ".", "rank", "(", "y_ref", ")", ")", "_assert_ndims_statically", "(", "axis", ",", "expect_ndims", "=", "0", ")", "ny", "=", "tf", ".", "cast", "(", "tf", ".", "shape", "(", "input", "=", "y_ref", ")", "[", "axis", "]", ",", "dtype", ")", "# Map [x_ref_min, x_ref_max] to [0, ny - 1].", "# This is the (fractional) index of x.", "if", "grid_regularizing_transform", "is", "None", ":", "g", "=", "lambda", "x", ":", "x", "else", ":", "g", "=", "grid_regularizing_transform", "fractional_idx", "=", "(", "(", "g", "(", "x", ")", "-", "g", "(", "x_ref_min", ")", ")", "/", "(", "g", "(", "x_ref_max", ")", "-", "g", "(", "x_ref_min", ")", ")", ")", "x_idx_unclipped", "=", "fractional_idx", "*", "(", "ny", "-", "1", ")", "# Wherever x is NaN, x_idx_unclipped will be NaN as well.", "# Keep track of the nan indices here (so we can impute NaN later).", "# Also eliminate any NaN indices, since there is not NaN in 32bit.", "nan_idx", "=", "tf", ".", "math", ".", "is_nan", "(", "x_idx_unclipped", ")", "x_idx_unclipped", "=", "tf", ".", "where", "(", "nan_idx", ",", "tf", ".", "zeros_like", "(", "x_idx_unclipped", ")", ",", "x_idx_unclipped", ")", "x_idx", "=", "tf", ".", "clip_by_value", "(", "x_idx_unclipped", ",", "tf", ".", "zeros", "(", "(", ")", ",", "dtype", "=", "dtype", ")", ",", "ny", "-", "1", ")", "# Get the index above and below x_idx.", "# Naively we could set idx_below = floor(x_idx), idx_above = ceil(x_idx),", "# however, this results in idx_below == idx_above whenever x is on a grid.", "# This in turn results in y_ref_below == y_ref_above, and then the gradient", "# at this point is zero. So here we \"jitter\" one of idx_below, idx_above,", "# so that they are at different values. This jittering does not affect the", "# interpolated value, but does make the gradient nonzero (unless of course", "# the y_ref values are the same).", "idx_below", "=", "tf", ".", "floor", "(", "x_idx", ")", "idx_above", "=", "tf", ".", "minimum", "(", "idx_below", "+", "1", ",", "ny", "-", "1", ")", "idx_below", "=", "tf", ".", "maximum", "(", "idx_above", "-", "1", ",", "0", ")", "# These are the values of y_ref corresponding to above/below indices.", "idx_below_int32", "=", "tf", ".", "cast", "(", "idx_below", ",", "dtype", "=", "tf", ".", "int32", ")", "idx_above_int32", "=", "tf", ".", "cast", "(", "idx_above", ",", "dtype", "=", "tf", ".", "int32", ")", "if", "batch_y_ref", ":", "# If y_ref.shape ~ [A1,...,AN, C, B1,...,BN],", "# and x.shape, x_ref_min/max.shape ~ [A1,...,AN, D]", "# Then y_ref_below.shape ~ [A1,...,AN, D, B1,...,BN]", "y_ref_below", "=", "_batch_gather_with_broadcast", "(", "y_ref", ",", "idx_below_int32", ",", "axis", ")", "y_ref_above", "=", "_batch_gather_with_broadcast", "(", "y_ref", ",", "idx_above_int32", ",", "axis", ")", "else", ":", "# Here, y_ref_below.shape =", "# y_ref.shape[:axis] + x.shape + y_ref.shape[axis + 1:]", "y_ref_below", "=", "tf", ".", "gather", "(", "y_ref", ",", "idx_below_int32", ",", "axis", "=", "axis", ")", "y_ref_above", "=", "tf", ".", "gather", "(", "y_ref", ",", "idx_above_int32", ",", "axis", "=", "axis", ")", "# Use t to get a convex combination of the below/above values.", "t", "=", "x_idx", "-", "idx_below", "# x, and tensors shaped like x, need to be added to, and selected with", "# (using tf.where) the output y. 
This requires appending singletons.", "# Make functions appropriate for batch/no-batch.", "if", "batch_y_ref", ":", "# In the non-batch case, the output shape is going to be", "# y_ref.shape[:axis] + x.shape + y_ref.shape[axis+1:]", "expand_x_fn", "=", "_make_expand_x_fn_for_batch_interpolation", "(", "y_ref", ",", "axis", ")", "else", ":", "# In the batch case, the output shape is going to be", "# Broadcast(y_ref.shape[:axis], x.shape[:-1]) +", "# x.shape[-1:] + y_ref.shape[axis+1:]", "expand_x_fn", "=", "_make_expand_x_fn_for_non_batch_interpolation", "(", "y_ref", ",", "axis", ")", "t", "=", "expand_x_fn", "(", "t", ")", "nan_idx", "=", "expand_x_fn", "(", "nan_idx", ",", "broadcast", "=", "True", ")", "x_idx_unclipped", "=", "expand_x_fn", "(", "x_idx_unclipped", ",", "broadcast", "=", "True", ")", "y", "=", "t", "*", "y_ref_above", "+", "(", "1", "-", "t", ")", "*", "y_ref_below", "# Now begins a long excursion to fill values outside [x_min, x_max].", "# Re-insert NaN wherever x was NaN.", "y", "=", "tf", ".", "where", "(", "nan_idx", ",", "tf", ".", "fill", "(", "tf", ".", "shape", "(", "input", "=", "y", ")", ",", "tf", ".", "constant", "(", "np", ".", "nan", ",", "y", ".", "dtype", ")", ")", ",", "y", ")", "if", "not", "need_separate_fills", ":", "if", "fill_value", "==", "'constant_extension'", ":", "pass", "# Already handled by clipping x_idx_unclipped.", "else", ":", "y", "=", "tf", ".", "where", "(", "(", "x_idx_unclipped", "<", "0", ")", "|", "(", "x_idx_unclipped", ">", "ny", "-", "1", ")", ",", "fill_value", "+", "tf", ".", "zeros_like", "(", "y", ")", ",", "y", ")", "else", ":", "# Fill values below x_ref_min <==> x_idx_unclipped < 0.", "if", "fill_value_below", "==", "'constant_extension'", ":", "pass", "# Already handled by the clipping that created x_idx_unclipped.", "elif", "fill_value_below", "==", "'extrapolate'", ":", "if", "batch_y_ref", ":", "# For every batch member, gather the first two elements of y across", "# `axis`.", "y_0", "=", "tf", ".", "gather", "(", "y_ref", ",", "[", "0", "]", ",", "axis", "=", "axis", ")", "y_1", "=", "tf", ".", "gather", "(", "y_ref", ",", "[", "1", "]", ",", "axis", "=", "axis", ")", "else", ":", "# If not batching, we want to gather the first two elements, just like", "# above. However, these results need to be replicated for every", "# member of x. 
An easy way to do that is to gather using", "# indices = zeros/ones(x.shape).", "y_0", "=", "tf", ".", "gather", "(", "y_ref", ",", "tf", ".", "zeros", "(", "tf", ".", "shape", "(", "input", "=", "x", ")", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "axis", "=", "axis", ")", "y_1", "=", "tf", ".", "gather", "(", "y_ref", ",", "tf", ".", "ones", "(", "tf", ".", "shape", "(", "input", "=", "x", ")", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "axis", "=", "axis", ")", "x_delta", "=", "(", "x_ref_max", "-", "x_ref_min", ")", "/", "(", "ny", "-", "1", ")", "x_factor", "=", "expand_x_fn", "(", "(", "x", "-", "x_ref_min", ")", "/", "x_delta", ",", "broadcast", "=", "True", ")", "y", "=", "tf", ".", "where", "(", "x_idx_unclipped", "<", "0", ",", "y_0", "+", "x_factor", "*", "(", "y_1", "-", "y_0", ")", ",", "y", ")", "else", ":", "y", "=", "tf", ".", "where", "(", "x_idx_unclipped", "<", "0", ",", "fill_value_below", "+", "tf", ".", "zeros_like", "(", "y", ")", ",", "y", ")", "# Fill values above x_ref_min <==> x_idx_unclipped > ny - 1.", "if", "fill_value_above", "==", "'constant_extension'", ":", "pass", "# Already handled by the clipping that created x_idx_unclipped.", "elif", "fill_value_above", "==", "'extrapolate'", ":", "ny_int32", "=", "tf", ".", "shape", "(", "input", "=", "y_ref", ")", "[", "axis", "]", "if", "batch_y_ref", ":", "y_n1", "=", "tf", ".", "gather", "(", "y_ref", ",", "[", "tf", ".", "shape", "(", "input", "=", "y_ref", ")", "[", "axis", "]", "-", "1", "]", ",", "axis", "=", "axis", ")", "y_n2", "=", "tf", ".", "gather", "(", "y_ref", ",", "[", "tf", ".", "shape", "(", "input", "=", "y_ref", ")", "[", "axis", "]", "-", "2", "]", ",", "axis", "=", "axis", ")", "else", ":", "y_n1", "=", "tf", ".", "gather", "(", "y_ref", ",", "tf", ".", "fill", "(", "tf", ".", "shape", "(", "input", "=", "x", ")", ",", "ny_int32", "-", "1", ")", ",", "axis", "=", "axis", ")", "y_n2", "=", "tf", ".", "gather", "(", "y_ref", ",", "tf", ".", "fill", "(", "tf", ".", "shape", "(", "input", "=", "x", ")", ",", "ny_int32", "-", "2", ")", ",", "axis", "=", "axis", ")", "x_delta", "=", "(", "x_ref_max", "-", "x_ref_min", ")", "/", "(", "ny", "-", "1", ")", "x_factor", "=", "expand_x_fn", "(", "(", "x", "-", "x_ref_max", ")", "/", "x_delta", ",", "broadcast", "=", "True", ")", "y", "=", "tf", ".", "where", "(", "x_idx_unclipped", ">", "ny", "-", "1", ",", "y_n1", "+", "x_factor", "*", "(", "y_n1", "-", "y_n2", ")", ",", "y", ")", "else", ":", "y", "=", "tf", ".", "where", "(", "x_idx_unclipped", ">", "ny", "-", "1", ",", "fill_value_above", "+", "tf", ".", "zeros_like", "(", "y", ")", ",", "y", ")", "return", "y" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
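The core flow of `_interp_regular_1d_grid_impl` above (map `x` to a fractional grid index, clip, gather the below/above neighbours, take their convex combination) can be summarized in a few lines of NumPy. This is a sketch of the non-batch, `'constant_extension'` path only; `naive_interp_regular_1d` is an illustrative name, and the sketch omits NaN handling, the extrapolation fills, and the axis/broadcast bookkeeping of the real implementation.

```python
import numpy as np

def naive_interp_regular_1d(x, x_ref_min, x_ref_max, y_ref):
  """Sketch of the core interpolation step for a 1-D `y_ref`."""
  ny = y_ref.shape[-1]
  # Map [x_ref_min, x_ref_max] to the fractional grid index in [0, ny - 1].
  frac_idx = (x - x_ref_min) / (x_ref_max - x_ref_min) * (ny - 1)
  idx = np.clip(frac_idx, 0.0, ny - 1)   # 'constant_extension' outside the grid
  below = np.floor(idx).astype(int)
  above = np.minimum(below + 1, ny - 1)
  below = np.maximum(above - 1, 0)       # "jitter" so below != above on grid points
  t = idx - below
  return t * y_ref[above] + (1 - t) * y_ref[below]

y_ref = np.exp(np.linspace(0., 10., 200))
print(naive_interp_regular_1d(np.array([0.5, 3.3]), 0., 10., y_ref))
# ~[exp(0.5), exp(3.3)]
```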
test
interp_regular_1d_grid
Linear `1-D` interpolation on a regular (constant spacing) grid.

Given reference values, this function computes a piecewise linear interpolant
and evaluates it on a new set of `x` values. The interpolant is built from `C`
reference values indexed by one dimension of `y_ref` (specified by the `axis`
kwarg).

If `y_ref` is a vector, then each value `y_ref[i]` is considered to be equal
to `f(x_ref[i])`, for `C` (implicitly defined) reference values between
`x_ref_min` and `x_ref_max`:

```none
x_ref[i] = x_ref_min + i * (x_ref_max - x_ref_min) / (C - 1),
i = 0, ..., C - 1.
```

If `rank(y_ref) > 1`, then dimension `axis` indexes `C` reference values of a
shape `y_ref.shape[:axis] + y_ref.shape[axis + 1:]` `Tensor`.

If `rank(x) > 1`, then the output is obtained by effectively flattening `x`,
interpolating along `axis`, then expanding the result to shape
`y_ref.shape[:axis] + x.shape + y_ref.shape[axis + 1:]`.

These shape semantics are equivalent to `scipy.interpolate.interp1d`.

Args:
  x: Numeric `Tensor`. The x-coordinates of the interpolated output values.
  x_ref_min: Scalar `Tensor` of same `dtype` as `x`. The minimum value of the
    (implicitly defined) reference `x_ref`.
  x_ref_max: Scalar `Tensor` of same `dtype` as `x`. The maximum value of the
    (implicitly defined) reference `x_ref`.
  y_ref: `N-D` `Tensor` (`N > 0`) of same `dtype` as `x`. The reference output
    values.
  axis: Scalar `Tensor` designating the dimension of `y_ref` that indexes
    values of the interpolation table.
    Default value: `-1`, the rightmost axis.
  fill_value: Determines what values output should take for `x` values that
    are below `x_ref_min` or above `x_ref_max`. `Tensor` or one of the strings
    "constant_extension" ==> Extend as constant function.
    "extrapolate" ==> Extrapolate in a linear fashion.
    Default value: `"constant_extension"`
  fill_value_below: Optional override of `fill_value` for `x < x_ref_min`.
  fill_value_above: Optional override of `fill_value` for `x > x_ref_max`.
  grid_regularizing_transform: Optional transformation `g` which regularizes
    the implied spacing of the x reference points. In other words, if
    provided, we assume `g(x_ref_i)` is a regular grid between `g(x_ref_min)`
    and `g(x_ref_max)`.
  name: A name to prepend to created ops.
    Default value: `"interp_regular_1d_grid"`.

Returns:
  y_interp: Interpolation between members of `y_ref`, at points `x`.
    `Tensor` of same `dtype` as `x`, and shape
    `y.shape[:axis] + x.shape + y.shape[axis + 1:]`

Raises:
  ValueError: If `fill_value` is not an allowed string.
  ValueError: If `axis` is not a scalar.

#### Examples

Interpolate a function of one variable:

```python
y_ref = tf.exp(tf.linspace(start=0., stop=10., num=200))

interp_regular_1d_grid(
    x=[6.0, 0.5, 3.3], x_ref_min=0., x_ref_max=10., y_ref=y_ref)
==> approx [exp(6.0), exp(0.5), exp(3.3)]
```

Interpolate a matrix-valued function of one variable:

```python
mat_0 = [[1., 0.], [0., 1.]]
mat_1 = [[0., -1], [1, 0]]
y_ref = [mat_0, mat_1]

# Get three output matrices at once.
tfp.math.interp_regular_1d_grid(
    x=[0., 0.5, 1.], x_ref_min=0., x_ref_max=1., y_ref=y_ref, axis=0)
==> [mat_0, 0.5 * mat_0 + 0.5 * mat_1, mat_1]
```

Interpolate a scalar valued function, and get a matrix of results:

```python
y_ref = tf.exp(tf.linspace(start=0., stop=10., num=200))
x = [[1.1, 1.2], [2.1, 2.2]]
tfp.math.interp_regular_1d_grid(x, x_ref_min=0., x_ref_max=10., y_ref=y_ref)
==> tf.exp(x)
```

Interpolate a function of one variable on a log-spaced grid:

```python
x_ref = tf.exp(tf.linspace(tf.log(1.), tf.log(100000.), num_pts))
y_ref = tf.log(x_ref + x_ref**2)

interp_regular_1d_grid(x=[1.1, 2.2], x_ref_min=1., x_ref_max=100000.,
                       y_ref=y_ref, grid_regularizing_transform=tf.log)
==> [tf.log(1.1 + 1.1**2), tf.log(2.2 + 2.2**2)]
```
tensorflow_probability/python/math/interpolation.py
def interp_regular_1d_grid(x, x_ref_min, x_ref_max, y_ref, axis=-1, fill_value='constant_extension', fill_value_below=None, fill_value_above=None, grid_regularizing_transform=None, name=None): """Linear `1-D` interpolation on a regular (constant spacing) grid. Given reference values, this function computes a piecewise linear interpolant and evaluates it on a new set of `x` values. The interpolant is built from `C` reference values indexed by one dimension of `y_ref` (specified by the `axis` kwarg). If `y_ref` is a vector, then each value `y_ref[i]` is considered to be equal to `f(x_ref[i])`, for `C` (implicitly defined) reference values between `x_ref_min` and `x_ref_max`: ```none x_ref[i] = x_ref_min + i * (x_ref_max - x_ref_min) / (C - 1), i = 0, ..., C - 1. ``` If `rank(y_ref) > 1`, then dimension `axis` indexes `C` reference values of a shape `y_ref.shape[:axis] + y_ref.shape[axis + 1:]` `Tensor`. If `rank(x) > 1`, then the output is obtained by effectively flattening `x`, interpolating along `axis`, then expanding the result to shape `y_ref.shape[:axis] + x.shape + y_ref.shape[axis + 1:]`. These shape semantics are equivalent to `scipy.interpolate.interp1d`. Args: x: Numeric `Tensor` The x-coordinates of the interpolated output values. x_ref_min: Scalar `Tensor` of same `dtype` as `x`. The minimum value of the (implicitly defined) reference `x_ref`. x_ref_max: Scalar `Tensor` of same `dtype` as `x`. The maximum value of the (implicitly defined) reference `x_ref`. y_ref: `N-D` `Tensor` (`N > 0`) of same `dtype` as `x`. The reference output values. axis: Scalar `Tensor` designating the dimension of `y_ref` that indexes values of the interpolation table. Default value: `-1`, the rightmost axis. fill_value: Determines what values output should take for `x` values that are below `x_ref_min` or above `x_ref_max`. `Tensor` or one of the strings "constant_extension" ==> Extend as constant function. "extrapolate" ==> Extrapolate in a linear fashion. Default value: `"constant_extension"` fill_value_below: Optional override of `fill_value` for `x < x_ref_min`. fill_value_above: Optional override of `fill_value` for `x > x_ref_max`. grid_regularizing_transform: Optional transformation `g` which regularizes the implied spacing of the x reference points. In other words, if provided, we assume `g(x_ref_i)` is a regular grid between `g(x_ref_min)` and `g(x_ref_max)`. name: A name to prepend to created ops. Default value: `"interp_regular_1d_grid"`. Returns: y_interp: Interpolation between members of `y_ref`, at points `x`. `Tensor` of same `dtype` as `x`, and shape `y.shape[:axis] + x.shape + y.shape[axis + 1:]` Raises: ValueError: If `fill_value` is not an allowed string. ValueError: If `axis` is not a scalar. #### Examples Interpolate a function of one variable: ```python y_ref = tf.exp(tf.linspace(start=0., stop=10., num=200)) interp_regular_1d_grid( x=[6.0, 0.5, 3.3], x_ref_min=0., x_ref_max=1., y_ref=y_ref) ==> approx [exp(6.0), exp(0.5), exp(3.3)] ``` Interpolate a matrix-valued function of one variable: ```python mat_0 = [[1., 0.], [0., 1.]] mat_1 = [[0., -1], [1, 0]] y_ref = [mat_0, mat_1] # Get three output matrices at once. 
tfp.math.interp_regular_1d_grid( x=[0., 0.5, 1.], x_ref_min=0., x_ref_max=1., y_ref=y_ref, axis=0) ==> [mat_0, 0.5 * mat_0 + 0.5 * mat_1, mat_1] ``` Interpolate a scalar valued function, and get a matrix of results: ```python y_ref = tf.exp(tf.linspace(start=0., stop=10., num=200)) x = [[1.1, 1.2], [2.1, 2.2]] tfp.math.interp_regular_1d_grid(x, x_ref_min=0., x_ref_max=10., y_ref=y_ref) ==> tf.exp(x) ``` Interpolate a function of one variable on a log-spaced grid: ```python x_ref = tf.exp(tf.linspace(tf.log(1.), tf.log(100000.), num_pts)) y_ref = tf.log(x_ref + x_ref**2) interp_regular_1d_grid(x=[1.1, 2.2], x_ref_min=1., x_ref_max=100000., y_ref, grid_regularizing_transform=tf.log) ==> [tf.log(1.1 + 1.1**2), tf.log(2.2 + 2.2**2)] ``` """ return _interp_regular_1d_grid_impl( x, x_ref_min, x_ref_max, y_ref, axis=axis, batch_y_ref=False, fill_value=fill_value, fill_value_below=fill_value_below, fill_value_above=fill_value_above, grid_regularizing_transform=grid_regularizing_transform, name=name or 'interp_regular_1d_grid')
def interp_regular_1d_grid(x, x_ref_min, x_ref_max, y_ref, axis=-1, fill_value='constant_extension', fill_value_below=None, fill_value_above=None, grid_regularizing_transform=None, name=None): """Linear `1-D` interpolation on a regular (constant spacing) grid. Given reference values, this function computes a piecewise linear interpolant and evaluates it on a new set of `x` values. The interpolant is built from `C` reference values indexed by one dimension of `y_ref` (specified by the `axis` kwarg). If `y_ref` is a vector, then each value `y_ref[i]` is considered to be equal to `f(x_ref[i])`, for `C` (implicitly defined) reference values between `x_ref_min` and `x_ref_max`: ```none x_ref[i] = x_ref_min + i * (x_ref_max - x_ref_min) / (C - 1), i = 0, ..., C - 1. ``` If `rank(y_ref) > 1`, then dimension `axis` indexes `C` reference values of a shape `y_ref.shape[:axis] + y_ref.shape[axis + 1:]` `Tensor`. If `rank(x) > 1`, then the output is obtained by effectively flattening `x`, interpolating along `axis`, then expanding the result to shape `y_ref.shape[:axis] + x.shape + y_ref.shape[axis + 1:]`. These shape semantics are equivalent to `scipy.interpolate.interp1d`. Args: x: Numeric `Tensor` The x-coordinates of the interpolated output values. x_ref_min: Scalar `Tensor` of same `dtype` as `x`. The minimum value of the (implicitly defined) reference `x_ref`. x_ref_max: Scalar `Tensor` of same `dtype` as `x`. The maximum value of the (implicitly defined) reference `x_ref`. y_ref: `N-D` `Tensor` (`N > 0`) of same `dtype` as `x`. The reference output values. axis: Scalar `Tensor` designating the dimension of `y_ref` that indexes values of the interpolation table. Default value: `-1`, the rightmost axis. fill_value: Determines what values output should take for `x` values that are below `x_ref_min` or above `x_ref_max`. `Tensor` or one of the strings "constant_extension" ==> Extend as constant function. "extrapolate" ==> Extrapolate in a linear fashion. Default value: `"constant_extension"` fill_value_below: Optional override of `fill_value` for `x < x_ref_min`. fill_value_above: Optional override of `fill_value` for `x > x_ref_max`. grid_regularizing_transform: Optional transformation `g` which regularizes the implied spacing of the x reference points. In other words, if provided, we assume `g(x_ref_i)` is a regular grid between `g(x_ref_min)` and `g(x_ref_max)`. name: A name to prepend to created ops. Default value: `"interp_regular_1d_grid"`. Returns: y_interp: Interpolation between members of `y_ref`, at points `x`. `Tensor` of same `dtype` as `x`, and shape `y.shape[:axis] + x.shape + y.shape[axis + 1:]` Raises: ValueError: If `fill_value` is not an allowed string. ValueError: If `axis` is not a scalar. #### Examples Interpolate a function of one variable: ```python y_ref = tf.exp(tf.linspace(start=0., stop=10., num=200)) interp_regular_1d_grid( x=[6.0, 0.5, 3.3], x_ref_min=0., x_ref_max=1., y_ref=y_ref) ==> approx [exp(6.0), exp(0.5), exp(3.3)] ``` Interpolate a matrix-valued function of one variable: ```python mat_0 = [[1., 0.], [0., 1.]] mat_1 = [[0., -1], [1, 0]] y_ref = [mat_0, mat_1] # Get three output matrices at once. 
tfp.math.interp_regular_1d_grid( x=[0., 0.5, 1.], x_ref_min=0., x_ref_max=1., y_ref=y_ref, axis=0) ==> [mat_0, 0.5 * mat_0 + 0.5 * mat_1, mat_1] ``` Interpolate a scalar valued function, and get a matrix of results: ```python y_ref = tf.exp(tf.linspace(start=0., stop=10., num=200)) x = [[1.1, 1.2], [2.1, 2.2]] tfp.math.interp_regular_1d_grid(x, x_ref_min=0., x_ref_max=10., y_ref=y_ref) ==> tf.exp(x) ``` Interpolate a function of one variable on a log-spaced grid: ```python x_ref = tf.exp(tf.linspace(tf.log(1.), tf.log(100000.), num_pts)) y_ref = tf.log(x_ref + x_ref**2) interp_regular_1d_grid(x=[1.1, 2.2], x_ref_min=1., x_ref_max=100000., y_ref, grid_regularizing_transform=tf.log) ==> [tf.log(1.1 + 1.1**2), tf.log(2.2 + 2.2**2)] ``` """ return _interp_regular_1d_grid_impl( x, x_ref_min, x_ref_max, y_ref, axis=axis, batch_y_ref=False, fill_value=fill_value, fill_value_below=fill_value_below, fill_value_above=fill_value_above, grid_regularizing_transform=grid_regularizing_transform, name=name or 'interp_regular_1d_grid')
[ "Linear", "1", "-", "D", "interpolation", "on", "a", "regular", "(", "constant", "spacing", ")", "grid", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/interpolation.py#L240-L368
[ "def", "interp_regular_1d_grid", "(", "x", ",", "x_ref_min", ",", "x_ref_max", ",", "y_ref", ",", "axis", "=", "-", "1", ",", "fill_value", "=", "'constant_extension'", ",", "fill_value_below", "=", "None", ",", "fill_value_above", "=", "None", ",", "grid_regularizing_transform", "=", "None", ",", "name", "=", "None", ")", ":", "return", "_interp_regular_1d_grid_impl", "(", "x", ",", "x_ref_min", ",", "x_ref_max", ",", "y_ref", ",", "axis", "=", "axis", ",", "batch_y_ref", "=", "False", ",", "fill_value", "=", "fill_value", ",", "fill_value_below", "=", "fill_value_below", ",", "fill_value_above", "=", "fill_value_above", ",", "grid_regularizing_transform", "=", "grid_regularizing_transform", ",", "name", "=", "name", "or", "'interp_regular_1d_grid'", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
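The docstring above says the shape semantics match `scipy.interpolate.interp1d`. A small cross-check along those lines might look as follows; it assumes TF eager execution so the returned `Tensor` can be compared directly as a NumPy array, and the tolerance is only indicative for `float32`.

```python
import numpy as np
import tensorflow_probability as tfp
from scipy.interpolate import interp1d

x_ref = np.linspace(0., 10., 200).astype(np.float32)   # implied reference grid
y_ref = np.exp(x_ref)
x = np.array([0.5, 3.3, 6.0], dtype=np.float32)

tfp_y = tfp.math.interp_regular_1d_grid(
    x=x, x_ref_min=0., x_ref_max=10., y_ref=y_ref)
scipy_y = interp1d(x_ref, y_ref)(x)
print(np.allclose(tfp_y, scipy_y, rtol=1e-4))           # expected: True
```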
test
batch_interp_regular_1d_grid
Linear `1-D` interpolation on a regular (constant spacing) grid.

Given [batch of] reference values, this function computes a piecewise linear
interpolant and evaluates it on a [batch of] new `x` values. The interpolant
is built from `C` reference values indexed by one dimension of `y_ref`
(specified by the `axis` kwarg).

If `y_ref` is a vector, then each value `y_ref[i]` is considered to be equal
to `f(x_ref[i])`, for `C` (implicitly defined) reference values between
`x_ref_min` and `x_ref_max`:

```none
x_ref[i] = x_ref_min + i * (x_ref_max - x_ref_min) / (C - 1),
i = 0, ..., C - 1.
```

In the general case, dimensions to the left of `axis` in `y_ref` are broadcast
with leading dimensions in `x`, `x_ref_min`, `x_ref_max`.

Args:
  x: Numeric `Tensor`. The x-coordinates of the interpolated output values for
    each batch. Shape broadcasts with `[A1, ..., AN, D]`, `N >= 0`.
  x_ref_min: `Tensor` of same `dtype` as `x`. The minimum value of each batch
    of the (implicitly defined) reference `x_ref`. Shape broadcasts with
    `[A1, ..., AN]`, `N >= 0`.
  x_ref_max: `Tensor` of same `dtype` as `x`. The maximum value of each batch
    of the (implicitly defined) reference `x_ref`. Shape broadcasts with
    `[A1, ..., AN]`, `N >= 0`.
  y_ref: `Tensor` of same `dtype` as `x`. The reference output values.
    `y_ref.shape[:axis]` broadcasts with the batch shape `[A1, ..., AN]`, and
    `y_ref.shape[axis:]` is `[C, B1, ..., BM]`, so the trailing dimensions
    index `C` reference values of a rank `M` `Tensor` (`M >= 0`).
  axis: Scalar `Tensor` designating the dimension of `y_ref` that indexes
    values of the interpolation table.
    Default value: `-1`, the rightmost axis.
  fill_value: Determines what values output should take for `x` values that
    are below `x_ref_min` or above `x_ref_max`. `Tensor` or one of the strings
    "constant_extension" ==> Extend as constant function.
    "extrapolate" ==> Extrapolate in a linear fashion.
    Default value: `"constant_extension"`
  fill_value_below: Optional override of `fill_value` for `x < x_ref_min`.
  fill_value_above: Optional override of `fill_value` for `x > x_ref_max`.
  grid_regularizing_transform: Optional transformation `g` which regularizes
    the implied spacing of the x reference points. In other words, if
    provided, we assume `g(x_ref_i)` is a regular grid between `g(x_ref_min)`
    and `g(x_ref_max)`.
  name: A name to prepend to created ops.
    Default value: `"batch_interp_regular_1d_grid"`.

Returns:
  y_interp: Interpolation between members of `y_ref`, at points `x`.
    `Tensor` of same `dtype` as `x`, and shape `[A1, ..., AN, D, B1, ..., BM]`

Raises:
  ValueError: If `fill_value` is not an allowed string.
  ValueError: If `axis` is not a scalar.

#### Examples

Interpolate a function of one variable:

```python
y_ref = tf.exp(tf.linspace(start=0., stop=10., num=20))

batch_interp_regular_1d_grid(
    x=[6.0, 0.5, 3.3], x_ref_min=0., x_ref_max=10., y_ref=y_ref)
==> approx [exp(6.0), exp(0.5), exp(3.3)]
```

Interpolate a batch of functions of one variable.

```python
# First batch member is an exponential function, second is a log.
implied_x_ref = [tf.linspace(-3., 3.2, 200), tf.linspace(0.5, 3., 200)]
y_ref = tf.stack(  # Shape [2, 200], 2 batches, 200 reference values per batch
    [tf.exp(implied_x_ref[0]), tf.log(implied_x_ref[1])], axis=0)

x = [[-1., 1., 0.],  # Shape [2, 3], 2 batches, 3 values per batch.
     [1., 2., 3.]]

y = tfp.math.batch_interp_regular_1d_grid(  # Shape [2, 3]
    x, x_ref_min=[-3., 0.5], x_ref_max=[3.2, 3.], y_ref=y_ref, axis=-1)

# y[0] approx tf.exp(x[0])
# y[1] approx tf.log(x[1])
```

Interpolate a function of one variable on a log-spaced grid:

```python
x_ref = tf.exp(tf.linspace(tf.log(1.), tf.log(100000.), num_pts))
y_ref = tf.log(x_ref + x_ref**2)

batch_interp_regular_1d_grid(x=[1.1, 2.2], x_ref_min=1., x_ref_max=100000.,
                             y_ref=y_ref, grid_regularizing_transform=tf.log)
==> [tf.log(1.1 + 1.1**2), tf.log(2.2 + 2.2**2)]
```
tensorflow_probability/python/math/interpolation.py
def batch_interp_regular_1d_grid(x, x_ref_min, x_ref_max, y_ref, axis=-1, fill_value='constant_extension', fill_value_below=None, fill_value_above=None, grid_regularizing_transform=None, name=None): """Linear `1-D` interpolation on a regular (constant spacing) grid. Given [batch of] reference values, this function computes a piecewise linear interpolant and evaluates it on a [batch of] of new `x` values. The interpolant is built from `C` reference values indexed by one dimension of `y_ref` (specified by the `axis` kwarg). If `y_ref` is a vector, then each value `y_ref[i]` is considered to be equal to `f(x_ref[i])`, for `C` (implicitly defined) reference values between `x_ref_min` and `x_ref_max`: ```none x_ref[i] = x_ref_min + i * (x_ref_max - x_ref_min) / (C - 1), i = 0, ..., C - 1. ``` In the general case, dimensions to the left of `axis` in `y_ref` are broadcast with leading dimensions in `x`, `x_ref_min`, `x_ref_max`. Args: x: Numeric `Tensor` The x-coordinates of the interpolated output values for each batch. Shape broadcasts with `[A1, ..., AN, D]`, `N >= 0`. x_ref_min: `Tensor` of same `dtype` as `x`. The minimum value of the each batch of the (implicitly defined) reference `x_ref`. Shape broadcasts with `[A1, ..., AN]`, `N >= 0`. x_ref_max: `Tensor` of same `dtype` as `x`. The maximum value of the each batch of the (implicitly defined) reference `x_ref`. Shape broadcasts with `[A1, ..., AN]`, `N >= 0`. y_ref: `Tensor` of same `dtype` as `x`. The reference output values. `y_ref.shape[:axis]` broadcasts with the batch shape `[A1, ..., AN]`, and `y_ref.shape[axis:]` is `[C, B1, ..., BM]`, so the trailing dimensions index `C` reference values of a rank `M` `Tensor` (`M >= 0`). axis: Scalar `Tensor` designating the dimension of `y_ref` that indexes values of the interpolation table. Default value: `-1`, the rightmost axis. fill_value: Determines what values output should take for `x` values that are below `x_ref_min` or above `x_ref_max`. `Tensor` or one of the strings "constant_extension" ==> Extend as constant function. "extrapolate" ==> Extrapolate in a linear fashion. Default value: `"constant_extension"` fill_value_below: Optional override of `fill_value` for `x < x_ref_min`. fill_value_above: Optional override of `fill_value` for `x > x_ref_max`. grid_regularizing_transform: Optional transformation `g` which regularizes the implied spacing of the x reference points. In other words, if provided, we assume `g(x_ref_i)` is a regular grid between `g(x_ref_min)` and `g(x_ref_max)`. name: A name to prepend to created ops. Default value: `"batch_interp_regular_1d_grid"`. Returns: y_interp: Interpolation between members of `y_ref`, at points `x`. `Tensor` of same `dtype` as `x`, and shape `[A1, ..., AN, D, B1, ..., BM]` Raises: ValueError: If `fill_value` is not an allowed string. ValueError: If `axis` is not a scalar. #### Examples Interpolate a function of one variable: ```python y_ref = tf.exp(tf.linspace(start=0., stop=10., 20)) batch_interp_regular_1d_grid( x=[6.0, 0.5, 3.3], x_ref_min=0., x_ref_max=1., y_ref=y_ref) ==> approx [exp(6.0), exp(0.5), exp(3.3)] ``` Interpolate a batch of functions of one variable. ```python # First batch member is an exponential function, second is a log. implied_x_ref = [tf.linspace(-3., 3.2, 200), tf.linspace(0.5, 3., 200)] y_ref = tf.stack( # Shape [2, 200], 2 batches, 200 reference values per batch [tf.exp(implied_x_ref[0]), tf.log(implied_x_ref[1])], axis=0) x = [[-1., 1., 0.], # Shape [2, 3], 2 batches, 3 values per batch. 
[1., 2., 3.]] y = tfp.math.batch_interp_regular_1d_grid( # Shape [2, 3] x, x_ref_min=[-3., 0.5], x_ref_max=[3.2, 3.], y_ref=y_ref, axis=-1) # y[0] approx tf.exp(x[0]) # y[1] approx tf.log(x[1]) ``` Interpolate a function of one variable on a log-spaced grid: ```python x_ref = tf.exp(tf.linspace(tf.log(1.), tf.log(100000.), num_pts)) y_ref = tf.log(x_ref + x_ref**2) batch_interp_regular_1d_grid(x=[1.1, 2.2], x_ref_min=1., x_ref_max=100000., y_ref, grid_regularizing_transform=tf.log) ==> [tf.log(1.1 + 1.1**2), tf.log(2.2 + 2.2**2)] ``` """ return _interp_regular_1d_grid_impl( x, x_ref_min, x_ref_max, y_ref, axis=axis, batch_y_ref=True, fill_value=fill_value, fill_value_below=fill_value_below, fill_value_above=fill_value_above, grid_regularizing_transform=grid_regularizing_transform, name=name or 'batch_interp_regular_1d_grid')
def batch_interp_regular_1d_grid(x, x_ref_min, x_ref_max, y_ref, axis=-1, fill_value='constant_extension', fill_value_below=None, fill_value_above=None, grid_regularizing_transform=None, name=None): """Linear `1-D` interpolation on a regular (constant spacing) grid. Given [batch of] reference values, this function computes a piecewise linear interpolant and evaluates it on a [batch of] of new `x` values. The interpolant is built from `C` reference values indexed by one dimension of `y_ref` (specified by the `axis` kwarg). If `y_ref` is a vector, then each value `y_ref[i]` is considered to be equal to `f(x_ref[i])`, for `C` (implicitly defined) reference values between `x_ref_min` and `x_ref_max`: ```none x_ref[i] = x_ref_min + i * (x_ref_max - x_ref_min) / (C - 1), i = 0, ..., C - 1. ``` In the general case, dimensions to the left of `axis` in `y_ref` are broadcast with leading dimensions in `x`, `x_ref_min`, `x_ref_max`. Args: x: Numeric `Tensor` The x-coordinates of the interpolated output values for each batch. Shape broadcasts with `[A1, ..., AN, D]`, `N >= 0`. x_ref_min: `Tensor` of same `dtype` as `x`. The minimum value of the each batch of the (implicitly defined) reference `x_ref`. Shape broadcasts with `[A1, ..., AN]`, `N >= 0`. x_ref_max: `Tensor` of same `dtype` as `x`. The maximum value of the each batch of the (implicitly defined) reference `x_ref`. Shape broadcasts with `[A1, ..., AN]`, `N >= 0`. y_ref: `Tensor` of same `dtype` as `x`. The reference output values. `y_ref.shape[:axis]` broadcasts with the batch shape `[A1, ..., AN]`, and `y_ref.shape[axis:]` is `[C, B1, ..., BM]`, so the trailing dimensions index `C` reference values of a rank `M` `Tensor` (`M >= 0`). axis: Scalar `Tensor` designating the dimension of `y_ref` that indexes values of the interpolation table. Default value: `-1`, the rightmost axis. fill_value: Determines what values output should take for `x` values that are below `x_ref_min` or above `x_ref_max`. `Tensor` or one of the strings "constant_extension" ==> Extend as constant function. "extrapolate" ==> Extrapolate in a linear fashion. Default value: `"constant_extension"` fill_value_below: Optional override of `fill_value` for `x < x_ref_min`. fill_value_above: Optional override of `fill_value` for `x > x_ref_max`. grid_regularizing_transform: Optional transformation `g` which regularizes the implied spacing of the x reference points. In other words, if provided, we assume `g(x_ref_i)` is a regular grid between `g(x_ref_min)` and `g(x_ref_max)`. name: A name to prepend to created ops. Default value: `"batch_interp_regular_1d_grid"`. Returns: y_interp: Interpolation between members of `y_ref`, at points `x`. `Tensor` of same `dtype` as `x`, and shape `[A1, ..., AN, D, B1, ..., BM]` Raises: ValueError: If `fill_value` is not an allowed string. ValueError: If `axis` is not a scalar. #### Examples Interpolate a function of one variable: ```python y_ref = tf.exp(tf.linspace(start=0., stop=10., 20)) batch_interp_regular_1d_grid( x=[6.0, 0.5, 3.3], x_ref_min=0., x_ref_max=1., y_ref=y_ref) ==> approx [exp(6.0), exp(0.5), exp(3.3)] ``` Interpolate a batch of functions of one variable. ```python # First batch member is an exponential function, second is a log. implied_x_ref = [tf.linspace(-3., 3.2, 200), tf.linspace(0.5, 3., 200)] y_ref = tf.stack( # Shape [2, 200], 2 batches, 200 reference values per batch [tf.exp(implied_x_ref[0]), tf.log(implied_x_ref[1])], axis=0) x = [[-1., 1., 0.], # Shape [2, 3], 2 batches, 3 values per batch. 
[1., 2., 3.]] y = tfp.math.batch_interp_regular_1d_grid( # Shape [2, 3] x, x_ref_min=[-3., 0.5], x_ref_max=[3.2, 3.], y_ref=y_ref, axis=-1) # y[0] approx tf.exp(x[0]) # y[1] approx tf.log(x[1]) ``` Interpolate a function of one variable on a log-spaced grid: ```python x_ref = tf.exp(tf.linspace(tf.log(1.), tf.log(100000.), num_pts)) y_ref = tf.log(x_ref + x_ref**2) batch_interp_regular_1d_grid(x=[1.1, 2.2], x_ref_min=1., x_ref_max=100000., y_ref, grid_regularizing_transform=tf.log) ==> [tf.log(1.1 + 1.1**2), tf.log(2.2 + 2.2**2)] ``` """ return _interp_regular_1d_grid_impl( x, x_ref_min, x_ref_max, y_ref, axis=axis, batch_y_ref=True, fill_value=fill_value, fill_value_below=fill_value_below, fill_value_above=fill_value_above, grid_regularizing_transform=grid_regularizing_transform, name=name or 'batch_interp_regular_1d_grid')
[ "Linear", "1", "-", "D", "interpolation", "on", "a", "regular", "(", "constant", "spacing", ")", "grid", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/interpolation.py#L371-L497
[ "def", "batch_interp_regular_1d_grid", "(", "x", ",", "x_ref_min", ",", "x_ref_max", ",", "y_ref", ",", "axis", "=", "-", "1", ",", "fill_value", "=", "'constant_extension'", ",", "fill_value_below", "=", "None", ",", "fill_value_above", "=", "None", ",", "grid_regularizing_transform", "=", "None", ",", "name", "=", "None", ")", ":", "return", "_interp_regular_1d_grid_impl", "(", "x", ",", "x_ref_min", ",", "x_ref_max", ",", "y_ref", ",", "axis", "=", "axis", ",", "batch_y_ref", "=", "True", ",", "fill_value", "=", "fill_value", ",", "fill_value_below", "=", "fill_value_below", ",", "fill_value_above", "=", "fill_value_above", ",", "grid_regularizing_transform", "=", "grid_regularizing_transform", ",", "name", "=", "name", "or", "'batch_interp_regular_1d_grid'", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
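A minimal runnable sketch of the batched 1-D call documented in this record, assuming TensorFlow 2.x eager execution with `tensorflow_probability` installed; it adapts the docstring's batch example to TF2-style ops (`tf.math.log` rather than `tf.log`), and the numeric values are illustrative only.

```python
import tensorflow as tf
import tensorflow_probability as tfp

# Two batch members: an exponential table and a log table, 200 points each.
implied_x_ref = tf.stack([tf.linspace(-3., 3.2, 200),
                          tf.linspace(0.5, 3., 200)], axis=0)
y_ref = tf.stack([tf.exp(implied_x_ref[0]),
                  tf.math.log(implied_x_ref[1])], axis=0)  # Shape [2, 200].

x = tf.constant([[-1., 1., 0.],   # Shape [2, 3]: three query points per batch.
                 [1., 2., 3.]])

y = tfp.math.batch_interp_regular_1d_grid(
    x, x_ref_min=[-3., 0.5], x_ref_max=[3.2, 3.], y_ref=y_ref, axis=-1)
# y.shape == [2, 3]; y[0] ~ tf.exp(x[0]) and y[1] ~ tf.math.log(x[1]).
```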
test
batch_interp_regular_nd_grid
Multi-linear interpolation on a regular (constant spacing) grid. Given [a batch of] reference values, this function computes a multi-linear interpolant and evaluates it on [a batch of] new `x` values. The interpolant is built from reference values indexed by `nd` dimensions of `y_ref`, starting at `axis`. For example, take the case of a `2-D` scalar valued function and no leading batch dimensions. In this case, `y_ref.shape = [C1, C2]` and `y_ref[i, j]` is the reference value corresponding to grid point ``` [x_ref_min[0] + i * (x_ref_max[0] - x_ref_min[0]) / (C1 - 1), x_ref_min[1] + j * (x_ref_max[1] - x_ref_min[1]) / (C2 - 1)] ``` In the general case, dimensions to the left of `axis` in `y_ref` are broadcast with leading dimensions in `x`, `x_ref_min`, `x_ref_max`. Args: x: Numeric `Tensor`. The x-coordinates of the interpolated output values for each batch. Shape `[..., D, nd]`, designating [a batch of] `D` coordinates in `nd` space. `D` must be `>= 1` and is not a batch dim. x_ref_min: `Tensor` of same `dtype` as `x`. The minimum values of the (implicitly defined) reference `x_ref`. Shape `[..., nd]`. x_ref_max: `Tensor` of same `dtype` as `x`. The maximum values of the (implicitly defined) reference `x_ref`. Shape `[..., nd]`. y_ref: `Tensor` of same `dtype` as `x`. The reference output values. Shape `[..., C1, ..., Cnd, B1,...,BM]`, designating [a batch of] reference values indexed by `nd` dimensions, of a shape `[B1,...,BM]` valued function (for `M >= 0`). axis: Scalar integer `Tensor`. Dimensions `[axis, axis + nd)` of `y_ref` index the interpolation table. E.g. `3-D` interpolation of a scalar valued function requires `axis=-3` and a `3-D` matrix valued function requires `axis=-5`. fill_value: Determines what values output should take for `x` values that are below `x_ref_min` or above `x_ref_max`. Scalar `Tensor` or "constant_extension" ==> Extend as constant function. Default value: `"constant_extension"` name: A name to prepend to created ops. Default value: `"batch_interp_regular_nd_grid"`. Returns: y_interp: Interpolation between members of `y_ref`, at points `x`. `Tensor` of same `dtype` as `x`, and shape `[..., D, B1, ..., BM].` Raises: ValueError: If `rank(x) < 2` is determined statically. ValueError: If `axis` is not a scalar, as determined statically. ValueError: If `axis + nd > rank(y_ref)` is determined statically. #### Examples Interpolate a function of one variable. ```python y_ref = tf.exp(tf.linspace(start=0., stop=10., 20)) tfp.math.batch_interp_regular_nd_grid( # x.shape = [3, 1], x_ref_min/max.shape = [1]. Trailing `1` for `1-D`. x=[[6.0], [0.5], [3.3]], x_ref_min=[0.], x_ref_max=[10.], y_ref=y_ref) ==> approx [exp(6.0), exp(0.5), exp(3.3)] ``` Interpolate a scalar function of two variables. ```python x_ref_min = [0., 0.] x_ref_max = [2 * np.pi, 2 * np.pi] # Build y_ref. x0s, x1s = tf.meshgrid( tf.linspace(x_ref_min[0], x_ref_max[0], num=100), tf.linspace(x_ref_min[1], x_ref_max[1], num=100), indexing='ij') def func(x0, x1): return tf.sin(x0) * tf.cos(x1) y_ref = func(x0s, x1s) x = np.pi * tf.random_uniform(shape=(10, 2)) tfp.math.batch_interp_regular_nd_grid(x, x_ref_min, x_ref_max, y_ref, axis=-2) ==> tf.sin(x[:, 0]) * tf.cos(x[:, 1]) ```
tensorflow_probability/python/math/interpolation.py
def batch_interp_regular_nd_grid(x, x_ref_min, x_ref_max, y_ref, axis, fill_value='constant_extension', name=None): """Multi-linear interpolation on a regular (constant spacing) grid. Given [a batch of] reference values, this function computes a multi-linear interpolant and evaluates it on [a batch of] of new `x` values. The interpolant is built from reference values indexed by `nd` dimensions of `y_ref`, starting at `axis`. For example, take the case of a `2-D` scalar valued function and no leading batch dimensions. In this case, `y_ref.shape = [C1, C2]` and `y_ref[i, j]` is the reference value corresponding to grid point ``` [x_ref_min[0] + i * (x_ref_max[0] - x_ref_min[0]) / (C1 - 1), x_ref_min[1] + j * (x_ref_max[1] - x_ref_min[1]) / (C2 - 1)] ``` In the general case, dimensions to the left of `axis` in `y_ref` are broadcast with leading dimensions in `x`, `x_ref_min`, `x_ref_max`. Args: x: Numeric `Tensor` The x-coordinates of the interpolated output values for each batch. Shape `[..., D, nd]`, designating [a batch of] `D` coordinates in `nd` space. `D` must be `>= 1` and is not a batch dim. x_ref_min: `Tensor` of same `dtype` as `x`. The minimum values of the (implicitly defined) reference `x_ref`. Shape `[..., nd]`. x_ref_max: `Tensor` of same `dtype` as `x`. The maximum values of the (implicitly defined) reference `x_ref`. Shape `[..., nd]`. y_ref: `Tensor` of same `dtype` as `x`. The reference output values. Shape `[..., C1, ..., Cnd, B1,...,BM]`, designating [a batch of] reference values indexed by `nd` dimensions, of a shape `[B1,...,BM]` valued function (for `M >= 0`). axis: Scalar integer `Tensor`. Dimensions `[axis, axis + nd)` of `y_ref` index the interpolation table. E.g. `3-D` interpolation of a scalar valued function requires `axis=-3` and a `3-D` matrix valued function requires `axis=-5`. fill_value: Determines what values output should take for `x` values that are below `x_ref_min` or above `x_ref_max`. Scalar `Tensor` or "constant_extension" ==> Extend as constant function. Default value: `"constant_extension"` name: A name to prepend to created ops. Default value: `"batch_interp_regular_nd_grid"`. Returns: y_interp: Interpolation between members of `y_ref`, at points `x`. `Tensor` of same `dtype` as `x`, and shape `[..., D, B1, ..., BM].` Raises: ValueError: If `rank(x) < 2` is determined statically. ValueError: If `axis` is not a scalar is determined statically. ValueError: If `axis + nd > rank(y_ref)` is determined statically. #### Examples Interpolate a function of one variable. ```python y_ref = tf.exp(tf.linspace(start=0., stop=10., 20)) tfp.math.batch_interp_regular_nd_grid( # x.shape = [3, 1], x_ref_min/max.shape = [1]. Trailing `1` for `1-D`. x=[[6.0], [0.5], [3.3]], x_ref_min=[0.], x_ref_max=[1.], y_ref=y_ref) ==> approx [exp(6.0), exp(0.5), exp(3.3)] ``` Interpolate a scalar function of two variables. ```python x_ref_min = [0., 2 * np.pi] x_ref_max = [0., 2 * np.pi] # Build y_ref. 
x0s, x1s = tf.meshgrid( tf.linspace(x_ref_min[0], x_ref_max[0], num=100), tf.linspace(x_ref_min[1], x_ref_max[1], num=100), indexing='ij') def func(x0, x1): return tf.sin(x0) * tf.cos(x1) y_ref = func(x0s, x1s) x = np.pi * tf.random_uniform(shape=(10, 2)) tfp.math.batch_interp_regular_nd_grid(x, x_ref_min, x_ref_max, y_ref, axis=-2) ==> tf.sin(x[:, 0]) * tf.cos(x[:, 1]) ``` """ with tf.compat.v1.name_scope( name, default_name='interp_regular_nd_grid', values=[x, x_ref_min, x_ref_max, y_ref, fill_value]): dtype = dtype_util.common_dtype([x, x_ref_min, x_ref_max, y_ref], preferred_dtype=tf.float32) # Arg checking. if isinstance(fill_value, str): if fill_value != 'constant_extension': raise ValueError( 'A fill value ({}) was not an allowed string ({})'.format( fill_value, 'constant_extension')) else: fill_value = tf.convert_to_tensor( value=fill_value, name='fill_value', dtype=dtype) _assert_ndims_statically(fill_value, expect_ndims=0) # x.shape = [..., nd]. x = tf.convert_to_tensor(value=x, name='x', dtype=dtype) _assert_ndims_statically(x, expect_ndims_at_least=2) # y_ref.shape = [..., C1,...,Cnd, B1,...,BM] y_ref = tf.convert_to_tensor(value=y_ref, name='y_ref', dtype=dtype) # x_ref_min.shape = [nd] x_ref_min = tf.convert_to_tensor( value=x_ref_min, name='x_ref_min', dtype=dtype) x_ref_max = tf.convert_to_tensor( value=x_ref_max, name='x_ref_max', dtype=dtype) _assert_ndims_statically( x_ref_min, expect_ndims_at_least=1, expect_static=True) _assert_ndims_statically( x_ref_max, expect_ndims_at_least=1, expect_static=True) # nd is the number of dimensions indexing the interpolation table, it's the # "nd" in the function name. nd = tf.compat.dimension_value(x_ref_min.shape[-1]) if nd is None: raise ValueError('`x_ref_min.shape[-1]` must be known statically.') x_ref_max.shape[-1:].assert_is_compatible_with(x_ref_min.shape[-1:]) # Convert axis and check it statically. axis = tf.convert_to_tensor(value=axis, dtype=tf.int32, name='axis') axis = distribution_util.make_non_negative_axis(axis, tf.rank(y_ref)) axis.shape.assert_has_rank(0) axis_ = tf.get_static_value(axis) y_ref_rank_ = tf.get_static_value(tf.rank(y_ref)) if axis_ is not None and y_ref_rank_ is not None: if axis_ + nd > y_ref_rank_: raise ValueError( 'Since dims `[axis, axis + nd)` index the interpolation table, we ' 'must have `axis + nd <= rank(y_ref)`. Found: ' '`axis`: {}, rank(y_ref): {}, and inferred `nd` from trailing ' 'dimensions of `x_ref_min` to be {}.'.format( axis_, y_ref_rank_, nd)) x_batch_shape = tf.shape(input=x)[:-2] x_ref_min_batch_shape = tf.shape(input=x_ref_min)[:-1] x_ref_max_batch_shape = tf.shape(input=x_ref_max)[:-1] y_ref_batch_shape = tf.shape(input=y_ref)[:axis] # Do a brute-force broadcast of batch dims (add zeros). 
batch_shape = y_ref_batch_shape for tensor in [x_batch_shape, x_ref_min_batch_shape, x_ref_max_batch_shape]: batch_shape = tf.broadcast_dynamic_shape(batch_shape, tensor) def _batch_of_zeros_with_rightmost_singletons(n_singletons): """Return Tensor of zeros with some singletons on the rightmost dims.""" ones = tf.ones(shape=[n_singletons], dtype=tf.int32) return tf.zeros(shape=tf.concat([batch_shape, ones], axis=0), dtype=dtype) x += _batch_of_zeros_with_rightmost_singletons(n_singletons=2) x_ref_min += _batch_of_zeros_with_rightmost_singletons(n_singletons=1) x_ref_max += _batch_of_zeros_with_rightmost_singletons(n_singletons=1) y_ref += _batch_of_zeros_with_rightmost_singletons( n_singletons=tf.rank(y_ref) - axis) return _batch_interp_with_gather_nd( x=x, x_ref_min=x_ref_min, x_ref_max=x_ref_max, y_ref=y_ref, nd=nd, fill_value=fill_value, batch_dims=tf.get_static_value(tf.rank(x)) - 2)
def batch_interp_regular_nd_grid(x, x_ref_min, x_ref_max, y_ref, axis, fill_value='constant_extension', name=None): """Multi-linear interpolation on a regular (constant spacing) grid. Given [a batch of] reference values, this function computes a multi-linear interpolant and evaluates it on [a batch of] of new `x` values. The interpolant is built from reference values indexed by `nd` dimensions of `y_ref`, starting at `axis`. For example, take the case of a `2-D` scalar valued function and no leading batch dimensions. In this case, `y_ref.shape = [C1, C2]` and `y_ref[i, j]` is the reference value corresponding to grid point ``` [x_ref_min[0] + i * (x_ref_max[0] - x_ref_min[0]) / (C1 - 1), x_ref_min[1] + j * (x_ref_max[1] - x_ref_min[1]) / (C2 - 1)] ``` In the general case, dimensions to the left of `axis` in `y_ref` are broadcast with leading dimensions in `x`, `x_ref_min`, `x_ref_max`. Args: x: Numeric `Tensor` The x-coordinates of the interpolated output values for each batch. Shape `[..., D, nd]`, designating [a batch of] `D` coordinates in `nd` space. `D` must be `>= 1` and is not a batch dim. x_ref_min: `Tensor` of same `dtype` as `x`. The minimum values of the (implicitly defined) reference `x_ref`. Shape `[..., nd]`. x_ref_max: `Tensor` of same `dtype` as `x`. The maximum values of the (implicitly defined) reference `x_ref`. Shape `[..., nd]`. y_ref: `Tensor` of same `dtype` as `x`. The reference output values. Shape `[..., C1, ..., Cnd, B1,...,BM]`, designating [a batch of] reference values indexed by `nd` dimensions, of a shape `[B1,...,BM]` valued function (for `M >= 0`). axis: Scalar integer `Tensor`. Dimensions `[axis, axis + nd)` of `y_ref` index the interpolation table. E.g. `3-D` interpolation of a scalar valued function requires `axis=-3` and a `3-D` matrix valued function requires `axis=-5`. fill_value: Determines what values output should take for `x` values that are below `x_ref_min` or above `x_ref_max`. Scalar `Tensor` or "constant_extension" ==> Extend as constant function. Default value: `"constant_extension"` name: A name to prepend to created ops. Default value: `"batch_interp_regular_nd_grid"`. Returns: y_interp: Interpolation between members of `y_ref`, at points `x`. `Tensor` of same `dtype` as `x`, and shape `[..., D, B1, ..., BM].` Raises: ValueError: If `rank(x) < 2` is determined statically. ValueError: If `axis` is not a scalar is determined statically. ValueError: If `axis + nd > rank(y_ref)` is determined statically. #### Examples Interpolate a function of one variable. ```python y_ref = tf.exp(tf.linspace(start=0., stop=10., 20)) tfp.math.batch_interp_regular_nd_grid( # x.shape = [3, 1], x_ref_min/max.shape = [1]. Trailing `1` for `1-D`. x=[[6.0], [0.5], [3.3]], x_ref_min=[0.], x_ref_max=[1.], y_ref=y_ref) ==> approx [exp(6.0), exp(0.5), exp(3.3)] ``` Interpolate a scalar function of two variables. ```python x_ref_min = [0., 2 * np.pi] x_ref_max = [0., 2 * np.pi] # Build y_ref. 
x0s, x1s = tf.meshgrid( tf.linspace(x_ref_min[0], x_ref_max[0], num=100), tf.linspace(x_ref_min[1], x_ref_max[1], num=100), indexing='ij') def func(x0, x1): return tf.sin(x0) * tf.cos(x1) y_ref = func(x0s, x1s) x = np.pi * tf.random_uniform(shape=(10, 2)) tfp.math.batch_interp_regular_nd_grid(x, x_ref_min, x_ref_max, y_ref, axis=-2) ==> tf.sin(x[:, 0]) * tf.cos(x[:, 1]) ``` """ with tf.compat.v1.name_scope( name, default_name='interp_regular_nd_grid', values=[x, x_ref_min, x_ref_max, y_ref, fill_value]): dtype = dtype_util.common_dtype([x, x_ref_min, x_ref_max, y_ref], preferred_dtype=tf.float32) # Arg checking. if isinstance(fill_value, str): if fill_value != 'constant_extension': raise ValueError( 'A fill value ({}) was not an allowed string ({})'.format( fill_value, 'constant_extension')) else: fill_value = tf.convert_to_tensor( value=fill_value, name='fill_value', dtype=dtype) _assert_ndims_statically(fill_value, expect_ndims=0) # x.shape = [..., nd]. x = tf.convert_to_tensor(value=x, name='x', dtype=dtype) _assert_ndims_statically(x, expect_ndims_at_least=2) # y_ref.shape = [..., C1,...,Cnd, B1,...,BM] y_ref = tf.convert_to_tensor(value=y_ref, name='y_ref', dtype=dtype) # x_ref_min.shape = [nd] x_ref_min = tf.convert_to_tensor( value=x_ref_min, name='x_ref_min', dtype=dtype) x_ref_max = tf.convert_to_tensor( value=x_ref_max, name='x_ref_max', dtype=dtype) _assert_ndims_statically( x_ref_min, expect_ndims_at_least=1, expect_static=True) _assert_ndims_statically( x_ref_max, expect_ndims_at_least=1, expect_static=True) # nd is the number of dimensions indexing the interpolation table, it's the # "nd" in the function name. nd = tf.compat.dimension_value(x_ref_min.shape[-1]) if nd is None: raise ValueError('`x_ref_min.shape[-1]` must be known statically.') x_ref_max.shape[-1:].assert_is_compatible_with(x_ref_min.shape[-1:]) # Convert axis and check it statically. axis = tf.convert_to_tensor(value=axis, dtype=tf.int32, name='axis') axis = distribution_util.make_non_negative_axis(axis, tf.rank(y_ref)) axis.shape.assert_has_rank(0) axis_ = tf.get_static_value(axis) y_ref_rank_ = tf.get_static_value(tf.rank(y_ref)) if axis_ is not None and y_ref_rank_ is not None: if axis_ + nd > y_ref_rank_: raise ValueError( 'Since dims `[axis, axis + nd)` index the interpolation table, we ' 'must have `axis + nd <= rank(y_ref)`. Found: ' '`axis`: {}, rank(y_ref): {}, and inferred `nd` from trailing ' 'dimensions of `x_ref_min` to be {}.'.format( axis_, y_ref_rank_, nd)) x_batch_shape = tf.shape(input=x)[:-2] x_ref_min_batch_shape = tf.shape(input=x_ref_min)[:-1] x_ref_max_batch_shape = tf.shape(input=x_ref_max)[:-1] y_ref_batch_shape = tf.shape(input=y_ref)[:axis] # Do a brute-force broadcast of batch dims (add zeros). 
batch_shape = y_ref_batch_shape for tensor in [x_batch_shape, x_ref_min_batch_shape, x_ref_max_batch_shape]: batch_shape = tf.broadcast_dynamic_shape(batch_shape, tensor) def _batch_of_zeros_with_rightmost_singletons(n_singletons): """Return Tensor of zeros with some singletons on the rightmost dims.""" ones = tf.ones(shape=[n_singletons], dtype=tf.int32) return tf.zeros(shape=tf.concat([batch_shape, ones], axis=0), dtype=dtype) x += _batch_of_zeros_with_rightmost_singletons(n_singletons=2) x_ref_min += _batch_of_zeros_with_rightmost_singletons(n_singletons=1) x_ref_max += _batch_of_zeros_with_rightmost_singletons(n_singletons=1) y_ref += _batch_of_zeros_with_rightmost_singletons( n_singletons=tf.rank(y_ref) - axis) return _batch_interp_with_gather_nd( x=x, x_ref_min=x_ref_min, x_ref_max=x_ref_max, y_ref=y_ref, nd=nd, fill_value=fill_value, batch_dims=tf.get_static_value(tf.rank(x)) - 2)
[ "Multi", "-", "linear", "interpolation", "on", "a", "regular", "(", "constant", "spacing", ")", "grid", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/interpolation.py#L500-L681
[ "def", "batch_interp_regular_nd_grid", "(", "x", ",", "x_ref_min", ",", "x_ref_max", ",", "y_ref", ",", "axis", ",", "fill_value", "=", "'constant_extension'", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "default_name", "=", "'interp_regular_nd_grid'", ",", "values", "=", "[", "x", ",", "x_ref_min", ",", "x_ref_max", ",", "y_ref", ",", "fill_value", "]", ")", ":", "dtype", "=", "dtype_util", ".", "common_dtype", "(", "[", "x", ",", "x_ref_min", ",", "x_ref_max", ",", "y_ref", "]", ",", "preferred_dtype", "=", "tf", ".", "float32", ")", "# Arg checking.", "if", "isinstance", "(", "fill_value", ",", "str", ")", ":", "if", "fill_value", "!=", "'constant_extension'", ":", "raise", "ValueError", "(", "'A fill value ({}) was not an allowed string ({})'", ".", "format", "(", "fill_value", ",", "'constant_extension'", ")", ")", "else", ":", "fill_value", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "fill_value", ",", "name", "=", "'fill_value'", ",", "dtype", "=", "dtype", ")", "_assert_ndims_statically", "(", "fill_value", ",", "expect_ndims", "=", "0", ")", "# x.shape = [..., nd].", "x", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x", ",", "name", "=", "'x'", ",", "dtype", "=", "dtype", ")", "_assert_ndims_statically", "(", "x", ",", "expect_ndims_at_least", "=", "2", ")", "# y_ref.shape = [..., C1,...,Cnd, B1,...,BM]", "y_ref", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "y_ref", ",", "name", "=", "'y_ref'", ",", "dtype", "=", "dtype", ")", "# x_ref_min.shape = [nd]", "x_ref_min", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x_ref_min", ",", "name", "=", "'x_ref_min'", ",", "dtype", "=", "dtype", ")", "x_ref_max", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x_ref_max", ",", "name", "=", "'x_ref_max'", ",", "dtype", "=", "dtype", ")", "_assert_ndims_statically", "(", "x_ref_min", ",", "expect_ndims_at_least", "=", "1", ",", "expect_static", "=", "True", ")", "_assert_ndims_statically", "(", "x_ref_max", ",", "expect_ndims_at_least", "=", "1", ",", "expect_static", "=", "True", ")", "# nd is the number of dimensions indexing the interpolation table, it's the", "# \"nd\" in the function name.", "nd", "=", "tf", ".", "compat", ".", "dimension_value", "(", "x_ref_min", ".", "shape", "[", "-", "1", "]", ")", "if", "nd", "is", "None", ":", "raise", "ValueError", "(", "'`x_ref_min.shape[-1]` must be known statically.'", ")", "x_ref_max", ".", "shape", "[", "-", "1", ":", "]", ".", "assert_is_compatible_with", "(", "x_ref_min", ".", "shape", "[", "-", "1", ":", "]", ")", "# Convert axis and check it statically.", "axis", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "axis", ",", "dtype", "=", "tf", ".", "int32", ",", "name", "=", "'axis'", ")", "axis", "=", "distribution_util", ".", "make_non_negative_axis", "(", "axis", ",", "tf", ".", "rank", "(", "y_ref", ")", ")", "axis", ".", "shape", ".", "assert_has_rank", "(", "0", ")", "axis_", "=", "tf", ".", "get_static_value", "(", "axis", ")", "y_ref_rank_", "=", "tf", ".", "get_static_value", "(", "tf", ".", "rank", "(", "y_ref", ")", ")", "if", "axis_", "is", "not", "None", "and", "y_ref_rank_", "is", "not", "None", ":", "if", "axis_", "+", "nd", ">", "y_ref_rank_", ":", "raise", "ValueError", "(", "'Since dims `[axis, axis + nd)` index the interpolation table, we '", "'must have `axis + nd <= rank(y_ref)`. 
Found: '", "'`axis`: {}, rank(y_ref): {}, and inferred `nd` from trailing '", "'dimensions of `x_ref_min` to be {}.'", ".", "format", "(", "axis_", ",", "y_ref_rank_", ",", "nd", ")", ")", "x_batch_shape", "=", "tf", ".", "shape", "(", "input", "=", "x", ")", "[", ":", "-", "2", "]", "x_ref_min_batch_shape", "=", "tf", ".", "shape", "(", "input", "=", "x_ref_min", ")", "[", ":", "-", "1", "]", "x_ref_max_batch_shape", "=", "tf", ".", "shape", "(", "input", "=", "x_ref_max", ")", "[", ":", "-", "1", "]", "y_ref_batch_shape", "=", "tf", ".", "shape", "(", "input", "=", "y_ref", ")", "[", ":", "axis", "]", "# Do a brute-force broadcast of batch dims (add zeros).", "batch_shape", "=", "y_ref_batch_shape", "for", "tensor", "in", "[", "x_batch_shape", ",", "x_ref_min_batch_shape", ",", "x_ref_max_batch_shape", "]", ":", "batch_shape", "=", "tf", ".", "broadcast_dynamic_shape", "(", "batch_shape", ",", "tensor", ")", "def", "_batch_of_zeros_with_rightmost_singletons", "(", "n_singletons", ")", ":", "\"\"\"Return Tensor of zeros with some singletons on the rightmost dims.\"\"\"", "ones", "=", "tf", ".", "ones", "(", "shape", "=", "[", "n_singletons", "]", ",", "dtype", "=", "tf", ".", "int32", ")", "return", "tf", ".", "zeros", "(", "shape", "=", "tf", ".", "concat", "(", "[", "batch_shape", ",", "ones", "]", ",", "axis", "=", "0", ")", ",", "dtype", "=", "dtype", ")", "x", "+=", "_batch_of_zeros_with_rightmost_singletons", "(", "n_singletons", "=", "2", ")", "x_ref_min", "+=", "_batch_of_zeros_with_rightmost_singletons", "(", "n_singletons", "=", "1", ")", "x_ref_max", "+=", "_batch_of_zeros_with_rightmost_singletons", "(", "n_singletons", "=", "1", ")", "y_ref", "+=", "_batch_of_zeros_with_rightmost_singletons", "(", "n_singletons", "=", "tf", ".", "rank", "(", "y_ref", ")", "-", "axis", ")", "return", "_batch_interp_with_gather_nd", "(", "x", "=", "x", ",", "x_ref_min", "=", "x_ref_min", ",", "x_ref_max", "=", "x_ref_max", ",", "y_ref", "=", "y_ref", ",", "nd", "=", "nd", ",", "fill_value", "=", "fill_value", ",", "batch_dims", "=", "tf", ".", "get_static_value", "(", "tf", ".", "rank", "(", "x", ")", ")", "-", "2", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
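The 2-D example from this record's docstring, re-expressed as a runnable TF2-style sketch (`tf.random.uniform` instead of `tf.random_uniform`); the grid endpoints and point counts are illustrative assumptions, not values mandated by the API.

```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

# Tabulate f(x0, x1) = sin(x0) * cos(x1) on a 100 x 100 regular grid.
x_ref_min = [0., 0.]
x_ref_max = [2. * np.pi, 2. * np.pi]
x0s, x1s = tf.meshgrid(tf.linspace(x_ref_min[0], x_ref_max[0], 100),
                       tf.linspace(x_ref_min[1], x_ref_max[1], 100),
                       indexing='ij')
y_ref = tf.sin(x0s) * tf.cos(x1s)  # Shape [100, 100].

# Ten 2-D query points. axis=-2 because the last two dims of y_ref index the
# table and the tabulated function is scalar valued.
x = np.pi * tf.random.uniform(shape=(10, 2))
y = tfp.math.batch_interp_regular_nd_grid(
    x, x_ref_min=x_ref_min, x_ref_max=x_ref_max, y_ref=y_ref, axis=-2)
# y ~ tf.sin(x[:, 0]) * tf.cos(x[:, 1])
```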
test
_batch_interp_with_gather_nd
N-D interpolation that works with leading batch dims.
tensorflow_probability/python/math/interpolation.py
def _batch_interp_with_gather_nd(x, x_ref_min, x_ref_max, y_ref, nd, fill_value, batch_dims): """N-D interpolation that works with leading batch dims.""" dtype = x.dtype # In this function, # x.shape = [A1, ..., An, D, nd], where n = batch_dims # and # y_ref.shape = [A1, ..., An, C1, C2,..., Cnd, B1,...,BM] # y_ref[A1, ..., An, i1,...,ind] is a shape [B1,...,BM] Tensor with the value # at index [i1,...,ind] in the interpolation table. # and x_ref_max have shapes [A1, ..., An, nd]. # ny[k] is number of y reference points in interp dim k. ny = tf.cast(tf.shape(input=y_ref)[batch_dims:batch_dims + nd], dtype) # Map [x_ref_min, x_ref_max] to [0, ny - 1]. # This is the (fractional) index of x. # x_idx_unclipped[A1, ..., An, d, k] is the fractional index into dim k of # interpolation table for the dth x value. x_ref_min_expanded = tf.expand_dims(x_ref_min, axis=-2) x_ref_max_expanded = tf.expand_dims(x_ref_max, axis=-2) x_idx_unclipped = (ny - 1) * (x - x_ref_min_expanded) / ( x_ref_max_expanded - x_ref_min_expanded) # Wherever x is NaN, x_idx_unclipped will be NaN as well. # Keep track of the nan indices here (so we can impute NaN later). # Also eliminate any NaN indices, since there is not NaN in 32bit. nan_idx = tf.math.is_nan(x_idx_unclipped) x_idx_unclipped = tf.where(nan_idx, tf.zeros_like(x_idx_unclipped), x_idx_unclipped) # x_idx.shape = [A1, ..., An, D, nd] x_idx = tf.clip_by_value(x_idx_unclipped, tf.zeros((), dtype=dtype), ny - 1) # Get the index above and below x_idx. # Naively we could set idx_below = floor(x_idx), idx_above = ceil(x_idx), # however, this results in idx_below == idx_above whenever x is on a grid. # This in turn results in y_ref_below == y_ref_above, and then the gradient # at this point is zero. So here we "jitter" one of idx_below, idx_above, # so that they are at different values. This jittering does not affect the # interpolated value, but does make the gradient nonzero (unless of course # the y_ref values are the same). idx_below = tf.floor(x_idx) idx_above = tf.minimum(idx_below + 1, ny - 1) idx_below = tf.maximum(idx_above - 1, 0) # These are the values of y_ref corresponding to above/below indices. # idx_below_int32.shape = x.shape[:-1] + [nd] idx_below_int32 = tf.cast(idx_below, dtype=tf.int32) idx_above_int32 = tf.cast(idx_above, dtype=tf.int32) # idx_below_list is a length nd list of shape x.shape[:-1] int32 tensors. idx_below_list = tf.unstack(idx_below_int32, axis=-1) idx_above_list = tf.unstack(idx_above_int32, axis=-1) # Use t to get a convex combination of the below/above values. # t.shape = [A1, ..., An, D, nd] t = x_idx - idx_below # x, and tensors shaped like x, need to be added to, and selected with # (using tf.where) the output y. This requires appending singletons. def _expand_x_fn(tensor): # Reshape tensor to tensor.shape + [1] * M. extended_shape = tf.concat([ tf.shape(input=tensor), tf.ones_like(tf.shape(input=y_ref)[batch_dims + nd:]) ], axis=0) return tf.reshape(tensor, extended_shape) # Now, t.shape = [A1, ..., An, D, nd] + [1] * (rank(y_ref) - nd - batch_dims) t = _expand_x_fn(t) s = 1 - t # Re-insert NaN wherever x was NaN. nan_idx = _expand_x_fn(nan_idx) t = tf.where(nan_idx, tf.fill(tf.shape(input=t), tf.constant(np.nan, dtype)), t) terms = [] # Our work above has located x's fractional index inside a cube of above/below # indices. The distance to the below indices is t, and to the above indices # is s. # Drawing lines from x to the cube walls, we get 2**nd smaller cubes. 
Each # term in the result is a product of a reference point, gathered from y_ref, # multiplied by a volume. The volume is that of the cube opposite to the # reference point. E.g. if the reference point is below x in every axis, the # volume is that of the cube with corner above x in every axis, s[0]*...*s[nd] # We could probably do this with one massive gather, but that would be very # unreadable and un-debuggable. It also would create a large Tensor. for zero_ones_list in _binary_count(nd): gather_from_y_ref_idx = [] opposite_volume_t_idx = [] opposite_volume_s_idx = [] for k, zero_or_one in enumerate(zero_ones_list): if zero_or_one == 0: # If the kth iterate has zero_or_one = 0, # Will gather from the "below" reference point along axis k. gather_from_y_ref_idx.append(idx_below_list[k]) # Now append the index to gather for computing opposite_volume. # This could be done by initializing opposite_volume to 1, then here: # opposite_volume *= tf.gather(s, indices=k, axis=tf.rank(x) - 1) # but that puts a gather in the "inner loop." Better to append the # index and do one larger gather down below. opposite_volume_s_idx.append(k) else: gather_from_y_ref_idx.append(idx_above_list[k]) # Append an index to gather, having the same effect as # opposite_volume *= tf.gather(t, indices=k, axis=tf.rank(x) - 1) opposite_volume_t_idx.append(k) # Compute opposite_volume (volume of cube opposite the ref point): # Recall t.shape = s.shape = [D, nd] + [1, ..., 1] # Gather from t and s along the "nd" axis, which is rank(x) - 1. ov_axis = tf.rank(x) - 1 opposite_volume = ( tf.reduce_prod( input_tensor=tf.gather( t, indices=tf.cast(opposite_volume_t_idx, dtype=tf.int32), axis=ov_axis), axis=ov_axis) * tf.reduce_prod( input_tensor=tf.gather( s, indices=tf.cast(opposite_volume_s_idx, dtype=tf.int32), axis=ov_axis), axis=ov_axis) ) # pyformat: disable y_ref_pt = tf.gather_nd( y_ref, tf.stack(gather_from_y_ref_idx, axis=-1), batch_dims=batch_dims) terms.append(y_ref_pt * opposite_volume) y = tf.math.add_n(terms) if tf.debugging.is_numeric_tensor(fill_value): # Recall x_idx_unclipped.shape = [D, nd], # so here we check if it was out of bounds in any of the nd dims. # Thus, oob_idx.shape = [D]. oob_idx = tf.reduce_any( input_tensor=(x_idx_unclipped < 0) | (x_idx_unclipped > ny - 1), axis=-1) # Now, y.shape = [D, B1,...,BM], so we'll have to broadcast oob_idx. oob_idx = _expand_x_fn(oob_idx) # Shape [D, 1,...,1] oob_idx |= tf.fill(tf.shape(input=y), False) y = tf.where(oob_idx, tf.fill(tf.shape(input=y), fill_value), y) return y
def _batch_interp_with_gather_nd(x, x_ref_min, x_ref_max, y_ref, nd, fill_value, batch_dims): """N-D interpolation that works with leading batch dims.""" dtype = x.dtype # In this function, # x.shape = [A1, ..., An, D, nd], where n = batch_dims # and # y_ref.shape = [A1, ..., An, C1, C2,..., Cnd, B1,...,BM] # y_ref[A1, ..., An, i1,...,ind] is a shape [B1,...,BM] Tensor with the value # at index [i1,...,ind] in the interpolation table. # and x_ref_max have shapes [A1, ..., An, nd]. # ny[k] is number of y reference points in interp dim k. ny = tf.cast(tf.shape(input=y_ref)[batch_dims:batch_dims + nd], dtype) # Map [x_ref_min, x_ref_max] to [0, ny - 1]. # This is the (fractional) index of x. # x_idx_unclipped[A1, ..., An, d, k] is the fractional index into dim k of # interpolation table for the dth x value. x_ref_min_expanded = tf.expand_dims(x_ref_min, axis=-2) x_ref_max_expanded = tf.expand_dims(x_ref_max, axis=-2) x_idx_unclipped = (ny - 1) * (x - x_ref_min_expanded) / ( x_ref_max_expanded - x_ref_min_expanded) # Wherever x is NaN, x_idx_unclipped will be NaN as well. # Keep track of the nan indices here (so we can impute NaN later). # Also eliminate any NaN indices, since there is not NaN in 32bit. nan_idx = tf.math.is_nan(x_idx_unclipped) x_idx_unclipped = tf.where(nan_idx, tf.zeros_like(x_idx_unclipped), x_idx_unclipped) # x_idx.shape = [A1, ..., An, D, nd] x_idx = tf.clip_by_value(x_idx_unclipped, tf.zeros((), dtype=dtype), ny - 1) # Get the index above and below x_idx. # Naively we could set idx_below = floor(x_idx), idx_above = ceil(x_idx), # however, this results in idx_below == idx_above whenever x is on a grid. # This in turn results in y_ref_below == y_ref_above, and then the gradient # at this point is zero. So here we "jitter" one of idx_below, idx_above, # so that they are at different values. This jittering does not affect the # interpolated value, but does make the gradient nonzero (unless of course # the y_ref values are the same). idx_below = tf.floor(x_idx) idx_above = tf.minimum(idx_below + 1, ny - 1) idx_below = tf.maximum(idx_above - 1, 0) # These are the values of y_ref corresponding to above/below indices. # idx_below_int32.shape = x.shape[:-1] + [nd] idx_below_int32 = tf.cast(idx_below, dtype=tf.int32) idx_above_int32 = tf.cast(idx_above, dtype=tf.int32) # idx_below_list is a length nd list of shape x.shape[:-1] int32 tensors. idx_below_list = tf.unstack(idx_below_int32, axis=-1) idx_above_list = tf.unstack(idx_above_int32, axis=-1) # Use t to get a convex combination of the below/above values. # t.shape = [A1, ..., An, D, nd] t = x_idx - idx_below # x, and tensors shaped like x, need to be added to, and selected with # (using tf.where) the output y. This requires appending singletons. def _expand_x_fn(tensor): # Reshape tensor to tensor.shape + [1] * M. extended_shape = tf.concat([ tf.shape(input=tensor), tf.ones_like(tf.shape(input=y_ref)[batch_dims + nd:]) ], axis=0) return tf.reshape(tensor, extended_shape) # Now, t.shape = [A1, ..., An, D, nd] + [1] * (rank(y_ref) - nd - batch_dims) t = _expand_x_fn(t) s = 1 - t # Re-insert NaN wherever x was NaN. nan_idx = _expand_x_fn(nan_idx) t = tf.where(nan_idx, tf.fill(tf.shape(input=t), tf.constant(np.nan, dtype)), t) terms = [] # Our work above has located x's fractional index inside a cube of above/below # indices. The distance to the below indices is t, and to the above indices # is s. # Drawing lines from x to the cube walls, we get 2**nd smaller cubes. 
Each # term in the result is a product of a reference point, gathered from y_ref, # multiplied by a volume. The volume is that of the cube opposite to the # reference point. E.g. if the reference point is below x in every axis, the # volume is that of the cube with corner above x in every axis, s[0]*...*s[nd] # We could probably do this with one massive gather, but that would be very # unreadable and un-debuggable. It also would create a large Tensor. for zero_ones_list in _binary_count(nd): gather_from_y_ref_idx = [] opposite_volume_t_idx = [] opposite_volume_s_idx = [] for k, zero_or_one in enumerate(zero_ones_list): if zero_or_one == 0: # If the kth iterate has zero_or_one = 0, # Will gather from the "below" reference point along axis k. gather_from_y_ref_idx.append(idx_below_list[k]) # Now append the index to gather for computing opposite_volume. # This could be done by initializing opposite_volume to 1, then here: # opposite_volume *= tf.gather(s, indices=k, axis=tf.rank(x) - 1) # but that puts a gather in the "inner loop." Better to append the # index and do one larger gather down below. opposite_volume_s_idx.append(k) else: gather_from_y_ref_idx.append(idx_above_list[k]) # Append an index to gather, having the same effect as # opposite_volume *= tf.gather(t, indices=k, axis=tf.rank(x) - 1) opposite_volume_t_idx.append(k) # Compute opposite_volume (volume of cube opposite the ref point): # Recall t.shape = s.shape = [D, nd] + [1, ..., 1] # Gather from t and s along the "nd" axis, which is rank(x) - 1. ov_axis = tf.rank(x) - 1 opposite_volume = ( tf.reduce_prod( input_tensor=tf.gather( t, indices=tf.cast(opposite_volume_t_idx, dtype=tf.int32), axis=ov_axis), axis=ov_axis) * tf.reduce_prod( input_tensor=tf.gather( s, indices=tf.cast(opposite_volume_s_idx, dtype=tf.int32), axis=ov_axis), axis=ov_axis) ) # pyformat: disable y_ref_pt = tf.gather_nd( y_ref, tf.stack(gather_from_y_ref_idx, axis=-1), batch_dims=batch_dims) terms.append(y_ref_pt * opposite_volume) y = tf.math.add_n(terms) if tf.debugging.is_numeric_tensor(fill_value): # Recall x_idx_unclipped.shape = [D, nd], # so here we check if it was out of bounds in any of the nd dims. # Thus, oob_idx.shape = [D]. oob_idx = tf.reduce_any( input_tensor=(x_idx_unclipped < 0) | (x_idx_unclipped > ny - 1), axis=-1) # Now, y.shape = [D, B1,...,BM], so we'll have to broadcast oob_idx. oob_idx = _expand_x_fn(oob_idx) # Shape [D, 1,...,1] oob_idx |= tf.fill(tf.shape(input=y), False) y = tf.where(oob_idx, tf.fill(tf.shape(input=y), fill_value), y) return y
[ "N", "-", "D", "interpolation", "that", "works", "with", "leading", "batch", "dims", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/interpolation.py#L684-L836
[ "def", "_batch_interp_with_gather_nd", "(", "x", ",", "x_ref_min", ",", "x_ref_max", ",", "y_ref", ",", "nd", ",", "fill_value", ",", "batch_dims", ")", ":", "dtype", "=", "x", ".", "dtype", "# In this function,", "# x.shape = [A1, ..., An, D, nd], where n = batch_dims", "# and", "# y_ref.shape = [A1, ..., An, C1, C2,..., Cnd, B1,...,BM]", "# y_ref[A1, ..., An, i1,...,ind] is a shape [B1,...,BM] Tensor with the value", "# at index [i1,...,ind] in the interpolation table.", "# and x_ref_max have shapes [A1, ..., An, nd].", "# ny[k] is number of y reference points in interp dim k.", "ny", "=", "tf", ".", "cast", "(", "tf", ".", "shape", "(", "input", "=", "y_ref", ")", "[", "batch_dims", ":", "batch_dims", "+", "nd", "]", ",", "dtype", ")", "# Map [x_ref_min, x_ref_max] to [0, ny - 1].", "# This is the (fractional) index of x.", "# x_idx_unclipped[A1, ..., An, d, k] is the fractional index into dim k of", "# interpolation table for the dth x value.", "x_ref_min_expanded", "=", "tf", ".", "expand_dims", "(", "x_ref_min", ",", "axis", "=", "-", "2", ")", "x_ref_max_expanded", "=", "tf", ".", "expand_dims", "(", "x_ref_max", ",", "axis", "=", "-", "2", ")", "x_idx_unclipped", "=", "(", "ny", "-", "1", ")", "*", "(", "x", "-", "x_ref_min_expanded", ")", "/", "(", "x_ref_max_expanded", "-", "x_ref_min_expanded", ")", "# Wherever x is NaN, x_idx_unclipped will be NaN as well.", "# Keep track of the nan indices here (so we can impute NaN later).", "# Also eliminate any NaN indices, since there is not NaN in 32bit.", "nan_idx", "=", "tf", ".", "math", ".", "is_nan", "(", "x_idx_unclipped", ")", "x_idx_unclipped", "=", "tf", ".", "where", "(", "nan_idx", ",", "tf", ".", "zeros_like", "(", "x_idx_unclipped", ")", ",", "x_idx_unclipped", ")", "# x_idx.shape = [A1, ..., An, D, nd]", "x_idx", "=", "tf", ".", "clip_by_value", "(", "x_idx_unclipped", ",", "tf", ".", "zeros", "(", "(", ")", ",", "dtype", "=", "dtype", ")", ",", "ny", "-", "1", ")", "# Get the index above and below x_idx.", "# Naively we could set idx_below = floor(x_idx), idx_above = ceil(x_idx),", "# however, this results in idx_below == idx_above whenever x is on a grid.", "# This in turn results in y_ref_below == y_ref_above, and then the gradient", "# at this point is zero. So here we \"jitter\" one of idx_below, idx_above,", "# so that they are at different values. 
This jittering does not affect the", "# interpolated value, but does make the gradient nonzero (unless of course", "# the y_ref values are the same).", "idx_below", "=", "tf", ".", "floor", "(", "x_idx", ")", "idx_above", "=", "tf", ".", "minimum", "(", "idx_below", "+", "1", ",", "ny", "-", "1", ")", "idx_below", "=", "tf", ".", "maximum", "(", "idx_above", "-", "1", ",", "0", ")", "# These are the values of y_ref corresponding to above/below indices.", "# idx_below_int32.shape = x.shape[:-1] + [nd]", "idx_below_int32", "=", "tf", ".", "cast", "(", "idx_below", ",", "dtype", "=", "tf", ".", "int32", ")", "idx_above_int32", "=", "tf", ".", "cast", "(", "idx_above", ",", "dtype", "=", "tf", ".", "int32", ")", "# idx_below_list is a length nd list of shape x.shape[:-1] int32 tensors.", "idx_below_list", "=", "tf", ".", "unstack", "(", "idx_below_int32", ",", "axis", "=", "-", "1", ")", "idx_above_list", "=", "tf", ".", "unstack", "(", "idx_above_int32", ",", "axis", "=", "-", "1", ")", "# Use t to get a convex combination of the below/above values.", "# t.shape = [A1, ..., An, D, nd]", "t", "=", "x_idx", "-", "idx_below", "# x, and tensors shaped like x, need to be added to, and selected with", "# (using tf.where) the output y. This requires appending singletons.", "def", "_expand_x_fn", "(", "tensor", ")", ":", "# Reshape tensor to tensor.shape + [1] * M.", "extended_shape", "=", "tf", ".", "concat", "(", "[", "tf", ".", "shape", "(", "input", "=", "tensor", ")", ",", "tf", ".", "ones_like", "(", "tf", ".", "shape", "(", "input", "=", "y_ref", ")", "[", "batch_dims", "+", "nd", ":", "]", ")", "]", ",", "axis", "=", "0", ")", "return", "tf", ".", "reshape", "(", "tensor", ",", "extended_shape", ")", "# Now, t.shape = [A1, ..., An, D, nd] + [1] * (rank(y_ref) - nd - batch_dims)", "t", "=", "_expand_x_fn", "(", "t", ")", "s", "=", "1", "-", "t", "# Re-insert NaN wherever x was NaN.", "nan_idx", "=", "_expand_x_fn", "(", "nan_idx", ")", "t", "=", "tf", ".", "where", "(", "nan_idx", ",", "tf", ".", "fill", "(", "tf", ".", "shape", "(", "input", "=", "t", ")", ",", "tf", ".", "constant", "(", "np", ".", "nan", ",", "dtype", ")", ")", ",", "t", ")", "terms", "=", "[", "]", "# Our work above has located x's fractional index inside a cube of above/below", "# indices. The distance to the below indices is t, and to the above indices", "# is s.", "# Drawing lines from x to the cube walls, we get 2**nd smaller cubes. Each", "# term in the result is a product of a reference point, gathered from y_ref,", "# multiplied by a volume. The volume is that of the cube opposite to the", "# reference point. E.g. if the reference point is below x in every axis, the", "# volume is that of the cube with corner above x in every axis, s[0]*...*s[nd]", "# We could probably do this with one massive gather, but that would be very", "# unreadable and un-debuggable. 
It also would create a large Tensor.", "for", "zero_ones_list", "in", "_binary_count", "(", "nd", ")", ":", "gather_from_y_ref_idx", "=", "[", "]", "opposite_volume_t_idx", "=", "[", "]", "opposite_volume_s_idx", "=", "[", "]", "for", "k", ",", "zero_or_one", "in", "enumerate", "(", "zero_ones_list", ")", ":", "if", "zero_or_one", "==", "0", ":", "# If the kth iterate has zero_or_one = 0,", "# Will gather from the \"below\" reference point along axis k.", "gather_from_y_ref_idx", ".", "append", "(", "idx_below_list", "[", "k", "]", ")", "# Now append the index to gather for computing opposite_volume.", "# This could be done by initializing opposite_volume to 1, then here:", "# opposite_volume *= tf.gather(s, indices=k, axis=tf.rank(x) - 1)", "# but that puts a gather in the \"inner loop.\" Better to append the", "# index and do one larger gather down below.", "opposite_volume_s_idx", ".", "append", "(", "k", ")", "else", ":", "gather_from_y_ref_idx", ".", "append", "(", "idx_above_list", "[", "k", "]", ")", "# Append an index to gather, having the same effect as", "# opposite_volume *= tf.gather(t, indices=k, axis=tf.rank(x) - 1)", "opposite_volume_t_idx", ".", "append", "(", "k", ")", "# Compute opposite_volume (volume of cube opposite the ref point):", "# Recall t.shape = s.shape = [D, nd] + [1, ..., 1]", "# Gather from t and s along the \"nd\" axis, which is rank(x) - 1.", "ov_axis", "=", "tf", ".", "rank", "(", "x", ")", "-", "1", "opposite_volume", "=", "(", "tf", ".", "reduce_prod", "(", "input_tensor", "=", "tf", ".", "gather", "(", "t", ",", "indices", "=", "tf", ".", "cast", "(", "opposite_volume_t_idx", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "axis", "=", "ov_axis", ")", ",", "axis", "=", "ov_axis", ")", "*", "tf", ".", "reduce_prod", "(", "input_tensor", "=", "tf", ".", "gather", "(", "s", ",", "indices", "=", "tf", ".", "cast", "(", "opposite_volume_s_idx", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "axis", "=", "ov_axis", ")", ",", "axis", "=", "ov_axis", ")", ")", "# pyformat: disable", "y_ref_pt", "=", "tf", ".", "gather_nd", "(", "y_ref", ",", "tf", ".", "stack", "(", "gather_from_y_ref_idx", ",", "axis", "=", "-", "1", ")", ",", "batch_dims", "=", "batch_dims", ")", "terms", ".", "append", "(", "y_ref_pt", "*", "opposite_volume", ")", "y", "=", "tf", ".", "math", ".", "add_n", "(", "terms", ")", "if", "tf", ".", "debugging", ".", "is_numeric_tensor", "(", "fill_value", ")", ":", "# Recall x_idx_unclipped.shape = [D, nd],", "# so here we check if it was out of bounds in any of the nd dims.", "# Thus, oob_idx.shape = [D].", "oob_idx", "=", "tf", ".", "reduce_any", "(", "input_tensor", "=", "(", "x_idx_unclipped", "<", "0", ")", "|", "(", "x_idx_unclipped", ">", "ny", "-", "1", ")", ",", "axis", "=", "-", "1", ")", "# Now, y.shape = [D, B1,...,BM], so we'll have to broadcast oob_idx.", "oob_idx", "=", "_expand_x_fn", "(", "oob_idx", ")", "# Shape [D, 1,...,1]", "oob_idx", "|=", "tf", ".", "fill", "(", "tf", ".", "shape", "(", "input", "=", "y", ")", ",", "False", ")", "y", "=", "tf", ".", "where", "(", "oob_idx", ",", "tf", ".", "fill", "(", "tf", ".", "shape", "(", "input", "=", "y", ")", ",", "fill_value", ")", ",", "y", ")", "return", "y" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
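The helper above evaluates the interpolant as a sum over the 2**nd corners of the grid cell containing each point, weighting every corner value by the volume of the sub-cell opposite that corner. The NumPy sketch below re-derives that weighting for `nd=2` with no batching, NaN, or fill-value handling; the function name and test values are invented for illustration.

```python
import itertools
import numpy as np

def bilinear_corner_sum(x, x_ref_min, x_ref_max, y_ref):
  """Sum over the 2**nd corner terms for nd=2 (plain bilinear interpolation)."""
  x = np.asarray(x, dtype=float)
  x_ref_min = np.asarray(x_ref_min, dtype=float)
  x_ref_max = np.asarray(x_ref_max, dtype=float)
  ny = np.array(y_ref.shape, dtype=float)           # Table size per dimension.
  # Fractional index of x in each dimension, clipped to the table.
  x_idx = np.clip((ny - 1.) * (x - x_ref_min) / (x_ref_max - x_ref_min),
                  0., ny - 1.)
  idx_below = np.floor(x_idx)
  idx_above = np.minimum(idx_below + 1., ny - 1.)
  idx_below = np.maximum(idx_above - 1., 0.)        # Same "jitter" as above.
  t = x_idx - idx_below                             # In [0, 1] per dimension.
  y = 0.
  for corner in itertools.product((0, 1), repeat=2):  # 2**nd = 4 corners.
    idx = tuple(int((idx_above if bit else idx_below)[k])
                for k, bit in enumerate(corner))
    # Weight = product over dims of t (above) or 1 - t (below): the volume of
    # the sub-cell opposite the chosen corner.
    weight = np.prod([t[k] if bit else 1. - t[k]
                      for k, bit in enumerate(corner)])
    y += weight * y_ref[idx]
  return y

# On a 2 x 2 table this reproduces exact bilinear interpolation:
y_ref = np.array([[0., 1.], [2., 3.]])
print(bilinear_corner_sum([0.5, 0.5], [0., 0.], [1., 1.], y_ref))  # ==> 1.5
```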
test
_assert_ndims_statically
Assert that Tensor x has expected number of dimensions.
tensorflow_probability/python/math/interpolation.py
def _assert_ndims_statically(x, expect_ndims=None, expect_ndims_at_least=None, expect_static=False): """Assert that Tensor x has expected number of dimensions.""" ndims = x.shape.ndims if ndims is None: if expect_static: raise ValueError('Expected static ndims. Found: {}'.format(x)) return if expect_ndims is not None and ndims != expect_ndims: raise ValueError('ndims must be {}. Found: {}'.format(expect_ndims, ndims)) if expect_ndims_at_least is not None and ndims < expect_ndims_at_least: raise ValueError('ndims must be at least {}. Found {}'.format( expect_ndims_at_least, ndims))
def _assert_ndims_statically(x, expect_ndims=None, expect_ndims_at_least=None, expect_static=False): """Assert that Tensor x has expected number of dimensions.""" ndims = x.shape.ndims if ndims is None: if expect_static: raise ValueError('Expected static ndims. Found: {}'.format(x)) return if expect_ndims is not None and ndims != expect_ndims: raise ValueError('ndims must be {}. Found: {}'.format(expect_ndims, ndims)) if expect_ndims_at_least is not None and ndims < expect_ndims_at_least: raise ValueError('ndims must be at least {}. Found {}'.format( expect_ndims_at_least, ndims))
[ "Assert", "that", "Tensor", "x", "has", "expected", "number", "of", "dimensions", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/interpolation.py#L839-L853
[ "def", "_assert_ndims_statically", "(", "x", ",", "expect_ndims", "=", "None", ",", "expect_ndims_at_least", "=", "None", ",", "expect_static", "=", "False", ")", ":", "ndims", "=", "x", ".", "shape", ".", "ndims", "if", "ndims", "is", "None", ":", "if", "expect_static", ":", "raise", "ValueError", "(", "'Expected static ndims. Found: {}'", ".", "format", "(", "x", ")", ")", "return", "if", "expect_ndims", "is", "not", "None", "and", "ndims", "!=", "expect_ndims", ":", "raise", "ValueError", "(", "'ndims must be {}. Found: {}'", ".", "format", "(", "expect_ndims", ",", "ndims", ")", ")", "if", "expect_ndims_at_least", "is", "not", "None", "and", "ndims", "<", "expect_ndims_at_least", ":", "raise", "ValueError", "(", "'ndims must be at least {}. Found {}'", ".", "format", "(", "expect_ndims_at_least", ",", "ndims", ")", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
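Although `_assert_ndims_statically` is private, its effect is visible through the public entry points that call it; for example, `batch_interp_regular_nd_grid` requires `x` to have static rank at least 2. A small sketch, assuming the error format string shown in this record:

```python
import tensorflow as tf
import tensorflow_probability as tfp

y_ref = tf.zeros([4, 4])
try:
  # `x` must be shaped [..., D, nd] (rank >= 2); a rank-1 `x` trips the static
  # ndims check before any interpolation is attempted.
  tfp.math.batch_interp_regular_nd_grid(
      x=[0.5, 0.5], x_ref_min=[0., 0.], x_ref_max=[1., 1.],
      y_ref=y_ref, axis=-2)
except ValueError as e:
  print(e)  # ==> "ndims must be at least 2. Found 1"
```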
test
_make_expand_x_fn_for_non_batch_interpolation
Make func to expand left/right (of axis) dims of tensors shaped like x.
tensorflow_probability/python/math/interpolation.py
def _make_expand_x_fn_for_non_batch_interpolation(y_ref, axis): """Make func to expand left/right (of axis) dims of tensors shaped like x.""" # This expansion is to help x broadcast with `y`, the output. # In the non-batch case, the output shape is going to be # y_ref.shape[:axis] + x.shape + y_ref.shape[axis+1:] # Recall we made axis non-negative y_ref_shape = tf.shape(input=y_ref) y_ref_shape_left = y_ref_shape[:axis] y_ref_shape_right = y_ref_shape[axis + 1:] def expand_ends(x, broadcast=False): """Expand x so it can bcast w/ tensors of output shape.""" # Assume out_shape = A + x.shape + B, and rank(A) = axis. # Expand with singletons with same rank as A, B. expanded_shape = tf.pad( tensor=tf.shape(input=x), paddings=[[axis, tf.size(input=y_ref_shape_right)]], constant_values=1) x_expanded = tf.reshape(x, expanded_shape) if broadcast: out_shape = tf.concat(( y_ref_shape_left, tf.shape(input=x), y_ref_shape_right, ), axis=0) if x.dtype.is_bool: x_expanded = x_expanded | tf.cast(tf.zeros(out_shape), tf.bool) else: x_expanded += tf.zeros(out_shape, dtype=x.dtype) return x_expanded return expand_ends
def _make_expand_x_fn_for_non_batch_interpolation(y_ref, axis): """Make func to expand left/right (of axis) dims of tensors shaped like x.""" # This expansion is to help x broadcast with `y`, the output. # In the non-batch case, the output shape is going to be # y_ref.shape[:axis] + x.shape + y_ref.shape[axis+1:] # Recall we made axis non-negative y_ref_shape = tf.shape(input=y_ref) y_ref_shape_left = y_ref_shape[:axis] y_ref_shape_right = y_ref_shape[axis + 1:] def expand_ends(x, broadcast=False): """Expand x so it can bcast w/ tensors of output shape.""" # Assume out_shape = A + x.shape + B, and rank(A) = axis. # Expand with singletons with same rank as A, B. expanded_shape = tf.pad( tensor=tf.shape(input=x), paddings=[[axis, tf.size(input=y_ref_shape_right)]], constant_values=1) x_expanded = tf.reshape(x, expanded_shape) if broadcast: out_shape = tf.concat(( y_ref_shape_left, tf.shape(input=x), y_ref_shape_right, ), axis=0) if x.dtype.is_bool: x_expanded = x_expanded | tf.cast(tf.zeros(out_shape), tf.bool) else: x_expanded += tf.zeros(out_shape, dtype=x.dtype) return x_expanded return expand_ends
[ "Make", "func", "to", "expand", "left", "/", "right", "(", "of", "axis", ")", "dims", "of", "tensors", "shaped", "like", "x", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/interpolation.py#L856-L890
[ "def", "_make_expand_x_fn_for_non_batch_interpolation", "(", "y_ref", ",", "axis", ")", ":", "# This expansion is to help x broadcast with `y`, the output.", "# In the non-batch case, the output shape is going to be", "# y_ref.shape[:axis] + x.shape + y_ref.shape[axis+1:]", "# Recall we made axis non-negative", "y_ref_shape", "=", "tf", ".", "shape", "(", "input", "=", "y_ref", ")", "y_ref_shape_left", "=", "y_ref_shape", "[", ":", "axis", "]", "y_ref_shape_right", "=", "y_ref_shape", "[", "axis", "+", "1", ":", "]", "def", "expand_ends", "(", "x", ",", "broadcast", "=", "False", ")", ":", "\"\"\"Expand x so it can bcast w/ tensors of output shape.\"\"\"", "# Assume out_shape = A + x.shape + B, and rank(A) = axis.", "# Expand with singletons with same rank as A, B.", "expanded_shape", "=", "tf", ".", "pad", "(", "tensor", "=", "tf", ".", "shape", "(", "input", "=", "x", ")", ",", "paddings", "=", "[", "[", "axis", ",", "tf", ".", "size", "(", "input", "=", "y_ref_shape_right", ")", "]", "]", ",", "constant_values", "=", "1", ")", "x_expanded", "=", "tf", ".", "reshape", "(", "x", ",", "expanded_shape", ")", "if", "broadcast", ":", "out_shape", "=", "tf", ".", "concat", "(", "(", "y_ref_shape_left", ",", "tf", ".", "shape", "(", "input", "=", "x", ")", ",", "y_ref_shape_right", ",", ")", ",", "axis", "=", "0", ")", "if", "x", ".", "dtype", ".", "is_bool", ":", "x_expanded", "=", "x_expanded", "|", "tf", ".", "cast", "(", "tf", ".", "zeros", "(", "out_shape", ")", ",", "tf", ".", "bool", ")", "else", ":", "x_expanded", "+=", "tf", ".", "zeros", "(", "out_shape", ",", "dtype", "=", "x", ".", "dtype", ")", "return", "x_expanded", "return", "expand_ends" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
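A shape-only illustration of the closure this factory returns, with sizes invented for the sketch: `x`'s shape is padded with singletons so it broadcasts against the non-batch output shape `y_ref.shape[:axis] + x.shape + y_ref.shape[axis+1:]`.

```python
import tensorflow as tf

axis = 1
y_ref = tf.zeros([2, 5, 3])  # The table axis (axis=1) holds C = 5 ref values.
x = tf.zeros([7])            # Seven interpolation points.

# Pad x.shape with `axis` singleton dims on the left and
# rank(y_ref) - axis - 1 singleton dims on the right (here: 1 and 1).
expanded_shape = tf.pad(tf.shape(x), paddings=[[axis, 1]], constant_values=1)
x_expanded = tf.reshape(x, expanded_shape)
print(x_expanded.shape)      # ==> (1, 7, 1), broadcastable with [2, 7, 3].
```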
test
_make_expand_x_fn_for_batch_interpolation
Make func to expand left/right (of axis) dims of tensors shaped like x.
tensorflow_probability/python/math/interpolation.py
def _make_expand_x_fn_for_batch_interpolation(y_ref, axis): """Make func to expand left/right (of axis) dims of tensors shaped like x.""" # This expansion is to help x broadcast with `y`, the output. # In the batch case, the output shape is going to be # Broadcast(y_ref.shape[:axis], x.shape[:-1]) + # x.shape[-1:] + y_ref.shape[axis+1:] # Recall we made axis non-negative y_ref_shape = tf.shape(input=y_ref) y_ref_shape_left = y_ref_shape[:axis] y_ref_shape_right = y_ref_shape[axis + 1:] def expand_right_dims(x, broadcast=False): """Expand x so it can bcast w/ tensors of output shape.""" expanded_shape_left = tf.broadcast_dynamic_shape( tf.shape(input=x)[:-1], tf.ones([tf.size(input=y_ref_shape_left)], dtype=tf.int32)) expanded_shape = tf.concat( (expanded_shape_left, tf.shape(input=x)[-1:], tf.ones([tf.size(input=y_ref_shape_right)], dtype=tf.int32)), axis=0) x_expanded = tf.reshape(x, expanded_shape) if broadcast: broadcast_shape_left = tf.broadcast_dynamic_shape( tf.shape(input=x)[:-1], y_ref_shape_left) broadcast_shape = tf.concat( (broadcast_shape_left, tf.shape(input=x)[-1:], y_ref_shape_right), axis=0) if x.dtype.is_bool: x_expanded = x_expanded | tf.cast(tf.zeros(broadcast_shape), tf.bool) else: x_expanded += tf.zeros(broadcast_shape, dtype=x.dtype) return x_expanded return expand_right_dims
def _make_expand_x_fn_for_batch_interpolation(y_ref, axis): """Make func to expand left/right (of axis) dims of tensors shaped like x.""" # This expansion is to help x broadcast with `y`, the output. # In the batch case, the output shape is going to be # Broadcast(y_ref.shape[:axis], x.shape[:-1]) + # x.shape[-1:] + y_ref.shape[axis+1:] # Recall we made axis non-negative y_ref_shape = tf.shape(input=y_ref) y_ref_shape_left = y_ref_shape[:axis] y_ref_shape_right = y_ref_shape[axis + 1:] def expand_right_dims(x, broadcast=False): """Expand x so it can bcast w/ tensors of output shape.""" expanded_shape_left = tf.broadcast_dynamic_shape( tf.shape(input=x)[:-1], tf.ones([tf.size(input=y_ref_shape_left)], dtype=tf.int32)) expanded_shape = tf.concat( (expanded_shape_left, tf.shape(input=x)[-1:], tf.ones([tf.size(input=y_ref_shape_right)], dtype=tf.int32)), axis=0) x_expanded = tf.reshape(x, expanded_shape) if broadcast: broadcast_shape_left = tf.broadcast_dynamic_shape( tf.shape(input=x)[:-1], y_ref_shape_left) broadcast_shape = tf.concat( (broadcast_shape_left, tf.shape(input=x)[-1:], y_ref_shape_right), axis=0) if x.dtype.is_bool: x_expanded = x_expanded | tf.cast(tf.zeros(broadcast_shape), tf.bool) else: x_expanded += tf.zeros(broadcast_shape, dtype=x.dtype) return x_expanded return expand_right_dims
[ "Make", "func", "to", "expand", "left", "/", "right", "(", "of", "axis", ")", "dims", "of", "tensors", "shaped", "like", "x", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/interpolation.py#L893-L927
[ "def", "_make_expand_x_fn_for_batch_interpolation", "(", "y_ref", ",", "axis", ")", ":", "# This expansion is to help x broadcast with `y`, the output.", "# In the batch case, the output shape is going to be", "# Broadcast(y_ref.shape[:axis], x.shape[:-1]) +", "# x.shape[-1:] + y_ref.shape[axis+1:]", "# Recall we made axis non-negative", "y_ref_shape", "=", "tf", ".", "shape", "(", "input", "=", "y_ref", ")", "y_ref_shape_left", "=", "y_ref_shape", "[", ":", "axis", "]", "y_ref_shape_right", "=", "y_ref_shape", "[", "axis", "+", "1", ":", "]", "def", "expand_right_dims", "(", "x", ",", "broadcast", "=", "False", ")", ":", "\"\"\"Expand x so it can bcast w/ tensors of output shape.\"\"\"", "expanded_shape_left", "=", "tf", ".", "broadcast_dynamic_shape", "(", "tf", ".", "shape", "(", "input", "=", "x", ")", "[", ":", "-", "1", "]", ",", "tf", ".", "ones", "(", "[", "tf", ".", "size", "(", "input", "=", "y_ref_shape_left", ")", "]", ",", "dtype", "=", "tf", ".", "int32", ")", ")", "expanded_shape", "=", "tf", ".", "concat", "(", "(", "expanded_shape_left", ",", "tf", ".", "shape", "(", "input", "=", "x", ")", "[", "-", "1", ":", "]", ",", "tf", ".", "ones", "(", "[", "tf", ".", "size", "(", "input", "=", "y_ref_shape_right", ")", "]", ",", "dtype", "=", "tf", ".", "int32", ")", ")", ",", "axis", "=", "0", ")", "x_expanded", "=", "tf", ".", "reshape", "(", "x", ",", "expanded_shape", ")", "if", "broadcast", ":", "broadcast_shape_left", "=", "tf", ".", "broadcast_dynamic_shape", "(", "tf", ".", "shape", "(", "input", "=", "x", ")", "[", ":", "-", "1", "]", ",", "y_ref_shape_left", ")", "broadcast_shape", "=", "tf", ".", "concat", "(", "(", "broadcast_shape_left", ",", "tf", ".", "shape", "(", "input", "=", "x", ")", "[", "-", "1", ":", "]", ",", "y_ref_shape_right", ")", ",", "axis", "=", "0", ")", "if", "x", ".", "dtype", ".", "is_bool", ":", "x_expanded", "=", "x_expanded", "|", "tf", ".", "cast", "(", "tf", ".", "zeros", "(", "broadcast_shape", ")", ",", "tf", ".", "bool", ")", "else", ":", "x_expanded", "+=", "tf", ".", "zeros", "(", "broadcast_shape", ",", "dtype", "=", "x", ".", "dtype", ")", "return", "x_expanded", "return", "expand_right_dims" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
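For contrast with the non-batch helper, here is a similarly hedged NumPy sketch of the batch case: the leading dims of `x` broadcast against `y_ref.shape[:axis]`, the last dim of `x` is kept, and singletons fill the right. Shapes are made up for illustration, and `np.broadcast_shapes` assumes NumPy 1.20+.

```python
import numpy as np

y_ref = np.zeros((4, 7, 5))   # leading batch dim 4, grid dim 7, trailing dim 5
axis = 1
x = np.zeros((1, 3))          # batch dim 1 broadcasts against 4; 3 query points

# Keep x's last dim and pad singletons on its right.
x_expanded = x.reshape(x.shape + (1,) * (y_ref.ndim - axis - 1))   # (1, 3, 1)

# Output shape: Broadcast(y_ref.shape[:axis], x.shape[:-1]) +
#               x.shape[-1:] + y_ref.shape[axis+1:].
out_shape = (np.broadcast_shapes(y_ref.shape[:axis], x.shape[:-1])
             + x.shape[-1:] + y_ref.shape[axis + 1:])
x_broadcast = np.broadcast_to(x_expanded, out_shape)               # (4, 3, 5)
print(x_broadcast.shape)
```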
test
_batch_gather_with_broadcast
Like batch_gather, but broadcasts to the left of axis.
tensorflow_probability/python/math/interpolation.py
def _batch_gather_with_broadcast(params, indices, axis): """Like batch_gather, but broadcasts to the left of axis.""" # batch_gather assumes... # params.shape = [A1,...,AN, B1,...,BM] # indices.shape = [A1,...,AN, C] # which gives output of shape # [A1,...,AN, C, B1,...,BM] # Here we broadcast dims of each to the left of `axis` in params, and left of # the rightmost dim in indices, e.g. we can # have # params.shape = [A1,...,AN, B1,...,BM] # indices.shape = [a1,...,aN, C], # where ai broadcasts with Ai. # leading_bcast_shape is the broadcast of [A1,...,AN] and [a1,...,aN]. leading_bcast_shape = tf.broadcast_dynamic_shape( tf.shape(input=params)[:axis], tf.shape(input=indices)[:-1]) params += tf.zeros( tf.concat((leading_bcast_shape, tf.shape(input=params)[axis:]), axis=0), dtype=params.dtype) indices += tf.zeros( tf.concat((leading_bcast_shape, tf.shape(input=indices)[-1:]), axis=0), dtype=indices.dtype) return tf.compat.v1.batch_gather(params, indices)
def _batch_gather_with_broadcast(params, indices, axis): """Like batch_gather, but broadcasts to the left of axis.""" # batch_gather assumes... # params.shape = [A1,...,AN, B1,...,BM] # indices.shape = [A1,...,AN, C] # which gives output of shape # [A1,...,AN, C, B1,...,BM] # Here we broadcast dims of each to the left of `axis` in params, and left of # the rightmost dim in indices, e.g. we can # have # params.shape = [A1,...,AN, B1,...,BM] # indices.shape = [a1,...,aN, C], # where ai broadcasts with Ai. # leading_bcast_shape is the broadcast of [A1,...,AN] and [a1,...,aN]. leading_bcast_shape = tf.broadcast_dynamic_shape( tf.shape(input=params)[:axis], tf.shape(input=indices)[:-1]) params += tf.zeros( tf.concat((leading_bcast_shape, tf.shape(input=params)[axis:]), axis=0), dtype=params.dtype) indices += tf.zeros( tf.concat((leading_bcast_shape, tf.shape(input=indices)[-1:]), axis=0), dtype=indices.dtype) return tf.compat.v1.batch_gather(params, indices)
[ "Like", "batch_gather", "but", "broadcasts", "to", "the", "left", "of", "axis", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/interpolation.py#L930-L954
[ "def", "_batch_gather_with_broadcast", "(", "params", ",", "indices", ",", "axis", ")", ":", "# batch_gather assumes...", "# params.shape = [A1,...,AN, B1,...,BM]", "# indices.shape = [A1,...,AN, C]", "# which gives output of shape", "# [A1,...,AN, C, B1,...,BM]", "# Here we broadcast dims of each to the left of `axis` in params, and left of", "# the rightmost dim in indices, e.g. we can", "# have", "# params.shape = [A1,...,AN, B1,...,BM]", "# indices.shape = [a1,...,aN, C],", "# where ai broadcasts with Ai.", "# leading_bcast_shape is the broadcast of [A1,...,AN] and [a1,...,aN].", "leading_bcast_shape", "=", "tf", ".", "broadcast_dynamic_shape", "(", "tf", ".", "shape", "(", "input", "=", "params", ")", "[", ":", "axis", "]", ",", "tf", ".", "shape", "(", "input", "=", "indices", ")", "[", ":", "-", "1", "]", ")", "params", "+=", "tf", ".", "zeros", "(", "tf", ".", "concat", "(", "(", "leading_bcast_shape", ",", "tf", ".", "shape", "(", "input", "=", "params", ")", "[", "axis", ":", "]", ")", ",", "axis", "=", "0", ")", ",", "dtype", "=", "params", ".", "dtype", ")", "indices", "+=", "tf", ".", "zeros", "(", "tf", ".", "concat", "(", "(", "leading_bcast_shape", ",", "tf", ".", "shape", "(", "input", "=", "indices", ")", "[", "-", "1", ":", "]", ")", ",", "axis", "=", "0", ")", ",", "dtype", "=", "indices", ".", "dtype", ")", "return", "tf", ".", "compat", ".", "v1", ".", "batch_gather", "(", "params", ",", "indices", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
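A small NumPy illustration of the gather pattern described in the comments, for the simplest case with no trailing `B` dims (made-up shapes; this is not the library function): indices whose leading dim is 1 broadcast against the batch dim of `params`, then values are gathered batch-wise along the last axis.

```python
import numpy as np

# params.shape  = [A, grid]   -> here (2, 5)
# indices.shape = [a, C], a=1 -> broadcasts against A=2
params = np.arange(10.).reshape(2, 5)
indices = np.array([[0, 2, 4]])                           # shape (1, 3)

# Broadcast the leading dims, then gather along the last axis of params.
indices_b = np.broadcast_to(indices, (2, 3))
gathered = np.take_along_axis(params, indices_b, axis=1)  # shape (2, 3)
print(gathered)
# [[0. 2. 4.]
#  [5. 7. 9.]]
```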
test
_broadcast_cat_event_and_params
Broadcasts the event or distribution parameters.
tensorflow_probability/python/distributions/categorical.py
def _broadcast_cat_event_and_params(event, params, base_dtype): """Broadcasts the event or distribution parameters.""" if dtype_util.is_integer(event.dtype): pass elif dtype_util.is_floating(event.dtype): # When `validate_args=True` we've already ensured int/float casting # is closed. event = tf.cast(event, dtype=tf.int32) else: raise TypeError("`value` should have integer `dtype` or " "`self.dtype` ({})".format(base_dtype)) shape_known_statically = ( tensorshape_util.rank(params.shape) is not None and tensorshape_util.is_fully_defined(params.shape[:-1]) and tensorshape_util.is_fully_defined(event.shape)) if not shape_known_statically or params.shape[:-1] != event.shape: params *= tf.ones_like(event[..., tf.newaxis], dtype=params.dtype) params_shape = tf.shape(input=params)[:-1] event *= tf.ones(params_shape, dtype=event.dtype) if tensorshape_util.rank(params.shape) is not None: tensorshape_util.set_shape(event, params.shape[:-1]) return event, params
def _broadcast_cat_event_and_params(event, params, base_dtype): """Broadcasts the event or distribution parameters.""" if dtype_util.is_integer(event.dtype): pass elif dtype_util.is_floating(event.dtype): # When `validate_args=True` we've already ensured int/float casting # is closed. event = tf.cast(event, dtype=tf.int32) else: raise TypeError("`value` should have integer `dtype` or " "`self.dtype` ({})".format(base_dtype)) shape_known_statically = ( tensorshape_util.rank(params.shape) is not None and tensorshape_util.is_fully_defined(params.shape[:-1]) and tensorshape_util.is_fully_defined(event.shape)) if not shape_known_statically or params.shape[:-1] != event.shape: params *= tf.ones_like(event[..., tf.newaxis], dtype=params.dtype) params_shape = tf.shape(input=params)[:-1] event *= tf.ones(params_shape, dtype=event.dtype) if tensorshape_util.rank(params.shape) is not None: tensorshape_util.set_shape(event, params.shape[:-1]) return event, params
[ "Broadcasts", "the", "event", "or", "distribution", "parameters", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/categorical.py#L33-L56
[ "def", "_broadcast_cat_event_and_params", "(", "event", ",", "params", ",", "base_dtype", ")", ":", "if", "dtype_util", ".", "is_integer", "(", "event", ".", "dtype", ")", ":", "pass", "elif", "dtype_util", ".", "is_floating", "(", "event", ".", "dtype", ")", ":", "# When `validate_args=True` we've already ensured int/float casting", "# is closed.", "event", "=", "tf", ".", "cast", "(", "event", ",", "dtype", "=", "tf", ".", "int32", ")", "else", ":", "raise", "TypeError", "(", "\"`value` should have integer `dtype` or \"", "\"`self.dtype` ({})\"", ".", "format", "(", "base_dtype", ")", ")", "shape_known_statically", "=", "(", "tensorshape_util", ".", "rank", "(", "params", ".", "shape", ")", "is", "not", "None", "and", "tensorshape_util", ".", "is_fully_defined", "(", "params", ".", "shape", "[", ":", "-", "1", "]", ")", "and", "tensorshape_util", ".", "is_fully_defined", "(", "event", ".", "shape", ")", ")", "if", "not", "shape_known_statically", "or", "params", ".", "shape", "[", ":", "-", "1", "]", "!=", "event", ".", "shape", ":", "params", "*=", "tf", ".", "ones_like", "(", "event", "[", "...", ",", "tf", ".", "newaxis", "]", ",", "dtype", "=", "params", ".", "dtype", ")", "params_shape", "=", "tf", ".", "shape", "(", "input", "=", "params", ")", "[", ":", "-", "1", "]", "event", "*=", "tf", ".", "ones", "(", "params_shape", ",", "dtype", "=", "event", ".", "dtype", ")", "if", "tensorshape_util", ".", "rank", "(", "params", ".", "shape", ")", "is", "not", "None", ":", "tensorshape_util", ".", "set_shape", "(", "event", ",", "params", ".", "shape", "[", ":", "-", "1", "]", ")", "return", "event", ",", "params" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
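The net effect of this helper is visible through the public `Categorical` distribution: a scalar event broadcasts against the batch of parameters, so one call evaluates the same class index under every batch member. This is a usage sketch under TF2 eager execution with made-up probabilities, not a statement about the internal code path.

```python
import tensorflow_probability as tfp
tfd = tfp.distributions

# Two categorical distributions over three classes (batch_shape=[2]).
dist = tfd.Categorical(probs=[[0.2, 0.3, 0.5],
                              [0.9, 0.05, 0.05]])

# A scalar event broadcasts to the batch shape: the result has shape [2].
print(dist.log_prob(2).numpy())   # log(0.5) and log(0.05)
```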
test
expectation_importance_sampler
r"""Monte Carlo estimate of \\(E_p[f(Z)] = E_q[f(Z) p(Z) / q(Z)]\\). With \\(p(z) := exp^{log_p(z)}\\), this `Op` returns \\(n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ], z_i ~ q,\\) \\(\approx E_q[ f(Z) p(Z) / q(Z) ]\\) \\(= E_p[f(Z)]\\) This integral is done in log-space with max-subtraction to better handle the often extreme values that `f(z) p(z) / q(z)` can take on. If `f >= 0`, it is up to 2x more efficient to exponentiate the result of `expectation_importance_sampler_logspace` applied to `Log[f]`. User supplies either `Tensor` of samples `z`, or number of samples to draw `n` Args: f: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `f` works "just like" `q.log_prob`. log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `log_p` works "just like" `sampling_dist_q.log_prob`. sampling_dist_q: The sampling distribution. `tfp.distributions.Distribution`. `float64` `dtype` recommended. `log_p` and `q` should be supported on the same set. z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`. n: Integer `Tensor`. Number of samples to generate if `z` is not provided. seed: Python integer to seed the random number generator. name: A name to give this `Op`. Returns: The importance sampling estimate. `Tensor` with `shape` equal to batch shape of `q`, and `dtype` = `q.dtype`.
tensorflow_probability/python/internal/monte_carlo.py
def expectation_importance_sampler(f, log_p, sampling_dist_q, z=None, n=None, seed=None, name='expectation_importance_sampler'): r"""Monte Carlo estimate of \\(E_p[f(Z)] = E_q[f(Z) p(Z) / q(Z)]\\). With \\(p(z) := exp^{log_p(z)}\\), this `Op` returns \\(n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ], z_i ~ q,\\) \\(\approx E_q[ f(Z) p(Z) / q(Z) ]\\) \\(= E_p[f(Z)]\\) This integral is done in log-space with max-subtraction to better handle the often extreme values that `f(z) p(z) / q(z)` can take on. If `f >= 0`, it is up to 2x more efficient to exponentiate the result of `expectation_importance_sampler_logspace` applied to `Log[f]`. User supplies either `Tensor` of samples `z`, or number of samples to draw `n` Args: f: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `f` works "just like" `q.log_prob`. log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `log_p` works "just like" `sampling_dist_q.log_prob`. sampling_dist_q: The sampling distribution. `tfp.distributions.Distribution`. `float64` `dtype` recommended. `log_p` and `q` should be supported on the same set. z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`. n: Integer `Tensor`. Number of samples to generate if `z` is not provided. seed: Python integer to seed the random number generator. name: A name to give this `Op`. Returns: The importance sampling estimate. `Tensor` with `shape` equal to batch shape of `q`, and `dtype` = `q.dtype`. """ q = sampling_dist_q with tf.name_scope(name): z = _get_samples(q, z, n, seed) log_p_z = log_p(z) q_log_prob_z = q.log_prob(z) def _importance_sampler_positive_f(log_f_z): # Same as expectation_importance_sampler_logspace, but using Tensors # rather than samples and functions. Allows us to sample once. log_values = log_f_z + log_p_z - q_log_prob_z return _logspace_mean(log_values) # With \\(f_{plus}(z) = max(0, f(z)), f_{minus}(z) = max(0, -f(z))\\), # \\(E_p[f(Z)] = E_p[f_{plus}(Z)] - E_p[f_{minus}(Z)]\\) # \\( = E_p[f_{plus}(Z) + 1] - E_p[f_{minus}(Z) + 1]\\) # Without incurring bias, 1 is added to each to prevent zeros in logspace. # The logarithm is approximately linear around 1 + epsilon, so this is good # for small values of 'z' as well. f_z = f(z) log_f_plus_z = tf.math.log1p(tf.nn.relu(f_z)) log_f_minus_z = tf.math.log1p(tf.nn.relu(-1. * f_z)) log_f_plus_integral = _importance_sampler_positive_f(log_f_plus_z) log_f_minus_integral = _importance_sampler_positive_f(log_f_minus_z) return tf.math.exp(log_f_plus_integral) - tf.math.exp(log_f_minus_integral)
def expectation_importance_sampler(f, log_p, sampling_dist_q, z=None, n=None, seed=None, name='expectation_importance_sampler'): r"""Monte Carlo estimate of \\(E_p[f(Z)] = E_q[f(Z) p(Z) / q(Z)]\\). With \\(p(z) := exp^{log_p(z)}\\), this `Op` returns \\(n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ], z_i ~ q,\\) \\(\approx E_q[ f(Z) p(Z) / q(Z) ]\\) \\(= E_p[f(Z)]\\) This integral is done in log-space with max-subtraction to better handle the often extreme values that `f(z) p(z) / q(z)` can take on. If `f >= 0`, it is up to 2x more efficient to exponentiate the result of `expectation_importance_sampler_logspace` applied to `Log[f]`. User supplies either `Tensor` of samples `z`, or number of samples to draw `n` Args: f: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `f` works "just like" `q.log_prob`. log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `log_p` works "just like" `sampling_dist_q.log_prob`. sampling_dist_q: The sampling distribution. `tfp.distributions.Distribution`. `float64` `dtype` recommended. `log_p` and `q` should be supported on the same set. z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`. n: Integer `Tensor`. Number of samples to generate if `z` is not provided. seed: Python integer to seed the random number generator. name: A name to give this `Op`. Returns: The importance sampling estimate. `Tensor` with `shape` equal to batch shape of `q`, and `dtype` = `q.dtype`. """ q = sampling_dist_q with tf.name_scope(name): z = _get_samples(q, z, n, seed) log_p_z = log_p(z) q_log_prob_z = q.log_prob(z) def _importance_sampler_positive_f(log_f_z): # Same as expectation_importance_sampler_logspace, but using Tensors # rather than samples and functions. Allows us to sample once. log_values = log_f_z + log_p_z - q_log_prob_z return _logspace_mean(log_values) # With \\(f_{plus}(z) = max(0, f(z)), f_{minus}(z) = max(0, -f(z))\\), # \\(E_p[f(Z)] = E_p[f_{plus}(Z)] - E_p[f_{minus}(Z)]\\) # \\( = E_p[f_{plus}(Z) + 1] - E_p[f_{minus}(Z) + 1]\\) # Without incurring bias, 1 is added to each to prevent zeros in logspace. # The logarithm is approximately linear around 1 + epsilon, so this is good # for small values of 'z' as well. f_z = f(z) log_f_plus_z = tf.math.log1p(tf.nn.relu(f_z)) log_f_minus_z = tf.math.log1p(tf.nn.relu(-1. * f_z)) log_f_plus_integral = _importance_sampler_positive_f(log_f_plus_z) log_f_minus_integral = _importance_sampler_positive_f(log_f_minus_z) return tf.math.exp(log_f_plus_integral) - tf.math.exp(log_f_minus_integral)
[ "r", "Monte", "Carlo", "estimate", "of", "\\\\", "(", "E_p", "[", "f", "(", "Z", ")", "]", "=", "E_q", "[", "f", "(", "Z", ")", "p", "(", "Z", ")", "/", "q", "(", "Z", ")", "]", "\\\\", ")", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/monte_carlo.py#L30-L99
[ "def", "expectation_importance_sampler", "(", "f", ",", "log_p", ",", "sampling_dist_q", ",", "z", "=", "None", ",", "n", "=", "None", ",", "seed", "=", "None", ",", "name", "=", "'expectation_importance_sampler'", ")", ":", "q", "=", "sampling_dist_q", "with", "tf", ".", "name_scope", "(", "name", ")", ":", "z", "=", "_get_samples", "(", "q", ",", "z", ",", "n", ",", "seed", ")", "log_p_z", "=", "log_p", "(", "z", ")", "q_log_prob_z", "=", "q", ".", "log_prob", "(", "z", ")", "def", "_importance_sampler_positive_f", "(", "log_f_z", ")", ":", "# Same as expectation_importance_sampler_logspace, but using Tensors", "# rather than samples and functions. Allows us to sample once.", "log_values", "=", "log_f_z", "+", "log_p_z", "-", "q_log_prob_z", "return", "_logspace_mean", "(", "log_values", ")", "# With \\\\(f_{plus}(z) = max(0, f(z)), f_{minus}(z) = max(0, -f(z))\\\\),", "# \\\\(E_p[f(Z)] = E_p[f_{plus}(Z)] - E_p[f_{minus}(Z)]\\\\)", "# \\\\( = E_p[f_{plus}(Z) + 1] - E_p[f_{minus}(Z) + 1]\\\\)", "# Without incurring bias, 1 is added to each to prevent zeros in logspace.", "# The logarithm is approximately linear around 1 + epsilon, so this is good", "# for small values of 'z' as well.", "f_z", "=", "f", "(", "z", ")", "log_f_plus_z", "=", "tf", ".", "math", ".", "log1p", "(", "tf", ".", "nn", ".", "relu", "(", "f_z", ")", ")", "log_f_minus_z", "=", "tf", ".", "math", ".", "log1p", "(", "tf", ".", "nn", ".", "relu", "(", "-", "1.", "*", "f_z", ")", ")", "log_f_plus_integral", "=", "_importance_sampler_positive_f", "(", "log_f_plus_z", ")", "log_f_minus_integral", "=", "_importance_sampler_positive_f", "(", "log_f_minus_z", ")", "return", "tf", ".", "math", ".", "exp", "(", "log_f_plus_integral", ")", "-", "tf", ".", "math", ".", "exp", "(", "log_f_minus_integral", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
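A minimal NumPy sketch of the estimator described above (not the TFP function, which additionally works in log-space with max-subtraction and splits `f` into positive and negative parts): sample from `q`, weight by `p(z)/q(z)`, and average. The distributions and `f` are chosen so the true answer is known.

```python
import numpy as np

rng = np.random.RandomState(0)

def log_normal_pdf(z, loc, scale):
  # Log density of a univariate normal, written out explicitly.
  return -0.5 * ((z - loc) / scale) ** 2 - np.log(scale * np.sqrt(2. * np.pi))

# Target p = N(0, 1), proposal q = N(0, 2), f(z) = z**2, so E_p[f(Z)] = 1.
z = rng.normal(loc=0., scale=2., size=100000)            # z ~ q
log_w = log_normal_pdf(z, 0., 1.) - log_normal_pdf(z, 0., 2.)
estimate = np.mean(z ** 2 * np.exp(log_w))
print(estimate)   # close to 1.0
```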
test
expectation_importance_sampler_logspace
r"""Importance sampling with a positive function, in log-space. With \\(p(z) := exp^{log_p(z)}\\), and \\(f(z) = exp{log_f(z)}\\), this `Op` returns \\(Log[ n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ] ], z_i ~ q,\\) \\(\approx Log[ E_q[ f(Z) p(Z) / q(Z) ] ]\\) \\(= Log[E_p[f(Z)]]\\) This integral is done in log-space with max-subtraction to better handle the often extreme values that `f(z) p(z) / q(z)` can take on. In contrast to `expectation_importance_sampler`, this `Op` returns values in log-space. User supplies either `Tensor` of samples `z`, or number of samples to draw `n` Args: log_f: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `log_f` works "just like" `sampling_dist_q.log_prob`. log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `log_p` works "just like" `q.log_prob`. sampling_dist_q: The sampling distribution. `tfp.distributions.Distribution`. `float64` `dtype` recommended. `log_p` and `q` should be supported on the same set. z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`. n: Integer `Tensor`. Number of samples to generate if `z` is not provided. seed: Python integer to seed the random number generator. name: A name to give this `Op`. Returns: Logarithm of the importance sampling estimate. `Tensor` with `shape` equal to batch shape of `q`, and `dtype` = `q.dtype`.
tensorflow_probability/python/internal/monte_carlo.py
def expectation_importance_sampler_logspace( log_f, log_p, sampling_dist_q, z=None, n=None, seed=None, name='expectation_importance_sampler_logspace'): r"""Importance sampling with a positive function, in log-space. With \\(p(z) := exp^{log_p(z)}\\), and \\(f(z) = exp{log_f(z)}\\), this `Op` returns \\(Log[ n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ] ], z_i ~ q,\\) \\(\approx Log[ E_q[ f(Z) p(Z) / q(Z) ] ]\\) \\(= Log[E_p[f(Z)]]\\) This integral is done in log-space with max-subtraction to better handle the often extreme values that `f(z) p(z) / q(z)` can take on. In contrast to `expectation_importance_sampler`, this `Op` returns values in log-space. User supplies either `Tensor` of samples `z`, or number of samples to draw `n` Args: log_f: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `log_f` works "just like" `sampling_dist_q.log_prob`. log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `log_p` works "just like" `q.log_prob`. sampling_dist_q: The sampling distribution. `tfp.distributions.Distribution`. `float64` `dtype` recommended. `log_p` and `q` should be supported on the same set. z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`. n: Integer `Tensor`. Number of samples to generate if `z` is not provided. seed: Python integer to seed the random number generator. name: A name to give this `Op`. Returns: Logarithm of the importance sampling estimate. `Tensor` with `shape` equal to batch shape of `q`, and `dtype` = `q.dtype`. """ q = sampling_dist_q with tf.name_scope(name): z = _get_samples(q, z, n, seed) log_values = log_f(z) + log_p(z) - q.log_prob(z) return _logspace_mean(log_values)
def expectation_importance_sampler_logspace( log_f, log_p, sampling_dist_q, z=None, n=None, seed=None, name='expectation_importance_sampler_logspace'): r"""Importance sampling with a positive function, in log-space. With \\(p(z) := exp^{log_p(z)}\\), and \\(f(z) = exp{log_f(z)}\\), this `Op` returns \\(Log[ n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ] ], z_i ~ q,\\) \\(\approx Log[ E_q[ f(Z) p(Z) / q(Z) ] ]\\) \\(= Log[E_p[f(Z)]]\\) This integral is done in log-space with max-subtraction to better handle the often extreme values that `f(z) p(z) / q(z)` can take on. In contrast to `expectation_importance_sampler`, this `Op` returns values in log-space. User supplies either `Tensor` of samples `z`, or number of samples to draw `n` Args: log_f: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `log_f` works "just like" `sampling_dist_q.log_prob`. log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `log_p` works "just like" `q.log_prob`. sampling_dist_q: The sampling distribution. `tfp.distributions.Distribution`. `float64` `dtype` recommended. `log_p` and `q` should be supported on the same set. z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`. n: Integer `Tensor`. Number of samples to generate if `z` is not provided. seed: Python integer to seed the random number generator. name: A name to give this `Op`. Returns: Logarithm of the importance sampling estimate. `Tensor` with `shape` equal to batch shape of `q`, and `dtype` = `q.dtype`. """ q = sampling_dist_q with tf.name_scope(name): z = _get_samples(q, z, n, seed) log_values = log_f(z) + log_p(z) - q.log_prob(z) return _logspace_mean(log_values)
[ "r", "Importance", "sampling", "with", "a", "positive", "function", "in", "log", "-", "space", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/monte_carlo.py#L102-L152
[ "def", "expectation_importance_sampler_logspace", "(", "log_f", ",", "log_p", ",", "sampling_dist_q", ",", "z", "=", "None", ",", "n", "=", "None", ",", "seed", "=", "None", ",", "name", "=", "'expectation_importance_sampler_logspace'", ")", ":", "q", "=", "sampling_dist_q", "with", "tf", ".", "name_scope", "(", "name", ")", ":", "z", "=", "_get_samples", "(", "q", ",", "z", ",", "n", ",", "seed", ")", "log_values", "=", "log_f", "(", "z", ")", "+", "log_p", "(", "z", ")", "-", "q", ".", "log_prob", "(", "z", ")", "return", "_logspace_mean", "(", "log_values", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_logspace_mean
Evaluate `Log[E[values]]` in a stable manner. Args: log_values: `Tensor` holding `Log[values]`. Returns: `Tensor` of same `dtype` as `log_values`, reduced across dim 0. `Log[Mean[values]]`.
tensorflow_probability/python/internal/monte_carlo.py
def _logspace_mean(log_values): """Evaluate `Log[E[values]]` in a stable manner. Args: log_values: `Tensor` holding `Log[values]`. Returns: `Tensor` of same `dtype` as `log_values`, reduced across dim 0. `Log[Mean[values]]`. """ # center = Max[Log[values]], with stop-gradient # The center hopefully keep the exponentiated term small. It is canceled # from the final result, so putting stop gradient on it will not change the # final result. We put stop gradient on to eliminate unnecessary computation. center = tf.stop_gradient(_sample_max(log_values)) # centered_values = exp{Log[values] - E[Log[values]]} centered_values = tf.math.exp(log_values - center) # log_mean_of_values = Log[ E[centered_values] ] + center # = Log[ E[exp{log_values - E[log_values]}] ] + center # = Log[E[values]] - E[log_values] + center # = Log[E[values]] log_mean_of_values = tf.math.log(_sample_mean(centered_values)) + center return log_mean_of_values
def _logspace_mean(log_values): """Evaluate `Log[E[values]]` in a stable manner. Args: log_values: `Tensor` holding `Log[values]`. Returns: `Tensor` of same `dtype` as `log_values`, reduced across dim 0. `Log[Mean[values]]`. """ # center = Max[Log[values]], with stop-gradient # The center hopefully keep the exponentiated term small. It is canceled # from the final result, so putting stop gradient on it will not change the # final result. We put stop gradient on to eliminate unnecessary computation. center = tf.stop_gradient(_sample_max(log_values)) # centered_values = exp{Log[values] - E[Log[values]]} centered_values = tf.math.exp(log_values - center) # log_mean_of_values = Log[ E[centered_values] ] + center # = Log[ E[exp{log_values - E[log_values]}] ] + center # = Log[E[values]] - E[log_values] + center # = Log[E[values]] log_mean_of_values = tf.math.log(_sample_mean(centered_values)) + center return log_mean_of_values
[ "Evaluate", "Log", "[", "E", "[", "values", "]]", "in", "a", "stable", "manner", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/monte_carlo.py#L155-L180
[ "def", "_logspace_mean", "(", "log_values", ")", ":", "# center = Max[Log[values]], with stop-gradient", "# The center hopefully keep the exponentiated term small. It is canceled", "# from the final result, so putting stop gradient on it will not change the", "# final result. We put stop gradient on to eliminate unnecessary computation.", "center", "=", "tf", ".", "stop_gradient", "(", "_sample_max", "(", "log_values", ")", ")", "# centered_values = exp{Log[values] - E[Log[values]]}", "centered_values", "=", "tf", ".", "math", ".", "exp", "(", "log_values", "-", "center", ")", "# log_mean_of_values = Log[ E[centered_values] ] + center", "# = Log[ E[exp{log_values - E[log_values]}] ] + center", "# = Log[E[values]] - E[log_values] + center", "# = Log[E[values]]", "log_mean_of_values", "=", "tf", ".", "math", ".", "log", "(", "_sample_mean", "(", "centered_values", ")", ")", "+", "center", "return", "log_mean_of_values" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
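The max-subtraction trick is easy to demonstrate with plain NumPy (illustrative values; this is not the private helper): shifting by the maximum keeps every exponentiated term at or below 1, and the shift is added back at the end.

```python
import numpy as np

log_values = np.array([1000.0, 1000.5, 999.5])   # exp() of these overflows

center = np.max(log_values)                # analogous role to _sample_max
centered = np.exp(log_values - center)     # safe: every term is <= 1
log_mean = np.log(np.mean(centered)) + center
print(log_mean)                            # ~= 1000.08

# The naive route overflows to inf (with a RuntimeWarning):
print(np.log(np.mean(np.exp(log_values))))
```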
test
_broadcast_event_and_samples
Broadcasts the event or samples.
tensorflow_probability/python/distributions/empirical.py
def _broadcast_event_and_samples(event, samples, event_ndims): """Broadcasts the event or samples.""" # This is the shape of self.samples, without the samples axis, i.e. the shape # of the result of a call to dist.sample(). This way we can broadcast it with # event to get a properly-sized event, then add the singleton dim back at # -event_ndims - 1. samples_shape = tf.concat( [tf.shape(input=samples)[:-event_ndims - 1], tf.shape(input=samples)[tf.rank(samples) - event_ndims:]], axis=0) event *= tf.ones(samples_shape, dtype=event.dtype) event = tf.expand_dims(event, axis=-event_ndims - 1) samples *= tf.ones_like(event, dtype=samples.dtype) return event, samples
def _broadcast_event_and_samples(event, samples, event_ndims): """Broadcasts the event or samples.""" # This is the shape of self.samples, without the samples axis, i.e. the shape # of the result of a call to dist.sample(). This way we can broadcast it with # event to get a properly-sized event, then add the singleton dim back at # -event_ndims - 1. samples_shape = tf.concat( [tf.shape(input=samples)[:-event_ndims - 1], tf.shape(input=samples)[tf.rank(samples) - event_ndims:]], axis=0) event *= tf.ones(samples_shape, dtype=event.dtype) event = tf.expand_dims(event, axis=-event_ndims - 1) samples *= tf.ones_like(event, dtype=samples.dtype) return event, samples
[ "Broadcasts", "the", "event", "or", "samples", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/empirical.py#L35-L49
[ "def", "_broadcast_event_and_samples", "(", "event", ",", "samples", ",", "event_ndims", ")", ":", "# This is the shape of self.samples, without the samples axis, i.e. the shape", "# of the result of a call to dist.sample(). This way we can broadcast it with", "# event to get a properly-sized event, then add the singleton dim back at", "# -event_ndims - 1.", "samples_shape", "=", "tf", ".", "concat", "(", "[", "tf", ".", "shape", "(", "input", "=", "samples", ")", "[", ":", "-", "event_ndims", "-", "1", "]", ",", "tf", ".", "shape", "(", "input", "=", "samples", ")", "[", "tf", ".", "rank", "(", "samples", ")", "-", "event_ndims", ":", "]", "]", ",", "axis", "=", "0", ")", "event", "*=", "tf", ".", "ones", "(", "samples_shape", ",", "dtype", "=", "event", ".", "dtype", ")", "event", "=", "tf", ".", "expand_dims", "(", "event", ",", "axis", "=", "-", "event_ndims", "-", "1", ")", "samples", "*=", "tf", ".", "ones_like", "(", "event", ",", "dtype", "=", "samples", ".", "dtype", ")", "return", "event", ",", "samples" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
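An illustrative NumPy version of the same broadcast for the scalar-event case (`event_ndims=0`), with made-up numbers: the samples axis sits at position `-1`, the event gets a singleton inserted there, and the two then broadcast, e.g. to form an empirical CDF.

```python
import numpy as np

samples = np.array([[0.1, 0.4, 0.7, 0.9],     # batch member 0
                    [0.2, 0.2, 0.8, 1.0]])    # batch member 1; shape (2, 4)
event = np.array([0.5, 0.9])                  # one event per batch member

event_expanded = event[..., np.newaxis]       # shape (2, 1), samples axis added
empirical_cdf = np.mean(samples <= event_expanded, axis=-1)
print(empirical_cdf)                           # [0.5  0.75]
```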
test
MetropolisHastings.one_step
Takes one step of the TransitionKernel. Args: current_state: `Tensor` or Python `list` of `Tensor`s representing the current state(s) of the Markov chain(s). previous_kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within the previous call to this function (or as returned by `bootstrap_results`). Returns: next_state: `Tensor` or Python `list` of `Tensor`s representing the next state(s) of the Markov chain(s). kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within this function. Raises: ValueError: if `inner_kernel` results doesn't contain the member "target_log_prob".
tensorflow_probability/python/mcmc/metropolis_hastings.py
def one_step(self, current_state, previous_kernel_results): """Takes one step of the TransitionKernel. Args: current_state: `Tensor` or Python `list` of `Tensor`s representing the current state(s) of the Markov chain(s). previous_kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within the previous call to this function (or as returned by `bootstrap_results`). Returns: next_state: `Tensor` or Python `list` of `Tensor`s representing the next state(s) of the Markov chain(s). kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within this function. Raises: ValueError: if `inner_kernel` results doesn't contain the member "target_log_prob". """ with tf.compat.v1.name_scope( name=mcmc_util.make_name(self.name, 'mh', 'one_step'), values=[current_state, previous_kernel_results]): # Take one inner step. [ proposed_state, proposed_results, ] = self.inner_kernel.one_step( current_state, previous_kernel_results.accepted_results) if (not has_target_log_prob(proposed_results) or not has_target_log_prob(previous_kernel_results.accepted_results)): raise ValueError('"target_log_prob" must be a member of ' '`inner_kernel` results.') # Compute log(acceptance_ratio). to_sum = [proposed_results.target_log_prob, -previous_kernel_results.accepted_results.target_log_prob] try: if (not mcmc_util.is_list_like( proposed_results.log_acceptance_correction) or proposed_results.log_acceptance_correction): to_sum.append(proposed_results.log_acceptance_correction) except AttributeError: warnings.warn('Supplied inner `TransitionKernel` does not have a ' '`log_acceptance_correction`. Assuming its value is `0.`') log_accept_ratio = mcmc_util.safe_sum( to_sum, name='compute_log_accept_ratio') # If proposed state reduces likelihood: randomly accept. # If proposed state increases likelihood: always accept. # I.e., u < min(1, accept_ratio), where u ~ Uniform[0,1) # ==> log(u) < log_accept_ratio log_uniform = tf.math.log( tf.random.uniform( shape=tf.shape(input=proposed_results.target_log_prob), dtype=proposed_results.target_log_prob.dtype.base_dtype, seed=self._seed_stream())) is_accepted = log_uniform < log_accept_ratio next_state = mcmc_util.choose( is_accepted, proposed_state, current_state, name='choose_next_state') kernel_results = MetropolisHastingsKernelResults( accepted_results=mcmc_util.choose( is_accepted, proposed_results, previous_kernel_results.accepted_results, name='choose_inner_results'), is_accepted=is_accepted, log_accept_ratio=log_accept_ratio, proposed_state=proposed_state, proposed_results=proposed_results, extra=[], ) return next_state, kernel_results
def one_step(self, current_state, previous_kernel_results): """Takes one step of the TransitionKernel. Args: current_state: `Tensor` or Python `list` of `Tensor`s representing the current state(s) of the Markov chain(s). previous_kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within the previous call to this function (or as returned by `bootstrap_results`). Returns: next_state: `Tensor` or Python `list` of `Tensor`s representing the next state(s) of the Markov chain(s). kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within this function. Raises: ValueError: if `inner_kernel` results doesn't contain the member "target_log_prob". """ with tf.compat.v1.name_scope( name=mcmc_util.make_name(self.name, 'mh', 'one_step'), values=[current_state, previous_kernel_results]): # Take one inner step. [ proposed_state, proposed_results, ] = self.inner_kernel.one_step( current_state, previous_kernel_results.accepted_results) if (not has_target_log_prob(proposed_results) or not has_target_log_prob(previous_kernel_results.accepted_results)): raise ValueError('"target_log_prob" must be a member of ' '`inner_kernel` results.') # Compute log(acceptance_ratio). to_sum = [proposed_results.target_log_prob, -previous_kernel_results.accepted_results.target_log_prob] try: if (not mcmc_util.is_list_like( proposed_results.log_acceptance_correction) or proposed_results.log_acceptance_correction): to_sum.append(proposed_results.log_acceptance_correction) except AttributeError: warnings.warn('Supplied inner `TransitionKernel` does not have a ' '`log_acceptance_correction`. Assuming its value is `0.`') log_accept_ratio = mcmc_util.safe_sum( to_sum, name='compute_log_accept_ratio') # If proposed state reduces likelihood: randomly accept. # If proposed state increases likelihood: always accept. # I.e., u < min(1, accept_ratio), where u ~ Uniform[0,1) # ==> log(u) < log_accept_ratio log_uniform = tf.math.log( tf.random.uniform( shape=tf.shape(input=proposed_results.target_log_prob), dtype=proposed_results.target_log_prob.dtype.base_dtype, seed=self._seed_stream())) is_accepted = log_uniform < log_accept_ratio next_state = mcmc_util.choose( is_accepted, proposed_state, current_state, name='choose_next_state') kernel_results = MetropolisHastingsKernelResults( accepted_results=mcmc_util.choose( is_accepted, proposed_results, previous_kernel_results.accepted_results, name='choose_inner_results'), is_accepted=is_accepted, log_accept_ratio=log_accept_ratio, proposed_state=proposed_state, proposed_results=proposed_results, extra=[], ) return next_state, kernel_results
[ "Takes", "one", "step", "of", "the", "TransitionKernel", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/metropolis_hastings.py#L165-L245
[ "def", "one_step", "(", "self", ",", "current_state", ",", "previous_kernel_results", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", "=", "mcmc_util", ".", "make_name", "(", "self", ".", "name", ",", "'mh'", ",", "'one_step'", ")", ",", "values", "=", "[", "current_state", ",", "previous_kernel_results", "]", ")", ":", "# Take one inner step.", "[", "proposed_state", ",", "proposed_results", ",", "]", "=", "self", ".", "inner_kernel", ".", "one_step", "(", "current_state", ",", "previous_kernel_results", ".", "accepted_results", ")", "if", "(", "not", "has_target_log_prob", "(", "proposed_results", ")", "or", "not", "has_target_log_prob", "(", "previous_kernel_results", ".", "accepted_results", ")", ")", ":", "raise", "ValueError", "(", "'\"target_log_prob\" must be a member of '", "'`inner_kernel` results.'", ")", "# Compute log(acceptance_ratio).", "to_sum", "=", "[", "proposed_results", ".", "target_log_prob", ",", "-", "previous_kernel_results", ".", "accepted_results", ".", "target_log_prob", "]", "try", ":", "if", "(", "not", "mcmc_util", ".", "is_list_like", "(", "proposed_results", ".", "log_acceptance_correction", ")", "or", "proposed_results", ".", "log_acceptance_correction", ")", ":", "to_sum", ".", "append", "(", "proposed_results", ".", "log_acceptance_correction", ")", "except", "AttributeError", ":", "warnings", ".", "warn", "(", "'Supplied inner `TransitionKernel` does not have a '", "'`log_acceptance_correction`. Assuming its value is `0.`'", ")", "log_accept_ratio", "=", "mcmc_util", ".", "safe_sum", "(", "to_sum", ",", "name", "=", "'compute_log_accept_ratio'", ")", "# If proposed state reduces likelihood: randomly accept.", "# If proposed state increases likelihood: always accept.", "# I.e., u < min(1, accept_ratio), where u ~ Uniform[0,1)", "# ==> log(u) < log_accept_ratio", "log_uniform", "=", "tf", ".", "math", ".", "log", "(", "tf", ".", "random", ".", "uniform", "(", "shape", "=", "tf", ".", "shape", "(", "input", "=", "proposed_results", ".", "target_log_prob", ")", ",", "dtype", "=", "proposed_results", ".", "target_log_prob", ".", "dtype", ".", "base_dtype", ",", "seed", "=", "self", ".", "_seed_stream", "(", ")", ")", ")", "is_accepted", "=", "log_uniform", "<", "log_accept_ratio", "next_state", "=", "mcmc_util", ".", "choose", "(", "is_accepted", ",", "proposed_state", ",", "current_state", ",", "name", "=", "'choose_next_state'", ")", "kernel_results", "=", "MetropolisHastingsKernelResults", "(", "accepted_results", "=", "mcmc_util", ".", "choose", "(", "is_accepted", ",", "proposed_results", ",", "previous_kernel_results", ".", "accepted_results", ",", "name", "=", "'choose_inner_results'", ")", ",", "is_accepted", "=", "is_accepted", ",", "log_accept_ratio", "=", "log_accept_ratio", ",", "proposed_state", "=", "proposed_state", ",", "proposed_results", "=", "proposed_results", ",", "extra", "=", "[", "]", ",", ")", "return", "next_state", ",", "kernel_results" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
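The core accept/reject rule, `log(u) < log_accept_ratio`, can be sketched in a few lines of NumPy. This is a toy random-walk sampler with a symmetric proposal (so the log acceptance correction is zero), not the class above.

```python
import numpy as np

rng = np.random.RandomState(42)

def target_log_prob(x):
  return -0.5 * x ** 2   # standard normal target, up to a constant

state = 0.0
accepted = 0
num_steps = 5000
for _ in range(num_steps):
  proposed = state + rng.normal(scale=1.0)
  log_accept_ratio = target_log_prob(proposed) - target_log_prob(state)
  # u < min(1, accept_ratio)  <=>  log(u) < log_accept_ratio
  if np.log(rng.uniform()) < log_accept_ratio:
    state = proposed
    accepted += 1
print(accepted / num_steps)   # acceptance fraction, roughly 0.7 for this setup
```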
test
MetropolisHastings.bootstrap_results
Returns an object with the same type as returned by `one_step`. Args: init_state: `Tensor` or Python `list` of `Tensor`s representing the initial state(s) of the Markov chain(s). Returns: kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within this function. Raises: ValueError: if `inner_kernel` results doesn't contain the member "target_log_prob".
tensorflow_probability/python/mcmc/metropolis_hastings.py
def bootstrap_results(self, init_state): """Returns an object with the same type as returned by `one_step`. Args: init_state: `Tensor` or Python `list` of `Tensor`s representing the initial state(s) of the Markov chain(s). Returns: kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within this function. Raises: ValueError: if `inner_kernel` results doesn't contain the member "target_log_prob". """ with tf.compat.v1.name_scope( name=mcmc_util.make_name(self.name, 'mh', 'bootstrap_results'), values=[init_state]): pkr = self.inner_kernel.bootstrap_results(init_state) if not has_target_log_prob(pkr): raise ValueError( '"target_log_prob" must be a member of `inner_kernel` results.') x = pkr.target_log_prob return MetropolisHastingsKernelResults( accepted_results=pkr, is_accepted=tf.ones_like(x, dtype=tf.bool), log_accept_ratio=tf.zeros_like(x), proposed_state=init_state, proposed_results=pkr, extra=[], )
def bootstrap_results(self, init_state): """Returns an object with the same type as returned by `one_step`. Args: init_state: `Tensor` or Python `list` of `Tensor`s representing the initial state(s) of the Markov chain(s). Returns: kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within this function. Raises: ValueError: if `inner_kernel` results doesn't contain the member "target_log_prob". """ with tf.compat.v1.name_scope( name=mcmc_util.make_name(self.name, 'mh', 'bootstrap_results'), values=[init_state]): pkr = self.inner_kernel.bootstrap_results(init_state) if not has_target_log_prob(pkr): raise ValueError( '"target_log_prob" must be a member of `inner_kernel` results.') x = pkr.target_log_prob return MetropolisHastingsKernelResults( accepted_results=pkr, is_accepted=tf.ones_like(x, dtype=tf.bool), log_accept_ratio=tf.zeros_like(x), proposed_state=init_state, proposed_results=pkr, extra=[], )
[ "Returns", "an", "object", "with", "the", "same", "type", "as", "returned", "by", "one_step", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/metropolis_hastings.py#L247-L277
[ "def", "bootstrap_results", "(", "self", ",", "init_state", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", "=", "mcmc_util", ".", "make_name", "(", "self", ".", "name", ",", "'mh'", ",", "'bootstrap_results'", ")", ",", "values", "=", "[", "init_state", "]", ")", ":", "pkr", "=", "self", ".", "inner_kernel", ".", "bootstrap_results", "(", "init_state", ")", "if", "not", "has_target_log_prob", "(", "pkr", ")", ":", "raise", "ValueError", "(", "'\"target_log_prob\" must be a member of `inner_kernel` results.'", ")", "x", "=", "pkr", ".", "target_log_prob", "return", "MetropolisHastingsKernelResults", "(", "accepted_results", "=", "pkr", ",", "is_accepted", "=", "tf", ".", "ones_like", "(", "x", ",", "dtype", "=", "tf", ".", "bool", ")", ",", "log_accept_ratio", "=", "tf", ".", "zeros_like", "(", "x", ")", ",", "proposed_state", "=", "init_state", ",", "proposed_results", "=", "pkr", ",", "extra", "=", "[", "]", ",", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
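As a usage sketch (hedged: this assumes the public `tfp.mcmc` API of roughly this vintage, run eagerly under TF2), `MetropolisHastings` is normally wrapped around an uncalibrated inner kernel and driven by `tfp.mcmc.sample_chain`, which calls `bootstrap_results` once and `one_step` repeatedly.

```python
import tensorflow as tf
import tensorflow_probability as tfp

def target_log_prob_fn(x):
  return -0.5 * x ** 2   # standard normal target, up to a constant

kernel = tfp.mcmc.MetropolisHastings(
    inner_kernel=tfp.mcmc.UncalibratedRandomWalk(
        target_log_prob_fn=target_log_prob_fn))

samples, kernel_results = tfp.mcmc.sample_chain(
    num_results=1000,
    num_burnin_steps=500,
    current_state=tf.constant(0.),
    kernel=kernel)

# Fraction of proposals accepted across the kept steps.
print(tf.reduce_mean(
    tf.cast(kernel_results.is_accepted, tf.float32)).numpy())
```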
test
minimize
Applies the BFGS algorithm to minimize a differentiable function. Performs unconstrained minimization of a differentiable function using the BFGS scheme. For details of the algorithm, see [Nocedal and Wright(2006)][1]. ### Usage: The following example demonstrates the BFGS optimizer attempting to find the minimum for a simple two dimensional quadratic objective function. ```python minimum = np.array([1.0, 1.0]) # The center of the quadratic bowl. scales = np.array([2.0, 3.0]) # The scales along the two axes. # The objective function and the gradient. def quadratic(x): value = tf.reduce_sum(scales * (x - minimum) ** 2) return value, tf.gradients(value, x)[0] start = tf.constant([0.6, 0.8]) # Starting point for the search. optim_results = tfp.optimizer.bfgs_minimize( quadratic, initial_position=start, tolerance=1e-8) with tf.Session() as session: results = session.run(optim_results) # Check that the search converged assert(results.converged) # Check that the argmin is close to the actual value. np.testing.assert_allclose(results.position, minimum) # Print out the total number of function evaluations it took. Should be 6. print ("Function evaluations: %d" % results.num_objective_evaluations) ``` ### References: [1]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in Operations Research. pp 136-140. 2006 http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf Args: value_and_gradients_function: A Python callable that accepts a point as a real `Tensor` and returns a tuple of `Tensor`s of real dtype containing the value of the function and its gradient at that point. The function to be minimized. The input should be of shape `[..., n]`, where `n` is the size of the domain of input points, and all others are batching dimensions. The first component of the return value should be a real `Tensor` of matching shape `[...]`. The second component (the gradient) should also be of shape `[..., n]` like the input value to the function. initial_position: real `Tensor` of shape `[..., n]`. The starting point, or points when using batching dimensions, of the search procedure. At these points the function value and the gradient norm should be finite. tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance for the procedure. If the supremum norm of the gradient vector is below this number, the algorithm is stopped. x_tolerance: Scalar `Tensor` of real dtype. If the absolute change in the position between one iteration and the next is smaller than this number, the algorithm is stopped. f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change in the objective value between one iteration and the next is smaller than this value, the algorithm is stopped. initial_inverse_hessian_estimate: Optional `Tensor` of the same dtype as the components of the output of the `value_and_gradients_function`. If specified, the shape should broadcastable to shape `[..., n, n]`; e.g. if a single `[n, n]` matrix is provided, it will be automatically broadcasted to all batches. Alternatively, one can also specify a different hessian estimate for each batch member. For the correctness of the algorithm, it is required that this parameter be symmetric and positive definite. Specifies the starting estimate for the inverse of the Hessian at the initial point. If not specified, the identity matrix is used as the starting estimate for the inverse Hessian. max_iterations: Scalar positive int32 `Tensor`. 
The maximum number of iterations for BFGS updates. parallel_iterations: Positive integer. The number of iterations allowed to run in parallel. stopping_condition: (Optional) A Python function that takes as input two Boolean tensors of shape `[...]`, and returns a Boolean scalar tensor. The input tensors are `converged` and `failed`, indicating the current status of each respective batch member; the return value states whether the algorithm should stop. The default is tfp.optimizer.converged_all which only stops when all batch members have either converged or failed. An alternative is tfp.optimizer.converged_any which stops as soon as one batch member has converged, or when all have failed. name: (Optional) Python str. The name prefixed to the ops created by this function. If not supplied, the default name 'minimize' is used. Returns: optimizer_results: A namedtuple containing the following items: converged: boolean tensor of shape `[...]` indicating for each batch member whether the minimum was found within tolerance. failed: boolean tensor of shape `[...]` indicating for each batch member whether a line search step failed to find a suitable step size satisfying Wolfe conditions. In the absence of any constraints on the number of objective evaluations permitted, this value will be the complement of `converged`. However, if there is a constraint and the search stopped due to available evaluations being exhausted, both `failed` and `converged` will be simultaneously False. num_objective_evaluations: The total number of objective evaluations performed. position: A tensor of shape `[..., n]` containing the last argument value found during the search from each starting point. If the search converged, then this value is the argmin of the objective function. objective_value: A tensor of shape `[...]` with the value of the objective function at the `position`. If the search converged, then this is the (local) minimum of the objective function. objective_gradient: A tensor of shape `[..., n]` containing the gradient of the objective function at the `position`. If the search converged the max-norm of this tensor should be below the tolerance. inverse_hessian_estimate: A tensor of shape `[..., n, n]` containing the inverse of the estimated Hessian.
tensorflow_probability/python/optimizer/bfgs.py
def minimize(value_and_gradients_function, initial_position, tolerance=1e-8, x_tolerance=0, f_relative_tolerance=0, initial_inverse_hessian_estimate=None, max_iterations=50, parallel_iterations=1, stopping_condition=None, name=None): """Applies the BFGS algorithm to minimize a differentiable function. Performs unconstrained minimization of a differentiable function using the BFGS scheme. For details of the algorithm, see [Nocedal and Wright(2006)][1]. ### Usage: The following example demonstrates the BFGS optimizer attempting to find the minimum for a simple two dimensional quadratic objective function. ```python minimum = np.array([1.0, 1.0]) # The center of the quadratic bowl. scales = np.array([2.0, 3.0]) # The scales along the two axes. # The objective function and the gradient. def quadratic(x): value = tf.reduce_sum(scales * (x - minimum) ** 2) return value, tf.gradients(value, x)[0] start = tf.constant([0.6, 0.8]) # Starting point for the search. optim_results = tfp.optimizer.bfgs_minimize( quadratic, initial_position=start, tolerance=1e-8) with tf.Session() as session: results = session.run(optim_results) # Check that the search converged assert(results.converged) # Check that the argmin is close to the actual value. np.testing.assert_allclose(results.position, minimum) # Print out the total number of function evaluations it took. Should be 6. print ("Function evaluations: %d" % results.num_objective_evaluations) ``` ### References: [1]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in Operations Research. pp 136-140. 2006 http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf Args: value_and_gradients_function: A Python callable that accepts a point as a real `Tensor` and returns a tuple of `Tensor`s of real dtype containing the value of the function and its gradient at that point. The function to be minimized. The input should be of shape `[..., n]`, where `n` is the size of the domain of input points, and all others are batching dimensions. The first component of the return value should be a real `Tensor` of matching shape `[...]`. The second component (the gradient) should also be of shape `[..., n]` like the input value to the function. initial_position: real `Tensor` of shape `[..., n]`. The starting point, or points when using batching dimensions, of the search procedure. At these points the function value and the gradient norm should be finite. tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance for the procedure. If the supremum norm of the gradient vector is below this number, the algorithm is stopped. x_tolerance: Scalar `Tensor` of real dtype. If the absolute change in the position between one iteration and the next is smaller than this number, the algorithm is stopped. f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change in the objective value between one iteration and the next is smaller than this value, the algorithm is stopped. initial_inverse_hessian_estimate: Optional `Tensor` of the same dtype as the components of the output of the `value_and_gradients_function`. If specified, the shape should broadcastable to shape `[..., n, n]`; e.g. if a single `[n, n]` matrix is provided, it will be automatically broadcasted to all batches. Alternatively, one can also specify a different hessian estimate for each batch member. For the correctness of the algorithm, it is required that this parameter be symmetric and positive definite. 
Specifies the starting estimate for the inverse of the Hessian at the initial point. If not specified, the identity matrix is used as the starting estimate for the inverse Hessian. max_iterations: Scalar positive int32 `Tensor`. The maximum number of iterations for BFGS updates. parallel_iterations: Positive integer. The number of iterations allowed to run in parallel. stopping_condition: (Optional) A Python function that takes as input two Boolean tensors of shape `[...]`, and returns a Boolean scalar tensor. The input tensors are `converged` and `failed`, indicating the current status of each respective batch member; the return value states whether the algorithm should stop. The default is tfp.optimizer.converged_all which only stops when all batch members have either converged or failed. An alternative is tfp.optimizer.converged_any which stops as soon as one batch member has converged, or when all have failed. name: (Optional) Python str. The name prefixed to the ops created by this function. If not supplied, the default name 'minimize' is used. Returns: optimizer_results: A namedtuple containing the following items: converged: boolean tensor of shape `[...]` indicating for each batch member whether the minimum was found within tolerance. failed: boolean tensor of shape `[...]` indicating for each batch member whether a line search step failed to find a suitable step size satisfying Wolfe conditions. In the absence of any constraints on the number of objective evaluations permitted, this value will be the complement of `converged`. However, if there is a constraint and the search stopped due to available evaluations being exhausted, both `failed` and `converged` will be simultaneously False. num_objective_evaluations: The total number of objective evaluations performed. position: A tensor of shape `[..., n]` containing the last argument value found during the search from each starting point. If the search converged, then this value is the argmin of the objective function. objective_value: A tensor of shape `[...]` with the value of the objective function at the `position`. If the search converged, then this is the (local) minimum of the objective function. objective_gradient: A tensor of shape `[..., n]` containing the gradient of the objective function at the `position`. If the search converged the max-norm of this tensor should be below the tolerance. inverse_hessian_estimate: A tensor of shape `[..., n, n]` containing the inverse of the estimated Hessian. """ with tf.compat.v1.name_scope( name, 'minimize', [initial_position, tolerance, initial_inverse_hessian_estimate]): initial_position = tf.convert_to_tensor( value=initial_position, name='initial_position') dtype = initial_position.dtype.base_dtype tolerance = tf.convert_to_tensor( value=tolerance, dtype=dtype, name='grad_tolerance') f_relative_tolerance = tf.convert_to_tensor( value=f_relative_tolerance, dtype=dtype, name='f_relative_tolerance') x_tolerance = tf.convert_to_tensor( value=x_tolerance, dtype=dtype, name='x_tolerance') max_iterations = tf.convert_to_tensor( value=max_iterations, name='max_iterations') input_shape = distribution_util.prefer_static_shape(initial_position) batch_shape, domain_size = input_shape[:-1], input_shape[-1] if stopping_condition is None: stopping_condition = bfgs_utils.converged_all # Control inputs are an optional list of tensors to evaluate before # the start of the search procedure. These can be used to assert the # validity of inputs to the search procedure. 
control_inputs = None if initial_inverse_hessian_estimate is None: # Create a default initial inverse Hessian. initial_inv_hessian = tf.eye(domain_size, batch_shape=batch_shape, dtype=dtype, name='initial_inv_hessian') else: # If an initial inverse Hessian is supplied, compute some control inputs # to ensure that it is positive definite and symmetric. initial_inv_hessian = tf.convert_to_tensor( value=initial_inverse_hessian_estimate, dtype=dtype, name='initial_inv_hessian') control_inputs = _inv_hessian_control_inputs(initial_inv_hessian) hessian_shape = tf.concat([batch_shape, [domain_size, domain_size]], 0) initial_inv_hessian = tf.broadcast_to(initial_inv_hessian, hessian_shape) # The `state` here is a `BfgsOptimizerResults` tuple with values for the # current state of the algorithm computation. def _cond(state): """Continue if iterations remain and stopping condition is not met.""" return ((state.num_iterations < max_iterations) & tf.logical_not(stopping_condition(state.converged, state.failed))) def _body(state): """Main optimization loop.""" search_direction = _get_search_direction(state.inverse_hessian_estimate, state.objective_gradient) derivative_at_start_pt = tf.reduce_sum( input_tensor=state.objective_gradient * search_direction, axis=-1) # If the derivative at the start point is not negative, recompute the # search direction with the initial inverse Hessian. needs_reset = (~state.failed & ~state.converged & (derivative_at_start_pt >= 0)) search_direction_reset = _get_search_direction( initial_inv_hessian, state.objective_gradient) actual_serch_direction = tf.where( needs_reset, search_direction_reset, search_direction) actual_inv_hessian = tf.where( needs_reset, initial_inv_hessian, state.inverse_hessian_estimate) # Replace the hessian estimate in the state, in case it had to be reset. current_state = bfgs_utils.update_fields( state, inverse_hessian_estimate=actual_inv_hessian) next_state = bfgs_utils.line_search_step( current_state, value_and_gradients_function, actual_serch_direction, tolerance, f_relative_tolerance, x_tolerance, stopping_condition) # Update the inverse Hessian if needed and continue. return [_update_inv_hessian(current_state, next_state)] kwargs = bfgs_utils.get_initial_state_args( value_and_gradients_function, initial_position, tolerance, control_inputs) kwargs['inverse_hessian_estimate'] = initial_inv_hessian initial_state = BfgsOptimizerResults(**kwargs) return tf.while_loop( cond=_cond, body=_body, loop_vars=[initial_state], parallel_iterations=parallel_iterations)[0]
def minimize(value_and_gradients_function, initial_position, tolerance=1e-8, x_tolerance=0, f_relative_tolerance=0, initial_inverse_hessian_estimate=None, max_iterations=50, parallel_iterations=1, stopping_condition=None, name=None): """Applies the BFGS algorithm to minimize a differentiable function. Performs unconstrained minimization of a differentiable function using the BFGS scheme. For details of the algorithm, see [Nocedal and Wright(2006)][1]. ### Usage: The following example demonstrates the BFGS optimizer attempting to find the minimum for a simple two dimensional quadratic objective function. ```python minimum = np.array([1.0, 1.0]) # The center of the quadratic bowl. scales = np.array([2.0, 3.0]) # The scales along the two axes. # The objective function and the gradient. def quadratic(x): value = tf.reduce_sum(scales * (x - minimum) ** 2) return value, tf.gradients(value, x)[0] start = tf.constant([0.6, 0.8]) # Starting point for the search. optim_results = tfp.optimizer.bfgs_minimize( quadratic, initial_position=start, tolerance=1e-8) with tf.Session() as session: results = session.run(optim_results) # Check that the search converged assert(results.converged) # Check that the argmin is close to the actual value. np.testing.assert_allclose(results.position, minimum) # Print out the total number of function evaluations it took. Should be 6. print ("Function evaluations: %d" % results.num_objective_evaluations) ``` ### References: [1]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in Operations Research. pp 136-140. 2006 http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf Args: value_and_gradients_function: A Python callable that accepts a point as a real `Tensor` and returns a tuple of `Tensor`s of real dtype containing the value of the function and its gradient at that point. The function to be minimized. The input should be of shape `[..., n]`, where `n` is the size of the domain of input points, and all others are batching dimensions. The first component of the return value should be a real `Tensor` of matching shape `[...]`. The second component (the gradient) should also be of shape `[..., n]` like the input value to the function. initial_position: real `Tensor` of shape `[..., n]`. The starting point, or points when using batching dimensions, of the search procedure. At these points the function value and the gradient norm should be finite. tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance for the procedure. If the supremum norm of the gradient vector is below this number, the algorithm is stopped. x_tolerance: Scalar `Tensor` of real dtype. If the absolute change in the position between one iteration and the next is smaller than this number, the algorithm is stopped. f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change in the objective value between one iteration and the next is smaller than this value, the algorithm is stopped. initial_inverse_hessian_estimate: Optional `Tensor` of the same dtype as the components of the output of the `value_and_gradients_function`. If specified, the shape should broadcastable to shape `[..., n, n]`; e.g. if a single `[n, n]` matrix is provided, it will be automatically broadcasted to all batches. Alternatively, one can also specify a different hessian estimate for each batch member. For the correctness of the algorithm, it is required that this parameter be symmetric and positive definite. 
Specifies the starting estimate for the inverse of the Hessian at the initial point. If not specified, the identity matrix is used as the starting estimate for the inverse Hessian. max_iterations: Scalar positive int32 `Tensor`. The maximum number of iterations for BFGS updates. parallel_iterations: Positive integer. The number of iterations allowed to run in parallel. stopping_condition: (Optional) A Python function that takes as input two Boolean tensors of shape `[...]`, and returns a Boolean scalar tensor. The input tensors are `converged` and `failed`, indicating the current status of each respective batch member; the return value states whether the algorithm should stop. The default is tfp.optimizer.converged_all which only stops when all batch members have either converged or failed. An alternative is tfp.optimizer.converged_any which stops as soon as one batch member has converged, or when all have failed. name: (Optional) Python str. The name prefixed to the ops created by this function. If not supplied, the default name 'minimize' is used. Returns: optimizer_results: A namedtuple containing the following items: converged: boolean tensor of shape `[...]` indicating for each batch member whether the minimum was found within tolerance. failed: boolean tensor of shape `[...]` indicating for each batch member whether a line search step failed to find a suitable step size satisfying Wolfe conditions. In the absence of any constraints on the number of objective evaluations permitted, this value will be the complement of `converged`. However, if there is a constraint and the search stopped due to available evaluations being exhausted, both `failed` and `converged` will be simultaneously False. num_objective_evaluations: The total number of objective evaluations performed. position: A tensor of shape `[..., n]` containing the last argument value found during the search from each starting point. If the search converged, then this value is the argmin of the objective function. objective_value: A tensor of shape `[...]` with the value of the objective function at the `position`. If the search converged, then this is the (local) minimum of the objective function. objective_gradient: A tensor of shape `[..., n]` containing the gradient of the objective function at the `position`. If the search converged the max-norm of this tensor should be below the tolerance. inverse_hessian_estimate: A tensor of shape `[..., n, n]` containing the inverse of the estimated Hessian. """ with tf.compat.v1.name_scope( name, 'minimize', [initial_position, tolerance, initial_inverse_hessian_estimate]): initial_position = tf.convert_to_tensor( value=initial_position, name='initial_position') dtype = initial_position.dtype.base_dtype tolerance = tf.convert_to_tensor( value=tolerance, dtype=dtype, name='grad_tolerance') f_relative_tolerance = tf.convert_to_tensor( value=f_relative_tolerance, dtype=dtype, name='f_relative_tolerance') x_tolerance = tf.convert_to_tensor( value=x_tolerance, dtype=dtype, name='x_tolerance') max_iterations = tf.convert_to_tensor( value=max_iterations, name='max_iterations') input_shape = distribution_util.prefer_static_shape(initial_position) batch_shape, domain_size = input_shape[:-1], input_shape[-1] if stopping_condition is None: stopping_condition = bfgs_utils.converged_all # Control inputs are an optional list of tensors to evaluate before # the start of the search procedure. These can be used to assert the # validity of inputs to the search procedure. 
control_inputs = None if initial_inverse_hessian_estimate is None: # Create a default initial inverse Hessian. initial_inv_hessian = tf.eye(domain_size, batch_shape=batch_shape, dtype=dtype, name='initial_inv_hessian') else: # If an initial inverse Hessian is supplied, compute some control inputs # to ensure that it is positive definite and symmetric. initial_inv_hessian = tf.convert_to_tensor( value=initial_inverse_hessian_estimate, dtype=dtype, name='initial_inv_hessian') control_inputs = _inv_hessian_control_inputs(initial_inv_hessian) hessian_shape = tf.concat([batch_shape, [domain_size, domain_size]], 0) initial_inv_hessian = tf.broadcast_to(initial_inv_hessian, hessian_shape) # The `state` here is a `BfgsOptimizerResults` tuple with values for the # current state of the algorithm computation. def _cond(state): """Continue if iterations remain and stopping condition is not met.""" return ((state.num_iterations < max_iterations) & tf.logical_not(stopping_condition(state.converged, state.failed))) def _body(state): """Main optimization loop.""" search_direction = _get_search_direction(state.inverse_hessian_estimate, state.objective_gradient) derivative_at_start_pt = tf.reduce_sum( input_tensor=state.objective_gradient * search_direction, axis=-1) # If the derivative at the start point is not negative, recompute the # search direction with the initial inverse Hessian. needs_reset = (~state.failed & ~state.converged & (derivative_at_start_pt >= 0)) search_direction_reset = _get_search_direction( initial_inv_hessian, state.objective_gradient) actual_serch_direction = tf.where( needs_reset, search_direction_reset, search_direction) actual_inv_hessian = tf.where( needs_reset, initial_inv_hessian, state.inverse_hessian_estimate) # Replace the hessian estimate in the state, in case it had to be reset. current_state = bfgs_utils.update_fields( state, inverse_hessian_estimate=actual_inv_hessian) next_state = bfgs_utils.line_search_step( current_state, value_and_gradients_function, actual_serch_direction, tolerance, f_relative_tolerance, x_tolerance, stopping_condition) # Update the inverse Hessian if needed and continue. return [_update_inv_hessian(current_state, next_state)] kwargs = bfgs_utils.get_initial_state_args( value_and_gradients_function, initial_position, tolerance, control_inputs) kwargs['inverse_hessian_estimate'] = initial_inv_hessian initial_state = BfgsOptimizerResults(**kwargs) return tf.while_loop( cond=_cond, body=_body, loop_vars=[initial_state], parallel_iterations=parallel_iterations)[0]
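A minimal sketch of a batched call, following the docstring's quadratic example: two starting points are optimized at once and, per the `stopping_condition` description above, `tfp.optimizer.converged_any` stops the loop as soon as either batch member converges. The objective and variable names here are illustrative assumptions, and the snippet assumes the same TF1-style graph mode as the docstring example.

```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

minimum = np.array([1.0, 1.0])   # Center of the quadratic bowl.
scales = np.array([2.0, 3.0])    # Scales along the two axes.

def batched_quadratic(x):
  # `x` has shape [batch, 2]; the returned value has shape [batch].
  value = tf.reduce_sum(scales * (x - minimum) ** 2, axis=-1)
  return value, tf.gradients(value, x)[0]

starts = tf.constant([[0.6, 0.8],
                      [-1.0, 2.0]])  # Two starting points, shape [2, 2].
optim_results = tfp.optimizer.bfgs_minimize(
    batched_quadratic,
    initial_position=starts,
    stopping_condition=tfp.optimizer.converged_any,
    tolerance=1e-8)
```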
[ "Applies", "the", "BFGS", "algorithm", "to", "minimize", "a", "differentiable", "function", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs.py#L72-L286
[ "def", "minimize", "(", "value_and_gradients_function", ",", "initial_position", ",", "tolerance", "=", "1e-8", ",", "x_tolerance", "=", "0", ",", "f_relative_tolerance", "=", "0", ",", "initial_inverse_hessian_estimate", "=", "None", ",", "max_iterations", "=", "50", ",", "parallel_iterations", "=", "1", ",", "stopping_condition", "=", "None", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'minimize'", ",", "[", "initial_position", ",", "tolerance", ",", "initial_inverse_hessian_estimate", "]", ")", ":", "initial_position", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "initial_position", ",", "name", "=", "'initial_position'", ")", "dtype", "=", "initial_position", ".", "dtype", ".", "base_dtype", "tolerance", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "tolerance", ",", "dtype", "=", "dtype", ",", "name", "=", "'grad_tolerance'", ")", "f_relative_tolerance", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "f_relative_tolerance", ",", "dtype", "=", "dtype", ",", "name", "=", "'f_relative_tolerance'", ")", "x_tolerance", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x_tolerance", ",", "dtype", "=", "dtype", ",", "name", "=", "'x_tolerance'", ")", "max_iterations", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "max_iterations", ",", "name", "=", "'max_iterations'", ")", "input_shape", "=", "distribution_util", ".", "prefer_static_shape", "(", "initial_position", ")", "batch_shape", ",", "domain_size", "=", "input_shape", "[", ":", "-", "1", "]", ",", "input_shape", "[", "-", "1", "]", "if", "stopping_condition", "is", "None", ":", "stopping_condition", "=", "bfgs_utils", ".", "converged_all", "# Control inputs are an optional list of tensors to evaluate before", "# the start of the search procedure. 
These can be used to assert the", "# validity of inputs to the search procedure.", "control_inputs", "=", "None", "if", "initial_inverse_hessian_estimate", "is", "None", ":", "# Create a default initial inverse Hessian.", "initial_inv_hessian", "=", "tf", ".", "eye", "(", "domain_size", ",", "batch_shape", "=", "batch_shape", ",", "dtype", "=", "dtype", ",", "name", "=", "'initial_inv_hessian'", ")", "else", ":", "# If an initial inverse Hessian is supplied, compute some control inputs", "# to ensure that it is positive definite and symmetric.", "initial_inv_hessian", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "initial_inverse_hessian_estimate", ",", "dtype", "=", "dtype", ",", "name", "=", "'initial_inv_hessian'", ")", "control_inputs", "=", "_inv_hessian_control_inputs", "(", "initial_inv_hessian", ")", "hessian_shape", "=", "tf", ".", "concat", "(", "[", "batch_shape", ",", "[", "domain_size", ",", "domain_size", "]", "]", ",", "0", ")", "initial_inv_hessian", "=", "tf", ".", "broadcast_to", "(", "initial_inv_hessian", ",", "hessian_shape", ")", "# The `state` here is a `BfgsOptimizerResults` tuple with values for the", "# current state of the algorithm computation.", "def", "_cond", "(", "state", ")", ":", "\"\"\"Continue if iterations remain and stopping condition is not met.\"\"\"", "return", "(", "(", "state", ".", "num_iterations", "<", "max_iterations", ")", "&", "tf", ".", "logical_not", "(", "stopping_condition", "(", "state", ".", "converged", ",", "state", ".", "failed", ")", ")", ")", "def", "_body", "(", "state", ")", ":", "\"\"\"Main optimization loop.\"\"\"", "search_direction", "=", "_get_search_direction", "(", "state", ".", "inverse_hessian_estimate", ",", "state", ".", "objective_gradient", ")", "derivative_at_start_pt", "=", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "state", ".", "objective_gradient", "*", "search_direction", ",", "axis", "=", "-", "1", ")", "# If the derivative at the start point is not negative, recompute the", "# search direction with the initial inverse Hessian.", "needs_reset", "=", "(", "~", "state", ".", "failed", "&", "~", "state", ".", "converged", "&", "(", "derivative_at_start_pt", ">=", "0", ")", ")", "search_direction_reset", "=", "_get_search_direction", "(", "initial_inv_hessian", ",", "state", ".", "objective_gradient", ")", "actual_serch_direction", "=", "tf", ".", "where", "(", "needs_reset", ",", "search_direction_reset", ",", "search_direction", ")", "actual_inv_hessian", "=", "tf", ".", "where", "(", "needs_reset", ",", "initial_inv_hessian", ",", "state", ".", "inverse_hessian_estimate", ")", "# Replace the hessian estimate in the state, in case it had to be reset.", "current_state", "=", "bfgs_utils", ".", "update_fields", "(", "state", ",", "inverse_hessian_estimate", "=", "actual_inv_hessian", ")", "next_state", "=", "bfgs_utils", ".", "line_search_step", "(", "current_state", ",", "value_and_gradients_function", ",", "actual_serch_direction", ",", "tolerance", ",", "f_relative_tolerance", ",", "x_tolerance", ",", "stopping_condition", ")", "# Update the inverse Hessian if needed and continue.", "return", "[", "_update_inv_hessian", "(", "current_state", ",", "next_state", ")", "]", "kwargs", "=", "bfgs_utils", ".", "get_initial_state_args", "(", "value_and_gradients_function", ",", "initial_position", ",", "tolerance", ",", "control_inputs", ")", "kwargs", "[", "'inverse_hessian_estimate'", "]", "=", "initial_inv_hessian", "initial_state", "=", "BfgsOptimizerResults", "(", "*", "*", "kwargs", ")", 
"return", "tf", ".", "while_loop", "(", "cond", "=", "_cond", ",", "body", "=", "_body", ",", "loop_vars", "=", "[", "initial_state", "]", ",", "parallel_iterations", "=", "parallel_iterations", ")", "[", "0", "]" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_inv_hessian_control_inputs
Computes control inputs to validate a provided inverse Hessian. These ensure that the provided inverse Hessian is positive definite and symmetric. Args: inv_hessian: The starting estimate for the inverse of the Hessian at the initial point. Returns: A list of tf.Assert ops suitable for use with tf.control_dependencies.
tensorflow_probability/python/optimizer/bfgs.py
def _inv_hessian_control_inputs(inv_hessian): """Computes control inputs to validate a provided inverse Hessian. These ensure that the provided inverse Hessian is positive definite and symmetric. Args: inv_hessian: The starting estimate for the inverse of the Hessian at the initial point. Returns: A list of tf.Assert ops suitable for use with tf.control_dependencies. """ # The easiest way to validate if the inverse Hessian is positive definite is # to compute its Cholesky decomposition. is_positive_definite = tf.reduce_all( input_tensor=tf.math.is_finite(tf.linalg.cholesky(inv_hessian)), axis=[-1, -2]) # Then check that the supplied inverse Hessian is symmetric. is_symmetric = tf.equal(bfgs_utils.norm( inv_hessian - _batch_transpose(inv_hessian), dims=2), 0) # Simply adding a control dependencies on these results is not enough to # trigger them, we need to add asserts on the results. return [tf.Assert(is_positive_definite, ['Initial inverse Hessian is not positive definite.', inv_hessian]), tf.Assert(is_symmetric, ['Initial inverse Hessian is not symmetric', inv_hessian])]
def _inv_hessian_control_inputs(inv_hessian): """Computes control inputs to validate a provided inverse Hessian. These ensure that the provided inverse Hessian is positive definite and symmetric. Args: inv_hessian: The starting estimate for the inverse of the Hessian at the initial point. Returns: A list of tf.Assert ops suitable for use with tf.control_dependencies. """ # The easiest way to validate if the inverse Hessian is positive definite is # to compute its Cholesky decomposition. is_positive_definite = tf.reduce_all( input_tensor=tf.math.is_finite(tf.linalg.cholesky(inv_hessian)), axis=[-1, -2]) # Then check that the supplied inverse Hessian is symmetric. is_symmetric = tf.equal(bfgs_utils.norm( inv_hessian - _batch_transpose(inv_hessian), dims=2), 0) # Simply adding a control dependencies on these results is not enough to # trigger them, we need to add asserts on the results. return [tf.Assert(is_positive_definite, ['Initial inverse Hessian is not positive definite.', inv_hessian]), tf.Assert(is_symmetric, ['Initial inverse Hessian is not symmetric', inv_hessian])]
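A brief, hedged sketch of how the returned assert ops could be consumed with `tf.control_dependencies`, as the docstring suggests; within `minimize` they are instead passed through `bfgs_utils.get_initial_state_args`. The tensor below is an illustrative assumption.

```python
import tensorflow as tf

inv_hessian = tf.constant([[2.0, 0.0],
                           [0.0, 3.0]])  # Symmetric and positive definite.
asserts = _inv_hessian_control_inputs(inv_hessian)
with tf.control_dependencies(asserts):
  # Evaluating `checked` triggers both assertions before any downstream use.
  checked = tf.identity(inv_hessian)
```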
[ "Computes", "control", "inputs", "to", "validate", "a", "provided", "inverse", "Hessian", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs.py#L289-L319
[ "def", "_inv_hessian_control_inputs", "(", "inv_hessian", ")", ":", "# The easiest way to validate if the inverse Hessian is positive definite is", "# to compute its Cholesky decomposition.", "is_positive_definite", "=", "tf", ".", "reduce_all", "(", "input_tensor", "=", "tf", ".", "math", ".", "is_finite", "(", "tf", ".", "linalg", ".", "cholesky", "(", "inv_hessian", ")", ")", ",", "axis", "=", "[", "-", "1", ",", "-", "2", "]", ")", "# Then check that the supplied inverse Hessian is symmetric.", "is_symmetric", "=", "tf", ".", "equal", "(", "bfgs_utils", ".", "norm", "(", "inv_hessian", "-", "_batch_transpose", "(", "inv_hessian", ")", ",", "dims", "=", "2", ")", ",", "0", ")", "# Simply adding a control dependencies on these results is not enough to", "# trigger them, we need to add asserts on the results.", "return", "[", "tf", ".", "Assert", "(", "is_positive_definite", ",", "[", "'Initial inverse Hessian is not positive definite.'", ",", "inv_hessian", "]", ")", ",", "tf", ".", "Assert", "(", "is_symmetric", ",", "[", "'Initial inverse Hessian is not symmetric'", ",", "inv_hessian", "]", ")", "]" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_update_inv_hessian
Update the BFGS state by computing the next inverse Hessian estimate.
tensorflow_probability/python/optimizer/bfgs.py
def _update_inv_hessian(prev_state, next_state): """Update the BGFS state by computing the next inverse hessian estimate.""" # Only update the inverse Hessian if not already failed or converged. should_update = ~next_state.converged & ~next_state.failed # Compute the normalization term (y^T . s), should not update if is singular. gradient_delta = next_state.objective_gradient - prev_state.objective_gradient position_delta = next_state.position - prev_state.position normalization_factor = tf.reduce_sum( input_tensor=gradient_delta * position_delta, axis=-1) should_update = should_update & ~tf.equal(normalization_factor, 0) def _do_update_inv_hessian(): next_inv_hessian = _bfgs_inv_hessian_update( gradient_delta, position_delta, normalization_factor, prev_state.inverse_hessian_estimate) return bfgs_utils.update_fields( next_state, inverse_hessian_estimate=tf.where(should_update, next_inv_hessian, prev_state.inverse_hessian_estimate)) return prefer_static.cond( tf.reduce_any(input_tensor=should_update), _do_update_inv_hessian, lambda: next_state)
def _update_inv_hessian(prev_state, next_state): """Update the BGFS state by computing the next inverse hessian estimate.""" # Only update the inverse Hessian if not already failed or converged. should_update = ~next_state.converged & ~next_state.failed # Compute the normalization term (y^T . s), should not update if is singular. gradient_delta = next_state.objective_gradient - prev_state.objective_gradient position_delta = next_state.position - prev_state.position normalization_factor = tf.reduce_sum( input_tensor=gradient_delta * position_delta, axis=-1) should_update = should_update & ~tf.equal(normalization_factor, 0) def _do_update_inv_hessian(): next_inv_hessian = _bfgs_inv_hessian_update( gradient_delta, position_delta, normalization_factor, prev_state.inverse_hessian_estimate) return bfgs_utils.update_fields( next_state, inverse_hessian_estimate=tf.where(should_update, next_inv_hessian, prev_state.inverse_hessian_estimate)) return prefer_static.cond( tf.reduce_any(input_tensor=should_update), _do_update_inv_hessian, lambda: next_state)
[ "Update", "the", "BGFS", "state", "by", "computing", "the", "next", "inverse", "hessian", "estimate", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs.py#L327-L352
[ "def", "_update_inv_hessian", "(", "prev_state", ",", "next_state", ")", ":", "# Only update the inverse Hessian if not already failed or converged.", "should_update", "=", "~", "next_state", ".", "converged", "&", "~", "next_state", ".", "failed", "# Compute the normalization term (y^T . s), should not update if is singular.", "gradient_delta", "=", "next_state", ".", "objective_gradient", "-", "prev_state", ".", "objective_gradient", "position_delta", "=", "next_state", ".", "position", "-", "prev_state", ".", "position", "normalization_factor", "=", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "gradient_delta", "*", "position_delta", ",", "axis", "=", "-", "1", ")", "should_update", "=", "should_update", "&", "~", "tf", ".", "equal", "(", "normalization_factor", ",", "0", ")", "def", "_do_update_inv_hessian", "(", ")", ":", "next_inv_hessian", "=", "_bfgs_inv_hessian_update", "(", "gradient_delta", ",", "position_delta", ",", "normalization_factor", ",", "prev_state", ".", "inverse_hessian_estimate", ")", "return", "bfgs_utils", ".", "update_fields", "(", "next_state", ",", "inverse_hessian_estimate", "=", "tf", ".", "where", "(", "should_update", ",", "next_inv_hessian", ",", "prev_state", ".", "inverse_hessian_estimate", ")", ")", "return", "prefer_static", ".", "cond", "(", "tf", ".", "reduce_any", "(", "input_tensor", "=", "should_update", ")", ",", "_do_update_inv_hessian", ",", "lambda", ":", "next_state", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_bfgs_inv_hessian_update
Applies the BFGS update to the inverse Hessian estimate. The BFGS update rule is (note A^T denotes the transpose of a vector/matrix A). ```None rho = 1/(grad_delta^T * position_delta) U = (I - rho * position_delta * grad_delta^T) H_1 = U * H_0 * U^T + rho * position_delta * position_delta^T ``` Here, `H_0` is the inverse Hessian estimate at the previous iteration and `H_1` is the next estimate. Note that `*` should be interpreted as the matrix multiplication (with the understanding that matrix multiplication for scalars is usual multiplication and for matrix with vector is the action of the matrix on the vector.). The implementation below utilizes an expanded version of the above formula to avoid the matrix multiplications that would be needed otherwise. By expansion it is easy to see that one only needs matrix-vector or vector-vector operations. The expanded version is: ```None f = 1 + rho * (grad_delta^T * H_0 * grad_delta) H_1 - H_0 = - rho * [position_delta * (H_0 * grad_delta)^T + (H_0 * grad_delta) * position_delta^T] + rho * f * [position_delta * position_delta^T] ``` All the terms in square brackets are matrices and are constructed using vector outer products. All the other terms on the right hand side are scalars. Also worth noting that the first and second lines are both rank 1 updates applied to the current inverse Hessian estimate. Args: grad_delta: Real `Tensor` of shape `[..., n]`. The difference between the gradient at the new position and the old position. position_delta: Real `Tensor` of shape `[..., n]`. The change in position from the previous iteration to the current one. normalization_factor: Real `Tensor` of shape `[...]`. Should be equal to `grad_delta^T * position_delta`, i.e. `1/rho` as defined above. inv_hessian_estimate: Real `Tensor` of shape `[..., n, n]`. The previous estimate of the inverse Hessian. Should be positive definite and symmetric. Returns: A tuple containing the following fields is_valid: A Boolean `Tensor` of shape `[...]` indicating batch members where the update succeeded. The update can fail if the position change becomes orthogonal to the gradient change. next_inv_hessian_estimate: A `Tensor` of shape `[..., n, n]`. The next Hessian estimate updated using the BFGS update scheme. If the `inv_hessian_estimate` is symmetric and positive definite, the `next_inv_hessian_estimate` is guaranteed to satisfy the same conditions.
tensorflow_probability/python/optimizer/bfgs.py
def _bfgs_inv_hessian_update(grad_delta, position_delta, normalization_factor, inv_hessian_estimate): """Applies the BFGS update to the inverse Hessian estimate. The BFGS update rule is (note A^T denotes the transpose of a vector/matrix A). ```None rho = 1/(grad_delta^T * position_delta) U = (I - rho * position_delta * grad_delta^T) H_1 = U * H_0 * U^T + rho * position_delta * position_delta^T ``` Here, `H_0` is the inverse Hessian estimate at the previous iteration and `H_1` is the next estimate. Note that `*` should be interpreted as the matrix multiplication (with the understanding that matrix multiplication for scalars is usual multiplication and for matrix with vector is the action of the matrix on the vector.). The implementation below utilizes an expanded version of the above formula to avoid the matrix multiplications that would be needed otherwise. By expansion it is easy to see that one only needs matrix-vector or vector-vector operations. The expanded version is: ```None f = 1 + rho * (grad_delta^T * H_0 * grad_delta) H_1 - H_0 = - rho * [position_delta * (H_0 * grad_delta)^T + (H_0 * grad_delta) * position_delta^T] + rho * f * [position_delta * position_delta^T] ``` All the terms in square brackets are matrices and are constructed using vector outer products. All the other terms on the right hand side are scalars. Also worth noting that the first and second lines are both rank 1 updates applied to the current inverse Hessian estimate. Args: grad_delta: Real `Tensor` of shape `[..., n]`. The difference between the gradient at the new position and the old position. position_delta: Real `Tensor` of shape `[..., n]`. The change in position from the previous iteration to the current one. normalization_factor: Real `Tensor` of shape `[...]`. Should be equal to `grad_delta^T * position_delta`, i.e. `1/rho` as defined above. inv_hessian_estimate: Real `Tensor` of shape `[..., n, n]`. The previous estimate of the inverse Hessian. Should be positive definite and symmetric. Returns: A tuple containing the following fields is_valid: A Boolean `Tensor` of shape `[...]` indicating batch members where the update succeeded. The update can fail if the position change becomes orthogonal to the gradient change. next_inv_hessian_estimate: A `Tensor` of shape `[..., n, n]`. The next Hessian estimate updated using the BFGS update scheme. If the `inv_hessian_estimate` is symmetric and positive definite, the `next_inv_hessian_estimate` is guaranteed to satisfy the same conditions. """ # The quadratic form: y^T.H.y; where H is the inverse Hessian and y is the # gradient change. conditioned_grad_delta = _mul_right(inv_hessian_estimate, grad_delta) conditioned_grad_delta_norm = tf.reduce_sum( input_tensor=conditioned_grad_delta * grad_delta, axis=-1) # The first rank 1 update term requires the outer product: s.y^T. cross_term = _tensor_product(position_delta, conditioned_grad_delta) def _expand_scalar(s): # Expand dimensions of a batch of scalars to multiply or divide a matrix. return s[..., tf.newaxis, tf.newaxis] # Symmetrize cross_term += _tensor_product(conditioned_grad_delta, position_delta) position_term = _tensor_product(position_delta, position_delta) with tf.control_dependencies([position_term]): position_term *= _expand_scalar( 1 + conditioned_grad_delta_norm / normalization_factor) return (inv_hessian_estimate + (position_term - cross_term) / _expand_scalar(normalization_factor))
def _bfgs_inv_hessian_update(grad_delta, position_delta, normalization_factor, inv_hessian_estimate): """Applies the BFGS update to the inverse Hessian estimate. The BFGS update rule is (note A^T denotes the transpose of a vector/matrix A). ```None rho = 1/(grad_delta^T * position_delta) U = (I - rho * position_delta * grad_delta^T) H_1 = U * H_0 * U^T + rho * position_delta * position_delta^T ``` Here, `H_0` is the inverse Hessian estimate at the previous iteration and `H_1` is the next estimate. Note that `*` should be interpreted as the matrix multiplication (with the understanding that matrix multiplication for scalars is usual multiplication and for matrix with vector is the action of the matrix on the vector.). The implementation below utilizes an expanded version of the above formula to avoid the matrix multiplications that would be needed otherwise. By expansion it is easy to see that one only needs matrix-vector or vector-vector operations. The expanded version is: ```None f = 1 + rho * (grad_delta^T * H_0 * grad_delta) H_1 - H_0 = - rho * [position_delta * (H_0 * grad_delta)^T + (H_0 * grad_delta) * position_delta^T] + rho * f * [position_delta * position_delta^T] ``` All the terms in square brackets are matrices and are constructed using vector outer products. All the other terms on the right hand side are scalars. Also worth noting that the first and second lines are both rank 1 updates applied to the current inverse Hessian estimate. Args: grad_delta: Real `Tensor` of shape `[..., n]`. The difference between the gradient at the new position and the old position. position_delta: Real `Tensor` of shape `[..., n]`. The change in position from the previous iteration to the current one. normalization_factor: Real `Tensor` of shape `[...]`. Should be equal to `grad_delta^T * position_delta`, i.e. `1/rho` as defined above. inv_hessian_estimate: Real `Tensor` of shape `[..., n, n]`. The previous estimate of the inverse Hessian. Should be positive definite and symmetric. Returns: A tuple containing the following fields is_valid: A Boolean `Tensor` of shape `[...]` indicating batch members where the update succeeded. The update can fail if the position change becomes orthogonal to the gradient change. next_inv_hessian_estimate: A `Tensor` of shape `[..., n, n]`. The next Hessian estimate updated using the BFGS update scheme. If the `inv_hessian_estimate` is symmetric and positive definite, the `next_inv_hessian_estimate` is guaranteed to satisfy the same conditions. """ # The quadratic form: y^T.H.y; where H is the inverse Hessian and y is the # gradient change. conditioned_grad_delta = _mul_right(inv_hessian_estimate, grad_delta) conditioned_grad_delta_norm = tf.reduce_sum( input_tensor=conditioned_grad_delta * grad_delta, axis=-1) # The first rank 1 update term requires the outer product: s.y^T. cross_term = _tensor_product(position_delta, conditioned_grad_delta) def _expand_scalar(s): # Expand dimensions of a batch of scalars to multiply or divide a matrix. return s[..., tf.newaxis, tf.newaxis] # Symmetrize cross_term += _tensor_product(conditioned_grad_delta, position_delta) position_term = _tensor_product(position_delta, position_delta) with tf.control_dependencies([position_term]): position_term *= _expand_scalar( 1 + conditioned_grad_delta_norm / normalization_factor) return (inv_hessian_estimate + (position_term - cross_term) / _expand_scalar(normalization_factor))
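A NumPy check, sketched here under the assumption that plain dense arrays stand in for the batched `Tensor`s, that the expanded update implemented above agrees with the textbook form `H_1 = U H_0 U^T + rho s s^T` from the docstring. Array names mirror the function's argument names.

```python
import numpy as np

rng = np.random.RandomState(0)
n = 4
position_delta = rng.randn(n)                    # s
grad_delta = rng.randn(n)                        # y
a = rng.randn(n, n)
h0 = a.dot(a.T) + n * np.eye(n)                  # Symmetric positive-definite H_0.

normalization_factor = grad_delta.dot(position_delta)   # 1 / rho
rho = 1.0 / normalization_factor
u = np.eye(n) - rho * np.outer(position_delta, grad_delta)
h1_textbook = u.dot(h0).dot(u.T) + rho * np.outer(position_delta, position_delta)

conditioned_grad_delta = h0.dot(grad_delta)      # H_0 . y
f = 1.0 + rho * grad_delta.dot(conditioned_grad_delta)
h1_expanded = (h0
               - rho * (np.outer(position_delta, conditioned_grad_delta)
                        + np.outer(conditioned_grad_delta, position_delta))
               + rho * f * np.outer(position_delta, position_delta))

assert np.allclose(h1_textbook, h1_expanded)
```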
[ "Applies", "the", "BFGS", "update", "to", "the", "inverse", "Hessian", "estimate", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs.py#L355-L433
[ "def", "_bfgs_inv_hessian_update", "(", "grad_delta", ",", "position_delta", ",", "normalization_factor", ",", "inv_hessian_estimate", ")", ":", "# The quadratic form: y^T.H.y; where H is the inverse Hessian and y is the", "# gradient change.", "conditioned_grad_delta", "=", "_mul_right", "(", "inv_hessian_estimate", ",", "grad_delta", ")", "conditioned_grad_delta_norm", "=", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "conditioned_grad_delta", "*", "grad_delta", ",", "axis", "=", "-", "1", ")", "# The first rank 1 update term requires the outer product: s.y^T.", "cross_term", "=", "_tensor_product", "(", "position_delta", ",", "conditioned_grad_delta", ")", "def", "_expand_scalar", "(", "s", ")", ":", "# Expand dimensions of a batch of scalars to multiply or divide a matrix.", "return", "s", "[", "...", ",", "tf", ".", "newaxis", ",", "tf", ".", "newaxis", "]", "# Symmetrize", "cross_term", "+=", "_tensor_product", "(", "conditioned_grad_delta", ",", "position_delta", ")", "position_term", "=", "_tensor_product", "(", "position_delta", ",", "position_delta", ")", "with", "tf", ".", "control_dependencies", "(", "[", "position_term", "]", ")", ":", "position_term", "*=", "_expand_scalar", "(", "1", "+", "conditioned_grad_delta_norm", "/", "normalization_factor", ")", "return", "(", "inv_hessian_estimate", "+", "(", "position_term", "-", "cross_term", ")", "/", "_expand_scalar", "(", "normalization_factor", ")", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_mul_right
Computes the product of a matrix with a vector on the right. Note this supports dynamic shapes and batched computation. Examples: M = tf.reshape(tf.range(6), shape=(3, 2)) # => [[0, 1], # [2, 3], # [4, 5]] v = tf.constant([1, 2]) # Shape: (2,) _mul_right(M, v) # => [ 2, 8, 14] # Shape: (3,) M = tf.reshape(tf.range(30), shape=(2, 3, 5)) # => [[[ 0, 1, 2, 3, 4], # [ 5, 6, 7, 8, 9], # [10, 11, 12, 13, 14]], # # [[15, 16, 17, 18, 19], # [20, 21, 22, 23, 24], # [25, 26, 27, 28, 29]]] v = tf.reshape(tf.range(10), shape=(2, 5)) # => [[0, 1, 2, 3, 4], # [5, 6, 7, 8, 9]] _mul_right(M, v) # => [[ 30, 80, 130], # [605, 780, 955]] # Shape: (2, 3) Args: mat: A `tf.Tensor` of shape `[..., n, m]`. vec: A `tf.Tensor` of shape `[..., m]`. Returns: A tensor of shape `[..., n]` with matching batch dimensions.
tensorflow_probability/python/optimizer/bfgs.py
def _mul_right(mat, vec): """Computes the product of a matrix with a vector on the right. Note this supports dynamic shapes and batched computation. Examples: M = tf.reshape(tf.range(6), shape=(3, 2)) # => [[0, 1], # [2, 3], # [4, 5]] v = tf.constant([1, 2]) # Shape: (2,) _mul_right(M, v) # => [ 2, 8, 14] # Shape: (3,) M = tf.reshape(tf.range(30), shape=(2, 3, 5)) # => [[[ 0, 1, 2, 3, 4], # [ 5, 6, 7, 8, 9], # [10, 11, 12, 13, 14]], # # [[15, 16, 17, 18, 19], # [20, 21, 22, 23, 24], # [25, 26, 27, 28, 29]]] v = tf.reshape(tf.range(10), shape=(2, 5)) # => [[0, 1, 2, 3, 4], # [5, 6, 7, 8, 9]] _mul_right(M, v) # => [[ 30, 80, 130], # [605, 780, 955]] # Shape: (2, 3) Args: mat: A `tf.Tensor` of shape `[..., n, m]`. vec: A `tf.Tensor` of shape `[..., m]`. Returns: A tensor of shape `[..., n]` with matching batch dimensions. """ return tf.squeeze(tf.matmul(mat, tf.expand_dims(vec, axis=-1)), axis=-1)
def _mul_right(mat, vec): """Computes the product of a matrix with a vector on the right. Note this supports dynamic shapes and batched computation. Examples: M = tf.reshape(tf.range(6), shape=(3, 2)) # => [[0, 1], # [2, 3], # [4, 5]] v = tf.constant([1, 2]) # Shape: (2,) _mul_right(M, v) # => [ 2, 8, 14] # Shape: (3,) M = tf.reshape(tf.range(30), shape=(2, 3, 5)) # => [[[ 0, 1, 2, 3, 4], # [ 5, 6, 7, 8, 9], # [10, 11, 12, 13, 14]], # # [[15, 16, 17, 18, 19], # [20, 21, 22, 23, 24], # [25, 26, 27, 28, 29]]] v = tf.reshape(tf.range(10), shape=(2, 5)) # => [[0, 1, 2, 3, 4], # [5, 6, 7, 8, 9]] _mul_right(M, v) # => [[ 30, 80, 130], # [605, 780, 955]] # Shape: (2, 3) Args: mat: A `tf.Tensor` of shape `[..., n, m]`. vec: A `tf.Tensor` of shape `[..., m]`. Returns: A tensor of shape `[..., n]` with matching batch dimensions. """ return tf.squeeze(tf.matmul(mat, tf.expand_dims(vec, axis=-1)), axis=-1)
[ "Computes", "the", "product", "of", "a", "matrix", "with", "a", "vector", "on", "the", "right", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs.py#L436-L473
[ "def", "_mul_right", "(", "mat", ",", "vec", ")", ":", "return", "tf", ".", "squeeze", "(", "tf", ".", "matmul", "(", "mat", ",", "tf", ".", "expand_dims", "(", "vec", ",", "axis", "=", "-", "1", ")", ")", ",", "axis", "=", "-", "1", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_tensor_product
Computes the outer product of two possibly batched vectors. Args: t1: A `tf.Tensor` of shape `[..., n]`. t2: A `tf.Tensor` of shape `[..., m]`. Returns: A tensor of shape `[..., n, m]` with matching batch dimensions, let's call it `r`, whose components are: ```None r[..., i, j] = t1[..., i] * t2[..., j] ```
tensorflow_probability/python/optimizer/bfgs.py
def _tensor_product(t1, t2): """Computes the outer product of two possibly batched vectors. Args: t1: A `tf.Tensor` of shape `[..., n]`. t2: A `tf.Tensor` of shape `[..., m]`. Returns: A tensor of shape `[..., n, m]` with matching batch dimensions, let's call it `r`, whose components are: ```None r[..., i, j] = t1[..., i] * t2[..., j] ``` """ return tf.matmul(tf.expand_dims(t1, axis=-1), tf.expand_dims(t2, axis=-2))
def _tensor_product(t1, t2): """Computes the outer product of two possibly batched vectors. Args: t1: A `tf.Tensor` of shape `[..., n]`. t2: A `tf.Tensor` of shape `[..., m]`. Returns: A tensor of shape `[..., n, m]` with matching batch dimensions, let's call it `r`, whose components are: ```None r[..., i, j] = t1[..., i] * t2[..., j] ``` """ return tf.matmul(tf.expand_dims(t1, axis=-1), tf.expand_dims(t2, axis=-2))
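A small NumPy analogue (an illustrative sketch, not the TF implementation) of the component formula `r[..., i, j] = t1[..., i] * t2[..., j]`, using the same expand-and-matmul construction for a batch of vectors.

```python
import numpy as np

t1 = np.arange(6.0).reshape(2, 3)   # Batch of two length-3 vectors.
t2 = np.arange(8.0).reshape(2, 4)   # Batch of two length-4 vectors.
r = np.matmul(t1[..., :, np.newaxis], t2[..., np.newaxis, :])  # Shape (2, 3, 4).
assert np.allclose(r, np.einsum('bi,bj->bij', t1, t2))
```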
[ "Computes", "the", "outer", "product", "of", "two", "possibly", "batched", "vectors", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs.py#L476-L491
[ "def", "_tensor_product", "(", "t1", ",", "t2", ")", ":", "return", "tf", ".", "matmul", "(", "tf", ".", "expand_dims", "(", "t1", ",", "axis", "=", "-", "1", ")", ",", "tf", ".", "expand_dims", "(", "t2", ",", "axis", "=", "-", "2", ")", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_batch_transpose
Transpose a possibly batched matrix. Args: mat: A `tf.Tensor` of shape `[..., n, m]`. Returns: A tensor of shape `[..., m, n]` with matching batch dimensions.
tensorflow_probability/python/optimizer/bfgs.py
def _batch_transpose(mat): """Transpose a possibly batched matrix. Args: mat: A `tf.Tensor` of shape `[..., n, m]`. Returns: A tensor of shape `[..., m, n]` with matching batch dimensions. """ n = distribution_util.prefer_static_rank(mat) perm = tf.range(n) perm = tf.concat([perm[:-2], [perm[-1], perm[-2]]], axis=0) return tf.transpose(a=mat, perm=perm)
def _batch_transpose(mat): """Transpose a possibly batched matrix. Args: mat: A `tf.Tensor` of shape `[..., n, m]`. Returns: A tensor of shape `[..., m, n]` with matching batch dimensions. """ n = distribution_util.prefer_static_rank(mat) perm = tf.range(n) perm = tf.concat([perm[:-2], [perm[-1], perm[-2]]], axis=0) return tf.transpose(a=mat, perm=perm)
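For reference, a NumPy sketch of the same batched transpose: it is simply a swap of the last two axes, leaving any batch dimensions untouched.

```python
import numpy as np

mat = np.arange(24.0).reshape(2, 3, 4)
assert np.swapaxes(mat, -1, -2).shape == (2, 4, 3)
```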
[ "Transpose", "a", "possibly", "batched", "matrix", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs.py#L494-L506
[ "def", "_batch_transpose", "(", "mat", ")", ":", "n", "=", "distribution_util", ".", "prefer_static_rank", "(", "mat", ")", "perm", "=", "tf", ".", "range", "(", "n", ")", "perm", "=", "tf", ".", "concat", "(", "[", "perm", "[", ":", "-", "2", "]", ",", "[", "perm", "[", "-", "1", "]", ",", "perm", "[", "-", "2", "]", "]", "]", ",", "axis", "=", "0", ")", "return", "tf", ".", "transpose", "(", "a", "=", "mat", ",", "perm", "=", "perm", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
pad_shape_right_with_ones
Maybe add `ndims` ones to `x.shape` on the right. If `ndims` is zero, this is a no-op; otherwise, we will create and return a new `Tensor` whose shape is that of `x` with `ndims` ones concatenated on the right side. If the shape of `x` is known statically, the shape of the return value will be as well. Args: x: The `Tensor` we'll return a reshaping of. ndims: Python `integer` number of ones to pad onto `x.shape`. Returns: If `ndims` is zero, `x`; otherwise, a `Tensor` whose shape is that of `x` with `ndims` ones concatenated on the right side. If possible, returns a `Tensor` whose shape is known statically. Raises: ValueError: if `ndims` is not a Python `integer` greater than or equal to zero.
tensorflow_probability/python/positive_semidefinite_kernels/internal/util.py
def pad_shape_right_with_ones(x, ndims): """Maybe add `ndims` ones to `x.shape` on the right. If `ndims` is zero, this is a no-op; otherwise, we will create and return a new `Tensor` whose shape is that of `x` with `ndims` ones concatenated on the right side. If the shape of `x` is known statically, the shape of the return value will be as well. Args: x: The `Tensor` we'll return a reshaping of. ndims: Python `integer` number of ones to pad onto `x.shape`. Returns: If `ndims` is zero, `x`; otherwise, a `Tensor` whose shape is that of `x` with `ndims` ones concatenated on the right side. If possible, returns a `Tensor` whose shape is known statically. Raises: ValueError: if `ndims` is not a Python `integer` greater than or equal to zero. """ if not (isinstance(ndims, int) and ndims >= 0): raise ValueError( '`ndims` must be a Python `integer` greater than zero. Got: {}' .format(ndims)) if ndims == 0: return x x = tf.convert_to_tensor(value=x) original_shape = x.shape new_shape = distribution_util.pad( tf.shape(input=x), axis=0, back=True, value=1, count=ndims) x = tf.reshape(x, new_shape) x.set_shape(original_shape.concatenate([1]*ndims)) return x
def pad_shape_right_with_ones(x, ndims): """Maybe add `ndims` ones to `x.shape` on the right. If `ndims` is zero, this is a no-op; otherwise, we will create and return a new `Tensor` whose shape is that of `x` with `ndims` ones concatenated on the right side. If the shape of `x` is known statically, the shape of the return value will be as well. Args: x: The `Tensor` we'll return a reshaping of. ndims: Python `integer` number of ones to pad onto `x.shape`. Returns: If `ndims` is zero, `x`; otherwise, a `Tensor` whose shape is that of `x` with `ndims` ones concatenated on the right side. If possible, returns a `Tensor` whose shape is known statically. Raises: ValueError: if `ndims` is not a Python `integer` greater than or equal to zero. """ if not (isinstance(ndims, int) and ndims >= 0): raise ValueError( '`ndims` must be a Python `integer` greater than zero. Got: {}' .format(ndims)) if ndims == 0: return x x = tf.convert_to_tensor(value=x) original_shape = x.shape new_shape = distribution_util.pad( tf.shape(input=x), axis=0, back=True, value=1, count=ndims) x = tf.reshape(x, new_shape) x.set_shape(original_shape.concatenate([1]*ndims)) return x
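A hedged usage sketch based on the docstring above; only the resulting static shapes are of interest, and the snippet assumes `pad_shape_right_with_ones` is in scope alongside TensorFlow.

```python
import tensorflow as tf

x = tf.zeros([3, 4])
y = pad_shape_right_with_ones(x, ndims=2)   # Static shape [3, 4, 1, 1].
z = pad_shape_right_with_ones(x, ndims=0)   # No-op: returns `x` unchanged.
```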
[ "Maybe", "add", "ndims", "ones", "to", "x", ".", "shape", "on", "the", "right", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/positive_semidefinite_kernels/internal/util.py#L34-L65
[ "def", "pad_shape_right_with_ones", "(", "x", ",", "ndims", ")", ":", "if", "not", "(", "isinstance", "(", "ndims", ",", "int", ")", "and", "ndims", ">=", "0", ")", ":", "raise", "ValueError", "(", "'`ndims` must be a Python `integer` greater than zero. Got: {}'", ".", "format", "(", "ndims", ")", ")", "if", "ndims", "==", "0", ":", "return", "x", "x", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x", ")", "original_shape", "=", "x", ".", "shape", "new_shape", "=", "distribution_util", ".", "pad", "(", "tf", ".", "shape", "(", "input", "=", "x", ")", ",", "axis", "=", "0", ",", "back", "=", "True", ",", "value", "=", "1", ",", "count", "=", "ndims", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "new_shape", ")", "x", ".", "set_shape", "(", "original_shape", ".", "concatenate", "(", "[", "1", "]", "*", "ndims", ")", ")", "return", "x" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
sum_rightmost_ndims_preserving_shape
Return `Tensor` with right-most ndims summed. Args: x: the `Tensor` whose right-most `ndims` dimensions to sum ndims: number of right-most dimensions to sum. Returns: A `Tensor` resulting from calling `reduce_sum` on the `ndims` right-most dimensions. If the shape of `x` is statically known, the result will also have statically known shape. Otherwise, the resulting shape will only be known at runtime.
tensorflow_probability/python/positive_semidefinite_kernels/internal/util.py
def sum_rightmost_ndims_preserving_shape(x, ndims): """Return `Tensor` with right-most ndims summed. Args: x: the `Tensor` whose right-most `ndims` dimensions to sum ndims: number of right-most dimensions to sum. Returns: A `Tensor` resulting from calling `reduce_sum` on the `ndims` right-most dimensions. If the shape of `x` is statically known, the result will also have statically known shape. Otherwise, the resulting shape will only be known at runtime. """ x = tf.convert_to_tensor(value=x) if x.shape.ndims is not None: axes = tf.range(x.shape.ndims - ndims, x.shape.ndims) else: axes = tf.range(tf.rank(x) - ndims, tf.rank(x)) return tf.reduce_sum(input_tensor=x, axis=axes)
def sum_rightmost_ndims_preserving_shape(x, ndims): """Return `Tensor` with right-most ndims summed. Args: x: the `Tensor` whose right-most `ndims` dimensions to sum ndims: number of right-most dimensions to sum. Returns: A `Tensor` resulting from calling `reduce_sum` on the `ndims` right-most dimensions. If the shape of `x` is statically known, the result will also have statically known shape. Otherwise, the resulting shape will only be known at runtime. """ x = tf.convert_to_tensor(value=x) if x.shape.ndims is not None: axes = tf.range(x.shape.ndims - ndims, x.shape.ndims) else: axes = tf.range(tf.rank(x) - ndims, tf.rank(x)) return tf.reduce_sum(input_tensor=x, axis=axes)
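A short usage sketch (assuming the function is in scope): summing the two right-most dimensions of a `[2, 3, 4]` tensor of ones reduces over axes `[1, 2]` and leaves a shape-`[2]` result.

```python
import tensorflow as tf

x = tf.ones([2, 3, 4])
s = sum_rightmost_ndims_preserving_shape(x, ndims=2)  # Shape [2]; each entry is 12.
```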
[ "Return", "Tensor", "with", "right", "-", "most", "ndims", "summed", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/positive_semidefinite_kernels/internal/util.py#L68-L86
[ "def", "sum_rightmost_ndims_preserving_shape", "(", "x", ",", "ndims", ")", ":", "x", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x", ")", "if", "x", ".", "shape", ".", "ndims", "is", "not", "None", ":", "axes", "=", "tf", ".", "range", "(", "x", ".", "shape", ".", "ndims", "-", "ndims", ",", "x", ".", "shape", ".", "ndims", ")", "else", ":", "axes", "=", "tf", ".", "range", "(", "tf", ".", "rank", "(", "x", ")", "-", "ndims", ",", "tf", ".", "rank", "(", "x", ")", ")", "return", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "x", ",", "axis", "=", "axes", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
sqrt_with_finite_grads
A sqrt function whose gradient at zero is very large but finite. Args: x: a `Tensor` whose sqrt is to be computed. name: a Python `str` prefixed to all ops created by this function. Default `None` (i.e., "sqrt_with_finite_grads"). Returns: sqrt: the square root of `x`, with an overridden gradient at zero grad: a gradient function, which is the same as sqrt's gradient everywhere except at zero, where it is given a large finite value, instead of `inf`. Raises: TypeError: if `tf.convert_to_tensor(x)` is not a `float` type. Often in kernel functions, we need to compute the L2 norm of the difference between two vectors, `x` and `y`: `sqrt(sum_i((x_i - y_i) ** 2))`. In the case where `x` and `y` are identical, e.g., on the diagonal of a kernel matrix, we get `NaN`s when we take gradients with respect to the inputs. To see, this consider the forward pass: ``` [x_1 ... x_N] --> [x_1 ** 2 ... x_N ** 2] --> (x_1 ** 2 + ... + x_N ** 2) --> sqrt((x_1 ** 2 + ... + x_N ** 2)) ``` When we backprop through this forward pass, the `sqrt` yields an `inf` because `grad_z(sqrt(z)) = 1 / (2 * sqrt(z))`. Continuing the backprop to the left, at the `x ** 2` term, we pick up a `2 * x`, and when `x` is zero, we get `0 * inf`, which is `NaN`. We'd like to avoid these `NaN`s, since they infect the rest of the connected computation graph. Practically, when two inputs to a kernel function are equal, we are in one of two scenarios: 1. We are actually computing k(x, x), in which case norm(x - x) is identically zero, independent of x. In this case, we'd like the gradient to reflect this independence: it should be zero. 2. We are computing k(x, y), and x just *happens* to have the same value as y. The gradient at such inputs is in fact ill-defined (there is a cusp in the sqrt((x - y) ** 2) surface along the line x = y). There are, however, an infinite number of sub-gradients, all of which are valid at all such inputs. By symmetry, there is exactly one which is "special": zero, and we elect to use that value here. In practice, having two identical inputs to a kernel matrix is probably a pathological situation to be avoided, but that is better resolved at a higher level than this. To avoid the infinite gradient at zero, we use tf.custom_gradient to redefine the gradient at zero. We assign it to be a very large value, specifically the sqrt of the max value of the floating point dtype of the input. We use the sqrt (as opposed to just using the max floating point value) to avoid potential overflow when combining this value with others downstream.
tensorflow_probability/python/positive_semidefinite_kernels/internal/util.py
def sqrt_with_finite_grads(x, name=None): """A sqrt function whose gradient at zero is very large but finite. Args: x: a `Tensor` whose sqrt is to be computed. name: a Python `str` prefixed to all ops created by this function. Default `None` (i.e., "sqrt_with_finite_grads"). Returns: sqrt: the square root of `x`, with an overridden gradient at zero grad: a gradient function, which is the same as sqrt's gradient everywhere except at zero, where it is given a large finite value, instead of `inf`. Raises: TypeError: if `tf.convert_to_tensor(x)` is not a `float` type. Often in kernel functions, we need to compute the L2 norm of the difference between two vectors, `x` and `y`: `sqrt(sum_i((x_i - y_i) ** 2))`. In the case where `x` and `y` are identical, e.g., on the diagonal of a kernel matrix, we get `NaN`s when we take gradients with respect to the inputs. To see, this consider the forward pass: ``` [x_1 ... x_N] --> [x_1 ** 2 ... x_N ** 2] --> (x_1 ** 2 + ... + x_N ** 2) --> sqrt((x_1 ** 2 + ... + x_N ** 2)) ``` When we backprop through this forward pass, the `sqrt` yields an `inf` because `grad_z(sqrt(z)) = 1 / (2 * sqrt(z))`. Continuing the backprop to the left, at the `x ** 2` term, we pick up a `2 * x`, and when `x` is zero, we get `0 * inf`, which is `NaN`. We'd like to avoid these `NaN`s, since they infect the rest of the connected computation graph. Practically, when two inputs to a kernel function are equal, we are in one of two scenarios: 1. We are actually computing k(x, x), in which case norm(x - x) is identically zero, independent of x. In this case, we'd like the gradient to reflect this independence: it should be zero. 2. We are computing k(x, y), and x just *happens* to have the same value as y. The gradient at such inputs is in fact ill-defined (there is a cusp in the sqrt((x - y) ** 2) surface along the line x = y). There are, however, an infinite number of sub-gradients, all of which are valid at all such inputs. By symmetry, there is exactly one which is "special": zero, and we elect to use that value here. In practice, having two identical inputs to a kernel matrix is probably a pathological situation to be avoided, but that is better resolved at a higher level than this. To avoid the infinite gradient at zero, we use tf.custom_gradient to redefine the gradient at zero. We assign it to be a very large value, specifically the sqrt of the max value of the floating point dtype of the input. We use the sqrt (as opposed to just using the max floating point value) to avoid potential overflow when combining this value with others downstream. """ with tf.compat.v1.name_scope(name, 'sqrt_with_finite_grads', [x]): x = tf.convert_to_tensor(value=x, name='x') if not x.dtype.is_floating: raise TypeError('Input `x` must be floating type.') def grad(grad_ys): large_float_like_x = np.sqrt(np.finfo(x.dtype.as_numpy_dtype()).max) safe_grads = tf.where( tf.equal(x, 0), tf.fill(tf.shape(input=x), large_float_like_x), 0.5 * tf.math.rsqrt(x)) return grad_ys * safe_grads return tf.sqrt(x), grad
def sqrt_with_finite_grads(x, name=None): """A sqrt function whose gradient at zero is very large but finite. Args: x: a `Tensor` whose sqrt is to be computed. name: a Python `str` prefixed to all ops created by this function. Default `None` (i.e., "sqrt_with_finite_grads"). Returns: sqrt: the square root of `x`, with an overridden gradient at zero grad: a gradient function, which is the same as sqrt's gradient everywhere except at zero, where it is given a large finite value, instead of `inf`. Raises: TypeError: if `tf.convert_to_tensor(x)` is not a `float` type. Often in kernel functions, we need to compute the L2 norm of the difference between two vectors, `x` and `y`: `sqrt(sum_i((x_i - y_i) ** 2))`. In the case where `x` and `y` are identical, e.g., on the diagonal of a kernel matrix, we get `NaN`s when we take gradients with respect to the inputs. To see, this consider the forward pass: ``` [x_1 ... x_N] --> [x_1 ** 2 ... x_N ** 2] --> (x_1 ** 2 + ... + x_N ** 2) --> sqrt((x_1 ** 2 + ... + x_N ** 2)) ``` When we backprop through this forward pass, the `sqrt` yields an `inf` because `grad_z(sqrt(z)) = 1 / (2 * sqrt(z))`. Continuing the backprop to the left, at the `x ** 2` term, we pick up a `2 * x`, and when `x` is zero, we get `0 * inf`, which is `NaN`. We'd like to avoid these `NaN`s, since they infect the rest of the connected computation graph. Practically, when two inputs to a kernel function are equal, we are in one of two scenarios: 1. We are actually computing k(x, x), in which case norm(x - x) is identically zero, independent of x. In this case, we'd like the gradient to reflect this independence: it should be zero. 2. We are computing k(x, y), and x just *happens* to have the same value as y. The gradient at such inputs is in fact ill-defined (there is a cusp in the sqrt((x - y) ** 2) surface along the line x = y). There are, however, an infinite number of sub-gradients, all of which are valid at all such inputs. By symmetry, there is exactly one which is "special": zero, and we elect to use that value here. In practice, having two identical inputs to a kernel matrix is probably a pathological situation to be avoided, but that is better resolved at a higher level than this. To avoid the infinite gradient at zero, we use tf.custom_gradient to redefine the gradient at zero. We assign it to be a very large value, specifically the sqrt of the max value of the floating point dtype of the input. We use the sqrt (as opposed to just using the max floating point value) to avoid potential overflow when combining this value with others downstream. """ with tf.compat.v1.name_scope(name, 'sqrt_with_finite_grads', [x]): x = tf.convert_to_tensor(value=x, name='x') if not x.dtype.is_floating: raise TypeError('Input `x` must be floating type.') def grad(grad_ys): large_float_like_x = np.sqrt(np.finfo(x.dtype.as_numpy_dtype()).max) safe_grads = tf.where( tf.equal(x, 0), tf.fill(tf.shape(input=x), large_float_like_x), 0.5 * tf.math.rsqrt(x)) return grad_ys * safe_grads return tf.sqrt(x), grad
[ "A", "sqrt", "function", "whose", "gradient", "at", "zero", "is", "very", "large", "but", "finite", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/positive_semidefinite_kernels/internal/util.py#L90-L154
[ "def", "sqrt_with_finite_grads", "(", "x", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'sqrt_with_finite_grads'", ",", "[", "x", "]", ")", ":", "x", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x", ",", "name", "=", "'x'", ")", "if", "not", "x", ".", "dtype", ".", "is_floating", ":", "raise", "TypeError", "(", "'Input `x` must be floating type.'", ")", "def", "grad", "(", "grad_ys", ")", ":", "large_float_like_x", "=", "np", ".", "sqrt", "(", "np", ".", "finfo", "(", "x", ".", "dtype", ".", "as_numpy_dtype", "(", ")", ")", ".", "max", ")", "safe_grads", "=", "tf", ".", "where", "(", "tf", ".", "equal", "(", "x", ",", "0", ")", ",", "tf", ".", "fill", "(", "tf", ".", "shape", "(", "input", "=", "x", ")", ",", "large_float_like_x", ")", ",", "0.5", "*", "tf", ".", "math", ".", "rsqrt", "(", "x", ")", ")", "return", "grad_ys", "*", "safe_grads", "return", "tf", ".", "sqrt", "(", "x", ")", ",", "grad" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
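The snippet in the record returns a `(value, grad)` pair, which is the contract of `tf.custom_gradient`. A hedged, standalone sketch (not part of the source) of the same capped-gradient-at-zero idea, with the decorator written explicitly; the function name `sqrt_capped_grad` and the test values are illustrative assumptions:

```python
import numpy as np
import tensorflow as tf

@tf.custom_gradient
def sqrt_capped_grad(x):
  def grad(dy):
    # Large but finite stand-in for the infinite gradient of sqrt at zero.
    big = np.sqrt(np.finfo(x.dtype.as_numpy_dtype()).max)
    safe = tf.where(tf.equal(x, 0.),
                    tf.fill(tf.shape(x), tf.cast(big, x.dtype)),
                    0.5 * tf.math.rsqrt(x))
    return dy * safe
  return tf.sqrt(x), grad

x = tf.constant([0., 1., 4.])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = sqrt_capped_grad(x)
print(tape.gradient(y, x))  # Finite everywhere, including at x == 0.
```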
test
maybe_get_common_dtype
Return common dtype of arg_list, or None. Args: arg_list: an iterable of items which are either `None` or have a `dtype` property. Returns: dtype: The common dtype of items in `arg_list`, or `None` if the list is empty or all items are `None`.
tensorflow_probability/python/positive_semidefinite_kernels/internal/util.py
def maybe_get_common_dtype(arg_list): """Return common dtype of arg_list, or None. Args: arg_list: an iterable of items which are either `None` or have a `dtype` property. Returns: dtype: The common dtype of items in `arg_list`, or `None` if the list is empty or all items are `None`. """ # Note that `all` defaults to `True` if `arg_list` is empty. if all(a is None for a in arg_list): return None return dtype_util.common_dtype(arg_list, tf.float32)
def maybe_get_common_dtype(arg_list): """Return common dtype of arg_list, or None. Args: arg_list: an iterable of items which are either `None` or have a `dtype` property. Returns: dtype: The common dtype of items in `arg_list`, or `None` if the list is empty or all items are `None`. """ # Note that `all` defaults to `True` if `arg_list` is empty. if all(a is None for a in arg_list): return None return dtype_util.common_dtype(arg_list, tf.float32)
[ "Return", "common", "dtype", "of", "arg_list", "or", "None", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/positive_semidefinite_kernels/internal/util.py#L157-L171
[ "def", "maybe_get_common_dtype", "(", "arg_list", ")", ":", "# Note that `all` defaults to `True` if `arg_list` is empty.", "if", "all", "(", "a", "is", "None", "for", "a", "in", "arg_list", ")", ":", "return", "None", "return", "dtype_util", ".", "common_dtype", "(", "arg_list", ",", "tf", ".", "float32", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
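A hedged sketch (not part of the source) of the dtype-resolution idea in the record above, written without the internal `dtype_util` helper: return `None` when every argument is `None`, otherwise the shared dtype of the non-`None` items. The stand-in function name and the plain `assert` agreement check are illustrative assumptions; the real helper delegates that check (and a `tf.float32` fallback) to `dtype_util.common_dtype`:

```python
import tensorflow as tf

def common_dtype_or_none(arg_list):
  dtypes = [a.dtype for a in arg_list if a is not None]
  if not dtypes:
    return None
  # Stand-in for dtype_util.common_dtype's agreement check.
  assert all(d == dtypes[0] for d in dtypes), 'mismatched dtypes'
  return dtypes[0]

print(common_dtype_or_none([None, None]))                         # None
print(common_dtype_or_none([None, tf.constant(1., tf.float64)]))  # float64
```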
test
minimize
Applies the L-BFGS algorithm to minimize a differentiable function. Performs unconstrained minimization of a differentiable function using the L-BFGS scheme. See [Nocedal and Wright(2006)][1] for details of the algorithm. ### Usage: The following example demonstrates the L-BFGS optimizer attempting to find the minimum for a simple high-dimensional quadratic objective function. ```python # A high-dimensional quadratic bowl. ndims = 60 minimum = np.ones([ndims], dtype='float64') scales = np.arange(ndims, dtype='float64') + 1.0 # The objective function and the gradient. def quadratic(x): value = tf.reduce_sum(scales * (x - minimum) ** 2) return value, tf.gradients(value, x)[0] start = np.arange(ndims, 0, -1, dtype='float64') optim_results = tfp.optimizer.lbfgs_minimize( quadratic, initial_position=start, num_correction_pairs=10, tolerance=1e-8) with tf.Session() as session: results = session.run(optim_results) # Check that the search converged assert(results.converged) # Check that the argmin is close to the actual value. np.testing.assert_allclose(results.position, minimum) ``` ### References: [1] Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in Operations Research. pp 176-180. 2006 http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf Args: value_and_gradients_function: A Python callable that accepts a point as a real `Tensor` and returns a tuple of `Tensor`s of real dtype containing the value of the function and its gradient at that point. The function to be minimized. The input is of shape `[..., n]`, where `n` is the size of the domain of input points, and all others are batching dimensions. The first component of the return value is a real `Tensor` of matching shape `[...]`. The second component (the gradient) is also of shape `[..., n]` like the input value to the function. initial_position: Real `Tensor` of shape `[..., n]`. The starting point, or points when using batching dimensions, of the search procedure. At these points the function value and the gradient norm should be finite. num_correction_pairs: Positive integer. Specifies the maximum number of (position_delta, gradient_delta) correction pairs to keep as implicit approximation of the Hessian matrix. tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance for the procedure. If the supremum norm of the gradient vector is below this number, the algorithm is stopped. x_tolerance: Scalar `Tensor` of real dtype. If the absolute change in the position between one iteration and the next is smaller than this number, the algorithm is stopped. f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change in the objective value between one iteration and the next is smaller than this value, the algorithm is stopped. initial_inverse_hessian_estimate: None. Option currently not supported. max_iterations: Scalar positive int32 `Tensor`. The maximum number of iterations for L-BFGS updates. parallel_iterations: Positive integer. The number of iterations allowed to run in parallel. stopping_condition: (Optional) A Python function that takes as input two Boolean tensors of shape `[...]`, and returns a Boolean scalar tensor. The input tensors are `converged` and `failed`, indicating the current status of each respective batch member; the return value states whether the algorithm should stop. The default is tfp.optimizer.converged_all which only stops when all batch members have either converged or failed. 
An alternative is tfp.optimizer.converged_any which stops as soon as one batch member has converged, or when all have failed. name: (Optional) Python str. The name prefixed to the ops created by this function. If not supplied, the default name 'minimize' is used. Returns: optimizer_results: A namedtuple containing the following items: converged: Scalar boolean tensor indicating whether the minimum was found within tolerance. failed: Scalar boolean tensor indicating whether a line search step failed to find a suitable step size satisfying Wolfe conditions. In the absence of any constraints on the number of objective evaluations permitted, this value will be the complement of `converged`. However, if there is a constraint and the search stopped due to available evaluations being exhausted, both `failed` and `converged` will be simultaneously False. num_objective_evaluations: The total number of objective evaluations performed. position: A tensor containing the last argument value found during the search. If the search converged, then this value is the argmin of the objective function. objective_value: A tensor containing the value of the objective function at the `position`. If the search converged, then this is the (local) minimum of the objective function. objective_gradient: A tensor containing the gradient of the objective function at the `position`. If the search converged the max-norm of this tensor should be below the tolerance. position_deltas: A tensor encoding information about the latest changes in `position` during the algorithm execution. gradient_deltas: A tensor encoding information about the latest changes in `objective_gradient` during the algorithm execution.
tensorflow_probability/python/optimizer/lbfgs.py
def minimize(value_and_gradients_function, initial_position, num_correction_pairs=10, tolerance=1e-8, x_tolerance=0, f_relative_tolerance=0, initial_inverse_hessian_estimate=None, max_iterations=50, parallel_iterations=1, stopping_condition=None, name=None): """Applies the L-BFGS algorithm to minimize a differentiable function. Performs unconstrained minimization of a differentiable function using the L-BFGS scheme. See [Nocedal and Wright(2006)][1] for details of the algorithm. ### Usage: The following example demonstrates the L-BFGS optimizer attempting to find the minimum for a simple high-dimensional quadratic objective function. ```python # A high-dimensional quadratic bowl. ndims = 60 minimum = np.ones([ndims], dtype='float64') scales = np.arange(ndims, dtype='float64') + 1.0 # The objective function and the gradient. def quadratic(x): value = tf.reduce_sum(scales * (x - minimum) ** 2) return value, tf.gradients(value, x)[0] start = np.arange(ndims, 0, -1, dtype='float64') optim_results = tfp.optimizer.lbfgs_minimize( quadratic, initial_position=start, num_correction_pairs=10, tolerance=1e-8) with tf.Session() as session: results = session.run(optim_results) # Check that the search converged assert(results.converged) # Check that the argmin is close to the actual value. np.testing.assert_allclose(results.position, minimum) ``` ### References: [1] Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in Operations Research. pp 176-180. 2006 http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf Args: value_and_gradients_function: A Python callable that accepts a point as a real `Tensor` and returns a tuple of `Tensor`s of real dtype containing the value of the function and its gradient at that point. The function to be minimized. The input is of shape `[..., n]`, where `n` is the size of the domain of input points, and all others are batching dimensions. The first component of the return value is a real `Tensor` of matching shape `[...]`. The second component (the gradient) is also of shape `[..., n]` like the input value to the function. initial_position: Real `Tensor` of shape `[..., n]`. The starting point, or points when using batching dimensions, of the search procedure. At these points the function value and the gradient norm should be finite. num_correction_pairs: Positive integer. Specifies the maximum number of (position_delta, gradient_delta) correction pairs to keep as implicit approximation of the Hessian matrix. tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance for the procedure. If the supremum norm of the gradient vector is below this number, the algorithm is stopped. x_tolerance: Scalar `Tensor` of real dtype. If the absolute change in the position between one iteration and the next is smaller than this number, the algorithm is stopped. f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change in the objective value between one iteration and the next is smaller than this value, the algorithm is stopped. initial_inverse_hessian_estimate: None. Option currently not supported. max_iterations: Scalar positive int32 `Tensor`. The maximum number of iterations for L-BFGS updates. parallel_iterations: Positive integer. The number of iterations allowed to run in parallel. stopping_condition: (Optional) A Python function that takes as input two Boolean tensors of shape `[...]`, and returns a Boolean scalar tensor. 
The input tensors are `converged` and `failed`, indicating the current status of each respective batch member; the return value states whether the algorithm should stop. The default is tfp.optimizer.converged_all which only stops when all batch members have either converged or failed. An alternative is tfp.optimizer.converged_any which stops as soon as one batch member has converged, or when all have failed. name: (Optional) Python str. The name prefixed to the ops created by this function. If not supplied, the default name 'minimize' is used. Returns: optimizer_results: A namedtuple containing the following items: converged: Scalar boolean tensor indicating whether the minimum was found within tolerance. failed: Scalar boolean tensor indicating whether a line search step failed to find a suitable step size satisfying Wolfe conditions. In the absence of any constraints on the number of objective evaluations permitted, this value will be the complement of `converged`. However, if there is a constraint and the search stopped due to available evaluations being exhausted, both `failed` and `converged` will be simultaneously False. num_objective_evaluations: The total number of objective evaluations performed. position: A tensor containing the last argument value found during the search. If the search converged, then this value is the argmin of the objective function. objective_value: A tensor containing the value of the objective function at the `position`. If the search converged, then this is the (local) minimum of the objective function. objective_gradient: A tensor containing the gradient of the objective function at the `position`. If the search converged the max-norm of this tensor should be below the tolerance. position_deltas: A tensor encoding information about the latest changes in `position` during the algorithm execution. gradient_deltas: A tensor encoding information about the latest changes in `objective_gradient` during the algorithm execution. """ if initial_inverse_hessian_estimate is not None: raise NotImplementedError( 'Support of initial_inverse_hessian_estimate arg not yet implemented') if stopping_condition is None: stopping_condition = bfgs_utils.converged_all with tf.compat.v1.name_scope(name, 'minimize', [initial_position, tolerance]): initial_position = tf.convert_to_tensor( value=initial_position, name='initial_position') dtype = initial_position.dtype.base_dtype tolerance = tf.convert_to_tensor( value=tolerance, dtype=dtype, name='grad_tolerance') f_relative_tolerance = tf.convert_to_tensor( value=f_relative_tolerance, dtype=dtype, name='f_relative_tolerance') x_tolerance = tf.convert_to_tensor( value=x_tolerance, dtype=dtype, name='x_tolerance') max_iterations = tf.convert_to_tensor( value=max_iterations, name='max_iterations') # The `state` here is a `LBfgsOptimizerResults` tuple with values for the # current state of the algorithm computation. def _cond(state): """Continue if iterations remain and stopping condition is not met.""" return ((state.num_iterations < max_iterations) & tf.logical_not(stopping_condition(state.converged, state.failed))) def _body(current_state): """Main optimization loop.""" search_direction = _get_search_direction(current_state) # TODO(b/120134934): Check if the derivative at the start point is not # negative, if so then reset position/gradient deltas and recompute # search direction. 
next_state = bfgs_utils.line_search_step( current_state, value_and_gradients_function, search_direction, tolerance, f_relative_tolerance, x_tolerance, stopping_condition) # If not failed or converged, update the Hessian estimate. should_update = ~(next_state.converged | next_state.failed) state_after_inv_hessian_update = bfgs_utils.update_fields( next_state, position_deltas=_queue_push( current_state.position_deltas, should_update, next_state.position - current_state.position), gradient_deltas=_queue_push( current_state.gradient_deltas, should_update, next_state.objective_gradient - current_state.objective_gradient)) return [state_after_inv_hessian_update] initial_state = _get_initial_state(value_and_gradients_function, initial_position, num_correction_pairs, tolerance) return tf.while_loop( cond=_cond, body=_body, loop_vars=[initial_state], parallel_iterations=parallel_iterations)[0]
def minimize(value_and_gradients_function, initial_position, num_correction_pairs=10, tolerance=1e-8, x_tolerance=0, f_relative_tolerance=0, initial_inverse_hessian_estimate=None, max_iterations=50, parallel_iterations=1, stopping_condition=None, name=None): """Applies the L-BFGS algorithm to minimize a differentiable function. Performs unconstrained minimization of a differentiable function using the L-BFGS scheme. See [Nocedal and Wright(2006)][1] for details of the algorithm. ### Usage: The following example demonstrates the L-BFGS optimizer attempting to find the minimum for a simple high-dimensional quadratic objective function. ```python # A high-dimensional quadratic bowl. ndims = 60 minimum = np.ones([ndims], dtype='float64') scales = np.arange(ndims, dtype='float64') + 1.0 # The objective function and the gradient. def quadratic(x): value = tf.reduce_sum(scales * (x - minimum) ** 2) return value, tf.gradients(value, x)[0] start = np.arange(ndims, 0, -1, dtype='float64') optim_results = tfp.optimizer.lbfgs_minimize( quadratic, initial_position=start, num_correction_pairs=10, tolerance=1e-8) with tf.Session() as session: results = session.run(optim_results) # Check that the search converged assert(results.converged) # Check that the argmin is close to the actual value. np.testing.assert_allclose(results.position, minimum) ``` ### References: [1] Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in Operations Research. pp 176-180. 2006 http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf Args: value_and_gradients_function: A Python callable that accepts a point as a real `Tensor` and returns a tuple of `Tensor`s of real dtype containing the value of the function and its gradient at that point. The function to be minimized. The input is of shape `[..., n]`, where `n` is the size of the domain of input points, and all others are batching dimensions. The first component of the return value is a real `Tensor` of matching shape `[...]`. The second component (the gradient) is also of shape `[..., n]` like the input value to the function. initial_position: Real `Tensor` of shape `[..., n]`. The starting point, or points when using batching dimensions, of the search procedure. At these points the function value and the gradient norm should be finite. num_correction_pairs: Positive integer. Specifies the maximum number of (position_delta, gradient_delta) correction pairs to keep as implicit approximation of the Hessian matrix. tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance for the procedure. If the supremum norm of the gradient vector is below this number, the algorithm is stopped. x_tolerance: Scalar `Tensor` of real dtype. If the absolute change in the position between one iteration and the next is smaller than this number, the algorithm is stopped. f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change in the objective value between one iteration and the next is smaller than this value, the algorithm is stopped. initial_inverse_hessian_estimate: None. Option currently not supported. max_iterations: Scalar positive int32 `Tensor`. The maximum number of iterations for L-BFGS updates. parallel_iterations: Positive integer. The number of iterations allowed to run in parallel. stopping_condition: (Optional) A Python function that takes as input two Boolean tensors of shape `[...]`, and returns a Boolean scalar tensor. 
The input tensors are `converged` and `failed`, indicating the current status of each respective batch member; the return value states whether the algorithm should stop. The default is tfp.optimizer.converged_all which only stops when all batch members have either converged or failed. An alternative is tfp.optimizer.converged_any which stops as soon as one batch member has converged, or when all have failed. name: (Optional) Python str. The name prefixed to the ops created by this function. If not supplied, the default name 'minimize' is used. Returns: optimizer_results: A namedtuple containing the following items: converged: Scalar boolean tensor indicating whether the minimum was found within tolerance. failed: Scalar boolean tensor indicating whether a line search step failed to find a suitable step size satisfying Wolfe conditions. In the absence of any constraints on the number of objective evaluations permitted, this value will be the complement of `converged`. However, if there is a constraint and the search stopped due to available evaluations being exhausted, both `failed` and `converged` will be simultaneously False. num_objective_evaluations: The total number of objective evaluations performed. position: A tensor containing the last argument value found during the search. If the search converged, then this value is the argmin of the objective function. objective_value: A tensor containing the value of the objective function at the `position`. If the search converged, then this is the (local) minimum of the objective function. objective_gradient: A tensor containing the gradient of the objective function at the `position`. If the search converged the max-norm of this tensor should be below the tolerance. position_deltas: A tensor encoding information about the latest changes in `position` during the algorithm execution. gradient_deltas: A tensor encoding information about the latest changes in `objective_gradient` during the algorithm execution. """ if initial_inverse_hessian_estimate is not None: raise NotImplementedError( 'Support of initial_inverse_hessian_estimate arg not yet implemented') if stopping_condition is None: stopping_condition = bfgs_utils.converged_all with tf.compat.v1.name_scope(name, 'minimize', [initial_position, tolerance]): initial_position = tf.convert_to_tensor( value=initial_position, name='initial_position') dtype = initial_position.dtype.base_dtype tolerance = tf.convert_to_tensor( value=tolerance, dtype=dtype, name='grad_tolerance') f_relative_tolerance = tf.convert_to_tensor( value=f_relative_tolerance, dtype=dtype, name='f_relative_tolerance') x_tolerance = tf.convert_to_tensor( value=x_tolerance, dtype=dtype, name='x_tolerance') max_iterations = tf.convert_to_tensor( value=max_iterations, name='max_iterations') # The `state` here is a `LBfgsOptimizerResults` tuple with values for the # current state of the algorithm computation. def _cond(state): """Continue if iterations remain and stopping condition is not met.""" return ((state.num_iterations < max_iterations) & tf.logical_not(stopping_condition(state.converged, state.failed))) def _body(current_state): """Main optimization loop.""" search_direction = _get_search_direction(current_state) # TODO(b/120134934): Check if the derivative at the start point is not # negative, if so then reset position/gradient deltas and recompute # search direction. 
next_state = bfgs_utils.line_search_step( current_state, value_and_gradients_function, search_direction, tolerance, f_relative_tolerance, x_tolerance, stopping_condition) # If not failed or converged, update the Hessian estimate. should_update = ~(next_state.converged | next_state.failed) state_after_inv_hessian_update = bfgs_utils.update_fields( next_state, position_deltas=_queue_push( current_state.position_deltas, should_update, next_state.position - current_state.position), gradient_deltas=_queue_push( current_state.gradient_deltas, should_update, next_state.objective_gradient - current_state.objective_gradient)) return [state_after_inv_hessian_update] initial_state = _get_initial_state(value_and_gradients_function, initial_position, num_correction_pairs, tolerance) return tf.while_loop( cond=_cond, body=_body, loop_vars=[initial_state], parallel_iterations=parallel_iterations)[0]
[ "Applies", "the", "L", "-", "BFGS", "algorithm", "to", "minimize", "a", "differentiable", "function", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/lbfgs.py#L80-L260
[ "def", "minimize", "(", "value_and_gradients_function", ",", "initial_position", ",", "num_correction_pairs", "=", "10", ",", "tolerance", "=", "1e-8", ",", "x_tolerance", "=", "0", ",", "f_relative_tolerance", "=", "0", ",", "initial_inverse_hessian_estimate", "=", "None", ",", "max_iterations", "=", "50", ",", "parallel_iterations", "=", "1", ",", "stopping_condition", "=", "None", ",", "name", "=", "None", ")", ":", "if", "initial_inverse_hessian_estimate", "is", "not", "None", ":", "raise", "NotImplementedError", "(", "'Support of initial_inverse_hessian_estimate arg not yet implemented'", ")", "if", "stopping_condition", "is", "None", ":", "stopping_condition", "=", "bfgs_utils", ".", "converged_all", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'minimize'", ",", "[", "initial_position", ",", "tolerance", "]", ")", ":", "initial_position", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "initial_position", ",", "name", "=", "'initial_position'", ")", "dtype", "=", "initial_position", ".", "dtype", ".", "base_dtype", "tolerance", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "tolerance", ",", "dtype", "=", "dtype", ",", "name", "=", "'grad_tolerance'", ")", "f_relative_tolerance", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "f_relative_tolerance", ",", "dtype", "=", "dtype", ",", "name", "=", "'f_relative_tolerance'", ")", "x_tolerance", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x_tolerance", ",", "dtype", "=", "dtype", ",", "name", "=", "'x_tolerance'", ")", "max_iterations", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "max_iterations", ",", "name", "=", "'max_iterations'", ")", "# The `state` here is a `LBfgsOptimizerResults` tuple with values for the", "# current state of the algorithm computation.", "def", "_cond", "(", "state", ")", ":", "\"\"\"Continue if iterations remain and stopping condition is not met.\"\"\"", "return", "(", "(", "state", ".", "num_iterations", "<", "max_iterations", ")", "&", "tf", ".", "logical_not", "(", "stopping_condition", "(", "state", ".", "converged", ",", "state", ".", "failed", ")", ")", ")", "def", "_body", "(", "current_state", ")", ":", "\"\"\"Main optimization loop.\"\"\"", "search_direction", "=", "_get_search_direction", "(", "current_state", ")", "# TODO(b/120134934): Check if the derivative at the start point is not", "# negative, if so then reset position/gradient deltas and recompute", "# search direction.", "next_state", "=", "bfgs_utils", ".", "line_search_step", "(", "current_state", ",", "value_and_gradients_function", ",", "search_direction", ",", "tolerance", ",", "f_relative_tolerance", ",", "x_tolerance", ",", "stopping_condition", ")", "# If not failed or converged, update the Hessian estimate.", "should_update", "=", "~", "(", "next_state", ".", "converged", "|", "next_state", ".", "failed", ")", "state_after_inv_hessian_update", "=", "bfgs_utils", ".", "update_fields", "(", "next_state", ",", "position_deltas", "=", "_queue_push", "(", "current_state", ".", "position_deltas", ",", "should_update", ",", "next_state", ".", "position", "-", "current_state", ".", "position", ")", ",", "gradient_deltas", "=", "_queue_push", "(", "current_state", ".", "gradient_deltas", ",", "should_update", ",", "next_state", ".", "objective_gradient", "-", "current_state", ".", "objective_gradient", ")", ")", "return", "[", "state_after_inv_hessian_update", "]", "initial_state", "=", "_get_initial_state", "(", "value_and_gradients_function", ",", 
"initial_position", ",", "num_correction_pairs", ",", "tolerance", ")", "return", "tf", ".", "while_loop", "(", "cond", "=", "_cond", ",", "body", "=", "_body", ",", "loop_vars", "=", "[", "initial_state", "]", ",", "parallel_iterations", "=", "parallel_iterations", ")", "[", "0", "]" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
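The usage example embedded in the docstring above is written for graph mode with `tf.Session` and `tf.gradients`. A hedged TF2/eager variant of the same quadratic-bowl example (not part of the source), computing the gradient with `tf.GradientTape`; the reduced `ndims` and the exact printout are illustrative assumptions:

```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

ndims = 10
minimum = np.ones([ndims], dtype='float64')
scales = np.arange(ndims, dtype='float64') + 1.0

def quadratic(x):
  # lbfgs_minimize expects a callable returning (value, gradient).
  with tf.GradientTape() as tape:
    tape.watch(x)
    value = tf.reduce_sum(scales * (x - minimum) ** 2)
  return value, tape.gradient(value, x)

start = np.arange(ndims, 0, -1, dtype='float64')
results = tfp.optimizer.lbfgs_minimize(
    quadratic, initial_position=tf.constant(start),
    num_correction_pairs=10, tolerance=1e-8)
print(results.converged.numpy())
print(results.position.numpy())  # Close to the all-ones minimum.
```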
test
_get_initial_state
Create LBfgsOptimizerResults with initial state of search procedure.
tensorflow_probability/python/optimizer/lbfgs.py
def _get_initial_state(value_and_gradients_function, initial_position, num_correction_pairs, tolerance): """Create LBfgsOptimizerResults with initial state of search procedure.""" init_args = bfgs_utils.get_initial_state_args( value_and_gradients_function, initial_position, tolerance) empty_queue = _make_empty_queue_for(num_correction_pairs, initial_position) init_args.update(position_deltas=empty_queue, gradient_deltas=empty_queue) return LBfgsOptimizerResults(**init_args)
def _get_initial_state(value_and_gradients_function, initial_position, num_correction_pairs, tolerance): """Create LBfgsOptimizerResults with initial state of search procedure.""" init_args = bfgs_utils.get_initial_state_args( value_and_gradients_function, initial_position, tolerance) empty_queue = _make_empty_queue_for(num_correction_pairs, initial_position) init_args.update(position_deltas=empty_queue, gradient_deltas=empty_queue) return LBfgsOptimizerResults(**init_args)
[ "Create", "LBfgsOptimizerResults", "with", "initial", "state", "of", "search", "procedure", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/lbfgs.py#L263-L274
[ "def", "_get_initial_state", "(", "value_and_gradients_function", ",", "initial_position", ",", "num_correction_pairs", ",", "tolerance", ")", ":", "init_args", "=", "bfgs_utils", ".", "get_initial_state_args", "(", "value_and_gradients_function", ",", "initial_position", ",", "tolerance", ")", "empty_queue", "=", "_make_empty_queue_for", "(", "num_correction_pairs", ",", "initial_position", ")", "init_args", ".", "update", "(", "position_deltas", "=", "empty_queue", ",", "gradient_deltas", "=", "empty_queue", ")", "return", "LBfgsOptimizerResults", "(", "*", "*", "init_args", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_get_search_direction
Computes the search direction to follow at the current state. On the `k`-th iteration of the main L-BFGS algorithm, the state has collected the most recent `m` correction pairs in position_deltas and gradient_deltas, where `k = state.num_iterations` and `m = min(k, num_correction_pairs)`. Assuming these, the code below is an implementation of the L-BFGS two-loop recursion algorithm given by [Nocedal and Wright(2006)][1]: ```None q_direction = objective_gradient for i in reversed(range(m)): # First loop. inv_rho[i] = gradient_deltas[i]^T * position_deltas[i] alpha[i] = position_deltas[i]^T * q_direction / inv_rho[i] q_direction = q_direction - alpha[i] * gradient_deltas[i] kth_inv_hessian_factor = (gradient_deltas[-1]^T * position_deltas[-1] / gradient_deltas[-1]^T * gradient_deltas[-1]) r_direction = kth_inv_hessian_factor * I * q_direction for i in range(m): # Second loop. beta = gradient_deltas[i]^T * r_direction / inv_rho[i] r_direction = r_direction + position_deltas[i] * (alpha[i] - beta) return -r_direction # Approximates - H_k * objective_gradient. ``` Args: state: A `LBfgsOptimizerResults` tuple with the current state of the search procedure. Returns: A real `Tensor` of the same shape as the `state.position`. The direction along which to perform line search.
tensorflow_probability/python/optimizer/lbfgs.py
def _get_search_direction(state): """Computes the search direction to follow at the current state. On the `k`-th iteration of the main L-BFGS algorithm, the state has collected the most recent `m` correction pairs in position_deltas and gradient_deltas, where `k = state.num_iterations` and `m = min(k, num_correction_pairs)`. Assuming these, the code below is an implementation of the L-BFGS two-loop recursion algorithm given by [Nocedal and Wright(2006)][1]: ```None q_direction = objective_gradient for i in reversed(range(m)): # First loop. inv_rho[i] = gradient_deltas[i]^T * position_deltas[i] alpha[i] = position_deltas[i]^T * q_direction / inv_rho[i] q_direction = q_direction - alpha[i] * gradient_deltas[i] kth_inv_hessian_factor = (gradient_deltas[-1]^T * position_deltas[-1] / gradient_deltas[-1]^T * gradient_deltas[-1]) r_direction = kth_inv_hessian_factor * I * q_direction for i in range(m): # Second loop. beta = gradient_deltas[i]^T * r_direction / inv_rho[i] r_direction = r_direction + position_deltas[i] * (alpha[i] - beta) return -r_direction # Approximates - H_k * objective_gradient. ``` Args: state: A `LBfgsOptimizerResults` tuple with the current state of the search procedure. Returns: A real `Tensor` of the same shape as the `state.position`. The direction along which to perform line search. """ # The number of correction pairs that have been collected so far. num_elements = tf.minimum( state.num_iterations, distribution_util.prefer_static_shape(state.position_deltas)[0]) def _two_loop_algorithm(): """L-BFGS two-loop algorithm.""" # Correction pairs are always appended to the end, so only the latest # `num_elements` vectors have valid position/gradient deltas. position_deltas = state.position_deltas[-num_elements:] gradient_deltas = state.gradient_deltas[-num_elements:] # Pre-compute all `inv_rho[i]`s. inv_rhos = tf.reduce_sum( input_tensor=gradient_deltas * position_deltas, axis=-1) def first_loop(acc, args): _, q_direction = acc position_delta, gradient_delta, inv_rho = args alpha = tf.reduce_sum( input_tensor=position_delta * q_direction, axis=-1) / inv_rho direction_delta = tf.expand_dims(alpha, axis=-1) * gradient_delta return (alpha, q_direction - direction_delta) # Run first loop body computing and collecting `alpha[i]`s, while also # computing the updated `q_direction` at each step. zero = tf.zeros_like(inv_rhos[0]) alphas, q_directions = tf.scan( first_loop, [position_deltas, gradient_deltas, inv_rhos], initializer=(zero, state.objective_gradient), reverse=True) # We use `H^0_k = gamma_k * I` as an estimate for the initial inverse # hessian for the k-th iteration; then `r_direction = H^0_k * q_direction`. gamma_k = inv_rhos[-1] / tf.reduce_sum( input_tensor=gradient_deltas[-1] * gradient_deltas[-1], axis=-1) r_direction = tf.expand_dims(gamma_k, axis=-1) * q_directions[0] def second_loop(r_direction, args): alpha, position_delta, gradient_delta, inv_rho = args beta = tf.reduce_sum( input_tensor=gradient_delta * r_direction, axis=-1) / inv_rho direction_delta = tf.expand_dims(alpha - beta, axis=-1) * position_delta return r_direction + direction_delta # Finally, run second loop body computing the updated `r_direction` at each # step. r_directions = tf.scan( second_loop, [alphas, position_deltas, gradient_deltas, inv_rhos], initializer=r_direction) return -r_directions[-1] return prefer_static.cond(tf.equal(num_elements, 0), (lambda: -state.objective_gradient), _two_loop_algorithm)
def _get_search_direction(state): """Computes the search direction to follow at the current state. On the `k`-th iteration of the main L-BFGS algorithm, the state has collected the most recent `m` correction pairs in position_deltas and gradient_deltas, where `k = state.num_iterations` and `m = min(k, num_correction_pairs)`. Assuming these, the code below is an implementation of the L-BFGS two-loop recursion algorithm given by [Nocedal and Wright(2006)][1]: ```None q_direction = objective_gradient for i in reversed(range(m)): # First loop. inv_rho[i] = gradient_deltas[i]^T * position_deltas[i] alpha[i] = position_deltas[i]^T * q_direction / inv_rho[i] q_direction = q_direction - alpha[i] * gradient_deltas[i] kth_inv_hessian_factor = (gradient_deltas[-1]^T * position_deltas[-1] / gradient_deltas[-1]^T * gradient_deltas[-1]) r_direction = kth_inv_hessian_factor * I * q_direction for i in range(m): # Second loop. beta = gradient_deltas[i]^T * r_direction / inv_rho[i] r_direction = r_direction + position_deltas[i] * (alpha[i] - beta) return -r_direction # Approximates - H_k * objective_gradient. ``` Args: state: A `LBfgsOptimizerResults` tuple with the current state of the search procedure. Returns: A real `Tensor` of the same shape as the `state.position`. The direction along which to perform line search. """ # The number of correction pairs that have been collected so far. num_elements = tf.minimum( state.num_iterations, distribution_util.prefer_static_shape(state.position_deltas)[0]) def _two_loop_algorithm(): """L-BFGS two-loop algorithm.""" # Correction pairs are always appended to the end, so only the latest # `num_elements` vectors have valid position/gradient deltas. position_deltas = state.position_deltas[-num_elements:] gradient_deltas = state.gradient_deltas[-num_elements:] # Pre-compute all `inv_rho[i]`s. inv_rhos = tf.reduce_sum( input_tensor=gradient_deltas * position_deltas, axis=-1) def first_loop(acc, args): _, q_direction = acc position_delta, gradient_delta, inv_rho = args alpha = tf.reduce_sum( input_tensor=position_delta * q_direction, axis=-1) / inv_rho direction_delta = tf.expand_dims(alpha, axis=-1) * gradient_delta return (alpha, q_direction - direction_delta) # Run first loop body computing and collecting `alpha[i]`s, while also # computing the updated `q_direction` at each step. zero = tf.zeros_like(inv_rhos[0]) alphas, q_directions = tf.scan( first_loop, [position_deltas, gradient_deltas, inv_rhos], initializer=(zero, state.objective_gradient), reverse=True) # We use `H^0_k = gamma_k * I` as an estimate for the initial inverse # hessian for the k-th iteration; then `r_direction = H^0_k * q_direction`. gamma_k = inv_rhos[-1] / tf.reduce_sum( input_tensor=gradient_deltas[-1] * gradient_deltas[-1], axis=-1) r_direction = tf.expand_dims(gamma_k, axis=-1) * q_directions[0] def second_loop(r_direction, args): alpha, position_delta, gradient_delta, inv_rho = args beta = tf.reduce_sum( input_tensor=gradient_delta * r_direction, axis=-1) / inv_rho direction_delta = tf.expand_dims(alpha - beta, axis=-1) * position_delta return r_direction + direction_delta # Finally, run second loop body computing the updated `r_direction` at each # step. r_directions = tf.scan( second_loop, [alphas, position_deltas, gradient_deltas, inv_rhos], initializer=r_direction) return -r_directions[-1] return prefer_static.cond(tf.equal(num_elements, 0), (lambda: -state.objective_gradient), _two_loop_algorithm)
[ "Computes", "the", "search", "direction", "to", "follow", "at", "the", "current", "state", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/lbfgs.py#L277-L366
[ "def", "_get_search_direction", "(", "state", ")", ":", "# The number of correction pairs that have been collected so far.", "num_elements", "=", "tf", ".", "minimum", "(", "state", ".", "num_iterations", ",", "distribution_util", ".", "prefer_static_shape", "(", "state", ".", "position_deltas", ")", "[", "0", "]", ")", "def", "_two_loop_algorithm", "(", ")", ":", "\"\"\"L-BFGS two-loop algorithm.\"\"\"", "# Correction pairs are always appended to the end, so only the latest", "# `num_elements` vectors have valid position/gradient deltas.", "position_deltas", "=", "state", ".", "position_deltas", "[", "-", "num_elements", ":", "]", "gradient_deltas", "=", "state", ".", "gradient_deltas", "[", "-", "num_elements", ":", "]", "# Pre-compute all `inv_rho[i]`s.", "inv_rhos", "=", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "gradient_deltas", "*", "position_deltas", ",", "axis", "=", "-", "1", ")", "def", "first_loop", "(", "acc", ",", "args", ")", ":", "_", ",", "q_direction", "=", "acc", "position_delta", ",", "gradient_delta", ",", "inv_rho", "=", "args", "alpha", "=", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "position_delta", "*", "q_direction", ",", "axis", "=", "-", "1", ")", "/", "inv_rho", "direction_delta", "=", "tf", ".", "expand_dims", "(", "alpha", ",", "axis", "=", "-", "1", ")", "*", "gradient_delta", "return", "(", "alpha", ",", "q_direction", "-", "direction_delta", ")", "# Run first loop body computing and collecting `alpha[i]`s, while also", "# computing the updated `q_direction` at each step.", "zero", "=", "tf", ".", "zeros_like", "(", "inv_rhos", "[", "0", "]", ")", "alphas", ",", "q_directions", "=", "tf", ".", "scan", "(", "first_loop", ",", "[", "position_deltas", ",", "gradient_deltas", ",", "inv_rhos", "]", ",", "initializer", "=", "(", "zero", ",", "state", ".", "objective_gradient", ")", ",", "reverse", "=", "True", ")", "# We use `H^0_k = gamma_k * I` as an estimate for the initial inverse", "# hessian for the k-th iteration; then `r_direction = H^0_k * q_direction`.", "gamma_k", "=", "inv_rhos", "[", "-", "1", "]", "/", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "gradient_deltas", "[", "-", "1", "]", "*", "gradient_deltas", "[", "-", "1", "]", ",", "axis", "=", "-", "1", ")", "r_direction", "=", "tf", ".", "expand_dims", "(", "gamma_k", ",", "axis", "=", "-", "1", ")", "*", "q_directions", "[", "0", "]", "def", "second_loop", "(", "r_direction", ",", "args", ")", ":", "alpha", ",", "position_delta", ",", "gradient_delta", ",", "inv_rho", "=", "args", "beta", "=", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "gradient_delta", "*", "r_direction", ",", "axis", "=", "-", "1", ")", "/", "inv_rho", "direction_delta", "=", "tf", ".", "expand_dims", "(", "alpha", "-", "beta", ",", "axis", "=", "-", "1", ")", "*", "position_delta", "return", "r_direction", "+", "direction_delta", "# Finally, run second loop body computing the updated `r_direction` at each", "# step.", "r_directions", "=", "tf", ".", "scan", "(", "second_loop", ",", "[", "alphas", ",", "position_deltas", ",", "gradient_deltas", ",", "inv_rhos", "]", ",", "initializer", "=", "r_direction", ")", "return", "-", "r_directions", "[", "-", "1", "]", "return", "prefer_static", ".", "cond", "(", "tf", ".", "equal", "(", "num_elements", ",", "0", ")", ",", "(", "lambda", ":", "-", "state", ".", "objective_gradient", ")", ",", "_two_loop_algorithm", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
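The pseudocode in the docstring of the record above maps directly onto a small NumPy routine. A hedged, unbatched sketch (not part of the source) of the two-loop recursion; the function name and the oldest-first list convention for the stored correction pairs are illustrative assumptions:

```python
import numpy as np

def two_loop_direction(grad, position_deltas, gradient_deltas):
  """position_deltas, gradient_deltas: lists of length m, oldest pair first."""
  m = len(position_deltas)
  q = grad.copy()
  alphas = np.zeros(m)
  inv_rhos = [np.dot(gradient_deltas[i], position_deltas[i]) for i in range(m)]
  for i in reversed(range(m)):            # First loop.
    alphas[i] = np.dot(position_deltas[i], q) / inv_rhos[i]
    q -= alphas[i] * gradient_deltas[i]
  # H^0_k = gamma_k * I, using the most recent correction pair.
  gamma_k = inv_rhos[-1] / np.dot(gradient_deltas[-1], gradient_deltas[-1])
  r = gamma_k * q
  for i in range(m):                      # Second loop.
    beta = np.dot(gradient_deltas[i], r) / inv_rhos[i]
    r += (alphas[i] - beta) * position_deltas[i]
  return -r                               # Approximates -H_k * grad.
```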
test
_make_empty_queue_for
Creates a `tf.Tensor` suitable to hold `k` element-shaped tensors. For example: ```python element = tf.constant([[0., 1., 2., 3., 4.], [5., 6., 7., 8., 9.]]) # A queue capable of holding 3 elements. _make_empty_queue_for(3, element) # => [[[ 0., 0., 0., 0., 0.], # [ 0., 0., 0., 0., 0.]], # # [[ 0., 0., 0., 0., 0.], # [ 0., 0., 0., 0., 0.]], # # [[ 0., 0., 0., 0., 0.], # [ 0., 0., 0., 0., 0.]]] ``` Args: k: A positive scalar integer, number of elements that each queue will hold. element: A `tf.Tensor`, only its shape and dtype information are relevant. Returns: A zero-filed `tf.Tensor` of shape `(k,) + tf.shape(element)` and same dtype as `element`.
tensorflow_probability/python/optimizer/lbfgs.py
def _make_empty_queue_for(k, element): """Creates a `tf.Tensor` suitable to hold `k` element-shaped tensors. For example: ```python element = tf.constant([[0., 1., 2., 3., 4.], [5., 6., 7., 8., 9.]]) # A queue capable of holding 3 elements. _make_empty_queue_for(3, element) # => [[[ 0., 0., 0., 0., 0.], # [ 0., 0., 0., 0., 0.]], # # [[ 0., 0., 0., 0., 0.], # [ 0., 0., 0., 0., 0.]], # # [[ 0., 0., 0., 0., 0.], # [ 0., 0., 0., 0., 0.]]] ``` Args: k: A positive scalar integer, number of elements that each queue will hold. element: A `tf.Tensor`, only its shape and dtype information are relevant. Returns: A zero-filed `tf.Tensor` of shape `(k,) + tf.shape(element)` and same dtype as `element`. """ queue_shape = tf.concat( [[k], distribution_util.prefer_static_shape(element)], axis=0) return tf.zeros(queue_shape, dtype=element.dtype.base_dtype)
def _make_empty_queue_for(k, element): """Creates a `tf.Tensor` suitable to hold `k` element-shaped tensors. For example: ```python element = tf.constant([[0., 1., 2., 3., 4.], [5., 6., 7., 8., 9.]]) # A queue capable of holding 3 elements. _make_empty_queue_for(3, element) # => [[[ 0., 0., 0., 0., 0.], # [ 0., 0., 0., 0., 0.]], # # [[ 0., 0., 0., 0., 0.], # [ 0., 0., 0., 0., 0.]], # # [[ 0., 0., 0., 0., 0.], # [ 0., 0., 0., 0., 0.]]] ``` Args: k: A positive scalar integer, number of elements that each queue will hold. element: A `tf.Tensor`, only its shape and dtype information are relevant. Returns: A zero-filed `tf.Tensor` of shape `(k,) + tf.shape(element)` and same dtype as `element`. """ queue_shape = tf.concat( [[k], distribution_util.prefer_static_shape(element)], axis=0) return tf.zeros(queue_shape, dtype=element.dtype.base_dtype)
[ "Creates", "a", "tf", ".", "Tensor", "suitable", "to", "hold", "k", "element", "-", "shaped", "tensors", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/lbfgs.py#L369-L400
[ "def", "_make_empty_queue_for", "(", "k", ",", "element", ")", ":", "queue_shape", "=", "tf", ".", "concat", "(", "[", "[", "k", "]", ",", "distribution_util", ".", "prefer_static_shape", "(", "element", ")", "]", ",", "axis", "=", "0", ")", "return", "tf", ".", "zeros", "(", "queue_shape", ",", "dtype", "=", "element", ".", "dtype", ".", "base_dtype", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_queue_push
Conditionally push new vectors into a batch of first-in-first-out queues. The `queue` of shape `[k, ..., n]` can be thought of as a batch of queues, each holding `k` n-D vectors; while `new_vecs` of shape `[..., n]` is a fresh new batch of n-D vectors. The `should_update` batch of Boolean scalars, i.e. shape `[...]`, indicates batch members whose corresponding n-D vector in `new_vecs` should be added at the back of its queue, pushing out the corresponding n-D vector from the front. Batch members in `new_vecs` for which `should_update` is False are ignored. Note: the choice of placing `k` at the dimension 0 of the queue is constrained by the L-BFGS two-loop algorithm above. The algorithm uses tf.scan to iterate over the `k` correction pairs simulatneously across all batches, and tf.scan itself can only iterate over dimension 0. For example: ```python k, b, n = (3, 2, 5) queue = tf.reshape(tf.range(30), (k, b, n)) # => [[[ 0, 1, 2, 3, 4], # [ 5, 6, 7, 8, 9]], # # [[10, 11, 12, 13, 14], # [15, 16, 17, 18, 19]], # # [[20, 21, 22, 23, 24], # [25, 26, 27, 28, 29]]] element = tf.reshape(tf.range(30, 40), (b, n)) # => [[30, 31, 32, 33, 34], [35, 36, 37, 38, 39]] should_update = tf.constant([True, False]) # Shape: (b,) _queue_add(should_update, queue, element) # => [[[10, 11, 12, 13, 14], # [ 5, 6, 7, 8, 9]], # # [[20, 21, 22, 23, 24], # [15, 16, 17, 18, 19]], # # [[30, 31, 32, 33, 34], # [25, 26, 27, 28, 29]]] ``` Args: queue: A `tf.Tensor` of shape `[k, ..., n]`; a batch of queues each with `k` n-D vectors. should_update: A Boolean `tf.Tensor` of shape `[...]` indicating batch members where new vectors should be added to their queues. new_vecs: A `tf.Tensor` of shape `[..., n]`; a batch of n-D vectors to add at the end of their respective queues, pushing out the first element from each. Returns: A new `tf.Tensor` of shape `[k, ..., n]`.
tensorflow_probability/python/optimizer/lbfgs.py
def _queue_push(queue, should_update, new_vecs): """Conditionally push new vectors into a batch of first-in-first-out queues. The `queue` of shape `[k, ..., n]` can be thought of as a batch of queues, each holding `k` n-D vectors; while `new_vecs` of shape `[..., n]` is a fresh new batch of n-D vectors. The `should_update` batch of Boolean scalars, i.e. shape `[...]`, indicates batch members whose corresponding n-D vector in `new_vecs` should be added at the back of its queue, pushing out the corresponding n-D vector from the front. Batch members in `new_vecs` for which `should_update` is False are ignored. Note: the choice of placing `k` at the dimension 0 of the queue is constrained by the L-BFGS two-loop algorithm above. The algorithm uses tf.scan to iterate over the `k` correction pairs simulatneously across all batches, and tf.scan itself can only iterate over dimension 0. For example: ```python k, b, n = (3, 2, 5) queue = tf.reshape(tf.range(30), (k, b, n)) # => [[[ 0, 1, 2, 3, 4], # [ 5, 6, 7, 8, 9]], # # [[10, 11, 12, 13, 14], # [15, 16, 17, 18, 19]], # # [[20, 21, 22, 23, 24], # [25, 26, 27, 28, 29]]] element = tf.reshape(tf.range(30, 40), (b, n)) # => [[30, 31, 32, 33, 34], [35, 36, 37, 38, 39]] should_update = tf.constant([True, False]) # Shape: (b,) _queue_add(should_update, queue, element) # => [[[10, 11, 12, 13, 14], # [ 5, 6, 7, 8, 9]], # # [[20, 21, 22, 23, 24], # [15, 16, 17, 18, 19]], # # [[30, 31, 32, 33, 34], # [25, 26, 27, 28, 29]]] ``` Args: queue: A `tf.Tensor` of shape `[k, ..., n]`; a batch of queues each with `k` n-D vectors. should_update: A Boolean `tf.Tensor` of shape `[...]` indicating batch members where new vectors should be added to their queues. new_vecs: A `tf.Tensor` of shape `[..., n]`; a batch of n-D vectors to add at the end of their respective queues, pushing out the first element from each. Returns: A new `tf.Tensor` of shape `[k, ..., n]`. """ new_queue = tf.concat([queue[1:], [new_vecs]], axis=0) update_pattern = tf.broadcast_to( should_update[tf.newaxis, ..., tf.newaxis], distribution_util.prefer_static_shape(queue)) return tf.where(update_pattern, new_queue, queue)
def _queue_push(queue, should_update, new_vecs): """Conditionally push new vectors into a batch of first-in-first-out queues. The `queue` of shape `[k, ..., n]` can be thought of as a batch of queues, each holding `k` n-D vectors; while `new_vecs` of shape `[..., n]` is a fresh new batch of n-D vectors. The `should_update` batch of Boolean scalars, i.e. shape `[...]`, indicates batch members whose corresponding n-D vector in `new_vecs` should be added at the back of its queue, pushing out the corresponding n-D vector from the front. Batch members in `new_vecs` for which `should_update` is False are ignored. Note: the choice of placing `k` at the dimension 0 of the queue is constrained by the L-BFGS two-loop algorithm above. The algorithm uses tf.scan to iterate over the `k` correction pairs simulatneously across all batches, and tf.scan itself can only iterate over dimension 0. For example: ```python k, b, n = (3, 2, 5) queue = tf.reshape(tf.range(30), (k, b, n)) # => [[[ 0, 1, 2, 3, 4], # [ 5, 6, 7, 8, 9]], # # [[10, 11, 12, 13, 14], # [15, 16, 17, 18, 19]], # # [[20, 21, 22, 23, 24], # [25, 26, 27, 28, 29]]] element = tf.reshape(tf.range(30, 40), (b, n)) # => [[30, 31, 32, 33, 34], [35, 36, 37, 38, 39]] should_update = tf.constant([True, False]) # Shape: (b,) _queue_add(should_update, queue, element) # => [[[10, 11, 12, 13, 14], # [ 5, 6, 7, 8, 9]], # # [[20, 21, 22, 23, 24], # [15, 16, 17, 18, 19]], # # [[30, 31, 32, 33, 34], # [25, 26, 27, 28, 29]]] ``` Args: queue: A `tf.Tensor` of shape `[k, ..., n]`; a batch of queues each with `k` n-D vectors. should_update: A Boolean `tf.Tensor` of shape `[...]` indicating batch members where new vectors should be added to their queues. new_vecs: A `tf.Tensor` of shape `[..., n]`; a batch of n-D vectors to add at the end of their respective queues, pushing out the first element from each. Returns: A new `tf.Tensor` of shape `[k, ..., n]`. """ new_queue = tf.concat([queue[1:], [new_vecs]], axis=0) update_pattern = tf.broadcast_to( should_update[tf.newaxis, ..., tf.newaxis], distribution_util.prefer_static_shape(queue)) return tf.where(update_pattern, new_queue, queue)
[ "Conditionally", "push", "new", "vectors", "into", "a", "batch", "of", "first", "-", "in", "-", "first", "-", "out", "queues", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/lbfgs.py#L403-L466
[ "def", "_queue_push", "(", "queue", ",", "should_update", ",", "new_vecs", ")", ":", "new_queue", "=", "tf", ".", "concat", "(", "[", "queue", "[", "1", ":", "]", ",", "[", "new_vecs", "]", "]", ",", "axis", "=", "0", ")", "update_pattern", "=", "tf", ".", "broadcast_to", "(", "should_update", "[", "tf", ".", "newaxis", ",", "...", ",", "tf", ".", "newaxis", "]", ",", "distribution_util", ".", "prefer_static_shape", "(", "queue", ")", ")", "return", "tf", ".", "where", "(", "update_pattern", ",", "new_queue", ",", "queue", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_psd_mask
Computes whether each square matrix in the input is positive semi-definite. Args: x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`. Returns: mask: A floating-point `Tensor` of shape `[B1, ... Bn]`. Each scalar is 1 if the corresponding matrix was PSD, otherwise 0.
tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_lib.py
def _psd_mask(x): """Computes whether each square matrix in the input is positive semi-definite. Args: x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`. Returns: mask: A floating-point `Tensor` of shape `[B1, ... Bn]`. Each scalar is 1 if the corresponding matrix was PSD, otherwise 0. """ # Allegedly # https://scicomp.stackexchange.com/questions/12979/testing-if-a-matrix-is-positive-semi-definite # it is more efficient to test for positive semi-definiteness by # trying to compute the Cholesky decomposition -- the matrix is PSD # if you succeed and not PSD if you fail. However, TensorFlow's # Cholesky raises an exception if _any_ of the input matrices are # not PSD, from which I don't know how to extract _which ones_, so I # proceed by explicitly computing all the eigenvalues and checking # whether they are all positive or not. # # Also, as was discussed in the answer, it is somewhat dangerous to # treat SPD-ness as binary in floating-point arithmetic. Cholesky # factorization can complete and 'look' like everything is fine # (e.g., O(1) entries and a diagonal of all ones) but the matrix can # have an exponential condition number. eigenvalues, _ = tf.linalg.eigh(x) return tf.cast( tf.reduce_min(input_tensor=eigenvalues, axis=-1) >= 0, dtype=x.dtype)
def _psd_mask(x): """Computes whether each square matrix in the input is positive semi-definite. Args: x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`. Returns: mask: A floating-point `Tensor` of shape `[B1, ... Bn]`. Each scalar is 1 if the corresponding matrix was PSD, otherwise 0. """ # Allegedly # https://scicomp.stackexchange.com/questions/12979/testing-if-a-matrix-is-positive-semi-definite # it is more efficient to test for positive semi-definiteness by # trying to compute the Cholesky decomposition -- the matrix is PSD # if you succeed and not PSD if you fail. However, TensorFlow's # Cholesky raises an exception if _any_ of the input matrices are # not PSD, from which I don't know how to extract _which ones_, so I # proceed by explicitly computing all the eigenvalues and checking # whether they are all positive or not. # # Also, as was discussed in the answer, it is somewhat dangerous to # treat SPD-ness as binary in floating-point arithmetic. Cholesky # factorization can complete and 'look' like everything is fine # (e.g., O(1) entries and a diagonal of all ones) but the matrix can # have an exponential condition number. eigenvalues, _ = tf.linalg.eigh(x) return tf.cast( tf.reduce_min(input_tensor=eigenvalues, axis=-1) >= 0, dtype=x.dtype)
[ "Computes", "whether", "each", "square", "matrix", "in", "the", "input", "is", "positive", "semi", "-", "definite", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_lib.py#L77-L104
[ "def", "_psd_mask", "(", "x", ")", ":", "# Allegedly", "# https://scicomp.stackexchange.com/questions/12979/testing-if-a-matrix-is-positive-semi-definite", "# it is more efficient to test for positive semi-definiteness by", "# trying to compute the Cholesky decomposition -- the matrix is PSD", "# if you succeed and not PSD if you fail. However, TensorFlow's", "# Cholesky raises an exception if _any_ of the input matrices are", "# not PSD, from which I don't know how to extract _which ones_, so I", "# proceed by explicitly computing all the eigenvalues and checking", "# whether they are all positive or not.", "#", "# Also, as was discussed in the answer, it is somewhat dangerous to", "# treat SPD-ness as binary in floating-point arithmetic. Cholesky", "# factorization can complete and 'look' like everything is fine", "# (e.g., O(1) entries and a diagonal of all ones) but the matrix can", "# have an exponential condition number.", "eigenvalues", ",", "_", "=", "tf", ".", "linalg", ".", "eigh", "(", "x", ")", "return", "tf", ".", "cast", "(", "tf", ".", "reduce_min", "(", "input_tensor", "=", "eigenvalues", ",", "axis", "=", "-", "1", ")", ">=", "0", ",", "dtype", "=", "x", ".", "dtype", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
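The comments in the `_psd_mask` entry above weigh a Cholesky-based test against the eigenvalue check the function actually uses. As an illustration only, here is a minimal NumPy sketch of both variants; the function names, the `jitter` parameter, and the example matrices are assumptions made for this sketch, not part of the library.

```python
import numpy as np

def psd_mask_eigh(x):
  # Mirrors the approach used above: a matrix is flagged PSD iff its
  # smallest eigenvalue is non-negative.
  return (np.linalg.eigvalsh(x).min(axis=-1) >= 0.).astype(x.dtype)

def psd_mask_cholesky(x, jitter=0.0):
  # The alternative discussed in the comments: attempt a Cholesky
  # factorization per matrix and record success/failure. This loops in
  # Python and, strictly speaking, tests positive *definiteness*, so
  # singular PSD matrices may be rejected unless a small jitter is added.
  flat = x.reshape((-1,) + x.shape[-2:])
  mask = np.empty(flat.shape[0], dtype=x.dtype)
  for i, m in enumerate(flat):
    try:
      np.linalg.cholesky(m + jitter * np.eye(m.shape[-1]))
      mask[i] = 1.
    except np.linalg.LinAlgError:
      mask[i] = 0.
  return mask.reshape(x.shape[:-2])

# The 2x2 identity is PSD; [[1, 2], [2, 1]] has eigenvalues {3, -1} and is not.
batch = np.stack([np.eye(2), np.array([[1., 2.], [2., 1.]])])
print(psd_mask_eigh(batch))      # => [1. 0.]
print(psd_mask_cholesky(batch))  # => [1. 0.]
```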
test
_det_large_enough_mask
Returns whether the input matches the given determinant limit. Args: x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`. det_bounds: A floating-point `Tensor` that must broadcast to shape `[B1, ..., Bn]`, giving the desired lower bound on the determinants in `x`. Returns: mask: A floating-point `Tensor` of shape [B1, ..., Bn]. Each scalar is 1 if the corresponding matrix had determinant above the corresponding bound, otherwise 0.
tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_lib.py
def _det_large_enough_mask(x, det_bounds): """Returns whether the input matches the given determinant limit. Args: x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`. det_bounds: A floating-point `Tensor` that must broadcast to shape `[B1, ..., Bn]`, giving the desired lower bound on the determinants in `x`. Returns: mask: A floating-point `Tensor` of shape [B1, ..., Bn]. Each scalar is 1 if the corresponding matrix had determinant above the corresponding bound, otherwise 0. """ # For the curious: I wonder whether it is possible and desirable to # use a Cholesky decomposition-based algorithm for this, since the # only matrices whose determinant this code cares about will be PSD. # Didn't figure out how to code that in TensorFlow. # # Expert opinion is that it would be about twice as fast since # Cholesky is roughly half the cost of Gaussian Elimination with # Partial Pivoting. But this is less of an impact than the switch in # _psd_mask. return tf.cast(tf.linalg.det(x) > det_bounds, dtype=x.dtype)
def _det_large_enough_mask(x, det_bounds): """Returns whether the input matches the given determinant limit. Args: x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`. det_bounds: A floating-point `Tensor` that must broadcast to shape `[B1, ..., Bn]`, giving the desired lower bound on the determinants in `x`. Returns: mask: A floating-point `Tensor` of shape [B1, ..., Bn]. Each scalar is 1 if the corresponding matrix had determinant above the corresponding bound, otherwise 0. """ # For the curious: I wonder whether it is possible and desirable to # use a Cholesky decomposition-based algorithm for this, since the # only matrices whose determinant this code cares about will be PSD. # Didn't figure out how to code that in TensorFlow. # # Expert opinion is that it would be about twice as fast since # Cholesky is roughly half the cost of Gaussian Elimination with # Partial Pivoting. But this is less of an impact than the switch in # _psd_mask. return tf.cast(tf.linalg.det(x) > det_bounds, dtype=x.dtype)
[ "Returns", "whether", "the", "input", "matches", "the", "given", "determinant", "limit", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_lib.py#L107-L130
[ "def", "_det_large_enough_mask", "(", "x", ",", "det_bounds", ")", ":", "# For the curious: I wonder whether it is possible and desirable to", "# use a Cholesky decomposition-based algorithm for this, since the", "# only matrices whose determinant this code cares about will be PSD.", "# Didn't figure out how to code that in TensorFlow.", "#", "# Expert opinion is that it would be about twice as fast since", "# Cholesky is roughly half the cost of Gaussian Elimination with", "# Partial Pivoting. But this is less of an impact than the switch in", "# _psd_mask.", "return", "tf", ".", "cast", "(", "tf", ".", "linalg", ".", "det", "(", "x", ")", ">", "det_bounds", ",", "dtype", "=", "x", ".", "dtype", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_uniform_correlation_like_matrix
Returns a uniformly random `Tensor` of "correlation-like" matrices. A "correlation-like" matrix is a symmetric square matrix with all entries between -1 and 1 (inclusive) and 1s on the main diagonal. Of these, the ones that are positive semi-definite are exactly the correlation matrices. Args: num_rows: Python `int` dimension of the correlation-like matrices. batch_shape: `Tensor` or Python `tuple` of `int` shape of the batch to return. dtype: `dtype` of the `Tensor` to return. seed: Random seed. Returns: matrices: A `Tensor` of shape `batch_shape + [num_rows, num_rows]` and dtype `dtype`. Each entry is in [-1, 1], and each matrix along the bottom two dimensions is symmetric and has 1s on the main diagonal.
tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_lib.py
def _uniform_correlation_like_matrix(num_rows, batch_shape, dtype, seed): """Returns a uniformly random `Tensor` of "correlation-like" matrices. A "correlation-like" matrix is a symmetric square matrix with all entries between -1 and 1 (inclusive) and 1s on the main diagonal. Of these, the ones that are positive semi-definite are exactly the correlation matrices. Args: num_rows: Python `int` dimension of the correlation-like matrices. batch_shape: `Tensor` or Python `tuple` of `int` shape of the batch to return. dtype: `dtype` of the `Tensor` to return. seed: Random seed. Returns: matrices: A `Tensor` of shape `batch_shape + [num_rows, num_rows]` and dtype `dtype`. Each entry is in [-1, 1], and each matrix along the bottom two dimensions is symmetric and has 1s on the main diagonal. """ num_entries = num_rows * (num_rows + 1) / 2 ones = tf.ones(shape=[num_entries], dtype=dtype) # It seems wasteful to generate random values for the diagonal since # I am going to throw them away, but `fill_triangular` fills the # diagonal, so I probably need them. # It's not impossible that it would be more efficient to just fill # the whole matrix with random values instead of messing with # `fill_triangular`. Then would need to filter almost half out with # `matrix_band_part`. unifs = uniform.Uniform(-ones, ones).sample(batch_shape, seed=seed) tril = util.fill_triangular(unifs) symmetric = tril + tf.linalg.matrix_transpose(tril) diagonal_ones = tf.ones( shape=util.pad(batch_shape, axis=0, back=True, value=num_rows), dtype=dtype) return tf.linalg.set_diag(symmetric, diagonal_ones)
def _uniform_correlation_like_matrix(num_rows, batch_shape, dtype, seed): """Returns a uniformly random `Tensor` of "correlation-like" matrices. A "correlation-like" matrix is a symmetric square matrix with all entries between -1 and 1 (inclusive) and 1s on the main diagonal. Of these, the ones that are positive semi-definite are exactly the correlation matrices. Args: num_rows: Python `int` dimension of the correlation-like matrices. batch_shape: `Tensor` or Python `tuple` of `int` shape of the batch to return. dtype: `dtype` of the `Tensor` to return. seed: Random seed. Returns: matrices: A `Tensor` of shape `batch_shape + [num_rows, num_rows]` and dtype `dtype`. Each entry is in [-1, 1], and each matrix along the bottom two dimensions is symmetric and has 1s on the main diagonal. """ num_entries = num_rows * (num_rows + 1) / 2 ones = tf.ones(shape=[num_entries], dtype=dtype) # It seems wasteful to generate random values for the diagonal since # I am going to throw them away, but `fill_triangular` fills the # diagonal, so I probably need them. # It's not impossible that it would be more efficient to just fill # the whole matrix with random values instead of messing with # `fill_triangular`. Then would need to filter almost half out with # `matrix_band_part`. unifs = uniform.Uniform(-ones, ones).sample(batch_shape, seed=seed) tril = util.fill_triangular(unifs) symmetric = tril + tf.linalg.matrix_transpose(tril) diagonal_ones = tf.ones( shape=util.pad(batch_shape, axis=0, back=True, value=num_rows), dtype=dtype) return tf.linalg.set_diag(symmetric, diagonal_ones)
[ "Returns", "a", "uniformly", "random", "Tensor", "of", "correlation", "-", "like", "matrices", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_lib.py#L133-L169
[ "def", "_uniform_correlation_like_matrix", "(", "num_rows", ",", "batch_shape", ",", "dtype", ",", "seed", ")", ":", "num_entries", "=", "num_rows", "*", "(", "num_rows", "+", "1", ")", "/", "2", "ones", "=", "tf", ".", "ones", "(", "shape", "=", "[", "num_entries", "]", ",", "dtype", "=", "dtype", ")", "# It seems wasteful to generate random values for the diagonal since", "# I am going to throw them away, but `fill_triangular` fills the", "# diagonal, so I probably need them.", "# It's not impossible that it would be more efficient to just fill", "# the whole matrix with random values instead of messing with", "# `fill_triangular`. Then would need to filter almost half out with", "# `matrix_band_part`.", "unifs", "=", "uniform", ".", "Uniform", "(", "-", "ones", ",", "ones", ")", ".", "sample", "(", "batch_shape", ",", "seed", "=", "seed", ")", "tril", "=", "util", ".", "fill_triangular", "(", "unifs", ")", "symmetric", "=", "tril", "+", "tf", ".", "linalg", ".", "matrix_transpose", "(", "tril", ")", "diagonal_ones", "=", "tf", ".", "ones", "(", "shape", "=", "util", ".", "pad", "(", "batch_shape", ",", "axis", "=", "0", ",", "back", "=", "True", ",", "value", "=", "num_rows", ")", ",", "dtype", "=", "dtype", ")", "return", "tf", ".", "linalg", ".", "set_diag", "(", "symmetric", ",", "diagonal_ones", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
correlation_matrix_volume_rejection_samples
Returns rejection samples from trying to get good correlation matrices. The proposal being rejected from is the uniform distribution on "correlation-like" matrices. We say a matrix is "correlation-like" if it is a symmetric square matrix with all entries between -1 and 1 (inclusive) and 1s on the main diagonal. Of these, the ones that are positive semi-definite are exactly the correlation matrices. The rejection algorithm, then, is to sample a `Tensor` of `sample_shape` correlation-like matrices of dimensions `dim` by `dim`, and check each one for (i) being a correlation matrix (i.e., PSD), and (ii) having determinant at least the corresponding entry of `det_bounds`. Args: det_bounds: A `Tensor` of lower bounds on the determinants of acceptable matrices. The shape must broadcast with `sample_shape`. dim: A Python `int` dimension of correlation matrices to sample. sample_shape: Python `tuple` of `int` shape of the samples to compute, excluding the two matrix dimensions. dtype: The `dtype` in which to do the computation. seed: Random seed. Returns: weights: A `Tensor` of shape `sample_shape`. Each entry is 0 if the corresponding matrix was not a correlation matrix, or had too small of a determinant. Otherwise, the entry is the multiplicative inverse of the density of proposing that matrix uniformly, i.e., the volume of the set of `dim` by `dim` correlation-like matrices. volume: The volume of the set of `dim` by `dim` correlation-like matrices.
tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_lib.py
def correlation_matrix_volume_rejection_samples( det_bounds, dim, sample_shape, dtype, seed): """Returns rejection samples from trying to get good correlation matrices. The proposal being rejected from is the uniform distribution on "correlation-like" matrices. We say a matrix is "correlation-like" if it is a symmetric square matrix with all entries between -1 and 1 (inclusive) and 1s on the main diagonal. Of these, the ones that are positive semi-definite are exactly the correlation matrices. The rejection algorithm, then, is to sample a `Tensor` of `sample_shape` correlation-like matrices of dimensions `dim` by `dim`, and check each one for (i) being a correlation matrix (i.e., PSD), and (ii) having determinant at least the corresponding entry of `det_bounds`. Args: det_bounds: A `Tensor` of lower bounds on the determinants of acceptable matrices. The shape must broadcast with `sample_shape`. dim: A Python `int` dimension of correlation matrices to sample. sample_shape: Python `tuple` of `int` shape of the samples to compute, excluding the two matrix dimensions. dtype: The `dtype` in which to do the computation. seed: Random seed. Returns: weights: A `Tensor` of shape `sample_shape`. Each entry is 0 if the corresponding matrix was not a correlation matrix, or had too small of a determinant. Otherwise, the entry is the multiplicative inverse of the density of proposing that matrix uniformly, i.e., the volume of the set of `dim` by `dim` correlation-like matrices. volume: The volume of the set of `dim` by `dim` correlation-like matrices. """ with tf.compat.v1.name_scope("rejection_sampler"): rej_proposals = _uniform_correlation_like_matrix( dim, sample_shape, dtype, seed=seed) rej_proposal_volume = 2. ** (dim * (dim - 1) / 2.) # The density of proposing any given point is 1 / rej_proposal_volume; # The weight of that point should be scaled by # 1 / density = rej_proposal_volume. rej_weights = rej_proposal_volume * _psd_mask( rej_proposals) * _det_large_enough_mask(rej_proposals, det_bounds) return rej_weights, rej_proposal_volume
def correlation_matrix_volume_rejection_samples( det_bounds, dim, sample_shape, dtype, seed): """Returns rejection samples from trying to get good correlation matrices. The proposal being rejected from is the uniform distribution on "correlation-like" matrices. We say a matrix is "correlation-like" if it is a symmetric square matrix with all entries between -1 and 1 (inclusive) and 1s on the main diagonal. Of these, the ones that are positive semi-definite are exactly the correlation matrices. The rejection algorithm, then, is to sample a `Tensor` of `sample_shape` correlation-like matrices of dimensions `dim` by `dim`, and check each one for (i) being a correlation matrix (i.e., PSD), and (ii) having determinant at least the corresponding entry of `det_bounds`. Args: det_bounds: A `Tensor` of lower bounds on the determinants of acceptable matrices. The shape must broadcast with `sample_shape`. dim: A Python `int` dimension of correlation matrices to sample. sample_shape: Python `tuple` of `int` shape of the samples to compute, excluding the two matrix dimensions. dtype: The `dtype` in which to do the computation. seed: Random seed. Returns: weights: A `Tensor` of shape `sample_shape`. Each entry is 0 if the corresponding matrix was not a correlation matrix, or had too small of a determinant. Otherwise, the entry is the multiplicative inverse of the density of proposing that matrix uniformly, i.e., the volume of the set of `dim` by `dim` correlation-like matrices. volume: The volume of the set of `dim` by `dim` correlation-like matrices. """ with tf.compat.v1.name_scope("rejection_sampler"): rej_proposals = _uniform_correlation_like_matrix( dim, sample_shape, dtype, seed=seed) rej_proposal_volume = 2. ** (dim * (dim - 1) / 2.) # The density of proposing any given point is 1 / rej_proposal_volume; # The weight of that point should be scaled by # 1 / density = rej_proposal_volume. rej_weights = rej_proposal_volume * _psd_mask( rej_proposals) * _det_large_enough_mask(rej_proposals, det_bounds) return rej_weights, rej_proposal_volume
[ "Returns", "rejection", "samples", "from", "trying", "to", "get", "good", "correlation", "matrices", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_lib.py#L172-L216
[ "def", "correlation_matrix_volume_rejection_samples", "(", "det_bounds", ",", "dim", ",", "sample_shape", ",", "dtype", ",", "seed", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "\"rejection_sampler\"", ")", ":", "rej_proposals", "=", "_uniform_correlation_like_matrix", "(", "dim", ",", "sample_shape", ",", "dtype", ",", "seed", "=", "seed", ")", "rej_proposal_volume", "=", "2.", "**", "(", "dim", "*", "(", "dim", "-", "1", ")", "/", "2.", ")", "# The density of proposing any given point is 1 / rej_proposal_volume;", "# The weight of that point should be scaled by", "# 1 / density = rej_proposal_volume.", "rej_weights", "=", "rej_proposal_volume", "*", "_psd_mask", "(", "rej_proposals", ")", "*", "_det_large_enough_mask", "(", "rej_proposals", ",", "det_bounds", ")", "return", "rej_weights", ",", "rej_proposal_volume" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
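To make the rejection scheme in `correlation_matrix_volume_rejection_samples` concrete, the weights can be averaged into a Monte Carlo volume estimate. The following NumPy sketch is an illustration under assumed names, not the library's implementation; for `dim=3` the estimate should approach pi**2 / 2 ~= 4.93, the known volume of the set of 3x3 correlation matrices.

```python
import numpy as np

def estimate_correlation_matrix_volume(dim, num_samples, det_bound=0.0, seed=0):
  # Propose correlation-like matrices uniformly, keep those that are PSD with
  # determinant > det_bound, and average the per-sample weights.
  rng = np.random.default_rng(seed)
  proposal_volume = 2. ** (dim * (dim - 1) / 2.)
  # Uniform correlation-like proposals: symmetric, unit diagonal, off-diagonal
  # entries drawn from Uniform(-1, 1).
  rows, cols = np.tril_indices(dim, k=-1)
  proposals = np.zeros((num_samples, dim, dim))
  proposals[:, rows, cols] = rng.uniform(-1., 1., size=(num_samples, len(rows)))
  proposals = proposals + np.swapaxes(proposals, -1, -2)
  proposals[:, np.arange(dim), np.arange(dim)] = 1.
  # Accept iff PSD (smallest eigenvalue >= 0) and determinant large enough.
  is_psd = np.linalg.eigvalsh(proposals).min(axis=-1) >= 0.
  big_det = np.linalg.det(proposals) > det_bound
  weights = proposal_volume * (is_psd & big_det)
  return weights.mean(), weights.std() / np.sqrt(num_samples)

estimate, standard_error = estimate_correlation_matrix_volume(3, 200000)
print(estimate, standard_error)  # estimate ~= 4.93 for dim=3, det_bound=0
```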
test
_clopper_pearson_confidence_interval
Computes a confidence interval for the mean of the given 1-D distribution. Assumes (and checks) that the given distribution is Bernoulli, i.e., takes only two values. This licenses using the CDF of the binomial distribution for the confidence, which is tighter (for extreme probabilities) than the DKWM inequality. The method is known as the [Clopper-Pearson method] (https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval). Assumes: - The given samples were drawn iid from the distribution of interest. - The given distribution is a Bernoulli, i.e., supported only on low and high. Guarantees: - The probability (over the randomness of drawing the given sample) that the true mean is outside the returned interval is no more than the given error_rate. Args: samples: `np.ndarray` of samples drawn iid from the distribution of interest. error_rate: Python `float` admissible rate of mistakes. Returns: low: Lower bound of confidence interval. high: Upper bound of confidence interval. Raises: ValueError: If `samples` has rank other than 1 (batch semantics are not implemented), or if `samples` contains values other than `low` or `high` (as that makes the distribution not Bernoulli).
tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_lib.py
def _clopper_pearson_confidence_interval(samples, error_rate): """Computes a confidence interval for the mean of the given 1-D distribution. Assumes (and checks) that the given distribution is Bernoulli, i.e., takes only two values. This licenses using the CDF of the binomial distribution for the confidence, which is tighter (for extreme probabilities) than the DKWM inequality. The method is known as the [Clopper-Pearson method] (https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval). Assumes: - The given samples were drawn iid from the distribution of interest. - The given distribution is a Bernoulli, i.e., supported only on low and high. Guarantees: - The probability (over the randomness of drawing the given sample) that the true mean is outside the returned interval is no more than the given error_rate. Args: samples: `np.ndarray` of samples drawn iid from the distribution of interest. error_rate: Python `float` admissible rate of mistakes. Returns: low: Lower bound of confidence interval. high: Upper bound of confidence interval. Raises: ValueError: If `samples` has rank other than 1 (batch semantics are not implemented), or if `samples` contains values other than `low` or `high` (as that makes the distribution not Bernoulli). """ # TODO(b/78025336) Migrate this confidence interval function # to statistical_testing.py. In order to do that # - Get the binomial CDF from the Binomial distribution # - Implement scalar root finding in TF. Batch bisection search # shouldn't be too hard, and is definitely good enough for this # problem. Batching the Brent algorithm (from scipy) that is used # here may be more involved, but may also not be necessary---it's # only used here because scipy made it convenient. In particular, # robustness is more important than speed here, which may make # bisection search actively better. # - The rest is just a matter of rewriting in the appropriate style. if optimize is None or stats is None: raise ValueError( "Scipy is required for computing Clopper-Pearson confidence intervals") if len(samples.shape) != 1: raise ValueError("Batch semantics not implemented") n = len(samples) low = np.amin(samples) high = np.amax(samples) successes = np.count_nonzero(samples - low) failures = np.count_nonzero(samples - high) if successes + failures != n: uniques = np.unique(samples) msg = ("Purportedly Bernoulli distribution had distinct samples" " {}, {}, and {}".format(uniques[0], uniques[1], uniques[2])) raise ValueError(msg) def p_small_enough(p): prob = stats.binom.logcdf(successes, n, p) return prob - np.log(error_rate / 2.) def p_big_enough(p): prob = stats.binom.logsf(successes, n, p) return prob - np.log(error_rate / 2.) high_p = optimize.brentq( p_small_enough, float(successes) / n, 1., rtol=1e-9) low_p = optimize.brentq( p_big_enough, 0., float(successes) / n, rtol=1e-9) low_interval = low + (high - low) * low_p high_interval = low + (high - low) * high_p return (low_interval, high_interval)
def _clopper_pearson_confidence_interval(samples, error_rate): """Computes a confidence interval for the mean of the given 1-D distribution. Assumes (and checks) that the given distribution is Bernoulli, i.e., takes only two values. This licenses using the CDF of the binomial distribution for the confidence, which is tighter (for extreme probabilities) than the DKWM inequality. The method is known as the [Clopper-Pearson method] (https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval). Assumes: - The given samples were drawn iid from the distribution of interest. - The given distribution is a Bernoulli, i.e., supported only on low and high. Guarantees: - The probability (over the randomness of drawing the given sample) that the true mean is outside the returned interval is no more than the given error_rate. Args: samples: `np.ndarray` of samples drawn iid from the distribution of interest. error_rate: Python `float` admissible rate of mistakes. Returns: low: Lower bound of confidence interval. high: Upper bound of confidence interval. Raises: ValueError: If `samples` has rank other than 1 (batch semantics are not implemented), or if `samples` contains values other than `low` or `high` (as that makes the distribution not Bernoulli). """ # TODO(b/78025336) Migrate this confidence interval function # to statistical_testing.py. In order to do that # - Get the binomial CDF from the Binomial distribution # - Implement scalar root finding in TF. Batch bisection search # shouldn't be too hard, and is definitely good enough for this # problem. Batching the Brent algorithm (from scipy) that is used # here may be more involved, but may also not be necessary---it's # only used here because scipy made it convenient. In particular, # robustness is more important than speed here, which may make # bisection search actively better. # - The rest is just a matter of rewriting in the appropriate style. if optimize is None or stats is None: raise ValueError( "Scipy is required for computing Clopper-Pearson confidence intervals") if len(samples.shape) != 1: raise ValueError("Batch semantics not implemented") n = len(samples) low = np.amin(samples) high = np.amax(samples) successes = np.count_nonzero(samples - low) failures = np.count_nonzero(samples - high) if successes + failures != n: uniques = np.unique(samples) msg = ("Purportedly Bernoulli distribution had distinct samples" " {}, {}, and {}".format(uniques[0], uniques[1], uniques[2])) raise ValueError(msg) def p_small_enough(p): prob = stats.binom.logcdf(successes, n, p) return prob - np.log(error_rate / 2.) def p_big_enough(p): prob = stats.binom.logsf(successes, n, p) return prob - np.log(error_rate / 2.) high_p = optimize.brentq( p_small_enough, float(successes) / n, 1., rtol=1e-9) low_p = optimize.brentq( p_big_enough, 0., float(successes) / n, rtol=1e-9) low_interval = low + (high - low) * low_p high_interval = low + (high - low) * high_p return (low_interval, high_interval)
[ "Computes", "a", "confidence", "interval", "for", "the", "mean", "of", "the", "given", "1", "-", "D", "distribution", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_lib.py#L219-L294
[ "def", "_clopper_pearson_confidence_interval", "(", "samples", ",", "error_rate", ")", ":", "# TODO(b/78025336) Migrate this confidence interval function", "# to statistical_testing.py. In order to do that", "# - Get the binomial CDF from the Binomial distribution", "# - Implement scalar root finding in TF. Batch bisection search", "# shouldn't be too hard, and is definitely good enough for this", "# problem. Batching the Brent algorithm (from scipy) that is used", "# here may be more involved, but may also not be necessary---it's", "# only used here because scipy made it convenient. In particular,", "# robustness is more important than speed here, which may make", "# bisection search actively better.", "# - The rest is just a matter of rewriting in the appropriate style.", "if", "optimize", "is", "None", "or", "stats", "is", "None", ":", "raise", "ValueError", "(", "\"Scipy is required for computing Clopper-Pearson confidence intervals\"", ")", "if", "len", "(", "samples", ".", "shape", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"Batch semantics not implemented\"", ")", "n", "=", "len", "(", "samples", ")", "low", "=", "np", ".", "amin", "(", "samples", ")", "high", "=", "np", ".", "amax", "(", "samples", ")", "successes", "=", "np", ".", "count_nonzero", "(", "samples", "-", "low", ")", "failures", "=", "np", ".", "count_nonzero", "(", "samples", "-", "high", ")", "if", "successes", "+", "failures", "!=", "n", ":", "uniques", "=", "np", ".", "unique", "(", "samples", ")", "msg", "=", "(", "\"Purportedly Bernoulli distribution had distinct samples\"", "\" {}, {}, and {}\"", ".", "format", "(", "uniques", "[", "0", "]", ",", "uniques", "[", "1", "]", ",", "uniques", "[", "2", "]", ")", ")", "raise", "ValueError", "(", "msg", ")", "def", "p_small_enough", "(", "p", ")", ":", "prob", "=", "stats", ".", "binom", ".", "logcdf", "(", "successes", ",", "n", ",", "p", ")", "return", "prob", "-", "np", ".", "log", "(", "error_rate", "/", "2.", ")", "def", "p_big_enough", "(", "p", ")", ":", "prob", "=", "stats", ".", "binom", ".", "logsf", "(", "successes", ",", "n", ",", "p", ")", "return", "prob", "-", "np", ".", "log", "(", "error_rate", "/", "2.", ")", "high_p", "=", "optimize", ".", "brentq", "(", "p_small_enough", ",", "float", "(", "successes", ")", "/", "n", ",", "1.", ",", "rtol", "=", "1e-9", ")", "low_p", "=", "optimize", ".", "brentq", "(", "p_big_enough", ",", "0.", ",", "float", "(", "successes", ")", "/", "n", ",", "rtol", "=", "1e-9", ")", "low_interval", "=", "low", "+", "(", "high", "-", "low", ")", "*", "low_p", "high_interval", "=", "low", "+", "(", "high", "-", "low", ")", "*", "high_p", "return", "(", "low_interval", ",", "high_interval", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
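For intuition about what `_clopper_pearson_confidence_interval` computes, here is a SciPy sketch of the same construction for a plain 0/1 sample, using the standard Beta-quantile form of the Clopper-Pearson bounds instead of root finding. The function name and the example probability 0.3 are assumptions for this sketch only; the library version additionally rescales to arbitrary two-point supports.

```python
import numpy as np
from scipy import stats

def clopper_pearson_01(samples, error_rate=1e-6):
  # Clopper-Pearson bounds on the mean of a 0/1 sample via Beta quantiles.
  samples = np.asarray(samples)
  n = len(samples)
  k = int(np.count_nonzero(samples))
  low = 0. if k == 0 else stats.beta.ppf(error_rate / 2., k, n - k + 1)
  high = 1. if k == n else stats.beta.ppf(1. - error_rate / 2., k + 1, n - k)
  return low, high

rng = np.random.default_rng(0)
draws = (rng.uniform(size=10000) < 0.3).astype(np.float64)
print(clopper_pearson_01(draws))  # an interval bracketing the true mean 0.3
```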
test
compute_true_volumes
Returns confidence intervals for the desired correlation matrix volumes. The confidence intervals are computed by the [Clopper-Pearson method] (https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval). Args: det_bounds: A rank-1 numpy array of lower bounds on the determinants of acceptable matrices. Entries must be unique. dim: A Python `int` dimension of correlation matrices to sample. num_samples: The number of samples to draw. error_rate: The statistical significance of the returned confidence intervals. The significance is broadcast: Each returned interval separately may be incorrect with probability (under the sample of correlation-like matrices drawn internally) at most `error_rate`. seed: Random seed. Returns: bounds: A Python `dict` mapping each determinant bound to the low, high tuple giving the confidence interval.
tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_lib.py
def compute_true_volumes( det_bounds, dim, num_samples, error_rate=1e-6, seed=42): """Returns confidence intervals for the desired correlation matrix volumes. The confidence intervals are computed by the [Clopper-Pearson method] (https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval). Args: det_bounds: A rank-1 numpy array of lower bounds on the determinants of acceptable matrices. Entries must be unique. dim: A Python `int` dimension of correlation matrices to sample. num_samples: The number of samples to draw. error_rate: The statistical significance of the returned confidence intervals. The significance is broadcast: Each returned interval separately may be incorrect with probability (under the sample of correlation-like matrices drawn internally) at most `error_rate`. seed: Random seed. Returns: bounds: A Python `dict` mapping each determinant bound to the low, high tuple giving the confidence interval. """ bounds = {} with tf.compat.v1.Session() as sess: rej_weights, _ = correlation_matrix_volume_rejection_samples( det_bounds, dim, [num_samples, len(det_bounds)], np.float32, seed=seed) rej_weights = sess.run(rej_weights) for rw, det in zip(np.rollaxis(rej_weights, 1), det_bounds): template = ("Estimating volume of {}x{} correlation " "matrices with determinant >= {}.") print(template.format(dim, dim, det)) sys.stdout.flush() bounds[det] = _clopper_pearson_confidence_interval( rw, error_rate=error_rate) return bounds
def compute_true_volumes( det_bounds, dim, num_samples, error_rate=1e-6, seed=42): """Returns confidence intervals for the desired correlation matrix volumes. The confidence intervals are computed by the [Clopper-Pearson method] (https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval). Args: det_bounds: A rank-1 numpy array of lower bounds on the determinants of acceptable matrices. Entries must be unique. dim: A Python `int` dimension of correlation matrices to sample. num_samples: The number of samples to draw. error_rate: The statistical significance of the returned confidence intervals. The significance is broadcast: Each returned interval separately may be incorrect with probability (under the sample of correlation-like matrices drawn internally) at most `error_rate`. seed: Random seed. Returns: bounds: A Python `dict` mapping each determinant bound to the low, high tuple giving the confidence interval. """ bounds = {} with tf.compat.v1.Session() as sess: rej_weights, _ = correlation_matrix_volume_rejection_samples( det_bounds, dim, [num_samples, len(det_bounds)], np.float32, seed=seed) rej_weights = sess.run(rej_weights) for rw, det in zip(np.rollaxis(rej_weights, 1), det_bounds): template = ("Estimating volume of {}x{} correlation " "matrices with determinant >= {}.") print(template.format(dim, dim, det)) sys.stdout.flush() bounds[det] = _clopper_pearson_confidence_interval( rw, error_rate=error_rate) return bounds
[ "Returns", "confidence", "intervals", "for", "the", "desired", "correlation", "matrix", "volumes", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_lib.py#L297-L332
[ "def", "compute_true_volumes", "(", "det_bounds", ",", "dim", ",", "num_samples", ",", "error_rate", "=", "1e-6", ",", "seed", "=", "42", ")", ":", "bounds", "=", "{", "}", "with", "tf", ".", "compat", ".", "v1", ".", "Session", "(", ")", "as", "sess", ":", "rej_weights", ",", "_", "=", "correlation_matrix_volume_rejection_samples", "(", "det_bounds", ",", "dim", ",", "[", "num_samples", ",", "len", "(", "det_bounds", ")", "]", ",", "np", ".", "float32", ",", "seed", "=", "seed", ")", "rej_weights", "=", "sess", ".", "run", "(", "rej_weights", ")", "for", "rw", ",", "det", "in", "zip", "(", "np", ".", "rollaxis", "(", "rej_weights", ",", "1", ")", ",", "det_bounds", ")", ":", "template", "=", "(", "\"Estimating volume of {}x{} correlation \"", "\"matrices with determinant >= {}.\"", ")", "print", "(", "template", ".", "format", "(", "dim", ",", "dim", ",", "det", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "bounds", "[", "det", "]", "=", "_clopper_pearson_confidence_interval", "(", "rw", ",", "error_rate", "=", "error_rate", ")", "return", "bounds" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
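A hypothetical call of `compute_true_volumes`, showing how the returned dict of confidence intervals might be consumed. The import path is taken from the `path` field above and the argument values are made up for illustration; this assumes a TF1-style graph-mode setup, since the function opens a `tf.compat.v1.Session` internally.

```python
import numpy as np
from tensorflow_probability.python.distributions.internal import (
    correlation_matrix_volumes_lib as corr_lib)

det_bounds = np.array([0.01, 0.25, 0.5], dtype=np.float32)
bounds = corr_lib.compute_true_volumes(det_bounds, dim=4, num_samples=100000)
for det in sorted(bounds):
  low, high = bounds[det]
  print("det >= {}: volume in [{:.4f}, {:.4f}]".format(det, low, high))
```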
test
_kl_von_mises_von_mises
Batchwise KL divergence KL(d1 || d2) with d1 and d2 von Mises.

Args:
  d1: instance of a von Mises distribution object.
  d2: instance of a von Mises distribution object.
  name: (optional) Name to use for created operations.
    Default is "kl_von_mises_von_mises".

Returns:
  Batchwise KL(d1 || d2)
tensorflow_probability/python/distributions/von_mises.py
def _kl_von_mises_von_mises(d1, d2, name=None):
  """Batchwise KL divergence KL(d1 || d2) with d1 and d2 von Mises.

  Args:
    d1: instance of a von Mises distribution object.
    d2: instance of a von Mises distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_von_mises_von_mises".

  Returns:
    Batchwise KL(d1 || d2)
  """
  with tf.name_scope(name or "kl_von_mises_von_mises"):
    # The density of von Mises is (abbreviating the concentration for conc):
    #   vonMises(x; loc, conc) = exp(conc cos(x - loc)) / (2 pi I_0 (conc) )
    # We need two properties:
    # 1. Standardization: if z ~ vonMises(0, conc), then
    #    z + loc ~ vonMises(loc, conc).
    # 2. Expectation of cosine:
    #    E_q(z | 0, conc) cos z = I_1 (conc) / I_0 (conc)
    # Now,
    #   KL(d1 || d2)
    #     = E_vonMises(x; loc1, conc1) log vonMises(x; loc1, conc1)
    #                                      / vonMises(x; loc2, conc2)
    # Plugging the densities and rearranging, we have
    #   log I_0(conc2) / I_0(conc1)
    #     + E_vonMises(x; loc1, conc1) [ conc1 cos (z - loc1)
    #                                    - conc2 cos (z - loc2) ]
    # Let's transform the second term using the standardization property:
    #   E_vonMises(x; 0, conc1) [conc1 cos z - conc2 cos (z - (loc2 - loc1))]
    # Applying the cos (x - y) = cos x cos y + sin x sin y expansion, we get
    #   E_vonMises(x; 0, conc1) [conc1 cos z - conc2 cos (loc2 - loc1) cos z
    #                            - conc2 sin(loc2 - loc1) sin z]
    # Because the distribution is symmetric around zero, the last term vanishes
    # in expectation. The remaining two terms are computed using the
    # "expectation of cosine" property:
    #   (conc1 - conc2 cos (loc2 - loc1)) E_vonMises(x; 0, conc1) cos z
    #     = (conc1 - conc2 cos (loc2 - loc1)) I_1(conc1) / I_0(conc1)
    # In total, we have
    #   KL(d1 || d2) = log I_0(conc2) / I_0(conc1)
    #     + (conc1 - conc2 cos (loc2 - loc1)) I_1(conc1) / I_0(conc1)
    # To improve the numerical stability, we can replace I_j(k) functions with
    # the exponentially scaled versions using the equality
    # I_j(k) = I_j^E(k) exp(k) (which holds for k >= 0):
    #   KL(d1 || d2) = (conc2 - conc1) + log I_0^E(conc2) / I_0^E(conc1)
    #     + (conc1 - conc2 cos (loc2 - loc1)) I_1^E(conc1) / I_0^E(conc1)
    # Note that this formula is numerically stable for conc1 = 0 and/or
    # conc2 = 0 because I_0 (0) = I_0^E (0) = 1.
    i0e_concentration1 = tf.math.bessel_i0e(d1.concentration)
    i1e_concentration1 = tf.math.bessel_i1e(d1.concentration)
    i0e_concentration2 = tf.math.bessel_i0e(d2.concentration)
    return ((d2.concentration - d1.concentration) +
            tf.math.log(i0e_concentration2 / i0e_concentration1) +
            (d1.concentration - d2.concentration * tf.cos(d1.loc - d2.loc)) *
            (i1e_concentration1 / i0e_concentration1))
def _kl_von_mises_von_mises(d1, d2, name=None):
  """Batchwise KL divergence KL(d1 || d2) with d1 and d2 von Mises.

  Args:
    d1: instance of a von Mises distribution object.
    d2: instance of a von Mises distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_von_mises_von_mises".

  Returns:
    Batchwise KL(d1 || d2)
  """
  with tf.name_scope(name or "kl_von_mises_von_mises"):
    # The density of von Mises is (abbreviating the concentration for conc):
    #   vonMises(x; loc, conc) = exp(conc cos(x - loc)) / (2 pi I_0 (conc) )
    # We need two properties:
    # 1. Standardization: if z ~ vonMises(0, conc), then
    #    z + loc ~ vonMises(loc, conc).
    # 2. Expectation of cosine:
    #    E_q(z | 0, conc) cos z = I_1 (conc) / I_0 (conc)
    # Now,
    #   KL(d1 || d2)
    #     = E_vonMises(x; loc1, conc1) log vonMises(x; loc1, conc1)
    #                                      / vonMises(x; loc2, conc2)
    # Plugging the densities and rearranging, we have
    #   log I_0(conc2) / I_0(conc1)
    #     + E_vonMises(x; loc1, conc1) [ conc1 cos (z - loc1)
    #                                    - conc2 cos (z - loc2) ]
    # Let's transform the second term using the standardization property:
    #   E_vonMises(x; 0, conc1) [conc1 cos z - conc2 cos (z - (loc2 - loc1))]
    # Applying the cos (x - y) = cos x cos y + sin x sin y expansion, we get
    #   E_vonMises(x; 0, conc1) [conc1 cos z - conc2 cos (loc2 - loc1) cos z
    #                            - conc2 sin(loc2 - loc1) sin z]
    # Because the distribution is symmetric around zero, the last term vanishes
    # in expectation. The remaining two terms are computed using the
    # "expectation of cosine" property:
    #   (conc1 - conc2 cos (loc2 - loc1)) E_vonMises(x; 0, conc1) cos z
    #     = (conc1 - conc2 cos (loc2 - loc1)) I_1(conc1) / I_0(conc1)
    # In total, we have
    #   KL(d1 || d2) = log I_0(conc2) / I_0(conc1)
    #     + (conc1 - conc2 cos (loc2 - loc1)) I_1(conc1) / I_0(conc1)
    # To improve the numerical stability, we can replace I_j(k) functions with
    # the exponentially scaled versions using the equality
    # I_j(k) = I_j^E(k) exp(k) (which holds for k >= 0):
    #   KL(d1 || d2) = (conc2 - conc1) + log I_0^E(conc2) / I_0^E(conc1)
    #     + (conc1 - conc2 cos (loc2 - loc1)) I_1^E(conc1) / I_0^E(conc1)
    # Note that this formula is numerically stable for conc1 = 0 and/or
    # conc2 = 0 because I_0 (0) = I_0^E (0) = 1.
    i0e_concentration1 = tf.math.bessel_i0e(d1.concentration)
    i1e_concentration1 = tf.math.bessel_i1e(d1.concentration)
    i0e_concentration2 = tf.math.bessel_i0e(d2.concentration)
    return ((d2.concentration - d1.concentration) +
            tf.math.log(i0e_concentration2 / i0e_concentration1) +
            (d1.concentration - d2.concentration * tf.cos(d1.loc - d2.loc)) *
            (i1e_concentration1 / i0e_concentration1))
[ "Batchwise", "KL", "divergence", "KL", "(", "d1", "||", "d2", ")", "with", "d1", "and", "d2", "von", "Mises", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/von_mises.py#L250-L304
[ "def", "_kl_von_mises_von_mises", "(", "d1", ",", "d2", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", "or", "\"kl_von_mises_von_mises\"", ")", ":", "# The density of von Mises is (abbreviating the concentration for conc):", "# vonMises(x; loc, conc) = exp(conc cos(x - loc)) / (2 pi I_0 (conc) )", "# We need two properties:", "# 1. Standardization: if z ~ vonMises(0, conc), then", "# z + loc ~ vonMises(loc, conc).", "# 2. Expectation of cosine:", "# E_q(z | 0, conc) cos z = I_1 (conc) / I_0 (conc)", "# Now,", "# KL(d1 || d2)", "# = E_vonMises(x; loc1, conc1) log vonMises(x; loc1, conc1)", "# / vonMises(x; loc2, conc2)", "# Plugging the densities and rearranging, we have", "# log I_0(conc2) / I_0(conc1)", "# + E_vonMises(x; loc1, conc1) [ conc1 cos (z - loc1)", "# - conc2 cos (z - loc2) ]", "# Let's transform the second term using the standardization property:", "# E_vonMises(x; 0, conc1) [conc1 cos z - conc2 cos (z - (loc2 - loc1))]", "# Applying the cos (x - y) = cos x cos y + sin x sin y expansion, we get", "# E_vonMises(x; 0, conc1) [conc1 cos z - conc2 cos (loc2 - loc1) cos z", "# - conc2 sin(loc2 - loc1) sin z]", "# Because the distribution is symmetric around zero, the last term vanishes", "# in expectation. The remaining two terms are computed using the", "# \"expectation of cosine\" property:", "# (conc1 - conc2 cos (loc2 - loc1) E_vonMises(x; 0, conc1) cos z", "# = (conc1 - conc2 cos (loc2 - loc1)) I_1(conc1) / I_0(conc1)", "# In total, we have", "# KL(d1 || d2) = log I_0(conc2) / I_0(conc1)", "# + (conc1 - conc2 cos (loc2 - loc1)) I_1(conc1) / I_0(conc1)", "# To improve the numerical stability, we can replace I_j(k) functions with", "# the exponentially scaled versions using the equality", "# I_j(k) = I_j^E(k) exp(k) (which holds for k >= 0):", "# KL(d1 || d2) = (conc2 - conc1) + log I_0^E(conc2) / I_0^E(conc1)", "# + (conc1 - conc2 cos (loc2 - loc1)) I_1^E(conc1) / I_0^E(conc1)", "# Note that this formula is numerically stable for conc1 = 0 and/or", "# conc2 = 0 because I_0 (0) = I_0^E (0) = 1.", "i0e_concentration1", "=", "tf", ".", "math", ".", "bessel_i0e", "(", "d1", ".", "concentration", ")", "i1e_concentration1", "=", "tf", ".", "math", ".", "bessel_i1e", "(", "d1", ".", "concentration", ")", "i0e_concentration2", "=", "tf", ".", "math", ".", "bessel_i0e", "(", "d2", ".", "concentration", ")", "return", "(", "(", "d2", ".", "concentration", "-", "d1", ".", "concentration", ")", "+", "tf", ".", "math", ".", "log", "(", "i0e_concentration2", "/", "i0e_concentration1", ")", "+", "(", "d1", ".", "concentration", "-", "d2", ".", "concentration", "*", "tf", ".", "cos", "(", "d1", ".", "loc", "-", "d2", ".", "loc", ")", ")", "*", "(", "i1e_concentration1", "/", "i0e_concentration1", ")", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
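As a quick sanity check (not part of the library), the closed form at the end of the derivation above can be reproduced with SciPy's exponentially scaled Bessel functions and compared against numerical integration of the KL integrand over one period; the helper names and test parameters below are assumptions for this sketch.

```python
import numpy as np
from scipy import special, stats

def kl_von_mises(loc1, conc1, loc2, conc2):
  # Closed form from the derivation above, written with exponentially scaled
  # Bessel functions I_j^E, mirroring the TF implementation.
  i0e1, i1e1 = special.i0e(conc1), special.i1e(conc1)
  i0e2 = special.i0e(conc2)
  return ((conc2 - conc1) + np.log(i0e2 / i0e1)
          + (conc1 - conc2 * np.cos(loc1 - loc2)) * (i1e1 / i0e1))

def kl_von_mises_numeric(loc1, conc1, loc2, conc2, num_points=200001):
  # Brute force: integrate p1(x) * log(p1(x) / p2(x)) over one period.
  x = np.linspace(-np.pi, np.pi, num_points)
  p1 = stats.vonmises.pdf(x, conc1, loc=loc1)
  p2 = stats.vonmises.pdf(x, conc2, loc=loc2)
  return np.trapz(p1 * np.log(p1 / p2), x)

print(kl_von_mises(0.3, 2.0, -1.0, 5.0))          # closed form
print(kl_von_mises_numeric(0.3, 2.0, -1.0, 5.0))  # should agree closely
```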
test
von_mises_cdf
Computes the cumulative density function (CDF) of von Mises distribution. Denote the density of vonMises(loc=0, concentration=concentration) by p(t). Note that p(t) is periodic, p(t) = p(t + 2 pi). The CDF at the point x is defined as int_{-pi}^x p(t) dt. Thus, when x in [-pi, pi], the CDF is in [0, 1]; when x is in [pi, 3pi], the CDF is in [1, 2], etc. The CDF is not available in closed form. Instead, we use the method [1] which uses either a series expansion or a Normal approximation, depending on the value of concentration. We also compute the derivative of the CDF w.r.t. both x and concentration. The derivative w.r.t. x is p(x), while the derivative w.r.t. concentration is computed using automatic differentiation. We use forward mode for the series case (which allows to save memory) and backward mode for the Normal approximation. Arguments: x: The point at which to evaluate the CDF. concentration: The concentration parameter of the von Mises distribution. Returns: The value of the CDF computed elementwise. References: [1] G. Hill "Algorithm 518: Incomplete Bessel Function I_0. The Von Mises Distribution." ACM Transactions on Mathematical Software, 1977
tensorflow_probability/python/distributions/von_mises.py
def von_mises_cdf(x, concentration): """Computes the cumulative density function (CDF) of von Mises distribution. Denote the density of vonMises(loc=0, concentration=concentration) by p(t). Note that p(t) is periodic, p(t) = p(t + 2 pi). The CDF at the point x is defined as int_{-pi}^x p(t) dt. Thus, when x in [-pi, pi], the CDF is in [0, 1]; when x is in [pi, 3pi], the CDF is in [1, 2], etc. The CDF is not available in closed form. Instead, we use the method [1] which uses either a series expansion or a Normal approximation, depending on the value of concentration. We also compute the derivative of the CDF w.r.t. both x and concentration. The derivative w.r.t. x is p(x), while the derivative w.r.t. concentration is computed using automatic differentiation. We use forward mode for the series case (which allows to save memory) and backward mode for the Normal approximation. Arguments: x: The point at which to evaluate the CDF. concentration: The concentration parameter of the von Mises distribution. Returns: The value of the CDF computed elementwise. References: [1] G. Hill "Algorithm 518: Incomplete Bessel Function I_0. The Von Mises Distribution." ACM Transactions on Mathematical Software, 1977 """ x = tf.convert_to_tensor(value=x) concentration = tf.convert_to_tensor(value=concentration) dtype = x.dtype # Map x to [-pi, pi]. num_periods = tf.round(x / (2. * np.pi)) x -= (2. * np.pi) * num_periods # We take the hyperparameters from Table I of [1], the row for D=8 # decimal digits of accuracy. ck is the cut-off for concentration: # if concentration < ck, the series expansion is used; # otherwise, the Normal approximation is used. ck = 10.5 # The number of terms in the series expansion. [1] chooses it as a function # of concentration, n(concentration). This is hard to implement in TF. # Instead, we upper bound it over concentrations: # num_terms = ceil ( max_{concentration <= ck} n(concentration) ). # The maximum is achieved for concentration = ck. num_terms = 20 cdf_series, dcdf_dconcentration_series = _von_mises_cdf_series( x, concentration, num_terms, dtype) cdf_normal, dcdf_dconcentration_normal = _von_mises_cdf_normal( x, concentration, dtype) use_series = concentration < ck cdf = tf.where(use_series, cdf_series, cdf_normal) cdf += num_periods dcdf_dconcentration = tf.where(use_series, dcdf_dconcentration_series, dcdf_dconcentration_normal) def grad(dy): prob = tf.exp(concentration * (tf.cos(x) - 1.)) / ( (2. * np.pi) * tf.math.bessel_i0e(concentration)) return dy * prob, dy * dcdf_dconcentration return cdf, grad
def von_mises_cdf(x, concentration): """Computes the cumulative density function (CDF) of von Mises distribution. Denote the density of vonMises(loc=0, concentration=concentration) by p(t). Note that p(t) is periodic, p(t) = p(t + 2 pi). The CDF at the point x is defined as int_{-pi}^x p(t) dt. Thus, when x in [-pi, pi], the CDF is in [0, 1]; when x is in [pi, 3pi], the CDF is in [1, 2], etc. The CDF is not available in closed form. Instead, we use the method [1] which uses either a series expansion or a Normal approximation, depending on the value of concentration. We also compute the derivative of the CDF w.r.t. both x and concentration. The derivative w.r.t. x is p(x), while the derivative w.r.t. concentration is computed using automatic differentiation. We use forward mode for the series case (which allows to save memory) and backward mode for the Normal approximation. Arguments: x: The point at which to evaluate the CDF. concentration: The concentration parameter of the von Mises distribution. Returns: The value of the CDF computed elementwise. References: [1] G. Hill "Algorithm 518: Incomplete Bessel Function I_0. The Von Mises Distribution." ACM Transactions on Mathematical Software, 1977 """ x = tf.convert_to_tensor(value=x) concentration = tf.convert_to_tensor(value=concentration) dtype = x.dtype # Map x to [-pi, pi]. num_periods = tf.round(x / (2. * np.pi)) x -= (2. * np.pi) * num_periods # We take the hyperparameters from Table I of [1], the row for D=8 # decimal digits of accuracy. ck is the cut-off for concentration: # if concentration < ck, the series expansion is used; # otherwise, the Normal approximation is used. ck = 10.5 # The number of terms in the series expansion. [1] chooses it as a function # of concentration, n(concentration). This is hard to implement in TF. # Instead, we upper bound it over concentrations: # num_terms = ceil ( max_{concentration <= ck} n(concentration) ). # The maximum is achieved for concentration = ck. num_terms = 20 cdf_series, dcdf_dconcentration_series = _von_mises_cdf_series( x, concentration, num_terms, dtype) cdf_normal, dcdf_dconcentration_normal = _von_mises_cdf_normal( x, concentration, dtype) use_series = concentration < ck cdf = tf.where(use_series, cdf_series, cdf_normal) cdf += num_periods dcdf_dconcentration = tf.where(use_series, dcdf_dconcentration_series, dcdf_dconcentration_normal) def grad(dy): prob = tf.exp(concentration * (tf.cos(x) - 1.)) / ( (2. * np.pi) * tf.math.bessel_i0e(concentration)) return dy * prob, dy * dcdf_dconcentration return cdf, grad
[ "Computes", "the", "cumulative", "density", "function", "(", "CDF", ")", "of", "von", "Mises", "distribution", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/von_mises.py#L308-L374
[ "def", "von_mises_cdf", "(", "x", ",", "concentration", ")", ":", "x", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x", ")", "concentration", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "concentration", ")", "dtype", "=", "x", ".", "dtype", "# Map x to [-pi, pi].", "num_periods", "=", "tf", ".", "round", "(", "x", "/", "(", "2.", "*", "np", ".", "pi", ")", ")", "x", "-=", "(", "2.", "*", "np", ".", "pi", ")", "*", "num_periods", "# We take the hyperparameters from Table I of [1], the row for D=8", "# decimal digits of accuracy. ck is the cut-off for concentration:", "# if concentration < ck, the series expansion is used;", "# otherwise, the Normal approximation is used.", "ck", "=", "10.5", "# The number of terms in the series expansion. [1] chooses it as a function", "# of concentration, n(concentration). This is hard to implement in TF.", "# Instead, we upper bound it over concentrations:", "# num_terms = ceil ( max_{concentration <= ck} n(concentration) ).", "# The maximum is achieved for concentration = ck.", "num_terms", "=", "20", "cdf_series", ",", "dcdf_dconcentration_series", "=", "_von_mises_cdf_series", "(", "x", ",", "concentration", ",", "num_terms", ",", "dtype", ")", "cdf_normal", ",", "dcdf_dconcentration_normal", "=", "_von_mises_cdf_normal", "(", "x", ",", "concentration", ",", "dtype", ")", "use_series", "=", "concentration", "<", "ck", "cdf", "=", "tf", ".", "where", "(", "use_series", ",", "cdf_series", ",", "cdf_normal", ")", "cdf", "+=", "num_periods", "dcdf_dconcentration", "=", "tf", ".", "where", "(", "use_series", ",", "dcdf_dconcentration_series", ",", "dcdf_dconcentration_normal", ")", "def", "grad", "(", "dy", ")", ":", "prob", "=", "tf", ".", "exp", "(", "concentration", "*", "(", "tf", ".", "cos", "(", "x", ")", "-", "1.", ")", ")", "/", "(", "(", "2.", "*", "np", ".", "pi", ")", "*", "tf", ".", "math", ".", "bessel_i0e", "(", "concentration", ")", ")", "return", "dy", "*", "prob", ",", "dy", "*", "dcdf_dconcentration", "return", "cdf", ",", "grad" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_von_mises_cdf_series
Computes the von Mises CDF and its derivative via series expansion.
tensorflow_probability/python/distributions/von_mises.py
def _von_mises_cdf_series(x, concentration, num_terms, dtype): """Computes the von Mises CDF and its derivative via series expansion.""" # Keep the number of terms as a float. It should be a small integer, so # exactly representable as a float. num_terms = tf.cast(num_terms, dtype=dtype) def loop_body(n, rn, drn_dconcentration, vn, dvn_dconcentration): """One iteration of the series loop.""" denominator = 2. * n / concentration + rn ddenominator_dk = -2. * n / concentration ** 2 + drn_dconcentration rn = 1. / denominator drn_dconcentration = -ddenominator_dk / denominator ** 2 multiplier = tf.sin(n * x) / n + vn vn = rn * multiplier dvn_dconcentration = (drn_dconcentration * multiplier + rn * dvn_dconcentration) n -= 1. return n, rn, drn_dconcentration, vn, dvn_dconcentration (_, _, _, vn, dvn_dconcentration) = tf.while_loop( cond=lambda n, *_: n > 0., body=loop_body, loop_vars=( num_terms, # n tf.zeros_like(x, name="rn"), tf.zeros_like(x, name="drn_dconcentration"), tf.zeros_like(x, name="vn"), tf.zeros_like(x, name="dvn_dconcentration"), ), ) cdf = .5 + x / (2. * np.pi) + vn / np.pi dcdf_dconcentration = dvn_dconcentration / np.pi # Clip the result to [0, 1]. cdf_clipped = tf.clip_by_value(cdf, 0., 1.) # The clipped values do not depend on concentration anymore, so set their # derivative to zero. dcdf_dconcentration *= tf.cast((cdf >= 0.) & (cdf <= 1.), dtype) return cdf_clipped, dcdf_dconcentration
def _von_mises_cdf_series(x, concentration, num_terms, dtype): """Computes the von Mises CDF and its derivative via series expansion.""" # Keep the number of terms as a float. It should be a small integer, so # exactly representable as a float. num_terms = tf.cast(num_terms, dtype=dtype) def loop_body(n, rn, drn_dconcentration, vn, dvn_dconcentration): """One iteration of the series loop.""" denominator = 2. * n / concentration + rn ddenominator_dk = -2. * n / concentration ** 2 + drn_dconcentration rn = 1. / denominator drn_dconcentration = -ddenominator_dk / denominator ** 2 multiplier = tf.sin(n * x) / n + vn vn = rn * multiplier dvn_dconcentration = (drn_dconcentration * multiplier + rn * dvn_dconcentration) n -= 1. return n, rn, drn_dconcentration, vn, dvn_dconcentration (_, _, _, vn, dvn_dconcentration) = tf.while_loop( cond=lambda n, *_: n > 0., body=loop_body, loop_vars=( num_terms, # n tf.zeros_like(x, name="rn"), tf.zeros_like(x, name="drn_dconcentration"), tf.zeros_like(x, name="vn"), tf.zeros_like(x, name="dvn_dconcentration"), ), ) cdf = .5 + x / (2. * np.pi) + vn / np.pi dcdf_dconcentration = dvn_dconcentration / np.pi # Clip the result to [0, 1]. cdf_clipped = tf.clip_by_value(cdf, 0., 1.) # The clipped values do not depend on concentration anymore, so set their # derivative to zero. dcdf_dconcentration *= tf.cast((cdf >= 0.) & (cdf <= 1.), dtype) return cdf_clipped, dcdf_dconcentration
[ "Computes", "the", "von", "Mises", "CDF", "and", "its", "derivative", "via", "series", "expansion", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/von_mises.py#L377-L420
[ "def", "_von_mises_cdf_series", "(", "x", ",", "concentration", ",", "num_terms", ",", "dtype", ")", ":", "# Keep the number of terms as a float. It should be a small integer, so", "# exactly representable as a float.", "num_terms", "=", "tf", ".", "cast", "(", "num_terms", ",", "dtype", "=", "dtype", ")", "def", "loop_body", "(", "n", ",", "rn", ",", "drn_dconcentration", ",", "vn", ",", "dvn_dconcentration", ")", ":", "\"\"\"One iteration of the series loop.\"\"\"", "denominator", "=", "2.", "*", "n", "/", "concentration", "+", "rn", "ddenominator_dk", "=", "-", "2.", "*", "n", "/", "concentration", "**", "2", "+", "drn_dconcentration", "rn", "=", "1.", "/", "denominator", "drn_dconcentration", "=", "-", "ddenominator_dk", "/", "denominator", "**", "2", "multiplier", "=", "tf", ".", "sin", "(", "n", "*", "x", ")", "/", "n", "+", "vn", "vn", "=", "rn", "*", "multiplier", "dvn_dconcentration", "=", "(", "drn_dconcentration", "*", "multiplier", "+", "rn", "*", "dvn_dconcentration", ")", "n", "-=", "1.", "return", "n", ",", "rn", ",", "drn_dconcentration", ",", "vn", ",", "dvn_dconcentration", "(", "_", ",", "_", ",", "_", ",", "vn", ",", "dvn_dconcentration", ")", "=", "tf", ".", "while_loop", "(", "cond", "=", "lambda", "n", ",", "*", "_", ":", "n", ">", "0.", ",", "body", "=", "loop_body", ",", "loop_vars", "=", "(", "num_terms", ",", "# n", "tf", ".", "zeros_like", "(", "x", ",", "name", "=", "\"rn\"", ")", ",", "tf", ".", "zeros_like", "(", "x", ",", "name", "=", "\"drn_dconcentration\"", ")", ",", "tf", ".", "zeros_like", "(", "x", ",", "name", "=", "\"vn\"", ")", ",", "tf", ".", "zeros_like", "(", "x", ",", "name", "=", "\"dvn_dconcentration\"", ")", ",", ")", ",", ")", "cdf", "=", ".5", "+", "x", "/", "(", "2.", "*", "np", ".", "pi", ")", "+", "vn", "/", "np", ".", "pi", "dcdf_dconcentration", "=", "dvn_dconcentration", "/", "np", ".", "pi", "# Clip the result to [0, 1].", "cdf_clipped", "=", "tf", ".", "clip_by_value", "(", "cdf", ",", "0.", ",", "1.", ")", "# The clipped values do not depend on concentration anymore, so set their", "# derivative to zero.", "dcdf_dconcentration", "*=", "tf", ".", "cast", "(", "(", "cdf", ">=", "0.", ")", "&", "(", "cdf", "<=", "1.", ")", ",", "dtype", ")", "return", "cdf_clipped", ",", "dcdf_dconcentration" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
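The backward recurrence in `_von_mises_cdf_series` is compact enough to transcribe directly. The scalar NumPy sketch below mirrors the loop above (the derivative with respect to concentration is omitted, and the function name is assumed for illustration).

```python
# Scalar NumPy transcription of the series loop above. num_terms=20
# matches the upper bound chosen in the TFP code.
import numpy as np

def von_mises_cdf_series_np(x, concentration, num_terms=20):
    rn, vn = 0.0, 0.0
    for n in range(num_terms, 0, -1):
        rn = 1.0 / (2.0 * n / concentration + rn)   # r_n recurrence
        vn = rn * (np.sin(n * x) / n + vn)          # v_n recurrence
    cdf = 0.5 + x / (2.0 * np.pi) + vn / np.pi
    # Clip to [0, 1], as in the TFP implementation.
    return float(np.clip(cdf, 0.0, 1.0))

print(von_mises_cdf_series_np(0.5, concentration=2.0))
```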
test
_von_mises_cdf_normal
Computes the von Mises CDF and its derivative via Normal approximation.
tensorflow_probability/python/distributions/von_mises.py
def _von_mises_cdf_normal(x, concentration, dtype): """Computes the von Mises CDF and its derivative via Normal approximation.""" def cdf_func(concentration): """A helper function that is passed to value_and_gradient.""" # z is an "almost Normally distributed" random variable. z = ((np.sqrt(2. / np.pi) / tf.math.bessel_i0e(concentration)) * tf.sin(.5 * x)) # This is the correction described in [1] which reduces the error # of the Normal approximation. z2 = z ** 2 z3 = z2 * z z4 = z2 ** 2 c = 24. * concentration c1 = 56. xi = z - z3 / ((c - 2. * z2 - 16.) / 3. - (z4 + (7. / 4.) * z2 + 167. / 2.) / (c - c1 - z2 + 3.)) ** 2 distrib = normal.Normal(tf.cast(0., dtype), tf.cast(1., dtype)) return distrib.cdf(xi) return value_and_gradient(cdf_func, concentration)
def _von_mises_cdf_normal(x, concentration, dtype): """Computes the von Mises CDF and its derivative via Normal approximation.""" def cdf_func(concentration): """A helper function that is passed to value_and_gradient.""" # z is an "almost Normally distributed" random variable. z = ((np.sqrt(2. / np.pi) / tf.math.bessel_i0e(concentration)) * tf.sin(.5 * x)) # This is the correction described in [1] which reduces the error # of the Normal approximation. z2 = z ** 2 z3 = z2 * z z4 = z2 ** 2 c = 24. * concentration c1 = 56. xi = z - z3 / ((c - 2. * z2 - 16.) / 3. - (z4 + (7. / 4.) * z2 + 167. / 2.) / (c - c1 - z2 + 3.)) ** 2 distrib = normal.Normal(tf.cast(0., dtype), tf.cast(1., dtype)) return distrib.cdf(xi) return value_and_gradient(cdf_func, concentration)
[ "Computes", "the", "von", "Mises", "CDF", "and", "its", "derivative", "via", "Normal", "approximation", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/von_mises.py#L423-L447
[ "def", "_von_mises_cdf_normal", "(", "x", ",", "concentration", ",", "dtype", ")", ":", "def", "cdf_func", "(", "concentration", ")", ":", "\"\"\"A helper function that is passed to value_and_gradient.\"\"\"", "# z is an \"almost Normally distributed\" random variable.", "z", "=", "(", "(", "np", ".", "sqrt", "(", "2.", "/", "np", ".", "pi", ")", "/", "tf", ".", "math", ".", "bessel_i0e", "(", "concentration", ")", ")", "*", "tf", ".", "sin", "(", ".5", "*", "x", ")", ")", "# This is the correction described in [1] which reduces the error", "# of the Normal approximation.", "z2", "=", "z", "**", "2", "z3", "=", "z2", "*", "z", "z4", "=", "z2", "**", "2", "c", "=", "24.", "*", "concentration", "c1", "=", "56.", "xi", "=", "z", "-", "z3", "/", "(", "(", "c", "-", "2.", "*", "z2", "-", "16.", ")", "/", "3.", "-", "(", "z4", "+", "(", "7.", "/", "4.", ")", "*", "z2", "+", "167.", "/", "2.", ")", "/", "(", "c", "-", "c1", "-", "z2", "+", "3.", ")", ")", "**", "2", "distrib", "=", "normal", ".", "Normal", "(", "tf", ".", "cast", "(", "0.", ",", "dtype", ")", ",", "tf", ".", "cast", "(", "1.", ",", "dtype", ")", ")", "return", "distrib", ".", "cdf", "(", "xi", ")", "return", "value_and_gradient", "(", "cdf_func", ",", "concentration", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
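For completeness, the Normal-approximation branch can be written out the same way. The sketch below assumes SciPy for the exponentially scaled Bessel function and the standard Normal CDF; the function name is illustrative.

```python
# NumPy/SciPy transcription of the Normal-approximation branch above,
# used in the TFP code when concentration exceeds the cutoff (10.5).
import numpy as np
from scipy import special, stats

def von_mises_cdf_normal_np(x, concentration):
    z = (np.sqrt(2. / np.pi) / special.i0e(concentration)) * np.sin(0.5 * x)
    z2, z3, z4 = z ** 2, z ** 3, z ** 4
    c, c1 = 24. * concentration, 56.
    # Correction term from [1] that reduces the approximation error.
    xi = z - z3 / ((c - 2. * z2 - 16.) / 3.
                   - (z4 + (7. / 4.) * z2 + 167. / 2.)
                   / (c - c1 - z2 + 3.)) ** 2
    return stats.norm.cdf(xi)

print(von_mises_cdf_normal_np(0.5, concentration=12.0))
```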
test
random_von_mises
Samples from the standardized von Mises distribution. The distribution is vonMises(loc=0, concentration=concentration), so the mean is zero. The location can then be changed by adding it to the samples. The sampling algorithm is rejection sampling with wrapped Cauchy proposal [1]. The samples are pathwise differentiable using the approach of [2]. Arguments: shape: The output sample shape. concentration: The concentration parameter of the von Mises distribution. dtype: The data type of concentration and the outputs. seed: (optional) The random seed. Returns: Differentiable samples of standardized von Mises. References: [1] Luc Devroye "Non-Uniform Random Variate Generation", Springer-Verlag, 1986; Chapter 9, p. 473-476. http://www.nrbook.com/devroye/Devroye_files/chapter_nine.pdf + corrections http://www.nrbook.com/devroye/Devroye_files/errors.pdf [2] Michael Figurnov, Shakir Mohamed, Andriy Mnih. "Implicit Reparameterization Gradients", 2018.
tensorflow_probability/python/distributions/von_mises.py
def random_von_mises(shape, concentration, dtype=tf.float32, seed=None): """Samples from the standardized von Mises distribution. The distribution is vonMises(loc=0, concentration=concentration), so the mean is zero. The location can then be changed by adding it to the samples. The sampling algorithm is rejection sampling with wrapped Cauchy proposal [1]. The samples are pathwise differentiable using the approach of [2]. Arguments: shape: The output sample shape. concentration: The concentration parameter of the von Mises distribution. dtype: The data type of concentration and the outputs. seed: (optional) The random seed. Returns: Differentiable samples of standardized von Mises. References: [1] Luc Devroye "Non-Uniform Random Variate Generation", Springer-Verlag, 1986; Chapter 9, p. 473-476. http://www.nrbook.com/devroye/Devroye_files/chapter_nine.pdf + corrections http://www.nrbook.com/devroye/Devroye_files/errors.pdf [2] Michael Figurnov, Shakir Mohamed, Andriy Mnih. "Implicit Reparameterization Gradients", 2018. """ seed = SeedStream(seed, salt="von_mises") concentration = tf.convert_to_tensor( value=concentration, dtype=dtype, name="concentration") @tf.custom_gradient def rejection_sample_with_gradient(concentration): """Performs rejection sampling for standardized von Mises. A nested function is required because @tf.custom_gradient does not handle non-tensor inputs such as dtype. Instead, they are captured by the outer scope. Arguments: concentration: The concentration parameter of the distribution. Returns: Differentiable samples of standardized von Mises. """ r = 1. + tf.sqrt(1. + 4. * concentration ** 2) rho = (r - tf.sqrt(2. * r)) / (2. * concentration) s_exact = (1. + rho ** 2) / (2. * rho) # For low concentration, s becomes numerically unstable. # To fix that, we use an approximation. Here is the derivation. # First-order Taylor expansion at conc = 0 gives # sqrt(1 + 4 concentration^2) ~= 1 + (2 concentration)^2 / 2. # Therefore, r ~= 2 + 2 concentration. By plugging this into rho, we have # rho ~= conc + 1 / conc - sqrt(1 + 1 / concentration^2). # Let's expand the last term at concentration=0 up to the linear term: # sqrt(1 + 1 / concentration^2) ~= 1 / concentration + concentration / 2 # Thus, rho ~= concentration / 2. Finally, # s = 1 / (2 rho) + rho / 2 ~= 1 / concentration + concentration / 4. # Since concentration is small, we drop the second term and simply use # s ~= 1 / concentration. s_approximate = 1. / concentration # To compute the cutoff, we compute s_exact using mpmath with 30 decimal # digits precision and compare that to the s_exact and s_approximate # computed with dtype. Then, the cutoff is the largest concentration for # which abs(s_exact - s_exact_mpmath) > abs(s_approximate - s_exact_mpmath). s_concentration_cutoff_dict = { tf.float16: 1.8e-1, tf.float32: 2e-2, tf.float64: 1.2e-4, } s_concentration_cutoff = s_concentration_cutoff_dict[dtype] s = tf.where(concentration > s_concentration_cutoff, s_exact, s_approximate) def loop_body(done, u, w): """Resample the non-accepted points.""" # We resample u each time completely. Only its sign is used outside the # loop, which is random. u = tf.random.uniform( shape, minval=-1., maxval=1., dtype=dtype, seed=seed()) z = tf.cos(np.pi * u) # Update the non-accepted points. w = tf.where(done, w, (1. + s * z) / (s + z)) y = concentration * (s - w) v = tf.random.uniform( shape, minval=0., maxval=1., dtype=dtype, seed=seed()) accept = (y * (2. - y) >= v) | (tf.math.log(y / v) + 1. 
>= y) return done | accept, u, w _, u, w = tf.while_loop( cond=lambda done, *_: ~tf.reduce_all(input_tensor=done), body=loop_body, loop_vars=( tf.zeros(shape, dtype=tf.bool, name="done"), tf.zeros(shape, dtype=dtype, name="u"), tf.zeros(shape, dtype=dtype, name="w"), ), # The expected number of iterations depends on concentration. # It monotonically increases from one iteration for concentration = 0 to # sqrt(2 pi / e) ~= 1.52 iterations for concentration = +inf [1]. # We use a limit of 100 iterations to avoid infinite loops # for very large / nan concentration. maximum_iterations=100, parallel_iterations=1 if seed.original_seed is None else 10, ) x = tf.sign(u) * tf.math.acos(w) def grad(dy): """The gradient of the von Mises samples w.r.t. concentration.""" broadcast_concentration = concentration + tf.zeros_like(x) _, dcdf_dconcentration = value_and_gradient( lambda conc: von_mises_cdf(x, conc), broadcast_concentration) inv_prob = tf.exp(-broadcast_concentration * (tf.cos(x) - 1.)) * ( (2. * np.pi) * tf.math.bessel_i0e(broadcast_concentration)) # Compute the implicit reparameterization gradient [2], # dz/dconc = -(dF(z; conc) / dconc) / p(z; conc) ret = dy * (-inv_prob * dcdf_dconcentration) # Sum over the sample dimensions. Assume that they are always the first # ones. num_sample_dimensions = (tf.rank(broadcast_concentration) - tf.rank(concentration)) return tf.reduce_sum( input_tensor=ret, axis=tf.range(num_sample_dimensions)) return x, grad return rejection_sample_with_gradient(concentration)
def random_von_mises(shape, concentration, dtype=tf.float32, seed=None): """Samples from the standardized von Mises distribution. The distribution is vonMises(loc=0, concentration=concentration), so the mean is zero. The location can then be changed by adding it to the samples. The sampling algorithm is rejection sampling with wrapped Cauchy proposal [1]. The samples are pathwise differentiable using the approach of [2]. Arguments: shape: The output sample shape. concentration: The concentration parameter of the von Mises distribution. dtype: The data type of concentration and the outputs. seed: (optional) The random seed. Returns: Differentiable samples of standardized von Mises. References: [1] Luc Devroye "Non-Uniform Random Variate Generation", Springer-Verlag, 1986; Chapter 9, p. 473-476. http://www.nrbook.com/devroye/Devroye_files/chapter_nine.pdf + corrections http://www.nrbook.com/devroye/Devroye_files/errors.pdf [2] Michael Figurnov, Shakir Mohamed, Andriy Mnih. "Implicit Reparameterization Gradients", 2018. """ seed = SeedStream(seed, salt="von_mises") concentration = tf.convert_to_tensor( value=concentration, dtype=dtype, name="concentration") @tf.custom_gradient def rejection_sample_with_gradient(concentration): """Performs rejection sampling for standardized von Mises. A nested function is required because @tf.custom_gradient does not handle non-tensor inputs such as dtype. Instead, they are captured by the outer scope. Arguments: concentration: The concentration parameter of the distribution. Returns: Differentiable samples of standardized von Mises. """ r = 1. + tf.sqrt(1. + 4. * concentration ** 2) rho = (r - tf.sqrt(2. * r)) / (2. * concentration) s_exact = (1. + rho ** 2) / (2. * rho) # For low concentration, s becomes numerically unstable. # To fix that, we use an approximation. Here is the derivation. # First-order Taylor expansion at conc = 0 gives # sqrt(1 + 4 concentration^2) ~= 1 + (2 concentration)^2 / 2. # Therefore, r ~= 2 + 2 concentration. By plugging this into rho, we have # rho ~= conc + 1 / conc - sqrt(1 + 1 / concentration^2). # Let's expand the last term at concentration=0 up to the linear term: # sqrt(1 + 1 / concentration^2) ~= 1 / concentration + concentration / 2 # Thus, rho ~= concentration / 2. Finally, # s = 1 / (2 rho) + rho / 2 ~= 1 / concentration + concentration / 4. # Since concentration is small, we drop the second term and simply use # s ~= 1 / concentration. s_approximate = 1. / concentration # To compute the cutoff, we compute s_exact using mpmath with 30 decimal # digits precision and compare that to the s_exact and s_approximate # computed with dtype. Then, the cutoff is the largest concentration for # which abs(s_exact - s_exact_mpmath) > abs(s_approximate - s_exact_mpmath). s_concentration_cutoff_dict = { tf.float16: 1.8e-1, tf.float32: 2e-2, tf.float64: 1.2e-4, } s_concentration_cutoff = s_concentration_cutoff_dict[dtype] s = tf.where(concentration > s_concentration_cutoff, s_exact, s_approximate) def loop_body(done, u, w): """Resample the non-accepted points.""" # We resample u each time completely. Only its sign is used outside the # loop, which is random. u = tf.random.uniform( shape, minval=-1., maxval=1., dtype=dtype, seed=seed()) z = tf.cos(np.pi * u) # Update the non-accepted points. w = tf.where(done, w, (1. + s * z) / (s + z)) y = concentration * (s - w) v = tf.random.uniform( shape, minval=0., maxval=1., dtype=dtype, seed=seed()) accept = (y * (2. - y) >= v) | (tf.math.log(y / v) + 1. 
>= y) return done | accept, u, w _, u, w = tf.while_loop( cond=lambda done, *_: ~tf.reduce_all(input_tensor=done), body=loop_body, loop_vars=( tf.zeros(shape, dtype=tf.bool, name="done"), tf.zeros(shape, dtype=dtype, name="u"), tf.zeros(shape, dtype=dtype, name="w"), ), # The expected number of iterations depends on concentration. # It monotonically increases from one iteration for concentration = 0 to # sqrt(2 pi / e) ~= 1.52 iterations for concentration = +inf [1]. # We use a limit of 100 iterations to avoid infinite loops # for very large / nan concentration. maximum_iterations=100, parallel_iterations=1 if seed.original_seed is None else 10, ) x = tf.sign(u) * tf.math.acos(w) def grad(dy): """The gradient of the von Mises samples w.r.t. concentration.""" broadcast_concentration = concentration + tf.zeros_like(x) _, dcdf_dconcentration = value_and_gradient( lambda conc: von_mises_cdf(x, conc), broadcast_concentration) inv_prob = tf.exp(-broadcast_concentration * (tf.cos(x) - 1.)) * ( (2. * np.pi) * tf.math.bessel_i0e(broadcast_concentration)) # Compute the implicit reparameterization gradient [2], # dz/dconc = -(dF(z; conc) / dconc) / p(z; conc) ret = dy * (-inv_prob * dcdf_dconcentration) # Sum over the sample dimensions. Assume that they are always the first # ones. num_sample_dimensions = (tf.rank(broadcast_concentration) - tf.rank(concentration)) return tf.reduce_sum( input_tensor=ret, axis=tf.range(num_sample_dimensions)) return x, grad return rejection_sample_with_gradient(concentration)
[ "Samples", "from", "the", "standardized", "von", "Mises", "distribution", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/von_mises.py#L450-L582
[ "def", "random_von_mises", "(", "shape", ",", "concentration", ",", "dtype", "=", "tf", ".", "float32", ",", "seed", "=", "None", ")", ":", "seed", "=", "SeedStream", "(", "seed", ",", "salt", "=", "\"von_mises\"", ")", "concentration", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "concentration", ",", "dtype", "=", "dtype", ",", "name", "=", "\"concentration\"", ")", "@", "tf", ".", "custom_gradient", "def", "rejection_sample_with_gradient", "(", "concentration", ")", ":", "\"\"\"Performs rejection sampling for standardized von Mises.\n\n A nested function is required because @tf.custom_gradient does not handle\n non-tensor inputs such as dtype. Instead, they are captured by the outer\n scope.\n\n Arguments:\n concentration: The concentration parameter of the distribution.\n\n Returns:\n Differentiable samples of standardized von Mises.\n \"\"\"", "r", "=", "1.", "+", "tf", ".", "sqrt", "(", "1.", "+", "4.", "*", "concentration", "**", "2", ")", "rho", "=", "(", "r", "-", "tf", ".", "sqrt", "(", "2.", "*", "r", ")", ")", "/", "(", "2.", "*", "concentration", ")", "s_exact", "=", "(", "1.", "+", "rho", "**", "2", ")", "/", "(", "2.", "*", "rho", ")", "# For low concentration, s becomes numerically unstable.", "# To fix that, we use an approximation. Here is the derivation.", "# First-order Taylor expansion at conc = 0 gives", "# sqrt(1 + 4 concentration^2) ~= 1 + (2 concentration)^2 / 2.", "# Therefore, r ~= 2 + 2 concentration. By plugging this into rho, we have", "# rho ~= conc + 1 / conc - sqrt(1 + 1 / concentration^2).", "# Let's expand the last term at concentration=0 up to the linear term:", "# sqrt(1 + 1 / concentration^2) ~= 1 / concentration + concentration / 2", "# Thus, rho ~= concentration / 2. Finally,", "# s = 1 / (2 rho) + rho / 2 ~= 1 / concentration + concentration / 4.", "# Since concentration is small, we drop the second term and simply use", "# s ~= 1 / concentration.", "s_approximate", "=", "1.", "/", "concentration", "# To compute the cutoff, we compute s_exact using mpmath with 30 decimal", "# digits precision and compare that to the s_exact and s_approximate", "# computed with dtype. Then, the cutoff is the largest concentration for", "# which abs(s_exact - s_exact_mpmath) > abs(s_approximate - s_exact_mpmath).", "s_concentration_cutoff_dict", "=", "{", "tf", ".", "float16", ":", "1.8e-1", ",", "tf", ".", "float32", ":", "2e-2", ",", "tf", ".", "float64", ":", "1.2e-4", ",", "}", "s_concentration_cutoff", "=", "s_concentration_cutoff_dict", "[", "dtype", "]", "s", "=", "tf", ".", "where", "(", "concentration", ">", "s_concentration_cutoff", ",", "s_exact", ",", "s_approximate", ")", "def", "loop_body", "(", "done", ",", "u", ",", "w", ")", ":", "\"\"\"Resample the non-accepted points.\"\"\"", "# We resample u each time completely. 
Only its sign is used outside the", "# loop, which is random.", "u", "=", "tf", ".", "random", ".", "uniform", "(", "shape", ",", "minval", "=", "-", "1.", ",", "maxval", "=", "1.", ",", "dtype", "=", "dtype", ",", "seed", "=", "seed", "(", ")", ")", "z", "=", "tf", ".", "cos", "(", "np", ".", "pi", "*", "u", ")", "# Update the non-accepted points.", "w", "=", "tf", ".", "where", "(", "done", ",", "w", ",", "(", "1.", "+", "s", "*", "z", ")", "/", "(", "s", "+", "z", ")", ")", "y", "=", "concentration", "*", "(", "s", "-", "w", ")", "v", "=", "tf", ".", "random", ".", "uniform", "(", "shape", ",", "minval", "=", "0.", ",", "maxval", "=", "1.", ",", "dtype", "=", "dtype", ",", "seed", "=", "seed", "(", ")", ")", "accept", "=", "(", "y", "*", "(", "2.", "-", "y", ")", ">=", "v", ")", "|", "(", "tf", ".", "math", ".", "log", "(", "y", "/", "v", ")", "+", "1.", ">=", "y", ")", "return", "done", "|", "accept", ",", "u", ",", "w", "_", ",", "u", ",", "w", "=", "tf", ".", "while_loop", "(", "cond", "=", "lambda", "done", ",", "*", "_", ":", "~", "tf", ".", "reduce_all", "(", "input_tensor", "=", "done", ")", ",", "body", "=", "loop_body", ",", "loop_vars", "=", "(", "tf", ".", "zeros", "(", "shape", ",", "dtype", "=", "tf", ".", "bool", ",", "name", "=", "\"done\"", ")", ",", "tf", ".", "zeros", "(", "shape", ",", "dtype", "=", "dtype", ",", "name", "=", "\"u\"", ")", ",", "tf", ".", "zeros", "(", "shape", ",", "dtype", "=", "dtype", ",", "name", "=", "\"w\"", ")", ",", ")", ",", "# The expected number of iterations depends on concentration.", "# It monotonically increases from one iteration for concentration = 0 to", "# sqrt(2 pi / e) ~= 1.52 iterations for concentration = +inf [1].", "# We use a limit of 100 iterations to avoid infinite loops", "# for very large / nan concentration.", "maximum_iterations", "=", "100", ",", "parallel_iterations", "=", "1", "if", "seed", ".", "original_seed", "is", "None", "else", "10", ",", ")", "x", "=", "tf", ".", "sign", "(", "u", ")", "*", "tf", ".", "math", ".", "acos", "(", "w", ")", "def", "grad", "(", "dy", ")", ":", "\"\"\"The gradient of the von Mises samples w.r.t. concentration.\"\"\"", "broadcast_concentration", "=", "concentration", "+", "tf", ".", "zeros_like", "(", "x", ")", "_", ",", "dcdf_dconcentration", "=", "value_and_gradient", "(", "lambda", "conc", ":", "von_mises_cdf", "(", "x", ",", "conc", ")", ",", "broadcast_concentration", ")", "inv_prob", "=", "tf", ".", "exp", "(", "-", "broadcast_concentration", "*", "(", "tf", ".", "cos", "(", "x", ")", "-", "1.", ")", ")", "*", "(", "(", "2.", "*", "np", ".", "pi", ")", "*", "tf", ".", "math", ".", "bessel_i0e", "(", "broadcast_concentration", ")", ")", "# Compute the implicit reparameterization gradient [2],", "# dz/dconc = -(dF(z; conc) / dconc) / p(z; conc)", "ret", "=", "dy", "*", "(", "-", "inv_prob", "*", "dcdf_dconcentration", ")", "# Sum over the sample dimensions. Assume that they are always the first", "# ones.", "num_sample_dimensions", "=", "(", "tf", ".", "rank", "(", "broadcast_concentration", ")", "-", "tf", ".", "rank", "(", "concentration", ")", ")", "return", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "ret", ",", "axis", "=", "tf", ".", "range", "(", "num_sample_dimensions", ")", ")", "return", "x", ",", "grad", "return", "rejection_sample_with_gradient", "(", "concentration", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
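The rejection sampler above (wrapped Cauchy proposal from Devroye [1], with the implicit reparameterization gradient `dz/dconc = -(dF(z; conc)/dconc) / p(z; conc)` from [2]) also has a short scalar form. The sketch below drops the gradient machinery and batching, and uses an illustrative function name.

```python
# Scalar NumPy sketch of the rejection sampler above (gradient machinery
# and batching omitted). Samples vonMises(loc=0, concentration).
import numpy as np

def sample_von_mises_np(concentration, rng=None):
    rng = rng or np.random.default_rng()
    r = 1. + np.sqrt(1. + 4. * concentration ** 2)
    rho = (r - np.sqrt(2. * r)) / (2. * concentration)
    s = (1. + rho ** 2) / (2. * rho)
    while True:
        u = rng.uniform(-1., 1.)
        z = np.cos(np.pi * u)
        w = (1. + s * z) / (s + z)
        y = concentration * (s - w)
        v = rng.uniform(0., 1.)
        # Same two-part acceptance test as in the TFP loop body.
        if y * (2. - y) >= v or np.log(y / v) + 1. >= y:
            return np.sign(u) * np.arccos(w)

samples = [sample_von_mises_np(2.0) for _ in range(5)]
```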
test
one_step
Performs one step of the differential evolution algorithm. Args: objective_function: A Python callable that accepts a batch of possible solutions and returns the values of the objective function at those arguments as a rank 1 real `Tensor`. This specifies the function to be minimized. The input to this callable may be either a single `Tensor` or a Python `list` of `Tensor`s. The signature must match the format of the argument `population`. (i.e. objective_function(*population) must return the value of the function to be minimized). population: `Tensor` or Python `list` of `Tensor`s representing the current population vectors. Each `Tensor` must be of the same real dtype. The first dimension indexes individual population members while the rest of the dimensions are consumed by the value function. For example, if the population is a single `Tensor` of shape [n, m1, m2], then `n` is the population size and the output of `objective_function` applied to the population is a `Tensor` of shape [n]. If the population is a python list of `Tensor`s then each `Tensor` in the list should have the first axis of a common size, say `n` and `objective_function(*population)` should return a `Tensor of shape [n]. The population must have at least 4 members for the algorithm to work correctly. population_values: A `Tensor` of rank 1 and real dtype. The result of applying `objective_function` to the `population`. If not supplied it is computed using the `objective_function`. Default value: None. differential_weight: Real scalar `Tensor`. Must be positive and less than 2.0. The parameter controlling the strength of mutation. Default value: 0.5 crossover_prob: Real scalar `Tensor`. Must be between 0 and 1. The probability of recombination per site. Default value: 0.9 seed: `int` or None. The random seed for this `Op`. If `None`, no seed is applied. Default value: None. name: (Optional) Python str. The name prefixed to the ops created by this function. If not supplied, the default name 'one_step' is used. Default value: None Returns: A sequence containing the following elements (in order): next_population: A `Tensor` or Python `list` of `Tensor`s of the same structure as the input population. The population at the next generation. next_population_values: A `Tensor` of same shape and dtype as input `population_values`. The function values for the `next_population`.
tensorflow_probability/python/optimizer/differential_evolution.py
def one_step( objective_function, population, population_values=None, differential_weight=0.5, crossover_prob=0.9, seed=None, name=None): """Performs one step of the differential evolution algorithm. Args: objective_function: A Python callable that accepts a batch of possible solutions and returns the values of the objective function at those arguments as a rank 1 real `Tensor`. This specifies the function to be minimized. The input to this callable may be either a single `Tensor` or a Python `list` of `Tensor`s. The signature must match the format of the argument `population`. (i.e. objective_function(*population) must return the value of the function to be minimized). population: `Tensor` or Python `list` of `Tensor`s representing the current population vectors. Each `Tensor` must be of the same real dtype. The first dimension indexes individual population members while the rest of the dimensions are consumed by the value function. For example, if the population is a single `Tensor` of shape [n, m1, m2], then `n` is the population size and the output of `objective_function` applied to the population is a `Tensor` of shape [n]. If the population is a python list of `Tensor`s then each `Tensor` in the list should have the first axis of a common size, say `n` and `objective_function(*population)` should return a `Tensor of shape [n]. The population must have at least 4 members for the algorithm to work correctly. population_values: A `Tensor` of rank 1 and real dtype. The result of applying `objective_function` to the `population`. If not supplied it is computed using the `objective_function`. Default value: None. differential_weight: Real scalar `Tensor`. Must be positive and less than 2.0. The parameter controlling the strength of mutation. Default value: 0.5 crossover_prob: Real scalar `Tensor`. Must be between 0 and 1. The probability of recombination per site. Default value: 0.9 seed: `int` or None. The random seed for this `Op`. If `None`, no seed is applied. Default value: None. name: (Optional) Python str. The name prefixed to the ops created by this function. If not supplied, the default name 'one_step' is used. Default value: None Returns: A sequence containing the following elements (in order): next_population: A `Tensor` or Python `list` of `Tensor`s of the same structure as the input population. The population at the next generation. next_population_values: A `Tensor` of same shape and dtype as input `population_values`. The function values for the `next_population`. """ with tf.compat.v1.name_scope( name, 'one_step', [population, population_values, differential_weight, crossover_prob]): population, _ = _ensure_list(population) if population_values is None: population_values = objective_function(*population) population_size = tf.shape(input=population[0])[0] seed_stream = distributions.SeedStream(seed, salt='one_step') mixing_indices = _get_mixing_indices(population_size, seed=seed_stream()) # Construct the mutated solution vectors. There is one for each member of # the population. mutants = _get_mutants(population, population_size, mixing_indices, differential_weight) # Perform recombination between the parents and the mutants. 
candidates = _binary_crossover(population, population_size, mutants, crossover_prob, seed=seed_stream()) candidate_values = objective_function(*candidates) if population_values is None: population_values = objective_function(*population) infinity = tf.zeros_like(population_values) + np.inf population_values = tf.where( tf.math.is_nan(population_values), x=infinity, y=population_values) to_replace = candidate_values < population_values next_population = [ tf.where(to_replace, x=candidates_part, y=population_part) for candidates_part, population_part in zip(candidates, population) ] next_values = tf.where(to_replace, x=candidate_values, y=population_values) return next_population, next_values
def one_step( objective_function, population, population_values=None, differential_weight=0.5, crossover_prob=0.9, seed=None, name=None): """Performs one step of the differential evolution algorithm. Args: objective_function: A Python callable that accepts a batch of possible solutions and returns the values of the objective function at those arguments as a rank 1 real `Tensor`. This specifies the function to be minimized. The input to this callable may be either a single `Tensor` or a Python `list` of `Tensor`s. The signature must match the format of the argument `population`. (i.e. objective_function(*population) must return the value of the function to be minimized). population: `Tensor` or Python `list` of `Tensor`s representing the current population vectors. Each `Tensor` must be of the same real dtype. The first dimension indexes individual population members while the rest of the dimensions are consumed by the value function. For example, if the population is a single `Tensor` of shape [n, m1, m2], then `n` is the population size and the output of `objective_function` applied to the population is a `Tensor` of shape [n]. If the population is a python list of `Tensor`s then each `Tensor` in the list should have the first axis of a common size, say `n` and `objective_function(*population)` should return a `Tensor of shape [n]. The population must have at least 4 members for the algorithm to work correctly. population_values: A `Tensor` of rank 1 and real dtype. The result of applying `objective_function` to the `population`. If not supplied it is computed using the `objective_function`. Default value: None. differential_weight: Real scalar `Tensor`. Must be positive and less than 2.0. The parameter controlling the strength of mutation. Default value: 0.5 crossover_prob: Real scalar `Tensor`. Must be between 0 and 1. The probability of recombination per site. Default value: 0.9 seed: `int` or None. The random seed for this `Op`. If `None`, no seed is applied. Default value: None. name: (Optional) Python str. The name prefixed to the ops created by this function. If not supplied, the default name 'one_step' is used. Default value: None Returns: A sequence containing the following elements (in order): next_population: A `Tensor` or Python `list` of `Tensor`s of the same structure as the input population. The population at the next generation. next_population_values: A `Tensor` of same shape and dtype as input `population_values`. The function values for the `next_population`. """ with tf.compat.v1.name_scope( name, 'one_step', [population, population_values, differential_weight, crossover_prob]): population, _ = _ensure_list(population) if population_values is None: population_values = objective_function(*population) population_size = tf.shape(input=population[0])[0] seed_stream = distributions.SeedStream(seed, salt='one_step') mixing_indices = _get_mixing_indices(population_size, seed=seed_stream()) # Construct the mutated solution vectors. There is one for each member of # the population. mutants = _get_mutants(population, population_size, mixing_indices, differential_weight) # Perform recombination between the parents and the mutants. 
candidates = _binary_crossover(population, population_size, mutants, crossover_prob, seed=seed_stream()) candidate_values = objective_function(*candidates) if population_values is None: population_values = objective_function(*population) infinity = tf.zeros_like(population_values) + np.inf population_values = tf.where( tf.math.is_nan(population_values), x=infinity, y=population_values) to_replace = candidate_values < population_values next_population = [ tf.where(to_replace, x=candidates_part, y=population_part) for candidates_part, population_part in zip(candidates, population) ] next_values = tf.where(to_replace, x=candidate_values, y=population_values) return next_population, next_values
[ "Performs", "one", "step", "of", "the", "differential", "evolution", "algorithm", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L120-L211
[ "def", "one_step", "(", "objective_function", ",", "population", ",", "population_values", "=", "None", ",", "differential_weight", "=", "0.5", ",", "crossover_prob", "=", "0.9", ",", "seed", "=", "None", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'one_step'", ",", "[", "population", ",", "population_values", ",", "differential_weight", ",", "crossover_prob", "]", ")", ":", "population", ",", "_", "=", "_ensure_list", "(", "population", ")", "if", "population_values", "is", "None", ":", "population_values", "=", "objective_function", "(", "*", "population", ")", "population_size", "=", "tf", ".", "shape", "(", "input", "=", "population", "[", "0", "]", ")", "[", "0", "]", "seed_stream", "=", "distributions", ".", "SeedStream", "(", "seed", ",", "salt", "=", "'one_step'", ")", "mixing_indices", "=", "_get_mixing_indices", "(", "population_size", ",", "seed", "=", "seed_stream", "(", ")", ")", "# Construct the mutated solution vectors. There is one for each member of", "# the population.", "mutants", "=", "_get_mutants", "(", "population", ",", "population_size", ",", "mixing_indices", ",", "differential_weight", ")", "# Perform recombination between the parents and the mutants.", "candidates", "=", "_binary_crossover", "(", "population", ",", "population_size", ",", "mutants", ",", "crossover_prob", ",", "seed", "=", "seed_stream", "(", ")", ")", "candidate_values", "=", "objective_function", "(", "*", "candidates", ")", "if", "population_values", "is", "None", ":", "population_values", "=", "objective_function", "(", "*", "population", ")", "infinity", "=", "tf", ".", "zeros_like", "(", "population_values", ")", "+", "np", ".", "inf", "population_values", "=", "tf", ".", "where", "(", "tf", ".", "math", ".", "is_nan", "(", "population_values", ")", ",", "x", "=", "infinity", ",", "y", "=", "population_values", ")", "to_replace", "=", "candidate_values", "<", "population_values", "next_population", "=", "[", "tf", ".", "where", "(", "to_replace", ",", "x", "=", "candidates_part", ",", "y", "=", "population_part", ")", "for", "candidates_part", ",", "population_part", "in", "zip", "(", "candidates", ",", "population", ")", "]", "next_values", "=", "tf", ".", "where", "(", "to_replace", ",", "x", "=", "candidate_values", ",", "y", "=", "population_values", ")", "return", "next_population", ",", "next_values" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
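Stripped of the TF plumbing, one step of differential evolution is just mutation, binary crossover, and greedy selection. The NumPy sketch below uses the common rand/1/bin scheme with three distinct donors per member; the donor selection is only assumed to approximate `_get_mixing_indices` / `_binary_crossover` in the TFP source and may differ in detail.

```python
# Illustrative NumPy sketch of one differential-evolution generation
# (rand/1/bin). Donor selection is a plain "three distinct others" choice.
import numpy as np

def de_one_step_np(objective, population, weight=0.5, crossover_prob=0.9,
                   rng=None):
    rng = rng or np.random.default_rng()
    n, d = population.shape
    values = objective(population)
    donors = np.array([rng.choice([j for j in range(n) if j != i],
                                  size=3, replace=False) for i in range(n)])
    # Mutation: x_a + F * (x_b - x_c).
    mutants = (population[donors[:, 0]]
               + weight * (population[donors[:, 1]] - population[donors[:, 2]]))
    # Binary crossover between each parent and its mutant.
    mask = rng.random((n, d)) < crossover_prob
    candidates = np.where(mask, mutants, population)
    # Greedy selection: keep whichever of parent / candidate is better.
    candidate_values = objective(candidates)
    better = candidate_values < values
    next_population = np.where(better[:, None], candidates, population)
    return next_population, np.where(better, candidate_values, values)

rng = np.random.default_rng(0)
pop = rng.normal(size=(8, 2))
pop, vals = de_one_step_np(lambda x: np.sum(x ** 2, axis=-1), pop, rng=rng)
```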
test
minimize
Applies the Differential evolution algorithm to minimize a function. Differential Evolution is an evolutionary optimization algorithm which works on a set of candidate solutions called the population. It iteratively improves the population by applying genetic operators of mutation and recombination. The objective function `f` supplies the fitness of each candidate. A candidate `s_1` is considered better than `s_2` if `f(s_1) < f(s_2)`. This method allows the user to either specify an initial population or a single candidate solution. If a single solution is specified, a population of the specified size is initialized by adding independent normal noise to the candidate solution. The implementation also supports a multi-part specification of the state. For example, consider the objective function: ```python # x is a tensor of shape [n, m] while y is of shape [n]. def objective(x, y): return tf.math.reduce_sum(x ** 2, axis=-1) + y ** 2 ``` The state in this case is specified by two input tensors `x` and `y`. To apply the algorithm to this objective function, one would need to specify either an initial population as a list of two tensors of shapes `[population_size, k]` and `[population_size]`. The following code shows the complete example: ```python population_size = 40 # With an initial population and a multi-part state. initial_population = (tf.random.normal([population_size]), tf.random.normal([population_size])) def easom_fn(x, y): return -(tf.math.cos(x) * tf.math.cos(y) * tf.math.exp(-(x-np.pi)**2 - (y-np.pi)**2)) optim_results = tfp.optimizers.differential_evolution_minimize( easom_fn, initial_population=initial_population, seed=43210) print (optim_results.converged) print (optim_results.position) # Should be (close to) [pi, pi]. print (optim_results.objective_value) # Should be -1. # With a single starting point initial_position = (tf.constant(1.0), tf.constant(1.0)) optim_results = tfp.optimizers.differential_evolution_minimize( easom_fn, initial_position=initial_position, population_size=40, population_stddev=2.0, seed=43210) ``` Args: objective_function: A Python callable that accepts a batch of possible solutions and returns the values of the objective function at those arguments as a rank 1 real `Tensor`. This specifies the function to be minimized. The input to this callable may be either a single `Tensor` or a Python `list` of `Tensor`s. The signature must match the format of the argument `population`. (i.e. objective_function(*population) must return the value of the function to be minimized). initial_population: A real `Tensor` or Python list of `Tensor`s. If a list, each `Tensor` must be of rank at least 1 and with a common first dimension. The first dimension indexes into the candidate solutions while the rest of the dimensions (if any) index into an individual solution. The size of the population must be at least 4. This is a requirement of the DE algorithm. initial_position: A real `Tensor` of any shape. The seed solution used to initialize the population of solutions. If this parameter is specified then `initial_population` must not be specified. population_size: A positive scalar int32 `Tensor` greater than 4. The size of the population to evolve. This parameter is ignored if `initial_population` is specified. Default value: 50. population_stddev: A positive scalar real `Tensor` of the same dtype as `initial_position`. This parameter is ignored if `initial_population` is specified. 
Used to generate the population from the `initial_position` by adding random normal noise with zero mean and the specified standard deviation. Default value: 1.0 max_iterations: Positive scalar int32 `Tensor`. The maximum number of generations to evolve the population for. Default value: 100 func_tolerance: Scalar `Tensor` of the same dtype as the output of the `objective_function`. The algorithm stops if the absolute difference between the largest and the smallest objective function value in the population is below this number. Default value: 0 position_tolerance: Scalar `Tensor` of the same real dtype as `initial_position` or `initial_population`. The algorithm terminates if the largest absolute difference between the coordinates of the population members is below this threshold. Default value: 1e-8 differential_weight: Real scalar `Tensor`. Must be positive and less than 2.0. The parameter controlling the strength of mutation in the algorithm. Default value: 0.5 crossover_prob: Real scalar `Tensor`. Must be between 0 and 1. The probability of recombination per site. Default value: 0.9 seed: `int` or None. The random seed for this `Op`. If `None`, no seed is applied. Default value: None. name: (Optional) Python str. The name prefixed to the ops created by this function. If not supplied, the default name 'differential_evolution_minimize' is used. Default value: None Returns: optimizer_results: An object containing the following attributes: converged: Scalar boolean `Tensor` indicating whether the minimum was found within the specified tolerances. num_objective_evaluations: The total number of objective evaluations performed. position: A `Tensor` containing the best point found during the search. If the search converged, then this value is the argmin of the objective function within the specified tolerances. objective_value: A `Tensor` containing the value of the objective function at the `position`. If the search converged, then this is the (local) minimum of the objective function. final_population: The final state of the population. final_objective_values: The objective function evaluated at the final population. initial_population: The starting population. initial_objective_values: The objective function evaluated at the initial population. num_iterations: The number of iterations of the main algorithm body. Raises: ValueError: If neither the initial population, nor the initial position are specified or if both are specified.
tensorflow_probability/python/optimizer/differential_evolution.py
def minimize(objective_function, initial_population=None, initial_position=None, population_size=50, population_stddev=1., max_iterations=100, func_tolerance=0, position_tolerance=1e-8, differential_weight=0.5, crossover_prob=0.9, seed=None, name=None): """Applies the Differential evolution algorithm to minimize a function. Differential Evolution is an evolutionary optimization algorithm which works on a set of candidate solutions called the population. It iteratively improves the population by applying genetic operators of mutation and recombination. The objective function `f` supplies the fitness of each candidate. A candidate `s_1` is considered better than `s_2` if `f(s_1) < f(s_2)`. This method allows the user to either specify an initial population or a single candidate solution. If a single solution is specified, a population of the specified size is initialized by adding independent normal noise to the candidate solution. The implementation also supports a multi-part specification of the state. For example, consider the objective function: ```python # x is a tensor of shape [n, m] while y is of shape [n]. def objective(x, y): return tf.math.reduce_sum(x ** 2, axis=-1) + y ** 2 ``` The state in this case is specified by two input tensors `x` and `y`. To apply the algorithm to this objective function, one would need to specify either an initial population as a list of two tensors of shapes `[population_size, k]` and `[population_size]`. The following code shows the complete example: ```python population_size = 40 # With an initial population and a multi-part state. initial_population = (tf.random.normal([population_size]), tf.random.normal([population_size])) def easom_fn(x, y): return -(tf.math.cos(x) * tf.math.cos(y) * tf.math.exp(-(x-np.pi)**2 - (y-np.pi)**2)) optim_results = tfp.optimizers.differential_evolution_minimize( easom_fn, initial_population=initial_population, seed=43210) print (optim_results.converged) print (optim_results.position) # Should be (close to) [pi, pi]. print (optim_results.objective_value) # Should be -1. # With a single starting point initial_position = (tf.constant(1.0), tf.constant(1.0)) optim_results = tfp.optimizers.differential_evolution_minimize( easom_fn, initial_position=initial_position, population_size=40, population_stddev=2.0, seed=43210) ``` Args: objective_function: A Python callable that accepts a batch of possible solutions and returns the values of the objective function at those arguments as a rank 1 real `Tensor`. This specifies the function to be minimized. The input to this callable may be either a single `Tensor` or a Python `list` of `Tensor`s. The signature must match the format of the argument `population`. (i.e. objective_function(*population) must return the value of the function to be minimized). initial_population: A real `Tensor` or Python list of `Tensor`s. If a list, each `Tensor` must be of rank at least 1 and with a common first dimension. The first dimension indexes into the candidate solutions while the rest of the dimensions (if any) index into an individual solution. The size of the population must be at least 4. This is a requirement of the DE algorithm. initial_position: A real `Tensor` of any shape. The seed solution used to initialize the population of solutions. If this parameter is specified then `initial_population` must not be specified. population_size: A positive scalar int32 `Tensor` greater than 4. The size of the population to evolve. This parameter is ignored if `initial_population` is specified. 
Default value: 50. population_stddev: A positive scalar real `Tensor` of the same dtype as `initial_position`. This parameter is ignored if `initial_population` is specified. Used to generate the population from the `initial_position` by adding random normal noise with zero mean and the specified standard deviation. Default value: 1.0 max_iterations: Positive scalar int32 `Tensor`. The maximum number of generations to evolve the population for. Default value: 100 func_tolerance: Scalar `Tensor` of the same dtype as the output of the `objective_function`. The algorithm stops if the absolute difference between the largest and the smallest objective function value in the population is below this number. Default value: 0 position_tolerance: Scalar `Tensor` of the same real dtype as `initial_position` or `initial_population`. The algorithm terminates if the largest absolute difference between the coordinates of the population members is below this threshold. Default value: 1e-8 differential_weight: Real scalar `Tensor`. Must be positive and less than 2.0. The parameter controlling the strength of mutation in the algorithm. Default value: 0.5 crossover_prob: Real scalar `Tensor`. Must be between 0 and 1. The probability of recombination per site. Default value: 0.9 seed: `int` or None. The random seed for this `Op`. If `None`, no seed is applied. Default value: None. name: (Optional) Python str. The name prefixed to the ops created by this function. If not supplied, the default name 'differential_evolution_minimize' is used. Default value: None Returns: optimizer_results: An object containing the following attributes: converged: Scalar boolean `Tensor` indicating whether the minimum was found within the specified tolerances. num_objective_evaluations: The total number of objective evaluations performed. position: A `Tensor` containing the best point found during the search. If the search converged, then this value is the argmin of the objective function within the specified tolerances. objective_value: A `Tensor` containing the value of the objective function at the `position`. If the search converged, then this is the (local) minimum of the objective function. final_population: The final state of the population. final_objective_values: The objective function evaluated at the final population. initial_population: The starting population. initial_objective_values: The objective function evaluated at the initial population. num_iterations: The number of iterations of the main algorithm body. Raises: ValueError: If neither the initial population, nor the initial position are specified or if both are specified. 
""" if initial_population is None and initial_position is None: raise ValueError('Either the initial population or the initial position ' 'must be specified.') if initial_population is not None and initial_position is not None: raise ValueError('Only one of initial population or initial position ' 'should be specified') with tf.compat.v1.name_scope( name, default_name='minimize', values=[ initial_population, initial_position, population_size, population_stddev, max_iterations, func_tolerance, position_tolerance, differential_weight, crossover_prob ]): ( was_iterable, population, population_values, max_iterations, func_tolerance, position_tolerance, differential_weight, crossover_prob ) = _get_initial_args(objective_function, initial_population, initial_position, population_size, population_stddev, max_iterations, func_tolerance, position_tolerance, differential_weight, crossover_prob, seed) def evolve_body(loop_vars): """Performs one step of the evolution.""" next_population, next_population_values = one_step( objective_function, loop_vars.population, population_values=loop_vars.population_values, differential_weight=differential_weight, crossover_prob=crossover_prob, seed=seed) converged = _check_convergence(next_population, next_population_values, func_tolerance, position_tolerance) failed = _check_failure(next_population_values) return [_MinimizeLoopVars( converged=converged, failed=failed, num_iterations=loop_vars.num_iterations+1, population=next_population, population_values=next_population_values)] def evolve_cond(loop_vars): should_stop = ( loop_vars.failed | loop_vars.converged | (max_iterations is not None and loop_vars.num_iterations >= max_iterations)) return ~should_stop initial_vars = _MinimizeLoopVars( converged=tf.convert_to_tensor(value=False), failed=tf.convert_to_tensor(value=False), num_iterations=tf.convert_to_tensor(value=0), population=population, population_values=population_values) final_state = tf.while_loop( cond=evolve_cond, body=evolve_body, loop_vars=(initial_vars,))[0] best_position, best_values = _find_best_in_population( final_state.population, final_state.population_values) # Ensure we return a similar structure to what the user supplied. final_population = final_state.population if not was_iterable: final_population = final_population[0] best_position = best_position[0] return DifferentialEvolutionOptimizerResults( converged=final_state.converged, failed=final_state.failed, position=best_position, objective_value=best_values, final_population=final_population, final_objective_values=final_state.population_values, initial_population=population, initial_objective_values=population_values, num_iterations=final_state.num_iterations)
def minimize(objective_function, initial_population=None, initial_position=None, population_size=50, population_stddev=1., max_iterations=100, func_tolerance=0, position_tolerance=1e-8, differential_weight=0.5, crossover_prob=0.9, seed=None, name=None): """Applies the Differential evolution algorithm to minimize a function. Differential Evolution is an evolutionary optimization algorithm which works on a set of candidate solutions called the population. It iteratively improves the population by applying genetic operators of mutation and recombination. The objective function `f` supplies the fitness of each candidate. A candidate `s_1` is considered better than `s_2` if `f(s_1) < f(s_2)`. This method allows the user to either specify an initial population or a single candidate solution. If a single solution is specified, a population of the specified size is initialized by adding independent normal noise to the candidate solution. The implementation also supports a multi-part specification of the state. For example, consider the objective function: ```python # x is a tensor of shape [n, m] while y is of shape [n]. def objective(x, y): return tf.math.reduce_sum(x ** 2, axis=-1) + y ** 2 ``` The state in this case is specified by two input tensors `x` and `y`. To apply the algorithm to this objective function, one would need to specify either an initial population as a list of two tensors of shapes `[population_size, k]` and `[population_size]`. The following code shows the complete example: ```python population_size = 40 # With an initial population and a multi-part state. initial_population = (tf.random.normal([population_size]), tf.random.normal([population_size])) def easom_fn(x, y): return -(tf.math.cos(x) * tf.math.cos(y) * tf.math.exp(-(x-np.pi)**2 - (y-np.pi)**2)) optim_results = tfp.optimizers.differential_evolution_minimize( easom_fn, initial_population=initial_population, seed=43210) print (optim_results.converged) print (optim_results.position) # Should be (close to) [pi, pi]. print (optim_results.objective_value) # Should be -1. # With a single starting point initial_position = (tf.constant(1.0), tf.constant(1.0)) optim_results = tfp.optimizers.differential_evolution_minimize( easom_fn, initial_position=initial_position, population_size=40, population_stddev=2.0, seed=43210) ``` Args: objective_function: A Python callable that accepts a batch of possible solutions and returns the values of the objective function at those arguments as a rank 1 real `Tensor`. This specifies the function to be minimized. The input to this callable may be either a single `Tensor` or a Python `list` of `Tensor`s. The signature must match the format of the argument `population`. (i.e. objective_function(*population) must return the value of the function to be minimized). initial_population: A real `Tensor` or Python list of `Tensor`s. If a list, each `Tensor` must be of rank at least 1 and with a common first dimension. The first dimension indexes into the candidate solutions while the rest of the dimensions (if any) index into an individual solution. The size of the population must be at least 4. This is a requirement of the DE algorithm. initial_position: A real `Tensor` of any shape. The seed solution used to initialize the population of solutions. If this parameter is specified then `initial_population` must not be specified. population_size: A positive scalar int32 `Tensor` greater than 4. The size of the population to evolve. This parameter is ignored if `initial_population` is specified. 
Default value: 50. population_stddev: A positive scalar real `Tensor` of the same dtype as `initial_position`. This parameter is ignored if `initial_population` is specified. Used to generate the population from the `initial_position` by adding random normal noise with zero mean and the specified standard deviation. Default value: 1.0 max_iterations: Positive scalar int32 `Tensor`. The maximum number of generations to evolve the population for. Default value: 100 func_tolerance: Scalar `Tensor` of the same dtype as the output of the `objective_function`. The algorithm stops if the absolute difference between the largest and the smallest objective function value in the population is below this number. Default value: 0 position_tolerance: Scalar `Tensor` of the same real dtype as `initial_position` or `initial_population`. The algorithm terminates if the largest absolute difference between the coordinates of the population members is below this threshold. Default value: 1e-8 differential_weight: Real scalar `Tensor`. Must be positive and less than 2.0. The parameter controlling the strength of mutation in the algorithm. Default value: 0.5 crossover_prob: Real scalar `Tensor`. Must be between 0 and 1. The probability of recombination per site. Default value: 0.9 seed: `int` or None. The random seed for this `Op`. If `None`, no seed is applied. Default value: None. name: (Optional) Python str. The name prefixed to the ops created by this function. If not supplied, the default name 'differential_evolution_minimize' is used. Default value: None Returns: optimizer_results: An object containing the following attributes: converged: Scalar boolean `Tensor` indicating whether the minimum was found within the specified tolerances. num_objective_evaluations: The total number of objective evaluations performed. position: A `Tensor` containing the best point found during the search. If the search converged, then this value is the argmin of the objective function within the specified tolerances. objective_value: A `Tensor` containing the value of the objective function at the `position`. If the search converged, then this is the (local) minimum of the objective function. final_population: The final state of the population. final_objective_values: The objective function evaluated at the final population. initial_population: The starting population. initial_objective_values: The objective function evaluated at the initial population. num_iterations: The number of iterations of the main algorithm body. Raises: ValueError: If neither the initial population, nor the initial position are specified or if both are specified. 
""" if initial_population is None and initial_position is None: raise ValueError('Either the initial population or the initial position ' 'must be specified.') if initial_population is not None and initial_position is not None: raise ValueError('Only one of initial population or initial position ' 'should be specified') with tf.compat.v1.name_scope( name, default_name='minimize', values=[ initial_population, initial_position, population_size, population_stddev, max_iterations, func_tolerance, position_tolerance, differential_weight, crossover_prob ]): ( was_iterable, population, population_values, max_iterations, func_tolerance, position_tolerance, differential_weight, crossover_prob ) = _get_initial_args(objective_function, initial_population, initial_position, population_size, population_stddev, max_iterations, func_tolerance, position_tolerance, differential_weight, crossover_prob, seed) def evolve_body(loop_vars): """Performs one step of the evolution.""" next_population, next_population_values = one_step( objective_function, loop_vars.population, population_values=loop_vars.population_values, differential_weight=differential_weight, crossover_prob=crossover_prob, seed=seed) converged = _check_convergence(next_population, next_population_values, func_tolerance, position_tolerance) failed = _check_failure(next_population_values) return [_MinimizeLoopVars( converged=converged, failed=failed, num_iterations=loop_vars.num_iterations+1, population=next_population, population_values=next_population_values)] def evolve_cond(loop_vars): should_stop = ( loop_vars.failed | loop_vars.converged | (max_iterations is not None and loop_vars.num_iterations >= max_iterations)) return ~should_stop initial_vars = _MinimizeLoopVars( converged=tf.convert_to_tensor(value=False), failed=tf.convert_to_tensor(value=False), num_iterations=tf.convert_to_tensor(value=0), population=population, population_values=population_values) final_state = tf.while_loop( cond=evolve_cond, body=evolve_body, loop_vars=(initial_vars,))[0] best_position, best_values = _find_best_in_population( final_state.population, final_state.population_values) # Ensure we return a similar structure to what the user supplied. final_population = final_state.population if not was_iterable: final_population = final_population[0] best_position = best_position[0] return DifferentialEvolutionOptimizerResults( converged=final_state.converged, failed=final_state.failed, position=best_position, objective_value=best_values, final_population=final_population, final_objective_values=final_state.population_values, initial_population=population, initial_objective_values=population_values, num_iterations=final_state.num_iterations)
[ "Applies", "the", "Differential", "evolution", "algorithm", "to", "minimize", "a", "function", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L214-L456
[ "def", "minimize", "(", "objective_function", ",", "initial_population", "=", "None", ",", "initial_position", "=", "None", ",", "population_size", "=", "50", ",", "population_stddev", "=", "1.", ",", "max_iterations", "=", "100", ",", "func_tolerance", "=", "0", ",", "position_tolerance", "=", "1e-8", ",", "differential_weight", "=", "0.5", ",", "crossover_prob", "=", "0.9", ",", "seed", "=", "None", ",", "name", "=", "None", ")", ":", "if", "initial_population", "is", "None", "and", "initial_position", "is", "None", ":", "raise", "ValueError", "(", "'Either the initial population or the initial position '", "'must be specified.'", ")", "if", "initial_population", "is", "not", "None", "and", "initial_position", "is", "not", "None", ":", "raise", "ValueError", "(", "'Only one of initial population or initial position '", "'should be specified'", ")", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "default_name", "=", "'minimize'", ",", "values", "=", "[", "initial_population", ",", "initial_position", ",", "population_size", ",", "population_stddev", ",", "max_iterations", ",", "func_tolerance", ",", "position_tolerance", ",", "differential_weight", ",", "crossover_prob", "]", ")", ":", "(", "was_iterable", ",", "population", ",", "population_values", ",", "max_iterations", ",", "func_tolerance", ",", "position_tolerance", ",", "differential_weight", ",", "crossover_prob", ")", "=", "_get_initial_args", "(", "objective_function", ",", "initial_population", ",", "initial_position", ",", "population_size", ",", "population_stddev", ",", "max_iterations", ",", "func_tolerance", ",", "position_tolerance", ",", "differential_weight", ",", "crossover_prob", ",", "seed", ")", "def", "evolve_body", "(", "loop_vars", ")", ":", "\"\"\"Performs one step of the evolution.\"\"\"", "next_population", ",", "next_population_values", "=", "one_step", "(", "objective_function", ",", "loop_vars", ".", "population", ",", "population_values", "=", "loop_vars", ".", "population_values", ",", "differential_weight", "=", "differential_weight", ",", "crossover_prob", "=", "crossover_prob", ",", "seed", "=", "seed", ")", "converged", "=", "_check_convergence", "(", "next_population", ",", "next_population_values", ",", "func_tolerance", ",", "position_tolerance", ")", "failed", "=", "_check_failure", "(", "next_population_values", ")", "return", "[", "_MinimizeLoopVars", "(", "converged", "=", "converged", ",", "failed", "=", "failed", ",", "num_iterations", "=", "loop_vars", ".", "num_iterations", "+", "1", ",", "population", "=", "next_population", ",", "population_values", "=", "next_population_values", ")", "]", "def", "evolve_cond", "(", "loop_vars", ")", ":", "should_stop", "=", "(", "loop_vars", ".", "failed", "|", "loop_vars", ".", "converged", "|", "(", "max_iterations", "is", "not", "None", "and", "loop_vars", ".", "num_iterations", ">=", "max_iterations", ")", ")", "return", "~", "should_stop", "initial_vars", "=", "_MinimizeLoopVars", "(", "converged", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "False", ")", ",", "failed", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "False", ")", ",", "num_iterations", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "0", ")", ",", "population", "=", "population", ",", "population_values", "=", "population_values", ")", "final_state", "=", "tf", ".", "while_loop", "(", "cond", "=", "evolve_cond", ",", "body", "=", "evolve_body", ",", "loop_vars", "=", "(", "initial_vars", ",", ")", ")", "[", "0", "]", 
"best_position", ",", "best_values", "=", "_find_best_in_population", "(", "final_state", ".", "population", ",", "final_state", ".", "population_values", ")", "# Ensure we return a similar structure to what the user supplied.", "final_population", "=", "final_state", ".", "population", "if", "not", "was_iterable", ":", "final_population", "=", "final_population", "[", "0", "]", "best_position", "=", "best_position", "[", "0", "]", "return", "DifferentialEvolutionOptimizerResults", "(", "converged", "=", "final_state", ".", "converged", ",", "failed", "=", "final_state", ".", "failed", ",", "position", "=", "best_position", ",", "objective_value", "=", "best_values", ",", "final_population", "=", "final_population", ",", "final_objective_values", "=", "final_state", ".", "population_values", ",", "initial_population", "=", "population", ",", "initial_objective_values", "=", "population_values", ",", "num_iterations", "=", "final_state", ".", "num_iterations", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
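The record above is the driver; the helper records that follow implement its pieces. As a reading aid, here is a minimal NumPy sketch of one DE/rand/1/bin generation (mutation `a + F * (b - c)`, binomial crossover with one forced site, greedy selection). It is only an illustration of the scheme the docstring describes, not the TFP implementation; the function name, shapes, and defaults are assumptions made for the sketch.

```python
import numpy as np

def de_generation(population, objective_fn, f=0.5, cr=0.9, rng=None):
    """One illustrative DE/rand/1/bin generation over a float [n, d] population."""
    rng = rng or np.random.default_rng()
    n, d = population.shape
    values = objective_fn(population)
    trial = np.empty_like(population)
    for i in range(n):
        # Three distinct donor indices, none equal to i.
        a, b, c = rng.choice([j for j in range(n) if j != i], size=3, replace=False)
        mutant = population[a] + f * (population[b] - population[c])
        # Binomial crossover; force at least one coordinate to come from the mutant.
        take_mutant = rng.random(d) < cr
        take_mutant[rng.integers(d)] = True
        trial[i] = np.where(take_mutant, mutant, population[i])
    # Greedy selection: keep a trial member only where it improves the objective.
    trial_values = objective_fn(trial)
    keep = trial_values < values
    return np.where(keep[:, None], trial, population)

# Usage sketch (sum-of-squares objective):
# pop = np.random.normal(size=(40, 2))
# for _ in range(100):
#     pop = de_generation(pop, lambda x: np.sum(x ** 2, axis=-1))
```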
test
_get_initial_args
Processes initial args.
tensorflow_probability/python/optimizer/differential_evolution.py
def _get_initial_args(objective_function, initial_population, initial_position, population_size, population_stddev, max_iterations, func_tolerance, position_tolerance, differential_weight, crossover_prob, seed): """Processes initial args.""" was_iterable = False if initial_position is not None: initial_position, was_iterable = _ensure_list(initial_position) if initial_population is not None: initial_population, was_iterable = _ensure_list(initial_population) population = _get_starting_population(initial_population, initial_position, population_size, population_stddev, seed=seed) differential_weight = tf.convert_to_tensor( value=differential_weight, dtype=population[0].dtype.base_dtype) crossover_prob = tf.convert_to_tensor(value=crossover_prob) population_values = objective_function(*population) if max_iterations is not None: max_iterations = tf.convert_to_tensor(value=max_iterations) func_tolerance = tf.convert_to_tensor( value=func_tolerance, dtype=population_values.dtype.base_dtype) position_tolerance = tf.convert_to_tensor( value=position_tolerance, dtype=population[0].dtype.base_dtype) return (was_iterable, population, population_values, max_iterations, func_tolerance, position_tolerance, differential_weight, crossover_prob)
def _get_initial_args(objective_function, initial_population, initial_position, population_size, population_stddev, max_iterations, func_tolerance, position_tolerance, differential_weight, crossover_prob, seed): """Processes initial args.""" was_iterable = False if initial_position is not None: initial_position, was_iterable = _ensure_list(initial_position) if initial_population is not None: initial_population, was_iterable = _ensure_list(initial_population) population = _get_starting_population(initial_population, initial_position, population_size, population_stddev, seed=seed) differential_weight = tf.convert_to_tensor( value=differential_weight, dtype=population[0].dtype.base_dtype) crossover_prob = tf.convert_to_tensor(value=crossover_prob) population_values = objective_function(*population) if max_iterations is not None: max_iterations = tf.convert_to_tensor(value=max_iterations) func_tolerance = tf.convert_to_tensor( value=func_tolerance, dtype=population_values.dtype.base_dtype) position_tolerance = tf.convert_to_tensor( value=position_tolerance, dtype=population[0].dtype.base_dtype) return (was_iterable, population, population_values, max_iterations, func_tolerance, position_tolerance, differential_weight, crossover_prob)
[ "Processes", "initial", "args", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L459-L502
[ "def", "_get_initial_args", "(", "objective_function", ",", "initial_population", ",", "initial_position", ",", "population_size", ",", "population_stddev", ",", "max_iterations", ",", "func_tolerance", ",", "position_tolerance", ",", "differential_weight", ",", "crossover_prob", ",", "seed", ")", ":", "was_iterable", "=", "False", "if", "initial_position", "is", "not", "None", ":", "initial_position", ",", "was_iterable", "=", "_ensure_list", "(", "initial_position", ")", "if", "initial_population", "is", "not", "None", ":", "initial_population", ",", "was_iterable", "=", "_ensure_list", "(", "initial_population", ")", "population", "=", "_get_starting_population", "(", "initial_population", ",", "initial_position", ",", "population_size", ",", "population_stddev", ",", "seed", "=", "seed", ")", "differential_weight", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "differential_weight", ",", "dtype", "=", "population", "[", "0", "]", ".", "dtype", ".", "base_dtype", ")", "crossover_prob", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "crossover_prob", ")", "population_values", "=", "objective_function", "(", "*", "population", ")", "if", "max_iterations", "is", "not", "None", ":", "max_iterations", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "max_iterations", ")", "func_tolerance", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "func_tolerance", ",", "dtype", "=", "population_values", ".", "dtype", ".", "base_dtype", ")", "position_tolerance", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "position_tolerance", ",", "dtype", "=", "population", "[", "0", "]", ".", "dtype", ".", "base_dtype", ")", "return", "(", "was_iterable", ",", "population", ",", "population_values", ",", "max_iterations", ",", "func_tolerance", ",", "position_tolerance", ",", "differential_weight", ",", "crossover_prob", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_check_failure
Checks if all the population values are NaN/infinite.
tensorflow_probability/python/optimizer/differential_evolution.py
def _check_failure(population_values): """Checks if all the population values are NaN/infinite.""" return tf.math.reduce_all(input_tensor=tf.math.is_inf(population_values))
def _check_failure(population_values): """Checks if all the population values are NaN/infinite.""" return tf.math.reduce_all(input_tensor=tf.math.is_inf(population_values))
[ "Checks", "if", "all", "the", "population", "values", "are", "NaN", "/", "infinite", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L505-L507
[ "def", "_check_failure", "(", "population_values", ")", ":", "return", "tf", ".", "math", ".", "reduce_all", "(", "input_tensor", "=", "tf", ".", "math", ".", "is_inf", "(", "population_values", ")", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_find_best_in_population
Finds the population member with the lowest value.
tensorflow_probability/python/optimizer/differential_evolution.py
def _find_best_in_population(population, values): """Finds the population member with the lowest value.""" best_value = tf.math.reduce_min(input_tensor=values) best_index = tf.where(tf.math.equal(values, best_value))[0, 0] return ([population_part[best_index] for population_part in population], best_value)
def _find_best_in_population(population, values): """Finds the population member with the lowest value.""" best_value = tf.math.reduce_min(input_tensor=values) best_index = tf.where(tf.math.equal(values, best_value))[0, 0] return ([population_part[best_index] for population_part in population], best_value)
[ "Finds", "the", "population", "member", "with", "the", "lowest", "value", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L510-L516
[ "def", "_find_best_in_population", "(", "population", ",", "values", ")", ":", "best_value", "=", "tf", ".", "math", ".", "reduce_min", "(", "input_tensor", "=", "values", ")", "best_index", "=", "tf", ".", "where", "(", "tf", ".", "math", ".", "equal", "(", "values", ",", "best_value", ")", ")", "[", "0", ",", "0", "]", "return", "(", "[", "population_part", "[", "best_index", "]", "for", "population_part", "in", "population", "]", ",", "best_value", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_check_convergence
Checks whether the convergence criteria have been met.
tensorflow_probability/python/optimizer/differential_evolution.py
def _check_convergence(population, population_values, func_tolerance, position_tolerance): """Checks whether the convergence criteria have been met.""" # Check func tolerance value_range = tf.math.abs( tf.math.reduce_max(input_tensor=population_values) - tf.math.reduce_min(input_tensor=population_values)) value_converged = value_range <= func_tolerance # Ideally, we would compute the position convergence by computing the # pairwise distance between every member of the population and checking if # the maximum of those is less than the supplied tolerance. However, this is # completely infeasible in terms of performance. We adopt a more conservative # approach which checks the distance between the first population member # with the rest of the population. If the largest such distance is less than # half the supplied tolerance, we stop. The reason why this is sufficient is # as follows. For any pair of distinct points (a, b) in the population, we # have the relation: |a - b| <= |x0 - a| + |x0 - b|, where x0 is any # other point. In particular, let x0 be the first element of the population # and suppose that the largest distance between this point and any other # member is epsilon. Then, for any pair of points (a, b), # |a - b| <= 2 * epsilon and hence, the maximum distance between any pair of # points in the population is bounded above by twice the distance between # the first point and other points. half_tol = position_tolerance / 2 def part_converged(part): return tf.math.reduce_max(input_tensor=tf.math.abs(part - part[0])) <= half_tol x_converged = tf.math.reduce_all( input_tensor=[part_converged(part) for part in population]) return value_converged | x_converged
def _check_convergence(population, population_values, func_tolerance, position_tolerance): """Checks whether the convergence criteria have been met.""" # Check func tolerance value_range = tf.math.abs( tf.math.reduce_max(input_tensor=population_values) - tf.math.reduce_min(input_tensor=population_values)) value_converged = value_range <= func_tolerance # Ideally, we would compute the position convergence by computing the # pairwise distance between every member of the population and checking if # the maximum of those is less than the supplied tolerance. However, this is # completely infeasible in terms of performance. We adopt a more conservative # approach which checks the distance between the first population member # with the rest of the population. If the largest such distance is less than # half the supplied tolerance, we stop. The reason why this is sufficient is # as follows. For any pair of distinct points (a, b) in the population, we # have the relation: |a - b| <= |x0 - a| + |x0 - b|, where x0 is any # other point. In particular, let x0 be the first element of the population # and suppose that the largest distance between this point and any other # member is epsilon. Then, for any pair of points (a, b), # |a - b| <= 2 * epsilon and hence, the maximum distance between any pair of # points in the population is bounded above by twice the distance between # the first point and other points. half_tol = position_tolerance / 2 def part_converged(part): return tf.math.reduce_max(input_tensor=tf.math.abs(part - part[0])) <= half_tol x_converged = tf.math.reduce_all( input_tensor=[part_converged(part) for part in population]) return value_converged | x_converged
[ "Checks", "whether", "the", "convergence", "criteria", "have", "been", "met", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L519-L551
[ "def", "_check_convergence", "(", "population", ",", "population_values", ",", "func_tolerance", ",", "position_tolerance", ")", ":", "# Check func tolerance", "value_range", "=", "tf", ".", "math", ".", "abs", "(", "tf", ".", "math", ".", "reduce_max", "(", "input_tensor", "=", "population_values", ")", "-", "tf", ".", "math", ".", "reduce_min", "(", "input_tensor", "=", "population_values", ")", ")", "value_converged", "=", "value_range", "<=", "func_tolerance", "# Ideally, we would compute the position convergence by computing the", "# pairwise distance between every member of the population and checking if", "# the maximum of those is less than the supplied tolerance. However, this is", "# completely infeasible in terms of performance. We adopt a more conservative", "# approach which checks the distance between the first population member", "# with the rest of the population. If the largest such distance is less than", "# half the supplied tolerance, we stop. The reason why this is sufficient is", "# as follows. For any pair of distinct points (a, b) in the population, we", "# have the relation: |a - b| <= |x0 - a| + |x0 - b|, where x0 is any", "# other point. In particular, let x0 be the first element of the population", "# and suppose that the largest distance between this point and any other", "# member is epsilon. Then, for any pair of points (a, b),", "# |a - b| <= 2 * epsilon and hence, the maximum distance between any pair of", "# points in the population is bounded above by twice the distance between", "# the first point and other points.", "half_tol", "=", "position_tolerance", "/", "2", "def", "part_converged", "(", "part", ")", ":", "return", "tf", ".", "math", ".", "reduce_max", "(", "input_tensor", "=", "tf", ".", "math", ".", "abs", "(", "part", "-", "part", "[", "0", "]", ")", ")", "<=", "half_tol", "x_converged", "=", "tf", ".", "math", ".", "reduce_all", "(", "input_tensor", "=", "[", "part_converged", "(", "part", ")", "for", "part", "in", "population", "]", ")", "return", "value_converged", "|", "x_converged" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
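The half-tolerance trick in `_check_convergence` rests on the triangle inequality |a - b| <= |x0 - a| + |x0 - b|. A small, self-contained NumPy check of the bound as used here; all numbers are illustrative.

```python
import numpy as np

# If every member is within tol/2 of the first member (per coordinate),
# then every pair of members is within tol of each other.
rng = np.random.default_rng(0)
tol = 1e-3
population = 1.0 + rng.uniform(-tol / 4, tol / 4, size=(50, 3))  # clustered near 1.0

dist_to_first = np.max(np.abs(population - population[0]))
pairwise_max = np.max(np.abs(population[:, None, :] - population[None, :, :]))

assert dist_to_first <= tol / 2
assert pairwise_max <= 2 * dist_to_first   # triangle inequality
assert pairwise_max <= tol
```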
test
_get_starting_population
Constructs the initial population. If an initial population is not already provided, this function constructs a population by adding random normal noise to the initial position. Args: initial_population: None or a list of `Tensor`s. The initial population. initial_position: None or a list of `Tensor`s. The initial position. If initial_population is None, this argument must not be None. population_size: Scalar integer `Tensor`. The number of members in the population. If the initial population is not None, this parameter is ignored. population_stddev: A positive scalar real `Tensor` of the same dtype as `initial_position` or `initial_population` (whichever is not None). This parameter is ignored if `initial_population` is specified. Used to generate the population from the `initial_position` by adding random normal noise with zero mean and the specified standard deviation. seed: Seed for random number generation. Returns: A list of `Tensor`s. The initial population.
tensorflow_probability/python/optimizer/differential_evolution.py
def _get_starting_population(initial_population, initial_position, population_size, population_stddev, seed): """Constructs the initial population. If an initial population is not already provided, this function constructs a population by adding random normal noise to the initial position. Args: initial_population: None or a list of `Tensor`s. The initial population. initial_position: None or a list of `Tensor`s. The initial position. If initial_population is None, this argument must not be None. population_size: Scalar integer `Tensor`. The number of members in the population. If the initial population is not None, this parameter is ignored. population_stddev: A positive scalar real `Tensor` of the same dtype as `initial_position` or `initial_population` (whichever is not None). This parameter is ignored if `initial_population` is specified. Used to generate the population from the `initial_position` by adding random normal noise with zero mean and the specified standard deviation. seed: Seed for random number generation. Returns: A list of `Tensor`s. The initial population. """ if initial_population is not None: return [tf.convert_to_tensor(value=part) for part in initial_population] # Constructs the population by adding normal noise to the initial position. seed_stream = distributions.SeedStream(seed, salt='get_starting_population') population = [] for part in initial_position: part = tf.convert_to_tensor(value=part) part_event_shape = tf.shape(input=part) # We only draw population_size-1 random vectors because we want to ensure # that the supplied position is part of the population. The first member # is set to be the initial_position. population_part_shape = tf.concat([[population_size-1], part_event_shape], axis=0) population_part = tf.random.normal(population_part_shape, stddev=population_stddev, dtype=part.dtype.base_dtype, seed=seed_stream()) population_part += part population_part = tf.concat([[part], population_part], axis=0) population.append(population_part) return population
def _get_starting_population(initial_population, initial_position, population_size, population_stddev, seed): """Constructs the initial population. If an initial population is not already provided, this function constructs a population by adding random normal noise to the initial position. Args: initial_population: None or a list of `Tensor`s. The initial population. initial_position: None or a list of `Tensor`s. The initial position. If initial_population is None, this argument must not be None. population_size: Scalar integer `Tensor`. The number of members in the population. If the initial population is not None, this parameter is ignored. population_stddev: A positive scalar real `Tensor` of the same dtype as `initial_position` or `initial_population` (whichever is not None). This parameter is ignored if `initial_population` is specified. Used to generate the population from the `initial_position` by adding random normal noise with zero mean and the specified standard deviation. seed: Seed for random number generation. Returns: A list of `Tensor`s. The initial population. """ if initial_population is not None: return [tf.convert_to_tensor(value=part) for part in initial_population] # Constructs the population by adding normal noise to the initial position. seed_stream = distributions.SeedStream(seed, salt='get_starting_population') population = [] for part in initial_position: part = tf.convert_to_tensor(value=part) part_event_shape = tf.shape(input=part) # We only draw population_size-1 random vectors because we want to ensure # that the supplied position is part of the population. The first member # is set to be the initial_position. population_part_shape = tf.concat([[population_size-1], part_event_shape], axis=0) population_part = tf.random.normal(population_part_shape, stddev=population_stddev, dtype=part.dtype.base_dtype, seed=seed_stream()) population_part += part population_part = tf.concat([[part], population_part], axis=0) population.append(population_part) return population
[ "Constructs", "the", "initial", "population", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L554-L602
[ "def", "_get_starting_population", "(", "initial_population", ",", "initial_position", ",", "population_size", ",", "population_stddev", ",", "seed", ")", ":", "if", "initial_population", "is", "not", "None", ":", "return", "[", "tf", ".", "convert_to_tensor", "(", "value", "=", "part", ")", "for", "part", "in", "initial_population", "]", "# Constructs the population by adding normal noise to the initial position.", "seed_stream", "=", "distributions", ".", "SeedStream", "(", "seed", ",", "salt", "=", "'get_starting_population'", ")", "population", "=", "[", "]", "for", "part", "in", "initial_position", ":", "part", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "part", ")", "part_event_shape", "=", "tf", ".", "shape", "(", "input", "=", "part", ")", "# We only draw population_size-1 random vectors because we want to ensure", "# that the supplied position is part of the population. The first member", "# is set to be the initial_position.", "population_part_shape", "=", "tf", ".", "concat", "(", "[", "[", "population_size", "-", "1", "]", ",", "part_event_shape", "]", ",", "axis", "=", "0", ")", "population_part", "=", "tf", ".", "random", ".", "normal", "(", "population_part_shape", ",", "stddev", "=", "population_stddev", ",", "dtype", "=", "part", ".", "dtype", ".", "base_dtype", ",", "seed", "=", "seed_stream", "(", ")", ")", "population_part", "+=", "part", "population_part", "=", "tf", ".", "concat", "(", "[", "[", "part", "]", ",", "population_part", "]", ",", "axis", "=", "0", ")", "population", ".", "append", "(", "population_part", ")", "return", "population" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
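A hedged NumPy sketch of the same idea as `_get_starting_population` when only a seed point is supplied: keep the seed as member 0 and add zero-mean Gaussian noise to produce the remaining members. Names and shapes are illustrative, not the TFP API.

```python
import numpy as np

def starting_population(initial_position, population_size, stddev=1.0, rng=None):
    """Builds a [population_size, ...] array whose first row is the seed point."""
    rng = rng or np.random.default_rng()
    seed_point = np.asarray(initial_position, dtype=float)
    # Draw population_size - 1 noisy copies so the seed itself stays in the population.
    noise = rng.normal(scale=stddev, size=(population_size - 1,) + seed_point.shape)
    return np.concatenate([seed_point[None, ...], seed_point + noise], axis=0)

# starting_population([1.0, 1.0], population_size=40).shape == (40, 2)
```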
test
_binary_crossover
Performs recombination by binary crossover for the current population. Let v_i denote the i'th component of the member v and m_i the corresponding component of the mutant vector corresponding to v. Then the crossed over vector w_i is determined by setting w_i = (m_i with probability=crossover_prob else v_i). In addition, DE requires that at least one of the components is crossed over (otherwise we end up with no change). This is done by choosing an index, say k, at random, at which a forced crossover is performed (i.e. w_k = m_k). This is the scheme implemented in this function. Args: population: A Python list of `Tensor`s where each `Tensor` in the list must be of rank at least 1 and all the elements must have a common first dimension. The base population to cross over. population_size: A scalar integer `Tensor`. The number of elements in the population (i.e. size of the first dimension of any member of `population`). mutants: A Python list of `Tensor`s with the same structure as `population`. The mutated population. crossover_prob: A positive real scalar `Tensor` bounded above by 1.0. The probability of a crossover being performed for each axis. seed: `int` or None. The random seed for this `Op`. If `None`, no seed is applied. Returns: A list of `Tensor`s of the same structure, dtype and shape as `population`. The recombined population.
tensorflow_probability/python/optimizer/differential_evolution.py
def _binary_crossover(population, population_size, mutants, crossover_prob, seed): """Performs recombination by binary crossover for the current population. Let v_i denote the i'th component of the member v and m_i the corresponding component of the mutant vector corresponding to v. Then the crossed over vector w_i is determined by setting w_i = (m_i with probability=crossover_prob else v_i). In addition, DE requires that at least one of the components is crossed over (otherwise we end up with no change). This is done by choosing on index say k randomly where a force crossover is performed (i.e. w_k = m_k). This is the scheme implemented in this function. Args: population: A Python list of `Tensor`s where each `Tensor` in the list must be of rank at least 1 and all the elements must have a common first dimension. The base population to cross over. population_size: A scalar integer `Tensor`. The number of elements in the population (i.e. size of the first dimension of any member of `population`). mutants: A Python list of `Tensor`s with the same structure as `population`. The mutated population. crossover_prob: A postive real scalar `Tensor` bounded above by 1.0. The probability of a crossover being performed for each axis. seed: `int` or None. The random seed for this `Op`. If `None`, no seed is applied. Returns: A list of `Tensor`s of the same structure, dtype and shape as `population`. The recombined population. """ sizes = [tf.cast(tf.size(input=x), dtype=tf.float64) for x in population] seed_stream = distributions.SeedStream(seed, salt='binary_crossover') force_crossover_group = distributions.Categorical(sizes).sample( [population_size, 1], seed=seed_stream()) recombinants = [] for i, population_part in enumerate(population): pop_part_flat = tf.reshape(population_part, [population_size, -1]) mutant_part_flat = tf.reshape(mutants[i], [population_size, -1]) part_size = tf.size(input=population_part) // population_size force_crossovers = tf.one_hot( tf.random.uniform([population_size], minval=0, maxval=part_size, dtype=tf.int32, seed=seed_stream()), part_size, on_value=True, off_value=False, dtype=tf.bool) # Tensor of shape [population_size, size] group_mask = tf.math.equal(force_crossover_group, i) force_crossovers &= group_mask do_binary_crossover = tf.random.uniform( [population_size, part_size], dtype=crossover_prob.dtype.base_dtype, seed=seed_stream()) < crossover_prob do_binary_crossover |= force_crossovers recombinant_flat = tf.where( do_binary_crossover, x=mutant_part_flat, y=pop_part_flat) recombinant = tf.reshape(recombinant_flat, tf.shape(input=population_part)) recombinants.append(recombinant) return recombinants
def _binary_crossover(population, population_size, mutants, crossover_prob, seed): """Performs recombination by binary crossover for the current population. Let v_i denote the i'th component of the member v and m_i the corresponding component of the mutant vector corresponding to v. Then the crossed over vector w_i is determined by setting w_i = (m_i with probability=crossover_prob else v_i). In addition, DE requires that at least one of the components is crossed over (otherwise we end up with no change). This is done by choosing on index say k randomly where a force crossover is performed (i.e. w_k = m_k). This is the scheme implemented in this function. Args: population: A Python list of `Tensor`s where each `Tensor` in the list must be of rank at least 1 and all the elements must have a common first dimension. The base population to cross over. population_size: A scalar integer `Tensor`. The number of elements in the population (i.e. size of the first dimension of any member of `population`). mutants: A Python list of `Tensor`s with the same structure as `population`. The mutated population. crossover_prob: A postive real scalar `Tensor` bounded above by 1.0. The probability of a crossover being performed for each axis. seed: `int` or None. The random seed for this `Op`. If `None`, no seed is applied. Returns: A list of `Tensor`s of the same structure, dtype and shape as `population`. The recombined population. """ sizes = [tf.cast(tf.size(input=x), dtype=tf.float64) for x in population] seed_stream = distributions.SeedStream(seed, salt='binary_crossover') force_crossover_group = distributions.Categorical(sizes).sample( [population_size, 1], seed=seed_stream()) recombinants = [] for i, population_part in enumerate(population): pop_part_flat = tf.reshape(population_part, [population_size, -1]) mutant_part_flat = tf.reshape(mutants[i], [population_size, -1]) part_size = tf.size(input=population_part) // population_size force_crossovers = tf.one_hot( tf.random.uniform([population_size], minval=0, maxval=part_size, dtype=tf.int32, seed=seed_stream()), part_size, on_value=True, off_value=False, dtype=tf.bool) # Tensor of shape [population_size, size] group_mask = tf.math.equal(force_crossover_group, i) force_crossovers &= group_mask do_binary_crossover = tf.random.uniform( [population_size, part_size], dtype=crossover_prob.dtype.base_dtype, seed=seed_stream()) < crossover_prob do_binary_crossover |= force_crossovers recombinant_flat = tf.where( do_binary_crossover, x=mutant_part_flat, y=pop_part_flat) recombinant = tf.reshape(recombinant_flat, tf.shape(input=population_part)) recombinants.append(recombinant) return recombinants
[ "Performs", "recombination", "by", "binary", "crossover", "for", "the", "current", "population", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L605-L671
[ "def", "_binary_crossover", "(", "population", ",", "population_size", ",", "mutants", ",", "crossover_prob", ",", "seed", ")", ":", "sizes", "=", "[", "tf", ".", "cast", "(", "tf", ".", "size", "(", "input", "=", "x", ")", ",", "dtype", "=", "tf", ".", "float64", ")", "for", "x", "in", "population", "]", "seed_stream", "=", "distributions", ".", "SeedStream", "(", "seed", ",", "salt", "=", "'binary_crossover'", ")", "force_crossover_group", "=", "distributions", ".", "Categorical", "(", "sizes", ")", ".", "sample", "(", "[", "population_size", ",", "1", "]", ",", "seed", "=", "seed_stream", "(", ")", ")", "recombinants", "=", "[", "]", "for", "i", ",", "population_part", "in", "enumerate", "(", "population", ")", ":", "pop_part_flat", "=", "tf", ".", "reshape", "(", "population_part", ",", "[", "population_size", ",", "-", "1", "]", ")", "mutant_part_flat", "=", "tf", ".", "reshape", "(", "mutants", "[", "i", "]", ",", "[", "population_size", ",", "-", "1", "]", ")", "part_size", "=", "tf", ".", "size", "(", "input", "=", "population_part", ")", "//", "population_size", "force_crossovers", "=", "tf", ".", "one_hot", "(", "tf", ".", "random", ".", "uniform", "(", "[", "population_size", "]", ",", "minval", "=", "0", ",", "maxval", "=", "part_size", ",", "dtype", "=", "tf", ".", "int32", ",", "seed", "=", "seed_stream", "(", ")", ")", ",", "part_size", ",", "on_value", "=", "True", ",", "off_value", "=", "False", ",", "dtype", "=", "tf", ".", "bool", ")", "# Tensor of shape [population_size, size]", "group_mask", "=", "tf", ".", "math", ".", "equal", "(", "force_crossover_group", ",", "i", ")", "force_crossovers", "&=", "group_mask", "do_binary_crossover", "=", "tf", ".", "random", ".", "uniform", "(", "[", "population_size", ",", "part_size", "]", ",", "dtype", "=", "crossover_prob", ".", "dtype", ".", "base_dtype", ",", "seed", "=", "seed_stream", "(", ")", ")", "<", "crossover_prob", "do_binary_crossover", "|=", "force_crossovers", "recombinant_flat", "=", "tf", ".", "where", "(", "do_binary_crossover", ",", "x", "=", "mutant_part_flat", ",", "y", "=", "pop_part_flat", ")", "recombinant", "=", "tf", ".", "reshape", "(", "recombinant_flat", ",", "tf", ".", "shape", "(", "input", "=", "population_part", ")", ")", "recombinants", ".", "append", "(", "recombinant", ")", "return", "recombinants" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
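A vectorized, single-part NumPy sketch of the binomial crossover above, with exactly one forced site per population member. The multi-part bookkeeping (the categorical draw over part sizes) is omitted, and the names are illustrative.

```python
import numpy as np

def binary_crossover(population, mutants, crossover_prob, rng=None):
    """Mixes mutants into a [n, d] population, guaranteeing >= 1 mutant site per row."""
    rng = rng or np.random.default_rng()
    n, d = population.shape
    take_mutant = rng.random((n, d)) < crossover_prob
    forced = np.zeros((n, d), dtype=bool)
    forced[np.arange(n), rng.integers(0, d, size=n)] = True  # one forced site per row
    return np.where(take_mutant | forced, mutants, population)
```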
test
_get_mutants
Computes the mutated vectors for each population member. Args: population: Python `list` of `Tensor`s representing the current population vectors. Each `Tensor` must be of the same real dtype. The first dimension of each `Tensor` indexes individual population members. For example, if the population is a list with a single `Tensor` of shape [n, m1, m2], then `n` is the population size and the shape of an individual solution is [m1, m2]. If there is more than one element in the population, then each `Tensor` in the list should have the first axis of the same size. population_size: Scalar integer `Tensor`. The size of the population. mixing_indices: `Tensor` of integral dtype and shape [n, 3] where `n` is the number of members in the population. Each element of the `Tensor` must be a valid index into the first dimension of the population (i.e. in the range `0` to `n-1` inclusive). differential_weight: Real scalar `Tensor`. Must be positive and less than 2.0. The parameter controlling the strength of mutation. Returns: mutants: `Tensor` or Python `list` of `Tensor`s of the same shape and dtype as the input population. The mutated vectors.
tensorflow_probability/python/optimizer/differential_evolution.py
def _get_mutants(population, population_size, mixing_indices, differential_weight): """Computes the mutatated vectors for each population member. Args: population: Python `list` of `Tensor`s representing the current population vectors. Each `Tensor` must be of the same real dtype. The first dimension of each `Tensor` indexes individual population members. For example, if the population is a list with a single `Tensor` of shape [n, m1, m2], then `n` is the population size and the shape of an individual solution is [m1, m2]. If there is more than one element in the population, then each `Tensor` in the list should have the first axis of the same size. population_size: Scalar integer `Tensor`. The size of the population. mixing_indices: `Tensor` of integral dtype and shape [n, 3] where `n` is the number of members in the population. Each element of the `Tensor` must be a valid index into the first dimension of the population (i.e range between `0` and `n-1` inclusive). differential_weight: Real scalar `Tensor`. Must be positive and less than 2.0. The parameter controlling the strength of mutation. Returns: mutants: `Tensor` or Python `list` of `Tensor`s of the same shape and dtype as the input population. The mutated vectors. """ mixing_indices = tf.reshape(mixing_indices, [-1]) weights = tf.stack([1.0, differential_weight, -differential_weight]) def _mutant_part(population_part): donors = tf.gather(population_part, mixing_indices) donors = tf.transpose( a=tf.reshape(donors, [population_size, 3, -1]), perm=[0, 2, 1]) return tf.math.reduce_sum(input_tensor=donors * weights, axis=-1) return [_mutant_part(population_part) for population_part in population]
def _get_mutants(population, population_size, mixing_indices, differential_weight): """Computes the mutatated vectors for each population member. Args: population: Python `list` of `Tensor`s representing the current population vectors. Each `Tensor` must be of the same real dtype. The first dimension of each `Tensor` indexes individual population members. For example, if the population is a list with a single `Tensor` of shape [n, m1, m2], then `n` is the population size and the shape of an individual solution is [m1, m2]. If there is more than one element in the population, then each `Tensor` in the list should have the first axis of the same size. population_size: Scalar integer `Tensor`. The size of the population. mixing_indices: `Tensor` of integral dtype and shape [n, 3] where `n` is the number of members in the population. Each element of the `Tensor` must be a valid index into the first dimension of the population (i.e range between `0` and `n-1` inclusive). differential_weight: Real scalar `Tensor`. Must be positive and less than 2.0. The parameter controlling the strength of mutation. Returns: mutants: `Tensor` or Python `list` of `Tensor`s of the same shape and dtype as the input population. The mutated vectors. """ mixing_indices = tf.reshape(mixing_indices, [-1]) weights = tf.stack([1.0, differential_weight, -differential_weight]) def _mutant_part(population_part): donors = tf.gather(population_part, mixing_indices) donors = tf.transpose( a=tf.reshape(donors, [population_size, 3, -1]), perm=[0, 2, 1]) return tf.math.reduce_sum(input_tensor=donors * weights, axis=-1) return [_mutant_part(population_part) for population_part in population]
[ "Computes", "the", "mutatated", "vectors", "for", "each", "population", "member", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L674-L709
[ "def", "_get_mutants", "(", "population", ",", "population_size", ",", "mixing_indices", ",", "differential_weight", ")", ":", "mixing_indices", "=", "tf", ".", "reshape", "(", "mixing_indices", ",", "[", "-", "1", "]", ")", "weights", "=", "tf", ".", "stack", "(", "[", "1.0", ",", "differential_weight", ",", "-", "differential_weight", "]", ")", "def", "_mutant_part", "(", "population_part", ")", ":", "donors", "=", "tf", ".", "gather", "(", "population_part", ",", "mixing_indices", ")", "donors", "=", "tf", ".", "transpose", "(", "a", "=", "tf", ".", "reshape", "(", "donors", ",", "[", "population_size", ",", "3", ",", "-", "1", "]", ")", ",", "perm", "=", "[", "0", ",", "2", ",", "1", "]", ")", "return", "tf", ".", "math", ".", "reduce_sum", "(", "input_tensor", "=", "donors", "*", "weights", ",", "axis", "=", "-", "1", ")", "return", "[", "_mutant_part", "(", "population_part", ")", "for", "population_part", "in", "population", "]" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
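The weighted gather above computes `x_a + F * (x_b - x_c)` for every member in one pass. A minimal single-part NumPy sketch, assuming `mixing_indices` has shape [n, 3] as produced by `_get_mixing_indices`; names are illustrative.

```python
import numpy as np

def get_mutants(population, mixing_indices, differential_weight):
    """population: float [n, d]; mixing_indices: int [n, 3] rows of distinct donors."""
    donors = population[mixing_indices]                      # [n, 3, d]
    weights = np.array([1.0, differential_weight, -differential_weight])
    return np.einsum('nkd,k->nd', donors, weights)           # x_a + F*x_b - F*x_c
```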
test
_get_mixing_indices
Generates an array of indices suitable for mutation operation. The mutation operation in differential evolution requires that for every element of the population, three distinct other elements be chosen to produce a trial candidate. This function generates an array of shape [size, 3] satisfying the properties that: (a). array[i, :] does not contain the index 'i'. (b). array[i, :] does not contain any overlapping indices. (c). All elements in the array are between 0 and size - 1 inclusive. Args: size: Scalar integer `Tensor`. The number of samples as well as the range of the indices to sample from. seed: `int` or None. The random seed for this `Op`. If `None`, no seed is applied. Default value: `None`. name: Python `str` name prefixed to Ops created by this function. Default value: 'get_mixing_indices'. Returns: sample: A `Tensor` of shape [size, 3] and same dtype as `size` containing samples without replacement between 0 and size - 1 (inclusive) with the `i`th row not including the number `i`.
tensorflow_probability/python/optimizer/differential_evolution.py
def _get_mixing_indices(size, seed=None, name=None): """Generates an array of indices suitable for mutation operation. The mutation operation in differential evolution requires that for every element of the population, three distinct other elements be chosen to produce a trial candidate. This function generates an array of shape [size, 3] satisfying the properties that: (a). array[i, :] does not contain the index 'i'. (b). array[i, :] does not contain any overlapping indices. (c). All elements in the array are between 0 and size - 1 inclusive. Args: size: Scalar integer `Tensor`. The number of samples as well as a the range of the indices to sample from. seed: `int` or None. The random seed for this `Op`. If `None`, no seed is applied. Default value: `None`. name: Python `str` name prefixed to Ops created by this function. Default value: 'get_mixing_indices'. Returns: sample: A `Tensor` of shape [size, 3] and same dtype as `size` containing samples without replacement between 0 and size - 1 (inclusive) with the `i`th row not including the number `i`. """ with tf.compat.v1.name_scope( name, default_name='get_mixing_indices', values=[size]): size = tf.convert_to_tensor(value=size) dtype = size.dtype seed_stream = distributions.SeedStream(seed, salt='get_mixing_indices') first = tf.random.uniform([size], maxval=size-1, dtype=dtype, seed=seed_stream()) second = tf.random.uniform([size], maxval=size-2, dtype=dtype, seed=seed_stream()) third = tf.random.uniform([size], maxval=size-3, dtype=dtype, seed=seed_stream()) # Shift second if it is on top of or to the right of first second = tf.where(first < second, x=second, y=second + 1) smaller = tf.math.minimum(first, second) larger = tf.math.maximum(first, second) # Shift the third one so it does not coincide with either the first or the # second number. Assuming first < second, shift by 1 if the number is in # [first, second) and by 2 if the number is greater than or equal to the # second. third = tf.where(third < smaller, x=third, y=third + 1) third = tf.where(third < larger, x=third, y=third + 1) sample = tf.stack([first, second, third], axis=1) to_avoid = tf.expand_dims(tf.range(size), axis=-1) sample = tf.where(sample < to_avoid, x=sample, y=sample + 1) return sample
def _get_mixing_indices(size, seed=None, name=None): """Generates an array of indices suitable for mutation operation. The mutation operation in differential evolution requires that for every element of the population, three distinct other elements be chosen to produce a trial candidate. This function generates an array of shape [size, 3] satisfying the properties that: (a). array[i, :] does not contain the index 'i'. (b). array[i, :] does not contain any overlapping indices. (c). All elements in the array are between 0 and size - 1 inclusive. Args: size: Scalar integer `Tensor`. The number of samples as well as a the range of the indices to sample from. seed: `int` or None. The random seed for this `Op`. If `None`, no seed is applied. Default value: `None`. name: Python `str` name prefixed to Ops created by this function. Default value: 'get_mixing_indices'. Returns: sample: A `Tensor` of shape [size, 3] and same dtype as `size` containing samples without replacement between 0 and size - 1 (inclusive) with the `i`th row not including the number `i`. """ with tf.compat.v1.name_scope( name, default_name='get_mixing_indices', values=[size]): size = tf.convert_to_tensor(value=size) dtype = size.dtype seed_stream = distributions.SeedStream(seed, salt='get_mixing_indices') first = tf.random.uniform([size], maxval=size-1, dtype=dtype, seed=seed_stream()) second = tf.random.uniform([size], maxval=size-2, dtype=dtype, seed=seed_stream()) third = tf.random.uniform([size], maxval=size-3, dtype=dtype, seed=seed_stream()) # Shift second if it is on top of or to the right of first second = tf.where(first < second, x=second, y=second + 1) smaller = tf.math.minimum(first, second) larger = tf.math.maximum(first, second) # Shift the third one so it does not coincide with either the first or the # second number. Assuming first < second, shift by 1 if the number is in # [first, second) and by 2 if the number is greater than or equal to the # second. third = tf.where(third < smaller, x=third, y=third + 1) third = tf.where(third < larger, x=third, y=third + 1) sample = tf.stack([first, second, third], axis=1) to_avoid = tf.expand_dims(tf.range(size), axis=-1) sample = tf.where(sample < to_avoid, x=sample, y=sample + 1) return sample
[ "Generates", "an", "array", "of", "indices", "suitable", "for", "mutation", "operation", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L712-L768
[ "def", "_get_mixing_indices", "(", "size", ",", "seed", "=", "None", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "default_name", "=", "'get_mixing_indices'", ",", "values", "=", "[", "size", "]", ")", ":", "size", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "size", ")", "dtype", "=", "size", ".", "dtype", "seed_stream", "=", "distributions", ".", "SeedStream", "(", "seed", ",", "salt", "=", "'get_mixing_indices'", ")", "first", "=", "tf", ".", "random", ".", "uniform", "(", "[", "size", "]", ",", "maxval", "=", "size", "-", "1", ",", "dtype", "=", "dtype", ",", "seed", "=", "seed_stream", "(", ")", ")", "second", "=", "tf", ".", "random", ".", "uniform", "(", "[", "size", "]", ",", "maxval", "=", "size", "-", "2", ",", "dtype", "=", "dtype", ",", "seed", "=", "seed_stream", "(", ")", ")", "third", "=", "tf", ".", "random", ".", "uniform", "(", "[", "size", "]", ",", "maxval", "=", "size", "-", "3", ",", "dtype", "=", "dtype", ",", "seed", "=", "seed_stream", "(", ")", ")", "# Shift second if it is on top of or to the right of first", "second", "=", "tf", ".", "where", "(", "first", "<", "second", ",", "x", "=", "second", ",", "y", "=", "second", "+", "1", ")", "smaller", "=", "tf", ".", "math", ".", "minimum", "(", "first", ",", "second", ")", "larger", "=", "tf", ".", "math", ".", "maximum", "(", "first", ",", "second", ")", "# Shift the third one so it does not coincide with either the first or the", "# second number. Assuming first < second, shift by 1 if the number is in", "# [first, second) and by 2 if the number is greater than or equal to the", "# second.", "third", "=", "tf", ".", "where", "(", "third", "<", "smaller", ",", "x", "=", "third", ",", "y", "=", "third", "+", "1", ")", "third", "=", "tf", ".", "where", "(", "third", "<", "larger", ",", "x", "=", "third", ",", "y", "=", "third", "+", "1", ")", "sample", "=", "tf", ".", "stack", "(", "[", "first", ",", "second", ",", "third", "]", ",", "axis", "=", "1", ")", "to_avoid", "=", "tf", ".", "expand_dims", "(", "tf", ".", "range", "(", "size", ")", ",", "axis", "=", "-", "1", ")", "sample", "=", "tf", ".", "where", "(", "sample", "<", "to_avoid", ",", "x", "=", "sample", ",", "y", "=", "sample", "+", "1", ")", "return", "sample" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
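The shifting scheme above avoids rejection sampling; for intuition only, here is a simpler (and slower) NumPy sketch that produces rows with the same three properties: indices distinct from one another and from the row index. The helper name is illustrative and this is not the TFP implementation.

```python
import numpy as np

def get_mixing_indices(size, rng=None):
    """Returns an int array of shape [size, 3]; row i excludes i and has no repeats."""
    rng = rng or np.random.default_rng()
    rows = []
    for i in range(size):
        candidates = np.delete(np.arange(size), i)            # all indices except i
        rows.append(rng.choice(candidates, size=3, replace=False))
    return np.stack(rows)

# sample = get_mixing_indices(10)
# assert all(i not in row for i, row in enumerate(sample))
```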
test
_ensure_list
Converts the input arg to a list if it is not a list already. Args: tensor_or_list: A `Tensor` or a Python list of `Tensor`s. The argument to convert to a list of `Tensor`s. Returns: A tuple of two elements. The first is a Python list of `Tensor`s containing the original arguments. The second is a boolean indicating whether the original argument was a list or tuple already.
tensorflow_probability/python/optimizer/differential_evolution.py
def _ensure_list(tensor_or_list): """Converts the input arg to a list if it is not a list already. Args: tensor_or_list: A `Tensor` or a Python list of `Tensor`s. The argument to convert to a list of `Tensor`s. Returns: A tuple of two elements. The first is a Python list of `Tensor`s containing the original arguments. The second is a boolean indicating whether the original argument was a list or tuple already. """ if isinstance(tensor_or_list, (list, tuple)): return list(tensor_or_list), True return [tensor_or_list], False
def _ensure_list(tensor_or_list): """Converts the input arg to a list if it is not a list already. Args: tensor_or_list: A `Tensor` or a Python list of `Tensor`s. The argument to convert to a list of `Tensor`s. Returns: A tuple of two elements. The first is a Python list of `Tensor`s containing the original arguments. The second is a boolean indicating whether the original argument was a list or tuple already. """ if isinstance(tensor_or_list, (list, tuple)): return list(tensor_or_list), True return [tensor_or_list], False
[ "Converts", "the", "input", "arg", "to", "a", "list", "if", "it", "is", "not", "a", "list", "already", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L771-L785
[ "def", "_ensure_list", "(", "tensor_or_list", ")", ":", "if", "isinstance", "(", "tensor_or_list", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "list", "(", "tensor_or_list", ")", ",", "True", "return", "[", "tensor_or_list", "]", ",", "False" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_get_tol
Gets a Tensor of type `dtype`, 0 if `tol` is None, validation optional.
tensorflow_probability/python/distributions/deterministic.py
def _get_tol(tol, dtype, validate_args): """Gets a Tensor of type `dtype`, 0 if `tol` is None, validation optional.""" if tol is None: return tf.convert_to_tensor(value=0, dtype=dtype) tol = tf.convert_to_tensor(value=tol, dtype=dtype) if validate_args: tol = distribution_util.with_dependencies([ assert_util.assert_non_negative( tol, message="Argument 'tol' must be non-negative") ], tol) return tol
def _get_tol(tol, dtype, validate_args): """Gets a Tensor of type `dtype`, 0 if `tol` is None, validation optional.""" if tol is None: return tf.convert_to_tensor(value=0, dtype=dtype) tol = tf.convert_to_tensor(value=tol, dtype=dtype) if validate_args: tol = distribution_util.with_dependencies([ assert_util.assert_non_negative( tol, message="Argument 'tol' must be non-negative") ], tol) return tol
[ "Gets", "a", "Tensor", "of", "type", "dtype", "0", "if", "tol", "is", "None", "validation", "optional", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/deterministic.py#L41-L52
[ "def", "_get_tol", "(", "tol", ",", "dtype", ",", "validate_args", ")", ":", "if", "tol", "is", "None", ":", "return", "tf", ".", "convert_to_tensor", "(", "value", "=", "0", ",", "dtype", "=", "dtype", ")", "tol", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "tol", ",", "dtype", "=", "dtype", ")", "if", "validate_args", ":", "tol", "=", "distribution_util", ".", "with_dependencies", "(", "[", "assert_util", ".", "assert_non_negative", "(", "tol", ",", "message", "=", "\"Argument 'tol' must be non-negative\"", ")", "]", ",", "tol", ")", "return", "tol" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
_kl_deterministic_distribution
Calculate the batched KL divergence `KL(a || b)` with `a` Deterministic. Args: a: instance of a Deterministic distribution object. b: instance of a Distribution distribution object. name: (optional) Name to use for created operations. Default is "kl_deterministic_distribution". Returns: Batchwise `KL(a || b)`.
tensorflow_probability/python/distributions/deterministic.py
def _kl_deterministic_distribution(a, b, name=None): """Calculate the batched KL divergence `KL(a || b)` with `a` Deterministic. Args: a: instance of a Deterministic distribution object. b: instance of a Distribution distribution object. name: (optional) Name to use for created operations. Default is "kl_deterministic_distribution". Returns: Batchwise `KL(a || b)`. """ with tf.name_scope(name or "kl_deterministic_distribution"): return -b.log_prob(a.loc)
def _kl_deterministic_distribution(a, b, name=None): """Calculate the batched KL divergence `KL(a || b)` with `a` Deterministic. Args: a: instance of a Deterministic distribution object. b: instance of a Distribution distribution object. name: (optional) Name to use for created operations. Default is "kl_deterministic_distribution". Returns: Batchwise `KL(a || b)`. """ with tf.name_scope(name or "kl_deterministic_distribution"): return -b.log_prob(a.loc)
[ "Calculate", "the", "batched", "KL", "divergence", "KL", "(", "a", "||", "b", ")", "with", "a", "Deterministic", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/deterministic.py#L407-L420
[ "def", "_kl_deterministic_distribution", "(", "a", ",", "b", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", "or", "\"kl_deterministic_distribution\"", ")", ":", "return", "-", "b", ".", "log_prob", "(", "a", ".", "loc", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
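A minimal eager-mode usage sketch for the record above. It assumes the KL registration wrapping this function (Deterministic paired with an arbitrary `Distribution`) is active, so that `tfd.kl_divergence` dispatches to it; the loc/scale values are illustrative only.

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

a = tfd.Deterministic(loc=[0.5, 1.5])
b = tfd.Normal(loc=[0., 1.], scale=[1., 2.])

kl = tfd.kl_divergence(a, b)   # batched KL, shape [2]
manual = -b.log_prob(a.loc)    # same values, per the definition above

print(kl.numpy(), manual.numpy())
```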
test
_sqrtx2p1
Implementation of `sqrt(1 + x**2)` which is stable despite large `x`.
tensorflow_probability/python/bijectors/sinh_arcsinh.py
def _sqrtx2p1(x): """Implementation of `sqrt(1 + x**2)` which is stable despite large `x`.""" sqrt_eps = np.sqrt(np.finfo(dtype_util.as_numpy_dtype(x.dtype)).eps) return tf.where( tf.abs(x) * sqrt_eps <= 1., tf.sqrt(x**2. + 1.), # For large x, calculating x**2 can overflow. This can be alleviated by # considering: # sqrt(1 + x**2) # = exp(0.5 log(1 + x**2)) # = exp(0.5 log(x**2 * (1 + x**-2))) # = exp(log(x) + 0.5 * log(1 + x**-2)) # = |x| * exp(0.5 log(1 + x**-2)) # = |x| * sqrt(1 + x**-2) # We omit the last term in this approximation. # When |x| > 1 / sqrt(machineepsilon), the second term will be 1, # due to sqrt(1 + x**-2) = 1. This is also true with the gradient term, # and higher order gradients, since the first order derivative of # sqrt(1 + x**-2) is -2 * x**-3 / (1 + x**-2) = -2 / (x**3 + x), # and all nth-order derivatives will be O(x**-(n + 2)). This makes any # gradient terms that contain any derivatives of sqrt(1 + x**-2) vanish. tf.abs(x))
def _sqrtx2p1(x): """Implementation of `sqrt(1 + x**2)` which is stable despite large `x`.""" sqrt_eps = np.sqrt(np.finfo(dtype_util.as_numpy_dtype(x.dtype)).eps) return tf.where( tf.abs(x) * sqrt_eps <= 1., tf.sqrt(x**2. + 1.), # For large x, calculating x**2 can overflow. This can be alleviated by # considering: # sqrt(1 + x**2) # = exp(0.5 log(1 + x**2)) # = exp(0.5 log(x**2 * (1 + x**-2))) # = exp(log(x) + 0.5 * log(1 + x**-2)) # = |x| * exp(0.5 log(1 + x**-2)) # = |x| * sqrt(1 + x**-2) # We omit the last term in this approximation. # When |x| > 1 / sqrt(machineepsilon), the second term will be 1, # due to sqrt(1 + x**-2) = 1. This is also true with the gradient term, # and higher order gradients, since the first order derivative of # sqrt(1 + x**-2) is -2 * x**-3 / (1 + x**-2) = -2 / (x**3 + x), # and all nth-order derivatives will be O(x**-(n + 2)). This makes any # gradient terms that contain any derivatives of sqrt(1 + x**-2) vanish. tf.abs(x))
[ "Implementation", "of", "sqrt", "(", "1", "+", "x", "**", "2", ")", "which", "is", "stable", "despite", "large", "x", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/sinh_arcsinh.py#L36-L57
[ "def", "_sqrtx2p1", "(", "x", ")", ":", "sqrt_eps", "=", "np", ".", "sqrt", "(", "np", ".", "finfo", "(", "dtype_util", ".", "as_numpy_dtype", "(", "x", ".", "dtype", ")", ")", ".", "eps", ")", "return", "tf", ".", "where", "(", "tf", ".", "abs", "(", "x", ")", "*", "sqrt_eps", "<=", "1.", ",", "tf", ".", "sqrt", "(", "x", "**", "2.", "+", "1.", ")", ",", "# For large x, calculating x**2 can overflow. This can be alleviated by", "# considering:", "# sqrt(1 + x**2)", "# = exp(0.5 log(1 + x**2))", "# = exp(0.5 log(x**2 * (1 + x**-2)))", "# = exp(log(x) + 0.5 * log(1 + x**-2))", "# = |x| * exp(0.5 log(1 + x**-2))", "# = |x| * sqrt(1 + x**-2)", "# We omit the last term in this approximation.", "# When |x| > 1 / sqrt(machineepsilon), the second term will be 1,", "# due to sqrt(1 + x**-2) = 1. This is also true with the gradient term,", "# and higher order gradients, since the first order derivative of", "# sqrt(1 + x**-2) is -2 * x**-3 / (1 + x**-2) = -2 / (x**3 + x),", "# and all nth-order derivatives will be O(x**-(n + 2)). This makes any", "# gradient terms that contain any derivatives of sqrt(1 + x**-2) vanish.", "tf", ".", "abs", "(", "x", ")", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
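A standalone NumPy sketch of the same branch logic (the private helper itself is not imported). It shows why the guard is needed: squaring a large float32 overflows, while the stable branch simply returns `|x|`.

```python
import numpy as np

def sqrtx2p1_stable(x):
  x = np.asarray(x, dtype=np.float32)
  sqrt_eps = np.sqrt(np.finfo(np.float32).eps)
  safe = np.abs(x) * sqrt_eps <= 1.
  # np.where evaluates both branches, so zero out the unsafe entries before
  # squaring; for |x| > 1/sqrt(eps), sqrt(1 + x**-2) == 1 to machine precision.
  squared = np.where(safe, x, 0.) ** 2.
  return np.where(safe, np.sqrt(squared + 1.), np.abs(x))

big = np.float32(1e20)
print(np.float32(big * big))   # inf -- the naive x**2 overflows float32
print(sqrtx2p1_stable(big))    # 1e+20 -- stable path returns |x|
print(sqrtx2p1_stable(3.0))    # ~3.1622777 == sqrt(10)
```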
test
log1psquare
Numerically stable calculation of `log(1 + x**2)` for small or large `|x|`. For sufficiently large `x` we use the following observation: ```none log(1 + x**2) = 2 log(|x|) + log(1 + 1 / x**2) --> 2 log(|x|) as x --> inf ``` Numerically, `log(1 + 1 / x**2)` is `0` when `1 / x**2` is small relative to machine epsilon. Args: x: Float `Tensor` input. name: Python string indicating the name of the TensorFlow operation. Default value: `'log1psquare'`. Returns: log1psq: Float `Tensor` representing `log(1. + x**2.)`.
tensorflow_probability/python/math/numeric.py
def log1psquare(x, name=None): """Numerically stable calculation of `log(1 + x**2)` for small or large `|x|`. For sufficiently large `x` we use the following observation: ```none log(1 + x**2) = 2 log(|x|) + log(1 + 1 / x**2) --> 2 log(|x|) as x --> inf ``` Numerically, `log(1 + 1 / x**2)` is `0` when `1 / x**2` is small relative to machine epsilon. Args: x: Float `Tensor` input. name: Python string indicating the name of the TensorFlow operation. Default value: `'log1psquare'`. Returns: log1psq: Float `Tensor` representing `log(1. + x**2.)`. """ with tf.compat.v1.name_scope(name, 'log1psquare', [x]): x = tf.convert_to_tensor(value=x, dtype_hint=tf.float32, name='x') dtype = x.dtype.as_numpy_dtype eps = np.finfo(dtype).eps.astype(np.float64) is_large = tf.abs(x) > (eps**-0.5).astype(dtype) # Mask out small x's so the gradient correctly propagates. abs_large_x = tf.where(is_large, tf.abs(x), tf.ones_like(x)) return tf.where(is_large, 2. * tf.math.log(abs_large_x), tf.math.log1p(tf.square(x)))
def log1psquare(x, name=None): """Numerically stable calculation of `log(1 + x**2)` for small or large `|x|`. For sufficiently large `x` we use the following observation: ```none log(1 + x**2) = 2 log(|x|) + log(1 + 1 / x**2) --> 2 log(|x|) as x --> inf ``` Numerically, `log(1 + 1 / x**2)` is `0` when `1 / x**2` is small relative to machine epsilon. Args: x: Float `Tensor` input. name: Python string indicating the name of the TensorFlow operation. Default value: `'log1psquare'`. Returns: log1psq: Float `Tensor` representing `log(1. + x**2.)`. """ with tf.compat.v1.name_scope(name, 'log1psquare', [x]): x = tf.convert_to_tensor(value=x, dtype_hint=tf.float32, name='x') dtype = x.dtype.as_numpy_dtype eps = np.finfo(dtype).eps.astype(np.float64) is_large = tf.abs(x) > (eps**-0.5).astype(dtype) # Mask out small x's so the gradient correctly propagates. abs_large_x = tf.where(is_large, tf.abs(x), tf.ones_like(x)) return tf.where(is_large, 2. * tf.math.log(abs_large_x), tf.math.log1p(tf.square(x)))
[ "Numerically", "stable", "calculation", "of", "log", "(", "1", "+", "x", "**", "2", ")", "for", "small", "or", "large", "|x|", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/numeric.py#L25-L56
[ "def", "log1psquare", "(", "x", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'log1psquare'", ",", "[", "x", "]", ")", ":", "x", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x", ",", "dtype_hint", "=", "tf", ".", "float32", ",", "name", "=", "'x'", ")", "dtype", "=", "x", ".", "dtype", ".", "as_numpy_dtype", "eps", "=", "np", ".", "finfo", "(", "dtype", ")", ".", "eps", ".", "astype", "(", "np", ".", "float64", ")", "is_large", "=", "tf", ".", "abs", "(", "x", ")", ">", "(", "eps", "**", "-", "0.5", ")", ".", "astype", "(", "dtype", ")", "# Mask out small x's so the gradient correctly propagates.", "abs_large_x", "=", "tf", ".", "where", "(", "is_large", ",", "tf", ".", "abs", "(", "x", ")", ",", "tf", ".", "ones_like", "(", "x", ")", ")", "return", "tf", ".", "where", "(", "is_large", ",", "2.", "*", "tf", ".", "math", ".", "log", "(", "abs_large_x", ")", ",", "tf", ".", "math", ".", "log1p", "(", "tf", ".", "square", "(", "x", ")", ")", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
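An eager-mode usage sketch, assuming the function is exported as `tfp.math.log1psquare`. The last input is chosen so that the naive `log1p(x**2)` overflows in float64 while the stable form matches `2 * log|x|`.

```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

x = tf.constant([0.5, 3., 1e200], dtype=tf.float64)
stable = tfp.math.log1psquare(x)
naive = tf.math.log1p(tf.square(x))   # (1e200)**2 overflows float64

print(stable.numpy())      # ~[0.2231, 2.3026, 921.034]
print(naive.numpy())       # last entry is inf
print(2. * np.log(1e200))  # ~921.034, matches the stable value for large |x|
```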
test
soft_threshold
Soft Thresholding operator. This operator is defined by the equations ```none { x[i] - gamma, x[i] > gamma SoftThreshold(x, gamma)[i] = { 0, x[i] == gamma { x[i] + gamma, x[i] < -gamma ``` In the context of proximal gradient methods, we have ```none SoftThreshold(x, gamma) = prox_{gamma L1}(x) ``` where `prox` is the proximity operator. Thus the soft thresholding operator is used in proximal gradient descent for optimizing a smooth function with (non-smooth) L1 regularization, as outlined below. The proximity operator is defined as: ```none prox_r(x) = argmin{ r(z) + 0.5 ||x - z||_2**2 : z }, ``` where `r` is a (weakly) convex function, not necessarily differentiable. Because the L2 norm is strictly convex, the above argmin is unique. One important application of the proximity operator is as follows. Let `L` be a convex and differentiable function with Lipschitz-continuous gradient. Let `R` be a convex lower semicontinuous function which is possibly nondifferentiable. Let `gamma` be an arbitrary positive real. Then ```none x_star = argmin{ L(x) + R(x) : x } ``` if and only if the fixed-point equation is satisfied: ```none x_star = prox_{gamma R}(x_star - gamma grad L(x_star)) ``` Proximal gradient descent thus typically consists of choosing an initial value `x^{(0)}` and repeatedly applying the update ```none x^{(k+1)} = prox_{gamma^{(k)} R}(x^{(k)} - gamma^{(k)} grad L(x^{(k)})) ``` where `gamma` is allowed to vary from iteration to iteration. Specializing to the case where `R(x) = ||x||_1`, we minimize `L(x) + ||x||_1` by repeatedly applying the update ``` x^{(k+1)} = SoftThreshold(x - gamma grad L(x^{(k)}), gamma) ``` (This idea can also be extended to second-order approximations, although the multivariate case does not have a known closed form like above.) Args: x: `float` `Tensor` representing the input to the SoftThreshold function. threshold: nonnegative scalar, `float` `Tensor` representing the radius of the interval on which each coordinate of SoftThreshold takes the value zero. Denoted `gamma` above. name: Python string indicating the name of the TensorFlow operation. Default value: `'soft_threshold'`. Returns: softthreshold: `float` `Tensor` with the same shape and dtype as `x`, representing the value of the SoftThreshold function. #### References [1]: Yu, Yao-Liang. The Proximity Operator. https://www.cs.cmu.edu/~suvrit/teach/yaoliang_proximity.pdf [2]: Wikipedia Contributors. Proximal gradient methods for learning. _Wikipedia, The Free Encyclopedia_, 2018. https://en.wikipedia.org/wiki/Proximal_gradient_methods_for_learning
tensorflow_probability/python/math/numeric.py
def soft_threshold(x, threshold, name=None): """Soft Thresholding operator. This operator is defined by the equations ```none { x[i] - gamma, x[i] > gamma SoftThreshold(x, gamma)[i] = { 0, x[i] == gamma { x[i] + gamma, x[i] < -gamma ``` In the context of proximal gradient methods, we have ```none SoftThreshold(x, gamma) = prox_{gamma L1}(x) ``` where `prox` is the proximity operator. Thus the soft thresholding operator is used in proximal gradient descent for optimizing a smooth function with (non-smooth) L1 regularization, as outlined below. The proximity operator is defined as: ```none prox_r(x) = argmin{ r(z) + 0.5 ||x - z||_2**2 : z }, ``` where `r` is a (weakly) convex function, not necessarily differentiable. Because the L2 norm is strictly convex, the above argmin is unique. One important application of the proximity operator is as follows. Let `L` be a convex and differentiable function with Lipschitz-continuous gradient. Let `R` be a convex lower semicontinuous function which is possibly nondifferentiable. Let `gamma` be an arbitrary positive real. Then ```none x_star = argmin{ L(x) + R(x) : x } ``` if and only if the fixed-point equation is satisfied: ```none x_star = prox_{gamma R}(x_star - gamma grad L(x_star)) ``` Proximal gradient descent thus typically consists of choosing an initial value `x^{(0)}` and repeatedly applying the update ```none x^{(k+1)} = prox_{gamma^{(k)} R}(x^{(k)} - gamma^{(k)} grad L(x^{(k)})) ``` where `gamma` is allowed to vary from iteration to iteration. Specializing to the case where `R(x) = ||x||_1`, we minimize `L(x) + ||x||_1` by repeatedly applying the update ``` x^{(k+1)} = SoftThreshold(x - gamma grad L(x^{(k)}), gamma) ``` (This idea can also be extended to second-order approximations, although the multivariate case does not have a known closed form like above.) Args: x: `float` `Tensor` representing the input to the SoftThreshold function. threshold: nonnegative scalar, `float` `Tensor` representing the radius of the interval on which each coordinate of SoftThreshold takes the value zero. Denoted `gamma` above. name: Python string indicating the name of the TensorFlow operation. Default value: `'soft_threshold'`. Returns: softthreshold: `float` `Tensor` with the same shape and dtype as `x`, representing the value of the SoftThreshold function. #### References [1]: Yu, Yao-Liang. The Proximity Operator. https://www.cs.cmu.edu/~suvrit/teach/yaoliang_proximity.pdf [2]: Wikipedia Contributors. Proximal gradient methods for learning. _Wikipedia, The Free Encyclopedia_, 2018. https://en.wikipedia.org/wiki/Proximal_gradient_methods_for_learning """ # https://math.stackexchange.com/questions/471339/derivation-of-soft-thresholding-operator with tf.compat.v1.name_scope(name, 'soft_threshold', [x, threshold]): x = tf.convert_to_tensor(value=x, name='x') threshold = tf.convert_to_tensor( value=threshold, dtype=x.dtype, name='threshold') return tf.sign(x) * tf.maximum(tf.abs(x) - threshold, 0.)
def soft_threshold(x, threshold, name=None): """Soft Thresholding operator. This operator is defined by the equations ```none { x[i] - gamma, x[i] > gamma SoftThreshold(x, gamma)[i] = { 0, x[i] == gamma { x[i] + gamma, x[i] < -gamma ``` In the context of proximal gradient methods, we have ```none SoftThreshold(x, gamma) = prox_{gamma L1}(x) ``` where `prox` is the proximity operator. Thus the soft thresholding operator is used in proximal gradient descent for optimizing a smooth function with (non-smooth) L1 regularization, as outlined below. The proximity operator is defined as: ```none prox_r(x) = argmin{ r(z) + 0.5 ||x - z||_2**2 : z }, ``` where `r` is a (weakly) convex function, not necessarily differentiable. Because the L2 norm is strictly convex, the above argmin is unique. One important application of the proximity operator is as follows. Let `L` be a convex and differentiable function with Lipschitz-continuous gradient. Let `R` be a convex lower semicontinuous function which is possibly nondifferentiable. Let `gamma` be an arbitrary positive real. Then ```none x_star = argmin{ L(x) + R(x) : x } ``` if and only if the fixed-point equation is satisfied: ```none x_star = prox_{gamma R}(x_star - gamma grad L(x_star)) ``` Proximal gradient descent thus typically consists of choosing an initial value `x^{(0)}` and repeatedly applying the update ```none x^{(k+1)} = prox_{gamma^{(k)} R}(x^{(k)} - gamma^{(k)} grad L(x^{(k)})) ``` where `gamma` is allowed to vary from iteration to iteration. Specializing to the case where `R(x) = ||x||_1`, we minimize `L(x) + ||x||_1` by repeatedly applying the update ``` x^{(k+1)} = SoftThreshold(x - gamma grad L(x^{(k)}), gamma) ``` (This idea can also be extended to second-order approximations, although the multivariate case does not have a known closed form like above.) Args: x: `float` `Tensor` representing the input to the SoftThreshold function. threshold: nonnegative scalar, `float` `Tensor` representing the radius of the interval on which each coordinate of SoftThreshold takes the value zero. Denoted `gamma` above. name: Python string indicating the name of the TensorFlow operation. Default value: `'soft_threshold'`. Returns: softthreshold: `float` `Tensor` with the same shape and dtype as `x`, representing the value of the SoftThreshold function. #### References [1]: Yu, Yao-Liang. The Proximity Operator. https://www.cs.cmu.edu/~suvrit/teach/yaoliang_proximity.pdf [2]: Wikipedia Contributors. Proximal gradient methods for learning. _Wikipedia, The Free Encyclopedia_, 2018. https://en.wikipedia.org/wiki/Proximal_gradient_methods_for_learning """ # https://math.stackexchange.com/questions/471339/derivation-of-soft-thresholding-operator with tf.compat.v1.name_scope(name, 'soft_threshold', [x, threshold]): x = tf.convert_to_tensor(value=x, name='x') threshold = tf.convert_to_tensor( value=threshold, dtype=x.dtype, name='threshold') return tf.sign(x) * tf.maximum(tf.abs(x) - threshold, 0.)
[ "Soft", "Thresholding", "operator", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/numeric.py#L59-L149
[ "def", "soft_threshold", "(", "x", ",", "threshold", ",", "name", "=", "None", ")", ":", "# https://math.stackexchange.com/questions/471339/derivation-of-soft-thresholding-operator", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'soft_threshold'", ",", "[", "x", ",", "threshold", "]", ")", ":", "x", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x", ",", "name", "=", "'x'", ")", "threshold", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "threshold", ",", "dtype", "=", "x", ".", "dtype", ",", "name", "=", "'threshold'", ")", "return", "tf", ".", "sign", "(", "x", ")", "*", "tf", ".", "maximum", "(", "tf", ".", "abs", "(", "x", ")", "-", "threshold", ",", "0.", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
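An eager-mode usage sketch, assuming the operator is exported as `tfp.math.soft_threshold`. Note that the zero branch of the piecewise definition applies whenever `-gamma <= x[i] <= gamma`, not only at `x[i] == gamma`; the `sign(x) * max(|x| - gamma, 0)` form the code uses makes this explicit. The one-step proximal-gradient update below is an illustrative toy problem.

```python
import tensorflow as tf
import tensorflow_probability as tfp

x = tf.constant([-3., -0.5, 0., 0.5, 3.])
print(tfp.math.soft_threshold(x, threshold=1.).numpy())
# ==> [-2.  0.  0.  0.  2.]

# One proximal-gradient step for L(w) = 0.5 * ||w - y||**2 with L1 weight gamma.
y = tf.constant([2., -0.3, 0.1])
w = tf.zeros_like(y)
step, gamma = 1.0, 0.5
grad = w - y                                        # gradient of the smooth part
w = tfp.math.soft_threshold(w - step * grad, step * gamma)
print(w.numpy())  # ==> [1.5 0.  0. ]
```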
test
clip_by_value_preserve_gradient
Clips values to a specified min and max while leaving gradient unaltered. Like `tf.clip_by_value`, this function returns a tensor of the same type and shape as input `t` but with values clamped to be no smaller than to `clip_value_min` and no larger than `clip_value_max`. Unlike `tf.clip_by_value`, the gradient is unaffected by this op, i.e., ```python tf.gradients(tfp.math.clip_by_value_preserve_gradient(x), x)[0] # ==> ones_like(x) ``` Note: `clip_value_min` needs to be smaller or equal to `clip_value_max` for correct results. Args: t: A `Tensor`. clip_value_min: A scalar `Tensor`, or a `Tensor` with the same shape as `t`. The minimum value to clip by. clip_value_max: A scalar `Tensor`, or a `Tensor` with the same shape as `t`. The maximum value to clip by. name: A name for the operation (optional). Default value: `'clip_by_value_preserve_gradient'`. Returns: clipped_t: A clipped `Tensor`.
tensorflow_probability/python/math/numeric.py
def clip_by_value_preserve_gradient(t, clip_value_min, clip_value_max, name=None): """Clips values to a specified min and max while leaving gradient unaltered. Like `tf.clip_by_value`, this function returns a tensor of the same type and shape as input `t` but with values clamped to be no smaller than to `clip_value_min` and no larger than `clip_value_max`. Unlike `tf.clip_by_value`, the gradient is unaffected by this op, i.e., ```python tf.gradients(tfp.math.clip_by_value_preserve_gradient(x), x)[0] # ==> ones_like(x) ``` Note: `clip_value_min` needs to be smaller or equal to `clip_value_max` for correct results. Args: t: A `Tensor`. clip_value_min: A scalar `Tensor`, or a `Tensor` with the same shape as `t`. The minimum value to clip by. clip_value_max: A scalar `Tensor`, or a `Tensor` with the same shape as `t`. The maximum value to clip by. name: A name for the operation (optional). Default value: `'clip_by_value_preserve_gradient'`. Returns: clipped_t: A clipped `Tensor`. """ with tf.compat.v1.name_scope(name, 'clip_by_value_preserve_gradient', [t, clip_value_min, clip_value_max]): t = tf.convert_to_tensor(value=t, name='t') clip_t = tf.clip_by_value(t, clip_value_min, clip_value_max) return t + tf.stop_gradient(clip_t - t)
def clip_by_value_preserve_gradient(t, clip_value_min, clip_value_max, name=None): """Clips values to a specified min and max while leaving gradient unaltered. Like `tf.clip_by_value`, this function returns a tensor of the same type and shape as input `t` but with values clamped to be no smaller than to `clip_value_min` and no larger than `clip_value_max`. Unlike `tf.clip_by_value`, the gradient is unaffected by this op, i.e., ```python tf.gradients(tfp.math.clip_by_value_preserve_gradient(x), x)[0] # ==> ones_like(x) ``` Note: `clip_value_min` needs to be smaller or equal to `clip_value_max` for correct results. Args: t: A `Tensor`. clip_value_min: A scalar `Tensor`, or a `Tensor` with the same shape as `t`. The minimum value to clip by. clip_value_max: A scalar `Tensor`, or a `Tensor` with the same shape as `t`. The maximum value to clip by. name: A name for the operation (optional). Default value: `'clip_by_value_preserve_gradient'`. Returns: clipped_t: A clipped `Tensor`. """ with tf.compat.v1.name_scope(name, 'clip_by_value_preserve_gradient', [t, clip_value_min, clip_value_max]): t = tf.convert_to_tensor(value=t, name='t') clip_t = tf.clip_by_value(t, clip_value_min, clip_value_max) return t + tf.stop_gradient(clip_t - t)
[ "Clips", "values", "to", "a", "specified", "min", "and", "max", "while", "leaving", "gradient", "unaltered", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/numeric.py#L152-L185
[ "def", "clip_by_value_preserve_gradient", "(", "t", ",", "clip_value_min", ",", "clip_value_max", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'clip_by_value_preserve_gradient'", ",", "[", "t", ",", "clip_value_min", ",", "clip_value_max", "]", ")", ":", "t", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "t", ",", "name", "=", "'t'", ")", "clip_t", "=", "tf", ".", "clip_by_value", "(", "t", ",", "clip_value_min", ",", "clip_value_max", ")", "return", "t", "+", "tf", ".", "stop_gradient", "(", "clip_t", "-", "t", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
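An eager-mode usage sketch, assuming the op is exported as `tfp.math.clip_by_value_preserve_gradient`: the forward values are clipped, but the gradient of the (summed) output with respect to `x` is all ones.

```python
import tensorflow as tf
import tensorflow_probability as tfp

x = tf.constant([-2., 0.5, 3.])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tfp.math.clip_by_value_preserve_gradient(x, 0., 1.)

print(y.numpy())                     # ==> [0.  0.5 1. ]  values are clipped
print(tape.gradient(y, x).numpy())   # ==> [1. 1. 1.]     gradient passes through
```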
test
build_input_pipeline
Build an iterator over training batches.
tensorflow_probability/examples/generative_adversarial_network.py
def build_input_pipeline(train_images, batch_size): """Build an iterator over training batches.""" training_dataset = tf.data.Dataset.from_tensor_slices(train_images) training_batches = training_dataset.shuffle( 50000, reshuffle_each_iteration=True).repeat().batch(batch_size) training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches) images = training_iterator.get_next() return images
def build_input_pipeline(train_images, batch_size): """Build an iterator over training batches.""" training_dataset = tf.data.Dataset.from_tensor_slices(train_images) training_batches = training_dataset.shuffle( 50000, reshuffle_each_iteration=True).repeat().batch(batch_size) training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches) images = training_iterator.get_next() return images
[ "Build", "an", "iterator", "over", "training", "batches", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/generative_adversarial_network.py#L104-L112
[ "def", "build_input_pipeline", "(", "train_images", ",", "batch_size", ")", ":", "training_dataset", "=", "tf", ".", "data", ".", "Dataset", ".", "from_tensor_slices", "(", "train_images", ")", "training_batches", "=", "training_dataset", ".", "shuffle", "(", "50000", ",", "reshuffle_each_iteration", "=", "True", ")", ".", "repeat", "(", ")", ".", "batch", "(", "batch_size", ")", "training_iterator", "=", "tf", ".", "compat", ".", "v1", ".", "data", ".", "make_one_shot_iterator", "(", "training_batches", ")", "images", "=", "training_iterator", ".", "get_next", "(", ")", "return", "images" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
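An equivalent TF2-style eager sketch of the same pipeline (direct dataset iteration instead of the TF1 one-shot iterator used by the function above), with synthetic stand-in images of an assumed MNIST-like shape.

```python
import numpy as np
import tensorflow as tf

fake_images = np.random.rand(100, 28, 28, 1).astype(np.float32)
batches = (tf.data.Dataset.from_tensor_slices(fake_images)
           .shuffle(50000, reshuffle_each_iteration=True)
           .repeat()
           .batch(32))
first_batch = next(iter(batches))
print(first_batch.shape)  # ==> (32, 28, 28, 1)
```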
test
plot_generated_images
Save a synthetic image as a PNG file. Args: images: samples of synthetic images generated by the generative network. fname: Python `str`, filename to save the plot to.
tensorflow_probability/examples/generative_adversarial_network.py
def plot_generated_images(images, fname): """Save a synthetic image as a PNG file. Args: images: samples of synthetic images generated by the generative network. fname: Python `str`, filename to save the plot to. """ fig = plt.figure(figsize=(4, 4)) canvas = backend_agg.FigureCanvasAgg(fig) for i, image in enumerate(images): ax = fig.add_subplot(4, 4, i + 1) plt.axis('off') ax.set_xticklabels([]) ax.set_yticklabels([]) ax.imshow(image.reshape(IMAGE_SHAPE[:-1]), cmap='Greys_r') fig.tight_layout() plt.subplots_adjust(wspace=0.05, hspace=0.05) canvas.print_figure(fname, format='png')
def plot_generated_images(images, fname): """Save a synthetic image as a PNG file. Args: images: samples of synthetic images generated by the generative network. fname: Python `str`, filename to save the plot to. """ fig = plt.figure(figsize=(4, 4)) canvas = backend_agg.FigureCanvasAgg(fig) for i, image in enumerate(images): ax = fig.add_subplot(4, 4, i + 1) plt.axis('off') ax.set_xticklabels([]) ax.set_yticklabels([]) ax.imshow(image.reshape(IMAGE_SHAPE[:-1]), cmap='Greys_r') fig.tight_layout() plt.subplots_adjust(wspace=0.05, hspace=0.05) canvas.print_figure(fname, format='png')
[ "Save", "a", "synthetic", "image", "as", "a", "PNG", "file", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/generative_adversarial_network.py#L123-L142
[ "def", "plot_generated_images", "(", "images", ",", "fname", ")", ":", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "4", ",", "4", ")", ")", "canvas", "=", "backend_agg", ".", "FigureCanvasAgg", "(", "fig", ")", "for", "i", ",", "image", "in", "enumerate", "(", "images", ")", ":", "ax", "=", "fig", ".", "add_subplot", "(", "4", ",", "4", ",", "i", "+", "1", ")", "plt", ".", "axis", "(", "'off'", ")", "ax", ".", "set_xticklabels", "(", "[", "]", ")", "ax", ".", "set_yticklabels", "(", "[", "]", ")", "ax", ".", "imshow", "(", "image", ".", "reshape", "(", "IMAGE_SHAPE", "[", ":", "-", "1", "]", ")", ",", "cmap", "=", "'Greys_r'", ")", "fig", ".", "tight_layout", "(", ")", "plt", ".", "subplots_adjust", "(", "wspace", "=", "0.05", ",", "hspace", "=", "0.05", ")", "canvas", ".", "print_figure", "(", "fname", ",", "format", "=", "'png'", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
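A usage sketch with random noise in place of generator samples. It assumes the function above is in scope together with the example script's matplotlib imports, and that `IMAGE_SHAPE = (28, 28, 1)` (an assumption matching MNIST-sized images); the output path is illustrative.

```python
import numpy as np
import matplotlib
matplotlib.use('Agg')                       # headless backend for print_figure
from matplotlib import pyplot as plt
from matplotlib.backends import backend_agg

IMAGE_SHAPE = (28, 28, 1)                   # assumed image shape
noise_images = np.random.rand(16, 28 * 28)  # 16 fake "samples", one per grid cell
plot_generated_images(noise_images, '/tmp/generated_images.png')
```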
test
SmilesGrammar.convert_to_string
Converts a sequence of productions into a string of terminal symbols. Args: productions: Tensor of shape [1, num_productions, num_production_rules]. Slices along the `num_productions` dimension represent one-hot vectors. Returns: str that concatenates all terminal symbols from `productions`. Raises: ValueError: If the first production rule does not begin with `self.start_symbol`.
tensorflow_probability/examples/grammar_vae.py
def convert_to_string(self, productions): """Converts a sequence of productions into a string of terminal symbols. Args: productions: Tensor of shape [1, num_productions, num_production_rules]. Slices along the `num_productions` dimension represent one-hot vectors. Returns: str that concatenates all terminal symbols from `productions`. Raises: ValueError: If the first production rule does not begin with `self.start_symbol`. """ symbols = [] for production in tf.unstack(productions, axis=1): lhs, rhs = self.production_rules[tf.argmax(input=production, axis=-1)] if not symbols: # first iteration if lhs != self.start_symbol: raise ValueError("`productions` must begin with `self.start_symbol`.") symbols = rhs else: # Greedily unroll the nonterminal symbols based on the first occurrence # in a linear sequence. index = symbols.index(lhs) symbols = symbols[:index] + rhs + symbols[index + 1:] string = "".join(symbols) return string
def convert_to_string(self, productions): """Converts a sequence of productions into a string of terminal symbols. Args: productions: Tensor of shape [1, num_productions, num_production_rules]. Slices along the `num_productions` dimension represent one-hot vectors. Returns: str that concatenates all terminal symbols from `productions`. Raises: ValueError: If the first production rule does not begin with `self.start_symbol`. """ symbols = [] for production in tf.unstack(productions, axis=1): lhs, rhs = self.production_rules[tf.argmax(input=production, axis=-1)] if not symbols: # first iteration if lhs != self.start_symbol: raise ValueError("`productions` must begin with `self.start_symbol`.") symbols = rhs else: # Greedily unroll the nonterminal symbols based on the first occurrence # in a linear sequence. index = symbols.index(lhs) symbols = symbols[:index] + rhs + symbols[index + 1:] string = "".join(symbols) return string
[ "Converts", "a", "sequence", "of", "productions", "into", "a", "string", "of", "terminal", "symbols", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/grammar_vae.py#L137-L164
[ "def", "convert_to_string", "(", "self", ",", "productions", ")", ":", "symbols", "=", "[", "]", "for", "production", "in", "tf", ".", "unstack", "(", "productions", ",", "axis", "=", "1", ")", ":", "lhs", ",", "rhs", "=", "self", ".", "production_rules", "[", "tf", ".", "argmax", "(", "input", "=", "production", ",", "axis", "=", "-", "1", ")", "]", "if", "not", "symbols", ":", "# first iteration", "if", "lhs", "!=", "self", ".", "start_symbol", ":", "raise", "ValueError", "(", "\"`productions` must begin with `self.start_symbol`.\"", ")", "symbols", "=", "rhs", "else", ":", "# Greedily unroll the nonterminal symbols based on the first occurrence", "# in a linear sequence.", "index", "=", "symbols", ".", "index", "(", "lhs", ")", "symbols", "=", "symbols", "[", ":", "index", "]", "+", "rhs", "+", "symbols", "[", "index", "+", "1", ":", "]", "string", "=", "\"\"", ".", "join", "(", "symbols", ")", "return", "string" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
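A pure-Python toy sketch of the same greedy left-most expansion, using a small hypothetical grammar and plain rule indices rather than the SMILES grammar and one-hot production tensors of the record above.

```python
# Hypothetical two-nonterminal grammar, for illustration only.
production_rules = [
    ('smiles', ['chain']),       # rule 0: smiles -> chain
    ('chain', ['C', 'chain']),   # rule 1: chain -> C chain
    ('chain', ['C']),            # rule 2: chain -> C
]
start_symbol = 'smiles'

def convert_to_string(rule_indices):
  symbols = []
  for idx in rule_indices:
    lhs, rhs = production_rules[idx]
    if not symbols:                      # first production must expand the start symbol
      assert lhs == start_symbol
      symbols = list(rhs)
    else:                                # expand the left-most occurrence of lhs
      i = symbols.index(lhs)
      symbols = symbols[:i] + list(rhs) + symbols[i + 1:]
  return ''.join(symbols)

print(convert_to_string([0, 1, 1, 2]))   # ==> 'CCC'
```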
test
SmilesGrammar.mask
Produces a masking tensor for (in)valid production rules. Args: symbol: str, a symbol in the grammar. on_value: Value to use for a valid production rule. off_value: Value to use for an invalid production rule. Returns: Tensor of shape [1, num_production_rules]. An element is `on_value` if its corresponding production rule has `symbol` on its left-hand-side; the element is `off_value` otherwise.
tensorflow_probability/examples/grammar_vae.py
def mask(self, symbol, on_value, off_value): """Produces a masking tensor for (in)valid production rules. Args: symbol: str, a symbol in the grammar. on_value: Value to use for a valid production rule. off_value: Value to use for an invalid production rule. Returns: Tensor of shape [1, num_production_rules]. An element is `on_value` if its corresponding production rule has `symbol` on its left-hand-side; the element is `off_value` otherwise. """ mask_values = [on_value if lhs == symbol else off_value for lhs, _ in self.production_rules] mask_values = tf.reshape(mask_values, [1, len(self.production_rules)]) return mask_values
def mask(self, symbol, on_value, off_value): """Produces a masking tensor for (in)valid production rules. Args: symbol: str, a symbol in the grammar. on_value: Value to use for a valid production rule. off_value: Value to use for an invalid production rule. Returns: Tensor of shape [1, num_production_rules]. An element is `on_value` if its corresponding production rule has `symbol` on its left-hand-side; the element is `off_value` otherwise. """ mask_values = [on_value if lhs == symbol else off_value for lhs, _ in self.production_rules] mask_values = tf.reshape(mask_values, [1, len(self.production_rules)]) return mask_values
[ "Produces", "a", "masking", "tensor", "for", "(", "in", ")", "valid", "production", "rules", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/grammar_vae.py#L166-L182
[ "def", "mask", "(", "self", ",", "symbol", ",", "on_value", ",", "off_value", ")", ":", "mask_values", "=", "[", "on_value", "if", "lhs", "==", "symbol", "else", "off_value", "for", "lhs", ",", "_", "in", "self", ".", "production_rules", "]", "mask_values", "=", "tf", ".", "reshape", "(", "mask_values", ",", "[", "1", ",", "len", "(", "self", ".", "production_rules", ")", "]", ")", "return", "mask_values" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
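A small eager-mode sketch of the masking idea with the same hypothetical toy rule list: rules whose left-hand side matches the queried symbol get `on_value`, all others `off_value` (here the logits-style values 0 and -1e9).

```python
import tensorflow as tf

production_rules = [('smiles', ['chain']),
                    ('chain', ['C', 'chain']),
                    ('chain', ['C'])]

mask = tf.reshape(
    [0. if lhs == 'chain' else -1e9 for lhs, _ in production_rules],
    [1, len(production_rules)])
print(mask.numpy())  # ==> [[-1.e+09  0.e+00  0.e+00]]
```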
test
ProbabilisticGrammar.call
Runs the model forward to generate a sequence of productions. Args: inputs: Unused. Returns: productions: Tensor of shape [1, num_productions, num_production_rules]. Slices along the `num_productions` dimension represent one-hot vectors.
tensorflow_probability/examples/grammar_vae.py
def call(self, inputs): """Runs the model forward to generate a sequence of productions. Args: inputs: Unused. Returns: productions: Tensor of shape [1, num_productions, num_production_rules]. Slices along the `num_productions` dimension represent one-hot vectors. """ del inputs # unused latent_code = ed.MultivariateNormalDiag(loc=tf.zeros(self.latent_size), sample_shape=1, name="latent_code") state = self.lstm.zero_state(1, dtype=tf.float32) t = 0 productions = [] stack = [self.grammar.start_symbol] while stack: symbol = stack.pop() net, state = self.lstm(latent_code, state) logits = (self.output_layer(net) + self.grammar.mask(symbol, on_value=0., off_value=-1e9)) production = ed.OneHotCategorical(logits=logits, name="production_" + str(t)) _, rhs = self.grammar.production_rules[tf.argmax( input=production, axis=-1)] for symbol in rhs: if symbol in self.grammar.nonterminal_symbols: stack.append(symbol) productions.append(production) t += 1 return tf.stack(productions, axis=1)
def call(self, inputs): """Runs the model forward to generate a sequence of productions. Args: inputs: Unused. Returns: productions: Tensor of shape [1, num_productions, num_production_rules]. Slices along the `num_productions` dimension represent one-hot vectors. """ del inputs # unused latent_code = ed.MultivariateNormalDiag(loc=tf.zeros(self.latent_size), sample_shape=1, name="latent_code") state = self.lstm.zero_state(1, dtype=tf.float32) t = 0 productions = [] stack = [self.grammar.start_symbol] while stack: symbol = stack.pop() net, state = self.lstm(latent_code, state) logits = (self.output_layer(net) + self.grammar.mask(symbol, on_value=0., off_value=-1e9)) production = ed.OneHotCategorical(logits=logits, name="production_" + str(t)) _, rhs = self.grammar.production_rules[tf.argmax( input=production, axis=-1)] for symbol in rhs: if symbol in self.grammar.nonterminal_symbols: stack.append(symbol) productions.append(production) t += 1 return tf.stack(productions, axis=1)
[ "Runs", "the", "model", "forward", "to", "generate", "a", "sequence", "of", "productions", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/grammar_vae.py#L209-L241
[ "def", "call", "(", "self", ",", "inputs", ")", ":", "del", "inputs", "# unused", "latent_code", "=", "ed", ".", "MultivariateNormalDiag", "(", "loc", "=", "tf", ".", "zeros", "(", "self", ".", "latent_size", ")", ",", "sample_shape", "=", "1", ",", "name", "=", "\"latent_code\"", ")", "state", "=", "self", ".", "lstm", ".", "zero_state", "(", "1", ",", "dtype", "=", "tf", ".", "float32", ")", "t", "=", "0", "productions", "=", "[", "]", "stack", "=", "[", "self", ".", "grammar", ".", "start_symbol", "]", "while", "stack", ":", "symbol", "=", "stack", ".", "pop", "(", ")", "net", ",", "state", "=", "self", ".", "lstm", "(", "latent_code", ",", "state", ")", "logits", "=", "(", "self", ".", "output_layer", "(", "net", ")", "+", "self", ".", "grammar", ".", "mask", "(", "symbol", ",", "on_value", "=", "0.", ",", "off_value", "=", "-", "1e9", ")", ")", "production", "=", "ed", ".", "OneHotCategorical", "(", "logits", "=", "logits", ",", "name", "=", "\"production_\"", "+", "str", "(", "t", ")", ")", "_", ",", "rhs", "=", "self", ".", "grammar", ".", "production_rules", "[", "tf", ".", "argmax", "(", "input", "=", "production", ",", "axis", "=", "-", "1", ")", "]", "for", "symbol", "in", "rhs", ":", "if", "symbol", "in", "self", ".", "grammar", ".", "nonterminal_symbols", ":", "stack", ".", "append", "(", "symbol", ")", "productions", ".", "append", "(", "production", ")", "t", "+=", "1", "return", "tf", ".", "stack", "(", "productions", ",", "axis", "=", "1", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
ProbabilisticGrammarVariational.call
Runs the model forward to return a stochastic encoding. Args: inputs: Tensor of shape [1, num_productions, num_production_rules]. It is a sequence of productions of length `num_productions`. Each production is a one-hot vector of length `num_production_rules`: it determines which production rule the production corresponds to. Returns: latent_code_posterior: A random variable capturing a sample from the variational distribution, of shape [1, self.latent_size].
tensorflow_probability/examples/grammar_vae.py
def call(self, inputs): """Runs the model forward to return a stochastic encoding. Args: inputs: Tensor of shape [1, num_productions, num_production_rules]. It is a sequence of productions of length `num_productions`. Each production is a one-hot vector of length `num_production_rules`: it determines which production rule the production corresponds to. Returns: latent_code_posterior: A random variable capturing a sample from the variational distribution, of shape [1, self.latent_size]. """ net = self.encoder_net(tf.cast(inputs, tf.float32)) return ed.MultivariateNormalDiag( loc=net[..., :self.latent_size], scale_diag=tf.nn.softplus(net[..., self.latent_size:]), name="latent_code_posterior")
def call(self, inputs): """Runs the model forward to return a stochastic encoding. Args: inputs: Tensor of shape [1, num_productions, num_production_rules]. It is a sequence of productions of length `num_productions`. Each production is a one-hot vector of length `num_production_rules`: it determines which production rule the production corresponds to. Returns: latent_code_posterior: A random variable capturing a sample from the variational distribution, of shape [1, self.latent_size]. """ net = self.encoder_net(tf.cast(inputs, tf.float32)) return ed.MultivariateNormalDiag( loc=net[..., :self.latent_size], scale_diag=tf.nn.softplus(net[..., self.latent_size:]), name="latent_code_posterior")
[ "Runs", "the", "model", "forward", "to", "return", "a", "stochastic", "encoding", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/grammar_vae.py#L267-L284
[ "def", "call", "(", "self", ",", "inputs", ")", ":", "net", "=", "self", ".", "encoder_net", "(", "tf", ".", "cast", "(", "inputs", ",", "tf", ".", "float32", ")", ")", "return", "ed", ".", "MultivariateNormalDiag", "(", "loc", "=", "net", "[", "...", ",", ":", "self", ".", "latent_size", "]", ",", "scale_diag", "=", "tf", ".", "nn", ".", "softplus", "(", "net", "[", "...", ",", "self", ".", "latent_size", ":", "]", ")", ",", "name", "=", "\"latent_code_posterior\"", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
test
Zipf._hat_integral
Integral of the `hat` function, used for sampling. We choose a `hat` function, h(x) = x^(-power), which is a continuous (unnormalized) density touching each positive integer at the (unnormalized) pmf. This function implements `hat` integral: H(x) = int_x^inf h(t) dt; which is needed for sampling purposes. Arguments: x: A Tensor of points x at which to evaluate H(x). Returns: A Tensor containing evaluation H(x) at x.
tensorflow_probability/python/distributions/zipf.py
def _hat_integral(self, x): """Integral of the `hat` function, used for sampling. We choose a `hat` function, h(x) = x^(-power), which is a continuous (unnormalized) density touching each positive integer at the (unnormalized) pmf. This function implements `hat` integral: H(x) = int_x^inf h(t) dt; which is needed for sampling purposes. Arguments: x: A Tensor of points x at which to evaluate H(x). Returns: A Tensor containing evaluation H(x) at x. """ x = tf.cast(x, self.power.dtype) t = self.power - 1. return tf.exp((-t) * tf.math.log1p(x) - tf.math.log(t))
def _hat_integral(self, x): """Integral of the `hat` function, used for sampling. We choose a `hat` function, h(x) = x^(-power), which is a continuous (unnormalized) density touching each positive integer at the (unnormalized) pmf. This function implements `hat` integral: H(x) = int_x^inf h(t) dt; which is needed for sampling purposes. Arguments: x: A Tensor of points x at which to evaluate H(x). Returns: A Tensor containing evaluation H(x) at x. """ x = tf.cast(x, self.power.dtype) t = self.power - 1. return tf.exp((-t) * tf.math.log1p(x) - tf.math.log(t))
[ "Integral", "of", "the", "hat", "function", "used", "for", "sampling", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/zipf.py#L306-L322
[ "def", "_hat_integral", "(", "self", ",", "x", ")", ":", "x", "=", "tf", ".", "cast", "(", "x", ",", "self", ".", "power", ".", "dtype", ")", "t", "=", "self", ".", "power", "-", "1.", "return", "tf", ".", "exp", "(", "(", "-", "t", ")", "*", "tf", ".", "math", ".", "log1p", "(", "x", ")", "-", "tf", ".", "math", ".", "log", "(", "t", ")", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
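A standalone NumPy/SciPy sanity check of the closed form the code evaluates. Note the integrand it matches is `(1 + t)**(-power)`, i.e. the docstring's hat function applied after a unit shift of the variable; the particular `power` and `x` values are illustrative.

```python
import numpy as np
from scipy import integrate

power, x = 2.5, 3.0
t = power - 1.
closed_form = np.exp(-t * np.log1p(x) - np.log(t))   # what the code computes
quadrature, _ = integrate.quad(lambda s: (1. + s)**(-power), x, np.inf)
print(closed_form, quadrature)  # both ~0.08333
```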
test
Zipf._hat_integral_inverse
Inverse function of _hat_integral.
tensorflow_probability/python/distributions/zipf.py
def _hat_integral_inverse(self, x): """Inverse function of _hat_integral.""" x = tf.cast(x, self.power.dtype) t = self.power - 1. return tf.math.expm1(-(tf.math.log(t) + tf.math.log(x)) / t)
def _hat_integral_inverse(self, x): """Inverse function of _hat_integral.""" x = tf.cast(x, self.power.dtype) t = self.power - 1. return tf.math.expm1(-(tf.math.log(t) + tf.math.log(x)) / t)
[ "Inverse", "function", "of", "_hat_integral", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/zipf.py#L324-L328
[ "def", "_hat_integral_inverse", "(", "self", ",", "x", ")", ":", "x", "=", "tf", ".", "cast", "(", "x", ",", "self", ".", "power", ".", "dtype", ")", "t", "=", "self", ".", "power", "-", "1.", "return", "tf", ".", "math", ".", "expm1", "(", "-", "(", "tf", ".", "math", ".", "log", "(", "t", ")", "+", "tf", ".", "math", ".", "log", "(", "x", ")", ")", "/", "t", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
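A standalone NumPy roundtrip check of the inverse: with `t = power - 1`, `H(x) = (1 + x)**(-t) / t` and `H_inv(y) = expm1(-(log(t) + log(y)) / t)`, so `H_inv(H(x))` should recover `x`.

```python
import numpy as np

power = 2.5
t = power - 1.
H = lambda x: (1. + x)**(-t) / t
H_inv = lambda y: np.expm1(-(np.log(t) + np.log(y)) / t)

x = np.array([0.5, 3., 100.])
print(H_inv(H(x)))  # ==> [  0.5    3.  100.]
```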
test
matrix_rank
Compute the matrix rank; the number of non-zero SVD singular values. Arguments: a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be pseudo-inverted. tol: Threshold below which the singular value is counted as "zero". Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`). validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: "matrix_rank". Returns: matrix_rank: (Batch of) `int32` scalars representing the number of non-zero singular values.
tensorflow_probability/python/math/linalg.py
def matrix_rank(a, tol=None, validate_args=False, name=None): """Compute the matrix rank; the number of non-zero SVD singular values. Arguments: a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be pseudo-inverted. tol: Threshold below which the singular value is counted as "zero". Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`). validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: "matrix_rank". Returns: matrix_rank: (Batch of) `int32` scalars representing the number of non-zero singular values. """ with tf.compat.v1.name_scope(name, 'matrix_rank', [a, tol]): a = tf.convert_to_tensor(value=a, dtype_hint=tf.float32, name='a') assertions = _maybe_validate_matrix(a, validate_args) if assertions: with tf.control_dependencies(assertions): a = tf.identity(a) s = tf.linalg.svd(a, compute_uv=False) if tol is None: if a.shape[-2:].is_fully_defined(): m = np.max(a.shape[-2:].as_list()) else: m = tf.reduce_max(input_tensor=tf.shape(input=a)[-2:]) eps = np.finfo(a.dtype.as_numpy_dtype).eps tol = (eps * tf.cast(m, a.dtype) * tf.reduce_max(input_tensor=s, axis=-1, keepdims=True)) return tf.reduce_sum(input_tensor=tf.cast(s > tol, tf.int32), axis=-1)
def matrix_rank(a, tol=None, validate_args=False, name=None): """Compute the matrix rank; the number of non-zero SVD singular values. Arguments: a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be pseudo-inverted. tol: Threshold below which the singular value is counted as "zero". Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`). validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: "matrix_rank". Returns: matrix_rank: (Batch of) `int32` scalars representing the number of non-zero singular values. """ with tf.compat.v1.name_scope(name, 'matrix_rank', [a, tol]): a = tf.convert_to_tensor(value=a, dtype_hint=tf.float32, name='a') assertions = _maybe_validate_matrix(a, validate_args) if assertions: with tf.control_dependencies(assertions): a = tf.identity(a) s = tf.linalg.svd(a, compute_uv=False) if tol is None: if a.shape[-2:].is_fully_defined(): m = np.max(a.shape[-2:].as_list()) else: m = tf.reduce_max(input_tensor=tf.shape(input=a)[-2:]) eps = np.finfo(a.dtype.as_numpy_dtype).eps tol = (eps * tf.cast(m, a.dtype) * tf.reduce_max(input_tensor=s, axis=-1, keepdims=True)) return tf.reduce_sum(input_tensor=tf.cast(s > tol, tf.int32), axis=-1)
[ "Compute", "the", "matrix", "rank", ";", "the", "number", "of", "non", "-", "zero", "SVD", "singular", "values", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L48-L81
[ "def", "matrix_rank", "(", "a", ",", "tol", "=", "None", ",", "validate_args", "=", "False", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'matrix_rank'", ",", "[", "a", ",", "tol", "]", ")", ":", "a", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "a", ",", "dtype_hint", "=", "tf", ".", "float32", ",", "name", "=", "'a'", ")", "assertions", "=", "_maybe_validate_matrix", "(", "a", ",", "validate_args", ")", "if", "assertions", ":", "with", "tf", ".", "control_dependencies", "(", "assertions", ")", ":", "a", "=", "tf", ".", "identity", "(", "a", ")", "s", "=", "tf", ".", "linalg", ".", "svd", "(", "a", ",", "compute_uv", "=", "False", ")", "if", "tol", "is", "None", ":", "if", "a", ".", "shape", "[", "-", "2", ":", "]", ".", "is_fully_defined", "(", ")", ":", "m", "=", "np", ".", "max", "(", "a", ".", "shape", "[", "-", "2", ":", "]", ".", "as_list", "(", ")", ")", "else", ":", "m", "=", "tf", ".", "reduce_max", "(", "input_tensor", "=", "tf", ".", "shape", "(", "input", "=", "a", ")", "[", "-", "2", ":", "]", ")", "eps", "=", "np", ".", "finfo", "(", "a", ".", "dtype", ".", "as_numpy_dtype", ")", ".", "eps", "tol", "=", "(", "eps", "*", "tf", ".", "cast", "(", "m", ",", "a", ".", "dtype", ")", "*", "tf", ".", "reduce_max", "(", "input_tensor", "=", "s", ",", "axis", "=", "-", "1", ",", "keepdims", "=", "True", ")", ")", "return", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "tf", ".", "cast", "(", "s", ">", "tol", ",", "tf", ".", "int32", ")", ",", "axis", "=", "-", "1", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
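An eager-mode usage sketch, assuming the function is exported as `tfp.math.matrix_rank`; here `a` is simply the matrix whose rank is computed. An identity matrix has full rank, while an all-ones matrix has a single non-zero singular value.

```python
import tensorflow as tf
import tensorflow_probability as tfp

full_rank = tf.eye(3)
rank_one = tf.ones([3, 3])   # all rows identical => rank 1

print(tfp.math.matrix_rank(full_rank).numpy())  # ==> 3
print(tfp.math.matrix_rank(rank_one).numpy())   # ==> 1
```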
test
cholesky_concat
Concatenates `chol @ chol.T` with additional rows and columns. This operation is conceptually identical to: ```python def cholesky_concat_slow(chol, cols): # cols shaped (n + m) x m = z x m mat = tf.matmul(chol, chol, adjoint_b=True) # batch of n x n # Concat columns. mat = tf.concat([mat, cols[..., :tf.shape(mat)[-2], :]], axis=-1) # n x z # Concat rows. mat = tf.concat([mat, tf.linalg.matrix_transpose(cols)], axis=-2) # z x z return tf.linalg.cholesky(mat) ``` but whereas `cholesky_concat_slow` would cost `O(z**3)` work, `cholesky_concat` only costs `O(z**2 + m**3)` work. The resulting (implicit) matrix must be symmetric and positive definite. Thus, the bottom right `m x m` must be self-adjoint, and we do not require a separate `rows` argument (which can be inferred from `conj(cols.T)`). Args: chol: Cholesky decomposition of `mat = chol @ chol.T`. cols: The new columns whose first `n` rows we would like concatenated to the right of `mat = chol @ chol.T`, and whose conjugate transpose we would like concatenated to the bottom of `concat(mat, cols[:n,:])`. A `Tensor` with final dims `(n+m, m)`. The first `n` rows are the top right rectangle (their conjugate transpose forms the bottom left), and the bottom `m x m` is self-adjoint. name: Optional name for this op. Returns: chol_concat: The Cholesky decomposition of: ``` [ [ mat cols[:n, :] ] [ conj(cols.T) ] ] ```
tensorflow_probability/python/math/linalg.py
def cholesky_concat(chol, cols, name=None): """Concatenates `chol @ chol.T` with additional rows and columns. This operation is conceptually identical to: ```python def cholesky_concat_slow(chol, cols): # cols shaped (n + m) x m = z x m mat = tf.matmul(chol, chol, adjoint_b=True) # batch of n x n # Concat columns. mat = tf.concat([mat, cols[..., :tf.shape(mat)[-2], :]], axis=-1) # n x z # Concat rows. mat = tf.concat([mat, tf.linalg.matrix_transpose(cols)], axis=-2) # z x z return tf.linalg.cholesky(mat) ``` but whereas `cholesky_concat_slow` would cost `O(z**3)` work, `cholesky_concat` only costs `O(z**2 + m**3)` work. The resulting (implicit) matrix must be symmetric and positive definite. Thus, the bottom right `m x m` must be self-adjoint, and we do not require a separate `rows` argument (which can be inferred from `conj(cols.T)`). Args: chol: Cholesky decomposition of `mat = chol @ chol.T`. cols: The new columns whose first `n` rows we would like concatenated to the right of `mat = chol @ chol.T`, and whose conjugate transpose we would like concatenated to the bottom of `concat(mat, cols[:n,:])`. A `Tensor` with final dims `(n+m, m)`. The first `n` rows are the top right rectangle (their conjugate transpose forms the bottom left), and the bottom `m x m` is self-adjoint. name: Optional name for this op. Returns: chol_concat: The Cholesky decomposition of: ``` [ [ mat cols[:n, :] ] [ conj(cols.T) ] ] ``` """ with tf.compat.v2.name_scope(name or 'cholesky_extend'): dtype = dtype_util.common_dtype([chol, cols], preferred_dtype=tf.float32) chol = tf.convert_to_tensor(value=chol, name='chol', dtype=dtype) cols = tf.convert_to_tensor(value=cols, name='cols', dtype=dtype) n = prefer_static.shape(chol)[-1] mat_nm, mat_mm = cols[..., :n, :], cols[..., n:, :] solved_nm = linear_operator_util.matrix_triangular_solve_with_broadcast( chol, mat_nm) lower_right_mm = tf.linalg.cholesky( mat_mm - tf.matmul(solved_nm, solved_nm, adjoint_a=True)) lower_left_mn = tf.math.conj(tf.linalg.matrix_transpose(solved_nm)) out_batch = prefer_static.shape(solved_nm)[:-2] chol = tf.broadcast_to( chol, tf.concat([out_batch, prefer_static.shape(chol)[-2:]], axis=0)) top_right_zeros_nm = tf.zeros_like(solved_nm) return tf.concat([tf.concat([chol, top_right_zeros_nm], axis=-1), tf.concat([lower_left_mn, lower_right_mm], axis=-1)], axis=-2)
def cholesky_concat(chol, cols, name=None): """Concatenates `chol @ chol.T` with additional rows and columns. This operation is conceptually identical to: ```python def cholesky_concat_slow(chol, cols): # cols shaped (n + m) x m = z x m mat = tf.matmul(chol, chol, adjoint_b=True) # batch of n x n # Concat columns. mat = tf.concat([mat, cols[..., :tf.shape(mat)[-2], :]], axis=-1) # n x z # Concat rows. mat = tf.concat([mat, tf.linalg.matrix_transpose(cols)], axis=-2) # z x z return tf.linalg.cholesky(mat) ``` but whereas `cholesky_concat_slow` would cost `O(z**3)` work, `cholesky_concat` only costs `O(z**2 + m**3)` work. The resulting (implicit) matrix must be symmetric and positive definite. Thus, the bottom right `m x m` must be self-adjoint, and we do not require a separate `rows` argument (which can be inferred from `conj(cols.T)`). Args: chol: Cholesky decomposition of `mat = chol @ chol.T`. cols: The new columns whose first `n` rows we would like concatenated to the right of `mat = chol @ chol.T`, and whose conjugate transpose we would like concatenated to the bottom of `concat(mat, cols[:n,:])`. A `Tensor` with final dims `(n+m, m)`. The first `n` rows are the top right rectangle (their conjugate transpose forms the bottom left), and the bottom `m x m` is self-adjoint. name: Optional name for this op. Returns: chol_concat: The Cholesky decomposition of: ``` [ [ mat cols[:n, :] ] [ conj(cols.T) ] ] ``` """ with tf.compat.v2.name_scope(name or 'cholesky_extend'): dtype = dtype_util.common_dtype([chol, cols], preferred_dtype=tf.float32) chol = tf.convert_to_tensor(value=chol, name='chol', dtype=dtype) cols = tf.convert_to_tensor(value=cols, name='cols', dtype=dtype) n = prefer_static.shape(chol)[-1] mat_nm, mat_mm = cols[..., :n, :], cols[..., n:, :] solved_nm = linear_operator_util.matrix_triangular_solve_with_broadcast( chol, mat_nm) lower_right_mm = tf.linalg.cholesky( mat_mm - tf.matmul(solved_nm, solved_nm, adjoint_a=True)) lower_left_mn = tf.math.conj(tf.linalg.matrix_transpose(solved_nm)) out_batch = prefer_static.shape(solved_nm)[:-2] chol = tf.broadcast_to( chol, tf.concat([out_batch, prefer_static.shape(chol)[-2:]], axis=0)) top_right_zeros_nm = tf.zeros_like(solved_nm) return tf.concat([tf.concat([chol, top_right_zeros_nm], axis=-1), tf.concat([lower_left_mn, lower_right_mm], axis=-1)], axis=-2)
[ "Concatenates", "chol", "@", "chol", ".", "T", "with", "additional", "rows", "and", "columns", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L84-L139
[ "def", "cholesky_concat", "(", "chol", ",", "cols", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v2", ".", "name_scope", "(", "name", "or", "'cholesky_extend'", ")", ":", "dtype", "=", "dtype_util", ".", "common_dtype", "(", "[", "chol", ",", "cols", "]", ",", "preferred_dtype", "=", "tf", ".", "float32", ")", "chol", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "chol", ",", "name", "=", "'chol'", ",", "dtype", "=", "dtype", ")", "cols", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "cols", ",", "name", "=", "'cols'", ",", "dtype", "=", "dtype", ")", "n", "=", "prefer_static", ".", "shape", "(", "chol", ")", "[", "-", "1", "]", "mat_nm", ",", "mat_mm", "=", "cols", "[", "...", ",", ":", "n", ",", ":", "]", ",", "cols", "[", "...", ",", "n", ":", ",", ":", "]", "solved_nm", "=", "linear_operator_util", ".", "matrix_triangular_solve_with_broadcast", "(", "chol", ",", "mat_nm", ")", "lower_right_mm", "=", "tf", ".", "linalg", ".", "cholesky", "(", "mat_mm", "-", "tf", ".", "matmul", "(", "solved_nm", ",", "solved_nm", ",", "adjoint_a", "=", "True", ")", ")", "lower_left_mn", "=", "tf", ".", "math", ".", "conj", "(", "tf", ".", "linalg", ".", "matrix_transpose", "(", "solved_nm", ")", ")", "out_batch", "=", "prefer_static", ".", "shape", "(", "solved_nm", ")", "[", ":", "-", "2", "]", "chol", "=", "tf", ".", "broadcast_to", "(", "chol", ",", "tf", ".", "concat", "(", "[", "out_batch", ",", "prefer_static", ".", "shape", "(", "chol", ")", "[", "-", "2", ":", "]", "]", ",", "axis", "=", "0", ")", ")", "top_right_zeros_nm", "=", "tf", ".", "zeros_like", "(", "solved_nm", ")", "return", "tf", ".", "concat", "(", "[", "tf", ".", "concat", "(", "[", "chol", ",", "top_right_zeros_nm", "]", ",", "axis", "=", "-", "1", ")", ",", "tf", ".", "concat", "(", "[", "lower_left_mn", ",", "lower_right_mm", "]", ",", "axis", "=", "-", "1", ")", "]", ",", "axis", "=", "-", "2", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
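A verification sketch, assuming the op is exported as `tfp.math.cholesky_concat`, comparing it against the `O(z**3)` reference given in the docstring on a random symmetric positive-definite matrix built for the purpose.

```python
import tensorflow as tf
import tensorflow_probability as tfp

def cholesky_concat_slow(chol, cols):  # reference from the docstring above
  mat = tf.matmul(chol, chol, adjoint_b=True)
  mat = tf.concat([mat, cols[..., :tf.shape(mat)[-2], :]], axis=-1)
  mat = tf.concat([mat, tf.linalg.matrix_transpose(cols)], axis=-2)
  return tf.linalg.cholesky(mat)

# Random 5x5 SPD matrix; its leading 3x3 block plays the role of `mat`,
# and its trailing two columns are the `cols` argument.
z, n = 5, 3
a = tf.random.normal([z, z], seed=17)
spd = tf.matmul(a, a, adjoint_b=True) + z * tf.eye(z)
chol = tf.linalg.cholesky(spd[:n, :n])
cols = spd[:, n:]

fast = tfp.math.cholesky_concat(chol, cols)
slow = cholesky_concat_slow(chol, cols)
print(tf.reduce_max(tf.abs(fast - slow)).numpy())  # ~0 up to float roundoff
```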
test
_swap_m_with_i
Swaps `m` and `i` on axis -1. (Helper for pivoted_cholesky.) Given a batch of int64 vectors `vecs`, scalar index `m`, and compatibly shaped per-vector indices `i`, this function swaps elements `m` and `i` in each vector. For the use-case below, these are permutation vectors. Args: vecs: Vectors on which we perform the swap, int64 `Tensor`. m: Scalar int64 `Tensor`, the index into which the `i`th element is going. i: Batch int64 `Tensor`, shaped like vecs.shape[:-1] + [1]; the index into which the `m`th element is going. Returns: vecs: The updated vectors.
tensorflow_probability/python/math/linalg.py
def _swap_m_with_i(vecs, m, i): """Swaps `m` and `i` on axis -1. (Helper for pivoted_cholesky.) Given a batch of int64 vectors `vecs`, scalar index `m`, and compatibly shaped per-vector indices `i`, this function swaps elements `m` and `i` in each vector. For the use-case below, these are permutation vectors. Args: vecs: Vectors on which we perform the swap, int64 `Tensor`. m: Scalar int64 `Tensor`, the index into which the `i`th element is going. i: Batch int64 `Tensor`, shaped like vecs.shape[:-1] + [1]; the index into which the `m`th element is going. Returns: vecs: The updated vectors. """ vecs = tf.convert_to_tensor(value=vecs, dtype=tf.int64, name='vecs') m = tf.convert_to_tensor(value=m, dtype=tf.int64, name='m') i = tf.convert_to_tensor(value=i, dtype=tf.int64, name='i') trailing_elts = tf.broadcast_to( tf.range(m + 1, prefer_static.shape(vecs, out_type=tf.int64)[-1]), prefer_static.shape(vecs[..., m + 1:])) shp = prefer_static.shape(trailing_elts) trailing_elts = tf.where( tf.equal(trailing_elts, tf.broadcast_to(i, shp)), tf.broadcast_to(tf.gather(vecs, [m], axis=-1), shp), tf.broadcast_to(vecs[..., m + 1:], shp)) # TODO(bjp): Could we use tensor_scatter_nd_update? vecs_shape = vecs.shape vecs = tf.concat([ vecs[..., :m], tf.gather(vecs, i, batch_dims=prefer_static.rank(vecs) - 1), trailing_elts ], axis=-1) tensorshape_util.set_shape(vecs, vecs_shape) return vecs
def _swap_m_with_i(vecs, m, i): """Swaps `m` and `i` on axis -1. (Helper for pivoted_cholesky.) Given a batch of int64 vectors `vecs`, scalar index `m`, and compatibly shaped per-vector indices `i`, this function swaps elements `m` and `i` in each vector. For the use-case below, these are permutation vectors. Args: vecs: Vectors on which we perform the swap, int64 `Tensor`. m: Scalar int64 `Tensor`, the index into which the `i`th element is going. i: Batch int64 `Tensor`, shaped like vecs.shape[:-1] + [1]; the index into which the `m`th element is going. Returns: vecs: The updated vectors. """ vecs = tf.convert_to_tensor(value=vecs, dtype=tf.int64, name='vecs') m = tf.convert_to_tensor(value=m, dtype=tf.int64, name='m') i = tf.convert_to_tensor(value=i, dtype=tf.int64, name='i') trailing_elts = tf.broadcast_to( tf.range(m + 1, prefer_static.shape(vecs, out_type=tf.int64)[-1]), prefer_static.shape(vecs[..., m + 1:])) shp = prefer_static.shape(trailing_elts) trailing_elts = tf.where( tf.equal(trailing_elts, tf.broadcast_to(i, shp)), tf.broadcast_to(tf.gather(vecs, [m], axis=-1), shp), tf.broadcast_to(vecs[..., m + 1:], shp)) # TODO(bjp): Could we use tensor_scatter_nd_update? vecs_shape = vecs.shape vecs = tf.concat([ vecs[..., :m], tf.gather(vecs, i, batch_dims=prefer_static.rank(vecs) - 1), trailing_elts ], axis=-1) tensorshape_util.set_shape(vecs, vecs_shape) return vecs
[ "Swaps", "m", "and", "i", "on", "axis", "-", "1", ".", "(", "Helper", "for", "pivoted_cholesky", ".", ")" ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L142-L177
[ "def", "_swap_m_with_i", "(", "vecs", ",", "m", ",", "i", ")", ":", "vecs", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "vecs", ",", "dtype", "=", "tf", ".", "int64", ",", "name", "=", "'vecs'", ")", "m", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "m", ",", "dtype", "=", "tf", ".", "int64", ",", "name", "=", "'m'", ")", "i", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "i", ",", "dtype", "=", "tf", ".", "int64", ",", "name", "=", "'i'", ")", "trailing_elts", "=", "tf", ".", "broadcast_to", "(", "tf", ".", "range", "(", "m", "+", "1", ",", "prefer_static", ".", "shape", "(", "vecs", ",", "out_type", "=", "tf", ".", "int64", ")", "[", "-", "1", "]", ")", ",", "prefer_static", ".", "shape", "(", "vecs", "[", "...", ",", "m", "+", "1", ":", "]", ")", ")", "shp", "=", "prefer_static", ".", "shape", "(", "trailing_elts", ")", "trailing_elts", "=", "tf", ".", "where", "(", "tf", ".", "equal", "(", "trailing_elts", ",", "tf", ".", "broadcast_to", "(", "i", ",", "shp", ")", ")", ",", "tf", ".", "broadcast_to", "(", "tf", ".", "gather", "(", "vecs", ",", "[", "m", "]", ",", "axis", "=", "-", "1", ")", ",", "shp", ")", ",", "tf", ".", "broadcast_to", "(", "vecs", "[", "...", ",", "m", "+", "1", ":", "]", ",", "shp", ")", ")", "# TODO(bjp): Could we use tensor_scatter_nd_update?", "vecs_shape", "=", "vecs", ".", "shape", "vecs", "=", "tf", ".", "concat", "(", "[", "vecs", "[", "...", ",", ":", "m", "]", ",", "tf", ".", "gather", "(", "vecs", ",", "i", ",", "batch_dims", "=", "prefer_static", ".", "rank", "(", "vecs", ")", "-", "1", ")", ",", "trailing_elts", "]", ",", "axis", "=", "-", "1", ")", "tensorshape_util", ".", "set_shape", "(", "vecs", ",", "vecs_shape", ")", "return", "vecs" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
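For orientation, the swap that `_swap_m_with_i` performs is, for a single (non-batched) permutation vector, just the following NumPy fancy-indexing assignment; the TFP helper does the equivalent for a batch of vectors inside a `tf.while_loop`, where in-place indexing like this is not available:

```python
import numpy as np

perm = np.arange(6)            # [0 1 2 3 4 5]
m, i = 2, 5                    # swap positions 2 and 5
perm[[m, i]] = perm[[i, m]]
print(perm)                    # [0 1 5 3 4 2]
```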
test
pivoted_cholesky
Computes the (partial) pivoted cholesky decomposition of `matrix`. The pivoted Cholesky is a low rank approximation of the Cholesky decomposition of `matrix`, i.e. as described in [(Harbrecht et al., 2012)][1]. The currently-worst-approximated diagonal element is selected as the pivot at each iteration. This yields from a `[B1...Bn, N, N]` shaped `matrix` a `[B1...Bn, N, K]` shaped rank-`K` approximation `lr` such that `lr @ lr.T ~= matrix`. Note that, unlike the Cholesky decomposition, `lr` is not triangular even in a rectangular-matrix sense. However, under a permutation it could be made triangular (it has one more zero in each column as you move to the right). Such a matrix can be useful as a preconditioner for conjugate gradient optimization, i.e. as in [(Wang et al. 2019)][2], as matmuls and solves can be cheaply done via the Woodbury matrix identity, as implemented by `tf.linalg.LinearOperatorLowRankUpdate`. Args: matrix: Floating point `Tensor` batch of symmetric, positive definite matrices. max_rank: Scalar `int` `Tensor`, the rank at which to truncate the approximation. diag_rtol: Scalar floating point `Tensor` (same dtype as `matrix`). If the errors of all diagonal elements of `lr @ lr.T` are each lower than `element * diag_rtol`, iteration is permitted to terminate early. name: Optional name for the op. Returns: lr: Low rank pivoted Cholesky approximation of `matrix`. #### References [1]: H Harbrecht, M Peters, R Schneider. On the low-rank approximation by the pivoted Cholesky decomposition. _Applied numerical mathematics_, 62(4):428-440, 2012. [2]: K. A. Wang et al. Exact Gaussian Processes on a Million Data Points. _arXiv preprint arXiv:1903.08114_, 2019. https://arxiv.org/abs/1903.08114
tensorflow_probability/python/math/linalg.py
def pivoted_cholesky(matrix, max_rank, diag_rtol=1e-3, name=None): """Computes the (partial) pivoted cholesky decomposition of `matrix`. The pivoted Cholesky is a low rank approximation of the Cholesky decomposition of `matrix`, i.e. as described in [(Harbrecht et al., 2012)][1]. The currently-worst-approximated diagonal element is selected as the pivot at each iteration. This yields from a `[B1...Bn, N, N]` shaped `matrix` a `[B1...Bn, N, K]` shaped rank-`K` approximation `lr` such that `lr @ lr.T ~= matrix`. Note that, unlike the Cholesky decomposition, `lr` is not triangular even in a rectangular-matrix sense. However, under a permutation it could be made triangular (it has one more zero in each column as you move to the right). Such a matrix can be useful as a preconditioner for conjugate gradient optimization, i.e. as in [(Wang et al. 2019)][2], as matmuls and solves can be cheaply done via the Woodbury matrix identity, as implemented by `tf.linalg.LinearOperatorLowRankUpdate`. Args: matrix: Floating point `Tensor` batch of symmetric, positive definite matrices. max_rank: Scalar `int` `Tensor`, the rank at which to truncate the approximation. diag_rtol: Scalar floating point `Tensor` (same dtype as `matrix`). If the errors of all diagonal elements of `lr @ lr.T` are each lower than `element * diag_rtol`, iteration is permitted to terminate early. name: Optional name for the op. Returns: lr: Low rank pivoted Cholesky approximation of `matrix`. #### References [1]: H Harbrecht, M Peters, R Schneider. On the low-rank approximation by the pivoted Cholesky decomposition. _Applied numerical mathematics_, 62(4):428-440, 2012. [2]: K. A. Wang et al. Exact Gaussian Processes on a Million Data Points. _arXiv preprint arXiv:1903.08114_, 2019. https://arxiv.org/abs/1903.08114 """ with tf.compat.v2.name_scope(name or 'pivoted_cholesky'): dtype = dtype_util.common_dtype([matrix, diag_rtol], preferred_dtype=tf.float32) matrix = tf.convert_to_tensor(value=matrix, name='matrix', dtype=dtype) if tensorshape_util.rank(matrix.shape) is None: raise NotImplementedError('Rank of `matrix` must be known statically') max_rank = tf.convert_to_tensor( value=max_rank, name='max_rank', dtype=tf.int64) max_rank = tf.minimum(max_rank, prefer_static.shape(matrix, out_type=tf.int64)[-1]) diag_rtol = tf.convert_to_tensor( value=diag_rtol, dtype=dtype, name='diag_rtol') matrix_diag = tf.linalg.diag_part(matrix) # matrix is P.D., therefore all matrix_diag > 0, so we don't need abs. orig_error = tf.reduce_max(input_tensor=matrix_diag, axis=-1) def cond(m, pchol, perm, matrix_diag): """Condition for `tf.while_loop` continuation.""" del pchol del perm error = tf.linalg.norm(tensor=matrix_diag, ord=1, axis=-1) max_err = tf.reduce_max(input_tensor=error / orig_error) return (m < max_rank) & (tf.equal(m, 0) | (max_err > diag_rtol)) batch_dims = tensorshape_util.rank(matrix.shape) - 2 def batch_gather(params, indices, axis=-1): return tf.gather(params, indices, axis=axis, batch_dims=batch_dims) def body(m, pchol, perm, matrix_diag): """Body of a single `tf.while_loop` iteration.""" # Here is roughly a numpy, non-batched version of what's going to happen. # (See also Algorithm 1 of Harbrecht et al.) 
# 1: maxi = np.argmax(matrix_diag[perm[m:]]) + m # 2: maxval = matrix_diag[perm][maxi] # 3: perm[m], perm[maxi] = perm[maxi], perm[m] # 4: row = matrix[perm[m]][perm[m + 1:]] # 5: row -= np.sum(pchol[:m][perm[m + 1:]] * pchol[:m][perm[m]]], axis=-2) # 6: pivot = np.sqrt(maxval); row /= pivot # 7: row = np.concatenate([[[pivot]], row], -1) # 8: matrix_diag[perm[m:]] -= row**2 # 9: pchol[m, perm[m:]] = row # Find the maximal position of the (remaining) permuted diagonal. # Steps 1, 2 above. permuted_diag = batch_gather(matrix_diag, perm[..., m:]) maxi = tf.argmax( input=permuted_diag, axis=-1, output_type=tf.int64)[..., tf.newaxis] maxval = batch_gather(permuted_diag, maxi) maxi = maxi + m maxval = maxval[..., 0] # Update perm: Swap perm[...,m] with perm[...,maxi]. Step 3 above. perm = _swap_m_with_i(perm, m, maxi) # Step 4. row = batch_gather(matrix, perm[..., m:m + 1], axis=-2) row = batch_gather(row, perm[..., m + 1:]) # Step 5. prev_rows = pchol[..., :m, :] prev_rows_perm_m_onward = batch_gather(prev_rows, perm[..., m + 1:]) prev_rows_pivot_col = batch_gather(prev_rows, perm[..., m:m + 1]) row -= tf.reduce_sum( input_tensor=prev_rows_perm_m_onward * prev_rows_pivot_col, axis=-2)[..., tf.newaxis, :] # Step 6. pivot = tf.sqrt(maxval)[..., tf.newaxis, tf.newaxis] # Step 7. row = tf.concat([pivot, row / pivot], axis=-1) # TODO(b/130899118): Pad grad fails with int64 paddings. # Step 8. paddings = tf.concat([ tf.zeros([prefer_static.rank(pchol) - 1, 2], dtype=tf.int32), [[tf.cast(m, tf.int32), 0]]], axis=0) diag_update = tf.pad(tensor=row**2, paddings=paddings)[..., 0, :] reverse_perm = _invert_permutation(perm) matrix_diag -= batch_gather(diag_update, reverse_perm) # Step 9. row = tf.pad(tensor=row, paddings=paddings) # TODO(bjp): Defer the reverse permutation all-at-once at the end? row = batch_gather(row, reverse_perm) pchol_shape = pchol.shape pchol = tf.concat([pchol[..., :m, :], row, pchol[..., m + 1:, :]], axis=-2) tensorshape_util.set_shape(pchol, pchol_shape) return m + 1, pchol, perm, matrix_diag m = np.int64(0) pchol = tf.zeros_like(matrix[..., :max_rank, :]) matrix_shape = prefer_static.shape(matrix, out_type=tf.int64) perm = tf.broadcast_to( prefer_static.range(matrix_shape[-1]), matrix_shape[:-1]) _, pchol, _, _ = tf.while_loop( cond=cond, body=body, loop_vars=(m, pchol, perm, matrix_diag)) pchol = tf.linalg.matrix_transpose(pchol) tensorshape_util.set_shape( pchol, tensorshape_util.concatenate(matrix_diag.shape, [None])) return pchol
def pivoted_cholesky(matrix, max_rank, diag_rtol=1e-3, name=None): """Computes the (partial) pivoted cholesky decomposition of `matrix`. The pivoted Cholesky is a low rank approximation of the Cholesky decomposition of `matrix`, i.e. as described in [(Harbrecht et al., 2012)][1]. The currently-worst-approximated diagonal element is selected as the pivot at each iteration. This yields from a `[B1...Bn, N, N]` shaped `matrix` a `[B1...Bn, N, K]` shaped rank-`K` approximation `lr` such that `lr @ lr.T ~= matrix`. Note that, unlike the Cholesky decomposition, `lr` is not triangular even in a rectangular-matrix sense. However, under a permutation it could be made triangular (it has one more zero in each column as you move to the right). Such a matrix can be useful as a preconditioner for conjugate gradient optimization, i.e. as in [(Wang et al. 2019)][2], as matmuls and solves can be cheaply done via the Woodbury matrix identity, as implemented by `tf.linalg.LinearOperatorLowRankUpdate`. Args: matrix: Floating point `Tensor` batch of symmetric, positive definite matrices. max_rank: Scalar `int` `Tensor`, the rank at which to truncate the approximation. diag_rtol: Scalar floating point `Tensor` (same dtype as `matrix`). If the errors of all diagonal elements of `lr @ lr.T` are each lower than `element * diag_rtol`, iteration is permitted to terminate early. name: Optional name for the op. Returns: lr: Low rank pivoted Cholesky approximation of `matrix`. #### References [1]: H Harbrecht, M Peters, R Schneider. On the low-rank approximation by the pivoted Cholesky decomposition. _Applied numerical mathematics_, 62(4):428-440, 2012. [2]: K. A. Wang et al. Exact Gaussian Processes on a Million Data Points. _arXiv preprint arXiv:1903.08114_, 2019. https://arxiv.org/abs/1903.08114 """ with tf.compat.v2.name_scope(name or 'pivoted_cholesky'): dtype = dtype_util.common_dtype([matrix, diag_rtol], preferred_dtype=tf.float32) matrix = tf.convert_to_tensor(value=matrix, name='matrix', dtype=dtype) if tensorshape_util.rank(matrix.shape) is None: raise NotImplementedError('Rank of `matrix` must be known statically') max_rank = tf.convert_to_tensor( value=max_rank, name='max_rank', dtype=tf.int64) max_rank = tf.minimum(max_rank, prefer_static.shape(matrix, out_type=tf.int64)[-1]) diag_rtol = tf.convert_to_tensor( value=diag_rtol, dtype=dtype, name='diag_rtol') matrix_diag = tf.linalg.diag_part(matrix) # matrix is P.D., therefore all matrix_diag > 0, so we don't need abs. orig_error = tf.reduce_max(input_tensor=matrix_diag, axis=-1) def cond(m, pchol, perm, matrix_diag): """Condition for `tf.while_loop` continuation.""" del pchol del perm error = tf.linalg.norm(tensor=matrix_diag, ord=1, axis=-1) max_err = tf.reduce_max(input_tensor=error / orig_error) return (m < max_rank) & (tf.equal(m, 0) | (max_err > diag_rtol)) batch_dims = tensorshape_util.rank(matrix.shape) - 2 def batch_gather(params, indices, axis=-1): return tf.gather(params, indices, axis=axis, batch_dims=batch_dims) def body(m, pchol, perm, matrix_diag): """Body of a single `tf.while_loop` iteration.""" # Here is roughly a numpy, non-batched version of what's going to happen. # (See also Algorithm 1 of Harbrecht et al.) 
# 1: maxi = np.argmax(matrix_diag[perm[m:]]) + m # 2: maxval = matrix_diag[perm][maxi] # 3: perm[m], perm[maxi] = perm[maxi], perm[m] # 4: row = matrix[perm[m]][perm[m + 1:]] # 5: row -= np.sum(pchol[:m][perm[m + 1:]] * pchol[:m][perm[m]]], axis=-2) # 6: pivot = np.sqrt(maxval); row /= pivot # 7: row = np.concatenate([[[pivot]], row], -1) # 8: matrix_diag[perm[m:]] -= row**2 # 9: pchol[m, perm[m:]] = row # Find the maximal position of the (remaining) permuted diagonal. # Steps 1, 2 above. permuted_diag = batch_gather(matrix_diag, perm[..., m:]) maxi = tf.argmax( input=permuted_diag, axis=-1, output_type=tf.int64)[..., tf.newaxis] maxval = batch_gather(permuted_diag, maxi) maxi = maxi + m maxval = maxval[..., 0] # Update perm: Swap perm[...,m] with perm[...,maxi]. Step 3 above. perm = _swap_m_with_i(perm, m, maxi) # Step 4. row = batch_gather(matrix, perm[..., m:m + 1], axis=-2) row = batch_gather(row, perm[..., m + 1:]) # Step 5. prev_rows = pchol[..., :m, :] prev_rows_perm_m_onward = batch_gather(prev_rows, perm[..., m + 1:]) prev_rows_pivot_col = batch_gather(prev_rows, perm[..., m:m + 1]) row -= tf.reduce_sum( input_tensor=prev_rows_perm_m_onward * prev_rows_pivot_col, axis=-2)[..., tf.newaxis, :] # Step 6. pivot = tf.sqrt(maxval)[..., tf.newaxis, tf.newaxis] # Step 7. row = tf.concat([pivot, row / pivot], axis=-1) # TODO(b/130899118): Pad grad fails with int64 paddings. # Step 8. paddings = tf.concat([ tf.zeros([prefer_static.rank(pchol) - 1, 2], dtype=tf.int32), [[tf.cast(m, tf.int32), 0]]], axis=0) diag_update = tf.pad(tensor=row**2, paddings=paddings)[..., 0, :] reverse_perm = _invert_permutation(perm) matrix_diag -= batch_gather(diag_update, reverse_perm) # Step 9. row = tf.pad(tensor=row, paddings=paddings) # TODO(bjp): Defer the reverse permutation all-at-once at the end? row = batch_gather(row, reverse_perm) pchol_shape = pchol.shape pchol = tf.concat([pchol[..., :m, :], row, pchol[..., m + 1:, :]], axis=-2) tensorshape_util.set_shape(pchol, pchol_shape) return m + 1, pchol, perm, matrix_diag m = np.int64(0) pchol = tf.zeros_like(matrix[..., :max_rank, :]) matrix_shape = prefer_static.shape(matrix, out_type=tf.int64) perm = tf.broadcast_to( prefer_static.range(matrix_shape[-1]), matrix_shape[:-1]) _, pchol, _, _ = tf.while_loop( cond=cond, body=body, loop_vars=(m, pchol, perm, matrix_diag)) pchol = tf.linalg.matrix_transpose(pchol) tensorshape_util.set_shape( pchol, tensorshape_util.concatenate(matrix_diag.shape, [None])) return pchol
[ "Computes", "the", "(", "partial", ")", "pivoted", "cholesky", "decomposition", "of", "matrix", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L186-L320
[ "def", "pivoted_cholesky", "(", "matrix", ",", "max_rank", ",", "diag_rtol", "=", "1e-3", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v2", ".", "name_scope", "(", "name", "or", "'pivoted_cholesky'", ")", ":", "dtype", "=", "dtype_util", ".", "common_dtype", "(", "[", "matrix", ",", "diag_rtol", "]", ",", "preferred_dtype", "=", "tf", ".", "float32", ")", "matrix", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "matrix", ",", "name", "=", "'matrix'", ",", "dtype", "=", "dtype", ")", "if", "tensorshape_util", ".", "rank", "(", "matrix", ".", "shape", ")", "is", "None", ":", "raise", "NotImplementedError", "(", "'Rank of `matrix` must be known statically'", ")", "max_rank", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "max_rank", ",", "name", "=", "'max_rank'", ",", "dtype", "=", "tf", ".", "int64", ")", "max_rank", "=", "tf", ".", "minimum", "(", "max_rank", ",", "prefer_static", ".", "shape", "(", "matrix", ",", "out_type", "=", "tf", ".", "int64", ")", "[", "-", "1", "]", ")", "diag_rtol", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "diag_rtol", ",", "dtype", "=", "dtype", ",", "name", "=", "'diag_rtol'", ")", "matrix_diag", "=", "tf", ".", "linalg", ".", "diag_part", "(", "matrix", ")", "# matrix is P.D., therefore all matrix_diag > 0, so we don't need abs.", "orig_error", "=", "tf", ".", "reduce_max", "(", "input_tensor", "=", "matrix_diag", ",", "axis", "=", "-", "1", ")", "def", "cond", "(", "m", ",", "pchol", ",", "perm", ",", "matrix_diag", ")", ":", "\"\"\"Condition for `tf.while_loop` continuation.\"\"\"", "del", "pchol", "del", "perm", "error", "=", "tf", ".", "linalg", ".", "norm", "(", "tensor", "=", "matrix_diag", ",", "ord", "=", "1", ",", "axis", "=", "-", "1", ")", "max_err", "=", "tf", ".", "reduce_max", "(", "input_tensor", "=", "error", "/", "orig_error", ")", "return", "(", "m", "<", "max_rank", ")", "&", "(", "tf", ".", "equal", "(", "m", ",", "0", ")", "|", "(", "max_err", ">", "diag_rtol", ")", ")", "batch_dims", "=", "tensorshape_util", ".", "rank", "(", "matrix", ".", "shape", ")", "-", "2", "def", "batch_gather", "(", "params", ",", "indices", ",", "axis", "=", "-", "1", ")", ":", "return", "tf", ".", "gather", "(", "params", ",", "indices", ",", "axis", "=", "axis", ",", "batch_dims", "=", "batch_dims", ")", "def", "body", "(", "m", ",", "pchol", ",", "perm", ",", "matrix_diag", ")", ":", "\"\"\"Body of a single `tf.while_loop` iteration.\"\"\"", "# Here is roughly a numpy, non-batched version of what's going to happen.", "# (See also Algorithm 1 of Harbrecht et al.)", "# 1: maxi = np.argmax(matrix_diag[perm[m:]]) + m", "# 2: maxval = matrix_diag[perm][maxi]", "# 3: perm[m], perm[maxi] = perm[maxi], perm[m]", "# 4: row = matrix[perm[m]][perm[m + 1:]]", "# 5: row -= np.sum(pchol[:m][perm[m + 1:]] * pchol[:m][perm[m]]], axis=-2)", "# 6: pivot = np.sqrt(maxval); row /= pivot", "# 7: row = np.concatenate([[[pivot]], row], -1)", "# 8: matrix_diag[perm[m:]] -= row**2", "# 9: pchol[m, perm[m:]] = row", "# Find the maximal position of the (remaining) permuted diagonal.", "# Steps 1, 2 above.", "permuted_diag", "=", "batch_gather", "(", "matrix_diag", ",", "perm", "[", "...", ",", "m", ":", "]", ")", "maxi", "=", "tf", ".", "argmax", "(", "input", "=", "permuted_diag", ",", "axis", "=", "-", "1", ",", "output_type", "=", "tf", ".", "int64", ")", "[", "...", ",", "tf", ".", "newaxis", "]", "maxval", "=", "batch_gather", "(", "permuted_diag", ",", "maxi", ")", "maxi", "=", "maxi", "+", "m", "maxval", "=", "maxval", 
"[", "...", ",", "0", "]", "# Update perm: Swap perm[...,m] with perm[...,maxi]. Step 3 above.", "perm", "=", "_swap_m_with_i", "(", "perm", ",", "m", ",", "maxi", ")", "# Step 4.", "row", "=", "batch_gather", "(", "matrix", ",", "perm", "[", "...", ",", "m", ":", "m", "+", "1", "]", ",", "axis", "=", "-", "2", ")", "row", "=", "batch_gather", "(", "row", ",", "perm", "[", "...", ",", "m", "+", "1", ":", "]", ")", "# Step 5.", "prev_rows", "=", "pchol", "[", "...", ",", ":", "m", ",", ":", "]", "prev_rows_perm_m_onward", "=", "batch_gather", "(", "prev_rows", ",", "perm", "[", "...", ",", "m", "+", "1", ":", "]", ")", "prev_rows_pivot_col", "=", "batch_gather", "(", "prev_rows", ",", "perm", "[", "...", ",", "m", ":", "m", "+", "1", "]", ")", "row", "-=", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "prev_rows_perm_m_onward", "*", "prev_rows_pivot_col", ",", "axis", "=", "-", "2", ")", "[", "...", ",", "tf", ".", "newaxis", ",", ":", "]", "# Step 6.", "pivot", "=", "tf", ".", "sqrt", "(", "maxval", ")", "[", "...", ",", "tf", ".", "newaxis", ",", "tf", ".", "newaxis", "]", "# Step 7.", "row", "=", "tf", ".", "concat", "(", "[", "pivot", ",", "row", "/", "pivot", "]", ",", "axis", "=", "-", "1", ")", "# TODO(b/130899118): Pad grad fails with int64 paddings.", "# Step 8.", "paddings", "=", "tf", ".", "concat", "(", "[", "tf", ".", "zeros", "(", "[", "prefer_static", ".", "rank", "(", "pchol", ")", "-", "1", ",", "2", "]", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "[", "[", "tf", ".", "cast", "(", "m", ",", "tf", ".", "int32", ")", ",", "0", "]", "]", "]", ",", "axis", "=", "0", ")", "diag_update", "=", "tf", ".", "pad", "(", "tensor", "=", "row", "**", "2", ",", "paddings", "=", "paddings", ")", "[", "...", ",", "0", ",", ":", "]", "reverse_perm", "=", "_invert_permutation", "(", "perm", ")", "matrix_diag", "-=", "batch_gather", "(", "diag_update", ",", "reverse_perm", ")", "# Step 9.", "row", "=", "tf", ".", "pad", "(", "tensor", "=", "row", ",", "paddings", "=", "paddings", ")", "# TODO(bjp): Defer the reverse permutation all-at-once at the end?", "row", "=", "batch_gather", "(", "row", ",", "reverse_perm", ")", "pchol_shape", "=", "pchol", ".", "shape", "pchol", "=", "tf", ".", "concat", "(", "[", "pchol", "[", "...", ",", ":", "m", ",", ":", "]", ",", "row", ",", "pchol", "[", "...", ",", "m", "+", "1", ":", ",", ":", "]", "]", ",", "axis", "=", "-", "2", ")", "tensorshape_util", ".", "set_shape", "(", "pchol", ",", "pchol_shape", ")", "return", "m", "+", "1", ",", "pchol", ",", "perm", ",", "matrix_diag", "m", "=", "np", ".", "int64", "(", "0", ")", "pchol", "=", "tf", ".", "zeros_like", "(", "matrix", "[", "...", ",", ":", "max_rank", ",", ":", "]", ")", "matrix_shape", "=", "prefer_static", ".", "shape", "(", "matrix", ",", "out_type", "=", "tf", ".", "int64", ")", "perm", "=", "tf", ".", "broadcast_to", "(", "prefer_static", ".", "range", "(", "matrix_shape", "[", "-", "1", "]", ")", ",", "matrix_shape", "[", ":", "-", "1", "]", ")", "_", ",", "pchol", ",", "_", ",", "_", "=", "tf", ".", "while_loop", "(", "cond", "=", "cond", ",", "body", "=", "body", ",", "loop_vars", "=", "(", "m", ",", "pchol", ",", "perm", ",", "matrix_diag", ")", ")", "pchol", "=", "tf", ".", "linalg", ".", "matrix_transpose", "(", "pchol", ")", "tensorshape_util", ".", "set_shape", "(", "pchol", ",", "tensorshape_util", ".", "concatenate", "(", "matrix_diag", ".", "shape", ",", "[", "None", "]", ")", ")", "return", "pchol" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
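The numbered comments in `body()` above sketch each iteration in NumPy. A self-contained, non-batched transcription of those steps (an illustrative sketch, not the TFP op; `pivoted_cholesky_np` is a made-up name) is shown below; at full rank with `diag_rtol=0.` it reproduces the exact factorization:

```python
import numpy as np

def pivoted_cholesky_np(matrix, max_rank, diag_rtol=1e-3):
    """Non-batched transcription of the numbered steps in the TFP body()."""
    n = matrix.shape[-1]
    pchol = np.zeros((max_rank, n))
    perm = np.arange(n)
    matrix_diag = np.diag(matrix).copy()            # residual diagonal
    orig_error = matrix_diag.max()
    for m in range(max_rank):
        if m > 0 and np.abs(matrix_diag).sum() / orig_error <= diag_rtol:
            break
        maxi = np.argmax(matrix_diag[perm[m:]]) + m                    # step 1
        maxval = matrix_diag[perm[maxi]]                               # step 2
        perm[m], perm[maxi] = perm[maxi], perm[m]                      # step 3
        row = matrix[perm[m]][perm[m + 1:]]                            # step 4
        row = row - np.sum(                                            # step 5
            pchol[:m][:, perm[m + 1:]] * pchol[:m][:, perm[m:m + 1]], axis=-2)
        pivot = np.sqrt(maxval)                                        # step 6
        row = np.concatenate([[pivot], row / pivot], axis=-1)          # step 7
        matrix_diag[perm[m:]] -= row**2                                # step 8
        pchol[m, perm[m:]] = row                                       # step 9
    return pchol.T                                                     # N x K

a = np.random.RandomState(0).randn(5, 5)
mat = a @ a.T + 5. * np.eye(5)
lr = pivoted_cholesky_np(mat, max_rank=5, diag_rtol=0.)
np.testing.assert_allclose(lr @ lr.T, mat, atol=1e-8)
```

Truncating `max_rank` below `n` yields the low-rank `lr` with `lr @ lr.T ~= matrix` described in the docstring.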
test
pinv
Compute the Moore-Penrose pseudo-inverse of a matrix. Calculate the [generalized inverse of a matrix]( https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its singular-value decomposition (SVD) and including all large singular values. The pseudo-inverse of a matrix `A`, is defined as: "the matrix that 'solves' [the least-squares problem] `A @ x = b`," i.e., if `x_hat` is a solution, then `A_pinv` is the matrix such that `x_hat = A_pinv @ b`. It can be shown that if `U @ Sigma @ V.T = A` is the singular value decomposition of `A`, then `A_pinv = V @ inv(Sigma) U^T`. [(Strang, 1980)][1] This function is analogous to [`numpy.linalg.pinv`]( https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html). It differs only in default value of `rcond`. In `numpy.linalg.pinv`, the default `rcond` is `1e-15`. Here the default is `10. * max(num_rows, num_cols) * np.finfo(dtype).eps`. Args: a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be pseudo-inverted. rcond: `Tensor` of small singular value cutoffs. Singular values smaller (in modulus) than `rcond` * largest_singular_value (again, in modulus) are set to zero. Must broadcast against `tf.shape(a)[:-2]`. Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`. validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: "pinv". Returns: a_pinv: The pseudo-inverse of input `a`. Has same shape as `a` except rightmost two dimensions are transposed. Raises: TypeError: if input `a` does not have `float`-like `dtype`. ValueError: if input `a` has fewer than 2 dimensions. #### Examples ```python import tensorflow as tf import tensorflow_probability as tfp a = tf.constant([[1., 0.4, 0.5], [0.4, 0.2, 0.25], [0.5, 0.25, 0.35]]) tf.matmul(tfp.math.pinv(a), a) # ==> array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=float32) a = tf.constant([[1., 0.4, 0.5, 1.], [0.4, 0.2, 0.25, 2.], [0.5, 0.25, 0.35, 3.]]) tf.matmul(tfp.math.pinv(a), a) # ==> array([[ 0.76, 0.37, 0.21, -0.02], [ 0.37, 0.43, -0.33, 0.02], [ 0.21, -0.33, 0.81, 0.01], [-0.02, 0.02, 0.01, 1. ]], dtype=float32) ``` #### References [1]: G. Strang. "Linear Algebra and Its Applications, 2nd Ed." Academic Press, Inc., 1980, pp. 139-142.
tensorflow_probability/python/math/linalg.py
def pinv(a, rcond=None, validate_args=False, name=None): """Compute the Moore-Penrose pseudo-inverse of a matrix. Calculate the [generalized inverse of a matrix]( https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its singular-value decomposition (SVD) and including all large singular values. The pseudo-inverse of a matrix `A`, is defined as: "the matrix that 'solves' [the least-squares problem] `A @ x = b`," i.e., if `x_hat` is a solution, then `A_pinv` is the matrix such that `x_hat = A_pinv @ b`. It can be shown that if `U @ Sigma @ V.T = A` is the singular value decomposition of `A`, then `A_pinv = V @ inv(Sigma) U^T`. [(Strang, 1980)][1] This function is analogous to [`numpy.linalg.pinv`]( https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html). It differs only in default value of `rcond`. In `numpy.linalg.pinv`, the default `rcond` is `1e-15`. Here the default is `10. * max(num_rows, num_cols) * np.finfo(dtype).eps`. Args: a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be pseudo-inverted. rcond: `Tensor` of small singular value cutoffs. Singular values smaller (in modulus) than `rcond` * largest_singular_value (again, in modulus) are set to zero. Must broadcast against `tf.shape(a)[:-2]`. Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`. validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: "pinv". Returns: a_pinv: The pseudo-inverse of input `a`. Has same shape as `a` except rightmost two dimensions are transposed. Raises: TypeError: if input `a` does not have `float`-like `dtype`. ValueError: if input `a` has fewer than 2 dimensions. #### Examples ```python import tensorflow as tf import tensorflow_probability as tfp a = tf.constant([[1., 0.4, 0.5], [0.4, 0.2, 0.25], [0.5, 0.25, 0.35]]) tf.matmul(tfp.math.pinv(a), a) # ==> array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=float32) a = tf.constant([[1., 0.4, 0.5, 1.], [0.4, 0.2, 0.25, 2.], [0.5, 0.25, 0.35, 3.]]) tf.matmul(tfp.math.pinv(a), a) # ==> array([[ 0.76, 0.37, 0.21, -0.02], [ 0.37, 0.43, -0.33, 0.02], [ 0.21, -0.33, 0.81, 0.01], [-0.02, 0.02, 0.01, 1. ]], dtype=float32) ``` #### References [1]: G. Strang. "Linear Algebra and Its Applications, 2nd Ed." Academic Press, Inc., 1980, pp. 139-142. """ with tf.compat.v1.name_scope(name, 'pinv', [a, rcond]): a = tf.convert_to_tensor(value=a, name='a') assertions = _maybe_validate_matrix(a, validate_args) if assertions: with tf.control_dependencies(assertions): a = tf.identity(a) dtype = a.dtype.as_numpy_dtype if rcond is None: def get_dim_size(dim): if tf.compat.dimension_value(a.shape[dim]) is not None: return tf.compat.dimension_value(a.shape[dim]) return tf.shape(input=a)[dim] num_rows = get_dim_size(-2) num_cols = get_dim_size(-1) if isinstance(num_rows, int) and isinstance(num_cols, int): max_rows_cols = float(max(num_rows, num_cols)) else: max_rows_cols = tf.cast(tf.maximum(num_rows, num_cols), dtype) rcond = 10. * max_rows_cols * np.finfo(dtype).eps rcond = tf.convert_to_tensor(value=rcond, dtype=dtype, name='rcond') # Calculate pseudo inverse via SVD. # Note: if a is symmetric then u == v. (We might observe additional # performance by explicitly setting `v = u` in such cases.) 
[ singular_values, # Sigma left_singular_vectors, # U right_singular_vectors, # V ] = tf.linalg.svd(a, full_matrices=False, compute_uv=True) # Saturate small singular values to inf. This has the effect of make # `1. / s = 0.` while not resulting in `NaN` gradients. cutoff = rcond * tf.reduce_max(input_tensor=singular_values, axis=-1) singular_values = tf.where( singular_values > cutoff[..., tf.newaxis], singular_values, tf.fill(tf.shape(input=singular_values), np.array(np.inf, dtype))) # Although `a == tf.matmul(u, s * v, transpose_b=True)` we swap # `u` and `v` here so that `tf.matmul(pinv(A), A) = tf.eye()`, i.e., # a matrix inverse has "transposed" semantics. a_pinv = tf.matmul( right_singular_vectors / singular_values[..., tf.newaxis, :], left_singular_vectors, adjoint_b=True) if a.shape.ndims is not None: a_pinv.set_shape(a.shape[:-2].concatenate([a.shape[-1], a.shape[-2]])) return a_pinv
def pinv(a, rcond=None, validate_args=False, name=None): """Compute the Moore-Penrose pseudo-inverse of a matrix. Calculate the [generalized inverse of a matrix]( https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its singular-value decomposition (SVD) and including all large singular values. The pseudo-inverse of a matrix `A`, is defined as: "the matrix that 'solves' [the least-squares problem] `A @ x = b`," i.e., if `x_hat` is a solution, then `A_pinv` is the matrix such that `x_hat = A_pinv @ b`. It can be shown that if `U @ Sigma @ V.T = A` is the singular value decomposition of `A`, then `A_pinv = V @ inv(Sigma) U^T`. [(Strang, 1980)][1] This function is analogous to [`numpy.linalg.pinv`]( https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html). It differs only in default value of `rcond`. In `numpy.linalg.pinv`, the default `rcond` is `1e-15`. Here the default is `10. * max(num_rows, num_cols) * np.finfo(dtype).eps`. Args: a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be pseudo-inverted. rcond: `Tensor` of small singular value cutoffs. Singular values smaller (in modulus) than `rcond` * largest_singular_value (again, in modulus) are set to zero. Must broadcast against `tf.shape(a)[:-2]`. Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`. validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: "pinv". Returns: a_pinv: The pseudo-inverse of input `a`. Has same shape as `a` except rightmost two dimensions are transposed. Raises: TypeError: if input `a` does not have `float`-like `dtype`. ValueError: if input `a` has fewer than 2 dimensions. #### Examples ```python import tensorflow as tf import tensorflow_probability as tfp a = tf.constant([[1., 0.4, 0.5], [0.4, 0.2, 0.25], [0.5, 0.25, 0.35]]) tf.matmul(tfp.math.pinv(a), a) # ==> array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=float32) a = tf.constant([[1., 0.4, 0.5, 1.], [0.4, 0.2, 0.25, 2.], [0.5, 0.25, 0.35, 3.]]) tf.matmul(tfp.math.pinv(a), a) # ==> array([[ 0.76, 0.37, 0.21, -0.02], [ 0.37, 0.43, -0.33, 0.02], [ 0.21, -0.33, 0.81, 0.01], [-0.02, 0.02, 0.01, 1. ]], dtype=float32) ``` #### References [1]: G. Strang. "Linear Algebra and Its Applications, 2nd Ed." Academic Press, Inc., 1980, pp. 139-142. """ with tf.compat.v1.name_scope(name, 'pinv', [a, rcond]): a = tf.convert_to_tensor(value=a, name='a') assertions = _maybe_validate_matrix(a, validate_args) if assertions: with tf.control_dependencies(assertions): a = tf.identity(a) dtype = a.dtype.as_numpy_dtype if rcond is None: def get_dim_size(dim): if tf.compat.dimension_value(a.shape[dim]) is not None: return tf.compat.dimension_value(a.shape[dim]) return tf.shape(input=a)[dim] num_rows = get_dim_size(-2) num_cols = get_dim_size(-1) if isinstance(num_rows, int) and isinstance(num_cols, int): max_rows_cols = float(max(num_rows, num_cols)) else: max_rows_cols = tf.cast(tf.maximum(num_rows, num_cols), dtype) rcond = 10. * max_rows_cols * np.finfo(dtype).eps rcond = tf.convert_to_tensor(value=rcond, dtype=dtype, name='rcond') # Calculate pseudo inverse via SVD. # Note: if a is symmetric then u == v. (We might observe additional # performance by explicitly setting `v = u` in such cases.) 
[ singular_values, # Sigma left_singular_vectors, # U right_singular_vectors, # V ] = tf.linalg.svd(a, full_matrices=False, compute_uv=True) # Saturate small singular values to inf. This has the effect of make # `1. / s = 0.` while not resulting in `NaN` gradients. cutoff = rcond * tf.reduce_max(input_tensor=singular_values, axis=-1) singular_values = tf.where( singular_values > cutoff[..., tf.newaxis], singular_values, tf.fill(tf.shape(input=singular_values), np.array(np.inf, dtype))) # Although `a == tf.matmul(u, s * v, transpose_b=True)` we swap # `u` and `v` here so that `tf.matmul(pinv(A), A) = tf.eye()`, i.e., # a matrix inverse has "transposed" semantics. a_pinv = tf.matmul( right_singular_vectors / singular_values[..., tf.newaxis, :], left_singular_vectors, adjoint_b=True) if a.shape.ndims is not None: a_pinv.set_shape(a.shape[:-2].concatenate([a.shape[-1], a.shape[-2]])) return a_pinv
[ "Compute", "the", "Moore", "-", "Penrose", "pseudo", "-", "inverse", "of", "a", "matrix", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L323-L445
[ "def", "pinv", "(", "a", ",", "rcond", "=", "None", ",", "validate_args", "=", "False", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'pinv'", ",", "[", "a", ",", "rcond", "]", ")", ":", "a", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "a", ",", "name", "=", "'a'", ")", "assertions", "=", "_maybe_validate_matrix", "(", "a", ",", "validate_args", ")", "if", "assertions", ":", "with", "tf", ".", "control_dependencies", "(", "assertions", ")", ":", "a", "=", "tf", ".", "identity", "(", "a", ")", "dtype", "=", "a", ".", "dtype", ".", "as_numpy_dtype", "if", "rcond", "is", "None", ":", "def", "get_dim_size", "(", "dim", ")", ":", "if", "tf", ".", "compat", ".", "dimension_value", "(", "a", ".", "shape", "[", "dim", "]", ")", "is", "not", "None", ":", "return", "tf", ".", "compat", ".", "dimension_value", "(", "a", ".", "shape", "[", "dim", "]", ")", "return", "tf", ".", "shape", "(", "input", "=", "a", ")", "[", "dim", "]", "num_rows", "=", "get_dim_size", "(", "-", "2", ")", "num_cols", "=", "get_dim_size", "(", "-", "1", ")", "if", "isinstance", "(", "num_rows", ",", "int", ")", "and", "isinstance", "(", "num_cols", ",", "int", ")", ":", "max_rows_cols", "=", "float", "(", "max", "(", "num_rows", ",", "num_cols", ")", ")", "else", ":", "max_rows_cols", "=", "tf", ".", "cast", "(", "tf", ".", "maximum", "(", "num_rows", ",", "num_cols", ")", ",", "dtype", ")", "rcond", "=", "10.", "*", "max_rows_cols", "*", "np", ".", "finfo", "(", "dtype", ")", ".", "eps", "rcond", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "rcond", ",", "dtype", "=", "dtype", ",", "name", "=", "'rcond'", ")", "# Calculate pseudo inverse via SVD.", "# Note: if a is symmetric then u == v. (We might observe additional", "# performance by explicitly setting `v = u` in such cases.)", "[", "singular_values", ",", "# Sigma", "left_singular_vectors", ",", "# U", "right_singular_vectors", ",", "# V", "]", "=", "tf", ".", "linalg", ".", "svd", "(", "a", ",", "full_matrices", "=", "False", ",", "compute_uv", "=", "True", ")", "# Saturate small singular values to inf. This has the effect of make", "# `1. / s = 0.` while not resulting in `NaN` gradients.", "cutoff", "=", "rcond", "*", "tf", ".", "reduce_max", "(", "input_tensor", "=", "singular_values", ",", "axis", "=", "-", "1", ")", "singular_values", "=", "tf", ".", "where", "(", "singular_values", ">", "cutoff", "[", "...", ",", "tf", ".", "newaxis", "]", ",", "singular_values", ",", "tf", ".", "fill", "(", "tf", ".", "shape", "(", "input", "=", "singular_values", ")", ",", "np", ".", "array", "(", "np", ".", "inf", ",", "dtype", ")", ")", ")", "# Although `a == tf.matmul(u, s * v, transpose_b=True)` we swap", "# `u` and `v` here so that `tf.matmul(pinv(A), A) = tf.eye()`, i.e.,", "# a matrix inverse has \"transposed\" semantics.", "a_pinv", "=", "tf", ".", "matmul", "(", "right_singular_vectors", "/", "singular_values", "[", "...", ",", "tf", ".", "newaxis", ",", ":", "]", ",", "left_singular_vectors", ",", "adjoint_b", "=", "True", ")", "if", "a", ".", "shape", ".", "ndims", "is", "not", "None", ":", "a_pinv", ".", "set_shape", "(", "a", ".", "shape", "[", ":", "-", "2", "]", ".", "concatenate", "(", "[", "a", ".", "shape", "[", "-", "1", "]", ",", "a", ".", "shape", "[", "-", "2", "]", "]", ")", ")", "return", "a_pinv" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
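The SVD recipe in the `pinv` docstring (`A_pinv = V @ inv(Sigma) @ U^T`, with singular values below the cutoff dropped rather than inverted) can be cross-checked directly in NumPy. This is only a check of the math with the default `rcond` described above, not the TFP graph implementation:

```python
import numpy as np

a = np.array([[1., 0.4, 0.5, 1.],
              [0.4, 0.2, 0.25, 2.],
              [0.5, 0.25, 0.35, 3.]])
u, s, vt = np.linalg.svd(a, full_matrices=False)
rcond = 10. * max(a.shape) * np.finfo(a.dtype).eps
s_inv = np.where(s > rcond * s.max(), 1. / s, 0.)   # drop tiny singular values
a_pinv = (vt.T * s_inv) @ u.T                       # V @ diag(1/sigma) @ U.T
np.testing.assert_allclose(a_pinv, np.linalg.pinv(a), atol=1e-10)
```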
test
lu_solve
Solves systems of linear eqns `A X = RHS`, given LU factorizations. Note: this function does not verify the implied matrix is actually invertible nor is this condition checked even when `validate_args=True`. Args: lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`. perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`. rhs: Matrix-shaped float `Tensor` representing targets for which to solve; `A X = RHS`. To handle vector cases, use: `lu_solve(..., rhs[..., tf.newaxis])[..., 0]`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. Note: this function does not verify the implied matrix is actually invertible, even when `validate_args=True`. Default value: `False` (i.e., don't validate arguments). name: Python `str` name given to ops managed by this object. Default value: `None` (i.e., "lu_solve"). Returns: x: The `X` in `A @ X = RHS`. #### Examples ```python import numpy as np import tensorflow as tf import tensorflow_probability as tfp x = [[[1., 2], [3, 4]], [[7, 8], [3, 4]]] inv_x = tfp.math.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2)) tf.assert_near(tf.matrix_inverse(x), inv_x) # ==> True ```
tensorflow_probability/python/math/linalg.py
def lu_solve(lower_upper, perm, rhs, validate_args=False, name=None): """Solves systems of linear eqns `A X = RHS`, given LU factorizations. Note: this function does not verify the implied matrix is actually invertible nor is this condition checked even when `validate_args=True`. Args: lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`. perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`. rhs: Matrix-shaped float `Tensor` representing targets for which to solve; `A X = RHS`. To handle vector cases, use: `lu_solve(..., rhs[..., tf.newaxis])[..., 0]`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. Note: this function does not verify the implied matrix is actually invertible, even when `validate_args=True`. Default value: `False` (i.e., don't validate arguments). name: Python `str` name given to ops managed by this object. Default value: `None` (i.e., "lu_solve"). Returns: x: The `X` in `A @ X = RHS`. #### Examples ```python import numpy as np import tensorflow as tf import tensorflow_probability as tfp x = [[[1., 2], [3, 4]], [[7, 8], [3, 4]]] inv_x = tfp.math.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2)) tf.assert_near(tf.matrix_inverse(x), inv_x) # ==> True ``` """ with tf.compat.v1.name_scope(name, 'lu_solve', [lower_upper, perm, rhs]): lower_upper = tf.convert_to_tensor( value=lower_upper, dtype_hint=tf.float32, name='lower_upper') perm = tf.convert_to_tensor(value=perm, dtype_hint=tf.int32, name='perm') rhs = tf.convert_to_tensor( value=rhs, dtype_hint=lower_upper.dtype, name='rhs') assertions = _lu_solve_assertions(lower_upper, perm, rhs, validate_args) if assertions: with tf.control_dependencies(assertions): lower_upper = tf.identity(lower_upper) perm = tf.identity(perm) rhs = tf.identity(rhs) if rhs.shape.ndims == 2 and perm.shape.ndims == 1: # Both rhs and perm have scalar batch_shape. permuted_rhs = tf.gather(rhs, perm, axis=-2) else: # Either rhs or perm have non-scalar batch_shape or we can't determine # this information statically. rhs_shape = tf.shape(input=rhs) broadcast_batch_shape = tf.broadcast_dynamic_shape( rhs_shape[:-2], tf.shape(input=perm)[:-1]) d, m = rhs_shape[-2], rhs_shape[-1] rhs_broadcast_shape = tf.concat([broadcast_batch_shape, [d, m]], axis=0) # Tile out rhs. broadcast_rhs = tf.broadcast_to(rhs, rhs_broadcast_shape) broadcast_rhs = tf.reshape(broadcast_rhs, [-1, d, m]) # Tile out perm and add batch indices. broadcast_perm = tf.broadcast_to(perm, rhs_broadcast_shape[:-1]) broadcast_perm = tf.reshape(broadcast_perm, [-1, d]) broadcast_batch_size = tf.reduce_prod(input_tensor=broadcast_batch_shape) broadcast_batch_indices = tf.broadcast_to( tf.range(broadcast_batch_size)[:, tf.newaxis], [broadcast_batch_size, d]) broadcast_perm = tf.stack([broadcast_batch_indices, broadcast_perm], axis=-1) permuted_rhs = tf.gather_nd(broadcast_rhs, broadcast_perm) permuted_rhs = tf.reshape(permuted_rhs, rhs_broadcast_shape) lower = tf.linalg.set_diag( tf.linalg.band_part(lower_upper, num_lower=-1, num_upper=0), tf.ones(tf.shape(input=lower_upper)[:-1], dtype=lower_upper.dtype)) return linear_operator_util.matrix_triangular_solve_with_broadcast( lower_upper, # Only upper is accessed. linear_operator_util.matrix_triangular_solve_with_broadcast( lower, permuted_rhs), lower=False)
def lu_solve(lower_upper, perm, rhs, validate_args=False, name=None): """Solves systems of linear eqns `A X = RHS`, given LU factorizations. Note: this function does not verify the implied matrix is actually invertible nor is this condition checked even when `validate_args=True`. Args: lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`. perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`. rhs: Matrix-shaped float `Tensor` representing targets for which to solve; `A X = RHS`. To handle vector cases, use: `lu_solve(..., rhs[..., tf.newaxis])[..., 0]`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. Note: this function does not verify the implied matrix is actually invertible, even when `validate_args=True`. Default value: `False` (i.e., don't validate arguments). name: Python `str` name given to ops managed by this object. Default value: `None` (i.e., "lu_solve"). Returns: x: The `X` in `A @ X = RHS`. #### Examples ```python import numpy as np import tensorflow as tf import tensorflow_probability as tfp x = [[[1., 2], [3, 4]], [[7, 8], [3, 4]]] inv_x = tfp.math.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2)) tf.assert_near(tf.matrix_inverse(x), inv_x) # ==> True ``` """ with tf.compat.v1.name_scope(name, 'lu_solve', [lower_upper, perm, rhs]): lower_upper = tf.convert_to_tensor( value=lower_upper, dtype_hint=tf.float32, name='lower_upper') perm = tf.convert_to_tensor(value=perm, dtype_hint=tf.int32, name='perm') rhs = tf.convert_to_tensor( value=rhs, dtype_hint=lower_upper.dtype, name='rhs') assertions = _lu_solve_assertions(lower_upper, perm, rhs, validate_args) if assertions: with tf.control_dependencies(assertions): lower_upper = tf.identity(lower_upper) perm = tf.identity(perm) rhs = tf.identity(rhs) if rhs.shape.ndims == 2 and perm.shape.ndims == 1: # Both rhs and perm have scalar batch_shape. permuted_rhs = tf.gather(rhs, perm, axis=-2) else: # Either rhs or perm have non-scalar batch_shape or we can't determine # this information statically. rhs_shape = tf.shape(input=rhs) broadcast_batch_shape = tf.broadcast_dynamic_shape( rhs_shape[:-2], tf.shape(input=perm)[:-1]) d, m = rhs_shape[-2], rhs_shape[-1] rhs_broadcast_shape = tf.concat([broadcast_batch_shape, [d, m]], axis=0) # Tile out rhs. broadcast_rhs = tf.broadcast_to(rhs, rhs_broadcast_shape) broadcast_rhs = tf.reshape(broadcast_rhs, [-1, d, m]) # Tile out perm and add batch indices. broadcast_perm = tf.broadcast_to(perm, rhs_broadcast_shape[:-1]) broadcast_perm = tf.reshape(broadcast_perm, [-1, d]) broadcast_batch_size = tf.reduce_prod(input_tensor=broadcast_batch_shape) broadcast_batch_indices = tf.broadcast_to( tf.range(broadcast_batch_size)[:, tf.newaxis], [broadcast_batch_size, d]) broadcast_perm = tf.stack([broadcast_batch_indices, broadcast_perm], axis=-1) permuted_rhs = tf.gather_nd(broadcast_rhs, broadcast_perm) permuted_rhs = tf.reshape(permuted_rhs, rhs_broadcast_shape) lower = tf.linalg.set_diag( tf.linalg.band_part(lower_upper, num_lower=-1, num_upper=0), tf.ones(tf.shape(input=lower_upper)[:-1], dtype=lower_upper.dtype)) return linear_operator_util.matrix_triangular_solve_with_broadcast( lower_upper, # Only upper is accessed. linear_operator_util.matrix_triangular_solve_with_broadcast( lower, permuted_rhs), lower=False)
[ "Solves", "systems", "of", "linear", "eqns", "A", "X", "=", "RHS", "given", "LU", "factorizations", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L448-L543
[ "def", "lu_solve", "(", "lower_upper", ",", "perm", ",", "rhs", ",", "validate_args", "=", "False", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'lu_solve'", ",", "[", "lower_upper", ",", "perm", ",", "rhs", "]", ")", ":", "lower_upper", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "lower_upper", ",", "dtype_hint", "=", "tf", ".", "float32", ",", "name", "=", "'lower_upper'", ")", "perm", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "perm", ",", "dtype_hint", "=", "tf", ".", "int32", ",", "name", "=", "'perm'", ")", "rhs", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "rhs", ",", "dtype_hint", "=", "lower_upper", ".", "dtype", ",", "name", "=", "'rhs'", ")", "assertions", "=", "_lu_solve_assertions", "(", "lower_upper", ",", "perm", ",", "rhs", ",", "validate_args", ")", "if", "assertions", ":", "with", "tf", ".", "control_dependencies", "(", "assertions", ")", ":", "lower_upper", "=", "tf", ".", "identity", "(", "lower_upper", ")", "perm", "=", "tf", ".", "identity", "(", "perm", ")", "rhs", "=", "tf", ".", "identity", "(", "rhs", ")", "if", "rhs", ".", "shape", ".", "ndims", "==", "2", "and", "perm", ".", "shape", ".", "ndims", "==", "1", ":", "# Both rhs and perm have scalar batch_shape.", "permuted_rhs", "=", "tf", ".", "gather", "(", "rhs", ",", "perm", ",", "axis", "=", "-", "2", ")", "else", ":", "# Either rhs or perm have non-scalar batch_shape or we can't determine", "# this information statically.", "rhs_shape", "=", "tf", ".", "shape", "(", "input", "=", "rhs", ")", "broadcast_batch_shape", "=", "tf", ".", "broadcast_dynamic_shape", "(", "rhs_shape", "[", ":", "-", "2", "]", ",", "tf", ".", "shape", "(", "input", "=", "perm", ")", "[", ":", "-", "1", "]", ")", "d", ",", "m", "=", "rhs_shape", "[", "-", "2", "]", ",", "rhs_shape", "[", "-", "1", "]", "rhs_broadcast_shape", "=", "tf", ".", "concat", "(", "[", "broadcast_batch_shape", ",", "[", "d", ",", "m", "]", "]", ",", "axis", "=", "0", ")", "# Tile out rhs.", "broadcast_rhs", "=", "tf", ".", "broadcast_to", "(", "rhs", ",", "rhs_broadcast_shape", ")", "broadcast_rhs", "=", "tf", ".", "reshape", "(", "broadcast_rhs", ",", "[", "-", "1", ",", "d", ",", "m", "]", ")", "# Tile out perm and add batch indices.", "broadcast_perm", "=", "tf", ".", "broadcast_to", "(", "perm", ",", "rhs_broadcast_shape", "[", ":", "-", "1", "]", ")", "broadcast_perm", "=", "tf", ".", "reshape", "(", "broadcast_perm", ",", "[", "-", "1", ",", "d", "]", ")", "broadcast_batch_size", "=", "tf", ".", "reduce_prod", "(", "input_tensor", "=", "broadcast_batch_shape", ")", "broadcast_batch_indices", "=", "tf", ".", "broadcast_to", "(", "tf", ".", "range", "(", "broadcast_batch_size", ")", "[", ":", ",", "tf", ".", "newaxis", "]", ",", "[", "broadcast_batch_size", ",", "d", "]", ")", "broadcast_perm", "=", "tf", ".", "stack", "(", "[", "broadcast_batch_indices", ",", "broadcast_perm", "]", ",", "axis", "=", "-", "1", ")", "permuted_rhs", "=", "tf", ".", "gather_nd", "(", "broadcast_rhs", ",", "broadcast_perm", ")", "permuted_rhs", "=", "tf", ".", "reshape", "(", "permuted_rhs", ",", "rhs_broadcast_shape", ")", "lower", "=", "tf", ".", "linalg", ".", "set_diag", "(", "tf", ".", "linalg", ".", "band_part", "(", "lower_upper", ",", "num_lower", "=", "-", "1", ",", "num_upper", "=", "0", ")", ",", "tf", ".", "ones", "(", "tf", ".", "shape", "(", "input", "=", "lower_upper", ")", "[", ":", "-", "1", "]", ",", "dtype", "=", "lower_upper", ".", "dtype", ")", ")", 
"return", "linear_operator_util", ".", "matrix_triangular_solve_with_broadcast", "(", "lower_upper", ",", "# Only upper is accessed.", "linear_operator_util", ".", "matrix_triangular_solve_with_broadcast", "(", "lower", ",", "permuted_rhs", ")", ",", "lower", "=", "False", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
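Beyond the matrix-inverse example in the `lu_solve` docstring, a typical use is factoring once and reusing the factorization for several right-hand sides. A hypothetical sketch (eager TF 2.x assumed; shapes and values are invented for the example):

```python
import tensorflow as tf
import tensorflow_probability as tfp

x = tf.constant([[3., 4.], [1., 2.]])
lu, perm = tf.linalg.lu(x)                     # factor once
rhs1 = tf.constant([[1.], [0.]])
rhs2 = tf.constant([[0.], [1.]])
sol1 = tfp.math.lu_solve(lu, perm, rhs1)       # solves x @ sol1 = rhs1
sol2 = tfp.math.lu_solve(lu, perm, rhs2)       # solves x @ sol2 = rhs2
tf.debugging.assert_near(tf.matmul(x, sol1), rhs1)
tf.debugging.assert_near(tf.matmul(x, sol2), rhs2)
```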
test
lu_matrix_inverse
Computes a matrix inverse given the matrix's LU decomposition. This op is conceptually identical to, ````python inv_X = tf.lu_matrix_inverse(*tf.linalg.lu(X)) tf.assert_near(tf.matrix_inverse(X), inv_X) # ==> True ``` Note: this function does not verify the implied matrix is actually invertible nor is this condition checked even when `validate_args=True`. Args: lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`. perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. Note: this function does not verify the implied matrix is actually invertible, even when `validate_args=True`. Default value: `False` (i.e., don't validate arguments). name: Python `str` name given to ops managed by this object. Default value: `None` (i.e., "lu_matrix_inverse"). Returns: inv_x: The matrix_inv, i.e., `tf.matrix_inverse(tfp.math.lu_reconstruct(lu, perm))`. #### Examples ```python import numpy as np import tensorflow as tf import tensorflow_probability as tfp x = [[[3., 4], [1, 2]], [[7., 8], [3, 4]]] inv_x = tfp.math.lu_matrix_inverse(*tf.linalg.lu(x)) tf.assert_near(tf.matrix_inverse(x), inv_x) # ==> True ```
tensorflow_probability/python/math/linalg.py
def lu_matrix_inverse(lower_upper, perm, validate_args=False, name=None): """Computes a matrix inverse given the matrix's LU decomposition. This op is conceptually identical to, ````python inv_X = tf.lu_matrix_inverse(*tf.linalg.lu(X)) tf.assert_near(tf.matrix_inverse(X), inv_X) # ==> True ``` Note: this function does not verify the implied matrix is actually invertible nor is this condition checked even when `validate_args=True`. Args: lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`. perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. Note: this function does not verify the implied matrix is actually invertible, even when `validate_args=True`. Default value: `False` (i.e., don't validate arguments). name: Python `str` name given to ops managed by this object. Default value: `None` (i.e., "lu_matrix_inverse"). Returns: inv_x: The matrix_inv, i.e., `tf.matrix_inverse(tfp.math.lu_reconstruct(lu, perm))`. #### Examples ```python import numpy as np import tensorflow as tf import tensorflow_probability as tfp x = [[[3., 4], [1, 2]], [[7., 8], [3, 4]]] inv_x = tfp.math.lu_matrix_inverse(*tf.linalg.lu(x)) tf.assert_near(tf.matrix_inverse(x), inv_x) # ==> True ``` """ with tf.compat.v1.name_scope(name, 'lu_matrix_inverse', [lower_upper, perm]): lower_upper = tf.convert_to_tensor( value=lower_upper, dtype_hint=tf.float32, name='lower_upper') perm = tf.convert_to_tensor(value=perm, dtype_hint=tf.int32, name='perm') assertions = _lu_reconstruct_assertions(lower_upper, perm, validate_args) if assertions: with tf.control_dependencies(assertions): lower_upper = tf.identity(lower_upper) perm = tf.identity(perm) shape = tf.shape(input=lower_upper) return lu_solve( lower_upper, perm, rhs=tf.eye(shape[-1], batch_shape=shape[:-2], dtype=lower_upper.dtype), validate_args=False)
def lu_matrix_inverse(lower_upper, perm, validate_args=False, name=None): """Computes a matrix inverse given the matrix's LU decomposition. This op is conceptually identical to, ````python inv_X = tf.lu_matrix_inverse(*tf.linalg.lu(X)) tf.assert_near(tf.matrix_inverse(X), inv_X) # ==> True ``` Note: this function does not verify the implied matrix is actually invertible nor is this condition checked even when `validate_args=True`. Args: lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`. perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. Note: this function does not verify the implied matrix is actually invertible, even when `validate_args=True`. Default value: `False` (i.e., don't validate arguments). name: Python `str` name given to ops managed by this object. Default value: `None` (i.e., "lu_matrix_inverse"). Returns: inv_x: The matrix_inv, i.e., `tf.matrix_inverse(tfp.math.lu_reconstruct(lu, perm))`. #### Examples ```python import numpy as np import tensorflow as tf import tensorflow_probability as tfp x = [[[3., 4], [1, 2]], [[7., 8], [3, 4]]] inv_x = tfp.math.lu_matrix_inverse(*tf.linalg.lu(x)) tf.assert_near(tf.matrix_inverse(x), inv_x) # ==> True ``` """ with tf.compat.v1.name_scope(name, 'lu_matrix_inverse', [lower_upper, perm]): lower_upper = tf.convert_to_tensor( value=lower_upper, dtype_hint=tf.float32, name='lower_upper') perm = tf.convert_to_tensor(value=perm, dtype_hint=tf.int32, name='perm') assertions = _lu_reconstruct_assertions(lower_upper, perm, validate_args) if assertions: with tf.control_dependencies(assertions): lower_upper = tf.identity(lower_upper) perm = tf.identity(perm) shape = tf.shape(input=lower_upper) return lu_solve( lower_upper, perm, rhs=tf.eye(shape[-1], batch_shape=shape[:-2], dtype=lower_upper.dtype), validate_args=False)
[ "Computes", "a", "matrix", "inverse", "given", "the", "matrix", "s", "LU", "decomposition", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L546-L605
[ "def", "lu_matrix_inverse", "(", "lower_upper", ",", "perm", ",", "validate_args", "=", "False", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'lu_matrix_inverse'", ",", "[", "lower_upper", ",", "perm", "]", ")", ":", "lower_upper", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "lower_upper", ",", "dtype_hint", "=", "tf", ".", "float32", ",", "name", "=", "'lower_upper'", ")", "perm", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "perm", ",", "dtype_hint", "=", "tf", ".", "int32", ",", "name", "=", "'perm'", ")", "assertions", "=", "_lu_reconstruct_assertions", "(", "lower_upper", ",", "perm", ",", "validate_args", ")", "if", "assertions", ":", "with", "tf", ".", "control_dependencies", "(", "assertions", ")", ":", "lower_upper", "=", "tf", ".", "identity", "(", "lower_upper", ")", "perm", "=", "tf", ".", "identity", "(", "perm", ")", "shape", "=", "tf", ".", "shape", "(", "input", "=", "lower_upper", ")", "return", "lu_solve", "(", "lower_upper", ",", "perm", ",", "rhs", "=", "tf", ".", "eye", "(", "shape", "[", "-", "1", "]", ",", "batch_shape", "=", "shape", "[", ":", "-", "2", "]", ",", "dtype", "=", "lower_upper", ".", "dtype", ")", ",", "validate_args", "=", "False", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
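A minimal usage sketch for `lu_matrix_inverse` (eager execution and the 2x2 values are illustrative assumptions; `lu_solve` is the sibling routine the function body dispatches to):

```python
import tensorflow as tf
import tensorflow_probability as tfp

x = tf.constant([[3., 4.], [1., 2.]])
lower_upper, perm = tf.linalg.lu(x)

# lu_matrix_inverse is conceptually lu_solve against an identity right-hand side.
inv_direct = tfp.math.lu_matrix_inverse(lower_upper, perm)
inv_via_solve = tfp.math.lu_solve(lower_upper, perm, rhs=tf.eye(2))
# Both agree with tf.linalg.inv(x) up to floating-point error.
```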
test
lu_reconstruct
The inverse LU decomposition, `X == lu_reconstruct(*tf.linalg.lu(X))`.

Args:
  lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if
    `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`.
  perm: `p` as returned by `tf.linalg.lu`, i.e., if
    `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`.
  validate_args: Python `bool` indicating whether arguments should be checked
    for correctness.
    Default value: `False` (i.e., don't validate arguments).
  name: Python `str` name given to ops managed by this object.
    Default value: `None` (i.e., "lu_reconstruct").

Returns:
  x: The original input to `tf.linalg.lu`, i.e., `x` as in,
    `lu_reconstruct(*tf.linalg.lu(x))`.

#### Examples

```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

x = [[[3., 4], [1, 2]],
     [[7., 8], [3, 4]]]
x_reconstructed = tfp.math.lu_reconstruct(*tf.linalg.lu(x))
tf.assert_near(x, x_reconstructed)
# ==> True
```
tensorflow_probability/python/math/linalg.py
def lu_reconstruct(lower_upper, perm, validate_args=False, name=None): """The inverse LU decomposition, `X == lu_reconstruct(*tf.linalg.lu(X))`. Args: lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`. perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. Default value: `False` (i.e., don't validate arguments). name: Python `str` name given to ops managed by this object. Default value: `None` (i.e., "lu_reconstruct"). Returns: x: The original input to `tf.linalg.lu`, i.e., `x` as in, `lu_reconstruct(*tf.linalg.lu(x))`. #### Examples ```python import numpy as np import tensorflow as tf import tensorflow_probability as tfp x = [[[3., 4], [1, 2]], [[7., 8], [3, 4]]] x_reconstructed = tfp.math.lu_reconstruct(*tf.linalg.lu(x)) tf.assert_near(x, x_reconstructed) # ==> True ``` """ with tf.compat.v1.name_scope(name, 'lu_reconstruct', [lower_upper, perm]): lower_upper = tf.convert_to_tensor( value=lower_upper, dtype_hint=tf.float32, name='lower_upper') perm = tf.convert_to_tensor(value=perm, dtype_hint=tf.int32, name='perm') assertions = _lu_reconstruct_assertions(lower_upper, perm, validate_args) if assertions: with tf.control_dependencies(assertions): lower_upper = tf.identity(lower_upper) perm = tf.identity(perm) shape = tf.shape(input=lower_upper) lower = tf.linalg.set_diag( tf.linalg.band_part(lower_upper, num_lower=-1, num_upper=0), tf.ones(shape[:-1], dtype=lower_upper.dtype)) upper = tf.linalg.band_part(lower_upper, num_lower=0, num_upper=-1) x = tf.matmul(lower, upper) if lower_upper.shape.ndims is None or lower_upper.shape.ndims != 2: # We either don't know the batch rank or there are >0 batch dims. batch_size = tf.reduce_prod(input_tensor=shape[:-2]) d = shape[-1] x = tf.reshape(x, [batch_size, d, d]) perm = tf.reshape(perm, [batch_size, d]) perm = tf.map_fn(tf.math.invert_permutation, perm) batch_indices = tf.broadcast_to( tf.range(batch_size)[:, tf.newaxis], [batch_size, d]) x = tf.gather_nd(x, tf.stack([batch_indices, perm], axis=-1)) x = tf.reshape(x, shape) else: x = tf.gather(x, tf.math.invert_permutation(perm)) x.set_shape(lower_upper.shape) return x
def lu_reconstruct(lower_upper, perm, validate_args=False, name=None): """The inverse LU decomposition, `X == lu_reconstruct(*tf.linalg.lu(X))`. Args: lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`. perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. Default value: `False` (i.e., don't validate arguments). name: Python `str` name given to ops managed by this object. Default value: `None` (i.e., "lu_reconstruct"). Returns: x: The original input to `tf.linalg.lu`, i.e., `x` as in, `lu_reconstruct(*tf.linalg.lu(x))`. #### Examples ```python import numpy as np import tensorflow as tf import tensorflow_probability as tfp x = [[[3., 4], [1, 2]], [[7., 8], [3, 4]]] x_reconstructed = tfp.math.lu_reconstruct(*tf.linalg.lu(x)) tf.assert_near(x, x_reconstructed) # ==> True ``` """ with tf.compat.v1.name_scope(name, 'lu_reconstruct', [lower_upper, perm]): lower_upper = tf.convert_to_tensor( value=lower_upper, dtype_hint=tf.float32, name='lower_upper') perm = tf.convert_to_tensor(value=perm, dtype_hint=tf.int32, name='perm') assertions = _lu_reconstruct_assertions(lower_upper, perm, validate_args) if assertions: with tf.control_dependencies(assertions): lower_upper = tf.identity(lower_upper) perm = tf.identity(perm) shape = tf.shape(input=lower_upper) lower = tf.linalg.set_diag( tf.linalg.band_part(lower_upper, num_lower=-1, num_upper=0), tf.ones(shape[:-1], dtype=lower_upper.dtype)) upper = tf.linalg.band_part(lower_upper, num_lower=0, num_upper=-1) x = tf.matmul(lower, upper) if lower_upper.shape.ndims is None or lower_upper.shape.ndims != 2: # We either don't know the batch rank or there are >0 batch dims. batch_size = tf.reduce_prod(input_tensor=shape[:-2]) d = shape[-1] x = tf.reshape(x, [batch_size, d, d]) perm = tf.reshape(perm, [batch_size, d]) perm = tf.map_fn(tf.math.invert_permutation, perm) batch_indices = tf.broadcast_to( tf.range(batch_size)[:, tf.newaxis], [batch_size, d]) x = tf.gather_nd(x, tf.stack([batch_indices, perm], axis=-1)) x = tf.reshape(x, shape) else: x = tf.gather(x, tf.math.invert_permutation(perm)) x.set_shape(lower_upper.shape) return x
[ "The", "inverse", "LU", "decomposition", "X", "==", "lu_reconstruct", "(", "*", "tf", ".", "linalg", ".", "lu", "(", "X", "))", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L608-L676
[ "def", "lu_reconstruct", "(", "lower_upper", ",", "perm", ",", "validate_args", "=", "False", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'lu_reconstruct'", ",", "[", "lower_upper", ",", "perm", "]", ")", ":", "lower_upper", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "lower_upper", ",", "dtype_hint", "=", "tf", ".", "float32", ",", "name", "=", "'lower_upper'", ")", "perm", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "perm", ",", "dtype_hint", "=", "tf", ".", "int32", ",", "name", "=", "'perm'", ")", "assertions", "=", "_lu_reconstruct_assertions", "(", "lower_upper", ",", "perm", ",", "validate_args", ")", "if", "assertions", ":", "with", "tf", ".", "control_dependencies", "(", "assertions", ")", ":", "lower_upper", "=", "tf", ".", "identity", "(", "lower_upper", ")", "perm", "=", "tf", ".", "identity", "(", "perm", ")", "shape", "=", "tf", ".", "shape", "(", "input", "=", "lower_upper", ")", "lower", "=", "tf", ".", "linalg", ".", "set_diag", "(", "tf", ".", "linalg", ".", "band_part", "(", "lower_upper", ",", "num_lower", "=", "-", "1", ",", "num_upper", "=", "0", ")", ",", "tf", ".", "ones", "(", "shape", "[", ":", "-", "1", "]", ",", "dtype", "=", "lower_upper", ".", "dtype", ")", ")", "upper", "=", "tf", ".", "linalg", ".", "band_part", "(", "lower_upper", ",", "num_lower", "=", "0", ",", "num_upper", "=", "-", "1", ")", "x", "=", "tf", ".", "matmul", "(", "lower", ",", "upper", ")", "if", "lower_upper", ".", "shape", ".", "ndims", "is", "None", "or", "lower_upper", ".", "shape", ".", "ndims", "!=", "2", ":", "# We either don't know the batch rank or there are >0 batch dims.", "batch_size", "=", "tf", ".", "reduce_prod", "(", "input_tensor", "=", "shape", "[", ":", "-", "2", "]", ")", "d", "=", "shape", "[", "-", "1", "]", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "batch_size", ",", "d", ",", "d", "]", ")", "perm", "=", "tf", ".", "reshape", "(", "perm", ",", "[", "batch_size", ",", "d", "]", ")", "perm", "=", "tf", ".", "map_fn", "(", "tf", ".", "math", ".", "invert_permutation", ",", "perm", ")", "batch_indices", "=", "tf", ".", "broadcast_to", "(", "tf", ".", "range", "(", "batch_size", ")", "[", ":", ",", "tf", ".", "newaxis", "]", ",", "[", "batch_size", ",", "d", "]", ")", "x", "=", "tf", ".", "gather_nd", "(", "x", ",", "tf", ".", "stack", "(", "[", "batch_indices", ",", "perm", "]", ",", "axis", "=", "-", "1", ")", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "shape", ")", "else", ":", "x", "=", "tf", ".", "gather", "(", "x", ",", "tf", ".", "math", ".", "invert_permutation", "(", "perm", ")", ")", "x", ".", "set_shape", "(", "lower_upper", ".", "shape", ")", "return", "x" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
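To make the `lower_upper = L + U - eye` packing concrete, here is a hedged non-batched sketch of the unpacking steps `lu_reconstruct` performs (mirroring its body) before undoing the row permutation; the input values are illustrative:

```python
import tensorflow as tf

x = tf.constant([[3., 4.], [1., 2.]])
lower_upper, perm = tf.linalg.lu(x)

# Split the packed factors: L gets a unit diagonal, U keeps the stored diagonal.
lower = tf.linalg.set_diag(
    tf.linalg.band_part(lower_upper, num_lower=-1, num_upper=0),
    tf.ones([2], dtype=lower_upper.dtype))
upper = tf.linalg.band_part(lower_upper, num_lower=0, num_upper=-1)

# Undo the row permutation recorded in `perm` to recover the original matrix.
x_reconstructed = tf.gather(tf.matmul(lower, upper),
                            tf.math.invert_permutation(perm))
```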
test
_lu_reconstruct_assertions
Returns list of assertions related to `lu_reconstruct` assumptions.
tensorflow_probability/python/math/linalg.py
def _lu_reconstruct_assertions(lower_upper, perm, validate_args): """Returns list of assertions related to `lu_reconstruct` assumptions.""" assertions = [] message = 'Input `lower_upper` must have at least 2 dimensions.' if lower_upper.shape.ndims is not None: if lower_upper.shape.ndims < 2: raise ValueError(message) elif validate_args: assertions.append( tf.compat.v1.assert_rank_at_least(lower_upper, rank=2, message=message)) message = '`rank(lower_upper)` must equal `rank(perm) + 1`' if lower_upper.shape.ndims is not None and perm.shape.ndims is not None: if lower_upper.shape.ndims != perm.shape.ndims + 1: raise ValueError(message) elif validate_args: assertions.append( tf.compat.v1.assert_rank( lower_upper, rank=tf.rank(perm) + 1, message=message)) message = '`lower_upper` must be square.' if lower_upper.shape[:-2].is_fully_defined(): if lower_upper.shape[-2] != lower_upper.shape[-1]: raise ValueError(message) elif validate_args: m, n = tf.split(tf.shape(input=lower_upper)[-2:], num_or_size_splits=2) assertions.append(tf.compat.v1.assert_equal(m, n, message=message)) return assertions
def _lu_reconstruct_assertions(lower_upper, perm, validate_args): """Returns list of assertions related to `lu_reconstruct` assumptions.""" assertions = [] message = 'Input `lower_upper` must have at least 2 dimensions.' if lower_upper.shape.ndims is not None: if lower_upper.shape.ndims < 2: raise ValueError(message) elif validate_args: assertions.append( tf.compat.v1.assert_rank_at_least(lower_upper, rank=2, message=message)) message = '`rank(lower_upper)` must equal `rank(perm) + 1`' if lower_upper.shape.ndims is not None and perm.shape.ndims is not None: if lower_upper.shape.ndims != perm.shape.ndims + 1: raise ValueError(message) elif validate_args: assertions.append( tf.compat.v1.assert_rank( lower_upper, rank=tf.rank(perm) + 1, message=message)) message = '`lower_upper` must be square.' if lower_upper.shape[:-2].is_fully_defined(): if lower_upper.shape[-2] != lower_upper.shape[-1]: raise ValueError(message) elif validate_args: m, n = tf.split(tf.shape(input=lower_upper)[-2:], num_or_size_splits=2) assertions.append(tf.compat.v1.assert_equal(m, n, message=message)) return assertions
[ "Returns", "list", "of", "assertions", "related", "to", "lu_reconstruct", "assumptions", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L679-L708
[ "def", "_lu_reconstruct_assertions", "(", "lower_upper", ",", "perm", ",", "validate_args", ")", ":", "assertions", "=", "[", "]", "message", "=", "'Input `lower_upper` must have at least 2 dimensions.'", "if", "lower_upper", ".", "shape", ".", "ndims", "is", "not", "None", ":", "if", "lower_upper", ".", "shape", ".", "ndims", "<", "2", ":", "raise", "ValueError", "(", "message", ")", "elif", "validate_args", ":", "assertions", ".", "append", "(", "tf", ".", "compat", ".", "v1", ".", "assert_rank_at_least", "(", "lower_upper", ",", "rank", "=", "2", ",", "message", "=", "message", ")", ")", "message", "=", "'`rank(lower_upper)` must equal `rank(perm) + 1`'", "if", "lower_upper", ".", "shape", ".", "ndims", "is", "not", "None", "and", "perm", ".", "shape", ".", "ndims", "is", "not", "None", ":", "if", "lower_upper", ".", "shape", ".", "ndims", "!=", "perm", ".", "shape", ".", "ndims", "+", "1", ":", "raise", "ValueError", "(", "message", ")", "elif", "validate_args", ":", "assertions", ".", "append", "(", "tf", ".", "compat", ".", "v1", ".", "assert_rank", "(", "lower_upper", ",", "rank", "=", "tf", ".", "rank", "(", "perm", ")", "+", "1", ",", "message", "=", "message", ")", ")", "message", "=", "'`lower_upper` must be square.'", "if", "lower_upper", ".", "shape", "[", ":", "-", "2", "]", ".", "is_fully_defined", "(", ")", ":", "if", "lower_upper", ".", "shape", "[", "-", "2", "]", "!=", "lower_upper", ".", "shape", "[", "-", "1", "]", ":", "raise", "ValueError", "(", "message", ")", "elif", "validate_args", ":", "m", ",", "n", "=", "tf", ".", "split", "(", "tf", ".", "shape", "(", "input", "=", "lower_upper", ")", "[", "-", "2", ":", "]", ",", "num_or_size_splits", "=", "2", ")", "assertions", ".", "append", "(", "tf", ".", "compat", ".", "v1", ".", "assert_equal", "(", "m", ",", "n", ",", "message", "=", "message", ")", ")", "return", "assertions" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
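The shape contract those assertions encode, restated with illustrative shapes: `lower_upper` must be square with rank at least 2, and `perm` must have exactly one fewer dimension.

```python
import tensorflow as tf

lower_upper = tf.zeros([3, 4, 4])        # batch of three 4x4 packed LU factors
perm = tf.zeros([3, 4], dtype=tf.int32)  # one permutation vector per matrix
# rank(lower_upper) == rank(perm) + 1 and the trailing dims match, so the
# static checks pass and no graph assertions are needed.
# A non-square input such as tf.zeros([3, 4, 5]) fails at graph-construction
# time with a ValueError because its static shape is fully known.
```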
test
_lu_solve_assertions
Returns list of assertions related to `lu_solve` assumptions.
tensorflow_probability/python/math/linalg.py
def _lu_solve_assertions(lower_upper, perm, rhs, validate_args): """Returns list of assertions related to `lu_solve` assumptions.""" assertions = _lu_reconstruct_assertions(lower_upper, perm, validate_args) message = 'Input `rhs` must have at least 2 dimensions.' if rhs.shape.ndims is not None: if rhs.shape.ndims < 2: raise ValueError(message) elif validate_args: assertions.append( tf.compat.v1.assert_rank_at_least(rhs, rank=2, message=message)) message = '`lower_upper.shape[-1]` must equal `rhs.shape[-1]`.' if (tf.compat.dimension_value(lower_upper.shape[-1]) is not None and tf.compat.dimension_value(rhs.shape[-2]) is not None): if lower_upper.shape[-1] != rhs.shape[-2]: raise ValueError(message) elif validate_args: assertions.append( tf.compat.v1.assert_equal( tf.shape(input=lower_upper)[-1], tf.shape(input=rhs)[-2], message=message)) return assertions
def _lu_solve_assertions(lower_upper, perm, rhs, validate_args): """Returns list of assertions related to `lu_solve` assumptions.""" assertions = _lu_reconstruct_assertions(lower_upper, perm, validate_args) message = 'Input `rhs` must have at least 2 dimensions.' if rhs.shape.ndims is not None: if rhs.shape.ndims < 2: raise ValueError(message) elif validate_args: assertions.append( tf.compat.v1.assert_rank_at_least(rhs, rank=2, message=message)) message = '`lower_upper.shape[-1]` must equal `rhs.shape[-1]`.' if (tf.compat.dimension_value(lower_upper.shape[-1]) is not None and tf.compat.dimension_value(rhs.shape[-2]) is not None): if lower_upper.shape[-1] != rhs.shape[-2]: raise ValueError(message) elif validate_args: assertions.append( tf.compat.v1.assert_equal( tf.shape(input=lower_upper)[-1], tf.shape(input=rhs)[-2], message=message)) return assertions
[ "Returns", "list", "of", "assertions", "related", "to", "lu_solve", "assumptions", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L711-L735
[ "def", "_lu_solve_assertions", "(", "lower_upper", ",", "perm", ",", "rhs", ",", "validate_args", ")", ":", "assertions", "=", "_lu_reconstruct_assertions", "(", "lower_upper", ",", "perm", ",", "validate_args", ")", "message", "=", "'Input `rhs` must have at least 2 dimensions.'", "if", "rhs", ".", "shape", ".", "ndims", "is", "not", "None", ":", "if", "rhs", ".", "shape", ".", "ndims", "<", "2", ":", "raise", "ValueError", "(", "message", ")", "elif", "validate_args", ":", "assertions", ".", "append", "(", "tf", ".", "compat", ".", "v1", ".", "assert_rank_at_least", "(", "rhs", ",", "rank", "=", "2", ",", "message", "=", "message", ")", ")", "message", "=", "'`lower_upper.shape[-1]` must equal `rhs.shape[-1]`.'", "if", "(", "tf", ".", "compat", ".", "dimension_value", "(", "lower_upper", ".", "shape", "[", "-", "1", "]", ")", "is", "not", "None", "and", "tf", ".", "compat", ".", "dimension_value", "(", "rhs", ".", "shape", "[", "-", "2", "]", ")", "is", "not", "None", ")", ":", "if", "lower_upper", ".", "shape", "[", "-", "1", "]", "!=", "rhs", ".", "shape", "[", "-", "2", "]", ":", "raise", "ValueError", "(", "message", ")", "elif", "validate_args", ":", "assertions", ".", "append", "(", "tf", ".", "compat", ".", "v1", ".", "assert_equal", "(", "tf", ".", "shape", "(", "input", "=", "lower_upper", ")", "[", "-", "1", "]", ",", "tf", ".", "shape", "(", "input", "=", "rhs", ")", "[", "-", "2", "]", ",", "message", "=", "message", ")", ")", "return", "assertions" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
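`lu_solve` layers one more requirement on top of the reconstruct checks: the right-hand side must be matrix-shaped and its row dimension must match the system size. A sketch of that contract with illustrative shapes:

```python
import tensorflow as tf

lower_upper = tf.zeros([4, 4])           # packed LU of a 4x4 system
perm = tf.zeros([4], dtype=tf.int32)

rhs_ok = tf.zeros([4, 2])    # two right-hand sides stacked as columns
rhs_bad = tf.zeros([3, 2])   # rhs.shape[-2] != lower_upper.shape[-1], so the
                             # static check raises a ValueError for this one
```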
test
sparse_or_dense_matmul
Returns (batched) matmul of a SparseTensor (or Tensor) with a Tensor. Args: sparse_or_dense_a: `SparseTensor` or `Tensor` representing a (batch of) matrices. dense_b: `Tensor` representing a (batch of) matrices, with the same batch shape as `sparse_or_dense_a`. The shape must be compatible with the shape of `sparse_or_dense_a` and kwargs. validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: "sparse_or_dense_matmul". **kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul` or `tf.matmul`. Returns: product: A dense (batch of) matrix-shaped Tensor of the same batch shape and dtype as `sparse_or_dense_a` and `dense_b`. If `sparse_or_dense_a` or `dense_b` is adjointed through `kwargs` then the shape is adjusted accordingly.
tensorflow_probability/python/math/linalg.py
def sparse_or_dense_matmul(sparse_or_dense_a, dense_b, validate_args=False, name=None, **kwargs): """Returns (batched) matmul of a SparseTensor (or Tensor) with a Tensor. Args: sparse_or_dense_a: `SparseTensor` or `Tensor` representing a (batch of) matrices. dense_b: `Tensor` representing a (batch of) matrices, with the same batch shape as `sparse_or_dense_a`. The shape must be compatible with the shape of `sparse_or_dense_a` and kwargs. validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: "sparse_or_dense_matmul". **kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul` or `tf.matmul`. Returns: product: A dense (batch of) matrix-shaped Tensor of the same batch shape and dtype as `sparse_or_dense_a` and `dense_b`. If `sparse_or_dense_a` or `dense_b` is adjointed through `kwargs` then the shape is adjusted accordingly. """ with tf.compat.v1.name_scope(name, 'sparse_or_dense_matmul', [sparse_or_dense_a, dense_b]): dense_b = tf.convert_to_tensor( value=dense_b, dtype_hint=tf.float32, name='dense_b') if validate_args: assert_a_rank_at_least_2 = tf.compat.v1.assert_rank_at_least( sparse_or_dense_a, rank=2, message='Input `sparse_or_dense_a` must have at least 2 dimensions.') assert_b_rank_at_least_2 = tf.compat.v1.assert_rank_at_least( dense_b, rank=2, message='Input `dense_b` must have at least 2 dimensions.') with tf.control_dependencies( [assert_a_rank_at_least_2, assert_b_rank_at_least_2]): sparse_or_dense_a = tf.identity(sparse_or_dense_a) dense_b = tf.identity(dense_b) if isinstance(sparse_or_dense_a, (tf.SparseTensor, tf.compat.v1.SparseTensorValue)): return _sparse_tensor_dense_matmul(sparse_or_dense_a, dense_b, **kwargs) else: return tf.matmul(sparse_or_dense_a, dense_b, **kwargs)
def sparse_or_dense_matmul(sparse_or_dense_a, dense_b, validate_args=False, name=None, **kwargs): """Returns (batched) matmul of a SparseTensor (or Tensor) with a Tensor. Args: sparse_or_dense_a: `SparseTensor` or `Tensor` representing a (batch of) matrices. dense_b: `Tensor` representing a (batch of) matrices, with the same batch shape as `sparse_or_dense_a`. The shape must be compatible with the shape of `sparse_or_dense_a` and kwargs. validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: "sparse_or_dense_matmul". **kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul` or `tf.matmul`. Returns: product: A dense (batch of) matrix-shaped Tensor of the same batch shape and dtype as `sparse_or_dense_a` and `dense_b`. If `sparse_or_dense_a` or `dense_b` is adjointed through `kwargs` then the shape is adjusted accordingly. """ with tf.compat.v1.name_scope(name, 'sparse_or_dense_matmul', [sparse_or_dense_a, dense_b]): dense_b = tf.convert_to_tensor( value=dense_b, dtype_hint=tf.float32, name='dense_b') if validate_args: assert_a_rank_at_least_2 = tf.compat.v1.assert_rank_at_least( sparse_or_dense_a, rank=2, message='Input `sparse_or_dense_a` must have at least 2 dimensions.') assert_b_rank_at_least_2 = tf.compat.v1.assert_rank_at_least( dense_b, rank=2, message='Input `dense_b` must have at least 2 dimensions.') with tf.control_dependencies( [assert_a_rank_at_least_2, assert_b_rank_at_least_2]): sparse_or_dense_a = tf.identity(sparse_or_dense_a) dense_b = tf.identity(dense_b) if isinstance(sparse_or_dense_a, (tf.SparseTensor, tf.compat.v1.SparseTensorValue)): return _sparse_tensor_dense_matmul(sparse_or_dense_a, dense_b, **kwargs) else: return tf.matmul(sparse_or_dense_a, dense_b, **kwargs)
[ "Returns", "(", "batched", ")", "matmul", "of", "a", "SparseTensor", "(", "or", "Tensor", ")", "with", "a", "Tensor", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L738-L788
[ "def", "sparse_or_dense_matmul", "(", "sparse_or_dense_a", ",", "dense_b", ",", "validate_args", "=", "False", ",", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'sparse_or_dense_matmul'", ",", "[", "sparse_or_dense_a", ",", "dense_b", "]", ")", ":", "dense_b", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "dense_b", ",", "dtype_hint", "=", "tf", ".", "float32", ",", "name", "=", "'dense_b'", ")", "if", "validate_args", ":", "assert_a_rank_at_least_2", "=", "tf", ".", "compat", ".", "v1", ".", "assert_rank_at_least", "(", "sparse_or_dense_a", ",", "rank", "=", "2", ",", "message", "=", "'Input `sparse_or_dense_a` must have at least 2 dimensions.'", ")", "assert_b_rank_at_least_2", "=", "tf", ".", "compat", ".", "v1", ".", "assert_rank_at_least", "(", "dense_b", ",", "rank", "=", "2", ",", "message", "=", "'Input `dense_b` must have at least 2 dimensions.'", ")", "with", "tf", ".", "control_dependencies", "(", "[", "assert_a_rank_at_least_2", ",", "assert_b_rank_at_least_2", "]", ")", ":", "sparse_or_dense_a", "=", "tf", ".", "identity", "(", "sparse_or_dense_a", ")", "dense_b", "=", "tf", ".", "identity", "(", "dense_b", ")", "if", "isinstance", "(", "sparse_or_dense_a", ",", "(", "tf", ".", "SparseTensor", ",", "tf", ".", "compat", ".", "v1", ".", "SparseTensorValue", ")", ")", ":", "return", "_sparse_tensor_dense_matmul", "(", "sparse_or_dense_a", ",", "dense_b", ",", "*", "*", "kwargs", ")", "else", ":", "return", "tf", ".", "matmul", "(", "sparse_or_dense_a", ",", "dense_b", ",", "*", "*", "kwargs", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
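A short usage sketch for `sparse_or_dense_matmul` (eager execution and the entries are illustrative assumptions); the same call works whether the left operand is a `tf.SparseTensor` or an ordinary dense `Tensor`:

```python
import tensorflow as tf
import tensorflow_probability as tfp

# Sparse 2x3 matrix with two non-zero entries.
a_sparse = tf.SparseTensor(indices=[[0, 0], [1, 2]],
                           values=[1., 2.],
                           dense_shape=[2, 3])
b = tf.constant([[1., 0.], [0., 1.], [1., 1.]])

product = tfp.math.sparse_or_dense_matmul(a_sparse, b)                   # sparse path
same = tfp.math.sparse_or_dense_matmul(tf.sparse.to_dense(a_sparse), b)  # dense path
```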
test
sparse_or_dense_matvecmul
Returns (batched) matmul of a (sparse) matrix with a column vector. Args: sparse_or_dense_matrix: `SparseTensor` or `Tensor` representing a (batch of) matrices. dense_vector: `Tensor` representing a (batch of) vectors, with the same batch shape as `sparse_or_dense_matrix`. The shape must be compatible with the shape of `sparse_or_dense_matrix` and kwargs. validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: "sparse_or_dense_matvecmul". **kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul` or `tf.matmul`. Returns: product: A dense (batch of) vector-shaped Tensor of the same batch shape and dtype as `sparse_or_dense_matrix` and `dense_vector`.
tensorflow_probability/python/math/linalg.py
def sparse_or_dense_matvecmul(sparse_or_dense_matrix, dense_vector, validate_args=False, name=None, **kwargs): """Returns (batched) matmul of a (sparse) matrix with a column vector. Args: sparse_or_dense_matrix: `SparseTensor` or `Tensor` representing a (batch of) matrices. dense_vector: `Tensor` representing a (batch of) vectors, with the same batch shape as `sparse_or_dense_matrix`. The shape must be compatible with the shape of `sparse_or_dense_matrix` and kwargs. validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: "sparse_or_dense_matvecmul". **kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul` or `tf.matmul`. Returns: product: A dense (batch of) vector-shaped Tensor of the same batch shape and dtype as `sparse_or_dense_matrix` and `dense_vector`. """ with tf.compat.v1.name_scope(name, 'sparse_or_dense_matvecmul', [sparse_or_dense_matrix, dense_vector]): dense_vector = tf.convert_to_tensor( value=dense_vector, dtype_hint=tf.float32, name='dense_vector') return tf.squeeze( sparse_or_dense_matmul( sparse_or_dense_matrix, dense_vector[..., tf.newaxis], validate_args=validate_args, **kwargs), axis=[-1])
def sparse_or_dense_matvecmul(sparse_or_dense_matrix, dense_vector, validate_args=False, name=None, **kwargs): """Returns (batched) matmul of a (sparse) matrix with a column vector. Args: sparse_or_dense_matrix: `SparseTensor` or `Tensor` representing a (batch of) matrices. dense_vector: `Tensor` representing a (batch of) vectors, with the same batch shape as `sparse_or_dense_matrix`. The shape must be compatible with the shape of `sparse_or_dense_matrix` and kwargs. validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: "sparse_or_dense_matvecmul". **kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul` or `tf.matmul`. Returns: product: A dense (batch of) vector-shaped Tensor of the same batch shape and dtype as `sparse_or_dense_matrix` and `dense_vector`. """ with tf.compat.v1.name_scope(name, 'sparse_or_dense_matvecmul', [sparse_or_dense_matrix, dense_vector]): dense_vector = tf.convert_to_tensor( value=dense_vector, dtype_hint=tf.float32, name='dense_vector') return tf.squeeze( sparse_or_dense_matmul( sparse_or_dense_matrix, dense_vector[..., tf.newaxis], validate_args=validate_args, **kwargs), axis=[-1])
[ "Returns", "(", "batched", ")", "matmul", "of", "a", "(", "sparse", ")", "matrix", "with", "a", "column", "vector", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L791-L826
[ "def", "sparse_or_dense_matvecmul", "(", "sparse_or_dense_matrix", ",", "dense_vector", ",", "validate_args", "=", "False", ",", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'sparse_or_dense_matvecmul'", ",", "[", "sparse_or_dense_matrix", ",", "dense_vector", "]", ")", ":", "dense_vector", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "dense_vector", ",", "dtype_hint", "=", "tf", ".", "float32", ",", "name", "=", "'dense_vector'", ")", "return", "tf", ".", "squeeze", "(", "sparse_or_dense_matmul", "(", "sparse_or_dense_matrix", ",", "dense_vector", "[", "...", ",", "tf", ".", "newaxis", "]", ",", "validate_args", "=", "validate_args", ",", "*", "*", "kwargs", ")", ",", "axis", "=", "[", "-", "1", "]", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
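The vector variant just appends a trailing singleton axis, reuses the matmul above, and squeezes it back out; a sketch with illustrative values:

```python
import tensorflow as tf
import tensorflow_probability as tfp

matrix = tf.SparseTensor(indices=[[0, 0], [1, 1]],
                         values=[2., 3.],
                         dense_shape=[2, 2])
vector = tf.constant([1., 4.])

# Vector-shaped result: [2*1, 3*4] = [2., 12.]
result = tfp.math.sparse_or_dense_matvecmul(matrix, vector)
```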
test
_sparse_tensor_dense_matmul
Returns (batched) matmul of a SparseTensor with a Tensor.

Args:
  sp_a: `SparseTensor` representing a (batch of) matrices.
  b: `Tensor` representing a (batch of) matrices, with the same batch shape as
    `sp_a`. The shape must be compatible with the shape of `sp_a` and kwargs.
  **kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul`.

Returns:
  product: A dense (batch of) matrix-shaped Tensor of the same batch shape and
    dtype as `sp_a` and `b`. If `sp_a` or `b` is adjointed through `kwargs`
    then the shape is adjusted accordingly.
tensorflow_probability/python/math/linalg.py
def _sparse_tensor_dense_matmul(sp_a, b, **kwargs): """Returns (batched) matmul of a SparseTensor with a Tensor. Args: sp_a: `SparseTensor` representing a (batch of) matrices. b: `Tensor` representing a (batch of) matrices, with the same batch shape of `sp_a`. The shape must be compatible with the shape of `sp_a` and kwargs. **kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul`. Returns: product: A dense (batch of) matrix-shaped Tensor of the same batch shape and dtype as `sp_a` and `b`. If `sp_a` or `b` is adjointed through `kwargs` then the shape is adjusted accordingly. """ batch_shape = _get_shape(sp_a)[:-2] # Reshape the SparseTensor into a rank 3 SparseTensors, with the # batch shape flattened to a single dimension. If the batch rank is 0, then # we add a batch dimension of rank 1. sp_a = tf.sparse.reshape(sp_a, tf.concat([[-1], _get_shape(sp_a)[-2:]], axis=0)) # Reshape b to stack the batch dimension along the rows. b = tf.reshape(b, tf.concat([[-1], _get_shape(b)[-1:]], axis=0)) # Convert the SparseTensor to a matrix in block diagonal form with blocks of # matrices [M, N]. This allow us to use tf.sparse_tensor_dense_matmul which # only accepts rank 2 (Sparse)Tensors. out = tf.sparse.sparse_dense_matmul(_sparse_block_diag(sp_a), b, **kwargs) # Finally retrieve the original batch shape from the resulting rank 2 Tensor. # Note that we avoid inferring the final shape from `sp_a` or `b` because we # might have transposed one or both of them. return tf.reshape( out, tf.concat([batch_shape, [-1], _get_shape(out)[-1:]], axis=0))
def _sparse_tensor_dense_matmul(sp_a, b, **kwargs): """Returns (batched) matmul of a SparseTensor with a Tensor. Args: sp_a: `SparseTensor` representing a (batch of) matrices. b: `Tensor` representing a (batch of) matrices, with the same batch shape of `sp_a`. The shape must be compatible with the shape of `sp_a` and kwargs. **kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul`. Returns: product: A dense (batch of) matrix-shaped Tensor of the same batch shape and dtype as `sp_a` and `b`. If `sp_a` or `b` is adjointed through `kwargs` then the shape is adjusted accordingly. """ batch_shape = _get_shape(sp_a)[:-2] # Reshape the SparseTensor into a rank 3 SparseTensors, with the # batch shape flattened to a single dimension. If the batch rank is 0, then # we add a batch dimension of rank 1. sp_a = tf.sparse.reshape(sp_a, tf.concat([[-1], _get_shape(sp_a)[-2:]], axis=0)) # Reshape b to stack the batch dimension along the rows. b = tf.reshape(b, tf.concat([[-1], _get_shape(b)[-1:]], axis=0)) # Convert the SparseTensor to a matrix in block diagonal form with blocks of # matrices [M, N]. This allow us to use tf.sparse_tensor_dense_matmul which # only accepts rank 2 (Sparse)Tensors. out = tf.sparse.sparse_dense_matmul(_sparse_block_diag(sp_a), b, **kwargs) # Finally retrieve the original batch shape from the resulting rank 2 Tensor. # Note that we avoid inferring the final shape from `sp_a` or `b` because we # might have transposed one or both of them. return tf.reshape( out, tf.concat([batch_shape, [-1], _get_shape(out)[-1:]], axis=0))
[ "Returns", "(", "batched", ")", "matmul", "of", "a", "SparseTensor", "with", "a", "Tensor", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L837-L871
[ "def", "_sparse_tensor_dense_matmul", "(", "sp_a", ",", "b", ",", "*", "*", "kwargs", ")", ":", "batch_shape", "=", "_get_shape", "(", "sp_a", ")", "[", ":", "-", "2", "]", "# Reshape the SparseTensor into a rank 3 SparseTensors, with the", "# batch shape flattened to a single dimension. If the batch rank is 0, then", "# we add a batch dimension of rank 1.", "sp_a", "=", "tf", ".", "sparse", ".", "reshape", "(", "sp_a", ",", "tf", ".", "concat", "(", "[", "[", "-", "1", "]", ",", "_get_shape", "(", "sp_a", ")", "[", "-", "2", ":", "]", "]", ",", "axis", "=", "0", ")", ")", "# Reshape b to stack the batch dimension along the rows.", "b", "=", "tf", ".", "reshape", "(", "b", ",", "tf", ".", "concat", "(", "[", "[", "-", "1", "]", ",", "_get_shape", "(", "b", ")", "[", "-", "1", ":", "]", "]", ",", "axis", "=", "0", ")", ")", "# Convert the SparseTensor to a matrix in block diagonal form with blocks of", "# matrices [M, N]. This allow us to use tf.sparse_tensor_dense_matmul which", "# only accepts rank 2 (Sparse)Tensors.", "out", "=", "tf", ".", "sparse", ".", "sparse_dense_matmul", "(", "_sparse_block_diag", "(", "sp_a", ")", ",", "b", ",", "*", "*", "kwargs", ")", "# Finally retrieve the original batch shape from the resulting rank 2 Tensor.", "# Note that we avoid inferring the final shape from `sp_a` or `b` because we", "# might have transposed one or both of them.", "return", "tf", ".", "reshape", "(", "out", ",", "tf", ".", "concat", "(", "[", "batch_shape", ",", "[", "-", "1", "]", ",", "_get_shape", "(", "out", ")", "[", "-", "1", ":", "]", "]", ",", "axis", "=", "0", ")", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
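The batching trick is easiest to see on a tiny worked example with hypothetical entries: a batch of two sparse `[2, 2]` matrices becomes one `[4, 4]` block-diagonal sparse matrix, the dense operand is stacked to `[4, 1]`, and a single rank-2 sparse matmul yields the stacked batched result.

```python
import tensorflow as tf

# Batch of two 2x2 matrices, one non-zero entry each.
sp_a = tf.SparseTensor(indices=[[0, 0, 1], [1, 1, 0]],
                       values=[5., 7.],
                       dense_shape=[2, 2, 2])
b = tf.ones([2, 2, 1])

# Block-diagonal form: entry (batch, i, j) moves to (2*batch + i, 2*batch + j).
block_diag = tf.SparseTensor(indices=[[0, 1], [3, 2]],
                             values=[5., 7.],
                             dense_shape=[4, 4])
stacked_b = tf.reshape(b, [4, 1])

out = tf.sparse.sparse_dense_matmul(block_diag, stacked_b)  # shape [4, 1]
batched_out = tf.reshape(out, [2, 2, 1])
# batched_out equals tf.matmul(tf.sparse.to_dense(sp_a), b).
```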
test
_sparse_block_diag
Returns a block diagonal rank 2 SparseTensor from a batch of SparseTensors.

Args:
  sp_a: A rank 3 `SparseTensor` representing a batch of matrices.

Returns:
  sp_block_diag_a: matrix-shaped, `float` `SparseTensor` with the same dtype
    as `sp_a`, of shape [B * M, B * N] where `sp_a` has shape [B, M, N]. Each
    [M, N] batch of `sp_a` is lined up along the diagonal.
tensorflow_probability/python/math/linalg.py
def _sparse_block_diag(sp_a): """Returns a block diagonal rank 2 SparseTensor from a batch of SparseTensors. Args: sp_a: A rank 3 `SparseTensor` representing a batch of matrices. Returns: sp_block_diag_a: matrix-shaped, `float` `SparseTensor` with the same dtype as `sparse_or_matrix`, of shape [B * M, B * N] where `sp_a` has shape [B, M, N]. Each [M, N] batch of `sp_a` is lined up along the diagonal. """ # Construct the matrix [[M, N], [1, 0], [0, 1]] which would map the index # (b, i, j) to (Mb + i, Nb + j). This effectively creates a block-diagonal # matrix of dense shape [B * M, B * N]. # Note that this transformation doesn't increase the number of non-zero # entries in the SparseTensor. sp_a_shape = tf.convert_to_tensor(value=_get_shape(sp_a, tf.int64)) ind_mat = tf.concat([[sp_a_shape[-2:]], tf.eye(2, dtype=tf.int64)], axis=0) indices = tf.matmul(sp_a.indices, ind_mat) dense_shape = sp_a_shape[0] * sp_a_shape[1:] return tf.SparseTensor( indices=indices, values=sp_a.values, dense_shape=dense_shape)
def _sparse_block_diag(sp_a): """Returns a block diagonal rank 2 SparseTensor from a batch of SparseTensors. Args: sp_a: A rank 3 `SparseTensor` representing a batch of matrices. Returns: sp_block_diag_a: matrix-shaped, `float` `SparseTensor` with the same dtype as `sparse_or_matrix`, of shape [B * M, B * N] where `sp_a` has shape [B, M, N]. Each [M, N] batch of `sp_a` is lined up along the diagonal. """ # Construct the matrix [[M, N], [1, 0], [0, 1]] which would map the index # (b, i, j) to (Mb + i, Nb + j). This effectively creates a block-diagonal # matrix of dense shape [B * M, B * N]. # Note that this transformation doesn't increase the number of non-zero # entries in the SparseTensor. sp_a_shape = tf.convert_to_tensor(value=_get_shape(sp_a, tf.int64)) ind_mat = tf.concat([[sp_a_shape[-2:]], tf.eye(2, dtype=tf.int64)], axis=0) indices = tf.matmul(sp_a.indices, ind_mat) dense_shape = sp_a_shape[0] * sp_a_shape[1:] return tf.SparseTensor( indices=indices, values=sp_a.values, dense_shape=dense_shape)
[ "Returns", "a", "block", "diagonal", "rank", "2", "SparseTensor", "from", "a", "batch", "of", "SparseTensors", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L874-L895
[ "def", "_sparse_block_diag", "(", "sp_a", ")", ":", "# Construct the matrix [[M, N], [1, 0], [0, 1]] which would map the index", "# (b, i, j) to (Mb + i, Nb + j). This effectively creates a block-diagonal", "# matrix of dense shape [B * M, B * N].", "# Note that this transformation doesn't increase the number of non-zero", "# entries in the SparseTensor.", "sp_a_shape", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "_get_shape", "(", "sp_a", ",", "tf", ".", "int64", ")", ")", "ind_mat", "=", "tf", ".", "concat", "(", "[", "[", "sp_a_shape", "[", "-", "2", ":", "]", "]", ",", "tf", ".", "eye", "(", "2", ",", "dtype", "=", "tf", ".", "int64", ")", "]", ",", "axis", "=", "0", ")", "indices", "=", "tf", ".", "matmul", "(", "sp_a", ".", "indices", ",", "ind_mat", ")", "dense_shape", "=", "sp_a_shape", "[", "0", "]", "*", "sp_a_shape", "[", "1", ":", "]", "return", "tf", ".", "SparseTensor", "(", "indices", "=", "indices", ",", "values", "=", "sp_a", ".", "values", ",", "dense_shape", "=", "dense_shape", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
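The index map `(b, i, j) -> (M*b + i, N*b + j)` is literally a matrix product of each index triple with `[[M, N], [1, 0], [0, 1]]`; a tiny worked example with hypothetical `M = 2`, `N = 3`:

```python
import tensorflow as tf

M, N = 2, 3
ind_mat = tf.concat([[[M, N]], tf.eye(2, dtype=tf.int32)], axis=0)
# ind_mat == [[2, 3], [1, 0], [0, 1]]

# Index (b=1, i=0, j=2) lands at row M*1 + 0 = 2 and column N*1 + 2 = 5.
index = tf.constant([[1, 0, 2]])
print(tf.matmul(index, ind_mat))  # ==> [[2, 5]]
```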
test
_maybe_validate_matrix
Checks that input is a `float` matrix.
tensorflow_probability/python/math/linalg.py
def _maybe_validate_matrix(a, validate_args): """Checks that input is a `float` matrix.""" assertions = [] if not a.dtype.is_floating: raise TypeError('Input `a` must have `float`-like `dtype` ' '(saw {}).'.format(a.dtype.name)) if a.shape.ndims is not None: if a.shape.ndims < 2: raise ValueError('Input `a` must have at least 2 dimensions ' '(saw: {}).'.format(a.shape.ndims)) elif validate_args: assertions.append(tf.compat.v1.assert_rank_at_least( a, rank=2, message='Input `a` must have at least 2 dimensions.')) return assertions
def _maybe_validate_matrix(a, validate_args): """Checks that input is a `float` matrix.""" assertions = [] if not a.dtype.is_floating: raise TypeError('Input `a` must have `float`-like `dtype` ' '(saw {}).'.format(a.dtype.name)) if a.shape.ndims is not None: if a.shape.ndims < 2: raise ValueError('Input `a` must have at least 2 dimensions ' '(saw: {}).'.format(a.shape.ndims)) elif validate_args: assertions.append(tf.compat.v1.assert_rank_at_least( a, rank=2, message='Input `a` must have at least 2 dimensions.')) return assertions
[ "Checks", "that", "input", "is", "a", "float", "matrix", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L898-L911
[ "def", "_maybe_validate_matrix", "(", "a", ",", "validate_args", ")", ":", "assertions", "=", "[", "]", "if", "not", "a", ".", "dtype", ".", "is_floating", ":", "raise", "TypeError", "(", "'Input `a` must have `float`-like `dtype` '", "'(saw {}).'", ".", "format", "(", "a", ".", "dtype", ".", "name", ")", ")", "if", "a", ".", "shape", ".", "ndims", "is", "not", "None", ":", "if", "a", ".", "shape", ".", "ndims", "<", "2", ":", "raise", "ValueError", "(", "'Input `a` must have at least 2 dimensions '", "'(saw: {}).'", ".", "format", "(", "a", ".", "shape", ".", "ndims", ")", ")", "elif", "validate_args", ":", "assertions", ".", "append", "(", "tf", ".", "compat", ".", "v1", ".", "assert_rank_at_least", "(", "a", ",", "rank", "=", "2", ",", "message", "=", "'Input `a` must have at least 2 dimensions.'", ")", ")", "return", "assertions" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
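The contract checked here is only about dtype and rank; with fully known static shapes the failures surface at graph-construction time (shapes below are illustrative):

```python
import tensorflow as tf

ok = tf.zeros([5, 3])              # float32, rank 2: passes with no assertions
batched_ok = tf.zeros([7, 5, 3])   # leading batch dimensions are fine too
# tf.zeros([5])                    -> ValueError: needs at least 2 dimensions
# tf.zeros([5, 3], dtype=tf.int32) -> TypeError: dtype must be float-like
```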
test
_grad_neg_log_likelihood_and_fim
Computes the neg-log-likelihood gradient and Fisher information for a GLM.

Note that Fisher information is related to the Hessian of the log-likelihood
by the equation

```none
FisherInfo = E[Hessian with respect to model_coefficients of -LogLikelihood(
    Y | model_matrix, model_coefficients)]
```

where `LogLikelihood` is the log-likelihood of a generalized linear model
parameterized by `model_matrix` and `model_coefficients`, and the expectation
is taken over Y, distributed according to the same GLM with the same parameter
values.

Args:
  model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor`
    where each row represents a sample's features. Has shape `[N, n]` where
    `N` is the number of data samples and `n` is the number of features per
    sample.
  linear_response: (Batch of) vector-shaped `Tensor` with the same dtype as
    `model_matrix`, equal to `model_matrix @ model_coefficients` where
    `model_coefficients` are the coefficients of the linear component of the
    GLM.
  response: (Batch of) vector-shaped `Tensor` with the same dtype as
    `model_matrix` where each element represents a sample's observed response
    (to the corresponding row of features).
  model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link
    function and distribution of the GLM, and thus characterizes the negative
    log-likelihood. Must have sufficient statistic equal to the response, that
    is, `T(y) = y`.

Returns:
  grad_neg_log_likelihood: (Batch of) vector-shaped `Tensor` with the same
    shape and dtype as a single row of `model_matrix`, representing the
    gradient of the negative log likelihood of `response` given linear
    response `linear_response`.
  fim_middle: (Batch of) vector-shaped `Tensor` with the same shape and dtype
    as a single column of `model_matrix`, satisfying the equation
    `Fisher information = Transpose(model_matrix) @ diag(fim_middle) @
    model_matrix`.
tensorflow_probability/python/glm/proximal_hessian.py
def _grad_neg_log_likelihood_and_fim(model_matrix, linear_response, response, model): """Computes the neg-log-likelihood gradient and Fisher information for a GLM. Note that Fisher information is related to the Hessian of the log-likelihood by the equation ```none FisherInfo = E[Hessian with respect to model_coefficients of -LogLikelihood( Y | model_matrix, model_coefficients)] ``` where `LogLikelihood` is the log-likelihood of a generalized linear model parameterized by `model_matrix` and `model_coefficients`, and the expectation is taken over Y, distributed according to the same GLM with the same parameter values. Args: model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor` where each row represents a sample's features. Has shape `[N, n]` where `N` is the number of data samples and `n` is the number of features per sample. linear_response: (Batch of) vector-shaped `Tensor` with the same dtype as `model_matrix`, equal to `model_matix @ model_coefficients` where `model_coefficients` are the coefficients of the linear component of the GLM. response: (Batch of) vector-shaped `Tensor` with the same dtype as `model_matrix` where each element represents a sample's observed response (to the corresponding row of features). model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link function and distribution of the GLM, and thus characterizes the negative log-likelihood. Must have sufficient statistic equal to the response, that is, `T(y) = y`. Returns: grad_neg_log_likelihood: (Batch of) vector-shaped `Tensor` with the same shape and dtype as a single row of `model_matrix`, representing the gradient of the negative log likelihood of `response` given linear response `linear_response`. fim_middle: (Batch of) vector-shaped `Tensor` with the same shape and dtype as a single column of `model_matrix`, satisfying the equation `Fisher information = Transpose(model_matrix) @ diag(fim_middle) @ model_matrix`. """ # TODO(b/111926503): Determine whether there are some practical cases where it # is computationally favorable to compute the full FIM. mean, variance, grad_mean = model(linear_response) is_valid = ( tf.math.is_finite(grad_mean) & tf.not_equal(grad_mean, 0.) & tf.math.is_finite(variance) & (variance > 0.)) def _mask_if_invalid(x, mask): mask = tf.fill( tf.shape(input=x), value=np.array(mask, x.dtype.as_numpy_dtype)) return tf.where(is_valid, x, mask) # TODO(b/111923449): Link to derivation once it's available. v = (response - mean) * _mask_if_invalid(grad_mean, 1) / _mask_if_invalid( variance, np.inf) grad_log_likelihood = sparse_or_dense_matvecmul( model_matrix, v, adjoint_a=True) fim_middle = _mask_if_invalid(grad_mean, 0.)**2 / _mask_if_invalid( variance, np.inf) return -grad_log_likelihood, fim_middle
def _grad_neg_log_likelihood_and_fim(model_matrix, linear_response, response, model): """Computes the neg-log-likelihood gradient and Fisher information for a GLM. Note that Fisher information is related to the Hessian of the log-likelihood by the equation ```none FisherInfo = E[Hessian with respect to model_coefficients of -LogLikelihood( Y | model_matrix, model_coefficients)] ``` where `LogLikelihood` is the log-likelihood of a generalized linear model parameterized by `model_matrix` and `model_coefficients`, and the expectation is taken over Y, distributed according to the same GLM with the same parameter values. Args: model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor` where each row represents a sample's features. Has shape `[N, n]` where `N` is the number of data samples and `n` is the number of features per sample. linear_response: (Batch of) vector-shaped `Tensor` with the same dtype as `model_matrix`, equal to `model_matix @ model_coefficients` where `model_coefficients` are the coefficients of the linear component of the GLM. response: (Batch of) vector-shaped `Tensor` with the same dtype as `model_matrix` where each element represents a sample's observed response (to the corresponding row of features). model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link function and distribution of the GLM, and thus characterizes the negative log-likelihood. Must have sufficient statistic equal to the response, that is, `T(y) = y`. Returns: grad_neg_log_likelihood: (Batch of) vector-shaped `Tensor` with the same shape and dtype as a single row of `model_matrix`, representing the gradient of the negative log likelihood of `response` given linear response `linear_response`. fim_middle: (Batch of) vector-shaped `Tensor` with the same shape and dtype as a single column of `model_matrix`, satisfying the equation `Fisher information = Transpose(model_matrix) @ diag(fim_middle) @ model_matrix`. """ # TODO(b/111926503): Determine whether there are some practical cases where it # is computationally favorable to compute the full FIM. mean, variance, grad_mean = model(linear_response) is_valid = ( tf.math.is_finite(grad_mean) & tf.not_equal(grad_mean, 0.) & tf.math.is_finite(variance) & (variance > 0.)) def _mask_if_invalid(x, mask): mask = tf.fill( tf.shape(input=x), value=np.array(mask, x.dtype.as_numpy_dtype)) return tf.where(is_valid, x, mask) # TODO(b/111923449): Link to derivation once it's available. v = (response - mean) * _mask_if_invalid(grad_mean, 1) / _mask_if_invalid( variance, np.inf) grad_log_likelihood = sparse_or_dense_matvecmul( model_matrix, v, adjoint_a=True) fim_middle = _mask_if_invalid(grad_mean, 0.)**2 / _mask_if_invalid( variance, np.inf) return -grad_log_likelihood, fim_middle
[ "Computes", "the", "neg", "-", "log", "-", "likelihood", "gradient", "and", "Fisher", "information", "for", "a", "GLM", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/proximal_hessian.py#L41-L107
[ "def", "_grad_neg_log_likelihood_and_fim", "(", "model_matrix", ",", "linear_response", ",", "response", ",", "model", ")", ":", "# TODO(b/111926503): Determine whether there are some practical cases where it", "# is computationally favorable to compute the full FIM.", "mean", ",", "variance", ",", "grad_mean", "=", "model", "(", "linear_response", ")", "is_valid", "=", "(", "tf", ".", "math", ".", "is_finite", "(", "grad_mean", ")", "&", "tf", ".", "not_equal", "(", "grad_mean", ",", "0.", ")", "&", "tf", ".", "math", ".", "is_finite", "(", "variance", ")", "&", "(", "variance", ">", "0.", ")", ")", "def", "_mask_if_invalid", "(", "x", ",", "mask", ")", ":", "mask", "=", "tf", ".", "fill", "(", "tf", ".", "shape", "(", "input", "=", "x", ")", ",", "value", "=", "np", ".", "array", "(", "mask", ",", "x", ".", "dtype", ".", "as_numpy_dtype", ")", ")", "return", "tf", ".", "where", "(", "is_valid", ",", "x", ",", "mask", ")", "# TODO(b/111923449): Link to derivation once it's available.", "v", "=", "(", "response", "-", "mean", ")", "*", "_mask_if_invalid", "(", "grad_mean", ",", "1", ")", "/", "_mask_if_invalid", "(", "variance", ",", "np", ".", "inf", ")", "grad_log_likelihood", "=", "sparse_or_dense_matvecmul", "(", "model_matrix", ",", "v", ",", "adjoint_a", "=", "True", ")", "fim_middle", "=", "_mask_if_invalid", "(", "grad_mean", ",", "0.", ")", "**", "2", "/", "_mask_if_invalid", "(", "variance", ",", "np", ".", "inf", ")", "return", "-", "grad_log_likelihood", ",", "fim_middle" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
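In the notation of the docstring, the function returns `-X^T v` with `v = (y - mean) * grad_mean / variance` (masked where the family is degenerate) and `fim_middle = grad_mean**2 / variance`, so that `FisherInfo = X^T diag(fim_middle) X`. A hedged numeric sketch: the data values are made up and `tfp.glm.Bernoulli` is used only as a convenient `ExponentialFamily` instance.

```python
import tensorflow as tf
import tensorflow_probability as tfp

x = tf.constant([[1., 0.], [1., 1.], [1., 2.]])  # model_matrix, shape [3, 2]
y = tf.constant([0., 1., 1.])                    # response
coeffs = tf.constant([-0.5, 1.0])
eta = tf.linalg.matvec(x, coeffs)                # linear response

# The ExponentialFamily call yields the (mean, variance, grad_mean) triple.
mean, variance, grad_mean = tfp.glm.Bernoulli()(eta)

v = (y - mean) * grad_mean / variance
grad_neg_ll = -tf.linalg.matvec(x, v, adjoint_a=True)               # shape [2]
fim_middle = grad_mean ** 2 / variance
fisher_info = tf.matmul(x, x * fim_middle[:, tf.newaxis], adjoint_a=True)
```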
test
fit_sparse_one_step
One step of (the outer loop of) the GLM fitting algorithm. This function returns a new value of `model_coefficients`, equal to `model_coefficients_start + model_coefficients_update`. The increment `model_coefficients_update in R^n` is computed by a coordinate descent method, that is, by a loop in which each iteration updates exactly one coordinate of `model_coefficients_update`. (Some updates may leave the value of the coordinate unchanged.) The particular update method used is to apply an L1-based proximity operator, "soft threshold", whose fixed point `model_coefficients_update^*` is the desired minimum ```none model_coefficients_update^* = argmin{ -LogLikelihood(model_coefficients_start + model_coefficients_update') + l1_regularizer * ||model_coefficients_start + model_coefficients_update'||_1 + l2_regularizer * ||model_coefficients_start + model_coefficients_update'||_2**2 : model_coefficients_update' } ``` where in each iteration `model_coefficients_update'` has at most one nonzero coordinate. This update method preserves sparsity, i.e., tends to find sparse solutions if `model_coefficients_start` is sparse. Additionally, the choice of step size is based on curvature (Fisher information matrix), which significantly speeds up convergence. Args: model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor` where each row represents a sample's features. Has shape `[N, n]` where `N` is the number of data samples and `n` is the number of features per sample. response: (Batch of) vector-shaped `Tensor` with the same dtype as `model_matrix` where each element represents a sample's observed response (to the corresponding row of features). model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link function and distribution of the GLM, and thus characterizes the negative log-likelihood which will be minimized. Must have sufficient statistic equal to the response, that is, `T(y) = y`. model_coefficients_start: (Batch of) vector-shaped, `float` `Tensor` with the same dtype as `model_matrix`, representing the initial values of the coefficients for the GLM regression. Has shape `[n]` where `model_matrix` has shape `[N, n]`. tolerance: scalar, `float` `Tensor` representing the convergence threshold. The optimization step will terminate early, returning its current value of `model_coefficients_start + model_coefficients_update`, once the following condition is met: `||model_coefficients_update_end - model_coefficients_update_start||_2 / (1 + ||model_coefficients_start||_2) < sqrt(tolerance)`, where `model_coefficients_update_end` is the value of `model_coefficients_update` at the end of a sweep and `model_coefficients_update_start` is the value of `model_coefficients_update` at the beginning of that sweep. l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1 regularization term (see equation above). l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2 regularization term (see equation above). Default value: `None` (i.e., no L2 regularization). maximum_full_sweeps: Python integer specifying maximum number of sweeps to run. A "sweep" consists of an iteration of coordinate descent on each coordinate. After this many sweeps, the algorithm will terminate even if convergence has not been reached. Default value: `1`. learning_rate: scalar, `float` `Tensor` representing a multiplicative factor used to dampen the proximal gradient descent steps. Default value: `None` (i.e., factor is conceptually `1`). 
name: Python string representing the name of the TensorFlow operation. The default name is `"fit_sparse_one_step"`. Returns: model_coefficients: (Batch of) `Tensor` having the same shape and dtype as `model_coefficients_start`, representing the updated value of `model_coefficients`, that is, `model_coefficients_start + model_coefficients_update`. is_converged: scalar, `bool` `Tensor` indicating whether convergence occurred across all batches within the specified number of sweeps. iter: scalar, `int` `Tensor` representing the actual number of coordinate updates made (before achieving convergence). Since each sweep consists of `tf.size(model_coefficients_start)` iterations, the maximum number of updates is `maximum_full_sweeps * tf.size(model_coefficients_start)`.
tensorflow_probability/python/glm/proximal_hessian.py
def fit_sparse_one_step(model_matrix, response, model, model_coefficients_start, tolerance, l1_regularizer, l2_regularizer=None, maximum_full_sweeps=None, learning_rate=None, name=None): """One step of (the outer loop of) the GLM fitting algorithm. This function returns a new value of `model_coefficients`, equal to `model_coefficients_start + model_coefficients_update`. The increment `model_coefficients_update in R^n` is computed by a coordinate descent method, that is, by a loop in which each iteration updates exactly one coordinate of `model_coefficients_update`. (Some updates may leave the value of the coordinate unchanged.) The particular update method used is to apply an L1-based proximity operator, "soft threshold", whose fixed point `model_coefficients_update^*` is the desired minimum ```none model_coefficients_update^* = argmin{ -LogLikelihood(model_coefficients_start + model_coefficients_update') + l1_regularizer * ||model_coefficients_start + model_coefficients_update'||_1 + l2_regularizer * ||model_coefficients_start + model_coefficients_update'||_2**2 : model_coefficients_update' } ``` where in each iteration `model_coefficients_update'` has at most one nonzero coordinate. This update method preserves sparsity, i.e., tends to find sparse solutions if `model_coefficients_start` is sparse. Additionally, the choice of step size is based on curvature (Fisher information matrix), which significantly speeds up convergence. Args: model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor` where each row represents a sample's features. Has shape `[N, n]` where `N` is the number of data samples and `n` is the number of features per sample. response: (Batch of) vector-shaped `Tensor` with the same dtype as `model_matrix` where each element represents a sample's observed response (to the corresponding row of features). model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link function and distribution of the GLM, and thus characterizes the negative log-likelihood which will be minimized. Must have sufficient statistic equal to the response, that is, `T(y) = y`. model_coefficients_start: (Batch of) vector-shaped, `float` `Tensor` with the same dtype as `model_matrix`, representing the initial values of the coefficients for the GLM regression. Has shape `[n]` where `model_matrix` has shape `[N, n]`. tolerance: scalar, `float` `Tensor` representing the convergence threshold. The optimization step will terminate early, returning its current value of `model_coefficients_start + model_coefficients_update`, once the following condition is met: `||model_coefficients_update_end - model_coefficients_update_start||_2 / (1 + ||model_coefficients_start||_2) < sqrt(tolerance)`, where `model_coefficients_update_end` is the value of `model_coefficients_update` at the end of a sweep and `model_coefficients_update_start` is the value of `model_coefficients_update` at the beginning of that sweep. l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1 regularization term (see equation above). l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2 regularization term (see equation above). Default value: `None` (i.e., no L2 regularization). maximum_full_sweeps: Python integer specifying maximum number of sweeps to run. A "sweep" consists of an iteration of coordinate descent on each coordinate. After this many sweeps, the algorithm will terminate even if convergence has not been reached. Default value: `1`. 
learning_rate: scalar, `float` `Tensor` representing a multiplicative factor used to dampen the proximal gradient descent steps. Default value: `None` (i.e., factor is conceptually `1`). name: Python string representing the name of the TensorFlow operation. The default name is `"fit_sparse_one_step"`. Returns: model_coefficients: (Batch of) `Tensor` having the same shape and dtype as `model_coefficients_start`, representing the updated value of `model_coefficients`, that is, `model_coefficients_start + model_coefficients_update`. is_converged: scalar, `bool` `Tensor` indicating whether convergence occurred across all batches within the specified number of sweeps. iter: scalar, `int` `Tensor` representing the actual number of coordinate updates made (before achieving convergence). Since each sweep consists of `tf.size(model_coefficients_start)` iterations, the maximum number of updates is `maximum_full_sweeps * tf.size(model_coefficients_start)`. """ graph_deps = [ model_matrix, response, model_coefficients_start, l1_regularizer, l2_regularizer, maximum_full_sweeps, tolerance, learning_rate, ] with tf.compat.v1.name_scope(name, 'fit_sparse_one_step', graph_deps): predicted_linear_response = sparse_or_dense_matvecmul( model_matrix, model_coefficients_start) g, h_middle = _grad_neg_log_likelihood_and_fim( model_matrix, predicted_linear_response, response, model) return tfp.optimizer.proximal_hessian_sparse_one_step( gradient_unregularized_loss=g, hessian_unregularized_loss_outer=model_matrix, hessian_unregularized_loss_middle=h_middle, x_start=model_coefficients_start, l1_regularizer=l1_regularizer, l2_regularizer=l2_regularizer, maximum_full_sweeps=maximum_full_sweeps, tolerance=tolerance, learning_rate=learning_rate, name=name)
def fit_sparse_one_step(model_matrix, response, model, model_coefficients_start, tolerance, l1_regularizer, l2_regularizer=None, maximum_full_sweeps=None, learning_rate=None, name=None): """One step of (the outer loop of) the GLM fitting algorithm. This function returns a new value of `model_coefficients`, equal to `model_coefficients_start + model_coefficients_update`. The increment `model_coefficients_update in R^n` is computed by a coordinate descent method, that is, by a loop in which each iteration updates exactly one coordinate of `model_coefficients_update`. (Some updates may leave the value of the coordinate unchanged.) The particular update method used is to apply an L1-based proximity operator, "soft threshold", whose fixed point `model_coefficients_update^*` is the desired minimum ```none model_coefficients_update^* = argmin{ -LogLikelihood(model_coefficients_start + model_coefficients_update') + l1_regularizer * ||model_coefficients_start + model_coefficients_update'||_1 + l2_regularizer * ||model_coefficients_start + model_coefficients_update'||_2**2 : model_coefficients_update' } ``` where in each iteration `model_coefficients_update'` has at most one nonzero coordinate. This update method preserves sparsity, i.e., tends to find sparse solutions if `model_coefficients_start` is sparse. Additionally, the choice of step size is based on curvature (Fisher information matrix), which significantly speeds up convergence. Args: model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor` where each row represents a sample's features. Has shape `[N, n]` where `N` is the number of data samples and `n` is the number of features per sample. response: (Batch of) vector-shaped `Tensor` with the same dtype as `model_matrix` where each element represents a sample's observed response (to the corresponding row of features). model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link function and distribution of the GLM, and thus characterizes the negative log-likelihood which will be minimized. Must have sufficient statistic equal to the response, that is, `T(y) = y`. model_coefficients_start: (Batch of) vector-shaped, `float` `Tensor` with the same dtype as `model_matrix`, representing the initial values of the coefficients for the GLM regression. Has shape `[n]` where `model_matrix` has shape `[N, n]`. tolerance: scalar, `float` `Tensor` representing the convergence threshold. The optimization step will terminate early, returning its current value of `model_coefficients_start + model_coefficients_update`, once the following condition is met: `||model_coefficients_update_end - model_coefficients_update_start||_2 / (1 + ||model_coefficients_start||_2) < sqrt(tolerance)`, where `model_coefficients_update_end` is the value of `model_coefficients_update` at the end of a sweep and `model_coefficients_update_start` is the value of `model_coefficients_update` at the beginning of that sweep. l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1 regularization term (see equation above). l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2 regularization term (see equation above). Default value: `None` (i.e., no L2 regularization). maximum_full_sweeps: Python integer specifying maximum number of sweeps to run. A "sweep" consists of an iteration of coordinate descent on each coordinate. After this many sweeps, the algorithm will terminate even if convergence has not been reached. Default value: `1`. 
learning_rate: scalar, `float` `Tensor` representing a multiplicative factor used to dampen the proximal gradient descent steps. Default value: `None` (i.e., factor is conceptually `1`). name: Python string representing the name of the TensorFlow operation. The default name is `"fit_sparse_one_step"`. Returns: model_coefficients: (Batch of) `Tensor` having the same shape and dtype as `model_coefficients_start`, representing the updated value of `model_coefficients`, that is, `model_coefficients_start + model_coefficients_update`. is_converged: scalar, `bool` `Tensor` indicating whether convergence occurred across all batches within the specified number of sweeps. iter: scalar, `int` `Tensor` representing the actual number of coordinate updates made (before achieving convergence). Since each sweep consists of `tf.size(model_coefficients_start)` iterations, the maximum number of updates is `maximum_full_sweeps * tf.size(model_coefficients_start)`. """ graph_deps = [ model_matrix, response, model_coefficients_start, l1_regularizer, l2_regularizer, maximum_full_sweeps, tolerance, learning_rate, ] with tf.compat.v1.name_scope(name, 'fit_sparse_one_step', graph_deps): predicted_linear_response = sparse_or_dense_matvecmul( model_matrix, model_coefficients_start) g, h_middle = _grad_neg_log_likelihood_and_fim( model_matrix, predicted_linear_response, response, model) return tfp.optimizer.proximal_hessian_sparse_one_step( gradient_unregularized_loss=g, hessian_unregularized_loss_outer=model_matrix, hessian_unregularized_loss_middle=h_middle, x_start=model_coefficients_start, l1_regularizer=l1_regularizer, l2_regularizer=l2_regularizer, maximum_full_sweeps=maximum_full_sweeps, tolerance=tolerance, learning_rate=learning_rate, name=name)
[ "One", "step", "of", "(", "the", "outer", "loop", "of", ")", "the", "GLM", "fitting", "algorithm", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/proximal_hessian.py#L110-L232
[ "def", "fit_sparse_one_step", "(", "model_matrix", ",", "response", ",", "model", ",", "model_coefficients_start", ",", "tolerance", ",", "l1_regularizer", ",", "l2_regularizer", "=", "None", ",", "maximum_full_sweeps", "=", "None", ",", "learning_rate", "=", "None", ",", "name", "=", "None", ")", ":", "graph_deps", "=", "[", "model_matrix", ",", "response", ",", "model_coefficients_start", ",", "l1_regularizer", ",", "l2_regularizer", ",", "maximum_full_sweeps", ",", "tolerance", ",", "learning_rate", ",", "]", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'fit_sparse_one_step'", ",", "graph_deps", ")", ":", "predicted_linear_response", "=", "sparse_or_dense_matvecmul", "(", "model_matrix", ",", "model_coefficients_start", ")", "g", ",", "h_middle", "=", "_grad_neg_log_likelihood_and_fim", "(", "model_matrix", ",", "predicted_linear_response", ",", "response", ",", "model", ")", "return", "tfp", ".", "optimizer", ".", "proximal_hessian_sparse_one_step", "(", "gradient_unregularized_loss", "=", "g", ",", "hessian_unregularized_loss_outer", "=", "model_matrix", ",", "hessian_unregularized_loss_middle", "=", "h_middle", ",", "x_start", "=", "model_coefficients_start", ",", "l1_regularizer", "=", "l1_regularizer", ",", "l2_regularizer", "=", "l2_regularizer", ",", "maximum_full_sweeps", "=", "maximum_full_sweeps", ",", "tolerance", "=", "tolerance", ",", "learning_rate", "=", "learning_rate", ",", "name", "=", "name", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
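Unlike `fit_sparse`, the `fit_sparse_one_step` entry above carries no usage snippet. The following minimal sketch is not taken from the repository: the synthetic data, the `Bernoulli` model choice, and all hyperparameter values are illustrative assumptions about how a single outer-loop step might be invoked.

```python
# Minimal sketch (illustrative only) of one outer-loop step of the sparse GLM
# fitter on synthetic Bernoulli data. Shapes, seed, and regularizer values are
# assumptions made for this example, not values used by the library.
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

n_samples, n_features = 100, 5
x = tf.constant(np.random.randn(n_samples, n_features), dtype=tf.float32)
w_true = np.array([1.5, 0., -2., 0., 0.5], dtype=np.float32)  # sparse truth
logits = tf.linalg.matvec(x, w_true)
y = tf.cast(tfp.distributions.Bernoulli(logits=logits).sample(seed=0),
            tf.float32)

# One coordinate-descent "outer step", starting from all-zero coefficients.
w_next, is_converged, num_updates = tfp.glm.fit_sparse_one_step(
    model_matrix=x,
    response=y,
    model=tfp.glm.Bernoulli(),
    model_coefficients_start=tf.zeros([n_features]),
    tolerance=1e-6,
    l1_regularizer=1.,
    maximum_full_sweeps=1)
```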
test
fit_sparse
r"""Fits a GLM using coordinate-wise FIM-informed proximal gradient descent. This function uses a L1- and L2-regularized, second-order quasi-Newton method to find maximum-likelihood parameters for the given model and observed data. The second-order approximations use negative Fisher information in place of the Hessian, that is, ```none FisherInfo = E_Y[Hessian with respect to model_coefficients of -LogLikelihood( Y | model_matrix, current value of model_coefficients)] ``` For large, sparse data sets, `model_matrix` should be supplied as a `SparseTensor`. Args: model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor` where each row represents a sample's features. Has shape `[N, n]` where `N` is the number of data samples and `n` is the number of features per sample. response: (Batch of) vector-shaped `Tensor` with the same dtype as `model_matrix` where each element represents a sample's observed response (to the corresponding row of features). model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link function and distribution of the GLM, and thus characterizes the negative log-likelihood which will be minimized. Must have sufficient statistic equal to the response, that is, `T(y) = y`. model_coefficients_start: (Batch of) vector-shaped, `float` `Tensor` with the same dtype as `model_matrix`, representing the initial values of the coefficients for the GLM regression. Has shape `[n]` where `model_matrix` has shape `[N, n]`. tolerance: scalar, `float` `Tensor` representing the tolerance for each optiization step; see the `tolerance` argument of `fit_sparse_one_step`. l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1 regularization term. l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2 regularization term. Default value: `None` (i.e., no L2 regularization). maximum_iterations: Python integer specifying maximum number of iterations of the outer loop of the optimizer (i.e., maximum number of calls to `fit_sparse_one_step`). After this many iterations of the outer loop, the algorithm will terminate even if the return value `model_coefficients` has not converged. Default value: `1`. maximum_full_sweeps_per_iteration: Python integer specifying the maximum number of coordinate descent sweeps allowed in each iteration. Default value: `1`. learning_rate: scalar, `float` `Tensor` representing a multiplicative factor used to dampen the proximal gradient descent steps. Default value: `None` (i.e., factor is conceptually `1`). name: Python string representing the name of the TensorFlow operation. The default name is `"fit_sparse"`. Returns: model_coefficients: (Batch of) `Tensor` of the same shape and dtype as `model_coefficients_start`, representing the computed model coefficients which minimize the regularized negative log-likelihood. is_converged: scalar, `bool` `Tensor` indicating whether the minimization procedure converged across all batches within the specified number of iterations. Here convergence means that an iteration of the inner loop (`fit_sparse_one_step`) returns `True` for its `is_converged` output value. iter: scalar, `int` `Tensor` indicating the actual number of iterations of the outer loop of the optimizer completed (i.e., number of calls to `fit_sparse_one_step` before achieving convergence). 
#### Example ```python from __future__ import print_function import numpy as np import tensorflow as tf import tensorflow_probability as tfp tfd = tfp.distributions def make_dataset(n, d, link, scale=1., dtype=np.float32): model_coefficients = tfd.Uniform( low=np.array(-1, dtype), high=np.array(1, dtype)).sample( d, seed=42) radius = np.sqrt(2.) model_coefficients *= radius / tf.linalg.norm(model_coefficients) mask = tf.random_shuffle(tf.range(d)) < tf.to_int32(0.5 * tf.to_float(d)) model_coefficients = tf.where(mask, model_coefficients, tf.zeros_like(model_coefficients)) model_matrix = tfd.Normal( loc=np.array(0, dtype), scale=np.array(1, dtype)).sample( [n, d], seed=43) scale = tf.convert_to_tensor(scale, dtype) linear_response = tf.matmul(model_matrix, model_coefficients[..., tf.newaxis])[..., 0] if link == 'linear': response = tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) elif link == 'probit': response = tf.cast( tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) > 0, dtype) elif link == 'logit': response = tfd.Bernoulli(logits=linear_response).sample(seed=44) else: raise ValueError('unrecognized true link: {}'.format(link)) return model_matrix, response, model_coefficients, mask with tf.Session() as sess: x_, y_, model_coefficients_true_, _ = sess.run(make_dataset( n=int(1e5), d=100, link='probit')) model = tfp.glm.Bernoulli() model_coefficients_start = tf.zeros(x_.shape[-1], np.float32) model_coefficients, is_converged, num_iter = tfp.glm.fit_sparse( model_matrix=tf.convert_to_tensor(x_), response=tf.convert_to_tensor(y_), model=model, model_coefficients_start=model_coefficients_start, l1_regularizer=800., l2_regularizer=None, maximum_iterations=10, maximum_full_sweeps_per_iteration=10, tolerance=1e-6, learning_rate=None) model_coefficients_, is_converged_, num_iter_ = sess.run([ model_coefficients, is_converged, num_iter]) print("is_converged:", is_converged_) print(" num_iter:", num_iter_) print("\nLearned / True") print(np.concatenate( [[model_coefficients_], [model_coefficients_true_]], axis=0).T) # ==> # is_converged: True # num_iter: 1 # # Learned / True # [[ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0.11195257 0.12484948] # [ 0. 0. ] # [ 0.05191106 0.06394956] # [-0.15090358 -0.15325639] # [-0.18187316 -0.18825999] # [-0.06140942 -0.07994166] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0.14474444 0.15810856] # [ 0. 0. ] # [-0.25249591 -0.24260855] # [ 0. 0. ] # [ 0. 0. ] # [-0.03888761 -0.06755984] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [-0.0192222 -0.04169233] # [ 0. 0. ] # [ 0. 0. ] # [ 0.01434913 0.03568212] # [-0.11336883 -0.12873614] # [ 0. 0. ] # [-0.24496339 -0.24048163] # [ 0. 0. ] # [ 0. 0. ] # [ 0.04088281 0.06565224] # [-0.12784363 -0.13359821] # [ 0.05618424 0.07396613] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0. -0.01719233] # [ 0. 0. ] # [ 0. 0. ] # [-0.00076072 -0.03607186] # [ 0.21801499 0.21146794] # [-0.02161094 -0.04031265] # [ 0.0918689 0.10487888] # [ 0.0106154 0.03233612] # [-0.07817317 -0.09725142] # [ 0. 0. ] # [ 0. 0. ] # [-0.23725343 -0.24194022] # [ 0. 0. ] # [-0.08725718 -0.1048776 ] # [ 0. 0. ] # [ 0. 0. ] # [-0.02114314 -0.04145789] # [ 0. 0. ] # [ 0. 0. ] # [-0.02710908 -0.04590397] # [ 0.15293184 0.15415154] # [ 0.2114463 0.2088728 ] # [-0.10969634 -0.12368613] # [ 0. -0.01505797] # [-0.01140458 -0.03234904] # [ 0.16051085 0.1680062 ] # [ 0.09816848 0.11094204] ``` #### References [1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths for Generalized Linear Models via Coordinate Descent. 
_Journal of Statistical Software_, 33(1), 2010. https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf [2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for L1-regularized Logistic Regression. _Journal of Machine Learning Research_, 13, 2012. http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf
tensorflow_probability/python/glm/proximal_hessian.py
def fit_sparse(model_matrix, response, model, model_coefficients_start, tolerance, l1_regularizer, l2_regularizer=None, maximum_iterations=None, maximum_full_sweeps_per_iteration=1, learning_rate=None, name=None): r"""Fits a GLM using coordinate-wise FIM-informed proximal gradient descent. This function uses a L1- and L2-regularized, second-order quasi-Newton method to find maximum-likelihood parameters for the given model and observed data. The second-order approximations use negative Fisher information in place of the Hessian, that is, ```none FisherInfo = E_Y[Hessian with respect to model_coefficients of -LogLikelihood( Y | model_matrix, current value of model_coefficients)] ``` For large, sparse data sets, `model_matrix` should be supplied as a `SparseTensor`. Args: model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor` where each row represents a sample's features. Has shape `[N, n]` where `N` is the number of data samples and `n` is the number of features per sample. response: (Batch of) vector-shaped `Tensor` with the same dtype as `model_matrix` where each element represents a sample's observed response (to the corresponding row of features). model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link function and distribution of the GLM, and thus characterizes the negative log-likelihood which will be minimized. Must have sufficient statistic equal to the response, that is, `T(y) = y`. model_coefficients_start: (Batch of) vector-shaped, `float` `Tensor` with the same dtype as `model_matrix`, representing the initial values of the coefficients for the GLM regression. Has shape `[n]` where `model_matrix` has shape `[N, n]`. tolerance: scalar, `float` `Tensor` representing the tolerance for each optiization step; see the `tolerance` argument of `fit_sparse_one_step`. l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1 regularization term. l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2 regularization term. Default value: `None` (i.e., no L2 regularization). maximum_iterations: Python integer specifying maximum number of iterations of the outer loop of the optimizer (i.e., maximum number of calls to `fit_sparse_one_step`). After this many iterations of the outer loop, the algorithm will terminate even if the return value `model_coefficients` has not converged. Default value: `1`. maximum_full_sweeps_per_iteration: Python integer specifying the maximum number of coordinate descent sweeps allowed in each iteration. Default value: `1`. learning_rate: scalar, `float` `Tensor` representing a multiplicative factor used to dampen the proximal gradient descent steps. Default value: `None` (i.e., factor is conceptually `1`). name: Python string representing the name of the TensorFlow operation. The default name is `"fit_sparse"`. Returns: model_coefficients: (Batch of) `Tensor` of the same shape and dtype as `model_coefficients_start`, representing the computed model coefficients which minimize the regularized negative log-likelihood. is_converged: scalar, `bool` `Tensor` indicating whether the minimization procedure converged across all batches within the specified number of iterations. Here convergence means that an iteration of the inner loop (`fit_sparse_one_step`) returns `True` for its `is_converged` output value. 
iter: scalar, `int` `Tensor` indicating the actual number of iterations of the outer loop of the optimizer completed (i.e., number of calls to `fit_sparse_one_step` before achieving convergence). #### Example ```python from __future__ import print_function import numpy as np import tensorflow as tf import tensorflow_probability as tfp tfd = tfp.distributions def make_dataset(n, d, link, scale=1., dtype=np.float32): model_coefficients = tfd.Uniform( low=np.array(-1, dtype), high=np.array(1, dtype)).sample( d, seed=42) radius = np.sqrt(2.) model_coefficients *= radius / tf.linalg.norm(model_coefficients) mask = tf.random_shuffle(tf.range(d)) < tf.to_int32(0.5 * tf.to_float(d)) model_coefficients = tf.where(mask, model_coefficients, tf.zeros_like(model_coefficients)) model_matrix = tfd.Normal( loc=np.array(0, dtype), scale=np.array(1, dtype)).sample( [n, d], seed=43) scale = tf.convert_to_tensor(scale, dtype) linear_response = tf.matmul(model_matrix, model_coefficients[..., tf.newaxis])[..., 0] if link == 'linear': response = tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) elif link == 'probit': response = tf.cast( tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) > 0, dtype) elif link == 'logit': response = tfd.Bernoulli(logits=linear_response).sample(seed=44) else: raise ValueError('unrecognized true link: {}'.format(link)) return model_matrix, response, model_coefficients, mask with tf.Session() as sess: x_, y_, model_coefficients_true_, _ = sess.run(make_dataset( n=int(1e5), d=100, link='probit')) model = tfp.glm.Bernoulli() model_coefficients_start = tf.zeros(x_.shape[-1], np.float32) model_coefficients, is_converged, num_iter = tfp.glm.fit_sparse( model_matrix=tf.convert_to_tensor(x_), response=tf.convert_to_tensor(y_), model=model, model_coefficients_start=model_coefficients_start, l1_regularizer=800., l2_regularizer=None, maximum_iterations=10, maximum_full_sweeps_per_iteration=10, tolerance=1e-6, learning_rate=None) model_coefficients_, is_converged_, num_iter_ = sess.run([ model_coefficients, is_converged, num_iter]) print("is_converged:", is_converged_) print(" num_iter:", num_iter_) print("\nLearned / True") print(np.concatenate( [[model_coefficients_], [model_coefficients_true_]], axis=0).T) # ==> # is_converged: True # num_iter: 1 # # Learned / True # [[ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0.11195257 0.12484948] # [ 0. 0. ] # [ 0.05191106 0.06394956] # [-0.15090358 -0.15325639] # [-0.18187316 -0.18825999] # [-0.06140942 -0.07994166] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0.14474444 0.15810856] # [ 0. 0. ] # [-0.25249591 -0.24260855] # [ 0. 0. ] # [ 0. 0. ] # [-0.03888761 -0.06755984] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [-0.0192222 -0.04169233] # [ 0. 0. ] # [ 0. 0. ] # [ 0.01434913 0.03568212] # [-0.11336883 -0.12873614] # [ 0. 0. ] # [-0.24496339 -0.24048163] # [ 0. 0. ] # [ 0. 0. ] # [ 0.04088281 0.06565224] # [-0.12784363 -0.13359821] # [ 0.05618424 0.07396613] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0. -0.01719233] # [ 0. 0. ] # [ 0. 0. ] # [-0.00076072 -0.03607186] # [ 0.21801499 0.21146794] # [-0.02161094 -0.04031265] # [ 0.0918689 0.10487888] # [ 0.0106154 0.03233612] # [-0.07817317 -0.09725142] # [ 0. 0. ] # [ 0. 0. ] # [-0.23725343 -0.24194022] # [ 0. 0. ] # [-0.08725718 -0.1048776 ] # [ 0. 0. ] # [ 0. 0. ] # [-0.02114314 -0.04145789] # [ 0. 0. ] # [ 0. 0. ] # [-0.02710908 -0.04590397] # [ 0.15293184 0.15415154] # [ 0.2114463 0.2088728 ] # [-0.10969634 -0.12368613] # [ 0. 
-0.01505797] # [-0.01140458 -0.03234904] # [ 0.16051085 0.1680062 ] # [ 0.09816848 0.11094204] ``` #### References [1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths for Generalized Linear Models via Coordinate Descent. _Journal of Statistical Software_, 33(1), 2010. https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf [2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for L1-regularized Logistic Regression. _Journal of Machine Learning Research_, 13, 2012. http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf """ graph_deps = [ model_matrix, response, model_coefficients_start, l1_regularizer, l2_regularizer, maximum_iterations, maximum_full_sweeps_per_iteration, # TODO(b/111925792): Replace `tolerance` arg with something like # `convergence_criteria_fn`. tolerance, learning_rate, ] with tf.compat.v1.name_scope(name, 'fit_sparse', graph_deps): # TODO(b/111922388): Include dispersion and offset parameters. def _grad_neg_log_likelihood_and_fim_fn(x): predicted_linear_response = sparse_or_dense_matvecmul(model_matrix, x) g, h_middle = _grad_neg_log_likelihood_and_fim( model_matrix, predicted_linear_response, response, model) return g, model_matrix, h_middle return tfp.optimizer.proximal_hessian_sparse_minimize( _grad_neg_log_likelihood_and_fim_fn, x_start=model_coefficients_start, l1_regularizer=l1_regularizer, l2_regularizer=l2_regularizer, maximum_iterations=maximum_iterations, maximum_full_sweeps_per_iteration=maximum_full_sweeps_per_iteration, learning_rate=learning_rate, tolerance=tolerance, name=name)
def fit_sparse(model_matrix, response, model, model_coefficients_start, tolerance, l1_regularizer, l2_regularizer=None, maximum_iterations=None, maximum_full_sweeps_per_iteration=1, learning_rate=None, name=None): r"""Fits a GLM using coordinate-wise FIM-informed proximal gradient descent. This function uses a L1- and L2-regularized, second-order quasi-Newton method to find maximum-likelihood parameters for the given model and observed data. The second-order approximations use negative Fisher information in place of the Hessian, that is, ```none FisherInfo = E_Y[Hessian with respect to model_coefficients of -LogLikelihood( Y | model_matrix, current value of model_coefficients)] ``` For large, sparse data sets, `model_matrix` should be supplied as a `SparseTensor`. Args: model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor` where each row represents a sample's features. Has shape `[N, n]` where `N` is the number of data samples and `n` is the number of features per sample. response: (Batch of) vector-shaped `Tensor` with the same dtype as `model_matrix` where each element represents a sample's observed response (to the corresponding row of features). model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link function and distribution of the GLM, and thus characterizes the negative log-likelihood which will be minimized. Must have sufficient statistic equal to the response, that is, `T(y) = y`. model_coefficients_start: (Batch of) vector-shaped, `float` `Tensor` with the same dtype as `model_matrix`, representing the initial values of the coefficients for the GLM regression. Has shape `[n]` where `model_matrix` has shape `[N, n]`. tolerance: scalar, `float` `Tensor` representing the tolerance for each optiization step; see the `tolerance` argument of `fit_sparse_one_step`. l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1 regularization term. l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2 regularization term. Default value: `None` (i.e., no L2 regularization). maximum_iterations: Python integer specifying maximum number of iterations of the outer loop of the optimizer (i.e., maximum number of calls to `fit_sparse_one_step`). After this many iterations of the outer loop, the algorithm will terminate even if the return value `model_coefficients` has not converged. Default value: `1`. maximum_full_sweeps_per_iteration: Python integer specifying the maximum number of coordinate descent sweeps allowed in each iteration. Default value: `1`. learning_rate: scalar, `float` `Tensor` representing a multiplicative factor used to dampen the proximal gradient descent steps. Default value: `None` (i.e., factor is conceptually `1`). name: Python string representing the name of the TensorFlow operation. The default name is `"fit_sparse"`. Returns: model_coefficients: (Batch of) `Tensor` of the same shape and dtype as `model_coefficients_start`, representing the computed model coefficients which minimize the regularized negative log-likelihood. is_converged: scalar, `bool` `Tensor` indicating whether the minimization procedure converged across all batches within the specified number of iterations. Here convergence means that an iteration of the inner loop (`fit_sparse_one_step`) returns `True` for its `is_converged` output value. 
iter: scalar, `int` `Tensor` indicating the actual number of iterations of the outer loop of the optimizer completed (i.e., number of calls to `fit_sparse_one_step` before achieving convergence). #### Example ```python from __future__ import print_function import numpy as np import tensorflow as tf import tensorflow_probability as tfp tfd = tfp.distributions def make_dataset(n, d, link, scale=1., dtype=np.float32): model_coefficients = tfd.Uniform( low=np.array(-1, dtype), high=np.array(1, dtype)).sample( d, seed=42) radius = np.sqrt(2.) model_coefficients *= radius / tf.linalg.norm(model_coefficients) mask = tf.random_shuffle(tf.range(d)) < tf.to_int32(0.5 * tf.to_float(d)) model_coefficients = tf.where(mask, model_coefficients, tf.zeros_like(model_coefficients)) model_matrix = tfd.Normal( loc=np.array(0, dtype), scale=np.array(1, dtype)).sample( [n, d], seed=43) scale = tf.convert_to_tensor(scale, dtype) linear_response = tf.matmul(model_matrix, model_coefficients[..., tf.newaxis])[..., 0] if link == 'linear': response = tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) elif link == 'probit': response = tf.cast( tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) > 0, dtype) elif link == 'logit': response = tfd.Bernoulli(logits=linear_response).sample(seed=44) else: raise ValueError('unrecognized true link: {}'.format(link)) return model_matrix, response, model_coefficients, mask with tf.Session() as sess: x_, y_, model_coefficients_true_, _ = sess.run(make_dataset( n=int(1e5), d=100, link='probit')) model = tfp.glm.Bernoulli() model_coefficients_start = tf.zeros(x_.shape[-1], np.float32) model_coefficients, is_converged, num_iter = tfp.glm.fit_sparse( model_matrix=tf.convert_to_tensor(x_), response=tf.convert_to_tensor(y_), model=model, model_coefficients_start=model_coefficients_start, l1_regularizer=800., l2_regularizer=None, maximum_iterations=10, maximum_full_sweeps_per_iteration=10, tolerance=1e-6, learning_rate=None) model_coefficients_, is_converged_, num_iter_ = sess.run([ model_coefficients, is_converged, num_iter]) print("is_converged:", is_converged_) print(" num_iter:", num_iter_) print("\nLearned / True") print(np.concatenate( [[model_coefficients_], [model_coefficients_true_]], axis=0).T) # ==> # is_converged: True # num_iter: 1 # # Learned / True # [[ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0.11195257 0.12484948] # [ 0. 0. ] # [ 0.05191106 0.06394956] # [-0.15090358 -0.15325639] # [-0.18187316 -0.18825999] # [-0.06140942 -0.07994166] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0.14474444 0.15810856] # [ 0. 0. ] # [-0.25249591 -0.24260855] # [ 0. 0. ] # [ 0. 0. ] # [-0.03888761 -0.06755984] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [-0.0192222 -0.04169233] # [ 0. 0. ] # [ 0. 0. ] # [ 0.01434913 0.03568212] # [-0.11336883 -0.12873614] # [ 0. 0. ] # [-0.24496339 -0.24048163] # [ 0. 0. ] # [ 0. 0. ] # [ 0.04088281 0.06565224] # [-0.12784363 -0.13359821] # [ 0.05618424 0.07396613] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0. -0.01719233] # [ 0. 0. ] # [ 0. 0. ] # [-0.00076072 -0.03607186] # [ 0.21801499 0.21146794] # [-0.02161094 -0.04031265] # [ 0.0918689 0.10487888] # [ 0.0106154 0.03233612] # [-0.07817317 -0.09725142] # [ 0. 0. ] # [ 0. 0. ] # [-0.23725343 -0.24194022] # [ 0. 0. ] # [-0.08725718 -0.1048776 ] # [ 0. 0. ] # [ 0. 0. ] # [-0.02114314 -0.04145789] # [ 0. 0. ] # [ 0. 0. ] # [-0.02710908 -0.04590397] # [ 0.15293184 0.15415154] # [ 0.2114463 0.2088728 ] # [-0.10969634 -0.12368613] # [ 0. 
-0.01505797] # [-0.01140458 -0.03234904] # [ 0.16051085 0.1680062 ] # [ 0.09816848 0.11094204] ``` #### References [1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths for Generalized Linear Models via Coordinate Descent. _Journal of Statistical Software_, 33(1), 2010. https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf [2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for L1-regularized Logistic Regression. _Journal of Machine Learning Research_, 13, 2012. http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf """ graph_deps = [ model_matrix, response, model_coefficients_start, l1_regularizer, l2_regularizer, maximum_iterations, maximum_full_sweeps_per_iteration, # TODO(b/111925792): Replace `tolerance` arg with something like # `convergence_criteria_fn`. tolerance, learning_rate, ] with tf.compat.v1.name_scope(name, 'fit_sparse', graph_deps): # TODO(b/111922388): Include dispersion and offset parameters. def _grad_neg_log_likelihood_and_fim_fn(x): predicted_linear_response = sparse_or_dense_matvecmul(model_matrix, x) g, h_middle = _grad_neg_log_likelihood_and_fim( model_matrix, predicted_linear_response, response, model) return g, model_matrix, h_middle return tfp.optimizer.proximal_hessian_sparse_minimize( _grad_neg_log_likelihood_and_fim_fn, x_start=model_coefficients_start, l1_regularizer=l1_regularizer, l2_regularizer=l2_regularizer, maximum_iterations=maximum_iterations, maximum_full_sweeps_per_iteration=maximum_full_sweeps_per_iteration, learning_rate=learning_rate, tolerance=tolerance, name=name)
[ "r", "Fits", "a", "GLM", "using", "coordinate", "-", "wise", "FIM", "-", "informed", "proximal", "gradient", "descent", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/proximal_hessian.py#L235-L488
[ "def", "fit_sparse", "(", "model_matrix", ",", "response", ",", "model", ",", "model_coefficients_start", ",", "tolerance", ",", "l1_regularizer", ",", "l2_regularizer", "=", "None", ",", "maximum_iterations", "=", "None", ",", "maximum_full_sweeps_per_iteration", "=", "1", ",", "learning_rate", "=", "None", ",", "name", "=", "None", ")", ":", "graph_deps", "=", "[", "model_matrix", ",", "response", ",", "model_coefficients_start", ",", "l1_regularizer", ",", "l2_regularizer", ",", "maximum_iterations", ",", "maximum_full_sweeps_per_iteration", ",", "# TODO(b/111925792): Replace `tolerance` arg with something like", "# `convergence_criteria_fn`.", "tolerance", ",", "learning_rate", ",", "]", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'fit_sparse'", ",", "graph_deps", ")", ":", "# TODO(b/111922388): Include dispersion and offset parameters.", "def", "_grad_neg_log_likelihood_and_fim_fn", "(", "x", ")", ":", "predicted_linear_response", "=", "sparse_or_dense_matvecmul", "(", "model_matrix", ",", "x", ")", "g", ",", "h_middle", "=", "_grad_neg_log_likelihood_and_fim", "(", "model_matrix", ",", "predicted_linear_response", ",", "response", ",", "model", ")", "return", "g", ",", "model_matrix", ",", "h_middle", "return", "tfp", ".", "optimizer", ".", "proximal_hessian_sparse_minimize", "(", "_grad_neg_log_likelihood_and_fim_fn", ",", "x_start", "=", "model_coefficients_start", ",", "l1_regularizer", "=", "l1_regularizer", ",", "l2_regularizer", "=", "l2_regularizer", ",", "maximum_iterations", "=", "maximum_iterations", ",", "maximum_full_sweeps_per_iteration", "=", "maximum_full_sweeps_per_iteration", ",", "learning_rate", "=", "learning_rate", ",", "tolerance", "=", "tolerance", ",", "name", "=", "name", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
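Both GLM entries above describe the coordinate updates as fixed points of an L1 "soft threshold" proximity operator. As a point of reference only (this helper is not part of TFP, and the constant below is an arbitrary example value), the standard elementwise form of that operator is:

```python
# Illustrative NumPy sketch (not library code) of the soft-threshold proximal
# operator: prox_{lambda * |.|}(z) = sign(z) * max(|z| - lambda, 0).
import numpy as np

def soft_threshold(z, threshold):
  """Elementwise L1 proximity operator; shrinks toward zero and clips there."""
  return np.sign(z) * np.maximum(np.abs(z) - threshold, 0.)

print(soft_threshold(np.array([-2.0, -0.3, 0.0, 0.4, 3.0]), 0.5))
# ==> approximately [-1.5, 0., 0., 0., 2.5]
```

Values whose magnitude falls below the threshold are set exactly to zero, which is why the coordinate-wise updates tend to preserve sparsity in the coefficient vector.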
test
_gen_slices
Generate the slices for building an autoregressive mask.
tensorflow_probability/python/bijectors/masked_autoregressive.py
def _gen_slices(num_blocks, n_in, n_out, mask_type=MASK_EXCLUSIVE): """Generate the slices for building an autoregressive mask.""" # TODO(b/67594795): Better support of dynamic shape. slices = [] col = 0 d_in = n_in // num_blocks d_out = n_out // num_blocks row = d_out if mask_type == MASK_EXCLUSIVE else 0 for _ in range(num_blocks): row_slice = slice(row, None) col_slice = slice(col, col + d_in) slices.append([row_slice, col_slice]) col += d_in row += d_out return slices
def _gen_slices(num_blocks, n_in, n_out, mask_type=MASK_EXCLUSIVE): """Generate the slices for building an autoregressive mask.""" # TODO(b/67594795): Better support of dynamic shape. slices = [] col = 0 d_in = n_in // num_blocks d_out = n_out // num_blocks row = d_out if mask_type == MASK_EXCLUSIVE else 0 for _ in range(num_blocks): row_slice = slice(row, None) col_slice = slice(col, col + d_in) slices.append([row_slice, col_slice]) col += d_in row += d_out return slices
[ "Generate", "the", "slices", "for", "building", "an", "autoregressive", "mask", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/masked_autoregressive.py#L306-L320
[ "def", "_gen_slices", "(", "num_blocks", ",", "n_in", ",", "n_out", ",", "mask_type", "=", "MASK_EXCLUSIVE", ")", ":", "# TODO(b/67594795): Better support of dynamic shape.", "slices", "=", "[", "]", "col", "=", "0", "d_in", "=", "n_in", "//", "num_blocks", "d_out", "=", "n_out", "//", "num_blocks", "row", "=", "d_out", "if", "mask_type", "==", "MASK_EXCLUSIVE", "else", "0", "for", "_", "in", "range", "(", "num_blocks", ")", ":", "row_slice", "=", "slice", "(", "row", ",", "None", ")", "col_slice", "=", "slice", "(", "col", ",", "col", "+", "d_in", ")", "slices", ".", "append", "(", "[", "row_slice", ",", "col_slice", "]", ")", "col", "+=", "d_in", "row", "+=", "d_out", "return", "slices" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
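To make the slice pattern concrete, here is a small assumed example; the call below presumes the `_gen_slices` helper above is in scope (e.g., inside the same module), and the printed slices were worked out by hand from the loop above.

```python
# For 2 blocks over 4 inputs and 4 outputs with the default exclusive mask,
# each [row_slice, col_slice] pair marks which output rows may connect to
# which input columns.
for row_slice, col_slice in _gen_slices(num_blocks=2, n_in=4, n_out=4):
  print(row_slice, col_slice)
# ==> slice(2, None, None) slice(0, 2, None)
#     slice(4, None, None) slice(2, 4, None)
```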
test
_gen_mask
Generate the mask for building an autoregressive dense layer.
tensorflow_probability/python/bijectors/masked_autoregressive.py
def _gen_mask(num_blocks, n_in, n_out, mask_type=MASK_EXCLUSIVE, dtype=tf.float32): """Generate the mask for building an autoregressive dense layer.""" # TODO(b/67594795): Better support of dynamic shape. mask = np.zeros([n_out, n_in], dtype=dtype.as_numpy_dtype()) slices = _gen_slices(num_blocks, n_in, n_out, mask_type=mask_type) for [row_slice, col_slice] in slices: mask[row_slice, col_slice] = 1 return mask
def _gen_mask(num_blocks, n_in, n_out, mask_type=MASK_EXCLUSIVE, dtype=tf.float32): """Generate the mask for building an autoregressive dense layer.""" # TODO(b/67594795): Better support of dynamic shape. mask = np.zeros([n_out, n_in], dtype=dtype.as_numpy_dtype()) slices = _gen_slices(num_blocks, n_in, n_out, mask_type=mask_type) for [row_slice, col_slice] in slices: mask[row_slice, col_slice] = 1 return mask
[ "Generate", "the", "mask", "for", "building", "an", "autoregressive", "dense", "layer", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/masked_autoregressive.py#L323-L334
[ "def", "_gen_mask", "(", "num_blocks", ",", "n_in", ",", "n_out", ",", "mask_type", "=", "MASK_EXCLUSIVE", ",", "dtype", "=", "tf", ".", "float32", ")", ":", "# TODO(b/67594795): Better support of dynamic shape.", "mask", "=", "np", ".", "zeros", "(", "[", "n_out", ",", "n_in", "]", ",", "dtype", "=", "dtype", ".", "as_numpy_dtype", "(", ")", ")", "slices", "=", "_gen_slices", "(", "num_blocks", ",", "n_in", ",", "n_out", ",", "mask_type", "=", "mask_type", ")", "for", "[", "row_slice", ",", "col_slice", "]", "in", "slices", ":", "mask", "[", "row_slice", ",", "col_slice", "]", "=", "1", "return", "mask" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
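The effect of the `exclusive` flag is easiest to see on a tiny mask. The following standalone NumPy sketch is not library code; it simply mirrors the two helpers above to print the `[n_out, n_in]` masks for `num_blocks = n_in = n_out = 3`.

```python
# The exclusive mask zeroes the block diagonal (used for the first MADE layer);
# the inclusive mask keeps it (used for subsequent layers).
import numpy as np

def demo_mask(num_blocks, n_in, n_out, exclusive):
  mask = np.zeros([n_out, n_in], dtype=np.float32)
  d_in, d_out = n_in // num_blocks, n_out // num_blocks
  row, col = (d_out if exclusive else 0), 0
  for _ in range(num_blocks):
    mask[row:, col:col + d_in] = 1.
    col += d_in
    row += d_out
  return mask

print(demo_mask(3, 3, 3, exclusive=True))
# ==> [[0. 0. 0.]
#      [1. 0. 0.]
#      [1. 1. 0.]]
print(demo_mask(3, 3, 3, exclusive=False))
# ==> [[1. 0. 0.]
#      [1. 1. 0.]
#      [1. 1. 1.]]
```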
test
masked_dense
An autoregressively masked dense layer. Analogous to `tf.layers.dense`. See [Germain et al. (2015)][1] for detailed explanation. Arguments: inputs: Tensor input. units: Python `int` scalar representing the dimensionality of the output space. num_blocks: Python `int` scalar representing the number of blocks for the MADE masks. exclusive: Python `bool` scalar representing whether to zero the diagonal of the mask, used for the first layer of a MADE. kernel_initializer: Initializer function for the weight matrix. If `None` (default), weights are initialized using the `tf.glorot_normal_initializer`. reuse: Python `bool` scalar representing whether to reuse the weights of a previous layer by the same name. name: Python `str` used to describe ops managed by this function. *args: `tf.layers.dense` arguments. **kwargs: `tf.layers.dense` keyword arguments. Returns: Output tensor. Raises: NotImplementedError: if rightmost dimension of `inputs` is unknown prior to graph execution. #### References [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE: Masked Autoencoder for Distribution Estimation. In _International Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
tensorflow_probability/python/bijectors/masked_autoregressive.py
def masked_dense(inputs, units, num_blocks=None, exclusive=False, kernel_initializer=None, reuse=None, name=None, *args, # pylint: disable=keyword-arg-before-vararg **kwargs): """A autoregressively masked dense layer. Analogous to `tf.layers.dense`. See [Germain et al. (2015)][1] for detailed explanation. Arguments: inputs: Tensor input. units: Python `int` scalar representing the dimensionality of the output space. num_blocks: Python `int` scalar representing the number of blocks for the MADE masks. exclusive: Python `bool` scalar representing whether to zero the diagonal of the mask, used for the first layer of a MADE. kernel_initializer: Initializer function for the weight matrix. If `None` (default), weights are initialized using the `tf.glorot_random_initializer`. reuse: Python `bool` scalar representing whether to reuse the weights of a previous layer by the same name. name: Python `str` used to describe ops managed by this function. *args: `tf.layers.dense` arguments. **kwargs: `tf.layers.dense` keyword arguments. Returns: Output tensor. Raises: NotImplementedError: if rightmost dimension of `inputs` is unknown prior to graph execution. #### References [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE: Masked Autoencoder for Distribution Estimation. In _International Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509 """ # TODO(b/67594795): Better support of dynamic shape. input_depth = tf.compat.dimension_value( tensorshape_util.with_rank_at_least(inputs.shape, 1)[-1]) if input_depth is None: raise NotImplementedError( "Rightmost dimension must be known prior to graph execution.") mask = _gen_mask(num_blocks, input_depth, units, MASK_EXCLUSIVE if exclusive else MASK_INCLUSIVE).T if kernel_initializer is None: kernel_initializer = tf.compat.v1.glorot_normal_initializer() def masked_initializer(shape, dtype=None, partition_info=None): return mask * kernel_initializer(shape, dtype, partition_info) with tf.compat.v2.name_scope(name or "masked_dense"): layer = tf.compat.v1.layers.Dense( units, kernel_initializer=masked_initializer, kernel_constraint=lambda x: mask * x, name=name, dtype=dtype_util.base_dtype(inputs.dtype), _scope=name, _reuse=reuse, *args, # pylint: disable=keyword-arg-before-vararg **kwargs) return layer.apply(inputs)
def masked_dense(inputs, units, num_blocks=None, exclusive=False, kernel_initializer=None, reuse=None, name=None, *args, # pylint: disable=keyword-arg-before-vararg **kwargs): """A autoregressively masked dense layer. Analogous to `tf.layers.dense`. See [Germain et al. (2015)][1] for detailed explanation. Arguments: inputs: Tensor input. units: Python `int` scalar representing the dimensionality of the output space. num_blocks: Python `int` scalar representing the number of blocks for the MADE masks. exclusive: Python `bool` scalar representing whether to zero the diagonal of the mask, used for the first layer of a MADE. kernel_initializer: Initializer function for the weight matrix. If `None` (default), weights are initialized using the `tf.glorot_random_initializer`. reuse: Python `bool` scalar representing whether to reuse the weights of a previous layer by the same name. name: Python `str` used to describe ops managed by this function. *args: `tf.layers.dense` arguments. **kwargs: `tf.layers.dense` keyword arguments. Returns: Output tensor. Raises: NotImplementedError: if rightmost dimension of `inputs` is unknown prior to graph execution. #### References [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE: Masked Autoencoder for Distribution Estimation. In _International Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509 """ # TODO(b/67594795): Better support of dynamic shape. input_depth = tf.compat.dimension_value( tensorshape_util.with_rank_at_least(inputs.shape, 1)[-1]) if input_depth is None: raise NotImplementedError( "Rightmost dimension must be known prior to graph execution.") mask = _gen_mask(num_blocks, input_depth, units, MASK_EXCLUSIVE if exclusive else MASK_INCLUSIVE).T if kernel_initializer is None: kernel_initializer = tf.compat.v1.glorot_normal_initializer() def masked_initializer(shape, dtype=None, partition_info=None): return mask * kernel_initializer(shape, dtype, partition_info) with tf.compat.v2.name_scope(name or "masked_dense"): layer = tf.compat.v1.layers.Dense( units, kernel_initializer=masked_initializer, kernel_constraint=lambda x: mask * x, name=name, dtype=dtype_util.base_dtype(inputs.dtype), _scope=name, _reuse=reuse, *args, # pylint: disable=keyword-arg-before-vararg **kwargs) return layer.apply(inputs)
[ "A", "autoregressively", "masked", "dense", "layer", ".", "Analogous", "to", "tf", ".", "layers", ".", "dense", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/masked_autoregressive.py#L337-L407
[ "def", "masked_dense", "(", "inputs", ",", "units", ",", "num_blocks", "=", "None", ",", "exclusive", "=", "False", ",", "kernel_initializer", "=", "None", ",", "reuse", "=", "None", ",", "name", "=", "None", ",", "*", "args", ",", "# pylint: disable=keyword-arg-before-vararg", "*", "*", "kwargs", ")", ":", "# TODO(b/67594795): Better support of dynamic shape.", "input_depth", "=", "tf", ".", "compat", ".", "dimension_value", "(", "tensorshape_util", ".", "with_rank_at_least", "(", "inputs", ".", "shape", ",", "1", ")", "[", "-", "1", "]", ")", "if", "input_depth", "is", "None", ":", "raise", "NotImplementedError", "(", "\"Rightmost dimension must be known prior to graph execution.\"", ")", "mask", "=", "_gen_mask", "(", "num_blocks", ",", "input_depth", ",", "units", ",", "MASK_EXCLUSIVE", "if", "exclusive", "else", "MASK_INCLUSIVE", ")", ".", "T", "if", "kernel_initializer", "is", "None", ":", "kernel_initializer", "=", "tf", ".", "compat", ".", "v1", ".", "glorot_normal_initializer", "(", ")", "def", "masked_initializer", "(", "shape", ",", "dtype", "=", "None", ",", "partition_info", "=", "None", ")", ":", "return", "mask", "*", "kernel_initializer", "(", "shape", ",", "dtype", ",", "partition_info", ")", "with", "tf", ".", "compat", ".", "v2", ".", "name_scope", "(", "name", "or", "\"masked_dense\"", ")", ":", "layer", "=", "tf", ".", "compat", ".", "v1", ".", "layers", ".", "Dense", "(", "units", ",", "kernel_initializer", "=", "masked_initializer", ",", "kernel_constraint", "=", "lambda", "x", ":", "mask", "*", "x", ",", "name", "=", "name", ",", "dtype", "=", "dtype_util", ".", "base_dtype", "(", "inputs", ".", "dtype", ")", ",", "_scope", "=", "name", ",", "_reuse", "=", "reuse", ",", "*", "args", ",", "# pylint: disable=keyword-arg-before-vararg", "*", "*", "kwargs", ")", "return", "layer", ".", "apply", "(", "inputs", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
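A minimal usage sketch of this layer, assuming the usual `tfp.bijectors.masked_dense` export; the input width, unit count, and activation below are arbitrary example choices, and the snippet follows the TF1-style `tf.compat.v1` conventions of the code above.

```python
# Build an exclusive, autoregressively masked 4-unit layer over a
# 4-dimensional input, as would be used for the first layer of a MADE.
import tensorflow as tf
import tensorflow_probability as tfp

x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 4])
h = tfp.bijectors.masked_dense(
    inputs=x, units=4, num_blocks=4, exclusive=True, activation=tf.nn.relu)
# Because of the exclusive mask, h[..., i] can depend only on x[..., :i].
```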
test
masked_autoregressive_default_template
Build the Masked Autoregressive Density Estimator (Germain et al., 2015). This will be wrapped in a make_template to ensure the variables are only created once. It takes the input and returns the `loc` ("mu" in [Germain et al. (2015)][1]) and `log_scale` ("alpha" in [Germain et al. (2015)][1]) from the MADE network. Warning: This function uses `masked_dense` to create randomly initialized `tf.Variables`. It is presumed that these will be fit, just as you would any other neural architecture which uses `tf.layers.dense`. #### About Hidden Layers Each element of `hidden_layers` should be greater than the `input_depth` (i.e., `input_depth = tf.shape(input)[-1]` where `input` is the input to the neural network). This is necessary to ensure the autoregressivity property. #### About Clipping This function also optionally clips the `log_scale` (but possibly not its gradient). This is useful because if `log_scale` is too small/large it might underflow/overflow making it impossible for the `MaskedAutoregressiveFlow` bijector to implement a bijection. Additionally, the `log_scale_clip_gradient` `bool` indicates whether the gradient should also be clipped. The default does not clip the gradient; this is useful because it still provides gradient information (for fitting) yet solves the numerical stability problem. I.e., `log_scale_clip_gradient = False` means `grad[exp(clip(x))] = grad[x] exp(clip(x))` rather than the usual `grad[clip(x)] exp(clip(x))`. Args: hidden_layers: Python `list`-like of non-negative integer, scalars indicating the number of units in each hidden layer. Default: `[512, 512]. shift_only: Python `bool` indicating if only the `shift` term shall be computed. Default: `False`. activation: Activation function (callable). Explicitly setting to `None` implies a linear activation. log_scale_min_clip: `float`-like scalar `Tensor`, or a `Tensor` with the same shape as `log_scale`. The minimum value to clip by. Default: -5. log_scale_max_clip: `float`-like scalar `Tensor`, or a `Tensor` with the same shape as `log_scale`. The maximum value to clip by. Default: 3. log_scale_clip_gradient: Python `bool` indicating that the gradient of `tf.clip_by_value` should be preserved. Default: `False`. name: A name for ops managed by this function. Default: "masked_autoregressive_default_template". *args: `tf.layers.dense` arguments. **kwargs: `tf.layers.dense` keyword arguments. Returns: shift: `Float`-like `Tensor` of shift terms (the "mu" in [Germain et al. (2015)][1]). log_scale: `Float`-like `Tensor` of log(scale) terms (the "alpha" in [Germain et al. (2015)][1]). Raises: NotImplementedError: if rightmost dimension of `inputs` is unknown prior to graph execution. #### References [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE: Masked Autoencoder for Distribution Estimation. In _International Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
tensorflow_probability/python/bijectors/masked_autoregressive.py
def masked_autoregressive_default_template(hidden_layers, shift_only=False, activation=tf.nn.relu, log_scale_min_clip=-5., log_scale_max_clip=3., log_scale_clip_gradient=False, name=None, *args, # pylint: disable=keyword-arg-before-vararg **kwargs): """Build the Masked Autoregressive Density Estimator (Germain et al., 2015). This will be wrapped in a make_template to ensure the variables are only created once. It takes the input and returns the `loc` ("mu" in [Germain et al. (2015)][1]) and `log_scale` ("alpha" in [Germain et al. (2015)][1]) from the MADE network. Warning: This function uses `masked_dense` to create randomly initialized `tf.Variables`. It is presumed that these will be fit, just as you would any other neural architecture which uses `tf.layers.dense`. #### About Hidden Layers Each element of `hidden_layers` should be greater than the `input_depth` (i.e., `input_depth = tf.shape(input)[-1]` where `input` is the input to the neural network). This is necessary to ensure the autoregressivity property. #### About Clipping This function also optionally clips the `log_scale` (but possibly not its gradient). This is useful because if `log_scale` is too small/large it might underflow/overflow making it impossible for the `MaskedAutoregressiveFlow` bijector to implement a bijection. Additionally, the `log_scale_clip_gradient` `bool` indicates whether the gradient should also be clipped. The default does not clip the gradient; this is useful because it still provides gradient information (for fitting) yet solves the numerical stability problem. I.e., `log_scale_clip_gradient = False` means `grad[exp(clip(x))] = grad[x] exp(clip(x))` rather than the usual `grad[clip(x)] exp(clip(x))`. Args: hidden_layers: Python `list`-like of non-negative integer, scalars indicating the number of units in each hidden layer. Default: `[512, 512]. shift_only: Python `bool` indicating if only the `shift` term shall be computed. Default: `False`. activation: Activation function (callable). Explicitly setting to `None` implies a linear activation. log_scale_min_clip: `float`-like scalar `Tensor`, or a `Tensor` with the same shape as `log_scale`. The minimum value to clip by. Default: -5. log_scale_max_clip: `float`-like scalar `Tensor`, or a `Tensor` with the same shape as `log_scale`. The maximum value to clip by. Default: 3. log_scale_clip_gradient: Python `bool` indicating that the gradient of `tf.clip_by_value` should be preserved. Default: `False`. name: A name for ops managed by this function. Default: "masked_autoregressive_default_template". *args: `tf.layers.dense` arguments. **kwargs: `tf.layers.dense` keyword arguments. Returns: shift: `Float`-like `Tensor` of shift terms (the "mu" in [Germain et al. (2015)][1]). log_scale: `Float`-like `Tensor` of log(scale) terms (the "alpha" in [Germain et al. (2015)][1]). Raises: NotImplementedError: if rightmost dimension of `inputs` is unknown prior to graph execution. #### References [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE: Masked Autoencoder for Distribution Estimation. In _International Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509 """ name = name or "masked_autoregressive_default_template" with tf.compat.v2.name_scope(name): def _fn(x): """MADE parameterized via `masked_autoregressive_default_template`.""" # TODO(b/67594795): Better support of dynamic shape. 
input_depth = tf.compat.dimension_value( tensorshape_util.with_rank_at_least(x.shape, 1)[-1]) if input_depth is None: raise NotImplementedError( "Rightmost dimension must be known prior to graph execution.") input_shape = ( np.int32(tensorshape_util.as_list(x.shape)) if tensorshape_util.is_fully_defined(x.shape) else tf.shape(input=x)) if tensorshape_util.rank(x.shape) == 1: x = x[tf.newaxis, ...] for i, units in enumerate(hidden_layers): x = masked_dense( inputs=x, units=units, num_blocks=input_depth, exclusive=True if i == 0 else False, activation=activation, *args, # pylint: disable=keyword-arg-before-vararg **kwargs) x = masked_dense( inputs=x, units=(1 if shift_only else 2) * input_depth, num_blocks=input_depth, activation=None, *args, # pylint: disable=keyword-arg-before-vararg **kwargs) if shift_only: x = tf.reshape(x, shape=input_shape) return x, None x = tf.reshape(x, shape=tf.concat([input_shape, [2]], axis=0)) shift, log_scale = tf.unstack(x, num=2, axis=-1) which_clip = ( tf.clip_by_value if log_scale_clip_gradient else clip_by_value_preserve_gradient) log_scale = which_clip(log_scale, log_scale_min_clip, log_scale_max_clip) return shift, log_scale return tf.compat.v1.make_template(name, _fn)
def masked_autoregressive_default_template(hidden_layers,
                                           shift_only=False,
                                           activation=tf.nn.relu,
                                           log_scale_min_clip=-5.,
                                           log_scale_max_clip=3.,
                                           log_scale_clip_gradient=False,
                                           name=None,
                                           *args,  # pylint: disable=keyword-arg-before-vararg
                                           **kwargs):
  """Build the Masked Autoregressive Density Estimator (Germain et al., 2015).

  This will be wrapped in a make_template to ensure the variables are only
  created once. It takes the input and returns the `loc` ("mu" in
  [Germain et al. (2015)][1]) and `log_scale` ("alpha" in
  [Germain et al. (2015)][1]) from the MADE network.

  Warning: This function uses `masked_dense` to create randomly initialized
  `tf.Variables`. It is presumed that these will be fit, just as you would any
  other neural architecture which uses `tf.layers.dense`.

  #### About Hidden Layers

  Each element of `hidden_layers` should be greater than the `input_depth`
  (i.e., `input_depth = tf.shape(input)[-1]` where `input` is the input to the
  neural network). This is necessary to ensure the autoregressivity property.

  #### About Clipping

  This function also optionally clips the `log_scale` (but possibly not its
  gradient). This is useful because if `log_scale` is too small/large it might
  underflow/overflow making it impossible for the `MaskedAutoregressiveFlow`
  bijector to implement a bijection. Additionally, the
  `log_scale_clip_gradient` `bool` indicates whether the gradient should also
  be clipped. The default does not clip the gradient; this is useful because it
  still provides gradient information (for fitting) yet solves the numerical
  stability problem. I.e., `log_scale_clip_gradient = False` means
  `grad[exp(clip(x))] = grad[x] exp(clip(x))` rather than the usual
  `grad[clip(x)] exp(clip(x))`.

  Args:
    hidden_layers: Python `list`-like of non-negative integer, scalars
      indicating the number of units in each hidden layer.
      Default: `[512, 512]`.
    shift_only: Python `bool` indicating if only the `shift` term shall be
      computed. Default: `False`.
    activation: Activation function (callable). Explicitly setting to `None`
      implies a linear activation.
    log_scale_min_clip: `float`-like scalar `Tensor`, or a `Tensor` with the
      same shape as `log_scale`. The minimum value to clip by. Default: -5.
    log_scale_max_clip: `float`-like scalar `Tensor`, or a `Tensor` with the
      same shape as `log_scale`. The maximum value to clip by. Default: 3.
    log_scale_clip_gradient: Python `bool` indicating that the gradient of
      `tf.clip_by_value` should be preserved. Default: `False`.
    name: A name for ops managed by this function. Default:
      "masked_autoregressive_default_template".
    *args: `tf.layers.dense` arguments.
    **kwargs: `tf.layers.dense` keyword arguments.

  Returns:
    shift: `Float`-like `Tensor` of shift terms (the "mu" in
      [Germain et al. (2015)][1]).
    log_scale: `Float`-like `Tensor` of log(scale) terms (the "alpha" in
      [Germain et al. (2015)][1]).

  Raises:
    NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
      graph execution.

  #### References

  [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE:
       Masked Autoencoder for Distribution Estimation. In _International
       Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
  """
  name = name or "masked_autoregressive_default_template"
  with tf.compat.v2.name_scope(name):

    def _fn(x):
      """MADE parameterized via `masked_autoregressive_default_template`."""
      # TODO(b/67594795): Better support of dynamic shape.
      input_depth = tf.compat.dimension_value(
          tensorshape_util.with_rank_at_least(x.shape, 1)[-1])
      if input_depth is None:
        raise NotImplementedError(
            "Rightmost dimension must be known prior to graph execution.")
      input_shape = (
          np.int32(tensorshape_util.as_list(x.shape))
          if tensorshape_util.is_fully_defined(x.shape) else tf.shape(input=x))
      if tensorshape_util.rank(x.shape) == 1:
        x = x[tf.newaxis, ...]
      for i, units in enumerate(hidden_layers):
        x = masked_dense(
            inputs=x,
            units=units,
            num_blocks=input_depth,
            exclusive=True if i == 0 else False,
            activation=activation,
            *args,  # pylint: disable=keyword-arg-before-vararg
            **kwargs)
      x = masked_dense(
          inputs=x,
          units=(1 if shift_only else 2) * input_depth,
          num_blocks=input_depth,
          activation=None,
          *args,  # pylint: disable=keyword-arg-before-vararg
          **kwargs)
      if shift_only:
        x = tf.reshape(x, shape=input_shape)
        return x, None
      x = tf.reshape(x, shape=tf.concat([input_shape, [2]], axis=0))
      shift, log_scale = tf.unstack(x, num=2, axis=-1)
      which_clip = (
          tf.clip_by_value
          if log_scale_clip_gradient else clip_by_value_preserve_gradient)
      log_scale = which_clip(log_scale, log_scale_min_clip, log_scale_max_clip)
      return shift, log_scale

    return tf.compat.v1.make_template(name, _fn)
[ "Build", "the", "Masked", "Autoregressive", "Density", "Estimator", "(", "Germain", "et", "al", ".", "2015", ")", "." ]
tensorflow/probability
python
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/masked_autoregressive.py#L410-L526
[ "def", "masked_autoregressive_default_template", "(", "hidden_layers", ",", "shift_only", "=", "False", ",", "activation", "=", "tf", ".", "nn", ".", "relu", ",", "log_scale_min_clip", "=", "-", "5.", ",", "log_scale_max_clip", "=", "3.", ",", "log_scale_clip_gradient", "=", "False", ",", "name", "=", "None", ",", "*", "args", ",", "# pylint: disable=keyword-arg-before-vararg", "*", "*", "kwargs", ")", ":", "name", "=", "name", "or", "\"masked_autoregressive_default_template\"", "with", "tf", ".", "compat", ".", "v2", ".", "name_scope", "(", "name", ")", ":", "def", "_fn", "(", "x", ")", ":", "\"\"\"MADE parameterized via `masked_autoregressive_default_template`.\"\"\"", "# TODO(b/67594795): Better support of dynamic shape.", "input_depth", "=", "tf", ".", "compat", ".", "dimension_value", "(", "tensorshape_util", ".", "with_rank_at_least", "(", "x", ".", "shape", ",", "1", ")", "[", "-", "1", "]", ")", "if", "input_depth", "is", "None", ":", "raise", "NotImplementedError", "(", "\"Rightmost dimension must be known prior to graph execution.\"", ")", "input_shape", "=", "(", "np", ".", "int32", "(", "tensorshape_util", ".", "as_list", "(", "x", ".", "shape", ")", ")", "if", "tensorshape_util", ".", "is_fully_defined", "(", "x", ".", "shape", ")", "else", "tf", ".", "shape", "(", "input", "=", "x", ")", ")", "if", "tensorshape_util", ".", "rank", "(", "x", ".", "shape", ")", "==", "1", ":", "x", "=", "x", "[", "tf", ".", "newaxis", ",", "...", "]", "for", "i", ",", "units", "in", "enumerate", "(", "hidden_layers", ")", ":", "x", "=", "masked_dense", "(", "inputs", "=", "x", ",", "units", "=", "units", ",", "num_blocks", "=", "input_depth", ",", "exclusive", "=", "True", "if", "i", "==", "0", "else", "False", ",", "activation", "=", "activation", ",", "*", "args", ",", "# pylint: disable=keyword-arg-before-vararg", "*", "*", "kwargs", ")", "x", "=", "masked_dense", "(", "inputs", "=", "x", ",", "units", "=", "(", "1", "if", "shift_only", "else", "2", ")", "*", "input_depth", ",", "num_blocks", "=", "input_depth", ",", "activation", "=", "None", ",", "*", "args", ",", "# pylint: disable=keyword-arg-before-vararg", "*", "*", "kwargs", ")", "if", "shift_only", ":", "x", "=", "tf", ".", "reshape", "(", "x", ",", "shape", "=", "input_shape", ")", "return", "x", ",", "None", "x", "=", "tf", ".", "reshape", "(", "x", ",", "shape", "=", "tf", ".", "concat", "(", "[", "input_shape", ",", "[", "2", "]", "]", ",", "axis", "=", "0", ")", ")", "shift", ",", "log_scale", "=", "tf", ".", "unstack", "(", "x", ",", "num", "=", "2", ",", "axis", "=", "-", "1", ")", "which_clip", "=", "(", "tf", ".", "clip_by_value", "if", "log_scale_clip_gradient", "else", "clip_by_value_preserve_gradient", ")", "log_scale", "=", "which_clip", "(", "log_scale", ",", "log_scale_min_clip", ",", "log_scale_max_clip", ")", "return", "shift", ",", "log_scale", "return", "tf", ".", "compat", ".", "v1", ".", "make_template", "(", "name", ",", "_fn", ")" ]
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
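The record above documents `masked_autoregressive_default_template`, whose returned template is typically passed to the `MaskedAutoregressiveFlow` bijector as its `shift_and_log_scale_fn`. The following is a minimal usage sketch, not part of the dataset entry: it assumes a TF1-era TensorFlow Probability release consistent with the record's sha, with the conventional `tfd`/`tfb` aliases for `tfp.distributions`/`tfp.bijectors`.

```python
# Usage sketch (illustrative; names `tfd`, `tfb`, `dims`, `maf` are assumptions,
# not taken from the record above).
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
tfb = tfp.bijectors

dims = 2  # Event dimensionality of the target density.

# Wrap the MADE template in a MaskedAutoregressiveFlow bijector and push a
# standard Normal base distribution through it.
maf = tfd.TransformedDistribution(
    distribution=tfd.Normal(loc=0., scale=1.),
    bijector=tfb.MaskedAutoregressiveFlow(
        shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
            hidden_layers=[512, 512])),
    event_shape=[dims])

x = maf.sample(3)     # Forward pass is sequential (one MADE evaluation per dim).
lp = maf.log_prob(x)  # Inverse pass needs only a single MADE evaluation.
```

The asymmetry in the comments is the usual reason MAF is favored for density estimation: `log_prob` is cheap because the inverse direction evaluates the network once, while sampling loops over dimensions.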