| partition (stringclasses 3) | func_name (stringlengths 1–134) | docstring (stringlengths 1–46.9k) | path (stringlengths 4–223) | original_string (stringlengths 75–104k) | code (stringlengths 75–104k) | docstring_tokens (listlengths 1–1.97k) | repo (stringlengths 7–55) | language (stringclasses 1) | url (stringlengths 87–315) | code_tokens (listlengths 19–28.4k) | sha (stringlengths 40) |
|---|---|---|---|---|---|---|---|---|---|---|---|
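A minimal sketch of inspecting rows with this schema via the Hugging Face `datasets` library; the dataset identifier below is a placeholder, not the actual hub ID of this dataset.

```python
from datasets import load_dataset

# "org/dataset-name" is a placeholder; substitute the real hub identifier.
ds = load_dataset("org/dataset-name", split="test")
row = ds[0]
print(row["func_name"], row["path"])
print(row["code"][:200])            # `code` mirrors `original_string`
print(len(row["code_tokens"]))      # number of code tokens for this function
```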
test
|
_value_and_gradients
|
Helper to `maybe_call_fn_and_grads`.
|
tensorflow_probability/python/mcmc/internal/util.py
|
def _value_and_gradients(fn, fn_arg_list, result=None, grads=None, name=None):
"""Helper to `maybe_call_fn_and_grads`."""
with tf.compat.v1.name_scope(name, 'value_and_gradients',
[fn_arg_list, result, grads]):
def _convert_to_tensor(x, name):
ctt = lambda x_: x_ if x_ is None else tf.convert_to_tensor(
value=x_, name=name)
return [ctt(x_) for x_ in x] if is_list_like(x) else ctt(x)
fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
else [fn_arg_list])
fn_arg_list = _convert_to_tensor(fn_arg_list, 'fn_arg')
if result is None:
result = fn(*fn_arg_list)
if grads is None and tf.executing_eagerly():
# Ensure we disable bijector cacheing in eager mode.
# TODO(b/72831017): Remove this once bijector cacheing is fixed for
# eager mode.
fn_arg_list = [0 + x for x in fn_arg_list]
result = _convert_to_tensor(result, 'fn_result')
if grads is not None:
grads = _convert_to_tensor(grads, 'fn_grad')
return result, grads
if is_list_like(result) and len(result) == len(fn_arg_list):
# Compute the block diagonal of Jacobian.
# TODO(b/79158574): Guard this calculation by an arg which explicitly
# requests block diagonal Jacobian calculation.
def fn_slice(i):
"""Needed to prevent `cell-var-from-loop` pylint warning."""
return lambda x: fn(*(fn_arg_list[:i] + [x] + fn_arg_list[i+1:]))
grads = [
tfp_math_value_and_gradients(fn_slice(i), fn_arg_list[i])[1]
for i in range(len(result))
]
else:
_, grads = tfp_math_value_and_gradients(fn, fn_arg_list)
return result, grads
|
def _value_and_gradients(fn, fn_arg_list, result=None, grads=None, name=None):
"""Helper to `maybe_call_fn_and_grads`."""
with tf.compat.v1.name_scope(name, 'value_and_gradients',
[fn_arg_list, result, grads]):
def _convert_to_tensor(x, name):
ctt = lambda x_: x_ if x_ is None else tf.convert_to_tensor(
value=x_, name=name)
return [ctt(x_) for x_ in x] if is_list_like(x) else ctt(x)
fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
else [fn_arg_list])
fn_arg_list = _convert_to_tensor(fn_arg_list, 'fn_arg')
if result is None:
result = fn(*fn_arg_list)
if grads is None and tf.executing_eagerly():
# Ensure we disable bijector cacheing in eager mode.
# TODO(b/72831017): Remove this once bijector cacheing is fixed for
# eager mode.
fn_arg_list = [0 + x for x in fn_arg_list]
result = _convert_to_tensor(result, 'fn_result')
if grads is not None:
grads = _convert_to_tensor(grads, 'fn_grad')
return result, grads
if is_list_like(result) and len(result) == len(fn_arg_list):
# Compute the block diagonal of Jacobian.
# TODO(b/79158574): Guard this calculation by an arg which explicitly
# requests block diagonal Jacobian calculation.
def fn_slice(i):
"""Needed to prevent `cell-var-from-loop` pylint warning."""
return lambda x: fn(*(fn_arg_list[:i] + [x] + fn_arg_list[i+1:]))
grads = [
tfp_math_value_and_gradients(fn_slice(i), fn_arg_list[i])[1]
for i in range(len(result))
]
else:
_, grads = tfp_math_value_and_gradients(fn, fn_arg_list)
return result, grads
|
[
"Helper",
"to",
"maybe_call_fn_and_grads",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L176-L218
|
[
"def",
"_value_and_gradients",
"(",
"fn",
",",
"fn_arg_list",
",",
"result",
"=",
"None",
",",
"grads",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'value_and_gradients'",
",",
"[",
"fn_arg_list",
",",
"result",
",",
"grads",
"]",
")",
":",
"def",
"_convert_to_tensor",
"(",
"x",
",",
"name",
")",
":",
"ctt",
"=",
"lambda",
"x_",
":",
"x_",
"if",
"x_",
"is",
"None",
"else",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x_",
",",
"name",
"=",
"name",
")",
"return",
"[",
"ctt",
"(",
"x_",
")",
"for",
"x_",
"in",
"x",
"]",
"if",
"is_list_like",
"(",
"x",
")",
"else",
"ctt",
"(",
"x",
")",
"fn_arg_list",
"=",
"(",
"list",
"(",
"fn_arg_list",
")",
"if",
"is_list_like",
"(",
"fn_arg_list",
")",
"else",
"[",
"fn_arg_list",
"]",
")",
"fn_arg_list",
"=",
"_convert_to_tensor",
"(",
"fn_arg_list",
",",
"'fn_arg'",
")",
"if",
"result",
"is",
"None",
":",
"result",
"=",
"fn",
"(",
"*",
"fn_arg_list",
")",
"if",
"grads",
"is",
"None",
"and",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"# Ensure we disable bijector cacheing in eager mode.",
"# TODO(b/72831017): Remove this once bijector cacheing is fixed for",
"# eager mode.",
"fn_arg_list",
"=",
"[",
"0",
"+",
"x",
"for",
"x",
"in",
"fn_arg_list",
"]",
"result",
"=",
"_convert_to_tensor",
"(",
"result",
",",
"'fn_result'",
")",
"if",
"grads",
"is",
"not",
"None",
":",
"grads",
"=",
"_convert_to_tensor",
"(",
"grads",
",",
"'fn_grad'",
")",
"return",
"result",
",",
"grads",
"if",
"is_list_like",
"(",
"result",
")",
"and",
"len",
"(",
"result",
")",
"==",
"len",
"(",
"fn_arg_list",
")",
":",
"# Compute the block diagonal of Jacobian.",
"# TODO(b/79158574): Guard this calculation by an arg which explicitly",
"# requests block diagonal Jacobian calculation.",
"def",
"fn_slice",
"(",
"i",
")",
":",
"\"\"\"Needed to prevent `cell-var-from-loop` pylint warning.\"\"\"",
"return",
"lambda",
"x",
":",
"fn",
"(",
"*",
"(",
"fn_arg_list",
"[",
":",
"i",
"]",
"+",
"[",
"x",
"]",
"+",
"fn_arg_list",
"[",
"i",
"+",
"1",
":",
"]",
")",
")",
"grads",
"=",
"[",
"tfp_math_value_and_gradients",
"(",
"fn_slice",
"(",
"i",
")",
",",
"fn_arg_list",
"[",
"i",
"]",
")",
"[",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"result",
")",
")",
"]",
"else",
":",
"_",
",",
"grads",
"=",
"tfp_math_value_and_gradients",
"(",
"fn",
",",
"fn_arg_list",
")",
"return",
"result",
",",
"grads"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
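A minimal usage sketch for `_value_and_gradients` from the row above, assuming TF eager execution and that the internal module is importable as shown; the quadratic log-prob function is illustrative only.

```python
import tensorflow as tf
from tensorflow_probability.python.mcmc.internal import util as mcmc_util

def target_log_prob(x):
  # Simple concave log-density; its gradient w.r.t. x is -x.
  return -0.5 * tf.reduce_sum(x ** 2)

x = tf.constant([1.0, 2.0, 3.0])
result, grads = mcmc_util._value_and_gradients(target_log_prob, x)
# result: scalar Tensor (-7.0); grads: one-element list holding -x.
```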
test
|
maybe_call_fn_and_grads
|
Calls `fn` and computes the gradient of the result wrt `args_list`.
|
tensorflow_probability/python/mcmc/internal/util.py
|
def maybe_call_fn_and_grads(fn,
fn_arg_list,
result=None,
grads=None,
check_non_none_grads=True,
name=None):
"""Calls `fn` and computes the gradient of the result wrt `args_list`."""
with tf.compat.v1.name_scope(name, 'maybe_call_fn_and_grads',
[fn_arg_list, result, grads]):
fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
else [fn_arg_list])
result, grads = _value_and_gradients(fn, fn_arg_list, result, grads)
if not all(r.dtype.is_floating
for r in (result if is_list_like(result) else [result])): # pylint: disable=superfluous-parens
raise TypeError('Function result must be a `Tensor` with `float` '
'`dtype`.')
if len(fn_arg_list) != len(grads):
raise ValueError('Function args must be in one-to-one correspondence '
'with grads.')
if check_non_none_grads and any(g is None for g in grads):
raise ValueError('Encountered `None` gradient.\n'
' fn_arg_list: {}\n'
' grads: {}'.format(fn_arg_list, grads))
return result, grads
|
def maybe_call_fn_and_grads(fn,
fn_arg_list,
result=None,
grads=None,
check_non_none_grads=True,
name=None):
"""Calls `fn` and computes the gradient of the result wrt `args_list`."""
with tf.compat.v1.name_scope(name, 'maybe_call_fn_and_grads',
[fn_arg_list, result, grads]):
fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
else [fn_arg_list])
result, grads = _value_and_gradients(fn, fn_arg_list, result, grads)
if not all(r.dtype.is_floating
for r in (result if is_list_like(result) else [result])): # pylint: disable=superfluous-parens
raise TypeError('Function result must be a `Tensor` with `float` '
'`dtype`.')
if len(fn_arg_list) != len(grads):
raise ValueError('Function args must be in one-to-one correspondence '
'with grads.')
if check_non_none_grads and any(g is None for g in grads):
raise ValueError('Encountered `None` gradient.\n'
' fn_arg_list: {}\n'
' grads: {}'.format(fn_arg_list, grads))
return result, grads
|
[
"Calls",
"fn",
"and",
"computes",
"the",
"gradient",
"of",
"the",
"result",
"wrt",
"args_list",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L221-L244
|
[
"def",
"maybe_call_fn_and_grads",
"(",
"fn",
",",
"fn_arg_list",
",",
"result",
"=",
"None",
",",
"grads",
"=",
"None",
",",
"check_non_none_grads",
"=",
"True",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'maybe_call_fn_and_grads'",
",",
"[",
"fn_arg_list",
",",
"result",
",",
"grads",
"]",
")",
":",
"fn_arg_list",
"=",
"(",
"list",
"(",
"fn_arg_list",
")",
"if",
"is_list_like",
"(",
"fn_arg_list",
")",
"else",
"[",
"fn_arg_list",
"]",
")",
"result",
",",
"grads",
"=",
"_value_and_gradients",
"(",
"fn",
",",
"fn_arg_list",
",",
"result",
",",
"grads",
")",
"if",
"not",
"all",
"(",
"r",
".",
"dtype",
".",
"is_floating",
"for",
"r",
"in",
"(",
"result",
"if",
"is_list_like",
"(",
"result",
")",
"else",
"[",
"result",
"]",
")",
")",
":",
"# pylint: disable=superfluous-parens",
"raise",
"TypeError",
"(",
"'Function result must be a `Tensor` with `float` '",
"'`dtype`.'",
")",
"if",
"len",
"(",
"fn_arg_list",
")",
"!=",
"len",
"(",
"grads",
")",
":",
"raise",
"ValueError",
"(",
"'Function args must be in one-to-one correspondence '",
"'with grads.'",
")",
"if",
"check_non_none_grads",
"and",
"any",
"(",
"g",
"is",
"None",
"for",
"g",
"in",
"grads",
")",
":",
"raise",
"ValueError",
"(",
"'Encountered `None` gradient.\\n'",
"' fn_arg_list: {}\\n'",
"' grads: {}'",
".",
"format",
"(",
"fn_arg_list",
",",
"grads",
")",
")",
"return",
"result",
",",
"grads"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
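A usage sketch for `maybe_call_fn_and_grads` with an illustrative quadratic target over two state parts; precomputed `result` and `grads` are passed through (after tensor conversion) rather than recomputed.

```python
import tensorflow as tf
from tensorflow_probability.python.mcmc.internal import util as mcmc_util

def target_log_prob(x, y):
  return -0.5 * (tf.reduce_sum(x ** 2) + tf.reduce_sum(y ** 2))

states = [tf.constant([0.5, -1.5]), tf.constant([2.0])]
result, grads = mcmc_util.maybe_call_fn_and_grads(target_log_prob, states)
# result: scalar Tensor; grads: [-states[0], -states[1]], one per state part.

# Passing both `result` and `grads` back in returns them without recomputation.
same_result, same_grads = mcmc_util.maybe_call_fn_and_grads(
    target_log_prob, states, result=result, grads=grads)
```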
test
|
smart_for_loop
|
Construct a for loop, preferring a python loop if `n` is staticaly known.
Given `loop_num_iter` and `body_fn`, return an op corresponding to executing
`body_fn` `loop_num_iter` times, feeding previous outputs of `body_fn` into
the next iteration.
If `loop_num_iter` is statically known, the op is constructed via python for
loop, and otherwise a `tf.while_loop` is used.
Args:
loop_num_iter: `Integer` `Tensor` representing the number of loop
iterations.
body_fn: Callable to be executed `loop_num_iter` times.
initial_loop_vars: Listlike object of `Tensors` to be passed in to
`body_fn`'s first execution.
parallel_iterations: The number of iterations allowed to run in parallel.
It must be a positive integer. See `tf.while_loop` for more details.
Default value: `10`.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "smart_for_loop").
Returns:
result: `Tensor` representing applying `body_fn` iteratively `n` times.
|
tensorflow_probability/python/mcmc/internal/util.py
|
def smart_for_loop(loop_num_iter, body_fn, initial_loop_vars,
parallel_iterations=10, name=None):
"""Construct a for loop, preferring a python loop if `n` is staticaly known.
Given `loop_num_iter` and `body_fn`, return an op corresponding to executing
`body_fn` `loop_num_iter` times, feeding previous outputs of `body_fn` into
the next iteration.
If `loop_num_iter` is statically known, the op is constructed via python for
loop, and otherwise a `tf.while_loop` is used.
Args:
loop_num_iter: `Integer` `Tensor` representing the number of loop
iterations.
body_fn: Callable to be executed `loop_num_iter` times.
initial_loop_vars: Listlike object of `Tensors` to be passed in to
`body_fn`'s first execution.
parallel_iterations: The number of iterations allowed to run in parallel.
It must be a positive integer. See `tf.while_loop` for more details.
Default value: `10`.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "smart_for_loop").
Returns:
result: `Tensor` representing applying `body_fn` iteratively `n` times.
"""
with tf.compat.v1.name_scope(name, 'smart_for_loop',
[loop_num_iter, initial_loop_vars]):
loop_num_iter_ = tf.get_static_value(loop_num_iter)
if (loop_num_iter_ is None or tf.executing_eagerly() or
control_flow_util.GraphOrParentsInXlaContext(
tf.compat.v1.get_default_graph())):
# Cast to int32 to run the comparison against i in host memory,
# where while/LoopCond needs it.
loop_num_iter = tf.cast(loop_num_iter, dtype=tf.int32)
return tf.while_loop(
cond=lambda i, *args: i < loop_num_iter,
body=lambda i, *args: [i + 1] + list(body_fn(*args)),
loop_vars=[np.int32(0)] + initial_loop_vars,
parallel_iterations=parallel_iterations
)[1:]
result = initial_loop_vars
for _ in range(loop_num_iter_):
result = body_fn(*result)
return result
|
def smart_for_loop(loop_num_iter, body_fn, initial_loop_vars,
parallel_iterations=10, name=None):
"""Construct a for loop, preferring a python loop if `n` is staticaly known.
Given `loop_num_iter` and `body_fn`, return an op corresponding to executing
`body_fn` `loop_num_iter` times, feeding previous outputs of `body_fn` into
the next iteration.
If `loop_num_iter` is statically known, the op is constructed via python for
loop, and otherwise a `tf.while_loop` is used.
Args:
loop_num_iter: `Integer` `Tensor` representing the number of loop
iterations.
body_fn: Callable to be executed `loop_num_iter` times.
initial_loop_vars: Listlike object of `Tensors` to be passed in to
`body_fn`'s first execution.
parallel_iterations: The number of iterations allowed to run in parallel.
It must be a positive integer. See `tf.while_loop` for more details.
Default value: `10`.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "smart_for_loop").
Returns:
result: `Tensor` representing applying `body_fn` iteratively `n` times.
"""
with tf.compat.v1.name_scope(name, 'smart_for_loop',
[loop_num_iter, initial_loop_vars]):
loop_num_iter_ = tf.get_static_value(loop_num_iter)
if (loop_num_iter_ is None or tf.executing_eagerly() or
control_flow_util.GraphOrParentsInXlaContext(
tf.compat.v1.get_default_graph())):
# Cast to int32 to run the comparison against i in host memory,
# where while/LoopCond needs it.
loop_num_iter = tf.cast(loop_num_iter, dtype=tf.int32)
return tf.while_loop(
cond=lambda i, *args: i < loop_num_iter,
body=lambda i, *args: [i + 1] + list(body_fn(*args)),
loop_vars=[np.int32(0)] + initial_loop_vars,
parallel_iterations=parallel_iterations
)[1:]
result = initial_loop_vars
for _ in range(loop_num_iter_):
result = body_fn(*result)
return result
|
[
"Construct",
"a",
"for",
"loop",
"preferring",
"a",
"python",
"loop",
"if",
"n",
"is",
"staticaly",
"known",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L247-L290
|
[
"def",
"smart_for_loop",
"(",
"loop_num_iter",
",",
"body_fn",
",",
"initial_loop_vars",
",",
"parallel_iterations",
"=",
"10",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'smart_for_loop'",
",",
"[",
"loop_num_iter",
",",
"initial_loop_vars",
"]",
")",
":",
"loop_num_iter_",
"=",
"tf",
".",
"get_static_value",
"(",
"loop_num_iter",
")",
"if",
"(",
"loop_num_iter_",
"is",
"None",
"or",
"tf",
".",
"executing_eagerly",
"(",
")",
"or",
"control_flow_util",
".",
"GraphOrParentsInXlaContext",
"(",
"tf",
".",
"compat",
".",
"v1",
".",
"get_default_graph",
"(",
")",
")",
")",
":",
"# Cast to int32 to run the comparison against i in host memory,",
"# where while/LoopCond needs it.",
"loop_num_iter",
"=",
"tf",
".",
"cast",
"(",
"loop_num_iter",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"return",
"tf",
".",
"while_loop",
"(",
"cond",
"=",
"lambda",
"i",
",",
"*",
"args",
":",
"i",
"<",
"loop_num_iter",
",",
"body",
"=",
"lambda",
"i",
",",
"*",
"args",
":",
"[",
"i",
"+",
"1",
"]",
"+",
"list",
"(",
"body_fn",
"(",
"*",
"args",
")",
")",
",",
"loop_vars",
"=",
"[",
"np",
".",
"int32",
"(",
"0",
")",
"]",
"+",
"initial_loop_vars",
",",
"parallel_iterations",
"=",
"parallel_iterations",
")",
"[",
"1",
":",
"]",
"result",
"=",
"initial_loop_vars",
"for",
"_",
"in",
"range",
"(",
"loop_num_iter_",
")",
":",
"result",
"=",
"body_fn",
"(",
"*",
"result",
")",
"return",
"result"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
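A sketch of `smart_for_loop` with a trivial body: under graph mode a statically known count unrolls into a Python loop, while a dynamic count (or eager/XLA execution) falls back to `tf.while_loop`.

```python
import tensorflow as tf
from tensorflow_probability.python.mcmc.internal import util as mcmc_util

# Add 1.0 three times, starting from 0.0.
[total] = mcmc_util.smart_for_loop(
    loop_num_iter=tf.constant(3),
    body_fn=lambda x: [x + 1.0],
    initial_loop_vars=[tf.constant(0.0)])
# total == 3.0 regardless of which loop construct was chosen.
```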
test
|
trace_scan
|
A simplified version of `tf.scan` that has configurable tracing.
This function repeatedly calls `loop_fn(state, elem)`, where `state` is the
`initial_state` during the first iteration, and the return value of `loop_fn`
for every iteration thereafter. `elem` is a slice of `elements` along the
first dimension, accessed in order. Additionally, it calls `trace_fn` on the
return value of `loop_fn`. The `Tensor`s in return values of `trace_fn` are
stacked and returned from this function, such that the first dimension of
those `Tensor`s matches the size of `elems`.
Args:
loop_fn: A callable that takes in a `Tensor` or a nested collection of
`Tensor`s with the same structure as `initial_state`, a slice of `elems`
and returns the same structure as `initial_state`.
initial_state: A `Tensor` or a nested collection of `Tensor`s passed to
`loop_fn` in the first iteration.
elems: A `Tensor` that is split along the first dimension and each element
of which is passed to `loop_fn`.
trace_fn: A callable that takes in the return value of `loop_fn` and returns
a `Tensor` or a nested collection of `Tensor`s.
parallel_iterations: Passed to the internal `tf.while_loop`.
name: Name scope used in this function. Default: 'trace_scan'.
Returns:
final_state: The final return value of `loop_fn`.
trace: The same structure as the return value of `trace_fn`, but with each
`Tensor` being a stack of the corresponding `Tensors` in the return value
of `trace_fn` for each slice of `elems`.
|
tensorflow_probability/python/mcmc/internal/util.py
|
def trace_scan(loop_fn,
initial_state,
elems,
trace_fn,
parallel_iterations=10,
name=None):
"""A simplified version of `tf.scan` that has configurable tracing.
This function repeatedly calls `loop_fn(state, elem)`, where `state` is the
`initial_state` during the first iteration, and the return value of `loop_fn`
for every iteration thereafter. `elem` is a slice of `elements` along the
first dimension, accessed in order. Additionally, it calls `trace_fn` on the
return value of `loop_fn`. The `Tensor`s in return values of `trace_fn` are
stacked and returned from this function, such that the first dimension of
those `Tensor`s matches the size of `elems`.
Args:
loop_fn: A callable that takes in a `Tensor` or a nested collection of
`Tensor`s with the same structure as `initial_state`, a slice of `elems`
and returns the same structure as `initial_state`.
initial_state: A `Tensor` or a nested collection of `Tensor`s passed to
`loop_fn` in the first iteration.
elems: A `Tensor` that is split along the first dimension and each element
of which is passed to `loop_fn`.
trace_fn: A callable that takes in the return value of `loop_fn` and returns
a `Tensor` or a nested collection of `Tensor`s.
parallel_iterations: Passed to the internal `tf.while_loop`.
name: Name scope used in this function. Default: 'trace_scan'.
Returns:
final_state: The final return value of `loop_fn`.
trace: The same structure as the return value of `trace_fn`, but with each
`Tensor` being a stack of the corresponding `Tensors` in the return value
of `trace_fn` for each slice of `elems`.
"""
with tf.compat.v1.name_scope(
name, 'trace_scan', [initial_state, elems]), tf.compat.v1.variable_scope(
tf.compat.v1.get_variable_scope()) as vs:
if vs.caching_device is None and not tf.executing_eagerly():
vs.set_caching_device(lambda op: op.device)
initial_state = tf.nest.map_structure(
lambda x: tf.convert_to_tensor(value=x, name='initial_state'),
initial_state)
elems = tf.convert_to_tensor(value=elems, name='elems')
static_length = elems.shape[0]
if tf.compat.dimension_value(static_length) is None:
length = tf.shape(input=elems)[0]
else:
length = tf.convert_to_tensor(
value=static_length, dtype=tf.int32, name='length')
# This is an TensorArray in part because of XLA, which had trouble with
# non-statically known indices. I.e. elems[i] errored, but
# elems_array.read(i) worked.
elems_array = tf.TensorArray(
elems.dtype, size=length, element_shape=elems.shape[1:])
elems_array = elems_array.unstack(elems)
trace_arrays = tf.nest.map_structure(
lambda x: tf.TensorArray(x.dtype, size=length, element_shape=x.shape),
trace_fn(initial_state))
def _body(i, state, trace_arrays):
state = loop_fn(state, elems_array.read(i))
trace_arrays = tf.nest.pack_sequence_as(trace_arrays, [
a.write(i, v) for a, v in zip(
tf.nest.flatten(trace_arrays), tf.nest.flatten(trace_fn(state)))
])
return i + 1, state, trace_arrays
_, final_state, trace_arrays = tf.while_loop(
cond=lambda i, *args: i < length,
body=_body,
loop_vars=(0, initial_state, trace_arrays),
parallel_iterations=parallel_iterations)
stacked_trace = tf.nest.map_structure(lambda x: x.stack(), trace_arrays)
# Restore the static length if we know it.
def _merge_static_length(x):
x.set_shape(tf.TensorShape(static_length).concatenate(x.shape[1:]))
return x
stacked_trace = tf.nest.map_structure(_merge_static_length, stacked_trace)
return final_state, stacked_trace
|
def trace_scan(loop_fn,
initial_state,
elems,
trace_fn,
parallel_iterations=10,
name=None):
"""A simplified version of `tf.scan` that has configurable tracing.
This function repeatedly calls `loop_fn(state, elem)`, where `state` is the
`initial_state` during the first iteration, and the return value of `loop_fn`
for every iteration thereafter. `elem` is a slice of `elements` along the
first dimension, accessed in order. Additionally, it calls `trace_fn` on the
return value of `loop_fn`. The `Tensor`s in return values of `trace_fn` are
stacked and returned from this function, such that the first dimension of
those `Tensor`s matches the size of `elems`.
Args:
loop_fn: A callable that takes in a `Tensor` or a nested collection of
`Tensor`s with the same structure as `initial_state`, a slice of `elems`
and returns the same structure as `initial_state`.
initial_state: A `Tensor` or a nested collection of `Tensor`s passed to
`loop_fn` in the first iteration.
elems: A `Tensor` that is split along the first dimension and each element
of which is passed to `loop_fn`.
trace_fn: A callable that takes in the return value of `loop_fn` and returns
a `Tensor` or a nested collection of `Tensor`s.
parallel_iterations: Passed to the internal `tf.while_loop`.
name: Name scope used in this function. Default: 'trace_scan'.
Returns:
final_state: The final return value of `loop_fn`.
trace: The same structure as the return value of `trace_fn`, but with each
`Tensor` being a stack of the corresponding `Tensors` in the return value
of `trace_fn` for each slice of `elems`.
"""
with tf.compat.v1.name_scope(
name, 'trace_scan', [initial_state, elems]), tf.compat.v1.variable_scope(
tf.compat.v1.get_variable_scope()) as vs:
if vs.caching_device is None and not tf.executing_eagerly():
vs.set_caching_device(lambda op: op.device)
initial_state = tf.nest.map_structure(
lambda x: tf.convert_to_tensor(value=x, name='initial_state'),
initial_state)
elems = tf.convert_to_tensor(value=elems, name='elems')
static_length = elems.shape[0]
if tf.compat.dimension_value(static_length) is None:
length = tf.shape(input=elems)[0]
else:
length = tf.convert_to_tensor(
value=static_length, dtype=tf.int32, name='length')
# This is an TensorArray in part because of XLA, which had trouble with
# non-statically known indices. I.e. elems[i] errored, but
# elems_array.read(i) worked.
elems_array = tf.TensorArray(
elems.dtype, size=length, element_shape=elems.shape[1:])
elems_array = elems_array.unstack(elems)
trace_arrays = tf.nest.map_structure(
lambda x: tf.TensorArray(x.dtype, size=length, element_shape=x.shape),
trace_fn(initial_state))
def _body(i, state, trace_arrays):
state = loop_fn(state, elems_array.read(i))
trace_arrays = tf.nest.pack_sequence_as(trace_arrays, [
a.write(i, v) for a, v in zip(
tf.nest.flatten(trace_arrays), tf.nest.flatten(trace_fn(state)))
])
return i + 1, state, trace_arrays
_, final_state, trace_arrays = tf.while_loop(
cond=lambda i, *args: i < length,
body=_body,
loop_vars=(0, initial_state, trace_arrays),
parallel_iterations=parallel_iterations)
stacked_trace = tf.nest.map_structure(lambda x: x.stack(), trace_arrays)
# Restore the static length if we know it.
def _merge_static_length(x):
x.set_shape(tf.TensorShape(static_length).concatenate(x.shape[1:]))
return x
stacked_trace = tf.nest.map_structure(_merge_static_length, stacked_trace)
return final_state, stacked_trace
|
[
"A",
"simplified",
"version",
"of",
"tf",
".",
"scan",
"that",
"has",
"configurable",
"tracing",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L293-L379
|
[
"def",
"trace_scan",
"(",
"loop_fn",
",",
"initial_state",
",",
"elems",
",",
"trace_fn",
",",
"parallel_iterations",
"=",
"10",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'trace_scan'",
",",
"[",
"initial_state",
",",
"elems",
"]",
")",
",",
"tf",
".",
"compat",
".",
"v1",
".",
"variable_scope",
"(",
"tf",
".",
"compat",
".",
"v1",
".",
"get_variable_scope",
"(",
")",
")",
"as",
"vs",
":",
"if",
"vs",
".",
"caching_device",
"is",
"None",
"and",
"not",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"vs",
".",
"set_caching_device",
"(",
"lambda",
"op",
":",
"op",
".",
"device",
")",
"initial_state",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"lambda",
"x",
":",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"'initial_state'",
")",
",",
"initial_state",
")",
"elems",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"elems",
",",
"name",
"=",
"'elems'",
")",
"static_length",
"=",
"elems",
".",
"shape",
"[",
"0",
"]",
"if",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"static_length",
")",
"is",
"None",
":",
"length",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"elems",
")",
"[",
"0",
"]",
"else",
":",
"length",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"static_length",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"'length'",
")",
"# This is an TensorArray in part because of XLA, which had trouble with",
"# non-statically known indices. I.e. elems[i] errored, but",
"# elems_array.read(i) worked.",
"elems_array",
"=",
"tf",
".",
"TensorArray",
"(",
"elems",
".",
"dtype",
",",
"size",
"=",
"length",
",",
"element_shape",
"=",
"elems",
".",
"shape",
"[",
"1",
":",
"]",
")",
"elems_array",
"=",
"elems_array",
".",
"unstack",
"(",
"elems",
")",
"trace_arrays",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"lambda",
"x",
":",
"tf",
".",
"TensorArray",
"(",
"x",
".",
"dtype",
",",
"size",
"=",
"length",
",",
"element_shape",
"=",
"x",
".",
"shape",
")",
",",
"trace_fn",
"(",
"initial_state",
")",
")",
"def",
"_body",
"(",
"i",
",",
"state",
",",
"trace_arrays",
")",
":",
"state",
"=",
"loop_fn",
"(",
"state",
",",
"elems_array",
".",
"read",
"(",
"i",
")",
")",
"trace_arrays",
"=",
"tf",
".",
"nest",
".",
"pack_sequence_as",
"(",
"trace_arrays",
",",
"[",
"a",
".",
"write",
"(",
"i",
",",
"v",
")",
"for",
"a",
",",
"v",
"in",
"zip",
"(",
"tf",
".",
"nest",
".",
"flatten",
"(",
"trace_arrays",
")",
",",
"tf",
".",
"nest",
".",
"flatten",
"(",
"trace_fn",
"(",
"state",
")",
")",
")",
"]",
")",
"return",
"i",
"+",
"1",
",",
"state",
",",
"trace_arrays",
"_",
",",
"final_state",
",",
"trace_arrays",
"=",
"tf",
".",
"while_loop",
"(",
"cond",
"=",
"lambda",
"i",
",",
"*",
"args",
":",
"i",
"<",
"length",
",",
"body",
"=",
"_body",
",",
"loop_vars",
"=",
"(",
"0",
",",
"initial_state",
",",
"trace_arrays",
")",
",",
"parallel_iterations",
"=",
"parallel_iterations",
")",
"stacked_trace",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"lambda",
"x",
":",
"x",
".",
"stack",
"(",
")",
",",
"trace_arrays",
")",
"# Restore the static length if we know it.",
"def",
"_merge_static_length",
"(",
"x",
")",
":",
"x",
".",
"set_shape",
"(",
"tf",
".",
"TensorShape",
"(",
"static_length",
")",
".",
"concatenate",
"(",
"x",
".",
"shape",
"[",
"1",
":",
"]",
")",
")",
"return",
"x",
"stacked_trace",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"_merge_static_length",
",",
"stacked_trace",
")",
"return",
"final_state",
",",
"stacked_trace"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
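A sketch of `trace_scan` computing a running sum over `elems` while tracing both the state and a derived quantity; the structure returned by `trace_fn` (here a dict) determines the structure of the stacked trace.

```python
import tensorflow as tf
from tensorflow_probability.python.mcmc.internal import util as mcmc_util

final_state, trace = mcmc_util.trace_scan(
    loop_fn=lambda state, elem: state + elem,
    initial_state=tf.constant(0.0),
    elems=tf.constant([1.0, 2.0, 3.0]),
    trace_fn=lambda state: {'running_sum': state, 'square': state ** 2})
# final_state == 6.0
# trace['running_sum'] == [1., 3., 6.]; trace['square'] == [1., 9., 36.]
```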
test
|
make_innermost_setter
|
Wraps a setter so it applies to the inner-most results in `kernel_results`.
The wrapped setter unwraps `kernel_results` and applies `setter` to the first
results without an `inner_results` attribute.
Args:
setter: A callable that takes the kernel results as well as some `*args` and
`**kwargs` and returns a modified copy of those kernel results.
Returns:
new_setter: A wrapped `setter`.
|
tensorflow_probability/python/mcmc/internal/util.py
|
def make_innermost_setter(setter):
"""Wraps a setter so it applies to the inner-most results in `kernel_results`.
The wrapped setter unwraps `kernel_results` and applies `setter` to the first
results without an `inner_results` attribute.
Args:
setter: A callable that takes the kernel results as well as some `*args` and
`**kwargs` and returns a modified copy of those kernel results.
Returns:
new_setter: A wrapped `setter`.
"""
@functools.wraps(setter)
def _new_setter(kernel_results, *args, **kwargs):
"""Wrapped setter."""
results_stack = []
while hasattr(kernel_results, 'inner_results'):
results_stack.append(kernel_results)
kernel_results = kernel_results.inner_results
new_kernel_results = setter(kernel_results, *args, **kwargs)
for outer_results in reversed(results_stack):
new_kernel_results = outer_results._replace(
inner_results=new_kernel_results)
return new_kernel_results
return _new_setter
|
def make_innermost_setter(setter):
"""Wraps a setter so it applies to the inner-most results in `kernel_results`.
The wrapped setter unwraps `kernel_results` and applies `setter` to the first
results without an `inner_results` attribute.
Args:
setter: A callable that takes the kernel results as well as some `*args` and
`**kwargs` and returns a modified copy of those kernel results.
Returns:
new_setter: A wrapped `setter`.
"""
@functools.wraps(setter)
def _new_setter(kernel_results, *args, **kwargs):
"""Wrapped setter."""
results_stack = []
while hasattr(kernel_results, 'inner_results'):
results_stack.append(kernel_results)
kernel_results = kernel_results.inner_results
new_kernel_results = setter(kernel_results, *args, **kwargs)
for outer_results in reversed(results_stack):
new_kernel_results = outer_results._replace(
inner_results=new_kernel_results)
return new_kernel_results
return _new_setter
|
[
"Wraps",
"a",
"setter",
"so",
"it",
"applies",
"to",
"the",
"inner",
"-",
"most",
"results",
"in",
"kernel_results",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L382-L411
|
[
"def",
"make_innermost_setter",
"(",
"setter",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"setter",
")",
"def",
"_new_setter",
"(",
"kernel_results",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Wrapped setter.\"\"\"",
"results_stack",
"=",
"[",
"]",
"while",
"hasattr",
"(",
"kernel_results",
",",
"'inner_results'",
")",
":",
"results_stack",
".",
"append",
"(",
"kernel_results",
")",
"kernel_results",
"=",
"kernel_results",
".",
"inner_results",
"new_kernel_results",
"=",
"setter",
"(",
"kernel_results",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"for",
"outer_results",
"in",
"reversed",
"(",
"results_stack",
")",
":",
"new_kernel_results",
"=",
"outer_results",
".",
"_replace",
"(",
"inner_results",
"=",
"new_kernel_results",
")",
"return",
"new_kernel_results",
"return",
"_new_setter"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
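A sketch of `make_innermost_setter`: the decorated setter below assumes the innermost kernel results namedtuple has a `step_size` field (as HMC results do when parameters are stored in results); the outer wrappers are rebuilt automatically.

```python
from tensorflow_probability.python.mcmc.internal import util as mcmc_util

@mcmc_util.make_innermost_setter
def set_step_size(kernel_results, new_step_size):
  # Called on the first results object without an `inner_results` attribute.
  return kernel_results._replace(step_size=new_step_size)

# new_results = set_step_size(wrapped_results, 0.1) works whether
# `wrapped_results` is the innermost results itself or nests it under one or
# more `inner_results` attributes (e.g. from adaptation wrappers).
```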
test
|
make_innermost_getter
|
Wraps a getter so it applies to the inner-most results in `kernel_results`.
The wrapped getter unwraps `kernel_results` and returns the return value of
`getter` called with the first results without an `inner_results` attribute.
Args:
getter: A callable that takes Kernel results and returns some value.
Returns:
new_getter: A wrapped `getter`.
|
tensorflow_probability/python/mcmc/internal/util.py
|
def make_innermost_getter(getter):
"""Wraps a getter so it applies to the inner-most results in `kernel_results`.
The wrapped getter unwraps `kernel_results` and returns the return value of
`getter` called with the first results without an `inner_results` attribute.
Args:
getter: A callable that takes Kernel results and returns some value.
Returns:
new_getter: A wrapped `getter`.
"""
@functools.wraps(getter)
def _new_getter(kernel_results, *args, **kwargs):
"""Wrapped getter."""
results_stack = []
while hasattr(kernel_results, 'inner_results'):
results_stack.append(kernel_results)
kernel_results = kernel_results.inner_results
return getter(kernel_results, *args, **kwargs)
return _new_getter
|
def make_innermost_getter(getter):
"""Wraps a getter so it applies to the inner-most results in `kernel_results`.
The wrapped getter unwraps `kernel_results` and returns the return value of
`getter` called with the first results without an `inner_results` attribute.
Args:
getter: A callable that takes Kernel results and returns some value.
Returns:
new_getter: A wrapped `getter`.
"""
@functools.wraps(getter)
def _new_getter(kernel_results, *args, **kwargs):
"""Wrapped getter."""
results_stack = []
while hasattr(kernel_results, 'inner_results'):
results_stack.append(kernel_results)
kernel_results = kernel_results.inner_results
return getter(kernel_results, *args, **kwargs)
return _new_getter
|
[
"Wraps",
"a",
"getter",
"so",
"it",
"applies",
"to",
"the",
"inner",
"-",
"most",
"results",
"in",
"kernel_results",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L414-L437
|
[
"def",
"make_innermost_getter",
"(",
"getter",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"getter",
")",
"def",
"_new_getter",
"(",
"kernel_results",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Wrapped getter.\"\"\"",
"results_stack",
"=",
"[",
"]",
"while",
"hasattr",
"(",
"kernel_results",
",",
"'inner_results'",
")",
":",
"results_stack",
".",
"append",
"(",
"kernel_results",
")",
"kernel_results",
"=",
"kernel_results",
".",
"inner_results",
"return",
"getter",
"(",
"kernel_results",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_new_getter"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
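The getter counterpart, sketched under the same assumption that the innermost results expose a `step_size` field.

```python
from tensorflow_probability.python.mcmc.internal import util as mcmc_util

@mcmc_util.make_innermost_getter
def get_step_size(kernel_results):
  return kernel_results.step_size

# get_step_size(wrapped_results) unwraps any chain of `inner_results`
# attributes before reading the field from the innermost results.
```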
test
|
enable_store_parameters_in_results
|
Enables the `store_parameters_in_results` parameter in a chain of kernels.
This is a temporary utility for use during the transition period of the
parameter storage methods.
Args:
kernel: A TransitionKernel.
Returns:
kernel: The same kernel, but recreated with `store_parameters_in_results`
recursively set to `True` in its parameters and its inner kernels (as
appropriate).
|
tensorflow_probability/python/mcmc/internal/util.py
|
def enable_store_parameters_in_results(kernel):
"""Enables the `store_parameters_in_results` parameter in a chain of kernels.
This is a temporary utility for use during the transition period of the
parameter storage methods.
Args:
kernel: A TransitionKernel.
Returns:
kernel: The same kernel, but recreated with `store_parameters_in_results`
recursively set to `True` in its parameters and its inner kernels (as
appropriate).
"""
kernel_stack = []
while hasattr(kernel, 'parameters') and 'inner_kernel' in kernel.parameters:
kernel_stack.append(kernel)
kernel = kernel.parameters['inner_kernel']
def _recreate_kernel(kernel, parameters):
new_parameters = kernel.parameters.copy()
new_parameters.update(parameters)
if 'store_parameters_in_results' in new_parameters:
new_parameters['store_parameters_in_results'] = True
with deprecation.silence():
return type(kernel)(**new_parameters)
if hasattr(kernel, 'parameters'):
kernel = _recreate_kernel(kernel, {})
for outer_kernel in reversed(kernel_stack):
outer_kernel = _recreate_kernel(outer_kernel, {'inner_kernel': kernel})
kernel = outer_kernel
return kernel
|
def enable_store_parameters_in_results(kernel):
"""Enables the `store_parameters_in_results` parameter in a chain of kernels.
This is a temporary utility for use during the transition period of the
parameter storage methods.
Args:
kernel: A TransitionKernel.
Returns:
kernel: The same kernel, but recreated with `store_parameters_in_results`
recursively set to `True` in its parameters and its inner kernels (as
appropriate).
"""
kernel_stack = []
while hasattr(kernel, 'parameters') and 'inner_kernel' in kernel.parameters:
kernel_stack.append(kernel)
kernel = kernel.parameters['inner_kernel']
def _recreate_kernel(kernel, parameters):
new_parameters = kernel.parameters.copy()
new_parameters.update(parameters)
if 'store_parameters_in_results' in new_parameters:
new_parameters['store_parameters_in_results'] = True
with deprecation.silence():
return type(kernel)(**new_parameters)
if hasattr(kernel, 'parameters'):
kernel = _recreate_kernel(kernel, {})
for outer_kernel in reversed(kernel_stack):
outer_kernel = _recreate_kernel(outer_kernel, {'inner_kernel': kernel})
kernel = outer_kernel
return kernel
|
[
"Enables",
"the",
"store_parameters_in_results",
"parameter",
"in",
"a",
"chain",
"of",
"kernels",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L440-L474
|
[
"def",
"enable_store_parameters_in_results",
"(",
"kernel",
")",
":",
"kernel_stack",
"=",
"[",
"]",
"while",
"hasattr",
"(",
"kernel",
",",
"'parameters'",
")",
"and",
"'inner_kernel'",
"in",
"kernel",
".",
"parameters",
":",
"kernel_stack",
".",
"append",
"(",
"kernel",
")",
"kernel",
"=",
"kernel",
".",
"parameters",
"[",
"'inner_kernel'",
"]",
"def",
"_recreate_kernel",
"(",
"kernel",
",",
"parameters",
")",
":",
"new_parameters",
"=",
"kernel",
".",
"parameters",
".",
"copy",
"(",
")",
"new_parameters",
".",
"update",
"(",
"parameters",
")",
"if",
"'store_parameters_in_results'",
"in",
"new_parameters",
":",
"new_parameters",
"[",
"'store_parameters_in_results'",
"]",
"=",
"True",
"with",
"deprecation",
".",
"silence",
"(",
")",
":",
"return",
"type",
"(",
"kernel",
")",
"(",
"*",
"*",
"new_parameters",
")",
"if",
"hasattr",
"(",
"kernel",
",",
"'parameters'",
")",
":",
"kernel",
"=",
"_recreate_kernel",
"(",
"kernel",
",",
"{",
"}",
")",
"for",
"outer_kernel",
"in",
"reversed",
"(",
"kernel_stack",
")",
":",
"outer_kernel",
"=",
"_recreate_kernel",
"(",
"outer_kernel",
",",
"{",
"'inner_kernel'",
":",
"kernel",
"}",
")",
"kernel",
"=",
"outer_kernel",
"return",
"kernel"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
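A sketch of enabling stored parameters on a wrapped kernel chain; it assumes a TFP version in which `HamiltonianMonteCarlo` and `TransformedTransitionKernel` accept the arguments shown.

```python
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.mcmc.internal import util as mcmc_util

kernel = tfp.mcmc.TransformedTransitionKernel(
    inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=lambda x: -tf.reduce_sum(x ** 2),
        step_size=0.1,
        num_leapfrog_steps=3),
    bijector=tfp.bijectors.Identity())

kernel = mcmc_util.enable_store_parameters_in_results(kernel)
# Each kernel in the `inner_kernel` chain whose parameters include
# `store_parameters_in_results` is recreated with that flag set to True.
```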
test
|
_replace_event_shape_in_shape_tensor
|
Replaces the rightmost dims in a `Tensor` representing a shape.
Args:
input_shape: a rank-1 `Tensor` of integers
event_shape_in: the event shape expected to be present in rightmost dims
of `shape_in`.
event_shape_out: the event shape with which to replace `event_shape_in` in
the rightmost dims of `input_shape`.
validate_args: Python `bool` indicating whether arguments should
be checked for correctness.
Returns:
output_shape: A rank-1 integer `Tensor` with the same contents as
`input_shape` except for the event dims, which are replaced with
`event_shape_out`.
|
tensorflow_probability/python/bijectors/reshape.py
|
def _replace_event_shape_in_shape_tensor(
input_shape, event_shape_in, event_shape_out, validate_args):
"""Replaces the rightmost dims in a `Tensor` representing a shape.
Args:
input_shape: a rank-1 `Tensor` of integers
event_shape_in: the event shape expected to be present in rightmost dims
of `shape_in`.
event_shape_out: the event shape with which to replace `event_shape_in` in
the rightmost dims of `input_shape`.
validate_args: Python `bool` indicating whether arguments should
be checked for correctness.
Returns:
output_shape: A rank-1 integer `Tensor` with the same contents as
`input_shape` except for the event dims, which are replaced with
`event_shape_out`.
"""
output_tensorshape, is_validated = _replace_event_shape_in_tensorshape(
tensorshape_util.constant_value_as_shape(input_shape),
event_shape_in,
event_shape_out)
# TODO(b/124240153): Remove map(tf.identity, deps) once tf.function
# correctly supports control_dependencies.
validation_dependencies = (
map(tf.identity, (event_shape_in, event_shape_out))
if validate_args else ())
if (tensorshape_util.is_fully_defined(output_tensorshape) and
(is_validated or not validate_args)):
with tf.control_dependencies(validation_dependencies):
output_shape = tf.convert_to_tensor(
value=output_tensorshape, name='output_shape', dtype_hint=tf.int32)
return output_shape, output_tensorshape
with tf.control_dependencies(validation_dependencies):
event_shape_in_ndims = (
tf.size(input=event_shape_in)
if tensorshape_util.num_elements(event_shape_in.shape) is None else
tensorshape_util.num_elements(event_shape_in.shape))
input_non_event_shape, input_event_shape = tf.split(
input_shape, num_or_size_splits=[-1, event_shape_in_ndims])
additional_assertions = []
if is_validated:
pass
elif validate_args:
# Check that `input_event_shape` and `event_shape_in` are compatible in the
# sense that they have equal entries in any position that isn't a `-1` in
# `event_shape_in`. Note that our validations at construction time ensure
# there is at most one such entry in `event_shape_in`.
mask = event_shape_in >= 0
explicit_input_event_shape = tf.boolean_mask(
tensor=input_event_shape, mask=mask)
explicit_event_shape_in = tf.boolean_mask(
tensor=event_shape_in, mask=mask)
additional_assertions.append(
assert_util.assert_equal(
explicit_input_event_shape,
explicit_event_shape_in,
message='Input `event_shape` does not match `event_shape_in`.'))
# We don't explicitly additionally verify
# `tf.size(input_shape) > tf.size(event_shape_in)` since `tf.split`
# already makes this assertion.
with tf.control_dependencies(additional_assertions):
output_shape = tf.concat([input_non_event_shape, event_shape_out], axis=0,
name='output_shape')
return output_shape, output_tensorshape
|
def _replace_event_shape_in_shape_tensor(
input_shape, event_shape_in, event_shape_out, validate_args):
"""Replaces the rightmost dims in a `Tensor` representing a shape.
Args:
input_shape: a rank-1 `Tensor` of integers
event_shape_in: the event shape expected to be present in rightmost dims
of `shape_in`.
event_shape_out: the event shape with which to replace `event_shape_in` in
the rightmost dims of `input_shape`.
validate_args: Python `bool` indicating whether arguments should
be checked for correctness.
Returns:
output_shape: A rank-1 integer `Tensor` with the same contents as
`input_shape` except for the event dims, which are replaced with
`event_shape_out`.
"""
output_tensorshape, is_validated = _replace_event_shape_in_tensorshape(
tensorshape_util.constant_value_as_shape(input_shape),
event_shape_in,
event_shape_out)
# TODO(b/124240153): Remove map(tf.identity, deps) once tf.function
# correctly supports control_dependencies.
validation_dependencies = (
map(tf.identity, (event_shape_in, event_shape_out))
if validate_args else ())
if (tensorshape_util.is_fully_defined(output_tensorshape) and
(is_validated or not validate_args)):
with tf.control_dependencies(validation_dependencies):
output_shape = tf.convert_to_tensor(
value=output_tensorshape, name='output_shape', dtype_hint=tf.int32)
return output_shape, output_tensorshape
with tf.control_dependencies(validation_dependencies):
event_shape_in_ndims = (
tf.size(input=event_shape_in)
if tensorshape_util.num_elements(event_shape_in.shape) is None else
tensorshape_util.num_elements(event_shape_in.shape))
input_non_event_shape, input_event_shape = tf.split(
input_shape, num_or_size_splits=[-1, event_shape_in_ndims])
additional_assertions = []
if is_validated:
pass
elif validate_args:
# Check that `input_event_shape` and `event_shape_in` are compatible in the
# sense that they have equal entries in any position that isn't a `-1` in
# `event_shape_in`. Note that our validations at construction time ensure
# there is at most one such entry in `event_shape_in`.
mask = event_shape_in >= 0
explicit_input_event_shape = tf.boolean_mask(
tensor=input_event_shape, mask=mask)
explicit_event_shape_in = tf.boolean_mask(
tensor=event_shape_in, mask=mask)
additional_assertions.append(
assert_util.assert_equal(
explicit_input_event_shape,
explicit_event_shape_in,
message='Input `event_shape` does not match `event_shape_in`.'))
# We don't explicitly additionally verify
# `tf.size(input_shape) > tf.size(event_shape_in)` since `tf.split`
# already makes this assertion.
with tf.control_dependencies(additional_assertions):
output_shape = tf.concat([input_non_event_shape, event_shape_out], axis=0,
name='output_shape')
return output_shape, output_tensorshape
|
[
"Replaces",
"the",
"rightmost",
"dims",
"in",
"a",
"Tensor",
"representing",
"a",
"shape",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/reshape.py#L243-L313
|
[
"def",
"_replace_event_shape_in_shape_tensor",
"(",
"input_shape",
",",
"event_shape_in",
",",
"event_shape_out",
",",
"validate_args",
")",
":",
"output_tensorshape",
",",
"is_validated",
"=",
"_replace_event_shape_in_tensorshape",
"(",
"tensorshape_util",
".",
"constant_value_as_shape",
"(",
"input_shape",
")",
",",
"event_shape_in",
",",
"event_shape_out",
")",
"# TODO(b/124240153): Remove map(tf.identity, deps) once tf.function",
"# correctly supports control_dependencies.",
"validation_dependencies",
"=",
"(",
"map",
"(",
"tf",
".",
"identity",
",",
"(",
"event_shape_in",
",",
"event_shape_out",
")",
")",
"if",
"validate_args",
"else",
"(",
")",
")",
"if",
"(",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"output_tensorshape",
")",
"and",
"(",
"is_validated",
"or",
"not",
"validate_args",
")",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"validation_dependencies",
")",
":",
"output_shape",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"output_tensorshape",
",",
"name",
"=",
"'output_shape'",
",",
"dtype_hint",
"=",
"tf",
".",
"int32",
")",
"return",
"output_shape",
",",
"output_tensorshape",
"with",
"tf",
".",
"control_dependencies",
"(",
"validation_dependencies",
")",
":",
"event_shape_in_ndims",
"=",
"(",
"tf",
".",
"size",
"(",
"input",
"=",
"event_shape_in",
")",
"if",
"tensorshape_util",
".",
"num_elements",
"(",
"event_shape_in",
".",
"shape",
")",
"is",
"None",
"else",
"tensorshape_util",
".",
"num_elements",
"(",
"event_shape_in",
".",
"shape",
")",
")",
"input_non_event_shape",
",",
"input_event_shape",
"=",
"tf",
".",
"split",
"(",
"input_shape",
",",
"num_or_size_splits",
"=",
"[",
"-",
"1",
",",
"event_shape_in_ndims",
"]",
")",
"additional_assertions",
"=",
"[",
"]",
"if",
"is_validated",
":",
"pass",
"elif",
"validate_args",
":",
"# Check that `input_event_shape` and `event_shape_in` are compatible in the",
"# sense that they have equal entries in any position that isn't a `-1` in",
"# `event_shape_in`. Note that our validations at construction time ensure",
"# there is at most one such entry in `event_shape_in`.",
"mask",
"=",
"event_shape_in",
">=",
"0",
"explicit_input_event_shape",
"=",
"tf",
".",
"boolean_mask",
"(",
"tensor",
"=",
"input_event_shape",
",",
"mask",
"=",
"mask",
")",
"explicit_event_shape_in",
"=",
"tf",
".",
"boolean_mask",
"(",
"tensor",
"=",
"event_shape_in",
",",
"mask",
"=",
"mask",
")",
"additional_assertions",
".",
"append",
"(",
"assert_util",
".",
"assert_equal",
"(",
"explicit_input_event_shape",
",",
"explicit_event_shape_in",
",",
"message",
"=",
"'Input `event_shape` does not match `event_shape_in`.'",
")",
")",
"# We don't explicitly additionally verify",
"# `tf.size(input_shape) > tf.size(event_shape_in)` since `tf.split`",
"# already makes this assertion.",
"with",
"tf",
".",
"control_dependencies",
"(",
"additional_assertions",
")",
":",
"output_shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"input_non_event_shape",
",",
"event_shape_out",
"]",
",",
"axis",
"=",
"0",
",",
"name",
"=",
"'output_shape'",
")",
"return",
"output_shape",
",",
"output_tensorshape"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
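A sketch of the shape-tensor helper with fully static inputs, assuming the private function is reachable via the module path recorded in the row above; a `[2, 3]` event part is replaced by `[6]`.

```python
import tensorflow as tf
from tensorflow_probability.python.bijectors import reshape

output_shape, output_tensorshape = reshape._replace_event_shape_in_shape_tensor(
    input_shape=tf.constant([4, 2, 3], dtype=tf.int32),  # 1 batch dim + event
    event_shape_in=tf.constant([2, 3], dtype=tf.int32),
    event_shape_out=tf.constant([6], dtype=tf.int32),
    validate_args=False)
# output_shape == [4, 6]; output_tensorshape == TensorShape([4, 6]).
```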
test
|
_replace_event_shape_in_tensorshape
|
Replaces the event shape dims of a `TensorShape`.
Args:
input_tensorshape: a `TensorShape` instance in which to attempt replacing
event shape.
event_shape_in: `Tensor` shape representing the event shape expected to
be present in (rightmost dims of) `tensorshape_in`. Must be compatible
with the rightmost dims of `tensorshape_in`.
event_shape_out: `Tensor` shape representing the new event shape, i.e.,
the replacement of `event_shape_in`,
Returns:
output_tensorshape: `TensorShape` with the rightmost `event_shape_in`
replaced by `event_shape_out`. Might be partially defined, i.e.,
`TensorShape(None)`.
is_validated: Python `bool` indicating static validation happened.
Raises:
ValueError: if we can determine the event shape portion of
`tensorshape_in` as well as `event_shape_in` both statically, and they
are not compatible. "Compatible" here means that they are identical on
any dims that are not -1 in `event_shape_in`.
|
tensorflow_probability/python/bijectors/reshape.py
|
def _replace_event_shape_in_tensorshape(
input_tensorshape, event_shape_in, event_shape_out):
"""Replaces the event shape dims of a `TensorShape`.
Args:
input_tensorshape: a `TensorShape` instance in which to attempt replacing
event shape.
event_shape_in: `Tensor` shape representing the event shape expected to
be present in (rightmost dims of) `tensorshape_in`. Must be compatible
with the rightmost dims of `tensorshape_in`.
event_shape_out: `Tensor` shape representing the new event shape, i.e.,
the replacement of `event_shape_in`,
Returns:
output_tensorshape: `TensorShape` with the rightmost `event_shape_in`
replaced by `event_shape_out`. Might be partially defined, i.e.,
`TensorShape(None)`.
is_validated: Python `bool` indicating static validation happened.
Raises:
ValueError: if we can determine the event shape portion of
`tensorshape_in` as well as `event_shape_in` both statically, and they
are not compatible. "Compatible" here means that they are identical on
any dims that are not -1 in `event_shape_in`.
"""
event_shape_in_ndims = tensorshape_util.num_elements(event_shape_in.shape)
if tensorshape_util.rank(
input_tensorshape) is None or event_shape_in_ndims is None:
return tf.TensorShape(None), False # Not is_validated.
input_non_event_ndims = tensorshape_util.rank(
input_tensorshape) - event_shape_in_ndims
if input_non_event_ndims < 0:
raise ValueError(
'Input has fewer ndims ({}) than event shape ndims ({}).'.format(
tensorshape_util.rank(input_tensorshape), event_shape_in_ndims))
input_non_event_tensorshape = input_tensorshape[:input_non_event_ndims]
input_event_tensorshape = input_tensorshape[input_non_event_ndims:]
# Check that `input_event_shape_` and `event_shape_in` are compatible in the
# sense that they have equal entries in any position that isn't a `-1` in
# `event_shape_in`. Note that our validations at construction time ensure
# there is at most one such entry in `event_shape_in`.
event_shape_in_ = tf.get_static_value(event_shape_in)
is_validated = (
tensorshape_util.is_fully_defined(input_event_tensorshape) and
event_shape_in_ is not None)
if is_validated:
input_event_shape_ = np.int32(input_event_tensorshape)
mask = event_shape_in_ >= 0
explicit_input_event_shape_ = input_event_shape_[mask]
explicit_event_shape_in_ = event_shape_in_[mask]
if not all(explicit_input_event_shape_ == explicit_event_shape_in_):
raise ValueError(
'Input `event_shape` does not match `event_shape_in`. '
'({} vs {}).'.format(input_event_shape_, event_shape_in_))
event_tensorshape_out = tensorshape_util.constant_value_as_shape(
event_shape_out)
if tensorshape_util.rank(event_tensorshape_out) is None:
output_tensorshape = tf.TensorShape(None)
else:
output_tensorshape = tensorshape_util.concatenate(
input_non_event_tensorshape, event_tensorshape_out)
return output_tensorshape, is_validated
|
def _replace_event_shape_in_tensorshape(
input_tensorshape, event_shape_in, event_shape_out):
"""Replaces the event shape dims of a `TensorShape`.
Args:
input_tensorshape: a `TensorShape` instance in which to attempt replacing
event shape.
event_shape_in: `Tensor` shape representing the event shape expected to
be present in (rightmost dims of) `tensorshape_in`. Must be compatible
with the rightmost dims of `tensorshape_in`.
event_shape_out: `Tensor` shape representing the new event shape, i.e.,
the replacement of `event_shape_in`,
Returns:
output_tensorshape: `TensorShape` with the rightmost `event_shape_in`
replaced by `event_shape_out`. Might be partially defined, i.e.,
`TensorShape(None)`.
is_validated: Python `bool` indicating static validation happened.
Raises:
ValueError: if we can determine the event shape portion of
`tensorshape_in` as well as `event_shape_in` both statically, and they
are not compatible. "Compatible" here means that they are identical on
any dims that are not -1 in `event_shape_in`.
"""
event_shape_in_ndims = tensorshape_util.num_elements(event_shape_in.shape)
if tensorshape_util.rank(
input_tensorshape) is None or event_shape_in_ndims is None:
return tf.TensorShape(None), False # Not is_validated.
input_non_event_ndims = tensorshape_util.rank(
input_tensorshape) - event_shape_in_ndims
if input_non_event_ndims < 0:
raise ValueError(
'Input has fewer ndims ({}) than event shape ndims ({}).'.format(
tensorshape_util.rank(input_tensorshape), event_shape_in_ndims))
input_non_event_tensorshape = input_tensorshape[:input_non_event_ndims]
input_event_tensorshape = input_tensorshape[input_non_event_ndims:]
# Check that `input_event_shape_` and `event_shape_in` are compatible in the
# sense that they have equal entries in any position that isn't a `-1` in
# `event_shape_in`. Note that our validations at construction time ensure
# there is at most one such entry in `event_shape_in`.
event_shape_in_ = tf.get_static_value(event_shape_in)
is_validated = (
tensorshape_util.is_fully_defined(input_event_tensorshape) and
event_shape_in_ is not None)
if is_validated:
input_event_shape_ = np.int32(input_event_tensorshape)
mask = event_shape_in_ >= 0
explicit_input_event_shape_ = input_event_shape_[mask]
explicit_event_shape_in_ = event_shape_in_[mask]
if not all(explicit_input_event_shape_ == explicit_event_shape_in_):
raise ValueError(
'Input `event_shape` does not match `event_shape_in`. '
'({} vs {}).'.format(input_event_shape_, event_shape_in_))
event_tensorshape_out = tensorshape_util.constant_value_as_shape(
event_shape_out)
if tensorshape_util.rank(event_tensorshape_out) is None:
output_tensorshape = tf.TensorShape(None)
else:
output_tensorshape = tensorshape_util.concatenate(
input_non_event_tensorshape, event_tensorshape_out)
return output_tensorshape, is_validated
|
[
"Replaces",
"the",
"event",
"shape",
"dims",
"of",
"a",
"TensorShape",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/reshape.py#L316-L382
|
[
"def",
"_replace_event_shape_in_tensorshape",
"(",
"input_tensorshape",
",",
"event_shape_in",
",",
"event_shape_out",
")",
":",
"event_shape_in_ndims",
"=",
"tensorshape_util",
".",
"num_elements",
"(",
"event_shape_in",
".",
"shape",
")",
"if",
"tensorshape_util",
".",
"rank",
"(",
"input_tensorshape",
")",
"is",
"None",
"or",
"event_shape_in_ndims",
"is",
"None",
":",
"return",
"tf",
".",
"TensorShape",
"(",
"None",
")",
",",
"False",
"# Not is_validated.",
"input_non_event_ndims",
"=",
"tensorshape_util",
".",
"rank",
"(",
"input_tensorshape",
")",
"-",
"event_shape_in_ndims",
"if",
"input_non_event_ndims",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'Input has fewer ndims ({}) than event shape ndims ({}).'",
".",
"format",
"(",
"tensorshape_util",
".",
"rank",
"(",
"input_tensorshape",
")",
",",
"event_shape_in_ndims",
")",
")",
"input_non_event_tensorshape",
"=",
"input_tensorshape",
"[",
":",
"input_non_event_ndims",
"]",
"input_event_tensorshape",
"=",
"input_tensorshape",
"[",
"input_non_event_ndims",
":",
"]",
"# Check that `input_event_shape_` and `event_shape_in` are compatible in the",
"# sense that they have equal entries in any position that isn't a `-1` in",
"# `event_shape_in`. Note that our validations at construction time ensure",
"# there is at most one such entry in `event_shape_in`.",
"event_shape_in_",
"=",
"tf",
".",
"get_static_value",
"(",
"event_shape_in",
")",
"is_validated",
"=",
"(",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"input_event_tensorshape",
")",
"and",
"event_shape_in_",
"is",
"not",
"None",
")",
"if",
"is_validated",
":",
"input_event_shape_",
"=",
"np",
".",
"int32",
"(",
"input_event_tensorshape",
")",
"mask",
"=",
"event_shape_in_",
">=",
"0",
"explicit_input_event_shape_",
"=",
"input_event_shape_",
"[",
"mask",
"]",
"explicit_event_shape_in_",
"=",
"event_shape_in_",
"[",
"mask",
"]",
"if",
"not",
"all",
"(",
"explicit_input_event_shape_",
"==",
"explicit_event_shape_in_",
")",
":",
"raise",
"ValueError",
"(",
"'Input `event_shape` does not match `event_shape_in`. '",
"'({} vs {}).'",
".",
"format",
"(",
"input_event_shape_",
",",
"event_shape_in_",
")",
")",
"event_tensorshape_out",
"=",
"tensorshape_util",
".",
"constant_value_as_shape",
"(",
"event_shape_out",
")",
"if",
"tensorshape_util",
".",
"rank",
"(",
"event_tensorshape_out",
")",
"is",
"None",
":",
"output_tensorshape",
"=",
"tf",
".",
"TensorShape",
"(",
"None",
")",
"else",
":",
"output_tensorshape",
"=",
"tensorshape_util",
".",
"concatenate",
"(",
"input_non_event_tensorshape",
",",
"event_tensorshape_out",
")",
"return",
"output_tensorshape",
",",
"is_validated"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_maybe_check_valid_shape
|
Check that a shape Tensor is int-type and otherwise sane.
|
tensorflow_probability/python/bijectors/reshape.py
|
def _maybe_check_valid_shape(shape, validate_args):
"""Check that a shape Tensor is int-type and otherwise sane."""
if not dtype_util.is_integer(shape.dtype):
raise TypeError('{} dtype ({}) should be `int`-like.'.format(
shape, dtype_util.name(shape.dtype)))
assertions = []
message = '`{}` rank should be <= 1.'
if tensorshape_util.rank(shape.shape) is not None:
if tensorshape_util.rank(shape.shape) > 1:
raise ValueError(message.format(shape))
elif validate_args:
assertions.append(assert_util.assert_less(
tf.rank(shape), 2, message=message.format(shape)))
shape_ = tf.get_static_value(shape)
message = '`{}` elements must have at most one `-1`.'
if shape_ is not None:
if sum(shape_ == -1) > 1:
raise ValueError(message.format(shape))
elif validate_args:
assertions.append(assert_util.assert_less(
tf.reduce_sum(input_tensor=tf.cast(tf.equal(shape, -1), tf.int32)),
2,
message=message.format(shape)))
message = '`{}` elements must be either positive integers or `-1`.'
if shape_ is not None:
if np.any(shape_ < -1):
raise ValueError(message.format(shape))
elif validate_args:
assertions.append(assert_util.assert_greater(
shape, -2, message=message.format(shape)))
return assertions
|
def _maybe_check_valid_shape(shape, validate_args):
"""Check that a shape Tensor is int-type and otherwise sane."""
if not dtype_util.is_integer(shape.dtype):
raise TypeError('{} dtype ({}) should be `int`-like.'.format(
shape, dtype_util.name(shape.dtype)))
assertions = []
message = '`{}` rank should be <= 1.'
if tensorshape_util.rank(shape.shape) is not None:
if tensorshape_util.rank(shape.shape) > 1:
raise ValueError(message.format(shape))
elif validate_args:
assertions.append(assert_util.assert_less(
tf.rank(shape), 2, message=message.format(shape)))
shape_ = tf.get_static_value(shape)
message = '`{}` elements must have at most one `-1`.'
if shape_ is not None:
if sum(shape_ == -1) > 1:
raise ValueError(message.format(shape))
elif validate_args:
assertions.append(assert_util.assert_less(
tf.reduce_sum(input_tensor=tf.cast(tf.equal(shape, -1), tf.int32)),
2,
message=message.format(shape)))
message = '`{}` elements must be either positive integers or `-1`.'
if shape_ is not None:
if np.any(shape_ < -1):
raise ValueError(message.format(shape))
elif validate_args:
assertions.append(assert_util.assert_greater(
shape, -2, message=message.format(shape)))
return assertions
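To make the three static checks concrete, here is a small self-contained NumPy sketch (an illustrative re-implementation, not the library API) that applies the same rules to an already-known shape:
```python
import numpy as np

def check_static_shape(shape):
  """Mirrors the static branch: int dtype, rank <= 1, at most one -1, nothing below -1."""
  shape = np.asarray(shape)
  if not np.issubdtype(shape.dtype, np.integer):
    raise TypeError('shape dtype should be `int`-like.')
  if shape.ndim > 1:
    raise ValueError('shape rank should be <= 1.')
  if np.sum(shape == -1) > 1:
    raise ValueError('shape elements must have at most one `-1`.')
  if np.any(shape < -1):
    raise ValueError('shape elements must be positive integers or `-1`.')

check_static_shape([2, -1, 3])     # OK: a single -1 is allowed.
# check_static_shape([2, -1, -1])  # Would raise: more than one -1.
```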
|
[
"Check",
"that",
"a",
"shape",
"Tensor",
"is",
"int",
"-",
"type",
"and",
"otherwise",
"sane",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/reshape.py#L385-L421
|
[
"def",
"_maybe_check_valid_shape",
"(",
"shape",
",",
"validate_args",
")",
":",
"if",
"not",
"dtype_util",
".",
"is_integer",
"(",
"shape",
".",
"dtype",
")",
":",
"raise",
"TypeError",
"(",
"'{} dtype ({}) should be `int`-like.'",
".",
"format",
"(",
"shape",
",",
"dtype_util",
".",
"name",
"(",
"shape",
".",
"dtype",
")",
")",
")",
"assertions",
"=",
"[",
"]",
"message",
"=",
"'`{}` rank should be <= 1.'",
"if",
"tensorshape_util",
".",
"rank",
"(",
"shape",
".",
"shape",
")",
"is",
"not",
"None",
":",
"if",
"tensorshape_util",
".",
"rank",
"(",
"shape",
".",
"shape",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"message",
".",
"format",
"(",
"shape",
")",
")",
"elif",
"validate_args",
":",
"assertions",
".",
"append",
"(",
"assert_util",
".",
"assert_less",
"(",
"tf",
".",
"rank",
"(",
"shape",
")",
",",
"2",
",",
"message",
"=",
"message",
".",
"format",
"(",
"shape",
")",
")",
")",
"shape_",
"=",
"tf",
".",
"get_static_value",
"(",
"shape",
")",
"message",
"=",
"'`{}` elements must have at most one `-1`.'",
"if",
"shape_",
"is",
"not",
"None",
":",
"if",
"sum",
"(",
"shape_",
"==",
"-",
"1",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"message",
".",
"format",
"(",
"shape",
")",
")",
"elif",
"validate_args",
":",
"assertions",
".",
"append",
"(",
"assert_util",
".",
"assert_less",
"(",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"equal",
"(",
"shape",
",",
"-",
"1",
")",
",",
"tf",
".",
"int32",
")",
")",
",",
"2",
",",
"message",
"=",
"message",
".",
"format",
"(",
"shape",
")",
")",
")",
"message",
"=",
"'`{}` elements must be either positive integers or `-1`.'",
"if",
"shape_",
"is",
"not",
"None",
":",
"if",
"np",
".",
"any",
"(",
"shape_",
"<",
"-",
"1",
")",
":",
"raise",
"ValueError",
"(",
"message",
".",
"format",
"(",
"shape",
")",
")",
"elif",
"validate_args",
":",
"assertions",
".",
"append",
"(",
"assert_util",
".",
"assert_greater",
"(",
"shape",
",",
"-",
"2",
",",
"message",
"=",
"message",
".",
"format",
"(",
"shape",
")",
")",
")",
"return",
"assertions"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_kl_beta_beta
|
Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.
Args:
d1: instance of a Beta distribution object.
d2: instance of a Beta distribution object.
name: (optional) Name to use for created operations.
Default is "kl_beta_beta".
Returns:
Batchwise KL(d1 || d2)
|
tensorflow_probability/python/distributions/beta.py
|
def _kl_beta_beta(d1, d2, name=None):
"""Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.
Args:
d1: instance of a Beta distribution object.
d2: instance of a Beta distribution object.
name: (optional) Name to use for created operations.
Default is "kl_beta_beta".
Returns:
Batchwise KL(d1 || d2)
"""
def delta(fn, is_property=True):
fn1 = getattr(d1, fn)
fn2 = getattr(d2, fn)
return (fn2 - fn1) if is_property else (fn2() - fn1())
with tf.name_scope(name or "kl_beta_beta"):
return (delta("_log_normalization", is_property=False) -
tf.math.digamma(d1.concentration1) * delta("concentration1") -
tf.math.digamma(d1.concentration0) * delta("concentration0") +
(tf.math.digamma(d1.total_concentration) *
delta("total_concentration")))
|
def _kl_beta_beta(d1, d2, name=None):
"""Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.
Args:
d1: instance of a Beta distribution object.
d2: instance of a Beta distribution object.
name: (optional) Name to use for created operations.
Default is "kl_beta_beta".
Returns:
Batchwise KL(d1 || d2)
"""
def delta(fn, is_property=True):
fn1 = getattr(d1, fn)
fn2 = getattr(d2, fn)
return (fn2 - fn1) if is_property else (fn2() - fn1())
with tf.name_scope(name or "kl_beta_beta"):
return (delta("_log_normalization", is_property=False) -
tf.math.digamma(d1.concentration1) * delta("concentration1") -
tf.math.digamma(d1.concentration0) * delta("concentration0") +
(tf.math.digamma(d1.total_concentration) *
delta("total_concentration")))
|
[
"Calculate",
"the",
"batchwise",
"KL",
"divergence",
"KL",
"(",
"d1",
"||",
"d2",
")",
"with",
"d1",
"and",
"d2",
"Beta",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/beta.py#L331-L353
|
[
"def",
"_kl_beta_beta",
"(",
"d1",
",",
"d2",
",",
"name",
"=",
"None",
")",
":",
"def",
"delta",
"(",
"fn",
",",
"is_property",
"=",
"True",
")",
":",
"fn1",
"=",
"getattr",
"(",
"d1",
",",
"fn",
")",
"fn2",
"=",
"getattr",
"(",
"d2",
",",
"fn",
")",
"return",
"(",
"fn2",
"-",
"fn1",
")",
"if",
"is_property",
"else",
"(",
"fn2",
"(",
")",
"-",
"fn1",
"(",
")",
")",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"kl_beta_beta\"",
")",
":",
"return",
"(",
"delta",
"(",
"\"_log_normalization\"",
",",
"is_property",
"=",
"False",
")",
"-",
"tf",
".",
"math",
".",
"digamma",
"(",
"d1",
".",
"concentration1",
")",
"*",
"delta",
"(",
"\"concentration1\"",
")",
"-",
"tf",
".",
"math",
".",
"digamma",
"(",
"d1",
".",
"concentration0",
")",
"*",
"delta",
"(",
"\"concentration0\"",
")",
"+",
"(",
"tf",
".",
"math",
".",
"digamma",
"(",
"d1",
".",
"total_concentration",
")",
"*",
"delta",
"(",
"\"total_concentration\"",
")",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
Beta._maybe_assert_valid_sample
|
Checks the validity of a sample.
|
tensorflow_probability/python/distributions/beta.py
|
def _maybe_assert_valid_sample(self, x):
"""Checks the validity of a sample."""
if not self.validate_args:
return x
return distribution_util.with_dependencies([
assert_util.assert_positive(x, message="sample must be positive"),
assert_util.assert_less(x, 1., message="sample must be less than `1`."),
], x)
|
def _maybe_assert_valid_sample(self, x):
"""Checks the validity of a sample."""
if not self.validate_args:
return x
return distribution_util.with_dependencies([
assert_util.assert_positive(x, message="sample must be positive"),
assert_util.assert_less(x, 1., message="sample must be less than `1`."),
], x)
|
[
"Checks",
"the",
"validity",
"of",
"a",
"sample",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/beta.py#L320-L327
|
[
"def",
"_maybe_assert_valid_sample",
"(",
"self",
",",
"x",
")",
":",
"if",
"not",
"self",
".",
"validate_args",
":",
"return",
"x",
"return",
"distribution_util",
".",
"with_dependencies",
"(",
"[",
"assert_util",
".",
"assert_positive",
"(",
"x",
",",
"message",
"=",
"\"sample must be positive\"",
")",
",",
"assert_util",
".",
"assert_less",
"(",
"x",
",",
"1.",
",",
"message",
"=",
"\"sample must be less than `1`.\"",
")",
",",
"]",
",",
"x",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
converged_any
|
Condition to stop when any batch member converges, or all have failed.
|
tensorflow_probability/python/optimizer/bfgs_utils.py
|
def converged_any(converged, failed):
"""Condition to stop when any batch member converges, or all have failed."""
return (tf.reduce_any(input_tensor=converged) |
tf.reduce_all(input_tensor=failed))
|
def converged_any(converged, failed):
"""Condition to stop when any batch member converges, or all have failed."""
return (tf.reduce_any(input_tensor=converged) |
tf.reduce_all(input_tensor=failed))
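A quick sketch of this stopping condition on a batch of three members (values are illustrative); a single converged member is enough to stop:
```python
import tensorflow as tf

converged = tf.constant([False, True, False])
failed = tf.constant([False, False, False])
print(converged_any(converged, failed))  # True: at least one member converged.
print(converged_any(tf.constant([False]), tf.constant([True])))  # True: all failed.
```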
|
[
"Condition",
"to",
"stop",
"when",
"any",
"batch",
"member",
"converges",
"or",
"all",
"have",
"failed",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs_utils.py#L36-L39
|
[
"def",
"converged_any",
"(",
"converged",
",",
"failed",
")",
":",
"return",
"(",
"tf",
".",
"reduce_any",
"(",
"input_tensor",
"=",
"converged",
")",
"|",
"tf",
".",
"reduce_all",
"(",
"input_tensor",
"=",
"failed",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
get_initial_state_args
|
Returns a dictionary to populate the initial state of the search procedure.
Performs an initial convergence check and the first evaluation of the
objective function.
Args:
value_and_gradients_function: A Python callable that accepts a tensor and
returns a tuple of two tensors: the objective function value and its
derivative.
initial_position: The starting point of the search procedure.
grad_tolerance: The gradient tolerance for the procedure.
control_inputs: Optional ops used to assert the validity of inputs; these
are added as control dependencies to execute before the objective
function is evaluated for the first time.
Returns:
A dictionary with values for the following keys:
converged: True if the convergence check finds that the initial position
is already an argmin of the objective function.
failed: Initialized to False.
num_objective_evaluations: Initialized to 1.
position: Initialized to the initial position.
objective_value: Initialized to the value of the objective function at
the initial position.
objective_gradient: Initialized to the gradient of the objective
function at the initial position.
|
tensorflow_probability/python/optimizer/bfgs_utils.py
|
def get_initial_state_args(value_and_gradients_function,
initial_position,
grad_tolerance,
control_inputs=None):
"""Returns a dictionary to populate the initial state of the search procedure.
Performs an initial convergence check and the first evaluation of the
objective function.
Args:
value_and_gradients_function: A Python callable that accepts a tensor and
returns a tuple of two tensors: the objective function value and its
derivative.
initial_position: The starting point of the search procedure.
grad_tolerance: The gradient tolerance for the procedure.
control_inputs: Optional ops used to assert the validity of inputs; these
are added as control dependencies to execute before the objective
function is evaluated for the first time.
Returns:
A dictionary with values for the following keys:
converged: True if the convergence check finds that the initial position
is already an argmin of the objective function.
failed: Initialized to False.
num_objective_evaluations: Initialized to 1.
position: Initialized to the initial position.
objective_value: Initialized to the value of the objective function at
the initial position.
objective_gradient: Initialized to the gradient of the objective
function at the initial position.
"""
if control_inputs:
with tf.control_dependencies(control_inputs):
f0, df0 = value_and_gradients_function(initial_position)
else:
f0, df0 = value_and_gradients_function(initial_position)
converged = norm(df0, dims=1) < grad_tolerance
return dict(
converged=converged,
failed=tf.zeros_like(converged), # i.e. False.
num_iterations=tf.convert_to_tensor(value=0),
num_objective_evaluations=tf.convert_to_tensor(value=1),
position=initial_position,
objective_value=f0,
objective_gradient=df0)
|
def get_initial_state_args(value_and_gradients_function,
initial_position,
grad_tolerance,
control_inputs=None):
"""Returns a dictionary to populate the initial state of the search procedure.
Performs an initial convergence check and the first evaluation of the
objective function.
Args:
value_and_gradients_function: A Python callable that accepts a tensor and
returns a tuple of two tensors: the objective function value and its
derivative.
initial_position: The starting point of the search procedure.
grad_tolerance: The gradient tolerance for the procedure.
control_inputs: Optional ops used to assert the validity of inputs; these
are added as control dependencies to execute before the objective
function is evaluated for the first time.
Returns:
A dictionary with values for the following keys:
converged: True if the convergence check finds that the initial position
is already an argmin of the objective function.
failed: Initialized to False.
num_objective_evaluations: Initialized to 1.
position: Initialized to the initial position.
objective_value: Initialized to the value of the objective function at
the initial position.
objective_gradient: Initialized to the gradient of the objective
function at the initial position.
"""
if control_inputs:
with tf.control_dependencies(control_inputs):
f0, df0 = value_and_gradients_function(initial_position)
else:
f0, df0 = value_and_gradients_function(initial_position)
converged = norm(df0, dims=1) < grad_tolerance
return dict(
converged=converged,
failed=tf.zeros_like(converged), # i.e. False.
num_iterations=tf.convert_to_tensor(value=0),
num_objective_evaluations=tf.convert_to_tensor(value=1),
position=initial_position,
objective_value=f0,
objective_gradient=df0)
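These fields seed the public BFGS driver. A minimal end-to-end sketch using `tfp.optimizer.bfgs_minimize` on a simple quadratic (the objective and starting point are illustrative):
```python
import tensorflow as tf
import tensorflow_probability as tfp

minimum = tf.constant([1.0, -1.0])

def quadratic_with_grad(x):
  # Returns (value, gradient), the contract the optimizer expects.
  return tfp.math.value_and_gradient(
      lambda z: tf.reduce_sum((z - minimum) ** 2, axis=-1), x)

result = tfp.optimizer.bfgs_minimize(
    quadratic_with_grad, initial_position=tf.constant([0.0, 0.0]))
print(result.converged.numpy(), result.position.numpy())  # True, ~[1., -1.]
```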
|
[
"Returns",
"a",
"dictionary",
"to",
"populate",
"the",
"initial",
"state",
"of",
"the",
"search",
"procedure",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs_utils.py#L47-L91
|
[
"def",
"get_initial_state_args",
"(",
"value_and_gradients_function",
",",
"initial_position",
",",
"grad_tolerance",
",",
"control_inputs",
"=",
"None",
")",
":",
"if",
"control_inputs",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
")",
":",
"f0",
",",
"df0",
"=",
"value_and_gradients_function",
"(",
"initial_position",
")",
"else",
":",
"f0",
",",
"df0",
"=",
"value_and_gradients_function",
"(",
"initial_position",
")",
"converged",
"=",
"norm",
"(",
"df0",
",",
"dims",
"=",
"1",
")",
"<",
"grad_tolerance",
"return",
"dict",
"(",
"converged",
"=",
"converged",
",",
"failed",
"=",
"tf",
".",
"zeros_like",
"(",
"converged",
")",
",",
"# i.e. False.",
"num_iterations",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"0",
")",
",",
"num_objective_evaluations",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"1",
")",
",",
"position",
"=",
"initial_position",
",",
"objective_value",
"=",
"f0",
",",
"objective_gradient",
"=",
"df0",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
line_search_step
|
Performs the line search step of the BFGS search procedure.
Uses hager_zhang line search procedure to compute a suitable step size
to advance the current `state.position` along the given `search_direction`.
Also, if the line search is successful, updates the `state.position` by
taking the corresponding step.
Args:
state: A namedtuple instance holding values for the current state of the
search procedure. The state must include the fields: `position`,
`objective_value`, `objective_gradient`, `num_iterations`,
`num_objective_evaluations`, `converged` and `failed`.
value_and_gradients_function: A Python callable that accepts a point as a
real `Tensor` of shape `[..., n]` and returns a tuple of two tensors of
the same dtype: the objective function value, a real `Tensor` of shape
`[...]`, and its derivative, another real `Tensor` of shape `[..., n]`.
search_direction: A real `Tensor` of shape `[..., n]`. The direction along
which to perform line search.
grad_tolerance: Scalar `Tensor` of real dtype. Specifies the gradient
tolerance for the procedure.
f_relative_tolerance: Scalar `Tensor` of real dtype. Specifies the
tolerance for the relative change in the objective value.
x_tolerance: Scalar `Tensor` of real dtype. Specifies the tolerance for the
change in the position.
stopping_condition: A Python function that takes as input two Boolean
tensors of shape `[...]`, and returns a Boolean scalar tensor. The input
tensors are `converged` and `failed`, indicating the current status of
each respective batch member; the return value states whether the
algorithm should stop.
Returns:
A copy of the input state with the following fields updated:
converged: a Boolean `Tensor` of shape `[...]` indicating whether the
convergence criteria have been met.
failed: a Boolean `Tensor` of shape `[...]` indicating whether the line
search procedure failed to converge, or if either the updated gradient
or objective function are no longer finite.
num_iterations: Increased by 1.
num_objective_evaluations: Increased by the number of times that the
objective function got evaluated.
position, objective_value, objective_gradient: If line search succeeded,
updated by computing the new position and evaluating the objective
function at that position.
|
tensorflow_probability/python/optimizer/bfgs_utils.py
|
def line_search_step(state, value_and_gradients_function, search_direction,
grad_tolerance, f_relative_tolerance, x_tolerance,
stopping_condition):
"""Performs the line search step of the BFGS search procedure.
Uses hager_zhang line search procedure to compute a suitable step size
to advance the current `state.position` along the given `search_direction`.
Also, if the line search is successful, updates the `state.position` by
taking the corresponding step.
Args:
state: A namedtuple instance holding values for the current state of the
search procedure. The state must include the fields: `position`,
`objective_value`, `objective_gradient`, `num_iterations`,
`num_objective_evaluations`, `converged` and `failed`.
value_and_gradients_function: A Python callable that accepts a point as a
real `Tensor` of shape `[..., n]` and returns a tuple of two tensors of
the same dtype: the objective function value, a real `Tensor` of shape
`[...]`, and its derivative, another real `Tensor` of shape `[..., n]`.
search_direction: A real `Tensor` of shape `[..., n]`. The direction along
which to perform line search.
grad_tolerance: Scalar `Tensor` of real dtype. Specifies the gradient
tolerance for the procedure.
f_relative_tolerance: Scalar `Tensor` of real dtype. Specifies the
tolerance for the relative change in the objective value.
x_tolerance: Scalar `Tensor` of real dtype. Specifies the tolerance for the
change in the position.
stopping_condition: A Python function that takes as input two Boolean
tensors of shape `[...]`, and returns a Boolean scalar tensor. The input
tensors are `converged` and `failed`, indicating the current status of
each respective batch member; the return value states whether the
algorithm should stop.
Returns:
A copy of the input state with the following fields updated:
converged: a Boolean `Tensor` of shape `[...]` indicating whether the
convergence criteria have been met.
failed: a Boolean `Tensor` of shape `[...]` indicating whether the line
search procedure failed to converge, or if either the updated gradient
or objective function are no longer finite.
num_iterations: Increased by 1.
num_objective_evaluations: Increased by the number of times that the
objective function got evaluated.
position, objective_value, objective_gradient: If line search succeeded,
updated by computing the new position and evaluating the objective
function at that position.
"""
line_search_value_grad_func = _restrict_along_direction(
value_and_gradients_function, state.position, search_direction)
derivative_at_start_pt = tf.reduce_sum(
input_tensor=state.objective_gradient * search_direction, axis=-1)
val_0 = ValueAndGradient(x=_broadcast(0, state.position),
f=state.objective_value,
df=derivative_at_start_pt,
full_gradient=state.objective_gradient)
inactive = state.failed | state.converged
ls_result = linesearch.hager_zhang(
line_search_value_grad_func,
initial_step_size=_broadcast(1, state.position),
value_at_zero=val_0,
converged=inactive) # No search needed for these.
state_after_ls = update_fields(
state,
failed=state.failed | ~ls_result.converged,
num_iterations=state.num_iterations + 1,
num_objective_evaluations=(
state.num_objective_evaluations + ls_result.func_evals))
def _do_update_position():
# For inactive batch members `left.x` is zero. However, their
# `search_direction` might also be undefined, so we can't rely on
# multiplication by zero to produce a `position_delta` of zero.
position_delta = tf.where(
inactive,
tf.zeros_like(search_direction),
search_direction * tf.expand_dims(ls_result.left.x, axis=-1))
return _update_position(
state_after_ls,
position_delta,
ls_result.left.f,
ls_result.left.full_gradient,
grad_tolerance, f_relative_tolerance, x_tolerance)
return prefer_static.cond(
stopping_condition(state.converged, state.failed),
true_fn=lambda: state_after_ls,
false_fn=_do_update_position)
|
def line_search_step(state, value_and_gradients_function, search_direction,
grad_tolerance, f_relative_tolerance, x_tolerance,
stopping_condition):
"""Performs the line search step of the BFGS search procedure.
Uses hager_zhang line search procedure to compute a suitable step size
to advance the current `state.position` along the given `search_direction`.
Also, if the line search is successful, updates the `state.position` by
taking the corresponding step.
Args:
state: A namedtuple instance holding values for the current state of the
search procedure. The state must include the fields: `position`,
`objective_value`, `objective_gradient`, `num_iterations`,
`num_objective_evaluations`, `converged` and `failed`.
value_and_gradients_function: A Python callable that accepts a point as a
real `Tensor` of shape `[..., n]` and returns a tuple of two tensors of
the same dtype: the objective function value, a real `Tensor` of shape
`[...]`, and its derivative, another real `Tensor` of shape `[..., n]`.
search_direction: A real `Tensor` of shape `[..., n]`. The direction along
which to perform line search.
grad_tolerance: Scalar `Tensor` of real dtype. Specifies the gradient
tolerance for the procedure.
f_relative_tolerance: Scalar `Tensor` of real dtype. Specifies the
tolerance for the relative change in the objective value.
x_tolerance: Scalar `Tensor` of real dtype. Specifies the tolerance for the
change in the position.
stopping_condition: A Python function that takes as input two Boolean
tensors of shape `[...]`, and returns a Boolean scalar tensor. The input
tensors are `converged` and `failed`, indicating the current status of
each respective batch member; the return value states whether the
algorithm should stop.
Returns:
A copy of the input state with the following fields updated:
converged: a Boolean `Tensor` of shape `[...]` indicating whether the
convergence criteria have been met.
failed: a Boolean `Tensor` of shape `[...]` indicating whether the line
search procedure failed to converge, or if either the updated gradient
or objective function are no longer finite.
num_iterations: Increased by 1.
num_objective_evaluations: Increased by the number of times that the
objective function got evaluated.
position, objective_value, objective_gradient: If line search succeeded,
updated by computing the new position and evaluating the objective
function at that position.
"""
line_search_value_grad_func = _restrict_along_direction(
value_and_gradients_function, state.position, search_direction)
derivative_at_start_pt = tf.reduce_sum(
input_tensor=state.objective_gradient * search_direction, axis=-1)
val_0 = ValueAndGradient(x=_broadcast(0, state.position),
f=state.objective_value,
df=derivative_at_start_pt,
full_gradient=state.objective_gradient)
inactive = state.failed | state.converged
ls_result = linesearch.hager_zhang(
line_search_value_grad_func,
initial_step_size=_broadcast(1, state.position),
value_at_zero=val_0,
converged=inactive) # No search needed for these.
state_after_ls = update_fields(
state,
failed=state.failed | ~ls_result.converged,
num_iterations=state.num_iterations + 1,
num_objective_evaluations=(
state.num_objective_evaluations + ls_result.func_evals))
def _do_update_position():
# For inactive batch members `left.x` is zero. However, their
# `search_direction` might also be undefined, so we can't rely on
# multiplication by zero to produce a `position_delta` of zero.
position_delta = tf.where(
inactive,
tf.zeros_like(search_direction),
search_direction * tf.expand_dims(ls_result.left.x, axis=-1))
return _update_position(
state_after_ls,
position_delta,
ls_result.left.f,
ls_result.left.full_gradient,
grad_tolerance, f_relative_tolerance, x_tolerance)
return prefer_static.cond(
stopping_condition(state.converged, state.failed),
true_fn=lambda: state_after_ls,
false_fn=_do_update_position)
|
[
"Performs",
"the",
"line",
"search",
"step",
"of",
"the",
"BFGS",
"search",
"procedure",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs_utils.py#L94-L180
|
[
"def",
"line_search_step",
"(",
"state",
",",
"value_and_gradients_function",
",",
"search_direction",
",",
"grad_tolerance",
",",
"f_relative_tolerance",
",",
"x_tolerance",
",",
"stopping_condition",
")",
":",
"line_search_value_grad_func",
"=",
"_restrict_along_direction",
"(",
"value_and_gradients_function",
",",
"state",
".",
"position",
",",
"search_direction",
")",
"derivative_at_start_pt",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"state",
".",
"objective_gradient",
"*",
"search_direction",
",",
"axis",
"=",
"-",
"1",
")",
"val_0",
"=",
"ValueAndGradient",
"(",
"x",
"=",
"_broadcast",
"(",
"0",
",",
"state",
".",
"position",
")",
",",
"f",
"=",
"state",
".",
"objective_value",
",",
"df",
"=",
"derivative_at_start_pt",
",",
"full_gradient",
"=",
"state",
".",
"objective_gradient",
")",
"inactive",
"=",
"state",
".",
"failed",
"|",
"state",
".",
"converged",
"ls_result",
"=",
"linesearch",
".",
"hager_zhang",
"(",
"line_search_value_grad_func",
",",
"initial_step_size",
"=",
"_broadcast",
"(",
"1",
",",
"state",
".",
"position",
")",
",",
"value_at_zero",
"=",
"val_0",
",",
"converged",
"=",
"inactive",
")",
"# No search needed for these.",
"state_after_ls",
"=",
"update_fields",
"(",
"state",
",",
"failed",
"=",
"state",
".",
"failed",
"|",
"~",
"ls_result",
".",
"converged",
",",
"num_iterations",
"=",
"state",
".",
"num_iterations",
"+",
"1",
",",
"num_objective_evaluations",
"=",
"(",
"state",
".",
"num_objective_evaluations",
"+",
"ls_result",
".",
"func_evals",
")",
")",
"def",
"_do_update_position",
"(",
")",
":",
"# For inactive batch members `left.x` is zero. However, their",
"# `search_direction` might also be undefined, so we can't rely on",
"# multiplication by zero to produce a `position_delta` of zero.",
"position_delta",
"=",
"tf",
".",
"where",
"(",
"inactive",
",",
"tf",
".",
"zeros_like",
"(",
"search_direction",
")",
",",
"search_direction",
"*",
"tf",
".",
"expand_dims",
"(",
"ls_result",
".",
"left",
".",
"x",
",",
"axis",
"=",
"-",
"1",
")",
")",
"return",
"_update_position",
"(",
"state_after_ls",
",",
"position_delta",
",",
"ls_result",
".",
"left",
".",
"f",
",",
"ls_result",
".",
"left",
".",
"full_gradient",
",",
"grad_tolerance",
",",
"f_relative_tolerance",
",",
"x_tolerance",
")",
"return",
"prefer_static",
".",
"cond",
"(",
"stopping_condition",
"(",
"state",
".",
"converged",
",",
"state",
".",
"failed",
")",
",",
"true_fn",
"=",
"lambda",
":",
"state_after_ls",
",",
"false_fn",
"=",
"_do_update_position",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_restrict_along_direction
|
Restricts a function in n-dimensions to a given direction.
Suppose f: R^n -> R. Then given a point x0 and a vector p0 in R^n, the
restriction of the function along that direction is defined by:
```None
g(t) = f(x0 + t * p0)
```
This function performs this restriction on the given function. In addition, it
also computes the gradient of the restricted function along the restriction
direction. This is equivalent to computing `dg/dt` in the definition above.
Args:
value_and_gradients_function: Callable accepting a single real `Tensor`
argument of shape `[..., n]` and returning a tuple of a real `Tensor` of
shape `[...]` and a real `Tensor` of shape `[..., n]`. The multivariate
function whose restriction is to be computed. The output values of the
callable are the function value and the gradients at the input argument.
position: `Tensor` of real dtype and shape consumable by
`value_and_gradients_function`. Corresponds to `x0` in the definition
above.
direction: `Tensor` of the same dtype and shape as `position`. The direction
along which to restrict the function. Note that the direction need not
be a unit vector.
Returns:
restricted_value_and_gradients_func: A callable accepting a tensor of shape
broadcastable to `[...]` and same dtype as `position` and returning a
namedtuple of `Tensors`. The input tensor is the parameter along the
direction labelled `t` above. The return value contains fields:
x: A real `Tensor` of shape `[...]`. The input value `t` where the line
function was evaluated, after any necessary broadcasting.
f: A real `Tensor` of shape `[...]` containing the value of the
function at the point `position + t * direction`.
df: A real `Tensor` of shape `[...]` containing the derivative at
`position + t * direction`.
full_gradient: A real `Tensor` of shape `[..., n]`, the full gradient
of the original `value_and_gradients_function`.
|
tensorflow_probability/python/optimizer/bfgs_utils.py
|
def _restrict_along_direction(value_and_gradients_function,
position,
direction):
"""Restricts a function in n-dimensions to a given direction.
Suppose f: R^n -> R. Then given a point x0 and a vector p0 in R^n, the
restriction of the function along that direction is defined by:
```None
g(t) = f(x0 + t * p0)
```
This function performs this restriction on the given function. In addition, it
also computes the gradient of the restricted function along the restriction
direction. This is equivalent to computing `dg/dt` in the definition above.
Args:
value_and_gradients_function: Callable accepting a single real `Tensor`
argument of shape `[..., n]` and returning a tuple of a real `Tensor` of
shape `[...]` and a real `Tensor` of shape `[..., n]`. The multivariate
function whose restriction is to be computed. The output values of the
callable are the function value and the gradients at the input argument.
position: `Tensor` of real dtype and shape consumable by
`value_and_gradients_function`. Corresponds to `x0` in the definition
above.
direction: `Tensor` of the same dtype and shape as `position`. The direction
along which to restrict the function. Note that the direction need not
be a unit vector.
Returns:
restricted_value_and_gradients_func: A callable accepting a tensor of shape
broadcastable to `[...]` and same dtype as `position` and returning a
namedtuple of `Tensors`. The input tensor is the parameter along the
direction labelled `t` above. The return value contains fields:
x: A real `Tensor` of shape `[...]`. The input value `t` where the line
function was evaluated, after any necessary broadcasting.
f: A real `Tensor` of shape `[...]` containing the value of the
function at the point `position + t * direction`.
df: A real `Tensor` of shape `[...]` containing the derivative at
`position + t * direction`.
full_gradient: A real `Tensor` of shape `[..., n]`, the full gradient
of the original `value_and_gradients_function`.
"""
def _restricted_func(t):
t = _broadcast(t, position)
pt = position + tf.expand_dims(t, axis=-1) * direction
objective_value, gradient = value_and_gradients_function(pt)
return ValueAndGradient(
x=t,
f=objective_value,
df=tf.reduce_sum(input_tensor=gradient * direction, axis=-1),
full_gradient=gradient)
return _restricted_func
|
def _restrict_along_direction(value_and_gradients_function,
position,
direction):
"""Restricts a function in n-dimensions to a given direction.
Suppose f: R^n -> R. Then given a point x0 and a vector p0 in R^n, the
restriction of the function along that direction is defined by:
```None
g(t) = f(x0 + t * p0)
```
This function performs this restriction on the given function. In addition, it
also computes the gradient of the restricted function along the restriction
direction. This is equivalent to computing `dg/dt` in the definition above.
Args:
value_and_gradients_function: Callable accepting a single real `Tensor`
argument of shape `[..., n]` and returning a tuple of a real `Tensor` of
shape `[...]` and a real `Tensor` of shape `[..., n]`. The multivariate
function whose restriction is to be computed. The output values of the
callable are the function value and the gradients at the input argument.
position: `Tensor` of real dtype and shape consumable by
`value_and_gradients_function`. Corresponds to `x0` in the definition
above.
direction: `Tensor` of the same dtype and shape as `position`. The direction
along which to restrict the function. Note that the direction need not
be a unit vector.
Returns:
restricted_value_and_gradients_func: A callable accepting a tensor of shape
broadcastable to `[...]` and same dtype as `position` and returning a
namedtuple of `Tensors`. The input tensor is the parameter along the
direction labelled `t` above. The return value contains fields:
x: A real `Tensor` of shape `[...]`. The input value `t` where the line
function was evaluated, after any necessary broadcasting.
f: A real `Tensor` of shape `[...]` containing the value of the
function at the point `position + t * direction`.
df: A real `Tensor` of shape `[...]` containing the derivative at
`position + t * direction`.
full_gradient: A real `Tensor` of shape `[..., n]`, the full gradient
of the original `value_and_gradients_function`.
"""
def _restricted_func(t):
t = _broadcast(t, position)
pt = position + tf.expand_dims(t, axis=-1) * direction
objective_value, gradient = value_and_gradients_function(pt)
return ValueAndGradient(
x=t,
f=objective_value,
df=tf.reduce_sum(input_tensor=gradient * direction, axis=-1),
full_gradient=gradient)
return _restricted_func
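A small NumPy sketch of the restriction itself (illustrative values): for f(x) = sum(x**2), the restricted derivative dg/dt is the full gradient dotted with the direction:
```python
import numpy as np

def f_and_grad(x):
  return np.sum(x ** 2, axis=-1), 2.0 * x

x0 = np.array([1.0, 2.0])   # position
p0 = np.array([0.5, -1.0])  # direction (need not be a unit vector)
t = 0.3

pt = x0 + t * p0
value, grad = f_and_grad(pt)
df_dt = np.sum(grad * p0, axis=-1)  # dg/dt = grad(f)(x0 + t*p0) . p0
print(value, df_dt)
```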
|
[
"Restricts",
"a",
"function",
"in",
"n",
"-",
"dimensions",
"to",
"a",
"given",
"direction",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs_utils.py#L202-L255
|
[
"def",
"_restrict_along_direction",
"(",
"value_and_gradients_function",
",",
"position",
",",
"direction",
")",
":",
"def",
"_restricted_func",
"(",
"t",
")",
":",
"t",
"=",
"_broadcast",
"(",
"t",
",",
"position",
")",
"pt",
"=",
"position",
"+",
"tf",
".",
"expand_dims",
"(",
"t",
",",
"axis",
"=",
"-",
"1",
")",
"*",
"direction",
"objective_value",
",",
"gradient",
"=",
"value_and_gradients_function",
"(",
"pt",
")",
"return",
"ValueAndGradient",
"(",
"x",
"=",
"t",
",",
"f",
"=",
"objective_value",
",",
"df",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"gradient",
"*",
"direction",
",",
"axis",
"=",
"-",
"1",
")",
",",
"full_gradient",
"=",
"gradient",
")",
"return",
"_restricted_func"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_update_position
|
Updates the state advancing its position by a given position_delta.
|
tensorflow_probability/python/optimizer/bfgs_utils.py
|
def _update_position(state,
position_delta,
next_objective,
next_gradient,
grad_tolerance,
f_relative_tolerance,
x_tolerance):
"""Updates the state advancing its position by a given position_delta."""
failed = state.failed | ~tf.math.is_finite(next_objective) | ~tf.reduce_all(
input_tensor=tf.math.is_finite(next_gradient), axis=-1)
next_position = state.position + position_delta
converged = ~failed & _check_convergence(state.position,
next_position,
state.objective_value,
next_objective,
next_gradient,
grad_tolerance,
f_relative_tolerance,
x_tolerance)
return update_fields(
state,
converged=state.converged | converged,
failed=failed,
position=next_position,
objective_value=next_objective,
objective_gradient=next_gradient)
|
def _update_position(state,
position_delta,
next_objective,
next_gradient,
grad_tolerance,
f_relative_tolerance,
x_tolerance):
"""Updates the state advancing its position by a given position_delta."""
failed = state.failed | ~tf.math.is_finite(next_objective) | ~tf.reduce_all(
input_tensor=tf.math.is_finite(next_gradient), axis=-1)
next_position = state.position + position_delta
converged = ~failed & _check_convergence(state.position,
next_position,
state.objective_value,
next_objective,
next_gradient,
grad_tolerance,
f_relative_tolerance,
x_tolerance)
return update_fields(
state,
converged=state.converged | converged,
failed=failed,
position=next_position,
objective_value=next_objective,
objective_gradient=next_gradient)
|
[
"Updates",
"the",
"state",
"advancing",
"its",
"position",
"by",
"a",
"given",
"position_delta",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs_utils.py#L258-L284
|
[
"def",
"_update_position",
"(",
"state",
",",
"position_delta",
",",
"next_objective",
",",
"next_gradient",
",",
"grad_tolerance",
",",
"f_relative_tolerance",
",",
"x_tolerance",
")",
":",
"failed",
"=",
"state",
".",
"failed",
"|",
"~",
"tf",
".",
"math",
".",
"is_finite",
"(",
"next_objective",
")",
"|",
"~",
"tf",
".",
"reduce_all",
"(",
"input_tensor",
"=",
"tf",
".",
"math",
".",
"is_finite",
"(",
"next_gradient",
")",
",",
"axis",
"=",
"-",
"1",
")",
"next_position",
"=",
"state",
".",
"position",
"+",
"position_delta",
"converged",
"=",
"~",
"failed",
"&",
"_check_convergence",
"(",
"state",
".",
"position",
",",
"next_position",
",",
"state",
".",
"objective_value",
",",
"next_objective",
",",
"next_gradient",
",",
"grad_tolerance",
",",
"f_relative_tolerance",
",",
"x_tolerance",
")",
"return",
"update_fields",
"(",
"state",
",",
"converged",
"=",
"state",
".",
"converged",
"|",
"converged",
",",
"failed",
"=",
"failed",
",",
"position",
"=",
"next_position",
",",
"objective_value",
"=",
"next_objective",
",",
"objective_gradient",
"=",
"next_gradient",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
norm
|
Compute the norm of the given (possibly batched) value.
Args:
value: A `Tensor` of real dtype.
dims: A Python integer with the number of non-batching dimensions in the
value, i.e. `dims=0` (scalars), `dims=1` (vectors), `dims=2` (matrices).
order: Order of the norm, defaults to `np.inf`.
|
tensorflow_probability/python/optimizer/bfgs_utils.py
|
def norm(value, dims, order=None):
"""Compute the norm of the given (possibly batched) value.
Args:
value: A `Tensor` of real dtype.
dims: A Python integer with the number of non-batching dimensions in the
value, i.e. `dims=0` (scalars), `dims=1` (vectors), `dims=2` (matrices).
order: Order of the norm, defaults to `np.inf`.
"""
if dims == 0:
return tf.math.abs(value)
elif dims == 1:
axis = -1
elif dims == 2:
axis = [-1, -2]
else:
raise ValueError(dims)
if order is None:
order = np.inf
return tf.norm(tensor=value, axis=axis, ord=order)
|
def norm(value, dims, order=None):
"""Compute the norm of the given (possibly batched) value.
Args:
value: A `Tensor` of real dtype.
dims: A Python integer with the number of non-batching dimensions in the
value, i.e. `dims=0` (scalars), `dims=1` (vectors), `dims=2` (matrices).
order: Order of the norm, defaults to `np.inf`.
"""
if dims == 0:
return tf.math.abs(value)
elif dims == 1:
axis = -1
elif dims == 2:
axis = [-1, -2]
else:
raise ValueError(dims)
if order is None:
order = np.inf
return tf.norm(tensor=value, axis=axis, ord=order)
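A short sketch of the batched norm (illustrative values); with `dims=1` the norm is taken over the last axis and defaults to the infinity norm:
```python
import tensorflow as tf

batched_vectors = tf.constant([[3.0, -4.0], [1.0, 2.0]])
print(norm(batched_vectors, dims=1))           # [4., 2.]   (max-abs per vector)
print(norm(batched_vectors, dims=1, order=2))  # [5., 2.236] (Euclidean)
```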
|
[
"Compute",
"the",
"norm",
"of",
"the",
"given",
"(",
"possibly",
"batched",
")",
"value",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs_utils.py#L287-L306
|
[
"def",
"norm",
"(",
"value",
",",
"dims",
",",
"order",
"=",
"None",
")",
":",
"if",
"dims",
"==",
"0",
":",
"return",
"tf",
".",
"math",
".",
"abs",
"(",
"value",
")",
"elif",
"dims",
"==",
"1",
":",
"axis",
"=",
"-",
"1",
"elif",
"dims",
"==",
"2",
":",
"axis",
"=",
"[",
"-",
"1",
",",
"-",
"2",
"]",
"else",
":",
"ValueError",
"(",
"dims",
")",
"if",
"order",
"is",
"None",
":",
"order",
"=",
"np",
".",
"inf",
"return",
"tf",
".",
"norm",
"(",
"tensor",
"=",
"value",
",",
"axis",
"=",
"axis",
",",
"ord",
"=",
"order",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_check_convergence
|
Checks if the algorithm satisfies the convergence criteria.
|
tensorflow_probability/python/optimizer/bfgs_utils.py
|
def _check_convergence(current_position,
next_position,
current_objective,
next_objective,
next_gradient,
grad_tolerance,
f_relative_tolerance,
x_tolerance):
"""Checks if the algorithm satisfies the convergence criteria."""
grad_converged = norm(next_gradient, dims=1) <= grad_tolerance
x_converged = norm(next_position - current_position, dims=1) <= x_tolerance
f_converged = (norm(next_objective - current_objective, dims=0) <=
f_relative_tolerance * current_objective)
return grad_converged | x_converged | f_converged
|
def _check_convergence(current_position,
next_position,
current_objective,
next_objective,
next_gradient,
grad_tolerance,
f_relative_tolerance,
x_tolerance):
"""Checks if the algorithm satisfies the convergence criteria."""
grad_converged = norm(next_gradient, dims=1) <= grad_tolerance
x_converged = norm(next_position - current_position, dims=1) <= x_tolerance
f_converged = (norm(next_objective - current_objective, dims=0) <=
f_relative_tolerance * current_objective)
return grad_converged | x_converged | f_converged
|
[
"Checks",
"if",
"the",
"algorithm",
"satisfies",
"the",
"convergence",
"criteria",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs_utils.py#L309-L322
|
[
"def",
"_check_convergence",
"(",
"current_position",
",",
"next_position",
",",
"current_objective",
",",
"next_objective",
",",
"next_gradient",
",",
"grad_tolerance",
",",
"f_relative_tolerance",
",",
"x_tolerance",
")",
":",
"grad_converged",
"=",
"norm",
"(",
"next_gradient",
",",
"dims",
"=",
"1",
")",
"<=",
"grad_tolerance",
"x_converged",
"=",
"norm",
"(",
"next_position",
"-",
"current_position",
",",
"dims",
"=",
"1",
")",
"<=",
"x_tolerance",
"f_converged",
"=",
"(",
"norm",
"(",
"next_objective",
"-",
"current_objective",
",",
"dims",
"=",
"0",
")",
"<=",
"f_relative_tolerance",
"*",
"current_objective",
")",
"return",
"grad_converged",
"|",
"x_converged",
"|",
"f_converged"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_broadcast
|
Broadcast a value to match the batching dimensions of a target.
If necessary the value is converted into a tensor. Both value and target
should be of the same dtype.
Args:
value: A value to broadcast.
target: A `Tensor` of shape [b1, ..., bn, d].
Returns:
A `Tensor` of shape [b1, ..., bn] and same dtype as the target.
|
tensorflow_probability/python/optimizer/bfgs_utils.py
|
def _broadcast(value, target):
"""Broadcast a value to match the batching dimensions of a target.
If necessary the value is converted into a tensor. Both value and target
should be of the same dtype.
Args:
value: A value to broadcast.
target: A `Tensor` of shape [b1, ..., bn, d].
Returns:
A `Tensor` of shape [b1, ..., bn] and same dtype as the target.
"""
return tf.broadcast_to(
tf.convert_to_tensor(value=value, dtype=target.dtype),
distribution_util.prefer_static_shape(target)[:-1])
|
def _broadcast(value, target):
"""Broadcast a value to match the batching dimensions of a target.
If necessary the value is converted into a tensor. Both value and target
should be of the same dtype.
Args:
value: A value to broadcast.
target: A `Tensor` of shape [b1, ..., bn, d].
Returns:
A `Tensor` of shape [b1, ..., bn] and same dtype as the target.
"""
return tf.broadcast_to(
tf.convert_to_tensor(value=value, dtype=target.dtype),
distribution_util.prefer_static_shape(target)[:-1])
|
[
"Broadcast",
"a",
"value",
"to",
"match",
"the",
"batching",
"dimensions",
"of",
"a",
"target",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs_utils.py#L325-L340
|
[
"def",
"_broadcast",
"(",
"value",
",",
"target",
")",
":",
"return",
"tf",
".",
"broadcast_to",
"(",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"value",
",",
"dtype",
"=",
"target",
".",
"dtype",
")",
",",
"distribution_util",
".",
"prefer_static_shape",
"(",
"target",
")",
"[",
":",
"-",
"1",
"]",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_harmonic_number
|
Compute the harmonic number from its analytic continuation.
Derivation from [here](
https://en.wikipedia.org/wiki/Digamma_function#Relation_to_harmonic_numbers)
and [Euler's constant](
https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant).
Args:
x: input float.
Returns:
z: The analytic continuation of the harmonic number for the input.
|
tensorflow_probability/python/distributions/kumaraswamy.py
|
def _harmonic_number(x):
"""Compute the harmonic number from its analytic continuation.
Derivation from [here](
https://en.wikipedia.org/wiki/Digamma_function#Relation_to_harmonic_numbers)
and [Euler's constant](
https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant).
Args:
x: input float.
Returns:
z: The analytic continuation of the harmonic number for the input.
"""
one = tf.ones([], dtype=x.dtype)
return tf.math.digamma(x + one) - tf.math.digamma(one)
|
def _harmonic_number(x):
"""Compute the harmonic number from its analytic continuation.
Derivation from [here](
https://en.wikipedia.org/wiki/Digamma_function#Relation_to_harmonic_numbers)
and [Euler's constant](
https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant).
Args:
x: input float.
Returns:
z: The analytic continuation of the harmonic number for the input.
"""
one = tf.ones([], dtype=x.dtype)
return tf.math.digamma(x + one) - tf.math.digamma(one)
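A quick numeric check of the continuation against the finite sum for an integer argument (values illustrative):
```python
import tensorflow as tf

finite_sum = sum(1.0 / k for k in range(1, 6))           # H_5 = 2.2833...
analytic = tf.math.digamma(6.0) - tf.math.digamma(1.0)   # psi(n + 1) - psi(1)
print(finite_sum, float(analytic))  # both approximately 2.2833
```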
|
[
"Compute",
"the",
"harmonic",
"number",
"from",
"its",
"analytic",
"continuation",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/kumaraswamy.py#L40-L55
|
[
"def",
"_harmonic_number",
"(",
"x",
")",
":",
"one",
"=",
"tf",
".",
"ones",
"(",
"[",
"]",
",",
"dtype",
"=",
"x",
".",
"dtype",
")",
"return",
"tf",
".",
"math",
".",
"digamma",
"(",
"x",
"+",
"one",
")",
"-",
"tf",
".",
"math",
".",
"digamma",
"(",
"one",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
Kumaraswamy._moment
|
Compute the n'th (uncentered) moment.
|
tensorflow_probability/python/distributions/kumaraswamy.py
|
def _moment(self, n):
"""Compute the n'th (uncentered) moment."""
total_concentration = self.concentration1 + self.concentration0
expanded_concentration1 = tf.ones_like(
total_concentration, dtype=self.dtype) * self.concentration1
expanded_concentration0 = tf.ones_like(
total_concentration, dtype=self.dtype) * self.concentration0
beta_arg0 = 1 + n / expanded_concentration1
beta_arg = tf.stack([beta_arg0, expanded_concentration0], -1)
log_moment = tf.math.log(expanded_concentration0) + tf.math.lbeta(beta_arg)
return tf.exp(log_moment)
|
def _moment(self, n):
"""Compute the n'th (uncentered) moment."""
total_concentration = self.concentration1 + self.concentration0
expanded_concentration1 = tf.ones_like(
total_concentration, dtype=self.dtype) * self.concentration1
expanded_concentration0 = tf.ones_like(
total_concentration, dtype=self.dtype) * self.concentration0
beta_arg0 = 1 + n / expanded_concentration1
beta_arg = tf.stack([beta_arg0, expanded_concentration0], -1)
log_moment = tf.math.log(expanded_concentration0) + tf.math.lbeta(beta_arg)
return tf.exp(log_moment)
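The closed form above is E[X^n] = concentration0 * B(1 + n / concentration1, concentration0). A NumPy sketch (illustrative parameters) checking the first moment against inverse-CDF sampling:
```python
import math
import numpy as np

a, b, n = 2.0, 3.0, 1.0  # concentration1, concentration0, moment order
log_beta = (math.lgamma(1.0 + n / a) + math.lgamma(b)
            - math.lgamma(1.0 + n / a + b))
analytic = b * math.exp(log_beta)

# Kumaraswamy inverse CDF: x = (1 - (1 - u)**(1/b))**(1/a).
u = np.random.uniform(size=200_000)
samples = (1.0 - (1.0 - u) ** (1.0 / b)) ** (1.0 / a)
print(analytic, samples.mean())  # should agree to roughly two decimal places
```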
|
[
"Compute",
"the",
"n",
"th",
"(",
"uncentered",
")",
"moment",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/kumaraswamy.py#L191-L201
|
[
"def",
"_moment",
"(",
"self",
",",
"n",
")",
":",
"total_concentration",
"=",
"self",
".",
"concentration1",
"+",
"self",
".",
"concentration0",
"expanded_concentration1",
"=",
"tf",
".",
"ones_like",
"(",
"total_concentration",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
"*",
"self",
".",
"concentration1",
"expanded_concentration0",
"=",
"tf",
".",
"ones_like",
"(",
"total_concentration",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
"*",
"self",
".",
"concentration0",
"beta_arg0",
"=",
"1",
"+",
"n",
"/",
"expanded_concentration1",
"beta_arg",
"=",
"tf",
".",
"stack",
"(",
"[",
"beta_arg0",
",",
"expanded_concentration0",
"]",
",",
"-",
"1",
")",
"log_moment",
"=",
"tf",
".",
"math",
".",
"log",
"(",
"expanded_concentration0",
")",
"+",
"tf",
".",
"math",
".",
"lbeta",
"(",
"beta_arg",
")",
"return",
"tf",
".",
"exp",
"(",
"log_moment",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_maybe_validate_target_accept_prob
|
Validates that target_accept_prob is in (0, 1).
|
tensorflow_probability/python/mcmc/simple_step_size_adaptation.py
|
def _maybe_validate_target_accept_prob(target_accept_prob, validate_args):
"""Validates that target_accept_prob is in (0, 1)."""
if not validate_args:
return target_accept_prob
with tf.control_dependencies([
tf.compat.v1.assert_positive(
target_accept_prob, message='`target_accept_prob` must be > 0.'),
tf.compat.v1.assert_less(
target_accept_prob,
tf.ones_like(target_accept_prob),
message='`target_accept_prob` must be < 1.')
]):
return tf.identity(target_accept_prob)
|
def _maybe_validate_target_accept_prob(target_accept_prob, validate_args):
"""Validates that target_accept_prob is in (0, 1)."""
if not validate_args:
return target_accept_prob
with tf.control_dependencies([
tf.compat.v1.assert_positive(
target_accept_prob, message='`target_accept_prob` must be > 0.'),
tf.compat.v1.assert_less(
target_accept_prob,
tf.ones_like(target_accept_prob),
message='`target_accept_prob` must be < 1.')
]):
return tf.identity(target_accept_prob)
|
[
"Validates",
"that",
"target_accept_prob",
"is",
"in",
"(",
"0",
"1",
")",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/simple_step_size_adaptation.py#L422-L434
|
[
"def",
"_maybe_validate_target_accept_prob",
"(",
"target_accept_prob",
",",
"validate_args",
")",
":",
"if",
"not",
"validate_args",
":",
"return",
"target_accept_prob",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_positive",
"(",
"target_accept_prob",
",",
"message",
"=",
"'`target_accept_prob` must be > 0.'",
")",
",",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_less",
"(",
"target_accept_prob",
",",
"tf",
".",
"ones_like",
"(",
"target_accept_prob",
")",
",",
"message",
"=",
"'`target_accept_prob` must be < 1.'",
")",
"]",
")",
":",
"return",
"tf",
".",
"identity",
"(",
"target_accept_prob",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
default_exchange_proposed_fn
|
Default exchange proposal function, for replica exchange MC.
With probability `prob_exchange` propose combinations of replicas for exchange.
When exchanging, create combinations of adjacent replicas in
[Replica Exchange Monte Carlo](
https://en.wikipedia.org/wiki/Parallel_tempering)
```
exchange_fn = default_exchange_proposed_fn(prob_exchange=0.5)
exchange_proposed = exchange_fn(num_replica=3)
exchange_proposed.eval()
==> [[0, 1]] # 1 exchange, 0 <--> 1
exchange_proposed.eval()
==> [] # 0 exchanges
```
Args:
prob_exchange: Scalar `Tensor` giving probability that any exchanges will
be generated.
Returns:
default_exchange_proposed_fn_: Python callable which takes a number of
replicas (a Python integer) and returns combinations of replicas for
exchange as an [n, 2] integer `Tensor`, `0 <= n <= num_replica // 2`,
with *unique* values in the set `{0, ..., num_replica}`.
|
tensorflow_probability/python/mcmc/replica_exchange_mc.py
|
def default_exchange_proposed_fn(prob_exchange):
"""Default exchange proposal function, for replica exchange MC.
With probability `prob_exchange` propose combinations of replicas for exchange.
When exchanging, create combinations of adjacent replicas in
[Replica Exchange Monte Carlo](
https://en.wikipedia.org/wiki/Parallel_tempering)
```
exchange_fn = default_exchange_proposed_fn(prob_exchange=0.5)
exchange_proposed = exchange_fn(num_replica=3)
exchange_proposed.eval()
==> [[0, 1]] # 1 exchange, 0 <--> 1
exchange_proposed.eval()
==> [] # 0 exchanges
```
Args:
prob_exchange: Scalar `Tensor` giving probability that any exchanges will
be generated.
Returns:
default_exchange_proposed_fn_: Python callable which takes a number of
replicas (a Python integer) and returns combinations of replicas for
exchange as an [n, 2] integer `Tensor`, `0 <= n <= num_replica // 2`,
with *unique* values in the set `{0, ..., num_replica}`.
"""
def default_exchange_proposed_fn_(num_replica, seed=None):
"""Default function for `exchange_proposed_fn` of `kernel`."""
seed_stream = distributions.SeedStream(seed, 'default_exchange_proposed_fn')
zero_start = tf.random.uniform([], seed=seed_stream()) > 0.5
if num_replica % 2 == 0:
def _exchange():
flat_exchange = tf.range(num_replica)
if num_replica > 2:
start = tf.cast(~zero_start, dtype=tf.int32)
end = num_replica - start
flat_exchange = flat_exchange[start:end]
return tf.reshape(flat_exchange, [tf.size(input=flat_exchange) // 2, 2])
else:
def _exchange():
start = tf.cast(zero_start, dtype=tf.int32)
end = num_replica - tf.cast(~zero_start, dtype=tf.int32)
flat_exchange = tf.range(num_replica)[start:end]
return tf.reshape(flat_exchange, [tf.size(input=flat_exchange) // 2, 2])
def _null_exchange():
return tf.reshape(tf.cast([], dtype=tf.int32), shape=[0, 2])
return tf.cond(
pred=tf.random.uniform([], seed=seed_stream()) < prob_exchange,
true_fn=_exchange,
false_fn=_null_exchange)
return default_exchange_proposed_fn_
|
def default_exchange_proposed_fn(prob_exchange):
"""Default exchange proposal function, for replica exchange MC.
With probability `prob_exchange` propose combinations of replica for exchange.
When exchanging, create combinations of adjacent replicas in
[Replica Exchange Monte Carlo](
https://en.wikipedia.org/wiki/Parallel_tempering)
```
exchange_fn = default_exchange_proposed_fn(prob_exchange=0.5)
exchange_proposed = exchange_fn(num_replica=3)
exchange_proposed.eval()
==> [[0, 1]] # 1 exchange, 0 <--> 1
exchange_proposed.eval()
==> [] # 0 exchanges
```
Args:
prob_exchange: Scalar `Tensor` giving probability that any exchanges will
be generated.
Returns:
  default_exchange_proposed_fn_: Python callable which takes a number of
    replicas (a Python integer), and returns combinations of replicas for
    exchange as an [n, 2] integer `Tensor`, `0 <= n <= num_replica // 2`,
    with *unique* values in the set `{0, ..., num_replica - 1}`.
"""
def default_exchange_proposed_fn_(num_replica, seed=None):
"""Default function for `exchange_proposed_fn` of `kernel`."""
seed_stream = distributions.SeedStream(seed, 'default_exchange_proposed_fn')
zero_start = tf.random.uniform([], seed=seed_stream()) > 0.5
if num_replica % 2 == 0:
def _exchange():
flat_exchange = tf.range(num_replica)
if num_replica > 2:
start = tf.cast(~zero_start, dtype=tf.int32)
end = num_replica - start
flat_exchange = flat_exchange[start:end]
return tf.reshape(flat_exchange, [tf.size(input=flat_exchange) // 2, 2])
else:
def _exchange():
start = tf.cast(zero_start, dtype=tf.int32)
end = num_replica - tf.cast(~zero_start, dtype=tf.int32)
flat_exchange = tf.range(num_replica)[start:end]
return tf.reshape(flat_exchange, [tf.size(input=flat_exchange) // 2, 2])
def _null_exchange():
return tf.reshape(tf.cast([], dtype=tf.int32), shape=[0, 2])
return tf.cond(
pred=tf.random.uniform([], seed=seed_stream()) < prob_exchange,
true_fn=_exchange,
false_fn=_null_exchange)
return default_exchange_proposed_fn_
|
[
"Default",
"exchange",
"proposal",
"function",
"for",
"replica",
"exchange",
"MC",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/replica_exchange_mc.py#L49-L109
|
[
"def",
"default_exchange_proposed_fn",
"(",
"prob_exchange",
")",
":",
"def",
"default_exchange_proposed_fn_",
"(",
"num_replica",
",",
"seed",
"=",
"None",
")",
":",
"\"\"\"Default function for `exchange_proposed_fn` of `kernel`.\"\"\"",
"seed_stream",
"=",
"distributions",
".",
"SeedStream",
"(",
"seed",
",",
"'default_exchange_proposed_fn'",
")",
"zero_start",
"=",
"tf",
".",
"random",
".",
"uniform",
"(",
"[",
"]",
",",
"seed",
"=",
"seed_stream",
"(",
")",
")",
">",
"0.5",
"if",
"num_replica",
"%",
"2",
"==",
"0",
":",
"def",
"_exchange",
"(",
")",
":",
"flat_exchange",
"=",
"tf",
".",
"range",
"(",
"num_replica",
")",
"if",
"num_replica",
">",
"2",
":",
"start",
"=",
"tf",
".",
"cast",
"(",
"~",
"zero_start",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"end",
"=",
"num_replica",
"-",
"start",
"flat_exchange",
"=",
"flat_exchange",
"[",
"start",
":",
"end",
"]",
"return",
"tf",
".",
"reshape",
"(",
"flat_exchange",
",",
"[",
"tf",
".",
"size",
"(",
"input",
"=",
"flat_exchange",
")",
"//",
"2",
",",
"2",
"]",
")",
"else",
":",
"def",
"_exchange",
"(",
")",
":",
"start",
"=",
"tf",
".",
"cast",
"(",
"zero_start",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"end",
"=",
"num_replica",
"-",
"tf",
".",
"cast",
"(",
"~",
"zero_start",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"flat_exchange",
"=",
"tf",
".",
"range",
"(",
"num_replica",
")",
"[",
"start",
":",
"end",
"]",
"return",
"tf",
".",
"reshape",
"(",
"flat_exchange",
",",
"[",
"tf",
".",
"size",
"(",
"input",
"=",
"flat_exchange",
")",
"//",
"2",
",",
"2",
"]",
")",
"def",
"_null_exchange",
"(",
")",
":",
"return",
"tf",
".",
"reshape",
"(",
"tf",
".",
"cast",
"(",
"[",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"shape",
"=",
"[",
"0",
",",
"2",
"]",
")",
"return",
"tf",
".",
"cond",
"(",
"pred",
"=",
"tf",
".",
"random",
".",
"uniform",
"(",
"[",
"]",
",",
"seed",
"=",
"seed_stream",
"(",
")",
")",
"<",
"prob_exchange",
",",
"true_fn",
"=",
"_exchange",
",",
"false_fn",
"=",
"_null_exchange",
")",
"return",
"default_exchange_proposed_fn_"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
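A plain-Python sketch of the adjacent-pair proposal rule in the record above, without TF ops. The function name is made up for illustration and the randomness is not meant to reproduce the TF seeding:

```python
import random

def propose_adjacent_pairs(num_replica, prob_exchange, rng=random):
  """Illustrative restatement of the proposal logic above (not the TFP API)."""
  if rng.random() >= prob_exchange:
    return []                               # no exchange proposed this step
  zero_start = rng.random() > 0.5
  if num_replica % 2 == 0:
    start = 0 if (num_replica == 2 or zero_start) else 1
  else:
    start = 1 if zero_start else 0
  flat = list(range(num_replica))[start:]
  flat = flat[:len(flat) - len(flat) % 2]   # drop an unpaired trailing replica
  return [[flat[i], flat[i + 1]] for i in range(0, len(flat), 2)]

print(propose_adjacent_pairs(num_replica=3, prob_exchange=1.0))
# ==> [[0, 1]] or [[1, 2]], depending on the coin flip
```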
test
|
_get_field
|
field_name from kernel_results or kernel_results.accepted_results.
|
tensorflow_probability/python/mcmc/replica_exchange_mc.py
|
def _get_field(kernel_results, field_name):
"""field_name from kernel_results or kernel_results.accepted_results."""
if hasattr(kernel_results, field_name):
return getattr(kernel_results, field_name)
if hasattr(kernel_results, 'accepted_results'):
return getattr(kernel_results.accepted_results, field_name)
raise TypeError('Cannot extract %s from %s' % (field_name, kernel_results))
|
def _get_field(kernel_results, field_name):
"""field_name from kernel_results or kernel_results.accepted_results."""
if hasattr(kernel_results, field_name):
return getattr(kernel_results, field_name)
if hasattr(kernel_results, 'accepted_results'):
return getattr(kernel_results.accepted_results, field_name)
raise TypeError('Cannot extract %s from %s' % (field_name, kernel_results))
|
[
"field_name",
"from",
"kernel_results",
"or",
"kernel_results",
".",
"accepted_results",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/replica_exchange_mc.py#L569-L575
|
[
"def",
"_get_field",
"(",
"kernel_results",
",",
"field_name",
")",
":",
"if",
"hasattr",
"(",
"kernel_results",
",",
"field_name",
")",
":",
"return",
"getattr",
"(",
"kernel_results",
",",
"field_name",
")",
"if",
"hasattr",
"(",
"kernel_results",
",",
"'accepted_results'",
")",
":",
"return",
"getattr",
"(",
"kernel_results",
".",
"accepted_results",
",",
"field_name",
")",
"raise",
"TypeError",
"(",
"'Cannot extract %s from %s'",
"%",
"(",
"field_name",
",",
"kernel_results",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
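A tiny illustration of the lookup order implemented by `_get_field` above, using hypothetical namedtuple stand-ins for kernel results (Metropolis-Hastings-style results nest the accepted log-prob under `accepted_results`):

```python
import collections

# Hypothetical result structures, for illustration only.
FlatResults = collections.namedtuple('FlatResults', ['target_log_prob'])
WrappedResults = collections.namedtuple('WrappedResults', ['accepted_results'])

flat = FlatResults(target_log_prob=-3.2)
wrapped = WrappedResults(accepted_results=FlatResults(target_log_prob=-3.2))

# _get_field(flat, 'target_log_prob') hits the first branch;
# _get_field(wrapped, 'target_log_prob') falls through to accepted_results.
assert flat.target_log_prob == wrapped.accepted_results.target_log_prob
```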
test
|
ReplicaExchangeMC.one_step
|
Takes one step of the TransitionKernel.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A (possibly nested) `tuple`, `namedtuple` or
`list` of `Tensor`s representing internal calculations made within the
previous call to this function (or as returned by `bootstrap_results`).
Returns:
next_state: `Tensor` or Python `list` of `Tensor`s representing the
next state(s) of the Markov chain(s).
kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of
`Tensor`s representing internal calculations made within this function.
    This includes replica states.
|
tensorflow_probability/python/mcmc/replica_exchange_mc.py
|
def one_step(self, current_state, previous_kernel_results):
"""Takes one step of the TransitionKernel.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A (possibly nested) `tuple`, `namedtuple` or
`list` of `Tensor`s representing internal calculations made within the
previous call to this function (or as returned by `bootstrap_results`).
Returns:
next_state: `Tensor` or Python `list` of `Tensor`s representing the
next state(s) of the Markov chain(s).
kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of
`Tensor`s representing internal calculations made within this function.
    This includes replica states.
"""
# Key difficulty: The type of exchanges differs from one call to the
# next...even the number of exchanges can differ.
# As a result, exchanges must happen dynamically, in while loops.
with tf.compat.v1.name_scope(
name=mcmc_util.make_name(self.name, 'remc', 'one_step'),
values=[current_state, previous_kernel_results]):
# Each replica does `one_step` to get pre-exchange states/KernelResults.
sampled_replica_states, sampled_replica_results = zip(*[
rk.one_step(previous_kernel_results.replica_states[i],
previous_kernel_results.replica_results[i])
for i, rk in enumerate(self.replica_kernels)
])
sampled_replica_states = list(sampled_replica_states)
sampled_replica_results = list(sampled_replica_results)
states_are_lists = mcmc_util.is_list_like(sampled_replica_states[0])
if not states_are_lists:
sampled_replica_states = [[s] for s in sampled_replica_states]
num_state_parts = len(sampled_replica_states[0])
dtype = sampled_replica_states[0][0].dtype
# Must put states into TensorArrays. Why? We will read/write states
# dynamically with Tensor index `i`, and you cannot do this with lists.
# old_states[k][i] is Tensor of (old) state part k, for replica i.
# The `k` will be known statically, and `i` is a Tensor.
old_states = [
tf.TensorArray(
dtype,
size=self.num_replica,
dynamic_size=False,
clear_after_read=False,
tensor_array_name='old_states',
# State part k has same shape, regardless of replica. So use 0.
element_shape=sampled_replica_states[0][k].shape)
for k in range(num_state_parts)
]
for k in range(num_state_parts):
for i in range(self.num_replica):
old_states[k] = old_states[k].write(i, sampled_replica_states[i][k])
exchange_proposed = self.exchange_proposed_fn(
self.num_replica, seed=self._seed_stream())
exchange_proposed_n = tf.shape(input=exchange_proposed)[0]
exchanged_states = self._get_exchanged_states(
old_states, exchange_proposed, exchange_proposed_n,
sampled_replica_states, sampled_replica_results)
no_exchange_proposed, _ = tf.compat.v1.setdiff1d(
tf.range(self.num_replica), tf.reshape(exchange_proposed, [-1]))
exchanged_states = self._insert_old_states_where_no_exchange_was_proposed(
no_exchange_proposed, old_states, exchanged_states)
next_replica_states = []
for i in range(self.num_replica):
next_replica_states_i = []
for k in range(num_state_parts):
next_replica_states_i.append(exchanged_states[k].read(i))
next_replica_states.append(next_replica_states_i)
if not states_are_lists:
next_replica_states = [s[0] for s in next_replica_states]
sampled_replica_states = [s[0] for s in sampled_replica_states]
# Now that states are/aren't exchanged, bootstrap next kernel_results.
# The viewpoint is that after each exchange, we are starting anew.
next_replica_results = [
rk.bootstrap_results(state)
for rk, state in zip(self.replica_kernels, next_replica_states)
]
next_state = next_replica_states[0] # Replica 0 is the returned state(s).
kernel_results = ReplicaExchangeMCKernelResults(
replica_states=next_replica_states,
replica_results=next_replica_results,
sampled_replica_states=sampled_replica_states,
sampled_replica_results=sampled_replica_results,
)
return next_state, kernel_results
|
def one_step(self, current_state, previous_kernel_results):
"""Takes one step of the TransitionKernel.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A (possibly nested) `tuple`, `namedtuple` or
`list` of `Tensor`s representing internal calculations made within the
previous call to this function (or as returned by `bootstrap_results`).
Returns:
next_state: `Tensor` or Python `list` of `Tensor`s representing the
next state(s) of the Markov chain(s).
kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of
`Tensor`s representing internal calculations made within this function.
    This includes replica states.
"""
# Key difficulty: The type of exchanges differs from one call to the
# next...even the number of exchanges can differ.
# As a result, exchanges must happen dynamically, in while loops.
with tf.compat.v1.name_scope(
name=mcmc_util.make_name(self.name, 'remc', 'one_step'),
values=[current_state, previous_kernel_results]):
# Each replica does `one_step` to get pre-exchange states/KernelResults.
sampled_replica_states, sampled_replica_results = zip(*[
rk.one_step(previous_kernel_results.replica_states[i],
previous_kernel_results.replica_results[i])
for i, rk in enumerate(self.replica_kernels)
])
sampled_replica_states = list(sampled_replica_states)
sampled_replica_results = list(sampled_replica_results)
states_are_lists = mcmc_util.is_list_like(sampled_replica_states[0])
if not states_are_lists:
sampled_replica_states = [[s] for s in sampled_replica_states]
num_state_parts = len(sampled_replica_states[0])
dtype = sampled_replica_states[0][0].dtype
# Must put states into TensorArrays. Why? We will read/write states
# dynamically with Tensor index `i`, and you cannot do this with lists.
# old_states[k][i] is Tensor of (old) state part k, for replica i.
# The `k` will be known statically, and `i` is a Tensor.
old_states = [
tf.TensorArray(
dtype,
size=self.num_replica,
dynamic_size=False,
clear_after_read=False,
tensor_array_name='old_states',
# State part k has same shape, regardless of replica. So use 0.
element_shape=sampled_replica_states[0][k].shape)
for k in range(num_state_parts)
]
for k in range(num_state_parts):
for i in range(self.num_replica):
old_states[k] = old_states[k].write(i, sampled_replica_states[i][k])
exchange_proposed = self.exchange_proposed_fn(
self.num_replica, seed=self._seed_stream())
exchange_proposed_n = tf.shape(input=exchange_proposed)[0]
exchanged_states = self._get_exchanged_states(
old_states, exchange_proposed, exchange_proposed_n,
sampled_replica_states, sampled_replica_results)
no_exchange_proposed, _ = tf.compat.v1.setdiff1d(
tf.range(self.num_replica), tf.reshape(exchange_proposed, [-1]))
exchanged_states = self._insert_old_states_where_no_exchange_was_proposed(
no_exchange_proposed, old_states, exchanged_states)
next_replica_states = []
for i in range(self.num_replica):
next_replica_states_i = []
for k in range(num_state_parts):
next_replica_states_i.append(exchanged_states[k].read(i))
next_replica_states.append(next_replica_states_i)
if not states_are_lists:
next_replica_states = [s[0] for s in next_replica_states]
sampled_replica_states = [s[0] for s in sampled_replica_states]
# Now that states are/aren't exchanged, bootstrap next kernel_results.
# The viewpoint is that after each exchange, we are starting anew.
next_replica_results = [
rk.bootstrap_results(state)
for rk, state in zip(self.replica_kernels, next_replica_states)
]
next_state = next_replica_states[0] # Replica 0 is the returned state(s).
kernel_results = ReplicaExchangeMCKernelResults(
replica_states=next_replica_states,
replica_results=next_replica_results,
sampled_replica_states=sampled_replica_states,
sampled_replica_results=sampled_replica_results,
)
return next_state, kernel_results
|
[
"Takes",
"one",
"step",
"of",
"the",
"TransitionKernel",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/replica_exchange_mc.py#L316-L417
|
[
"def",
"one_step",
"(",
"self",
",",
"current_state",
",",
"previous_kernel_results",
")",
":",
"# Key difficulty: The type of exchanges differs from one call to the",
"# next...even the number of exchanges can differ.",
"# As a result, exchanges must happen dynamically, in while loops.",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
"=",
"mcmc_util",
".",
"make_name",
"(",
"self",
".",
"name",
",",
"'remc'",
",",
"'one_step'",
")",
",",
"values",
"=",
"[",
"current_state",
",",
"previous_kernel_results",
"]",
")",
":",
"# Each replica does `one_step` to get pre-exchange states/KernelResults.",
"sampled_replica_states",
",",
"sampled_replica_results",
"=",
"zip",
"(",
"*",
"[",
"rk",
".",
"one_step",
"(",
"previous_kernel_results",
".",
"replica_states",
"[",
"i",
"]",
",",
"previous_kernel_results",
".",
"replica_results",
"[",
"i",
"]",
")",
"for",
"i",
",",
"rk",
"in",
"enumerate",
"(",
"self",
".",
"replica_kernels",
")",
"]",
")",
"sampled_replica_states",
"=",
"list",
"(",
"sampled_replica_states",
")",
"sampled_replica_results",
"=",
"list",
"(",
"sampled_replica_results",
")",
"states_are_lists",
"=",
"mcmc_util",
".",
"is_list_like",
"(",
"sampled_replica_states",
"[",
"0",
"]",
")",
"if",
"not",
"states_are_lists",
":",
"sampled_replica_states",
"=",
"[",
"[",
"s",
"]",
"for",
"s",
"in",
"sampled_replica_states",
"]",
"num_state_parts",
"=",
"len",
"(",
"sampled_replica_states",
"[",
"0",
"]",
")",
"dtype",
"=",
"sampled_replica_states",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"dtype",
"# Must put states into TensorArrays. Why? We will read/write states",
"# dynamically with Tensor index `i`, and you cannot do this with lists.",
"# old_states[k][i] is Tensor of (old) state part k, for replica i.",
"# The `k` will be known statically, and `i` is a Tensor.",
"old_states",
"=",
"[",
"tf",
".",
"TensorArray",
"(",
"dtype",
",",
"size",
"=",
"self",
".",
"num_replica",
",",
"dynamic_size",
"=",
"False",
",",
"clear_after_read",
"=",
"False",
",",
"tensor_array_name",
"=",
"'old_states'",
",",
"# State part k has same shape, regardless of replica. So use 0.",
"element_shape",
"=",
"sampled_replica_states",
"[",
"0",
"]",
"[",
"k",
"]",
".",
"shape",
")",
"for",
"k",
"in",
"range",
"(",
"num_state_parts",
")",
"]",
"for",
"k",
"in",
"range",
"(",
"num_state_parts",
")",
":",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_replica",
")",
":",
"old_states",
"[",
"k",
"]",
"=",
"old_states",
"[",
"k",
"]",
".",
"write",
"(",
"i",
",",
"sampled_replica_states",
"[",
"i",
"]",
"[",
"k",
"]",
")",
"exchange_proposed",
"=",
"self",
".",
"exchange_proposed_fn",
"(",
"self",
".",
"num_replica",
",",
"seed",
"=",
"self",
".",
"_seed_stream",
"(",
")",
")",
"exchange_proposed_n",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"exchange_proposed",
")",
"[",
"0",
"]",
"exchanged_states",
"=",
"self",
".",
"_get_exchanged_states",
"(",
"old_states",
",",
"exchange_proposed",
",",
"exchange_proposed_n",
",",
"sampled_replica_states",
",",
"sampled_replica_results",
")",
"no_exchange_proposed",
",",
"_",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"setdiff1d",
"(",
"tf",
".",
"range",
"(",
"self",
".",
"num_replica",
")",
",",
"tf",
".",
"reshape",
"(",
"exchange_proposed",
",",
"[",
"-",
"1",
"]",
")",
")",
"exchanged_states",
"=",
"self",
".",
"_insert_old_states_where_no_exchange_was_proposed",
"(",
"no_exchange_proposed",
",",
"old_states",
",",
"exchanged_states",
")",
"next_replica_states",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_replica",
")",
":",
"next_replica_states_i",
"=",
"[",
"]",
"for",
"k",
"in",
"range",
"(",
"num_state_parts",
")",
":",
"next_replica_states_i",
".",
"append",
"(",
"exchanged_states",
"[",
"k",
"]",
".",
"read",
"(",
"i",
")",
")",
"next_replica_states",
".",
"append",
"(",
"next_replica_states_i",
")",
"if",
"not",
"states_are_lists",
":",
"next_replica_states",
"=",
"[",
"s",
"[",
"0",
"]",
"for",
"s",
"in",
"next_replica_states",
"]",
"sampled_replica_states",
"=",
"[",
"s",
"[",
"0",
"]",
"for",
"s",
"in",
"sampled_replica_states",
"]",
"# Now that states are/aren't exchanged, bootstrap next kernel_results.",
"# The viewpoint is that after each exchange, we are starting anew.",
"next_replica_results",
"=",
"[",
"rk",
".",
"bootstrap_results",
"(",
"state",
")",
"for",
"rk",
",",
"state",
"in",
"zip",
"(",
"self",
".",
"replica_kernels",
",",
"next_replica_states",
")",
"]",
"next_state",
"=",
"next_replica_states",
"[",
"0",
"]",
"# Replica 0 is the returned state(s).",
"kernel_results",
"=",
"ReplicaExchangeMCKernelResults",
"(",
"replica_states",
"=",
"next_replica_states",
",",
"replica_results",
"=",
"next_replica_results",
",",
"sampled_replica_states",
"=",
"sampled_replica_states",
",",
"sampled_replica_results",
"=",
"sampled_replica_results",
",",
")",
"return",
"next_state",
",",
"kernel_results"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
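A minimal sketch of driving the kernel above by hand via the `bootstrap_results` / `one_step` protocol, assuming the `ReplicaExchangeMC`, `HamiltonianMonteCarlo`, and `make_kernel_fn` signatures at this commit (TF1-style `seed` arguments); the target and hyperparameters are illustrative:

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
target = tfd.Normal(loc=0., scale=1.)

def make_kernel_fn(target_log_prob_fn, seed):
  # One inner kernel per replica; each sees a tempered target_log_prob_fn.
  return tfp.mcmc.HamiltonianMonteCarlo(
      target_log_prob_fn=target_log_prob_fn,
      step_size=0.5, num_leapfrog_steps=3, seed=seed)

remc = tfp.mcmc.ReplicaExchangeMC(
    target_log_prob_fn=target.log_prob,
    inverse_temperatures=[1., 0.5, 0.25],
    make_kernel_fn=make_kernel_fn,
    seed=42)

state = tf.constant(1.)
results = remc.bootstrap_results(state)
for _ in range(5):
  state, results = remc.one_step(state, results)
# `state` follows replica 0; `results.replica_states` holds every replica.
```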
test
|
ReplicaExchangeMC._get_exchanged_states
|
Get list of TensorArrays holding exchanged states, and zeros.
|
tensorflow_probability/python/mcmc/replica_exchange_mc.py
|
def _get_exchanged_states(self, old_states, exchange_proposed,
exchange_proposed_n, sampled_replica_states,
sampled_replica_results):
"""Get list of TensorArrays holding exchanged states, and zeros."""
with tf.compat.v1.name_scope('get_exchanged_states'):
target_log_probs = []
for replica in range(self.num_replica):
replica_log_prob = _get_field(sampled_replica_results[replica],
'target_log_prob')
inverse_temp = self.inverse_temperatures[replica]
target_log_probs.append(replica_log_prob / inverse_temp)
target_log_probs = tf.stack(target_log_probs, axis=0)
dtype = target_log_probs.dtype
num_state_parts = len(sampled_replica_states[0])
# exchanged_states[k][i] is Tensor of (new) state part k, for replica i.
# The `k` will be known statically, and `i` is a Tensor.
# We will insert values into indices `i` for every replica with a proposed
# exchange.
exchanged_states = [
tf.TensorArray(
dtype,
size=self.num_replica,
dynamic_size=False,
tensor_array_name='exchanged_states',
# State part k has same shape, regardless of replica. So use 0.
element_shape=sampled_replica_states[0][k].shape)
for k in range(num_state_parts)
]
# Draw random variables here, to avoid sampling in the loop (and losing
# reproducibility). This may mean we sample too many, but we will always
# have enough.
sample_shape = tf.concat(
([self.num_replica // 2], tf.shape(input=target_log_probs)[1:]),
axis=0)
log_uniforms = tf.math.log(
tf.random.uniform(
shape=sample_shape, dtype=dtype, seed=self._seed_stream()))
def _swap(is_exchange_accepted, x, y):
"""Swap batches of x, y where accepted."""
with tf.compat.v1.name_scope('swap_where_exchange_accepted'):
new_x = mcmc_util.choose(is_exchange_accepted, y, x)
new_y = mcmc_util.choose(is_exchange_accepted, x, y)
return new_x, new_y
def cond(i, unused_exchanged_states):
return i < exchange_proposed_n
def body(i, exchanged_states):
"""Body of while loop for exchanging states."""
# Propose exchange between replicas indexed by m and n.
m, n = tf.unstack(exchange_proposed[i])
# Construct log_accept_ratio: -temp_diff * target_log_prob_diff.
# Note target_log_prob_diff = -EnergyDiff (common definition is in terms
# of energy).
temp_diff = self.inverse_temperatures[m] - self.inverse_temperatures[n]
# Difference of target log probs may be +- Inf or NaN. We want the
# product of this with the temperature difference to have "alt value" of
# -Inf.
log_accept_ratio = mcmc_util.safe_sum(
[-temp_diff * target_log_probs[m], temp_diff * target_log_probs[n]])
is_exchange_accepted = log_uniforms[i] < log_accept_ratio
for k in range(num_state_parts):
new_m, new_n = _swap(is_exchange_accepted, old_states[k].read(m),
old_states[k].read(n))
exchanged_states[k] = exchanged_states[k].write(m, new_m)
exchanged_states[k] = exchanged_states[k].write(n, new_n)
return i + 1, exchanged_states
# At this point, exchanged_states[k] is a length num_replicas TensorArray.
return tf.while_loop(
cond=cond, body=body, loop_vars=[tf.constant(0),
exchanged_states])[1]
|
def _get_exchanged_states(self, old_states, exchange_proposed,
exchange_proposed_n, sampled_replica_states,
sampled_replica_results):
"""Get list of TensorArrays holding exchanged states, and zeros."""
with tf.compat.v1.name_scope('get_exchanged_states'):
target_log_probs = []
for replica in range(self.num_replica):
replica_log_prob = _get_field(sampled_replica_results[replica],
'target_log_prob')
inverse_temp = self.inverse_temperatures[replica]
target_log_probs.append(replica_log_prob / inverse_temp)
target_log_probs = tf.stack(target_log_probs, axis=0)
dtype = target_log_probs.dtype
num_state_parts = len(sampled_replica_states[0])
# exchanged_states[k][i] is Tensor of (new) state part k, for replica i.
# The `k` will be known statically, and `i` is a Tensor.
# We will insert values into indices `i` for every replica with a proposed
# exchange.
exchanged_states = [
tf.TensorArray(
dtype,
size=self.num_replica,
dynamic_size=False,
tensor_array_name='exchanged_states',
# State part k has same shape, regardless of replica. So use 0.
element_shape=sampled_replica_states[0][k].shape)
for k in range(num_state_parts)
]
# Draw random variables here, to avoid sampling in the loop (and losing
# reproducibility). This may mean we sample too many, but we will always
# have enough.
sample_shape = tf.concat(
([self.num_replica // 2], tf.shape(input=target_log_probs)[1:]),
axis=0)
log_uniforms = tf.math.log(
tf.random.uniform(
shape=sample_shape, dtype=dtype, seed=self._seed_stream()))
def _swap(is_exchange_accepted, x, y):
"""Swap batches of x, y where accepted."""
with tf.compat.v1.name_scope('swap_where_exchange_accepted'):
new_x = mcmc_util.choose(is_exchange_accepted, y, x)
new_y = mcmc_util.choose(is_exchange_accepted, x, y)
return new_x, new_y
def cond(i, unused_exchanged_states):
return i < exchange_proposed_n
def body(i, exchanged_states):
"""Body of while loop for exchanging states."""
# Propose exchange between replicas indexed by m and n.
m, n = tf.unstack(exchange_proposed[i])
# Construct log_accept_ratio: -temp_diff * target_log_prob_diff.
# Note target_log_prob_diff = -EnergyDiff (common definition is in terms
# of energy).
temp_diff = self.inverse_temperatures[m] - self.inverse_temperatures[n]
# Difference of target log probs may be +- Inf or NaN. We want the
# product of this with the temperature difference to have "alt value" of
# -Inf.
log_accept_ratio = mcmc_util.safe_sum(
[-temp_diff * target_log_probs[m], temp_diff * target_log_probs[n]])
is_exchange_accepted = log_uniforms[i] < log_accept_ratio
for k in range(num_state_parts):
new_m, new_n = _swap(is_exchange_accepted, old_states[k].read(m),
old_states[k].read(n))
exchanged_states[k] = exchanged_states[k].write(m, new_m)
exchanged_states[k] = exchanged_states[k].write(n, new_n)
return i + 1, exchanged_states
# At this point, exchanged_states[k] is a length num_replicas TensorArray.
return tf.while_loop(
cond=cond, body=body, loop_vars=[tf.constant(0),
exchanged_states])[1]
|
[
"Get",
"list",
"of",
"TensorArrays",
"holding",
"exchanged",
"states",
"and",
"zeros",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/replica_exchange_mc.py#L419-L498
|
[
"def",
"_get_exchanged_states",
"(",
"self",
",",
"old_states",
",",
"exchange_proposed",
",",
"exchange_proposed_n",
",",
"sampled_replica_states",
",",
"sampled_replica_results",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"'get_exchanged_states'",
")",
":",
"target_log_probs",
"=",
"[",
"]",
"for",
"replica",
"in",
"range",
"(",
"self",
".",
"num_replica",
")",
":",
"replica_log_prob",
"=",
"_get_field",
"(",
"sampled_replica_results",
"[",
"replica",
"]",
",",
"'target_log_prob'",
")",
"inverse_temp",
"=",
"self",
".",
"inverse_temperatures",
"[",
"replica",
"]",
"target_log_probs",
".",
"append",
"(",
"replica_log_prob",
"/",
"inverse_temp",
")",
"target_log_probs",
"=",
"tf",
".",
"stack",
"(",
"target_log_probs",
",",
"axis",
"=",
"0",
")",
"dtype",
"=",
"target_log_probs",
".",
"dtype",
"num_state_parts",
"=",
"len",
"(",
"sampled_replica_states",
"[",
"0",
"]",
")",
"# exchanged_states[k][i] is Tensor of (new) state part k, for replica i.",
"# The `k` will be known statically, and `i` is a Tensor.",
"# We will insert values into indices `i` for every replica with a proposed",
"# exchange.",
"exchanged_states",
"=",
"[",
"tf",
".",
"TensorArray",
"(",
"dtype",
",",
"size",
"=",
"self",
".",
"num_replica",
",",
"dynamic_size",
"=",
"False",
",",
"tensor_array_name",
"=",
"'exchanged_states'",
",",
"# State part k has same shape, regardless of replica. So use 0.",
"element_shape",
"=",
"sampled_replica_states",
"[",
"0",
"]",
"[",
"k",
"]",
".",
"shape",
")",
"for",
"k",
"in",
"range",
"(",
"num_state_parts",
")",
"]",
"# Draw random variables here, to avoid sampling in the loop (and losing",
"# reproducibility). This may mean we sample too many, but we will always",
"# have enough.",
"sample_shape",
"=",
"tf",
".",
"concat",
"(",
"(",
"[",
"self",
".",
"num_replica",
"//",
"2",
"]",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"target_log_probs",
")",
"[",
"1",
":",
"]",
")",
",",
"axis",
"=",
"0",
")",
"log_uniforms",
"=",
"tf",
".",
"math",
".",
"log",
"(",
"tf",
".",
"random",
".",
"uniform",
"(",
"shape",
"=",
"sample_shape",
",",
"dtype",
"=",
"dtype",
",",
"seed",
"=",
"self",
".",
"_seed_stream",
"(",
")",
")",
")",
"def",
"_swap",
"(",
"is_exchange_accepted",
",",
"x",
",",
"y",
")",
":",
"\"\"\"Swap batches of x, y where accepted.\"\"\"",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"'swap_where_exchange_accepted'",
")",
":",
"new_x",
"=",
"mcmc_util",
".",
"choose",
"(",
"is_exchange_accepted",
",",
"y",
",",
"x",
")",
"new_y",
"=",
"mcmc_util",
".",
"choose",
"(",
"is_exchange_accepted",
",",
"x",
",",
"y",
")",
"return",
"new_x",
",",
"new_y",
"def",
"cond",
"(",
"i",
",",
"unused_exchanged_states",
")",
":",
"return",
"i",
"<",
"exchange_proposed_n",
"def",
"body",
"(",
"i",
",",
"exchanged_states",
")",
":",
"\"\"\"Body of while loop for exchanging states.\"\"\"",
"# Propose exchange between replicas indexed by m and n.",
"m",
",",
"n",
"=",
"tf",
".",
"unstack",
"(",
"exchange_proposed",
"[",
"i",
"]",
")",
"# Construct log_accept_ratio: -temp_diff * target_log_prob_diff.",
"# Note target_log_prob_diff = -EnergyDiff (common definition is in terms",
"# of energy).",
"temp_diff",
"=",
"self",
".",
"inverse_temperatures",
"[",
"m",
"]",
"-",
"self",
".",
"inverse_temperatures",
"[",
"n",
"]",
"# Difference of target log probs may be +- Inf or NaN. We want the",
"# product of this with the temperature difference to have \"alt value\" of",
"# -Inf.",
"log_accept_ratio",
"=",
"mcmc_util",
".",
"safe_sum",
"(",
"[",
"-",
"temp_diff",
"*",
"target_log_probs",
"[",
"m",
"]",
",",
"temp_diff",
"*",
"target_log_probs",
"[",
"n",
"]",
"]",
")",
"is_exchange_accepted",
"=",
"log_uniforms",
"[",
"i",
"]",
"<",
"log_accept_ratio",
"for",
"k",
"in",
"range",
"(",
"num_state_parts",
")",
":",
"new_m",
",",
"new_n",
"=",
"_swap",
"(",
"is_exchange_accepted",
",",
"old_states",
"[",
"k",
"]",
".",
"read",
"(",
"m",
")",
",",
"old_states",
"[",
"k",
"]",
".",
"read",
"(",
"n",
")",
")",
"exchanged_states",
"[",
"k",
"]",
"=",
"exchanged_states",
"[",
"k",
"]",
".",
"write",
"(",
"m",
",",
"new_m",
")",
"exchanged_states",
"[",
"k",
"]",
"=",
"exchanged_states",
"[",
"k",
"]",
".",
"write",
"(",
"n",
",",
"new_n",
")",
"return",
"i",
"+",
"1",
",",
"exchanged_states",
"# At this point, exchanged_states[k] is a length num_replicas TensorArray.",
"return",
"tf",
".",
"while_loop",
"(",
"cond",
"=",
"cond",
",",
"body",
"=",
"body",
",",
"loop_vars",
"=",
"[",
"tf",
".",
"constant",
"(",
"0",
")",
",",
"exchanged_states",
"]",
")",
"[",
"1",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
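The swap acceptance built in the loop body above reduces to the standard parallel-tempering Metropolis ratio. A small numeric sketch with illustrative numbers only:

```python
import math

beta_m, beta_n = 1.0, 0.5            # inverse temperatures of the two replicas
log_prob_m, log_prob_n = -4.0, -9.0  # *untempered* target log-probs
# target_log_probs[i] above stores tempered_log_prob / inverse_temp, i.e. the
# untempered value, so a proposed swap is accepted with probability
#   min(1, exp((beta_m - beta_n) * (log_prob_n - log_prob_m))).
temp_diff = beta_m - beta_n
log_accept_ratio = -temp_diff * log_prob_m + temp_diff * log_prob_n
print(log_accept_ratio, min(1., math.exp(log_accept_ratio)))  # -2.5, ~0.082
```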
test
|
ReplicaExchangeMC.bootstrap_results
|
Returns an object with the same type as returned by `one_step`.
Args:
init_state: `Tensor` or Python `list` of `Tensor`s representing the
initial state(s) of the Markov chain(s).
Returns:
kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of
`Tensor`s representing internal calculations made within this function.
    This includes replica states.
|
tensorflow_probability/python/mcmc/replica_exchange_mc.py
|
def bootstrap_results(self, init_state):
"""Returns an object with the same type as returned by `one_step`.
Args:
init_state: `Tensor` or Python `list` of `Tensor`s representing the
initial state(s) of the Markov chain(s).
Returns:
kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of
`Tensor`s representing internal calculations made within this function.
    This includes replica states.
"""
with tf.compat.v1.name_scope(
name=mcmc_util.make_name(self.name, 'remc', 'bootstrap_results'),
values=[init_state]):
replica_results = [
self.replica_kernels[i].bootstrap_results(init_state)
for i in range(self.num_replica)
]
init_state_parts = (
list(init_state)
if mcmc_util.is_list_like(init_state) else [init_state])
# Convert all states parts to tensor...
replica_states = [[
tf.convert_to_tensor(value=s) for s in init_state_parts
] for i in range(self.num_replica)]
if not mcmc_util.is_list_like(init_state):
replica_states = [s[0] for s in replica_states]
return ReplicaExchangeMCKernelResults(
replica_states=replica_states,
replica_results=replica_results,
sampled_replica_states=replica_states,
sampled_replica_results=replica_results,
)
|
def bootstrap_results(self, init_state):
"""Returns an object with the same type as returned by `one_step`.
Args:
init_state: `Tensor` or Python `list` of `Tensor`s representing the
initial state(s) of the Markov chain(s).
Returns:
kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of
`Tensor`s representing internal calculations made within this function.
    This includes replica states.
"""
with tf.compat.v1.name_scope(
name=mcmc_util.make_name(self.name, 'remc', 'bootstrap_results'),
values=[init_state]):
replica_results = [
self.replica_kernels[i].bootstrap_results(init_state)
for i in range(self.num_replica)
]
init_state_parts = (
list(init_state)
if mcmc_util.is_list_like(init_state) else [init_state])
# Convert all states parts to tensor...
replica_states = [[
tf.convert_to_tensor(value=s) for s in init_state_parts
] for i in range(self.num_replica)]
if not mcmc_util.is_list_like(init_state):
replica_states = [s[0] for s in replica_states]
return ReplicaExchangeMCKernelResults(
replica_states=replica_states,
replica_results=replica_results,
sampled_replica_states=replica_states,
sampled_replica_results=replica_results,
)
|
[
"Returns",
"an",
"object",
"with",
"the",
"same",
"type",
"as",
"returned",
"by",
"one_step",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/replica_exchange_mc.py#L519-L556
|
[
"def",
"bootstrap_results",
"(",
"self",
",",
"init_state",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
"=",
"mcmc_util",
".",
"make_name",
"(",
"self",
".",
"name",
",",
"'remc'",
",",
"'bootstrap_results'",
")",
",",
"values",
"=",
"[",
"init_state",
"]",
")",
":",
"replica_results",
"=",
"[",
"self",
".",
"replica_kernels",
"[",
"i",
"]",
".",
"bootstrap_results",
"(",
"init_state",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_replica",
")",
"]",
"init_state_parts",
"=",
"(",
"list",
"(",
"init_state",
")",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"init_state",
")",
"else",
"[",
"init_state",
"]",
")",
"# Convert all states parts to tensor...",
"replica_states",
"=",
"[",
"[",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"s",
")",
"for",
"s",
"in",
"init_state_parts",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_replica",
")",
"]",
"if",
"not",
"mcmc_util",
".",
"is_list_like",
"(",
"init_state",
")",
":",
"replica_states",
"=",
"[",
"s",
"[",
"0",
"]",
"for",
"s",
"in",
"replica_states",
"]",
"return",
"ReplicaExchangeMCKernelResults",
"(",
"replica_states",
"=",
"replica_states",
",",
"replica_results",
"=",
"replica_results",
",",
"sampled_replica_states",
"=",
"replica_states",
",",
"sampled_replica_results",
"=",
"replica_results",
",",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
DirichletMultinomial._variance_scale_term
|
Helper to `_covariance` and `_variance` which computes a shared scale.
|
tensorflow_probability/python/distributions/dirichlet_multinomial.py
|
def _variance_scale_term(self):
"""Helper to `_covariance` and `_variance` which computes a shared scale."""
# Expand back the last dim so the shape of _variance_scale_term matches the
# shape of self.concentration.
c0 = self.total_concentration[..., tf.newaxis]
return tf.sqrt((1. + c0 / self.total_count[..., tf.newaxis]) / (1. + c0))
|
def _variance_scale_term(self):
"""Helper to `_covariance` and `_variance` which computes a shared scale."""
# Expand back the last dim so the shape of _variance_scale_term matches the
# shape of self.concentration.
c0 = self.total_concentration[..., tf.newaxis]
return tf.sqrt((1. + c0 / self.total_count[..., tf.newaxis]) / (1. + c0))
|
[
"Helper",
"to",
"_covariance",
"and",
"_variance",
"which",
"computes",
"a",
"shared",
"scale",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/dirichlet_multinomial.py#L322-L327
|
[
"def",
"_variance_scale_term",
"(",
"self",
")",
":",
"# Expand back the last dim so the shape of _variance_scale_term matches the",
"# shape of self.concentration.",
"c0",
"=",
"self",
".",
"total_concentration",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"return",
"tf",
".",
"sqrt",
"(",
"(",
"1.",
"+",
"c0",
"/",
"self",
".",
"total_count",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
")",
"/",
"(",
"1.",
"+",
"c0",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
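A NumPy restatement of the shared scale computed in the record above; the concentration and count values are illustrative:

```python
import numpy as np

# scale = sqrt((1 + c0 / n) / (1 + c0)), with c0 = sum(concentration) and
# n = total_count, broadcast against the trailing concentration dimension.
concentration = np.array([1., 2., 3.])
total_count = 10.
c0 = concentration.sum()
scale = np.sqrt((1. + c0 / total_count) / (1. + c0))
print(scale)  # ~0.478
```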
test
|
DirichletMultinomial._maybe_assert_valid_concentration
|
Checks the validity of the concentration parameter.
|
tensorflow_probability/python/distributions/dirichlet_multinomial.py
|
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of the concentration parameter."""
if not validate_args:
return concentration
concentration = distribution_util.embed_check_categorical_event_shape(
concentration)
return distribution_util.with_dependencies([
assert_util.assert_positive(
concentration, message="Concentration parameter must be positive."),
], concentration)
|
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of the concentration parameter."""
if not validate_args:
return concentration
concentration = distribution_util.embed_check_categorical_event_shape(
concentration)
return distribution_util.with_dependencies([
assert_util.assert_positive(
concentration, message="Concentration parameter must be positive."),
], concentration)
|
[
"Checks",
"the",
"validity",
"of",
"the",
"concentration",
"parameter",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/dirichlet_multinomial.py#L329-L338
|
[
"def",
"_maybe_assert_valid_concentration",
"(",
"self",
",",
"concentration",
",",
"validate_args",
")",
":",
"if",
"not",
"validate_args",
":",
"return",
"concentration",
"concentration",
"=",
"distribution_util",
".",
"embed_check_categorical_event_shape",
"(",
"concentration",
")",
"return",
"distribution_util",
".",
"with_dependencies",
"(",
"[",
"assert_util",
".",
"assert_positive",
"(",
"concentration",
",",
"message",
"=",
"\"Concentration parameter must be positive.\"",
")",
",",
"]",
",",
"concentration",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
DirichletMultinomial._maybe_assert_valid_sample
|
Check counts for proper shape, values, then return tensor version.
|
tensorflow_probability/python/distributions/dirichlet_multinomial.py
|
def _maybe_assert_valid_sample(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args:
return counts
counts = distribution_util.embed_check_nonnegative_integer_form(counts)
return distribution_util.with_dependencies([
assert_util.assert_equal(
self.total_count,
tf.reduce_sum(input_tensor=counts, axis=-1),
message="counts last-dimension must sum to `self.total_count`"),
], counts)
|
def _maybe_assert_valid_sample(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args:
return counts
counts = distribution_util.embed_check_nonnegative_integer_form(counts)
return distribution_util.with_dependencies([
assert_util.assert_equal(
self.total_count,
tf.reduce_sum(input_tensor=counts, axis=-1),
message="counts last-dimension must sum to `self.total_count`"),
], counts)
|
[
"Check",
"counts",
"for",
"proper",
"shape",
"values",
"then",
"return",
"tensor",
"version",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/dirichlet_multinomial.py#L340-L350
|
[
"def",
"_maybe_assert_valid_sample",
"(",
"self",
",",
"counts",
")",
":",
"if",
"not",
"self",
".",
"validate_args",
":",
"return",
"counts",
"counts",
"=",
"distribution_util",
".",
"embed_check_nonnegative_integer_form",
"(",
"counts",
")",
"return",
"distribution_util",
".",
"with_dependencies",
"(",
"[",
"assert_util",
".",
"assert_equal",
"(",
"self",
".",
"total_count",
",",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"counts",
",",
"axis",
"=",
"-",
"1",
")",
",",
"message",
"=",
"\"counts last-dimension must sum to `self.total_count`\"",
")",
",",
"]",
",",
"counts",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
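The contract enforced in the record above, stated eagerly with illustrative numbers: counts must be nonnegative integer-valued and their last dimension must sum to `total_count`.

```python
import numpy as np

total_count = 5.
counts_ok = np.array([2., 0., 3.])    # nonnegative integer-valued, sums to 5
counts_bad = np.array([2., 0., 2.])   # sums to 4: would trip assert_equal
assert np.all(counts_ok >= 0) and counts_ok.sum() == total_count
assert counts_bad.sum() != total_count
```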
test
|
forward_log_det_jacobian_fn
|
Makes a function which applies a list of Bijectors' `log_det_jacobian`s.
|
tensorflow_probability/python/mcmc/transformed_kernel.py
|
def forward_log_det_jacobian_fn(bijector):
"""Makes a function which applies a list of Bijectors' `log_det_jacobian`s."""
if not mcmc_util.is_list_like(bijector):
bijector = [bijector]
def fn(transformed_state_parts, event_ndims):
return sum([
b.forward_log_det_jacobian(sp, event_ndims=e)
for b, e, sp in zip(bijector, event_ndims, transformed_state_parts)
])
return fn
|
def forward_log_det_jacobian_fn(bijector):
"""Makes a function which applies a list of Bijectors' `log_det_jacobian`s."""
if not mcmc_util.is_list_like(bijector):
bijector = [bijector]
def fn(transformed_state_parts, event_ndims):
return sum([
b.forward_log_det_jacobian(sp, event_ndims=e)
for b, e, sp in zip(bijector, event_ndims, transformed_state_parts)
])
return fn
|
[
"Makes",
"a",
"function",
"which",
"applies",
"a",
"list",
"of",
"Bijectors",
"log_det_jacobian",
"s",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/transformed_kernel.py#L42-L53
|
[
"def",
"forward_log_det_jacobian_fn",
"(",
"bijector",
")",
":",
"if",
"not",
"mcmc_util",
".",
"is_list_like",
"(",
"bijector",
")",
":",
"bijector",
"=",
"[",
"bijector",
"]",
"def",
"fn",
"(",
"transformed_state_parts",
",",
"event_ndims",
")",
":",
"return",
"sum",
"(",
"[",
"b",
".",
"forward_log_det_jacobian",
"(",
"sp",
",",
"event_ndims",
"=",
"e",
")",
"for",
"b",
",",
"e",
",",
"sp",
"in",
"zip",
"(",
"bijector",
",",
"event_ndims",
",",
"transformed_state_parts",
")",
"]",
")",
"return",
"fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
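A minimal sketch of what the returned closure above computes, using two standard bijectors; the specific bijectors, values, and event ranks are illustrative only:

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfb = tfp.bijectors

bijectors = [tfb.Exp(), tfb.Softplus()]
state_parts = [tf.constant(0.5), tf.constant(-1.0)]
event_ndims = [0, 0]                 # scalar events for both state parts
# Per-part forward log-det-Jacobians are summed across state parts, exactly
# as the closure above does.
fldj = sum(
    b.forward_log_det_jacobian(sp, event_ndims=e)
    for b, e, sp in zip(bijectors, event_ndims, state_parts))
```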
test
|
forward_transform_fn
|
Makes a function which applies a list of Bijectors' `forward`s.
|
tensorflow_probability/python/mcmc/transformed_kernel.py
|
def forward_transform_fn(bijector):
"""Makes a function which applies a list of Bijectors' `forward`s."""
if not mcmc_util.is_list_like(bijector):
bijector = [bijector]
def fn(transformed_state_parts):
return [b.forward(sp) for b, sp in zip(bijector, transformed_state_parts)]
return fn
|
def forward_transform_fn(bijector):
"""Makes a function which applies a list of Bijectors' `forward`s."""
if not mcmc_util.is_list_like(bijector):
bijector = [bijector]
def fn(transformed_state_parts):
return [b.forward(sp) for b, sp in zip(bijector, transformed_state_parts)]
return fn
|
[
"Makes",
"a",
"function",
"which",
"applies",
"a",
"list",
"of",
"Bijectors",
"forward",
"s",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/transformed_kernel.py#L56-L64
|
[
"def",
"forward_transform_fn",
"(",
"bijector",
")",
":",
"if",
"not",
"mcmc_util",
".",
"is_list_like",
"(",
"bijector",
")",
":",
"bijector",
"=",
"[",
"bijector",
"]",
"def",
"fn",
"(",
"transformed_state_parts",
")",
":",
"return",
"[",
"b",
".",
"forward",
"(",
"sp",
")",
"for",
"b",
",",
"sp",
"in",
"zip",
"(",
"bijector",
",",
"transformed_state_parts",
")",
"]",
"return",
"fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
inverse_transform_fn
|
Makes a function which applies a list of Bijectors' `inverse`s.
|
tensorflow_probability/python/mcmc/transformed_kernel.py
|
def inverse_transform_fn(bijector):
"""Makes a function which applies a list of Bijectors' `inverse`s."""
if not mcmc_util.is_list_like(bijector):
bijector = [bijector]
def fn(state_parts):
return [b.inverse(sp)
for b, sp in zip(bijector, state_parts)]
return fn
|
def inverse_transform_fn(bijector):
"""Makes a function which applies a list of Bijectors' `inverse`s."""
if not mcmc_util.is_list_like(bijector):
bijector = [bijector]
def fn(state_parts):
return [b.inverse(sp)
for b, sp in zip(bijector, state_parts)]
return fn
|
[
"Makes",
"a",
"function",
"which",
"applies",
"a",
"list",
"of",
"Bijectors",
"inverse",
"s",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/transformed_kernel.py#L67-L74
|
[
"def",
"inverse_transform_fn",
"(",
"bijector",
")",
":",
"if",
"not",
"mcmc_util",
".",
"is_list_like",
"(",
"bijector",
")",
":",
"bijector",
"=",
"[",
"bijector",
"]",
"def",
"fn",
"(",
"state_parts",
")",
":",
"return",
"[",
"b",
".",
"inverse",
"(",
"sp",
")",
"for",
"b",
",",
"sp",
"in",
"zip",
"(",
"bijector",
",",
"state_parts",
")",
"]",
"return",
"fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
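The `forward` / `inverse` pair in the two records above is what lets the transformed kernel move in unconstrained space. A tiny round-trip sketch with a single bijector; the bijector choice and value are illustrative only:

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfb = tfp.bijectors

bijector = tfb.Softplus()                        # maps R onto (0, inf)
unconstrained = tf.constant(-2.0)                # what the inner kernel sees
constrained = bijector.forward(unconstrained)    # ~0.127, the user-facing state
recovered = bijector.inverse(constrained)        # back to ~-2.0
```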
test
|
TransformedTransitionKernel.one_step
|
Runs one iteration of the Transformed Kernel.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s
representing the current state(s) of the Markov chain(s),
_after_ application of `bijector.forward`. The first `r`
dimensions index independent chains,
`r = tf.rank(target_log_prob_fn(*current_state))`. The
`inner_kernel.one_step` does not actually use `current_state`,
rather it takes as input
`previous_kernel_results.transformed_state` (because
`TransformedTransitionKernel` creates a copy of the input
inner_kernel with a modified `target_log_prob_fn` which
internally applies the `bijector.forward`).
previous_kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from previous calls to this function (or from the
`bootstrap_results` function.)
Returns:
next_state: Tensor or Python list of `Tensor`s representing the state(s)
of the Markov chain(s) after taking exactly one step. Has same type and
shape as `current_state`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
|
tensorflow_probability/python/mcmc/transformed_kernel.py
|
def one_step(self, current_state, previous_kernel_results):
"""Runs one iteration of the Transformed Kernel.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s
representing the current state(s) of the Markov chain(s),
_after_ application of `bijector.forward`. The first `r`
dimensions index independent chains,
`r = tf.rank(target_log_prob_fn(*current_state))`. The
`inner_kernel.one_step` does not actually use `current_state`,
rather it takes as input
`previous_kernel_results.transformed_state` (because
`TransformedTransitionKernel` creates a copy of the input
inner_kernel with a modified `target_log_prob_fn` which
internally applies the `bijector.forward`).
previous_kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from previous calls to this function (or from the
`bootstrap_results` function.)
Returns:
next_state: Tensor or Python list of `Tensor`s representing the state(s)
of the Markov chain(s) after taking exactly one step. Has same type and
shape as `current_state`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
"""
with tf.compat.v1.name_scope(
name=mcmc_util.make_name(self.name, 'transformed_kernel', 'one_step'),
values=[previous_kernel_results]):
transformed_next_state, kernel_results = self._inner_kernel.one_step(
previous_kernel_results.transformed_state,
previous_kernel_results.inner_results)
transformed_next_state_parts = (
transformed_next_state
if mcmc_util.is_list_like(transformed_next_state) else
[transformed_next_state])
next_state_parts = self._forward_transform(transformed_next_state_parts)
next_state = (
next_state_parts if mcmc_util.is_list_like(transformed_next_state)
else next_state_parts[0])
kernel_results = TransformedTransitionKernelResults(
transformed_state=transformed_next_state,
inner_results=kernel_results)
return next_state, kernel_results
|
def one_step(self, current_state, previous_kernel_results):
"""Runs one iteration of the Transformed Kernel.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s
representing the current state(s) of the Markov chain(s),
_after_ application of `bijector.forward`. The first `r`
dimensions index independent chains,
`r = tf.rank(target_log_prob_fn(*current_state))`. The
`inner_kernel.one_step` does not actually use `current_state`,
rather it takes as input
`previous_kernel_results.transformed_state` (because
`TransformedTransitionKernel` creates a copy of the input
inner_kernel with a modified `target_log_prob_fn` which
internally applies the `bijector.forward`).
previous_kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from previous calls to this function (or from the
`bootstrap_results` function.)
Returns:
next_state: Tensor or Python list of `Tensor`s representing the state(s)
of the Markov chain(s) after taking exactly one step. Has same type and
shape as `current_state`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
"""
with tf.compat.v1.name_scope(
name=mcmc_util.make_name(self.name, 'transformed_kernel', 'one_step'),
values=[previous_kernel_results]):
transformed_next_state, kernel_results = self._inner_kernel.one_step(
previous_kernel_results.transformed_state,
previous_kernel_results.inner_results)
transformed_next_state_parts = (
transformed_next_state
if mcmc_util.is_list_like(transformed_next_state) else
[transformed_next_state])
next_state_parts = self._forward_transform(transformed_next_state_parts)
next_state = (
next_state_parts if mcmc_util.is_list_like(transformed_next_state)
else next_state_parts[0])
kernel_results = TransformedTransitionKernelResults(
transformed_state=transformed_next_state,
inner_results=kernel_results)
return next_state, kernel_results
|
[
"Runs",
"one",
"iteration",
"of",
"the",
"Transformed",
"Kernel",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/transformed_kernel.py#L230-L273
|
[
"def",
"one_step",
"(",
"self",
",",
"current_state",
",",
"previous_kernel_results",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
"=",
"mcmc_util",
".",
"make_name",
"(",
"self",
".",
"name",
",",
"'transformed_kernel'",
",",
"'one_step'",
")",
",",
"values",
"=",
"[",
"previous_kernel_results",
"]",
")",
":",
"transformed_next_state",
",",
"kernel_results",
"=",
"self",
".",
"_inner_kernel",
".",
"one_step",
"(",
"previous_kernel_results",
".",
"transformed_state",
",",
"previous_kernel_results",
".",
"inner_results",
")",
"transformed_next_state_parts",
"=",
"(",
"transformed_next_state",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"transformed_next_state",
")",
"else",
"[",
"transformed_next_state",
"]",
")",
"next_state_parts",
"=",
"self",
".",
"_forward_transform",
"(",
"transformed_next_state_parts",
")",
"next_state",
"=",
"(",
"next_state_parts",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"transformed_next_state",
")",
"else",
"next_state_parts",
"[",
"0",
"]",
")",
"kernel_results",
"=",
"TransformedTransitionKernelResults",
"(",
"transformed_state",
"=",
"transformed_next_state",
",",
"inner_results",
"=",
"kernel_results",
")",
"return",
"next_state",
",",
"kernel_results"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
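A minimal end-to-end sketch of the kernel in the record above, assuming the `TransformedTransitionKernel` and `HamiltonianMonteCarlo` constructor signatures at this commit; the target and hyperparameters are illustrative:

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd, tfb = tfp.distributions, tfp.bijectors

# Sample a positive-valued target: HMC proposes in unconstrained log-space,
# and the transformed kernel maps proposals back through Exp.
target = tfd.Gamma(concentration=2., rate=3.)
kernel = tfp.mcmc.TransformedTransitionKernel(
    inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=target.log_prob,
        step_size=0.1, num_leapfrog_steps=3),
    bijector=tfb.Exp())

state = tf.constant(1.)
results = kernel.bootstrap_results(state)        # applies bijector.inverse once
state, results = kernel.one_step(state, results)
# `state` lives in the original (positive) space; the inner HMC only ever
# reads `results.transformed_state`, which stays in log-space.
```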
test
|
TransformedTransitionKernel.bootstrap_results
|
Returns an object with the same type as returned by `one_step`.
Unlike other `TransitionKernel`s,
`TransformedTransitionKernel.bootstrap_results` has the option of
initializing the `TransformedTransitionKernelResults` from either an initial
state, e.g., requiring computing `bijector.inverse(init_state)`, or
directly from `transformed_init_state`, i.e., a `Tensor` or list
of `Tensor`s which is interpreted as the `bijector.inverse`
transformed state.
Args:
  init_state: `Tensor` or Python `list` of `Tensor`s representing the
state(s) of the Markov chain(s). Must specify `init_state` or
`transformed_init_state` but not both.
transformed_init_state: `Tensor` or Python `list` of `Tensor`s
    representing the state(s) of the Markov chain(s). Must specify
`init_state` or `transformed_init_state` but not both.
Returns:
kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of
`Tensor`s representing internal calculations made within this function.
Raises:
ValueError: if `inner_kernel` results doesn't contain the member
"target_log_prob".
#### Examples
To use `transformed_init_state` in the context of
`tfp.mcmc.sample_chain`, you need to explicitly pass the
`previous_kernel_results`, e.g.,
```python
transformed_kernel = tfp.mcmc.TransformedTransitionKernel(...)
init_state = ... # Doesn't matter.
transformed_init_state = ... # Does matter.
results, _ = tfp.mcmc.sample_chain(
num_results=...,
current_state=init_state,
previous_kernel_results=transformed_kernel.bootstrap_results(
transformed_init_state=transformed_init_state),
kernel=transformed_kernel)
```
|
tensorflow_probability/python/mcmc/transformed_kernel.py
|
def bootstrap_results(self, init_state=None, transformed_init_state=None):
"""Returns an object with the same type as returned by `one_step`.
Unlike other `TransitionKernel`s,
`TransformedTransitionKernel.bootstrap_results` has the option of
initializing the `TransformedTransitionKernelResults` from either an initial
state, e.g., requiring computing `bijector.inverse(init_state)`, or
directly from `transformed_init_state`, i.e., a `Tensor` or list
of `Tensor`s which is interpreted as the `bijector.inverse`
transformed state.
Args:
  init_state: `Tensor` or Python `list` of `Tensor`s representing the
state(s) of the Markov chain(s). Must specify `init_state` or
`transformed_init_state` but not both.
transformed_init_state: `Tensor` or Python `list` of `Tensor`s
    representing the state(s) of the Markov chain(s). Must specify
`init_state` or `transformed_init_state` but not both.
Returns:
kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of
`Tensor`s representing internal calculations made within this function.
Raises:
ValueError: if `inner_kernel` results doesn't contain the member
"target_log_prob".
#### Examples
To use `transformed_init_state` in the context of
`tfp.mcmc.sample_chain`, you need to explicitly pass the
`previous_kernel_results`, e.g.,
```python
transformed_kernel = tfp.mcmc.TransformedTransitionKernel(...)
init_state = ... # Doesn't matter.
transformed_init_state = ... # Does matter.
results, _ = tfp.mcmc.sample_chain(
num_results=...,
current_state=init_state,
previous_kernel_results=transformed_kernel.bootstrap_results(
transformed_init_state=transformed_init_state),
kernel=transformed_kernel)
```
"""
if (init_state is None) == (transformed_init_state is None):
raise ValueError('Must specify exactly one of `init_state` '
'or `transformed_init_state`.')
with tf.compat.v1.name_scope(
name=mcmc_util.make_name(self.name, 'transformed_kernel',
'bootstrap_results'),
values=[init_state, transformed_init_state]):
if transformed_init_state is None:
init_state_parts = (init_state if mcmc_util.is_list_like(init_state)
else [init_state])
transformed_init_state_parts = self._inverse_transform(init_state_parts)
transformed_init_state = (
transformed_init_state_parts if mcmc_util.is_list_like(init_state)
else transformed_init_state_parts[0])
else:
if mcmc_util.is_list_like(transformed_init_state):
transformed_init_state = [
tf.convert_to_tensor(value=s, name='transformed_init_state')
for s in transformed_init_state
]
else:
transformed_init_state = tf.convert_to_tensor(
value=transformed_init_state, name='transformed_init_state')
kernel_results = TransformedTransitionKernelResults(
transformed_state=transformed_init_state,
inner_results=self._inner_kernel.bootstrap_results(
transformed_init_state))
return kernel_results
|
def bootstrap_results(self, init_state=None, transformed_init_state=None):
"""Returns an object with the same type as returned by `one_step`.
Unlike other `TransitionKernel`s,
`TransformedTransitionKernel.bootstrap_results` has the option of
initializing the `TransformedTransitionKernelResults` from either an initial
state, e.g., requiring computing `bijector.inverse(init_state)`, or
directly from `transformed_init_state`, i.e., a `Tensor` or list
of `Tensor`s which is interpreted as the `bijector.inverse`
transformed state.
Args:
init_state: `Tensor` or Python `list` of `Tensor`s representing the
state(s) of the Markov chain(s). Must specify `init_state` or
`transformed_init_state` but not both.
transformed_init_state: `Tensor` or Python `list` of `Tensor`s
representing the state(s) of the Markov chain(s). Must specify
`init_state` or `transformed_init_state` but not both.
Returns:
kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of
`Tensor`s representing internal calculations made within this function.
Raises:
ValueError: if `inner_kernel` results doesn't contain the member
"target_log_prob".
#### Examples
To use `transformed_init_state` in the context of
`tfp.mcmc.sample_chain`, you need to explicitly pass the
`previous_kernel_results`, e.g.,
```python
transformed_kernel = tfp.mcmc.TransformedTransitionKernel(...)
init_state = ... # Doesn't matter.
transformed_init_state = ... # Does matter.
results, _ = tfp.mcmc.sample_chain(
num_results=...,
current_state=init_state,
previous_kernel_results=transformed_kernel.bootstrap_results(
transformed_init_state=transformed_init_state),
kernel=transformed_kernel)
```
"""
if (init_state is None) == (transformed_init_state is None):
raise ValueError('Must specify exactly one of `init_state` '
'or `transformed_init_state`.')
with tf.compat.v1.name_scope(
name=mcmc_util.make_name(self.name, 'transformed_kernel',
'bootstrap_results'),
values=[init_state, transformed_init_state]):
if transformed_init_state is None:
init_state_parts = (init_state if mcmc_util.is_list_like(init_state)
else [init_state])
transformed_init_state_parts = self._inverse_transform(init_state_parts)
transformed_init_state = (
transformed_init_state_parts if mcmc_util.is_list_like(init_state)
else transformed_init_state_parts[0])
else:
if mcmc_util.is_list_like(transformed_init_state):
transformed_init_state = [
tf.convert_to_tensor(value=s, name='transformed_init_state')
for s in transformed_init_state
]
else:
transformed_init_state = tf.convert_to_tensor(
value=transformed_init_state, name='transformed_init_state')
kernel_results = TransformedTransitionKernelResults(
transformed_state=transformed_init_state,
inner_results=self._inner_kernel.bootstrap_results(
transformed_init_state))
return kernel_results
|
[
"Returns",
"an",
"object",
"with",
"the",
"same",
"type",
"as",
"returned",
"by",
"one_step",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/transformed_kernel.py#L275-L347
|
[
"def",
"bootstrap_results",
"(",
"self",
",",
"init_state",
"=",
"None",
",",
"transformed_init_state",
"=",
"None",
")",
":",
"if",
"(",
"init_state",
"is",
"None",
")",
"==",
"(",
"transformed_init_state",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"'Must specify exactly one of `init_state` '",
"'or `transformed_init_state`.'",
")",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
"=",
"mcmc_util",
".",
"make_name",
"(",
"self",
".",
"name",
",",
"'transformed_kernel'",
",",
"'bootstrap_results'",
")",
",",
"values",
"=",
"[",
"init_state",
",",
"transformed_init_state",
"]",
")",
":",
"if",
"transformed_init_state",
"is",
"None",
":",
"init_state_parts",
"=",
"(",
"init_state",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"init_state",
")",
"else",
"[",
"init_state",
"]",
")",
"transformed_init_state_parts",
"=",
"self",
".",
"_inverse_transform",
"(",
"init_state_parts",
")",
"transformed_init_state",
"=",
"(",
"transformed_init_state_parts",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"init_state",
")",
"else",
"transformed_init_state_parts",
"[",
"0",
"]",
")",
"else",
":",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"transformed_init_state",
")",
":",
"transformed_init_state",
"=",
"[",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"s",
",",
"name",
"=",
"'transformed_init_state'",
")",
"for",
"s",
"in",
"transformed_init_state",
"]",
"else",
":",
"transformed_init_state",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"transformed_init_state",
",",
"name",
"=",
"'transformed_init_state'",
")",
"kernel_results",
"=",
"TransformedTransitionKernelResults",
"(",
"transformed_state",
"=",
"transformed_init_state",
",",
"inner_results",
"=",
"self",
".",
"_inner_kernel",
".",
"bootstrap_results",
"(",
"transformed_init_state",
")",
")",
"return",
"kernel_results"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
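As a companion to the `bootstrap_results` entry above, here is a minimal, hedged sketch of the pattern its docstring describes: building `previous_kernel_results` from a transformed state and passing it to `tfp.mcmc.sample_chain`. The Gamma target, the `Exp` bijector, and the HMC step-size settings are illustrative assumptions, not part of the original entry.

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
tfb = tfp.bijectors

# Target with positive support; sampling runs in unconstrained space via Exp.
target = tfd.Gamma(concentration=2., rate=3.)

kernel = tfp.mcmc.TransformedTransitionKernel(
    inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=target.log_prob,
        step_size=0.1,
        num_leapfrog_steps=3),
    bijector=tfb.Exp())

# Initialize directly in the transformed (unconstrained) space, skipping the
# `bijector.inverse(init_state)` computation an `init_state` would require.
previous_kernel_results = kernel.bootstrap_results(
    transformed_init_state=tf.zeros([]))

samples, _ = tfp.mcmc.sample_chain(
    num_results=200,
    current_state=tf.ones([]),  # Per the docstring above, this doesn't matter.
    previous_kernel_results=previous_kernel_results,
    kernel=kernel)
```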
test
|
val_where
|
Like tf.where but works on namedtuples.
|
tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py
|
def val_where(cond, tval, fval):
"""Like tf.where but works on namedtuples."""
if isinstance(tval, tf.Tensor):
return tf.where(cond, tval, fval)
elif isinstance(tval, tuple):
cls = type(tval)
return cls(*(val_where(cond, t, f) for t, f in zip(tval, fval)))
else:
raise TypeError('`tval` must be a Tensor or namedtuple.')
|
def val_where(cond, tval, fval):
"""Like tf.where but works on namedtuples."""
if isinstance(tval, tf.Tensor):
return tf.where(cond, tval, fval)
elif isinstance(tval, tuple):
cls = type(tval)
return cls(*(val_where(cond, t, f) for t, f in zip(tval, fval)))
else:
raise TypeError('`tval` must be a Tensor or namedtuple.')
|
[
"Like",
"tf",
".",
"where",
"but",
"works",
"on",
"namedtuples",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L39-L47
|
[
"def",
"val_where",
"(",
"cond",
",",
"tval",
",",
"fval",
")",
":",
"if",
"isinstance",
"(",
"tval",
",",
"tf",
".",
"Tensor",
")",
":",
"return",
"tf",
".",
"where",
"(",
"cond",
",",
"tval",
",",
"fval",
")",
"elif",
"isinstance",
"(",
"tval",
",",
"tuple",
")",
":",
"cls",
"=",
"type",
"(",
"tval",
")",
"return",
"cls",
"(",
"*",
"(",
"val_where",
"(",
"cond",
",",
"t",
",",
"f",
")",
"for",
"t",
",",
"f",
"in",
"zip",
"(",
"tval",
",",
"fval",
")",
")",
")",
"else",
":",
"raise",
"Exception",
"(",
"TypeError",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
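The `val_where` entry above selects between two structures element-wise. A small sketch of that field-by-field semantics, using an illustrative `ValAndGrad` namedtuple and a standalone `select` helper rather than the library's internal types:

```python
import collections
import tensorflow as tf

ValAndGrad = collections.namedtuple('ValAndGrad', ['x', 'f', 'df'])

def select(cond, tval, fval):
  # Tensors go straight to tf.where; namedtuples are handled field-by-field,
  # mirroring the behaviour documented for `val_where`.
  if isinstance(tval, tf.Tensor):
    return tf.where(cond, tval, fval)
  return type(tval)(*(select(cond, t, f) for t, f in zip(tval, fval)))

cond = tf.constant([True, False, True])
a = ValAndGrad(x=tf.constant([0., 1., 2.]),
               f=tf.constant([5., 6., 7.]),
               df=tf.constant([-1., -2., -3.]))
b = ValAndGrad(x=tf.zeros([3]), f=tf.zeros([3]), df=tf.zeros([3]))

picked = select(cond, a, b)
# picked.x -> [0., 0., 2.]; picked.f -> [5., 0., 7.]; picked.df -> [-1., 0., -3.]
```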
test
|
secant2
|
Performs the secant square procedure of Hager Zhang.
Given an interval that brackets a root, this procedure performs an update of
both end points using two intermediate points generated using the secant
interpolation. For details see the steps S1-S4 in [Hager and Zhang (2006)][2].
The interval [a, b] must satisfy the opposite slope conditions described in
the documentation for `update`.
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns an object that can be converted to a namedtuple.
The namedtuple should have fields 'f' and 'df' that correspond to scalar
tensors of real dtype containing the value of the function and its
derivative at that point. The other namedtuple fields, if present,
should be tensors or sequences (possibly nested) of tensors.
In the usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and the fields 'f' and 'df' in the returned
namedtuple should each be a tensor of shape [n], with the corresponding
function values and derivatives at the input points.
val_0: A namedtuple, as returned by value_and_gradients_function evaluated
at `0.`.
search_interval: A namedtuple describing the current search interval,
must include the fields:
- converged: Boolean `Tensor` of shape [n], indicating batch members
where search has already converged. Interval for these batch members
won't be modified.
- failed: Boolean `Tensor` of shape [n], indicating batch members
where search has already failed. Interval for these batch members
won't be modified.
- iterations: Scalar int32 `Tensor`. Number of line search iterations
so far.
- func_evals: Scalar int32 `Tensor`. Number of function evaluations
so far.
- left: A namedtuple, as returned by value_and_gradients_function,
of the left end point of the current search interval.
- right: A namedtuple, as returned by value_and_gradients_function,
of the right end point of the current search interval.
f_lim: Scalar `Tensor` of real dtype. The function value threshold for
the approximate Wolfe conditions to be checked.
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to 'delta' in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager and Zhang (2006)][2].
name: (Optional) Python str. The name prefixed to the ops created by this
function. If not supplied, the default name 'secant2' is used.
Returns:
A namedtuple containing the following fields.
active: A boolean `Tensor` of shape [n]. Used internally by the procedure
to indicate batch members on which there is work left to do.
converged: A boolean `Tensor` of shape [n]. Indicates whether a point
satisfying the Wolfe conditions has been found. If this is True, the
interval will be degenerate (i.e. `left` and `right` below will be
identical).
failed: A boolean `Tensor` of shape [n]. Indicates if invalid function or
gradient values were encountered (i.e. infinity or NaNs).
num_evals: A scalar int32 `Tensor`. The total number of function
evaluations made.
left: Return value of value_and_gradients_function at the updated left
end point of the interval.
right: Return value of value_and_gradients_function at the updated right
end point of the interval.
|
tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py
|
def secant2(value_and_gradients_function,
val_0,
search_interval,
f_lim,
sufficient_decrease_param=0.1,
curvature_param=0.9,
name=None):
"""Performs the secant square procedure of Hager Zhang.
Given an interval that brackets a root, this procedure performs an update of
both end points using two intermediate points generated using the secant
interpolation. For details see the steps S1-S4 in [Hager and Zhang (2006)][2].
The interval [a, b] must satisfy the opposite slope conditions described in
the documentation for `update`.
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns an object that can be converted to a namedtuple.
The namedtuple should have fields 'f' and 'df' that correspond to scalar
tensors of real dtype containing the value of the function and its
derivative at that point. The other namedtuple fields, if present,
should be tensors or sequences (possibly nested) of tensors.
In the usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and the fields 'f' and 'df' in the returned
namedtuple should each be a tensor of shape [n], with the corresponding
function values and derivatives at the input points.
val_0: A namedtuple, as returned by value_and_gradients_function evaluated
at `0.`.
search_interval: A namedtuple describing the current search interval,
must include the fields:
- converged: Boolean `Tensor` of shape [n], indicating batch members
where search has already converged. Interval for these batch members
won't be modified.
- failed: Boolean `Tensor` of shape [n], indicating batch members
where search has already failed. Interval for these batch members
won't be modified.
- iterations: Scalar int32 `Tensor`. Number of line search iterations
so far.
- func_evals: Scalar int32 `Tensor`. Number of function evaluations
so far.
- left: A namedtuple, as returned by value_and_gradients_function,
of the left end point of the current search interval.
- right: A namedtuple, as returned by value_and_gradients_function,
of the right end point of the current search interval.
f_lim: Scalar `Tensor` of real dtype. The function value threshold for
the approximate Wolfe conditions to be checked.
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to 'delta' in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager and Zhang (2006)][2].
name: (Optional) Python str. The name prefixed to the ops created by this
function. If not supplied, the default name 'secant2' is used.
Returns:
A namedtuple containing the following fields.
active: A boolean `Tensor` of shape [n]. Used internally by the procedure
to indicate batch members on which there is work left to do.
converged: A boolean `Tensor` of shape [n]. Indicates whether a point
satisfying the Wolfe conditions has been found. If this is True, the
interval will be degenerate (i.e. `left` and `right` below will be
identical).
failed: A boolean `Tensor` of shape [n]. Indicates if invalid function or
gradient values were encountered (i.e. infinity or NaNs).
num_evals: A scalar int32 `Tensor`. The total number of function
evaluations made.
left: Return value of value_and_gradients_function at the updated left
end point of the interval.
right: Return value of value_and_gradients_function at the updated right
end point of the interval.
"""
with tf.compat.v1.name_scope(name, 'secant2', [
val_0, search_interval, f_lim, sufficient_decrease_param,
curvature_param]):
# This will always be s.t. left <= c <= right
val_c = value_and_gradients_function(
_secant(search_interval.left, search_interval.right))
failed = search_interval.failed | ~is_finite(val_c)
converged = search_interval.converged | (~failed & _satisfies_wolfe(
val_0, val_c, f_lim, sufficient_decrease_param, curvature_param))
new_converged = converged & ~search_interval.converged
val_left = val_where(new_converged, val_c, search_interval.left)
val_right = val_where(new_converged, val_c, search_interval.right)
initial_args = _Secant2Result(
active=~failed & ~converged,
converged=converged,
failed=failed,
num_evals=search_interval.func_evals + 1,
left=val_left,
right=val_right)
def _apply_secant2_inner():
return _secant2_inner(
value_and_gradients_function,
initial_args,
val_0,
val_c,
f_lim,
sufficient_decrease_param,
curvature_param)
return prefer_static.cond(
tf.reduce_any(input_tensor=initial_args.active),
_apply_secant2_inner,
lambda: initial_args)
|
def secant2(value_and_gradients_function,
val_0,
search_interval,
f_lim,
sufficient_decrease_param=0.1,
curvature_param=0.9,
name=None):
"""Performs the secant square procedure of Hager Zhang.
Given an interval that brackets a root, this procedure performs an update of
both end points using two intermediate points generated using the secant
interpolation. For details see the steps S1-S4 in [Hager and Zhang (2006)][2].
The interval [a, b] must satisfy the opposite slope conditions described in
the documentation for `update`.
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns an object that can be converted to a namedtuple.
The namedtuple should have fields 'f' and 'df' that correspond to scalar
tensors of real dtype containing the value of the function and its
derivative at that point. The other namedtuple fields, if present,
should be tensors or sequences (possibly nested) of tensors.
In the usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and the fields 'f' and 'df' in the returned
namedtuple should each be a tensor of shape [n], with the corresponding
function values and derivatives at the input points.
val_0: A namedtuple, as returned by value_and_gradients_function evaluated
at `0.`.
search_interval: A namedtuple describing the current search interval,
must include the fields:
- converged: Boolean `Tensor` of shape [n], indicating batch members
where search has already converged. Interval for these batch members
won't be modified.
- failed: Boolean `Tensor` of shape [n], indicating batch members
where search has already failed. Interval for these batch members
won't be modified.
- iterations: Scalar int32 `Tensor`. Number of line search iterations
so far.
- func_evals: Scalar int32 `Tensor`. Number of function evaluations
so far.
- left: A namedtuple, as returned by value_and_gradients_function,
of the left end point of the current search interval.
- right: A namedtuple, as returned by value_and_gradients_function,
of the right end point of the current search interval.
f_lim: Scalar `Tensor` of real dtype. The function value threshold for
the approximate Wolfe conditions to be checked.
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to 'delta' in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager and Zhang (2006)][2].
name: (Optional) Python str. The name prefixed to the ops created by this
function. If not supplied, the default name 'secant2' is used.
Returns:
A namedtuple containing the following fields.
active: A boolean `Tensor` of shape [n]. Used internally by the procedure
to indicate batch members on which there is work left to do.
converged: A boolean `Tensor` of shape [n]. Indicates whether a point
satisfying the Wolfe conditions has been found. If this is True, the
interval will be degenerate (i.e. `left` and `right` below will be
identical).
failed: A boolean `Tensor` of shape [n]. Indicates if invalid function or
gradient values were encountered (i.e. infinity or NaNs).
num_evals: A scalar int32 `Tensor`. The total number of function
evaluations made.
left: Return value of value_and_gradients_function at the updated left
end point of the interval.
right: Return value of value_and_gradients_function at the updated right
end point of the interval.
"""
with tf.compat.v1.name_scope(name, 'secant2', [
val_0, search_interval, f_lim, sufficient_decrease_param,
curvature_param]):
# This will always be s.t. left <= c <= right
val_c = value_and_gradients_function(
_secant(search_interval.left, search_interval.right))
failed = search_interval.failed | ~is_finite(val_c)
converged = search_interval.converged | (~failed & _satisfies_wolfe(
val_0, val_c, f_lim, sufficient_decrease_param, curvature_param))
new_converged = converged & ~search_interval.converged
val_left = val_where(new_converged, val_c, search_interval.left)
val_right = val_where(new_converged, val_c, search_interval.right)
initial_args = _Secant2Result(
active=~failed & ~converged,
converged=converged,
failed=failed,
num_evals=search_interval.func_evals + 1,
left=val_left,
right=val_right)
def _apply_secant2_inner():
return _secant2_inner(
value_and_gradients_function,
initial_args,
val_0,
val_c,
f_lim,
sufficient_decrease_param,
curvature_param)
return prefer_static.cond(
tf.reduce_any(input_tensor=initial_args.active),
_apply_secant2_inner,
lambda: initial_args)
|
[
"Performs",
"the",
"secant",
"square",
"procedure",
"of",
"Hager",
"Zhang",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L60-L174
|
[
"def",
"secant2",
"(",
"value_and_gradients_function",
",",
"val_0",
",",
"search_interval",
",",
"f_lim",
",",
"sufficient_decrease_param",
"=",
"0.1",
",",
"curvature_param",
"=",
"0.9",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'secant2'",
",",
"[",
"val_0",
",",
"search_interval",
",",
"f_lim",
",",
"sufficient_decrease_param",
",",
"curvature_param",
"]",
")",
":",
"# This will always be s.t. left <= c <= right",
"val_c",
"=",
"value_and_gradients_function",
"(",
"_secant",
"(",
"search_interval",
".",
"left",
",",
"search_interval",
".",
"right",
")",
")",
"failed",
"=",
"search_interval",
".",
"failed",
"|",
"~",
"is_finite",
"(",
"val_c",
")",
"converged",
"=",
"search_interval",
".",
"converged",
"|",
"(",
"~",
"failed",
"&",
"_satisfies_wolfe",
"(",
"val_0",
",",
"val_c",
",",
"f_lim",
",",
"sufficient_decrease_param",
",",
"curvature_param",
")",
")",
"new_converged",
"=",
"converged",
"&",
"~",
"search_interval",
".",
"converged",
"val_left",
"=",
"val_where",
"(",
"new_converged",
",",
"val_c",
",",
"search_interval",
".",
"left",
")",
"val_right",
"=",
"val_where",
"(",
"new_converged",
",",
"val_c",
",",
"search_interval",
".",
"right",
")",
"initial_args",
"=",
"_Secant2Result",
"(",
"active",
"=",
"~",
"failed",
"&",
"~",
"converged",
",",
"converged",
"=",
"converged",
",",
"failed",
"=",
"failed",
",",
"num_evals",
"=",
"search_interval",
".",
"func_evals",
"+",
"1",
",",
"left",
"=",
"val_left",
",",
"right",
"=",
"val_right",
")",
"def",
"_apply_secant2_inner",
"(",
")",
":",
"return",
"_secant2_inner",
"(",
"value_and_gradients_function",
",",
"initial_args",
",",
"val_0",
",",
"val_c",
",",
"f_lim",
",",
"sufficient_decrease_param",
",",
"curvature_param",
")",
"return",
"prefer_static",
".",
"cond",
"(",
"tf",
".",
"reduce_any",
"(",
"input_tensor",
"=",
"initial_args",
".",
"active",
")",
",",
"_apply_secant2_inner",
",",
"lambda",
":",
"initial_args",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
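The `secant2` docstring above refers to intermediate points generated by secant interpolation. As a hedged aside, the standard secant step on the derivative (the root of the line through (a, df(a)) and (b, df(b))) can be sketched as below; this mirrors the role the internal `_secant` helper plays, but is not claimed to be its exact implementation.

```python
import tensorflow as tf

def secant_point(a, dfa, b, dfb):
  # Root of the secant line through (a, dfa) and (b, dfb), used as the trial
  # point when shrinking a bracketing interval around a zero of the derivative.
  return (a * dfb - b * dfa) / (dfb - dfa)

# For f(x) = (x - 2)**2 the derivative is linear, so one secant step lands
# exactly on the minimizer x = 2.
a, b = tf.constant(0.), tf.constant(5.)
dfa, dfb = 2. * (a - 2.), 2. * (b - 2.)
print(secant_point(a, dfa, b, dfb))  # ==> 2.0
```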
test
|
_secant2_inner
|
Helper function for secant square.
|
tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py
|
def _secant2_inner(value_and_gradients_function,
initial_args,
val_0,
val_c,
f_lim,
sufficient_decrease_param,
curvature_param):
"""Helper function for secant square."""
# Apply the `update` function on active branch members to squeeze their
# bracketing interval.
update_result = update(value_and_gradients_function,
initial_args.left,
initial_args.right,
val_c,
f_lim,
active=initial_args.active)
# Update active and failed flags, update left/right on non-failed entries.
active = initial_args.active & ~update_result.failed
failed = initial_args.failed | update_result.failed
val_left = val_where(active, update_result.left, initial_args.left)
val_right = val_where(active, update_result.right, initial_args.right)
# Check if new `c` points should be generated.
updated_left = active & tf.equal(val_left.x, val_c.x)
updated_right = active & tf.equal(val_right.x, val_c.x)
is_new = updated_left | updated_right
next_c = tf.where(updated_left,
_secant(initial_args.left, val_left),
val_c.x)
next_c = tf.where(updated_right,
_secant(initial_args.right, val_right),
next_c)
in_range = (val_left.x <= next_c) & (next_c <= val_right.x)
# Figure out if an extra function evaluation is needed for new `c` points.
needs_extra_eval = tf.reduce_any(input_tensor=in_range & is_new)
num_evals = initial_args.num_evals + update_result.num_evals
num_evals = num_evals + tf.cast(needs_extra_eval, num_evals.dtype)
next_args = _Secant2Result(
active=active & in_range, # No longer active if `c` is out of range.
converged=initial_args.converged,
failed=failed,
num_evals=num_evals,
left=val_left,
right=val_right)
def _apply_inner_update():
next_val_c = prefer_static.cond(
needs_extra_eval,
(lambda: value_and_gradients_function(next_c)),
(lambda: val_c))
return _secant2_inner_update(
value_and_gradients_function, next_args, val_0, next_val_c, f_lim,
sufficient_decrease_param, curvature_param)
return prefer_static.cond(
tf.reduce_any(input_tensor=next_args.active),
_apply_inner_update,
lambda: next_args)
|
def _secant2_inner(value_and_gradients_function,
initial_args,
val_0,
val_c,
f_lim,
sufficient_decrease_param,
curvature_param):
"""Helper function for secant square."""
# Apply the `update` function on active branch members to squeeze their
# bracketing interval.
update_result = update(value_and_gradients_function,
initial_args.left,
initial_args.right,
val_c,
f_lim,
active=initial_args.active)
# Update active and failed flags, update left/right on non-failed entries.
active = initial_args.active & ~update_result.failed
failed = initial_args.failed | update_result.failed
val_left = val_where(active, update_result.left, initial_args.left)
val_right = val_where(active, update_result.right, initial_args.right)
# Check if new `c` points should be generated.
updated_left = active & tf.equal(val_left.x, val_c.x)
updated_right = active & tf.equal(val_right.x, val_c.x)
is_new = updated_left | updated_right
next_c = tf.where(updated_left,
_secant(initial_args.left, val_left),
val_c.x)
next_c = tf.where(updated_right,
_secant(initial_args.right, val_right),
next_c)
in_range = (val_left.x <= next_c) & (next_c <= val_right.x)
# Figure out if an extra function evaluation is needed for new `c` points.
needs_extra_eval = tf.reduce_any(input_tensor=in_range & is_new)
num_evals = initial_args.num_evals + update_result.num_evals
num_evals = num_evals + tf.cast(needs_extra_eval, num_evals.dtype)
next_args = _Secant2Result(
active=active & in_range, # No longer active if `c` is out of range.
converged=initial_args.converged,
failed=failed,
num_evals=num_evals,
left=val_left,
right=val_right)
def _apply_inner_update():
next_val_c = prefer_static.cond(
needs_extra_eval,
(lambda: value_and_gradients_function(next_c)),
(lambda: val_c))
return _secant2_inner_update(
value_and_gradients_function, next_args, val_0, next_val_c, f_lim,
sufficient_decrease_param, curvature_param)
return prefer_static.cond(
tf.reduce_any(input_tensor=next_args.active),
_apply_inner_update,
lambda: next_args)
|
[
"Helper",
"function",
"for",
"secant",
"square",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L177-L238
|
[
"def",
"_secant2_inner",
"(",
"value_and_gradients_function",
",",
"initial_args",
",",
"val_0",
",",
"val_c",
",",
"f_lim",
",",
"sufficient_decrease_param",
",",
"curvature_param",
")",
":",
"# Apply the `update` function on active branch members to squeeze their",
"# bracketing interval.",
"update_result",
"=",
"update",
"(",
"value_and_gradients_function",
",",
"initial_args",
".",
"left",
",",
"initial_args",
".",
"right",
",",
"val_c",
",",
"f_lim",
",",
"active",
"=",
"initial_args",
".",
"active",
")",
"# Update active and failed flags, update left/right on non-failed entries.",
"active",
"=",
"initial_args",
".",
"active",
"&",
"~",
"update_result",
".",
"failed",
"failed",
"=",
"initial_args",
".",
"failed",
"|",
"update_result",
".",
"failed",
"val_left",
"=",
"val_where",
"(",
"active",
",",
"update_result",
".",
"left",
",",
"initial_args",
".",
"left",
")",
"val_right",
"=",
"val_where",
"(",
"active",
",",
"update_result",
".",
"right",
",",
"initial_args",
".",
"right",
")",
"# Check if new `c` points should be generated.",
"updated_left",
"=",
"active",
"&",
"tf",
".",
"equal",
"(",
"val_left",
".",
"x",
",",
"val_c",
".",
"x",
")",
"updated_right",
"=",
"active",
"&",
"tf",
".",
"equal",
"(",
"val_right",
".",
"x",
",",
"val_c",
".",
"x",
")",
"is_new",
"=",
"updated_left",
"|",
"updated_right",
"next_c",
"=",
"tf",
".",
"where",
"(",
"updated_left",
",",
"_secant",
"(",
"initial_args",
".",
"left",
",",
"val_left",
")",
",",
"val_c",
".",
"x",
")",
"next_c",
"=",
"tf",
".",
"where",
"(",
"updated_right",
",",
"_secant",
"(",
"initial_args",
".",
"right",
",",
"val_right",
")",
",",
"next_c",
")",
"in_range",
"=",
"(",
"val_left",
".",
"x",
"<=",
"next_c",
")",
"&",
"(",
"next_c",
"<=",
"val_right",
".",
"x",
")",
"# Figure out if an extra function evaluation is needed for new `c` points.",
"needs_extra_eval",
"=",
"tf",
".",
"reduce_any",
"(",
"input_tensor",
"=",
"in_range",
"&",
"is_new",
")",
"num_evals",
"=",
"initial_args",
".",
"num_evals",
"+",
"update_result",
".",
"num_evals",
"num_evals",
"=",
"num_evals",
"+",
"tf",
".",
"cast",
"(",
"needs_extra_eval",
",",
"num_evals",
".",
"dtype",
")",
"next_args",
"=",
"_Secant2Result",
"(",
"active",
"=",
"active",
"&",
"in_range",
",",
"# No longer active if `c` is out of range.",
"converged",
"=",
"initial_args",
".",
"converged",
",",
"failed",
"=",
"failed",
",",
"num_evals",
"=",
"num_evals",
",",
"left",
"=",
"val_left",
",",
"right",
"=",
"val_right",
")",
"def",
"_apply_inner_update",
"(",
")",
":",
"next_val_c",
"=",
"prefer_static",
".",
"cond",
"(",
"needs_extra_eval",
",",
"(",
"lambda",
":",
"value_and_gradients_function",
"(",
"next_c",
")",
")",
",",
"(",
"lambda",
":",
"val_c",
")",
")",
"return",
"_secant2_inner_update",
"(",
"value_and_gradients_function",
",",
"next_args",
",",
"val_0",
",",
"next_val_c",
",",
"f_lim",
",",
"sufficient_decrease_param",
",",
"curvature_param",
")",
"return",
"prefer_static",
".",
"cond",
"(",
"tf",
".",
"reduce_any",
"(",
"input_tensor",
"=",
"next_args",
".",
"active",
")",
",",
"_apply_inner_update",
",",
"lambda",
":",
"next_args",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
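In `_secant2_inner` above, a second trial point is generated only when `update` moved one of the end points onto the previous trial point `c`, and it is built from the old and new values of that end point. A hedged plain-Python restatement of that branching, with purely illustrative names:

```python
def next_trial_point(old_left, new_left, old_right, new_right, c, secant_fn):
  # If the left end point was moved onto c, take a secant step between the
  # old and new left end points.
  if new_left.x == c.x:
    return secant_fn(old_left, new_left)
  # Likewise when the right end point was moved onto c.
  if new_right.x == c.x:
    return secant_fn(old_right, new_right)
  # Otherwise keep c; no extra function evaluation is needed.
  return c.x
```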
test
|
_secant2_inner_update
|
Helper function for secant-square step.
|
tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py
|
def _secant2_inner_update(value_and_gradients_function,
initial_args,
val_0,
val_c,
f_lim,
sufficient_decrease_param,
curvature_param):
"""Helper function for secant-square step."""
# Fail if `val_c` is no longer finite.
new_failed = initial_args.active & ~is_finite(val_c)
active = initial_args.active & ~new_failed
failed = initial_args.failed | new_failed
# We converge when we find a point satisfying the Wolfe conditions, in those
# cases we set `val_left = val_right = val_c`.
found_wolfe = active & _satisfies_wolfe(
val_0, val_c, f_lim, sufficient_decrease_param, curvature_param)
val_left = val_where(found_wolfe, val_c, initial_args.left)
val_right = val_where(found_wolfe, val_c, initial_args.right)
converged = initial_args.converged | found_wolfe
active = active & ~found_wolfe
# If any active batch members remain, we apply the `update` function to
# squeeze further their corresponding left/right bracketing interval.
def _apply_update():
update_result = update(
value_and_gradients_function, val_left, val_right, val_c, f_lim,
active=active)
return _Secant2Result(
active=tf.zeros_like(active), # End of secant2, no actives anymore.
converged=converged,
failed=failed | update_result.failed,
num_evals=initial_args.num_evals + update_result.num_evals,
left=update_result.left,
right=update_result.right)
# Otherwise just return the current results.
def _default():
return _Secant2Result(
active=active,
converged=converged,
failed=failed,
num_evals=initial_args.num_evals,
left=val_left,
right=val_right)
return prefer_static.cond(
tf.reduce_any(input_tensor=active), _apply_update, _default)
|
def _secant2_inner_update(value_and_gradients_function,
initial_args,
val_0,
val_c,
f_lim,
sufficient_decrease_param,
curvature_param):
"""Helper function for secant-square step."""
# Fail if `val_c` is no longer finite.
new_failed = initial_args.active & ~is_finite(val_c)
active = initial_args.active & ~new_failed
failed = initial_args.failed | new_failed
# We converge when we find a point satisfying the Wolfe conditions, in those
# cases we set `val_left = val_right = val_c`.
found_wolfe = active & _satisfies_wolfe(
val_0, val_c, f_lim, sufficient_decrease_param, curvature_param)
val_left = val_where(found_wolfe, val_c, initial_args.left)
val_right = val_where(found_wolfe, val_c, initial_args.right)
converged = initial_args.converged | found_wolfe
active = active & ~found_wolfe
# If any active batch members remain, we apply the `update` function to
# squeeze further their corresponding left/right bracketing interval.
def _apply_update():
update_result = update(
value_and_gradients_function, val_left, val_right, val_c, f_lim,
active=active)
return _Secant2Result(
active=tf.zeros_like(active), # End of secant2, no actives anymore.
converged=converged,
failed=failed | update_result.failed,
num_evals=initial_args.num_evals + update_result.num_evals,
left=update_result.left,
right=update_result.right)
# Otherwise just return the current results.
def _default():
return _Secant2Result(
active=active,
converged=converged,
failed=failed,
num_evals=initial_args.num_evals,
left=val_left,
right=val_right)
return prefer_static.cond(
tf.reduce_any(input_tensor=active), _apply_update, _default)
|
[
"Helper",
"function",
"for",
"secant",
"-",
"square",
"step",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L241-L288
|
[
"def",
"_secant2_inner_update",
"(",
"value_and_gradients_function",
",",
"initial_args",
",",
"val_0",
",",
"val_c",
",",
"f_lim",
",",
"sufficient_decrease_param",
",",
"curvature_param",
")",
":",
"# Fail if `val_c` is no longer finite.",
"new_failed",
"=",
"initial_args",
".",
"active",
"&",
"~",
"is_finite",
"(",
"val_c",
")",
"active",
"=",
"initial_args",
".",
"active",
"&",
"~",
"new_failed",
"failed",
"=",
"initial_args",
".",
"failed",
"|",
"new_failed",
"# We converge when we find a point satisfying the Wolfe conditions, in those",
"# cases we set `val_left = val_right = val_c`.",
"found_wolfe",
"=",
"active",
"&",
"_satisfies_wolfe",
"(",
"val_0",
",",
"val_c",
",",
"f_lim",
",",
"sufficient_decrease_param",
",",
"curvature_param",
")",
"val_left",
"=",
"val_where",
"(",
"found_wolfe",
",",
"val_c",
",",
"initial_args",
".",
"left",
")",
"val_right",
"=",
"val_where",
"(",
"found_wolfe",
",",
"val_c",
",",
"initial_args",
".",
"right",
")",
"converged",
"=",
"initial_args",
".",
"converged",
"|",
"found_wolfe",
"active",
"=",
"active",
"&",
"~",
"found_wolfe",
"# If any active batch members remain, we apply the `update` function to",
"# squeeze further their corresponding left/right bracketing interval.",
"def",
"_apply_update",
"(",
")",
":",
"update_result",
"=",
"update",
"(",
"value_and_gradients_function",
",",
"val_left",
",",
"val_right",
",",
"val_c",
",",
"f_lim",
",",
"active",
"=",
"active",
")",
"return",
"_Secant2Result",
"(",
"active",
"=",
"tf",
".",
"zeros_like",
"(",
"active",
")",
",",
"# End of secant2, no actives anymore.",
"converged",
"=",
"converged",
",",
"failed",
"=",
"failed",
"|",
"update_result",
".",
"failed",
",",
"num_evals",
"=",
"initial_args",
".",
"num_evals",
"+",
"update_result",
".",
"num_evals",
",",
"left",
"=",
"update_result",
".",
"left",
",",
"right",
"=",
"update_result",
".",
"right",
")",
"# Otherwise just return the current results.",
"def",
"_default",
"(",
")",
":",
"return",
"_Secant2Result",
"(",
"active",
"=",
"active",
",",
"converged",
"=",
"converged",
",",
"failed",
"=",
"failed",
",",
"num_evals",
"=",
"initial_args",
".",
"num_evals",
",",
"left",
"=",
"val_left",
",",
"right",
"=",
"val_right",
")",
"return",
"prefer_static",
".",
"cond",
"(",
"tf",
".",
"reduce_any",
"(",
"input_tensor",
"=",
"active",
")",
",",
"_apply_update",
",",
"_default",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
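`_secant2_inner_update` above declares convergence once `_satisfies_wolfe` passes at the trial point. For reference, here is a hedged sketch of the textbook (exact) Wolfe conditions that `sufficient_decrease_param` (delta) and `curvature_param` (sigma) parameterize; the library's `_satisfies_wolfe` also handles the approximate variant gated by `f_lim`, which is not reproduced here.

```python
def satisfies_exact_wolfe(c, f0, df0, fc, dfc, delta=0.1, sigma=0.9):
  # Sufficient decrease (Armijo): f(c) <= f(0) + c * delta * f'(0).
  sufficient_decrease = fc <= f0 + c * delta * df0
  # Curvature: f'(c) >= sigma * f'(0); note f'(0) < 0 for a descent direction.
  curvature = dfc >= sigma * df0
  return sufficient_decrease and curvature
```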
test
|
update
|
Squeezes a bracketing interval containing the minimum.
Given an interval which brackets a minimum and a point in that interval,
finds a smaller nested interval which also brackets the minimum. If the
supplied point does not lie in the bracketing interval, the current interval
is returned.
The following description is given in terms of individual points evaluated on
a line function to be minimized. Note, however, the implementation also
accepts batches of points, allowing multiple line functions to be minimized at
once. See details in the docstring of `value_and_gradients_function` below.
The requirement of the interval bracketing a minimum is expressed through the
opposite slope conditions. Assume the left end point is 'a', the right
end point is 'b', the function to be minimized is 'f' and the derivative is
'df'. The update procedure relies on the following conditions being satisfied:
'''
f(a) <= f(0) + epsilon (1)
df(a) < 0 (2)
df(b) > 0 (3)
'''
In the first condition, epsilon is a small positive constant. The condition
demands that the function at the left end point be not much bigger than the
starting point (i.e. 0). This is an easy to satisfy condition because by
assumption, we are in a direction where the function value is decreasing.
The second and third conditions together demand that there is at least one
zero of the derivative in between a and b.
In addition to the interval, the update algorithm requires a third point to
be supplied. Usually, this point would lie within the interval [a, b]. If the
point is outside this interval, the current interval is returned. If the
point lies within the interval, the behaviour of the function and derivative
value at this point is used to squeeze the original interval in a manner that
preserves the opposite slope conditions.
For further details of this component, see the procedure U0-U3 on page 123 of
the [Hager and Zhang (2006)][2] article.
Note that this function does not explicitly verify whether the opposite slope
conditions are satisfied for the supplied interval. It is assumed that this
is so.
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns an object that can be converted to a namedtuple.
The namedtuple should have fields 'f' and 'df' that correspond to scalar
tensors of real dtype containing the value of the function and its
derivative at that point. The other namedtuple fields, if present,
should be tensors or sequences (possibly nested) of tensors.
In the usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and the fields 'f' and 'df' in the returned
namedtuple should each be a tensor of shape [n], with the corresponding
function values and derivatives at the input points.
val_left: Return value of value_and_gradients_function at the left
end point of the bracketing interval (labelled 'a' above).
val_right: Return value of value_and_gradients_function at the right
end point of the bracketing interval (labelled 'b' above).
val_trial: Return value of value_and_gradients_function at the trial point
to be used to shrink the interval (labelled 'c' above).
f_lim: real `Tensor` of shape [n]. The function value threshold for
the approximate Wolfe conditions to be checked for each batch member.
active: optional boolean `Tensor` of shape [n]. Relevant in batching mode
only, indicates batch members on which the update procedure should be
applied. On non-active members the current left/right interval is returned
unmodified.
Returns:
A namedtuple containing the following fields:
iteration: An int32 scalar `Tensor`. The number of iterations performed
by the bisect algorithm.
stopped: A boolean `Tensor` of shape [n]. True for those batch members
where the bisection algorithm terminated.
failed: A boolean `Tensor` of shape [n]. True for those batch members
where an error was encountered.
num_evals: An int32 scalar `Tensor`. The number of times the objective
function was evaluated.
left: Return value of value_and_gradients_function at the updated left
end point of the interval found.
right: Return value of value_and_gradients_function at the updated right
end point of the interval found.
|
tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py
|
def update(value_and_gradients_function, val_left, val_right, val_trial, f_lim,
active=None):
"""Squeezes a bracketing interval containing the minimum.
Given an interval which brackets a minimum and a point in that interval,
finds a smaller nested interval which also brackets the minimum. If the
supplied point does not lie in the bracketing interval, the current interval
is returned.
The following description is given in terms of individual points evaluated on
a line function to be minimized. Note, however, the implementation also
accepts batches of points, allowing multiple line functions to be minimized at
once. See details in the docstring of `value_and_gradients_function` below.
The requirement of the interval bracketing a minimum is expressed through the
opposite slope conditions. Assume the left end point is 'a', the right
end point is 'b', the function to be minimized is 'f' and the derivative is
'df'. The update procedure relies on the following conditions being satisfied:
'''
f(a) <= f(0) + epsilon (1)
df(a) < 0 (2)
df(b) > 0 (3)
'''
In the first condition, epsilon is a small positive constant. The condition
demands that the function at the left end point be not much bigger than the
starting point (i.e. 0). This is an easy to satisfy condition because by
assumption, we are in a direction where the function value is decreasing.
The second and third conditions together demand that there is at least one
zero of the derivative in between a and b.
In addition to the interval, the update algorithm requires a third point to
be supplied. Usually, this point would lie within the interval [a, b]. If the
point is outside this interval, the current interval is returned. If the
point lies within the interval, the behaviour of the function and derivative
value at this point is used to squeeze the original interval in a manner that
preserves the opposite slope conditions.
For further details of this component, see the procedure U0-U3 on page 123 of
the [Hager and Zhang (2006)][2] article.
Note that this function does not explicitly verify whether the opposite slope
conditions are satisfied for the supplied interval. It is assumed that this
is so.
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns an object that can be converted to a namedtuple.
The namedtuple should have fields 'f' and 'df' that correspond to scalar
tensors of real dtype containing the value of the function and its
derivative at that point. The other namedtuple fields, if present,
should be tensors or sequences (possibly nested) of tensors.
In the usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and the fields 'f' and 'df' in the returned
namedtuple should each be a tensor of shape [n], with the corresponding
function values and derivatives at the input points.
val_left: Return value of value_and_gradients_function at the left
end point of the bracketing interval (labelled 'a' above).
val_right: Return value of value_and_gradients_function at the right
end point of the bracketing interval (labelled 'b' above).
val_trial: Return value of value_and_gradients_function at the trial point
to be used to shrink the interval (labelled 'c' above).
f_lim: real `Tensor` of shape [n]. The function value threshold for
the approximate Wolfe conditions to be checked for each batch member.
active: optional boolean `Tensor` of shape [n]. Relevant in batching mode
only, indicates batch members on which the update procedure should be
applied. On non-active members the current left/right interval is returned
unmodified.
Returns:
A namedtuple containing the following fields:
iteration: An int32 scalar `Tensor`. The number of iterations performed
by the bisect algorithm.
stopped: A boolean `Tensor` of shape [n]. True for those batch members
where the bisection algorithm terminated.
failed: A boolean `Tensor` of shape [n]. True for those batch members
where an error was encountered.
num_evals: An int32 scalar `Tensor`. The number of times the objective
function was evaluated.
left: Return value of value_and_gradients_function at the updated left
end point of the interval found.
right: Return value of value_and_gradients_function at the updated right
end point of the interval found.
"""
# We should only update if the trial point is within the interval.
within_range = (val_left.x < val_trial.x) & (val_trial.x < val_right.x)
if active is not None:
within_range = within_range & active
# The new point is a valid left end point if it has negative slope
# and the value at the point is not too large.
valid_left = (val_trial.df < 0) & (val_trial.f <= f_lim)
# If the trial point has a negative slope but the value at that point
# is too high, bisect can narrow down an interval between the current left
# and the trial point.
needs_bisect = within_range & (val_trial.df < 0) & (val_trial.f > f_lim)
# Note that if `~valid_left` it is because either:
# - the slope at the trial point is positive, so it is a valid right
# point, or
# - the needs_bisect condition is true.
# In both cases we want to keep the current left and replace right
# with the trial point.
left = val_where(within_range & valid_left, val_trial, val_left)
right = val_where(within_range & ~valid_left, val_trial, val_right)
bisect_args = _IntermediateResult(
iteration=tf.convert_to_tensor(value=0),
stopped=~needs_bisect,
failed=tf.zeros_like(within_range), # i.e. all false.
num_evals=tf.convert_to_tensor(value=0),
left=left,
right=right)
return _bisect(value_and_gradients_function, bisect_args, f_lim)
|
def update(value_and_gradients_function, val_left, val_right, val_trial, f_lim,
active=None):
"""Squeezes a bracketing interval containing the minimum.
Given an interval which brackets a minimum and a point in that interval,
finds a smaller nested interval which also brackets the minimum. If the
supplied point does not lie in the bracketing interval, the current interval
is returned.
The following description is given in terms of individual points evaluated on
a line function to be minimized. Note, however, the implementation also
accepts batches of points, allowing multiple line functions to be minimized at
once. See details in the docstring of `value_and_gradients_function` below.
The requirement of the interval bracketing a minimum is expressed through the
opposite slope conditions. Assume the left end point is 'a', the right
end point is 'b', the function to be minimized is 'f' and the derivative is
'df'. The update procedure relies on the following conditions being satisfied:
'''
f(a) <= f(0) + epsilon (1)
df(a) < 0 (2)
df(b) > 0 (3)
'''
In the first condition, epsilon is a small positive constant. The condition
demands that the function at the left end point be not much bigger than the
starting point (i.e. 0). This is an easy to satisfy condition because by
assumption, we are in a direction where the function value is decreasing.
The second and third conditions together demand that there is at least one
zero of the derivative in between a and b.
In addition to the interval, the update algorithm requires a third point to
be supplied. Usually, this point would lie within the interval [a, b]. If the
point is outside this interval, the current interval is returned. If the
point lies within the interval, the behaviour of the function and derivative
value at this point is used to squeeze the original interval in a manner that
preserves the opposite slope conditions.
For further details of this component, see the procedure U0-U3 on page 123 of
the [Hager and Zhang (2006)][2] article.
Note that this function does not explicitly verify whether the opposite slope
conditions are satisfied for the supplied interval. It is assumed that this
is so.
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns an object that can be converted to a namedtuple.
The namedtuple should have fields 'f' and 'df' that correspond to scalar
tensors of real dtype containing the value of the function and its
derivative at that point. The other namedtuple fields, if present,
should be tensors or sequences (possibly nested) of tensors.
In the usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and the fields 'f' and 'df' in the returned
namedtuple should each be a tensor of shape [n], with the corresponding
function values and derivatives at the input points.
val_left: Return value of value_and_gradients_function at the left
end point of the bracketing interval (labelled 'a' above).
val_right: Return value of value_and_gradients_function at the right
end point of the bracketing interval (labelled 'b' above).
val_trial: Return value of value_and_gradients_function at the trial point
to be used to shrink the interval (labelled 'c' above).
f_lim: real `Tensor` of shape [n]. The function value threshold for
the approximate Wolfe conditions to be checked for each batch member.
active: optional boolean `Tensor` of shape [n]. Relevant in batching mode
only, indicates batch members on which the update procedure should be
applied. On non-active members the current left/right interval is returned
unmodified.
Returns:
A namedtuple containing the following fields:
iteration: An int32 scalar `Tensor`. The number of iterations performed
by the bisect algorithm.
stopped: A boolean `Tensor` of shape [n]. True for those batch members
where the bisection algorithm terminated.
failed: A boolean `Tensor` of shape [n]. True for those batch members
where an error was encountered.
num_evals: An int32 scalar `Tensor`. The number of times the objective
function was evaluated.
left: Return value of value_and_gradients_function at the updated left
end point of the interval found.
right: Return value of value_and_gradients_function at the updated right
end point of the interval found.
"""
# We should only update if the trial point is within the interval.
within_range = (val_left.x < val_trial.x) & (val_trial.x < val_right.x)
if active is not None:
within_range = within_range & active
# The new point is a valid left end point if it has negative slope
# and the value at the point is not too large.
valid_left = (val_trial.df < 0) & (val_trial.f <= f_lim)
# If the trial point has a negative slope but the value at that point
# is too high, bisect can narrow down an interval between the current left
# and the trial point.
needs_bisect = within_range & (val_trial.df < 0) & (val_trial.f > f_lim)
# Note that if `~valid_left` it is because either:
# - the slope at the trial point is positive, so it is a valid right
# point, or
# - the needs_bisect condition is true.
# In both cases we want to keep the current left and replace right
# with the trial point.
left = val_where(within_range & valid_left, val_trial, val_left)
right = val_where(within_range & ~valid_left, val_trial, val_right)
bisect_args = _IntermediateResult(
iteration=tf.convert_to_tensor(value=0),
stopped=~needs_bisect,
failed=tf.zeros_like(within_range), # i.e. all false.
num_evals=tf.convert_to_tensor(value=0),
left=left,
right=right)
return _bisect(value_and_gradients_function, bisect_args, f_lim)
|
[
"Squeezes",
"a",
"bracketing",
"interval",
"containing",
"the",
"minimum",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L301-L423
|
[
"def",
"update",
"(",
"value_and_gradients_function",
",",
"val_left",
",",
"val_right",
",",
"val_trial",
",",
"f_lim",
",",
"active",
"=",
"None",
")",
":",
"# We should only update if the trial point is within the interval.",
"within_range",
"=",
"(",
"val_left",
".",
"x",
"<",
"val_trial",
".",
"x",
")",
"&",
"(",
"val_trial",
".",
"x",
"<",
"val_right",
".",
"x",
")",
"if",
"active",
"is",
"not",
"None",
":",
"within_range",
"=",
"within_range",
"&",
"active",
"# The new point is a valid left end point if it has negative slope",
"# and the value at the point is not too large.",
"valid_left",
"=",
"(",
"val_trial",
".",
"df",
"<",
"0",
")",
"&",
"(",
"val_trial",
".",
"f",
"<=",
"f_lim",
")",
"# If the trial point has a negative slope but the value at that point",
"# is too high, bisect can narrow down an interval between the current left",
"# and the trial point.",
"needs_bisect",
"=",
"within_range",
"&",
"(",
"val_trial",
".",
"df",
"<",
"0",
")",
"&",
"(",
"val_trial",
".",
"f",
">",
"f_lim",
")",
"# Note that if `~valid_left` it is because either:",
"# - the slope at the trial point is positive, so it is a valid right",
"# point, or",
"# - the needs_bisect condition is true.",
"# In both cases we want to keep the current left and replace right",
"# with the trial point.",
"left",
"=",
"val_where",
"(",
"within_range",
"&",
"valid_left",
",",
"val_trial",
",",
"val_left",
")",
"right",
"=",
"val_where",
"(",
"within_range",
"&",
"~",
"valid_left",
",",
"val_trial",
",",
"val_right",
")",
"bisect_args",
"=",
"_IntermediateResult",
"(",
"iteration",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"0",
")",
",",
"stopped",
"=",
"~",
"needs_bisect",
",",
"failed",
"=",
"tf",
".",
"zeros_like",
"(",
"within_range",
")",
",",
"# i.e. all false.",
"num_evals",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"0",
")",
",",
"left",
"=",
"left",
",",
"right",
"=",
"right",
")",
"return",
"_bisect",
"(",
"value_and_gradients_function",
",",
"bisect_args",
",",
"f_lim",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
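The `update` entry above reduces to a three-way case analysis on the trial point, followed by a bisection phase when needed. Below is a hedged scalar restatement of that logic (procedure U0-U3), ignoring batching and using an illustrative bounded bisection loop rather than the library's `_bisect`:

```python
def update_interval(a, b, c, f, df, f_lim):
  """Scalar sketch of the U0-U3 interval update described above."""
  if not (a < c < b):              # U0: out-of-range trial point, keep interval.
    return a, b
  if df(c) < 0 and f(c) <= f_lim:  # U1: c is a valid new left end point.
    return c, b
  if df(c) >= 0:                   # U2: c is a valid new right end point.
    return a, c
  # U3: negative slope but f(c) > f_lim; bisect on [a, c] until the opposite
  # slope conditions are restored.
  left, right = a, c
  for _ in range(50):              # Bounded loop; enough for a sketch.
    mid = 0.5 * (left + right)
    if df(mid) >= 0:
      return left, mid
    if f(mid) <= f_lim:
      left = mid
    else:
      right = mid
  return left, right

# Example: f(x) = (x - 2)**2; the trial point c = 1 has negative slope and an
# acceptable value, so case U1 applies and c becomes the new left end point.
f = lambda x: (x - 2.)**2
df = lambda x: 2. * (x - 2.)
print(update_interval(0., 5., 1., f, df, f_lim=f(0.) + 1e-6))  # ==> (1.0, 5.0)
```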
test
|
bracket
|
Brackets the minimum given an initial starting point.
Applies the Hager Zhang bracketing algorithm to find an interval containing
a region with points satisfying Wolfe conditions. Uses the supplied initial
step size 'c', the right end point of the provided search interval, to find
such an interval. The only condition on 'c' is that it should be positive.
For more details see steps B0-B3 in [Hager and Zhang (2006)][2].
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a namedtuple containing the value field `f` of the
function and its derivative value field `df` at that point.
Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and return a tuple of two tensors of shape [n], the
function values and the corresponding derivatives at the input points.
search_interval: A namedtuple describing the current search interval,
must include the fields:
- converged: Boolean `Tensor` of shape [n], indicating batch members
where search has already converged. Interval for these batch members
      won't be modified.
- failed: Boolean `Tensor` of shape [n], indicating batch members
where search has already failed. Interval for these batch members
      won't be modified.
- iterations: Scalar int32 `Tensor`. Number of line search iterations
so far.
- func_evals: Scalar int32 `Tensor`. Number of function evaluations
so far.
- left: A namedtuple, as returned by value_and_gradients_function
evaluated at 0, the left end point of the current interval.
- right: A namedtuple, as returned by value_and_gradients_function,
of the right end point of the current interval (labelled 'c' above).
f_lim: real `Tensor` of shape [n]. The function value threshold for
the approximate Wolfe conditions to be checked for each batch member.
max_iterations: Int32 scalar `Tensor`. The maximum number of iterations
permitted. The limit applies equally to all batch members.
expansion_param: Scalar positive `Tensor` of real dtype. Must be greater
than `1.`. Used to expand the initial interval in case it does not bracket
a minimum.
Returns:
A namedtuple with the following fields.
iteration: An int32 scalar `Tensor`. The number of iterations performed.
Bounded above by `max_iterations` parameter.
stopped: A boolean `Tensor` of shape [n]. True for those batch members
where the algorithm terminated before reaching `max_iterations`.
failed: A boolean `Tensor` of shape [n]. True for those batch members
where an error was encountered during bracketing.
num_evals: An int32 scalar `Tensor`. The number of times the objective
function was evaluated.
left: Return value of value_and_gradients_function at the updated left
end point of the interval found.
right: Return value of value_and_gradients_function at the updated right
end point of the interval found.
|
tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py
|
def bracket(value_and_gradients_function,
search_interval,
f_lim,
max_iterations,
expansion_param=5.0):
"""Brackets the minimum given an initial starting point.
Applies the Hager Zhang bracketing algorithm to find an interval containing
a region with points satisfying Wolfe conditions. Uses the supplied initial
step size 'c', the right end point of the provided search interval, to find
such an interval. The only condition on 'c' is that it should be positive.
For more details see steps B0-B3 in [Hager and Zhang (2006)][2].
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
      tensor and returns a namedtuple containing the value field `f` of the
      function and its derivative value field `df` at that point.
      Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and return a tuple of two tensors of shape [n], the
function values and the corresponding derivatives at the input points.
search_interval: A namedtuple describing the current search interval,
must include the fields:
- converged: Boolean `Tensor` of shape [n], indicating batch members
where search has already converged. Interval for these batch members
        won't be modified.
- failed: Boolean `Tensor` of shape [n], indicating batch members
where search has already failed. Interval for these batch members
        won't be modified.
- iterations: Scalar int32 `Tensor`. Number of line search iterations
so far.
- func_evals: Scalar int32 `Tensor`. Number of function evaluations
so far.
- left: A namedtuple, as returned by value_and_gradients_function
evaluated at 0, the left end point of the current interval.
- right: A namedtuple, as returned by value_and_gradients_function,
of the right end point of the current interval (labelled 'c' above).
f_lim: real `Tensor` of shape [n]. The function value threshold for
the approximate Wolfe conditions to be checked for each batch member.
max_iterations: Int32 scalar `Tensor`. The maximum number of iterations
permitted. The limit applies equally to all batch members.
expansion_param: Scalar positive `Tensor` of real dtype. Must be greater
than `1.`. Used to expand the initial interval in case it does not bracket
a minimum.
Returns:
A namedtuple with the following fields.
iteration: An int32 scalar `Tensor`. The number of iterations performed.
Bounded above by `max_iterations` parameter.
stopped: A boolean `Tensor` of shape [n]. True for those batch members
where the algorithm terminated before reaching `max_iterations`.
failed: A boolean `Tensor` of shape [n]. True for those batch members
where an error was encountered during bracketing.
num_evals: An int32 scalar `Tensor`. The number of times the objective
function was evaluated.
left: Return value of value_and_gradients_function at the updated left
end point of the interval found.
right: Return value of value_and_gradients_function at the updated right
end point of the interval found.
"""
already_stopped = search_interval.failed | search_interval.converged
# If the slope at right end point is positive, step B1 in [2], then the given
# initial points already bracket a minimum.
bracketed = search_interval.right.df >= 0
# Bisection is needed, step B2, if right end point almost works as a new left
# end point but the objective value is too high.
needs_bisect = (
search_interval.right.df < 0) & (search_interval.right.f > f_lim)
# In these three cases bracketing is already `stopped` and there is no need
# to perform further evaluations. Otherwise the bracketing loop is needed to
# expand the interval, step B3, until the conditions are met.
initial_args = _IntermediateResult(
iteration=search_interval.iterations,
stopped=already_stopped | bracketed | needs_bisect,
failed=search_interval.failed,
num_evals=search_interval.func_evals,
left=search_interval.left,
right=search_interval.right)
def _loop_cond(curr):
return (curr.iteration <
max_iterations) & ~tf.reduce_all(input_tensor=curr.stopped)
def _loop_body(curr):
"""Main body of bracketing loop."""
# The loop maintains the invariant that curr.stopped is true if we have
# either: failed, successfully bracketed, or not yet bracketed but needs
    # bisect. In the only remaining case, step B3 in [2], we need to
# expand and update the left/right values appropriately.
new_right = value_and_gradients_function(expansion_param * curr.right.x)
left = val_where(curr.stopped, curr.left, curr.right)
right = val_where(curr.stopped, curr.right, new_right)
# Updated the failed, bracketed, and needs_bisect conditions.
failed = curr.failed | ~is_finite(right)
bracketed = right.df >= 0
needs_bisect = (right.df < 0) & (right.f > f_lim)
return [_IntermediateResult(
iteration=curr.iteration + 1,
stopped=curr.stopped | failed | bracketed | needs_bisect,
failed=failed,
num_evals=curr.num_evals + 1,
left=left,
right=right)]
bracket_result = tf.while_loop(
cond=_loop_cond, body=_loop_body, loop_vars=[initial_args])[0]
# For entries where bisect is still needed, mark them as not yet stopped,
# reset the left end point, and run `_bisect` on them.
needs_bisect = (
(bracket_result.right.df < 0) & (bracket_result.right.f > f_lim))
stopped = already_stopped | bracket_result.failed | ~needs_bisect
left = val_where(stopped, bracket_result.left, search_interval.left)
bisect_args = bracket_result._replace(stopped=stopped, left=left)
return _bisect(value_and_gradients_function, bisect_args, f_lim)
|
def bracket(value_and_gradients_function,
search_interval,
f_lim,
max_iterations,
expansion_param=5.0):
"""Brackets the minimum given an initial starting point.
Applies the Hager Zhang bracketing algorithm to find an interval containing
a region with points satisfying Wolfe conditions. Uses the supplied initial
step size 'c', the right end point of the provided search interval, to find
such an interval. The only condition on 'c' is that it should be positive.
For more details see steps B0-B3 in [Hager and Zhang (2006)][2].
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
      tensor and returns a namedtuple containing the value field `f` of the
      function and its derivative value field `df` at that point.
      Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and return a tuple of two tensors of shape [n], the
function values and the corresponding derivatives at the input points.
search_interval: A namedtuple describing the current search interval,
must include the fields:
- converged: Boolean `Tensor` of shape [n], indicating batch members
where search has already converged. Interval for these batch members
        won't be modified.
- failed: Boolean `Tensor` of shape [n], indicating batch members
where search has already failed. Interval for these batch members
        won't be modified.
- iterations: Scalar int32 `Tensor`. Number of line search iterations
so far.
- func_evals: Scalar int32 `Tensor`. Number of function evaluations
so far.
- left: A namedtuple, as returned by value_and_gradients_function
evaluated at 0, the left end point of the current interval.
- right: A namedtuple, as returned by value_and_gradients_function,
of the right end point of the current interval (labelled 'c' above).
f_lim: real `Tensor` of shape [n]. The function value threshold for
the approximate Wolfe conditions to be checked for each batch member.
max_iterations: Int32 scalar `Tensor`. The maximum number of iterations
permitted. The limit applies equally to all batch members.
expansion_param: Scalar positive `Tensor` of real dtype. Must be greater
than `1.`. Used to expand the initial interval in case it does not bracket
a minimum.
Returns:
A namedtuple with the following fields.
iteration: An int32 scalar `Tensor`. The number of iterations performed.
Bounded above by `max_iterations` parameter.
stopped: A boolean `Tensor` of shape [n]. True for those batch members
where the algorithm terminated before reaching `max_iterations`.
failed: A boolean `Tensor` of shape [n]. True for those batch members
where an error was encountered during bracketing.
num_evals: An int32 scalar `Tensor`. The number of times the objective
function was evaluated.
left: Return value of value_and_gradients_function at the updated left
end point of the interval found.
right: Return value of value_and_gradients_function at the updated right
end point of the interval found.
"""
already_stopped = search_interval.failed | search_interval.converged
# If the slope at right end point is positive, step B1 in [2], then the given
# initial points already bracket a minimum.
bracketed = search_interval.right.df >= 0
# Bisection is needed, step B2, if right end point almost works as a new left
# end point but the objective value is too high.
needs_bisect = (
search_interval.right.df < 0) & (search_interval.right.f > f_lim)
# In these three cases bracketing is already `stopped` and there is no need
# to perform further evaluations. Otherwise the bracketing loop is needed to
# expand the interval, step B3, until the conditions are met.
initial_args = _IntermediateResult(
iteration=search_interval.iterations,
stopped=already_stopped | bracketed | needs_bisect,
failed=search_interval.failed,
num_evals=search_interval.func_evals,
left=search_interval.left,
right=search_interval.right)
def _loop_cond(curr):
return (curr.iteration <
max_iterations) & ~tf.reduce_all(input_tensor=curr.stopped)
def _loop_body(curr):
"""Main body of bracketing loop."""
# The loop maintains the invariant that curr.stopped is true if we have
# either: failed, successfully bracketed, or not yet bracketed but needs
    # bisect. In the only remaining case, step B3 in [2], we need to
# expand and update the left/right values appropriately.
new_right = value_and_gradients_function(expansion_param * curr.right.x)
left = val_where(curr.stopped, curr.left, curr.right)
right = val_where(curr.stopped, curr.right, new_right)
# Updated the failed, bracketed, and needs_bisect conditions.
failed = curr.failed | ~is_finite(right)
bracketed = right.df >= 0
needs_bisect = (right.df < 0) & (right.f > f_lim)
return [_IntermediateResult(
iteration=curr.iteration + 1,
stopped=curr.stopped | failed | bracketed | needs_bisect,
failed=failed,
num_evals=curr.num_evals + 1,
left=left,
right=right)]
bracket_result = tf.while_loop(
cond=_loop_cond, body=_loop_body, loop_vars=[initial_args])[0]
# For entries where bisect is still needed, mark them as not yet stopped,
# reset the left end point, and run `_bisect` on them.
needs_bisect = (
(bracket_result.right.df < 0) & (bracket_result.right.f > f_lim))
stopped = already_stopped | bracket_result.failed | ~needs_bisect
left = val_where(stopped, bracket_result.left, search_interval.left)
bisect_args = bracket_result._replace(stopped=stopped, left=left)
return _bisect(value_and_gradients_function, bisect_args, f_lim)
|
[
"Brackets",
"the",
"minimum",
"given",
"an",
"initial",
"starting",
"point",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L426-L545
|
[
"def",
"bracket",
"(",
"value_and_gradients_function",
",",
"search_interval",
",",
"f_lim",
",",
"max_iterations",
",",
"expansion_param",
"=",
"5.0",
")",
":",
"already_stopped",
"=",
"search_interval",
".",
"failed",
"|",
"search_interval",
".",
"converged",
"# If the slope at right end point is positive, step B1 in [2], then the given",
"# initial points already bracket a minimum.",
"bracketed",
"=",
"search_interval",
".",
"right",
".",
"df",
">=",
"0",
"# Bisection is needed, step B2, if right end point almost works as a new left",
"# end point but the objective value is too high.",
"needs_bisect",
"=",
"(",
"search_interval",
".",
"right",
".",
"df",
"<",
"0",
")",
"&",
"(",
"search_interval",
".",
"right",
".",
"f",
">",
"f_lim",
")",
"# In these three cases bracketing is already `stopped` and there is no need",
"# to perform further evaluations. Otherwise the bracketing loop is needed to",
"# expand the interval, step B3, until the conditions are met.",
"initial_args",
"=",
"_IntermediateResult",
"(",
"iteration",
"=",
"search_interval",
".",
"iterations",
",",
"stopped",
"=",
"already_stopped",
"|",
"bracketed",
"|",
"needs_bisect",
",",
"failed",
"=",
"search_interval",
".",
"failed",
",",
"num_evals",
"=",
"search_interval",
".",
"func_evals",
",",
"left",
"=",
"search_interval",
".",
"left",
",",
"right",
"=",
"search_interval",
".",
"right",
")",
"def",
"_loop_cond",
"(",
"curr",
")",
":",
"return",
"(",
"curr",
".",
"iteration",
"<",
"max_iterations",
")",
"&",
"~",
"tf",
".",
"reduce_all",
"(",
"input_tensor",
"=",
"curr",
".",
"stopped",
")",
"def",
"_loop_body",
"(",
"curr",
")",
":",
"\"\"\"Main body of bracketing loop.\"\"\"",
"# The loop maintains the invariant that curr.stopped is true if we have",
"# either: failed, successfully bracketed, or not yet bracketed but needs",
"# bisect. On the only remaining case, step B3 in [2]. case we need to",
"# expand and update the left/right values appropriately.",
"new_right",
"=",
"value_and_gradients_function",
"(",
"expansion_param",
"*",
"curr",
".",
"right",
".",
"x",
")",
"left",
"=",
"val_where",
"(",
"curr",
".",
"stopped",
",",
"curr",
".",
"left",
",",
"curr",
".",
"right",
")",
"right",
"=",
"val_where",
"(",
"curr",
".",
"stopped",
",",
"curr",
".",
"right",
",",
"new_right",
")",
"# Updated the failed, bracketed, and needs_bisect conditions.",
"failed",
"=",
"curr",
".",
"failed",
"|",
"~",
"is_finite",
"(",
"right",
")",
"bracketed",
"=",
"right",
".",
"df",
">=",
"0",
"needs_bisect",
"=",
"(",
"right",
".",
"df",
"<",
"0",
")",
"&",
"(",
"right",
".",
"f",
">",
"f_lim",
")",
"return",
"[",
"_IntermediateResult",
"(",
"iteration",
"=",
"curr",
".",
"iteration",
"+",
"1",
",",
"stopped",
"=",
"curr",
".",
"stopped",
"|",
"failed",
"|",
"bracketed",
"|",
"needs_bisect",
",",
"failed",
"=",
"failed",
",",
"num_evals",
"=",
"curr",
".",
"num_evals",
"+",
"1",
",",
"left",
"=",
"left",
",",
"right",
"=",
"right",
")",
"]",
"bracket_result",
"=",
"tf",
".",
"while_loop",
"(",
"cond",
"=",
"_loop_cond",
",",
"body",
"=",
"_loop_body",
",",
"loop_vars",
"=",
"[",
"initial_args",
"]",
")",
"[",
"0",
"]",
"# For entries where bisect is still needed, mark them as not yet stopped,",
"# reset the left end point, and run `_bisect` on them.",
"needs_bisect",
"=",
"(",
"(",
"bracket_result",
".",
"right",
".",
"df",
"<",
"0",
")",
"&",
"(",
"bracket_result",
".",
"right",
".",
"f",
">",
"f_lim",
")",
")",
"stopped",
"=",
"already_stopped",
"|",
"bracket_result",
".",
"failed",
"|",
"~",
"needs_bisect",
"left",
"=",
"val_where",
"(",
"stopped",
",",
"bracket_result",
".",
"left",
",",
"search_interval",
".",
"left",
")",
"bisect_args",
"=",
"bracket_result",
".",
"_replace",
"(",
"stopped",
"=",
"stopped",
",",
"left",
"=",
"left",
")",
"return",
"_bisect",
"(",
"value_and_gradients_function",
",",
"bisect_args",
",",
"f_lim",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
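The bracketing loop above implements steps B1-B3: expand the right end point geometrically until its slope turns non-negative, stopping early when the value there exceeds `f_lim` (in which case bisection is used instead of further expansion). A minimal single-point sketch of that expansion under the assumption of a scalar objective; `bracket_scalar` is an illustrative helper, not the batched `bracket` shown in the record.

def bracket_scalar(f, df, right_x, f_lim, expansion_param=5.0, max_iterations=50):
  """Scalar sketch of bracketing steps B1-B3 from Hager and Zhang (2006)."""
  for _ in range(max_iterations):
    if df(right_x) >= 0:              # Step B1: non-negative slope, [0, right_x] brackets a minimum.
      return right_x, 'bracketed'
    if f(right_x) > f_lim:            # Step B2: negative slope but value too high; narrow by
      return right_x, 'needs_bisect'  # bisection instead of expanding further.
    right_x *= expansion_param        # Step B3: expand the interval and re-evaluate.
  return right_x, 'max_iterations'

f = lambda x: (x - 20.0) ** 2
df = lambda x: 2.0 * (x - 20.0)
print(bracket_scalar(f, df, right_x=1.0, f_lim=f(0.0) + 1e-6))  # -> (25.0, 'bracketed')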
|
test
|
bisect
|
Bisects an interval and updates to satisfy opposite slope conditions.
Corresponds to the step U3 in [Hager and Zhang (2006)][2].
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
    tensor and returns a namedtuple containing the value field `f` of the
    function and its derivative value field `df` at that point.
    Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and return a tuple of two tensors of shape [n], the
function values and the corresponding derivatives at the input points.
initial_left: Return value of value_and_gradients_function at the left end
point of the current bracketing interval.
initial_right: Return value of value_and_gradients_function at the right end
point of the current bracketing interval.
f_lim: real `Tensor` of shape [n]. The function value threshold for
the approximate Wolfe conditions to be checked for each batch member.
Returns:
A namedtuple containing the following fields:
iteration: An int32 scalar `Tensor`. The number of iterations performed.
Bounded above by `max_iterations` parameter.
stopped: A boolean scalar `Tensor`. True if the bisect algorithm
terminated.
failed: A scalar boolean tensor. Indicates whether the objective function
failed to produce a finite value.
num_evals: A scalar int32 tensor. The number of value and gradients
function evaluations.
left: Return value of value_and_gradients_function at the left end
point of the bracketing interval found.
right: Return value of value_and_gradients_function at the right end
point of the bracketing interval found.
|
tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py
|
def bisect(value_and_gradients_function,
initial_left,
initial_right,
f_lim):
"""Bisects an interval and updates to satisfy opposite slope conditions.
Corresponds to the step U3 in [Hager and Zhang (2006)][2].
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
      tensor and returns a namedtuple containing the value field `f` of the
      function and its derivative value field `df` at that point.
      Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and return a tuple of two tensors of shape [n], the
function values and the corresponding derivatives at the input points.
initial_left: Return value of value_and_gradients_function at the left end
point of the current bracketing interval.
initial_right: Return value of value_and_gradients_function at the right end
point of the current bracketing interval.
f_lim: real `Tensor` of shape [n]. The function value threshold for
the approximate Wolfe conditions to be checked for each batch member.
Returns:
A namedtuple containing the following fields:
iteration: An int32 scalar `Tensor`. The number of iterations performed.
Bounded above by `max_iterations` parameter.
stopped: A boolean scalar `Tensor`. True if the bisect algorithm
terminated.
failed: A scalar boolean tensor. Indicates whether the objective function
failed to produce a finite value.
num_evals: A scalar int32 tensor. The number of value and gradients
function evaluations.
left: Return value of value_and_gradients_function at the left end
point of the bracketing interval found.
right: Return value of value_and_gradients_function at the right end
point of the bracketing interval found.
"""
failed = ~is_finite(initial_left, initial_right)
needs_bisect = (initial_right.df < 0) & (initial_right.f > f_lim)
bisect_args = _IntermediateResult(
iteration=tf.convert_to_tensor(value=0),
stopped=failed | ~needs_bisect,
failed=failed,
num_evals=tf.convert_to_tensor(value=0),
left=initial_left,
right=initial_right)
return _bisect(value_and_gradients_function, bisect_args, f_lim)
|
def bisect(value_and_gradients_function,
initial_left,
initial_right,
f_lim):
"""Bisects an interval and updates to satisfy opposite slope conditions.
Corresponds to the step U3 in [Hager and Zhang (2006)][2].
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
      tensor and returns a namedtuple containing the value field `f` of the
      function and its derivative value field `df` at that point.
      Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and return a tuple of two tensors of shape [n], the
function values and the corresponding derivatives at the input points.
initial_left: Return value of value_and_gradients_function at the left end
point of the current bracketing interval.
initial_right: Return value of value_and_gradients_function at the right end
point of the current bracketing interval.
f_lim: real `Tensor` of shape [n]. The function value threshold for
the approximate Wolfe conditions to be checked for each batch member.
Returns:
A namedtuple containing the following fields:
iteration: An int32 scalar `Tensor`. The number of iterations performed.
Bounded above by `max_iterations` parameter.
stopped: A boolean scalar `Tensor`. True if the bisect algorithm
terminated.
failed: A scalar boolean tensor. Indicates whether the objective function
failed to produce a finite value.
num_evals: A scalar int32 tensor. The number of value and gradients
function evaluations.
left: Return value of value_and_gradients_function at the left end
point of the bracketing interval found.
right: Return value of value_and_gradients_function at the right end
point of the bracketing interval found.
"""
failed = ~is_finite(initial_left, initial_right)
needs_bisect = (initial_right.df < 0) & (initial_right.f > f_lim)
bisect_args = _IntermediateResult(
iteration=tf.convert_to_tensor(value=0),
stopped=failed | ~needs_bisect,
failed=failed,
num_evals=tf.convert_to_tensor(value=0),
left=initial_left,
right=initial_right)
return _bisect(value_and_gradients_function, bisect_args, f_lim)
|
[
"Bisects",
"an",
"interval",
"and",
"updates",
"to",
"satisfy",
"opposite",
"slope",
"conditions",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L548-L596
|
[
"def",
"bisect",
"(",
"value_and_gradients_function",
",",
"initial_left",
",",
"initial_right",
",",
"f_lim",
")",
":",
"failed",
"=",
"~",
"is_finite",
"(",
"initial_left",
",",
"initial_right",
")",
"needs_bisect",
"=",
"(",
"initial_right",
".",
"df",
"<",
"0",
")",
"&",
"(",
"initial_right",
".",
"f",
">",
"f_lim",
")",
"bisect_args",
"=",
"_IntermediateResult",
"(",
"iteration",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"0",
")",
",",
"stopped",
"=",
"failed",
"|",
"~",
"needs_bisect",
",",
"failed",
"=",
"failed",
",",
"num_evals",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"0",
")",
",",
"left",
"=",
"initial_left",
",",
"right",
"=",
"initial_right",
")",
"return",
"_bisect",
"(",
"value_and_gradients_function",
",",
"bisect_args",
",",
"f_lim",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
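Bisection here restores the opposite slope conditions: the left end point keeps a negative slope and an acceptable value while the right end point is shrunk until its slope is non-negative. A scalar sketch of step U3, again purely illustrative rather than the batched TensorFlow implementation in this record:

def bisect_scalar(f, df, left, right, f_lim, max_iterations=50):
  """Scalar sketch of step U3: shrink [left, right] until df(right) >= 0."""
  for _ in range(max_iterations):
    if df(right) >= 0:                 # Opposite slope conditions already hold.
      return left, right
    mid = (left + right) / 2.0
    if df(mid) < 0 and f(mid) <= f_lim:
      left = mid                       # Mid point works as a new left end point.
    else:
      right = mid                      # Otherwise it becomes the new right end point.
  return left, right

# Double well f(x) = (x**2 - 1)**2 with minima at x = -1 and x = +1.
f = lambda x: (x * x - 1.0) ** 2
df = lambda x: 4.0 * x * (x * x - 1.0)
print(bisect_scalar(f, df, left=-1.1, right=0.5, f_lim=0.3))  # -> (-1.1, -0.3)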
|
test
|
_bisect
|
Actual implementation of bisect given initial_args in a _BracketResult.
|
tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py
|
def _bisect(value_and_gradients_function, initial_args, f_lim):
"""Actual implementation of bisect given initial_args in a _BracketResult."""
def _loop_cond(curr):
# TODO(b/112524024): Also take into account max_iterations.
return ~tf.reduce_all(input_tensor=curr.stopped)
def _loop_body(curr):
"""Narrow down interval to satisfy opposite slope conditions."""
mid = value_and_gradients_function((curr.left.x + curr.right.x) / 2)
# Fail if function values at mid point are no longer finite; or left/right
# points are so close to it that we can't distinguish them any more.
failed = (curr.failed | ~is_finite(mid) |
tf.equal(mid.x, curr.left.x) | tf.equal(mid.x, curr.right.x))
# If mid point has a negative slope and the function value at that point is
# small enough, we can use it as a new left end point to narrow down the
# interval. If mid point has a positive slope, then we have found a suitable
# right end point to bracket a minima within opposite slopes. Otherwise, the
# mid point has a negative slope but the function value at that point is too
# high to work as left end point, we are in the same situation in which we
# started the loop so we just update the right end point and continue.
to_update = ~(curr.stopped | failed)
update_left = (mid.df < 0) & (mid.f <= f_lim)
left = val_where(to_update & update_left, mid, curr.left)
right = val_where(to_update & ~update_left, mid, curr.right)
# We're done when the right end point has a positive slope.
stopped = curr.stopped | failed | (right.df >= 0)
return [_IntermediateResult(
iteration=curr.iteration,
stopped=stopped,
failed=failed,
num_evals=curr.num_evals + 1,
left=left,
right=right)]
# The interval needs updating if the right end point has a negative slope and
# the value of the function at that point is too high. It is not a valid left
# end point but along with the current left end point, it encloses another
# minima. The loop above tries to narrow the interval so that it satisfies the
# opposite slope conditions.
return tf.while_loop(
cond=_loop_cond, body=_loop_body, loop_vars=[initial_args])[0]
|
def _bisect(value_and_gradients_function, initial_args, f_lim):
"""Actual implementation of bisect given initial_args in a _BracketResult."""
def _loop_cond(curr):
# TODO(b/112524024): Also take into account max_iterations.
return ~tf.reduce_all(input_tensor=curr.stopped)
def _loop_body(curr):
"""Narrow down interval to satisfy opposite slope conditions."""
mid = value_and_gradients_function((curr.left.x + curr.right.x) / 2)
# Fail if function values at mid point are no longer finite; or left/right
# points are so close to it that we can't distinguish them any more.
failed = (curr.failed | ~is_finite(mid) |
tf.equal(mid.x, curr.left.x) | tf.equal(mid.x, curr.right.x))
# If mid point has a negative slope and the function value at that point is
# small enough, we can use it as a new left end point to narrow down the
# interval. If mid point has a positive slope, then we have found a suitable
# right end point to bracket a minima within opposite slopes. Otherwise, the
# mid point has a negative slope but the function value at that point is too
# high to work as left end point, we are in the same situation in which we
# started the loop so we just update the right end point and continue.
to_update = ~(curr.stopped | failed)
update_left = (mid.df < 0) & (mid.f <= f_lim)
left = val_where(to_update & update_left, mid, curr.left)
right = val_where(to_update & ~update_left, mid, curr.right)
# We're done when the right end point has a positive slope.
stopped = curr.stopped | failed | (right.df >= 0)
return [_IntermediateResult(
iteration=curr.iteration,
stopped=stopped,
failed=failed,
num_evals=curr.num_evals + 1,
left=left,
right=right)]
# The interval needs updating if the right end point has a negative slope and
# the value of the function at that point is too high. It is not a valid left
# end point but along with the current left end point, it encloses another
# minima. The loop above tries to narrow the interval so that it satisfies the
# opposite slope conditions.
return tf.while_loop(
cond=_loop_cond, body=_loop_body, loop_vars=[initial_args])[0]
|
[
"Actual",
"implementation",
"of",
"bisect",
"given",
"initial_args",
"in",
"a",
"_BracketResult",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L599-L643
|
[
"def",
"_bisect",
"(",
"value_and_gradients_function",
",",
"initial_args",
",",
"f_lim",
")",
":",
"def",
"_loop_cond",
"(",
"curr",
")",
":",
"# TODO(b/112524024): Also take into account max_iterations.",
"return",
"~",
"tf",
".",
"reduce_all",
"(",
"input_tensor",
"=",
"curr",
".",
"stopped",
")",
"def",
"_loop_body",
"(",
"curr",
")",
":",
"\"\"\"Narrow down interval to satisfy opposite slope conditions.\"\"\"",
"mid",
"=",
"value_and_gradients_function",
"(",
"(",
"curr",
".",
"left",
".",
"x",
"+",
"curr",
".",
"right",
".",
"x",
")",
"/",
"2",
")",
"# Fail if function values at mid point are no longer finite; or left/right",
"# points are so close to it that we can't distinguish them any more.",
"failed",
"=",
"(",
"curr",
".",
"failed",
"|",
"~",
"is_finite",
"(",
"mid",
")",
"|",
"tf",
".",
"equal",
"(",
"mid",
".",
"x",
",",
"curr",
".",
"left",
".",
"x",
")",
"|",
"tf",
".",
"equal",
"(",
"mid",
".",
"x",
",",
"curr",
".",
"right",
".",
"x",
")",
")",
"# If mid point has a negative slope and the function value at that point is",
"# small enough, we can use it as a new left end point to narrow down the",
"# interval. If mid point has a positive slope, then we have found a suitable",
"# right end point to bracket a minima within opposite slopes. Otherwise, the",
"# mid point has a negative slope but the function value at that point is too",
"# high to work as left end point, we are in the same situation in which we",
"# started the loop so we just update the right end point and continue.",
"to_update",
"=",
"~",
"(",
"curr",
".",
"stopped",
"|",
"failed",
")",
"update_left",
"=",
"(",
"mid",
".",
"df",
"<",
"0",
")",
"&",
"(",
"mid",
".",
"f",
"<=",
"f_lim",
")",
"left",
"=",
"val_where",
"(",
"to_update",
"&",
"update_left",
",",
"mid",
",",
"curr",
".",
"left",
")",
"right",
"=",
"val_where",
"(",
"to_update",
"&",
"~",
"update_left",
",",
"mid",
",",
"curr",
".",
"right",
")",
"# We're done when the right end point has a positive slope.",
"stopped",
"=",
"curr",
".",
"stopped",
"|",
"failed",
"|",
"(",
"right",
".",
"df",
">=",
"0",
")",
"return",
"[",
"_IntermediateResult",
"(",
"iteration",
"=",
"curr",
".",
"iteration",
",",
"stopped",
"=",
"stopped",
",",
"failed",
"=",
"failed",
",",
"num_evals",
"=",
"curr",
".",
"num_evals",
"+",
"1",
",",
"left",
"=",
"left",
",",
"right",
"=",
"right",
")",
"]",
"# The interval needs updating if the right end point has a negative slope and",
"# the value of the function at that point is too high. It is not a valid left",
"# end point but along with the current left end point, it encloses another",
"# minima. The loop above tries to narrow the interval so that it satisfies the",
"# opposite slope conditions.",
"return",
"tf",
".",
"while_loop",
"(",
"cond",
"=",
"_loop_cond",
",",
"body",
"=",
"_loop_body",
",",
"loop_vars",
"=",
"[",
"initial_args",
"]",
")",
"[",
"0",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
is_finite
|
Checks if the supplied values are finite.
Args:
val_1: A namedtuple instance with the function value and derivative,
as returned e.g. by value_and_gradients_function evaluations.
val_2: (Optional) A namedtuple instance with the function value and
derivative, as returned e.g. by value_and_gradients_function evaluations.
Returns:
is_finite: Scalar boolean `Tensor` indicating whether the function value
and the derivative in `val_1` (and optionally in `val_2`) are all finite.
|
tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py
|
def is_finite(val_1, val_2=None):
"""Checks if the supplied values are finite.
Args:
val_1: A namedtuple instance with the function value and derivative,
as returned e.g. by value_and_gradients_function evaluations.
val_2: (Optional) A namedtuple instance with the function value and
derivative, as returned e.g. by value_and_gradients_function evaluations.
Returns:
is_finite: Scalar boolean `Tensor` indicating whether the function value
and the derivative in `val_1` (and optionally in `val_2`) are all finite.
"""
val_1_finite = tf.math.is_finite(val_1.f) & tf.math.is_finite(val_1.df)
if val_2 is not None:
return val_1_finite & tf.math.is_finite(val_2.f) & tf.math.is_finite(
val_2.df)
return val_1_finite
|
def is_finite(val_1, val_2=None):
"""Checks if the supplied values are finite.
Args:
val_1: A namedtuple instance with the function value and derivative,
as returned e.g. by value_and_gradients_function evaluations.
val_2: (Optional) A namedtuple instance with the function value and
derivative, as returned e.g. by value_and_gradients_function evaluations.
Returns:
is_finite: Scalar boolean `Tensor` indicating whether the function value
and the derivative in `val_1` (and optionally in `val_2`) are all finite.
"""
val_1_finite = tf.math.is_finite(val_1.f) & tf.math.is_finite(val_1.df)
if val_2 is not None:
return val_1_finite & tf.math.is_finite(val_2.f) & tf.math.is_finite(
val_2.df)
return val_1_finite
|
[
"Checks",
"if",
"the",
"supplied",
"values",
"are",
"finite",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L646-L663
|
[
"def",
"is_finite",
"(",
"val_1",
",",
"val_2",
"=",
"None",
")",
":",
"val_1_finite",
"=",
"tf",
".",
"math",
".",
"is_finite",
"(",
"val_1",
".",
"f",
")",
"&",
"tf",
".",
"math",
".",
"is_finite",
"(",
"val_1",
".",
"df",
")",
"if",
"val_2",
"is",
"not",
"None",
":",
"return",
"val_1_finite",
"&",
"tf",
".",
"math",
".",
"is_finite",
"(",
"val_2",
".",
"f",
")",
"&",
"tf",
".",
"math",
".",
"is_finite",
"(",
"val_2",
".",
"df",
")",
"return",
"val_1_finite"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_satisfies_wolfe
|
Checks whether the Wolfe or approx Wolfe conditions are satisfied.
The Wolfe conditions are a set of stopping criteria for an inexact line search
algorithm. Let f(a) be the function value along the search direction and
df(a) the derivative along the search direction evaluated at a distance 'a'.
Here 'a' is the distance along the search direction. The Wolfe conditions are:
```None
f(a) <= f(0) + delta * a * df(0) (Armijo/Sufficient decrease condition)
df(a) >= sigma * df(0) (Weak curvature condition)
```
`delta` and `sigma` are two user supplied parameters satisfying:
`0 < delta < sigma <= 1.`. In the following, delta is called
`sufficient_decrease_param` and sigma is called `curvature_param`.
On a finite precision machine, the Wolfe conditions are difficult to satisfy
when one is close to the minimum. Hence, Hager-Zhang propose replacing
the sufficient decrease condition with the following condition on the
derivative in the vicinity of a minimum.
```None
df(a) <= (2 * delta - 1) * df(0) (Approx Wolfe sufficient decrease)
```
This condition is only used if one is near the minimum. This is tested using
```None
f(a) <= f(0) + epsilon * |f(0)|
```
The following function checks both the Wolfe and approx Wolfe conditions.
Here, `epsilon` is a small positive constant. In the following, the argument
`f_lim` corresponds to the product: epsilon * |f(0)|.
Args:
val_0: A namedtuple, as returned by value_and_gradients_function
evaluated at 0.
val_c: A namedtuple, as returned by value_and_gradients_function
evaluated at the point to be tested.
f_lim: Scalar `Tensor` of real dtype. The function value threshold for
the approximate Wolfe conditions to be checked.
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to 'delta' in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager Zhang (2005)][1].
Returns:
is_satisfied: A scalar boolean `Tensor` which is True if either the
Wolfe or approximate Wolfe conditions are satisfied.
|
tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py
|
def _satisfies_wolfe(val_0,
val_c,
f_lim,
sufficient_decrease_param,
curvature_param):
"""Checks whether the Wolfe or approx Wolfe conditions are satisfied.
The Wolfe conditions are a set of stopping criteria for an inexact line search
algorithm. Let f(a) be the function value along the search direction and
  df(a) the derivative along the search direction evaluated at a distance 'a'.
Here 'a' is the distance along the search direction. The Wolfe conditions are:
```None
f(a) <= f(0) + delta * a * df(0) (Armijo/Sufficient decrease condition)
df(a) >= sigma * df(0) (Weak curvature condition)
```
`delta` and `sigma` are two user supplied parameters satisfying:
`0 < delta < sigma <= 1.`. In the following, delta is called
`sufficient_decrease_param` and sigma is called `curvature_param`.
On a finite precision machine, the Wolfe conditions are difficult to satisfy
when one is close to the minimum. Hence, Hager-Zhang propose replacing
the sufficient decrease condition with the following condition on the
derivative in the vicinity of a minimum.
```None
df(a) <= (2 * delta - 1) * df(0) (Approx Wolfe sufficient decrease)
```
This condition is only used if one is near the minimum. This is tested using
```None
f(a) <= f(0) + epsilon * |f(0)|
```
The following function checks both the Wolfe and approx Wolfe conditions.
Here, `epsilon` is a small positive constant. In the following, the argument
`f_lim` corresponds to the product: epsilon * |f(0)|.
Args:
val_0: A namedtuple, as returned by value_and_gradients_function
evaluated at 0.
val_c: A namedtuple, as returned by value_and_gradients_function
evaluated at the point to be tested.
f_lim: Scalar `Tensor` of real dtype. The function value threshold for
the approximate Wolfe conditions to be checked.
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to 'delta' in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager Zhang (2005)][1].
Returns:
is_satisfied: A scalar boolean `Tensor` which is True if either the
Wolfe or approximate Wolfe conditions are satisfied.
"""
exact_wolfe_suff_dec = (sufficient_decrease_param * val_0.df >=
(val_c.f - val_0.f) / val_c.x)
wolfe_curvature = val_c.df >= curvature_param * val_0.df
exact_wolfe = exact_wolfe_suff_dec & wolfe_curvature
approx_wolfe_applies = val_c.f <= f_lim
approx_wolfe_suff_dec = ((2 * sufficient_decrease_param - 1) * val_0.df
>= val_c.df)
approx_wolfe = approx_wolfe_applies & approx_wolfe_suff_dec & wolfe_curvature
is_satisfied = exact_wolfe | approx_wolfe
return is_satisfied
|
def _satisfies_wolfe(val_0,
val_c,
f_lim,
sufficient_decrease_param,
curvature_param):
"""Checks whether the Wolfe or approx Wolfe conditions are satisfied.
The Wolfe conditions are a set of stopping criteria for an inexact line search
algorithm. Let f(a) be the function value along the search direction and
  df(a) the derivative along the search direction evaluated at a distance 'a'.
Here 'a' is the distance along the search direction. The Wolfe conditions are:
```None
f(a) <= f(0) + delta * a * df(0) (Armijo/Sufficient decrease condition)
df(a) >= sigma * df(0) (Weak curvature condition)
```
`delta` and `sigma` are two user supplied parameters satisfying:
`0 < delta < sigma <= 1.`. In the following, delta is called
`sufficient_decrease_param` and sigma is called `curvature_param`.
On a finite precision machine, the Wolfe conditions are difficult to satisfy
when one is close to the minimum. Hence, Hager-Zhang propose replacing
the sufficient decrease condition with the following condition on the
derivative in the vicinity of a minimum.
```None
df(a) <= (2 * delta - 1) * df(0) (Approx Wolfe sufficient decrease)
```
This condition is only used if one is near the minimum. This is tested using
```None
f(a) <= f(0) + epsilon * |f(0)|
```
The following function checks both the Wolfe and approx Wolfe conditions.
Here, `epsilon` is a small positive constant. In the following, the argument
`f_lim` corresponds to the product: epsilon * |f(0)|.
Args:
val_0: A namedtuple, as returned by value_and_gradients_function
evaluated at 0.
val_c: A namedtuple, as returned by value_and_gradients_function
evaluated at the point to be tested.
f_lim: Scalar `Tensor` of real dtype. The function value threshold for
the approximate Wolfe conditions to be checked.
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to 'delta' in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager Zhang (2005)][1].
Returns:
is_satisfied: A scalar boolean `Tensor` which is True if either the
Wolfe or approximate Wolfe conditions are satisfied.
"""
exact_wolfe_suff_dec = (sufficient_decrease_param * val_0.df >=
(val_c.f - val_0.f) / val_c.x)
wolfe_curvature = val_c.df >= curvature_param * val_0.df
exact_wolfe = exact_wolfe_suff_dec & wolfe_curvature
approx_wolfe_applies = val_c.f <= f_lim
approx_wolfe_suff_dec = ((2 * sufficient_decrease_param - 1) * val_0.df
>= val_c.df)
approx_wolfe = approx_wolfe_applies & approx_wolfe_suff_dec & wolfe_curvature
is_satisfied = exact_wolfe | approx_wolfe
return is_satisfied
|
[
"Checks",
"whether",
"the",
"Wolfe",
"or",
"approx",
"Wolfe",
"conditions",
"are",
"satisfied",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L666-L730
|
[
"def",
"_satisfies_wolfe",
"(",
"val_0",
",",
"val_c",
",",
"f_lim",
",",
"sufficient_decrease_param",
",",
"curvature_param",
")",
":",
"exact_wolfe_suff_dec",
"=",
"(",
"sufficient_decrease_param",
"*",
"val_0",
".",
"df",
">=",
"(",
"val_c",
".",
"f",
"-",
"val_0",
".",
"f",
")",
"/",
"val_c",
".",
"x",
")",
"wolfe_curvature",
"=",
"val_c",
".",
"df",
">=",
"curvature_param",
"*",
"val_0",
".",
"df",
"exact_wolfe",
"=",
"exact_wolfe_suff_dec",
"&",
"wolfe_curvature",
"approx_wolfe_applies",
"=",
"val_c",
".",
"f",
"<=",
"f_lim",
"approx_wolfe_suff_dec",
"=",
"(",
"(",
"2",
"*",
"sufficient_decrease_param",
"-",
"1",
")",
"*",
"val_0",
".",
"df",
">=",
"val_c",
".",
"df",
")",
"approx_wolfe",
"=",
"approx_wolfe_applies",
"&",
"approx_wolfe_suff_dec",
"&",
"wolfe_curvature",
"is_satisfied",
"=",
"exact_wolfe",
"|",
"approx_wolfe",
"return",
"is_satisfied"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
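The two tests above can be checked numerically. Below is a small plain-Python sketch of the same conditions in their textbook form (equivalent to the slope-based rearrangement used in the record's code); the helper name and the quadratic example are illustrative only.

def satisfies_wolfe(a, f, df, f_lim, delta=0.1, sigma=0.9):
  """Scalar sketch of the exact and approximate Wolfe tests described above."""
  exact_suff_dec = f(a) <= f(0.0) + delta * a * df(0.0)       # Armijo / sufficient decrease.
  curvature = df(a) >= sigma * df(0.0)                        # Weak curvature condition.
  approx_suff_dec = df(a) <= (2.0 * delta - 1.0) * df(0.0)    # Approximate sufficient decrease.
  near_minimum = f(a) <= f_lim                                # Gate for the approximate test.
  return (exact_suff_dec and curvature) or (near_minimum and approx_suff_dec and curvature)

# f(a) = (a - 1)**2: f(0) = 1, df(0) = -2, minimizer at a = 1.
f = lambda a: (a - 1.0) ** 2
df = lambda a: 2.0 * (a - 1.0)
print(satisfies_wolfe(1.0, f, df, f_lim=1.0 + 1e-6))    # True: the minimizer passes both tests.
print(satisfies_wolfe(0.05, f, df, f_lim=1.0 + 1e-6))   # False: step too short, curvature fails.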
|
test
|
_secant
|
Returns the secant interpolation for the minimum.
The secant method is a technique for finding roots of nonlinear functions.
When finding the minimum, one applies the secant method to the derivative
of the function.
For an arbitrary function and a bounding interval, the secant approximation
can produce the next point which is outside the bounding interval. However,
with the assumption of opposite slope condition on the interval [a,b] the new
point c is always bracketed by [a,b]. Note that by assumption,
f'(a) < 0 and f'(b) > 0.
Hence c is a weighted average of a and b and thus always in [a, b].
Args:
val_a: A namedtuple with the left end point, function value and derivative,
of the current interval (i.e. a).
val_b: A namedtuple with the right end point, function value and derivative,
of the current interval (i.e. b).
Returns:
approx_minimum: A scalar real `Tensor`. An approximation to the point
at which the derivative vanishes.
|
tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py
|
def _secant(val_a, val_b):
"""Returns the secant interpolation for the minimum.
The secant method is a technique for finding roots of nonlinear functions.
When finding the minimum, one applies the secant method to the derivative
of the function.
For an arbitrary function and a bounding interval, the secant approximation
can produce the next point which is outside the bounding interval. However,
  with the assumption of opposite slope condition on the interval [a,b] the new
point c is always bracketed by [a,b]. Note that by assumption,
f'(a) < 0 and f'(b) > 0.
Hence c is a weighted average of a and b and thus always in [a, b].
Args:
val_a: A namedtuple with the left end point, function value and derivative,
of the current interval (i.e. a).
val_b: A namedtuple with the right end point, function value and derivative,
of the current interval (i.e. b).
Returns:
approx_minimum: A scalar real `Tensor`. An approximation to the point
at which the derivative vanishes.
"""
return (val_a.x * val_b.df - val_b.x * val_a.df) / (val_b.df - val_a.df)
|
def _secant(val_a, val_b):
"""Returns the secant interpolation for the minimum.
The secant method is a technique for finding roots of nonlinear functions.
When finding the minimum, one applies the secant method to the derivative
of the function.
For an arbitrary function and a bounding interval, the secant approximation
can produce the next point which is outside the bounding interval. However,
  with the assumption of opposite slope condition on the interval [a,b] the new
point c is always bracketed by [a,b]. Note that by assumption,
f'(a) < 0 and f'(b) > 0.
Hence c is a weighted average of a and b and thus always in [a, b].
Args:
val_a: A namedtuple with the left end point, function value and derivative,
of the current interval (i.e. a).
val_b: A namedtuple with the right end point, function value and derivative,
of the current interval (i.e. b).
Returns:
approx_minimum: A scalar real `Tensor`. An approximation to the point
at which the derivative vanishes.
"""
return (val_a.x * val_b.df - val_b.x * val_a.df) / (val_b.df - val_a.df)
|
[
"Returns",
"the",
"secant",
"interpolation",
"for",
"the",
"minimum",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L733-L756
|
[
"def",
"_secant",
"(",
"val_a",
",",
"val_b",
")",
":",
"return",
"(",
"val_a",
".",
"x",
"*",
"val_b",
".",
"df",
"-",
"val_b",
".",
"x",
"*",
"val_a",
".",
"df",
")",
"/",
"(",
"val_b",
".",
"df",
"-",
"val_a",
".",
"df",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
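The secant step is the root of the line through (a, f'(a)) and (b, f'(b)), i.e. c = (a*f'(b) - b*f'(a)) / (f'(b) - f'(a)). A quick numeric check on a quadratic, whose derivative is linear, so the secant step lands exactly on the minimizer; the helper below is illustrative, not the library's `_secant`.

def secant_step(a, dfa, b, dfb):
  """Secant estimate of the point where the derivative vanishes."""
  return (a * dfb - b * dfa) / (dfb - dfa)

# f(x) = (x - 2)**2 gives f'(0) = -4 and f'(5) = 6.
print(secant_step(0.0, -4.0, 5.0, 6.0))  # -> 2.0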
|
test
|
make_simple_step_size_update_policy
|
Create a function implementing a step-size update policy.
The simple policy increases or decreases the `step_size_var` based on the
average of `exp(minimum(0., log_accept_ratio))`. It is based on
[Section 4.2 of Andrieu and Thoms (2008)](
https://people.eecs.berkeley.edu/~jordan/sail/readings/andrieu-thoms.pdf).
The `num_adaptation_steps` argument is set independently of any burnin
for the overall chain. In general, adaptation prevents the chain from
reaching a stationary distribution, so obtaining consistent samples requires
`num_adaptation_steps` be set to a value [somewhat smaller](
http://andrewgelman.com/2017/12/15/burn-vs-warm-iterative-simulation-algorithms/#comment-627745)
than the number of burnin steps. However, it may sometimes be helpful to set
`num_adaptation_steps` to a larger value during development in order to
inspect the behavior of the chain during adaptation.
Args:
  num_adaptation_steps: Scalar `int` `Tensor` number of initial steps
during which to adjust the step size. This may be greater, less than, or
equal to the number of burnin steps. If `None`, the step size is adapted
on every step (note this breaks stationarity of the chain!).
target_rate: Scalar `Tensor` representing desired `accept_ratio`.
Default value: `0.75` (i.e., [center of asymptotically optimal
rate](https://arxiv.org/abs/1411.6669)).
decrement_multiplier: `Tensor` representing amount to downscale current
`step_size`.
Default value: `0.01`.
increment_multiplier: `Tensor` representing amount to upscale current
`step_size`.
Default value: `0.01`.
step_counter: Scalar `int` `Variable` specifying the current step. The step
size is adapted iff `step_counter < num_adaptation_steps`.
Default value: if `None`, an internal variable
`step_size_adaptation_step_counter` is created and initialized to `-1`.
Returns:
step_size_simple_update_fn: Callable that takes args
`step_size_var, kernel_results` and returns updated step size(s).
|
tensorflow_probability/python/mcmc/hmc.py
|
def make_simple_step_size_update_policy(num_adaptation_steps,
target_rate=0.75,
decrement_multiplier=0.01,
increment_multiplier=0.01,
step_counter=None):
"""Create a function implementing a step-size update policy.
The simple policy increases or decreases the `step_size_var` based on the
average of `exp(minimum(0., log_accept_ratio))`. It is based on
[Section 4.2 of Andrieu and Thoms (2008)](
https://people.eecs.berkeley.edu/~jordan/sail/readings/andrieu-thoms.pdf).
The `num_adaptation_steps` argument is set independently of any burnin
for the overall chain. In general, adaptation prevents the chain from
reaching a stationary distribution, so obtaining consistent samples requires
`num_adaptation_steps` be set to a value [somewhat smaller](
http://andrewgelman.com/2017/12/15/burn-vs-warm-iterative-simulation-algorithms/#comment-627745)
than the number of burnin steps. However, it may sometimes be helpful to set
`num_adaptation_steps` to a larger value during development in order to
inspect the behavior of the chain during adaptation.
Args:
    num_adaptation_steps: Scalar `int` `Tensor` number of initial steps
during which to adjust the step size. This may be greater, less than, or
equal to the number of burnin steps. If `None`, the step size is adapted
on every step (note this breaks stationarity of the chain!).
target_rate: Scalar `Tensor` representing desired `accept_ratio`.
Default value: `0.75` (i.e., [center of asymptotically optimal
rate](https://arxiv.org/abs/1411.6669)).
decrement_multiplier: `Tensor` representing amount to downscale current
`step_size`.
Default value: `0.01`.
increment_multiplier: `Tensor` representing amount to upscale current
`step_size`.
Default value: `0.01`.
step_counter: Scalar `int` `Variable` specifying the current step. The step
size is adapted iff `step_counter < num_adaptation_steps`.
Default value: if `None`, an internal variable
`step_size_adaptation_step_counter` is created and initialized to `-1`.
Returns:
step_size_simple_update_fn: Callable that takes args
`step_size_var, kernel_results` and returns updated step size(s).
"""
if step_counter is None and num_adaptation_steps is not None:
step_counter = tf.compat.v1.get_variable(
name='step_size_adaptation_step_counter',
initializer=np.array(-1, dtype=np.int32),
# Specify the dtype for variable sharing to work correctly
# (b/120599991).
dtype=tf.int32,
trainable=False,
use_resource=True)
def step_size_simple_update_fn(step_size_var, kernel_results):
"""Updates (list of) `step_size` using a standard adaptive MCMC procedure.
Args:
step_size_var: (List of) `tf.Variable`s representing the per `state_part`
HMC `step_size`.
kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from most recent call to `one_step`.
Returns:
step_size_assign: (List of) `Tensor`(s) representing updated
`step_size_var`(s).
"""
if kernel_results is None:
if mcmc_util.is_list_like(step_size_var):
return [tf.identity(ss) for ss in step_size_var]
return tf.identity(step_size_var)
log_n = tf.math.log(
tf.cast(
tf.size(input=kernel_results.log_accept_ratio),
kernel_results.log_accept_ratio.dtype))
log_mean_accept_ratio = tf.reduce_logsumexp(
input_tensor=tf.minimum(kernel_results.log_accept_ratio, 0.)) - log_n
adjustment = tf.where(
log_mean_accept_ratio < tf.cast(
tf.math.log(target_rate), log_mean_accept_ratio.dtype),
-decrement_multiplier / (1. + decrement_multiplier),
increment_multiplier)
def build_assign_op():
if mcmc_util.is_list_like(step_size_var):
return [
ss.assign_add(ss * tf.cast(adjustment, ss.dtype))
for ss in step_size_var
]
return step_size_var.assign_add(
step_size_var * tf.cast(adjustment, step_size_var.dtype))
if num_adaptation_steps is None:
return build_assign_op()
else:
with tf.control_dependencies([step_counter.assign_add(1)]):
return tf.cond(
pred=step_counter < num_adaptation_steps,
true_fn=build_assign_op,
false_fn=lambda: step_size_var)
return step_size_simple_update_fn
|
def make_simple_step_size_update_policy(num_adaptation_steps,
target_rate=0.75,
decrement_multiplier=0.01,
increment_multiplier=0.01,
step_counter=None):
"""Create a function implementing a step-size update policy.
The simple policy increases or decreases the `step_size_var` based on the
average of `exp(minimum(0., log_accept_ratio))`. It is based on
[Section 4.2 of Andrieu and Thoms (2008)](
https://people.eecs.berkeley.edu/~jordan/sail/readings/andrieu-thoms.pdf).
The `num_adaptation_steps` argument is set independently of any burnin
for the overall chain. In general, adaptation prevents the chain from
reaching a stationary distribution, so obtaining consistent samples requires
`num_adaptation_steps` be set to a value [somewhat smaller](
http://andrewgelman.com/2017/12/15/burn-vs-warm-iterative-simulation-algorithms/#comment-627745)
than the number of burnin steps. However, it may sometimes be helpful to set
`num_adaptation_steps` to a larger value during development in order to
inspect the behavior of the chain during adaptation.
Args:
    num_adaptation_steps: Scalar `int` `Tensor` number of initial steps
during which to adjust the step size. This may be greater, less than, or
equal to the number of burnin steps. If `None`, the step size is adapted
on every step (note this breaks stationarity of the chain!).
target_rate: Scalar `Tensor` representing desired `accept_ratio`.
Default value: `0.75` (i.e., [center of asymptotically optimal
rate](https://arxiv.org/abs/1411.6669)).
decrement_multiplier: `Tensor` representing amount to downscale current
`step_size`.
Default value: `0.01`.
increment_multiplier: `Tensor` representing amount to upscale current
`step_size`.
Default value: `0.01`.
step_counter: Scalar `int` `Variable` specifying the current step. The step
size is adapted iff `step_counter < num_adaptation_steps`.
Default value: if `None`, an internal variable
`step_size_adaptation_step_counter` is created and initialized to `-1`.
Returns:
step_size_simple_update_fn: Callable that takes args
`step_size_var, kernel_results` and returns updated step size(s).
"""
if step_counter is None and num_adaptation_steps is not None:
step_counter = tf.compat.v1.get_variable(
name='step_size_adaptation_step_counter',
initializer=np.array(-1, dtype=np.int32),
# Specify the dtype for variable sharing to work correctly
# (b/120599991).
dtype=tf.int32,
trainable=False,
use_resource=True)
def step_size_simple_update_fn(step_size_var, kernel_results):
"""Updates (list of) `step_size` using a standard adaptive MCMC procedure.
Args:
step_size_var: (List of) `tf.Variable`s representing the per `state_part`
HMC `step_size`.
kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from most recent call to `one_step`.
Returns:
step_size_assign: (List of) `Tensor`(s) representing updated
`step_size_var`(s).
"""
if kernel_results is None:
if mcmc_util.is_list_like(step_size_var):
return [tf.identity(ss) for ss in step_size_var]
return tf.identity(step_size_var)
log_n = tf.math.log(
tf.cast(
tf.size(input=kernel_results.log_accept_ratio),
kernel_results.log_accept_ratio.dtype))
log_mean_accept_ratio = tf.reduce_logsumexp(
input_tensor=tf.minimum(kernel_results.log_accept_ratio, 0.)) - log_n
adjustment = tf.where(
log_mean_accept_ratio < tf.cast(
tf.math.log(target_rate), log_mean_accept_ratio.dtype),
-decrement_multiplier / (1. + decrement_multiplier),
increment_multiplier)
def build_assign_op():
if mcmc_util.is_list_like(step_size_var):
return [
ss.assign_add(ss * tf.cast(adjustment, ss.dtype))
for ss in step_size_var
]
return step_size_var.assign_add(
step_size_var * tf.cast(adjustment, step_size_var.dtype))
if num_adaptation_steps is None:
return build_assign_op()
else:
with tf.control_dependencies([step_counter.assign_add(1)]):
return tf.cond(
pred=step_counter < num_adaptation_steps,
true_fn=build_assign_op,
false_fn=lambda: step_size_var)
return step_size_simple_update_fn
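A minimal usage sketch (editor's addition, not part of the source above), assuming TF1-style graph execution and the TFP version documented here, where `HamiltonianMonteCarlo` accepts a `step_size_update_fn` and the step size is a non-trainable variable; the target distribution and all sizes below are placeholders:

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# Hypothetical target: a 2-dimensional standard normal.
target = tfd.MultivariateNormalDiag(loc=tf.zeros(2))

# The step size must be a variable so the update policy can assign to it.
step_size = tf.compat.v1.get_variable(
    name='step_size', initializer=0.1, trainable=False, use_resource=True)

hmc = tfp.mcmc.HamiltonianMonteCarlo(
    target_log_prob_fn=target.log_prob,
    step_size=step_size,
    num_leapfrog_steps=3,
    step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(
        num_adaptation_steps=500))

# Adaptation (500 steps) ends before burnin (600 steps), per the note above.
samples, kernel_results = tfp.mcmc.sample_chain(
    num_results=1000,
    num_burnin_steps=600,
    current_state=tf.zeros(2),
    kernel=hmc)
```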
|
[
"Create",
"a",
"function",
"implementing",
"a",
"step",
"-",
"size",
"update",
"policy",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/hmc.py#L60-L162
|
[
"def",
"make_simple_step_size_update_policy",
"(",
"num_adaptation_steps",
",",
"target_rate",
"=",
"0.75",
",",
"decrement_multiplier",
"=",
"0.01",
",",
"increment_multiplier",
"=",
"0.01",
",",
"step_counter",
"=",
"None",
")",
":",
"if",
"step_counter",
"is",
"None",
"and",
"num_adaptation_steps",
"is",
"not",
"None",
":",
"step_counter",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"get_variable",
"(",
"name",
"=",
"'step_size_adaptation_step_counter'",
",",
"initializer",
"=",
"np",
".",
"array",
"(",
"-",
"1",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
",",
"# Specify the dtype for variable sharing to work correctly",
"# (b/120599991).",
"dtype",
"=",
"tf",
".",
"int32",
",",
"trainable",
"=",
"False",
",",
"use_resource",
"=",
"True",
")",
"def",
"step_size_simple_update_fn",
"(",
"step_size_var",
",",
"kernel_results",
")",
":",
"\"\"\"Updates (list of) `step_size` using a standard adaptive MCMC procedure.\n\n Args:\n step_size_var: (List of) `tf.Variable`s representing the per `state_part`\n HMC `step_size`.\n kernel_results: `collections.namedtuple` containing `Tensor`s\n representing values from most recent call to `one_step`.\n\n Returns:\n step_size_assign: (List of) `Tensor`(s) representing updated\n `step_size_var`(s).\n \"\"\"",
"if",
"kernel_results",
"is",
"None",
":",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"step_size_var",
")",
":",
"return",
"[",
"tf",
".",
"identity",
"(",
"ss",
")",
"for",
"ss",
"in",
"step_size_var",
"]",
"return",
"tf",
".",
"identity",
"(",
"step_size_var",
")",
"log_n",
"=",
"tf",
".",
"math",
".",
"log",
"(",
"tf",
".",
"cast",
"(",
"tf",
".",
"size",
"(",
"input",
"=",
"kernel_results",
".",
"log_accept_ratio",
")",
",",
"kernel_results",
".",
"log_accept_ratio",
".",
"dtype",
")",
")",
"log_mean_accept_ratio",
"=",
"tf",
".",
"reduce_logsumexp",
"(",
"input_tensor",
"=",
"tf",
".",
"minimum",
"(",
"kernel_results",
".",
"log_accept_ratio",
",",
"0.",
")",
")",
"-",
"log_n",
"adjustment",
"=",
"tf",
".",
"where",
"(",
"log_mean_accept_ratio",
"<",
"tf",
".",
"cast",
"(",
"tf",
".",
"math",
".",
"log",
"(",
"target_rate",
")",
",",
"log_mean_accept_ratio",
".",
"dtype",
")",
",",
"-",
"decrement_multiplier",
"/",
"(",
"1.",
"+",
"decrement_multiplier",
")",
",",
"increment_multiplier",
")",
"def",
"build_assign_op",
"(",
")",
":",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"step_size_var",
")",
":",
"return",
"[",
"ss",
".",
"assign_add",
"(",
"ss",
"*",
"tf",
".",
"cast",
"(",
"adjustment",
",",
"ss",
".",
"dtype",
")",
")",
"for",
"ss",
"in",
"step_size_var",
"]",
"return",
"step_size_var",
".",
"assign_add",
"(",
"step_size_var",
"*",
"tf",
".",
"cast",
"(",
"adjustment",
",",
"step_size_var",
".",
"dtype",
")",
")",
"if",
"num_adaptation_steps",
"is",
"None",
":",
"return",
"build_assign_op",
"(",
")",
"else",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"step_counter",
".",
"assign_add",
"(",
"1",
")",
"]",
")",
":",
"return",
"tf",
".",
"cond",
"(",
"pred",
"=",
"step_counter",
"<",
"num_adaptation_steps",
",",
"true_fn",
"=",
"build_assign_op",
",",
"false_fn",
"=",
"lambda",
":",
"step_size_var",
")",
"return",
"step_size_simple_update_fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_leapfrog_integrator_one_step
|
Applies `num_leapfrog_steps` of the leapfrog integrator.
Assumes a simple quadratic kinetic energy function: `0.5 ||momentum||**2`.
#### Examples:
##### Simple quadratic potential.
```python
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.mcmc.hmc import _leapfrog_integrator_one_step # pylint: disable=line-too-long
import tensorflow_probability as tfp
tfd = tfp.distributions
dims = 10
num_iter = int(1e3)
dtype = np.float32
position = tf.placeholder(np.float32)
momentum = tf.placeholder(np.float32)
target_log_prob_fn = tfd.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype)).log_prob
def _leapfrog_one_step(*args):
# Closure representing computation done during each leapfrog step.
return _leapfrog_integrator_one_step(
target_log_prob_fn=target_log_prob_fn,
independent_chain_ndims=0,
step_sizes=[0.1],
current_momentum_parts=args[0],
current_state_parts=args[1],
current_target_log_prob=args[2],
current_target_log_prob_grad_parts=args[3])
# Do leapfrog integration.
[
[next_momentum],
[next_position],
next_target_log_prob,
next_target_log_prob_grad_parts,
] = tf.while_loop(
cond=lambda *args: True,
body=_leapfrog_one_step,
loop_vars=[
[momentum],
[position],
target_log_prob_fn(position),
tf.gradients(target_log_prob_fn(position), position),
],
maximum_iterations=3)
momentum_ = np.random.randn(dims).astype(dtype)
position_ = np.random.randn(dims).astype(dtype)
positions = np.zeros([num_iter, dims], dtype)
with tf.Session() as sess:
  for i in range(num_iter):
    momentum_, position_ = sess.run(
[next_momentum, next_position],
feed_dict={position: position_, momentum: momentum_})
positions[i] = position_
plt.plot(positions[:, 0]); # Sinusoidal.
```
Args:
target_log_prob_fn: Python callable which takes an argument like
`*current_state_parts` and returns its (possibly unnormalized) log-density
under the target distribution.
independent_chain_ndims: Scalar `int` `Tensor` representing the number of
leftmost `Tensor` dimensions which index independent chains.
step_sizes: Python `list` of `Tensor`s representing the step size for the
leapfrog integrator. Must broadcast with the shape of
`current_state_parts`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely. When
possible, it's often helpful to match per-variable step sizes to the
standard deviations of the target distribution in each variable.
current_momentum_parts: Tensor containing the value(s) of the momentum
variable(s) to update.
current_state_parts: Python `list` of `Tensor`s representing the current
state(s) of the Markov chain(s). The first `independent_chain_ndims` of
the `Tensor`(s) index different chains.
current_target_log_prob: `Tensor` representing the value of
`target_log_prob_fn(*current_state_parts)`. The only reason to specify
this argument is to reduce TF graph size.
current_target_log_prob_grad_parts: Python list of `Tensor`s representing
    gradient of `target_log_prob_fn(*current_state_parts)` wrt
`current_state_parts`. Must have same shape as `current_state_parts`. The
only reason to specify this argument is to reduce TF graph size.
state_gradients_are_stopped: Python `bool` indicating that the proposed new
state be run through `tf.stop_gradient`. This is particularly useful when
combining optimization over samples from the HMC chain.
Default value: `False` (i.e., do not apply `stop_gradient`).
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'hmc_leapfrog_integrator').
Returns:
proposed_momentum_parts: Updated value of the momentum.
proposed_state_parts: Tensor or Python list of `Tensor`s representing the
state(s) of the Markov chain(s) at each result step. Has same shape as
input `current_state_parts`.
proposed_target_log_prob: `Tensor` representing the value of
`target_log_prob_fn` at `next_state`.
proposed_target_log_prob_grad_parts: Gradient of `proposed_target_log_prob`
wrt `next_state`.
Raises:
ValueError: if `len(momentum_parts) != len(state_parts)`.
ValueError: if `len(state_parts) != len(step_sizes)`.
ValueError: if `len(state_parts) != len(grads_target_log_prob)`.
TypeError: if `not target_log_prob.dtype.is_floating`.
|
tensorflow_probability/python/mcmc/hmc.py
|
def _leapfrog_integrator_one_step(
target_log_prob_fn,
independent_chain_ndims,
step_sizes,
current_momentum_parts,
current_state_parts,
current_target_log_prob,
current_target_log_prob_grad_parts,
state_gradients_are_stopped=False,
name=None):
"""Applies `num_leapfrog_steps` of the leapfrog integrator.
Assumes a simple quadratic kinetic energy function: `0.5 ||momentum||**2`.
#### Examples:
##### Simple quadratic potential.
```python
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.mcmc.hmc import _leapfrog_integrator_one_step # pylint: disable=line-too-long
import tensorflow_probability as tfp
tfd = tfp.distributions
dims = 10
num_iter = int(1e3)
dtype = np.float32
position = tf.placeholder(np.float32)
momentum = tf.placeholder(np.float32)
target_log_prob_fn = tfd.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype)).log_prob
def _leapfrog_one_step(*args):
# Closure representing computation done during each leapfrog step.
return _leapfrog_integrator_one_step(
target_log_prob_fn=target_log_prob_fn,
independent_chain_ndims=0,
step_sizes=[0.1],
current_momentum_parts=args[0],
current_state_parts=args[1],
current_target_log_prob=args[2],
current_target_log_prob_grad_parts=args[3])
# Do leapfrog integration.
[
[next_momentum],
[next_position],
next_target_log_prob,
next_target_log_prob_grad_parts,
] = tf.while_loop(
cond=lambda *args: True,
body=_leapfrog_one_step,
loop_vars=[
[momentum],
[position],
target_log_prob_fn(position),
tf.gradients(target_log_prob_fn(position), position),
],
maximum_iterations=3)
momentum_ = np.random.randn(dims).astype(dtype)
position_ = np.random.randn(dims).astype(dtype)
positions = np.zeros([num_iter, dims], dtype)
with tf.Session() as sess:
  for i in range(num_iter):
    momentum_, position_ = sess.run(
[next_momentum, next_position],
feed_dict={position: position_, momentum: momentum_})
positions[i] = position_
plt.plot(positions[:, 0]); # Sinusoidal.
```
Args:
target_log_prob_fn: Python callable which takes an argument like
`*current_state_parts` and returns its (possibly unnormalized) log-density
under the target distribution.
independent_chain_ndims: Scalar `int` `Tensor` representing the number of
leftmost `Tensor` dimensions which index independent chains.
step_sizes: Python `list` of `Tensor`s representing the step size for the
leapfrog integrator. Must broadcast with the shape of
`current_state_parts`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely. When
possible, it's often helpful to match per-variable step sizes to the
standard deviations of the target distribution in each variable.
current_momentum_parts: Tensor containing the value(s) of the momentum
variable(s) to update.
current_state_parts: Python `list` of `Tensor`s representing the current
state(s) of the Markov chain(s). The first `independent_chain_ndims` of
the `Tensor`(s) index different chains.
current_target_log_prob: `Tensor` representing the value of
`target_log_prob_fn(*current_state_parts)`. The only reason to specify
this argument is to reduce TF graph size.
current_target_log_prob_grad_parts: Python list of `Tensor`s representing
    gradient of `target_log_prob_fn(*current_state_parts)` wrt
`current_state_parts`. Must have same shape as `current_state_parts`. The
only reason to specify this argument is to reduce TF graph size.
state_gradients_are_stopped: Python `bool` indicating that the proposed new
state be run through `tf.stop_gradient`. This is particularly useful when
combining optimization over samples from the HMC chain.
Default value: `False` (i.e., do not apply `stop_gradient`).
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'hmc_leapfrog_integrator').
Returns:
proposed_momentum_parts: Updated value of the momentum.
proposed_state_parts: Tensor or Python list of `Tensor`s representing the
state(s) of the Markov chain(s) at each result step. Has same shape as
input `current_state_parts`.
proposed_target_log_prob: `Tensor` representing the value of
`target_log_prob_fn` at `next_state`.
proposed_target_log_prob_grad_parts: Gradient of `proposed_target_log_prob`
wrt `next_state`.
Raises:
ValueError: if `len(momentum_parts) != len(state_parts)`.
ValueError: if `len(state_parts) != len(step_sizes)`.
ValueError: if `len(state_parts) != len(grads_target_log_prob)`.
TypeError: if `not target_log_prob.dtype.is_floating`.
"""
# Note on per-variable step sizes:
#
# Using per-variable step sizes is equivalent to using the same step
# size for all variables and adding a diagonal mass matrix in the
# kinetic energy term of the Hamiltonian being integrated. This is
# hinted at by Neal (2011) but not derived in detail there.
#
# Let x and v be position and momentum variables respectively.
# Let g(x) be the gradient of `target_log_prob_fn(x)`.
# Let S be a diagonal matrix of per-variable step sizes.
# Let the Hamiltonian H(x, v) = -target_log_prob_fn(x) + 0.5 * ||v||**2.
#
# Using per-variable step sizes gives the updates
# v' = v + 0.5 * matmul(S, g(x))
# x'' = x + matmul(S, v')
# v'' = v' + 0.5 * matmul(S, g(x''))
#
# Let u = matmul(inv(S), v).
# Multiplying v by inv(S) in the updates above gives the transformed dynamics
# u' = matmul(inv(S), v') = matmul(inv(S), v) + 0.5 * g(x)
# = u + 0.5 * g(x)
# x'' = x + matmul(S, v') = x + matmul(S**2, u')
# u'' = matmul(inv(S), v'') = matmul(inv(S), v') + 0.5 * g(x'')
# = u' + 0.5 * g(x'')
#
# These are exactly the leapfrog updates for the Hamiltonian
# H'(x, u) = -target_log_prob_fn(x) + 0.5 * u^T S**2 u
# = -target_log_prob_fn(x) + 0.5 * ||v||**2 = H(x, v).
#
# To summarize:
#
# * Using per-variable step sizes implicitly simulates the dynamics
# of the Hamiltonian H' (which are energy-conserving in H'). We
# keep track of v instead of u, but the underlying dynamics are
# the same if we transform back.
# * The value of the Hamiltonian H'(x, u) is the same as the value
# of the original Hamiltonian H(x, v) after we transform back from
# u to v.
# * Sampling v ~ N(0, I) is equivalent to sampling u ~ N(0, S**-2).
#
# So using per-variable step sizes in HMC will give results that are
# exactly identical to explicitly using a diagonal mass matrix.
with tf.compat.v1.name_scope(name, 'hmc_leapfrog_integrator_one_step', [
independent_chain_ndims, step_sizes, current_momentum_parts,
current_state_parts, current_target_log_prob,
current_target_log_prob_grad_parts
]):
# Step 1: Update momentum.
proposed_momentum_parts = [
v + 0.5 * tf.cast(eps, v.dtype) * g
for v, eps, g
in zip(current_momentum_parts,
step_sizes,
current_target_log_prob_grad_parts)]
# Step 2: Update state.
proposed_state_parts = [
x + tf.cast(eps, v.dtype) * v
for x, eps, v
in zip(current_state_parts,
step_sizes,
proposed_momentum_parts)]
if state_gradients_are_stopped:
proposed_state_parts = [tf.stop_gradient(x) for x in proposed_state_parts]
# Step 3a: Re-evaluate target-log-prob (and grad) at proposed state.
[
proposed_target_log_prob,
proposed_target_log_prob_grad_parts,
] = mcmc_util.maybe_call_fn_and_grads(
target_log_prob_fn,
proposed_state_parts)
if not proposed_target_log_prob.dtype.is_floating:
raise TypeError('`target_log_prob_fn` must produce a `Tensor` '
'with `float` `dtype`.')
if any(g is None for g in proposed_target_log_prob_grad_parts):
raise ValueError(
'Encountered `None` gradient. Does your target `target_log_prob_fn` '
'access all `tf.Variable`s via `tf.get_variable`?\n'
' current_state_parts: {}\n'
' proposed_state_parts: {}\n'
' proposed_target_log_prob_grad_parts: {}'.format(
current_state_parts,
proposed_state_parts,
proposed_target_log_prob_grad_parts))
# Step 3b: Update momentum (again).
proposed_momentum_parts = [
v + 0.5 * tf.cast(eps, v.dtype) * g
for v, eps, g
in zip(proposed_momentum_parts,
step_sizes,
proposed_target_log_prob_grad_parts)]
return [
proposed_momentum_parts,
proposed_state_parts,
proposed_target_log_prob,
proposed_target_log_prob_grad_parts,
]
|
def _leapfrog_integrator_one_step(
target_log_prob_fn,
independent_chain_ndims,
step_sizes,
current_momentum_parts,
current_state_parts,
current_target_log_prob,
current_target_log_prob_grad_parts,
state_gradients_are_stopped=False,
name=None):
"""Applies `num_leapfrog_steps` of the leapfrog integrator.
Assumes a simple quadratic kinetic energy function: `0.5 ||momentum||**2`.
#### Examples:
##### Simple quadratic potential.
```python
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.mcmc.hmc import _leapfrog_integrator_one_step # pylint: disable=line-too-long
import tensorflow_probability as tfp
tfd = tfp.distributions
dims = 10
num_iter = int(1e3)
dtype = np.float32
position = tf.placeholder(np.float32)
momentum = tf.placeholder(np.float32)
target_log_prob_fn = tfd.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype)).log_prob
def _leapfrog_one_step(*args):
# Closure representing computation done during each leapfrog step.
return _leapfrog_integrator_one_step(
target_log_prob_fn=target_log_prob_fn,
independent_chain_ndims=0,
step_sizes=[0.1],
current_momentum_parts=args[0],
current_state_parts=args[1],
current_target_log_prob=args[2],
current_target_log_prob_grad_parts=args[3])
# Do leapfrog integration.
[
[next_momentum],
[next_position],
next_target_log_prob,
next_target_log_prob_grad_parts,
] = tf.while_loop(
cond=lambda *args: True,
body=_leapfrog_one_step,
loop_vars=[
[momentum],
[position],
target_log_prob_fn(position),
tf.gradients(target_log_prob_fn(position), position),
],
maximum_iterations=3)
momentum_ = np.random.randn(dims).astype(dtype)
position_ = np.random.randn(dims).astype(dtype)
positions = np.zeros([num_iter, dims], dtype)
with tf.Session() as sess:
  for i in range(num_iter):
    momentum_, position_ = sess.run(
[next_momentum, next_position],
feed_dict={position: position_, momentum: momentum_})
positions[i] = position_
plt.plot(positions[:, 0]); # Sinusoidal.
```
Args:
target_log_prob_fn: Python callable which takes an argument like
`*current_state_parts` and returns its (possibly unnormalized) log-density
under the target distribution.
independent_chain_ndims: Scalar `int` `Tensor` representing the number of
leftmost `Tensor` dimensions which index independent chains.
step_sizes: Python `list` of `Tensor`s representing the step size for the
leapfrog integrator. Must broadcast with the shape of
`current_state_parts`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely. When
possible, it's often helpful to match per-variable step sizes to the
standard deviations of the target distribution in each variable.
current_momentum_parts: Tensor containing the value(s) of the momentum
variable(s) to update.
current_state_parts: Python `list` of `Tensor`s representing the current
state(s) of the Markov chain(s). The first `independent_chain_ndims` of
the `Tensor`(s) index different chains.
current_target_log_prob: `Tensor` representing the value of
`target_log_prob_fn(*current_state_parts)`. The only reason to specify
this argument is to reduce TF graph size.
current_target_log_prob_grad_parts: Python list of `Tensor`s representing
    gradient of `target_log_prob_fn(*current_state_parts)` wrt
`current_state_parts`. Must have same shape as `current_state_parts`. The
only reason to specify this argument is to reduce TF graph size.
state_gradients_are_stopped: Python `bool` indicating that the proposed new
state be run through `tf.stop_gradient`. This is particularly useful when
combining optimization over samples from the HMC chain.
Default value: `False` (i.e., do not apply `stop_gradient`).
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'hmc_leapfrog_integrator').
Returns:
proposed_momentum_parts: Updated value of the momentum.
proposed_state_parts: Tensor or Python list of `Tensor`s representing the
state(s) of the Markov chain(s) at each result step. Has same shape as
input `current_state_parts`.
proposed_target_log_prob: `Tensor` representing the value of
`target_log_prob_fn` at `next_state`.
proposed_target_log_prob_grad_parts: Gradient of `proposed_target_log_prob`
wrt `next_state`.
Raises:
ValueError: if `len(momentum_parts) != len(state_parts)`.
ValueError: if `len(state_parts) != len(step_sizes)`.
ValueError: if `len(state_parts) != len(grads_target_log_prob)`.
TypeError: if `not target_log_prob.dtype.is_floating`.
"""
# Note on per-variable step sizes:
#
# Using per-variable step sizes is equivalent to using the same step
# size for all variables and adding a diagonal mass matrix in the
# kinetic energy term of the Hamiltonian being integrated. This is
# hinted at by Neal (2011) but not derived in detail there.
#
# Let x and v be position and momentum variables respectively.
# Let g(x) be the gradient of `target_log_prob_fn(x)`.
# Let S be a diagonal matrix of per-variable step sizes.
# Let the Hamiltonian H(x, v) = -target_log_prob_fn(x) + 0.5 * ||v||**2.
#
# Using per-variable step sizes gives the updates
# v' = v + 0.5 * matmul(S, g(x))
# x'' = x + matmul(S, v')
# v'' = v' + 0.5 * matmul(S, g(x''))
#
# Let u = matmul(inv(S), v).
# Multiplying v by inv(S) in the updates above gives the transformed dynamics
# u' = matmul(inv(S), v') = matmul(inv(S), v) + 0.5 * g(x)
# = u + 0.5 * g(x)
# x'' = x + matmul(S, v') = x + matmul(S**2, u')
# u'' = matmul(inv(S), v'') = matmul(inv(S), v') + 0.5 * g(x'')
# = u' + 0.5 * g(x'')
#
# These are exactly the leapfrog updates for the Hamiltonian
# H'(x, u) = -target_log_prob_fn(x) + 0.5 * u^T S**2 u
# = -target_log_prob_fn(x) + 0.5 * ||v||**2 = H(x, v).
#
# To summarize:
#
# * Using per-variable step sizes implicitly simulates the dynamics
# of the Hamiltonian H' (which are energy-conserving in H'). We
# keep track of v instead of u, but the underlying dynamics are
# the same if we transform back.
# * The value of the Hamiltonian H'(x, u) is the same as the value
# of the original Hamiltonian H(x, v) after we transform back from
# u to v.
# * Sampling v ~ N(0, I) is equivalent to sampling u ~ N(0, S**-2).
#
# So using per-variable step sizes in HMC will give results that are
# exactly identical to explicitly using a diagonal mass matrix.
with tf.compat.v1.name_scope(name, 'hmc_leapfrog_integrator_one_step', [
independent_chain_ndims, step_sizes, current_momentum_parts,
current_state_parts, current_target_log_prob,
current_target_log_prob_grad_parts
]):
# Step 1: Update momentum.
proposed_momentum_parts = [
v + 0.5 * tf.cast(eps, v.dtype) * g
for v, eps, g
in zip(current_momentum_parts,
step_sizes,
current_target_log_prob_grad_parts)]
# Step 2: Update state.
proposed_state_parts = [
x + tf.cast(eps, v.dtype) * v
for x, eps, v
in zip(current_state_parts,
step_sizes,
proposed_momentum_parts)]
if state_gradients_are_stopped:
proposed_state_parts = [tf.stop_gradient(x) for x in proposed_state_parts]
# Step 3a: Re-evaluate target-log-prob (and grad) at proposed state.
[
proposed_target_log_prob,
proposed_target_log_prob_grad_parts,
] = mcmc_util.maybe_call_fn_and_grads(
target_log_prob_fn,
proposed_state_parts)
if not proposed_target_log_prob.dtype.is_floating:
raise TypeError('`target_log_prob_fn` must produce a `Tensor` '
'with `float` `dtype`.')
if any(g is None for g in proposed_target_log_prob_grad_parts):
raise ValueError(
'Encountered `None` gradient. Does your target `target_log_prob_fn` '
'access all `tf.Variable`s via `tf.get_variable`?\n'
' current_state_parts: {}\n'
' proposed_state_parts: {}\n'
' proposed_target_log_prob_grad_parts: {}'.format(
current_state_parts,
proposed_state_parts,
proposed_target_log_prob_grad_parts))
# Step 3b: Update momentum (again).
proposed_momentum_parts = [
v + 0.5 * tf.cast(eps, v.dtype) * g
for v, eps, g
in zip(proposed_momentum_parts,
step_sizes,
proposed_target_log_prob_grad_parts)]
return [
proposed_momentum_parts,
proposed_state_parts,
proposed_target_log_prob,
proposed_target_log_prob_grad_parts,
]
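A small NumPy check (editor's addition, not from the source) of the per-variable step-size derivation in the comments above: one update with per-variable step sizes `s` equals one unit-step leapfrog update on the transformed momentum `u = v / s` under the Hamiltonian `H'(x, u)` described there. The quadratic target below is an assumption for illustration.

```python
import numpy as np

np.random.seed(0)
step_sizes = np.array([0.1, 0.2, 0.05, 0.3])   # diagonal of S
grad = lambda x: -x                            # gradient of a standard-normal log-density

x = np.random.randn(4)
v = np.random.randn(4)

# Per-variable step sizes, as implemented in the function above.
v1 = v + 0.5 * step_sizes * grad(x)
x1 = x + step_sizes * v1
v2 = v1 + 0.5 * step_sizes * grad(x1)

# The same move in the transformed momentum u = v / s: a unit-step leapfrog
# update for H'(x, u) = -target_log_prob(x) + 0.5 * u^T S**2 u.
u = v / step_sizes
u1 = u + 0.5 * grad(x)
y1 = x + step_sizes**2 * u1
u2 = u1 + 0.5 * grad(y1)

assert np.allclose(x1, y1)                     # same position update
assert np.allclose(v2, step_sizes * u2)        # same momentum, transformed back
```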
|
[
"Applies",
"num_leapfrog_steps",
"of",
"the",
"leapfrog",
"integrator",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/hmc.py#L820-L1049
|
[
"def",
"_leapfrog_integrator_one_step",
"(",
"target_log_prob_fn",
",",
"independent_chain_ndims",
",",
"step_sizes",
",",
"current_momentum_parts",
",",
"current_state_parts",
",",
"current_target_log_prob",
",",
"current_target_log_prob_grad_parts",
",",
"state_gradients_are_stopped",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"# Note on per-variable step sizes:",
"#",
"# Using per-variable step sizes is equivalent to using the same step",
"# size for all variables and adding a diagonal mass matrix in the",
"# kinetic energy term of the Hamiltonian being integrated. This is",
"# hinted at by Neal (2011) but not derived in detail there.",
"#",
"# Let x and v be position and momentum variables respectively.",
"# Let g(x) be the gradient of `target_log_prob_fn(x)`.",
"# Let S be a diagonal matrix of per-variable step sizes.",
"# Let the Hamiltonian H(x, v) = -target_log_prob_fn(x) + 0.5 * ||v||**2.",
"#",
"# Using per-variable step sizes gives the updates",
"# v' = v + 0.5 * matmul(S, g(x))",
"# x'' = x + matmul(S, v')",
"# v'' = v' + 0.5 * matmul(S, g(x''))",
"#",
"# Let u = matmul(inv(S), v).",
"# Multiplying v by inv(S) in the updates above gives the transformed dynamics",
"# u' = matmul(inv(S), v') = matmul(inv(S), v) + 0.5 * g(x)",
"# = u + 0.5 * g(x)",
"# x'' = x + matmul(S, v') = x + matmul(S**2, u')",
"# u'' = matmul(inv(S), v'') = matmul(inv(S), v') + 0.5 * g(x'')",
"# = u' + 0.5 * g(x'')",
"#",
"# These are exactly the leapfrog updates for the Hamiltonian",
"# H'(x, u) = -target_log_prob_fn(x) + 0.5 * u^T S**2 u",
"# = -target_log_prob_fn(x) + 0.5 * ||v||**2 = H(x, v).",
"#",
"# To summarize:",
"#",
"# * Using per-variable step sizes implicitly simulates the dynamics",
"# of the Hamiltonian H' (which are energy-conserving in H'). We",
"# keep track of v instead of u, but the underlying dynamics are",
"# the same if we transform back.",
"# * The value of the Hamiltonian H'(x, u) is the same as the value",
"# of the original Hamiltonian H(x, v) after we transform back from",
"# u to v.",
"# * Sampling v ~ N(0, I) is equivalent to sampling u ~ N(0, S**-2).",
"#",
"# So using per-variable step sizes in HMC will give results that are",
"# exactly identical to explicitly using a diagonal mass matrix.",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'hmc_leapfrog_integrator_one_step'",
",",
"[",
"independent_chain_ndims",
",",
"step_sizes",
",",
"current_momentum_parts",
",",
"current_state_parts",
",",
"current_target_log_prob",
",",
"current_target_log_prob_grad_parts",
"]",
")",
":",
"# Step 1: Update momentum.",
"proposed_momentum_parts",
"=",
"[",
"v",
"+",
"0.5",
"*",
"tf",
".",
"cast",
"(",
"eps",
",",
"v",
".",
"dtype",
")",
"*",
"g",
"for",
"v",
",",
"eps",
",",
"g",
"in",
"zip",
"(",
"current_momentum_parts",
",",
"step_sizes",
",",
"current_target_log_prob_grad_parts",
")",
"]",
"# Step 2: Update state.",
"proposed_state_parts",
"=",
"[",
"x",
"+",
"tf",
".",
"cast",
"(",
"eps",
",",
"v",
".",
"dtype",
")",
"*",
"v",
"for",
"x",
",",
"eps",
",",
"v",
"in",
"zip",
"(",
"current_state_parts",
",",
"step_sizes",
",",
"proposed_momentum_parts",
")",
"]",
"if",
"state_gradients_are_stopped",
":",
"proposed_state_parts",
"=",
"[",
"tf",
".",
"stop_gradient",
"(",
"x",
")",
"for",
"x",
"in",
"proposed_state_parts",
"]",
"# Step 3a: Re-evaluate target-log-prob (and grad) at proposed state.",
"[",
"proposed_target_log_prob",
",",
"proposed_target_log_prob_grad_parts",
",",
"]",
"=",
"mcmc_util",
".",
"maybe_call_fn_and_grads",
"(",
"target_log_prob_fn",
",",
"proposed_state_parts",
")",
"if",
"not",
"proposed_target_log_prob",
".",
"dtype",
".",
"is_floating",
":",
"raise",
"TypeError",
"(",
"'`target_log_prob_fn` must produce a `Tensor` '",
"'with `float` `dtype`.'",
")",
"if",
"any",
"(",
"g",
"is",
"None",
"for",
"g",
"in",
"proposed_target_log_prob_grad_parts",
")",
":",
"raise",
"ValueError",
"(",
"'Encountered `None` gradient. Does your target `target_log_prob_fn` '",
"'access all `tf.Variable`s via `tf.get_variable`?\\n'",
"' current_state_parts: {}\\n'",
"' proposed_state_parts: {}\\n'",
"' proposed_target_log_prob_grad_parts: {}'",
".",
"format",
"(",
"current_state_parts",
",",
"proposed_state_parts",
",",
"proposed_target_log_prob_grad_parts",
")",
")",
"# Step 3b: Update momentum (again).",
"proposed_momentum_parts",
"=",
"[",
"v",
"+",
"0.5",
"*",
"tf",
".",
"cast",
"(",
"eps",
",",
"v",
".",
"dtype",
")",
"*",
"g",
"for",
"v",
",",
"eps",
",",
"g",
"in",
"zip",
"(",
"proposed_momentum_parts",
",",
"step_sizes",
",",
"proposed_target_log_prob_grad_parts",
")",
"]",
"return",
"[",
"proposed_momentum_parts",
",",
"proposed_state_parts",
",",
"proposed_target_log_prob",
",",
"proposed_target_log_prob_grad_parts",
",",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_compute_log_acceptance_correction
|
Helper to `kernel` which computes the log acceptance-correction.
A sufficient but not necessary condition for the existence of a stationary
distribution, `p(x)`, is "detailed balance", i.e.:
```none
p(x'|x) p(x) = p(x|x') p(x')
```
In the Metropolis-Hastings algorithm, a state is proposed according to
`g(x'|x)` and accepted according to `a(x'|x)`, hence
`p(x'|x) = g(x'|x) a(x'|x)`.
Inserting this into the detailed balance equation implies:
```none
g(x'|x) a(x'|x) p(x) = g(x|x') a(x|x') p(x')
==> a(x'|x) / a(x|x') = p(x') / p(x) [g(x|x') / g(x'|x)] (*)
```
One definition of `a(x'|x)` which satisfies (*) is:
```none
a(x'|x) = min(1, p(x') / p(x) [g(x|x') / g(x'|x)])
```
(To see that this satisfies (*), notice that under this definition only at
most one of `a(x'|x)` and `a(x|x')` can be other than one.)
We call the bracketed term the "acceptance correction".
In the case of UncalibratedHMC, the log acceptance-correction is not the log
proposal-ratio. UncalibratedHMC augments the state-space with momentum, z.
Assuming a standard Gaussian distribution for momentums, the chain eventually
converges to:
```none
p([x, z]) propto= target_prob(x) exp(-0.5 z**2)
```
Relating this back to Metropolis-Hastings parlance, for HMC we have:
```none
p([x, z]) propto= target_prob(x) exp(-0.5 z**2)
g([x, z] | [x', z']) = g([x', z'] | [x, z])
```
In other words, the MH bracketed term is `1`. However, because we desire to
use a general MH framework, we can place the momentum probability ratio inside
the metropolis-correction factor thus getting an acceptance probability:
```none
target_prob(x')
accept_prob(x'|x) = ----------------- [exp(-0.5 z**2) / exp(-0.5 z'**2)]
target_prob(x)
```
(Note: we actually need to handle the kinetic energy change at each leapfrog
step, but this is the idea.)
Args:
current_momentums: `Tensor` representing the value(s) of the current
momentum(s) of the state (parts).
proposed_momentums: `Tensor` representing the value(s) of the proposed
momentum(s) of the state (parts).
independent_chain_ndims: Scalar `int` `Tensor` representing the number of
leftmost `Tensor` dimensions which index independent chains.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'compute_log_acceptance_correction').
Returns:
log_acceptance_correction: `Tensor` representing the `log`
acceptance-correction. (See docstring for mathematical definition.)
|
tensorflow_probability/python/mcmc/hmc.py
|
def _compute_log_acceptance_correction(current_momentums,
proposed_momentums,
independent_chain_ndims,
name=None):
"""Helper to `kernel` which computes the log acceptance-correction.
A sufficient but not necessary condition for the existence of a stationary
distribution, `p(x)`, is "detailed balance", i.e.:
```none
p(x'|x) p(x) = p(x|x') p(x')
```
In the Metropolis-Hastings algorithm, a state is proposed according to
`g(x'|x)` and accepted according to `a(x'|x)`, hence
`p(x'|x) = g(x'|x) a(x'|x)`.
Inserting this into the detailed balance equation implies:
```none
g(x'|x) a(x'|x) p(x) = g(x|x') a(x|x') p(x')
==> a(x'|x) / a(x|x') = p(x') / p(x) [g(x|x') / g(x'|x)] (*)
```
One definition of `a(x'|x)` which satisfies (*) is:
```none
a(x'|x) = min(1, p(x') / p(x) [g(x|x') / g(x'|x)])
```
(To see that this satisfies (*), notice that under this definition only at
most one of `a(x'|x)` and `a(x|x')` can be other than one.)
We call the bracketed term the "acceptance correction".
In the case of UncalibratedHMC, the log acceptance-correction is not the log
proposal-ratio. UncalibratedHMC augments the state-space with momentum, z.
Assuming a standard Gaussian distribution for momentums, the chain eventually
converges to:
```none
p([x, z]) propto= target_prob(x) exp(-0.5 z**2)
```
Relating this back to Metropolis-Hastings parlance, for HMC we have:
```none
p([x, z]) propto= target_prob(x) exp(-0.5 z**2)
g([x, z] | [x', z']) = g([x', z'] | [x, z])
```
In other words, the MH bracketed term is `1`. However, because we desire to
use a general MH framework, we can place the momentum probability ratio inside
the metropolis-correction factor thus getting an acceptance probability:
```none
target_prob(x')
accept_prob(x'|x) = ----------------- [exp(-0.5 z**2) / exp(-0.5 z'**2)]
target_prob(x)
```
(Note: we actually need to handle the kinetic energy change at each leapfrog
step, but this is the idea.)
Args:
current_momentums: `Tensor` representing the value(s) of the current
momentum(s) of the state (parts).
proposed_momentums: `Tensor` representing the value(s) of the proposed
momentum(s) of the state (parts).
independent_chain_ndims: Scalar `int` `Tensor` representing the number of
leftmost `Tensor` dimensions which index independent chains.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'compute_log_acceptance_correction').
Returns:
log_acceptance_correction: `Tensor` representing the `log`
acceptance-correction. (See docstring for mathematical definition.)
"""
with tf.compat.v1.name_scope(
name, 'compute_log_acceptance_correction',
[independent_chain_ndims, current_momentums, proposed_momentums]):
log_current_kinetic, log_proposed_kinetic = [], []
for current_momentum, proposed_momentum in zip(
current_momentums, proposed_momentums):
axis = tf.range(independent_chain_ndims, tf.rank(current_momentum))
log_current_kinetic.append(_log_sum_sq(current_momentum, axis))
log_proposed_kinetic.append(_log_sum_sq(proposed_momentum, axis))
current_kinetic = 0.5 * tf.exp(
tf.reduce_logsumexp(
input_tensor=tf.stack(log_current_kinetic, axis=-1), axis=-1))
proposed_kinetic = 0.5 * tf.exp(
tf.reduce_logsumexp(
input_tensor=tf.stack(log_proposed_kinetic, axis=-1), axis=-1))
return mcmc_util.safe_sum([current_kinetic, -proposed_kinetic])
|
def _compute_log_acceptance_correction(current_momentums,
proposed_momentums,
independent_chain_ndims,
name=None):
"""Helper to `kernel` which computes the log acceptance-correction.
A sufficient but not necessary condition for the existence of a stationary
distribution, `p(x)`, is "detailed balance", i.e.:
```none
p(x'|x) p(x) = p(x|x') p(x')
```
In the Metropolis-Hastings algorithm, a state is proposed according to
`g(x'|x)` and accepted according to `a(x'|x)`, hence
`p(x'|x) = g(x'|x) a(x'|x)`.
Inserting this into the detailed balance equation implies:
```none
g(x'|x) a(x'|x) p(x) = g(x|x') a(x|x') p(x')
==> a(x'|x) / a(x|x') = p(x') / p(x) [g(x|x') / g(x'|x)] (*)
```
One definition of `a(x'|x)` which satisfies (*) is:
```none
a(x'|x) = min(1, p(x') / p(x) [g(x|x') / g(x'|x)])
```
(To see that this satisfies (*), notice that under this definition only at
most one of `a(x'|x)` and `a(x|x')` can be other than one.)
We call the bracketed term the "acceptance correction".
In the case of UncalibratedHMC, the log acceptance-correction is not the log
proposal-ratio. UncalibratedHMC augments the state-space with momentum, z.
Assuming a standard Gaussian distribution for momentums, the chain eventually
converges to:
```none
p([x, z]) propto= target_prob(x) exp(-0.5 z**2)
```
Relating this back to Metropolis-Hastings parlance, for HMC we have:
```none
p([x, z]) propto= target_prob(x) exp(-0.5 z**2)
g([x, z] | [x', z']) = g([x', z'] | [x, z])
```
In other words, the MH bracketed term is `1`. However, because we desire to
use a general MH framework, we can place the momentum probability ratio inside
the metropolis-correction factor thus getting an acceptance probability:
```none
target_prob(x')
accept_prob(x'|x) = ----------------- [exp(-0.5 z**2) / exp(-0.5 z'**2)]
target_prob(x)
```
(Note: we actually need to handle the kinetic energy change at each leapfrog
step, but this is the idea.)
Args:
current_momentums: `Tensor` representing the value(s) of the current
momentum(s) of the state (parts).
proposed_momentums: `Tensor` representing the value(s) of the proposed
momentum(s) of the state (parts).
independent_chain_ndims: Scalar `int` `Tensor` representing the number of
leftmost `Tensor` dimensions which index independent chains.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'compute_log_acceptance_correction').
Returns:
log_acceptance_correction: `Tensor` representing the `log`
acceptance-correction. (See docstring for mathematical definition.)
"""
with tf.compat.v1.name_scope(
name, 'compute_log_acceptance_correction',
[independent_chain_ndims, current_momentums, proposed_momentums]):
log_current_kinetic, log_proposed_kinetic = [], []
for current_momentum, proposed_momentum in zip(
current_momentums, proposed_momentums):
axis = tf.range(independent_chain_ndims, tf.rank(current_momentum))
log_current_kinetic.append(_log_sum_sq(current_momentum, axis))
log_proposed_kinetic.append(_log_sum_sq(proposed_momentum, axis))
current_kinetic = 0.5 * tf.exp(
tf.reduce_logsumexp(
input_tensor=tf.stack(log_current_kinetic, axis=-1), axis=-1))
proposed_kinetic = 0.5 * tf.exp(
tf.reduce_logsumexp(
input_tensor=tf.stack(log_proposed_kinetic, axis=-1), axis=-1))
return mcmc_util.safe_sum([current_kinetic, -proposed_kinetic])
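A plain-NumPy sketch (editor's addition, not from the source) of the quantity the function above computes; the real implementation reduces over the non-chain dimensions and uses log-sum-exp for numerical stability, but for a single chain it is simply the drop in kinetic energy.

```python
import numpy as np

np.random.seed(0)
# One chain (independent_chain_ndims = 0) with two hypothetical state parts.
current_momentums = [np.random.randn(3), np.random.randn(2)]
proposed_momentums = [np.random.randn(3), np.random.randn(2)]

current_kinetic = 0.5 * sum(np.sum(m**2) for m in current_momentums)
proposed_kinetic = 0.5 * sum(np.sum(m**2) for m in proposed_momentums)

# Log acceptance-correction: added to the change in target log-prob before
# the Metropolis-Hastings accept/reject decision.
log_acceptance_correction = current_kinetic - proposed_kinetic
```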
|
[
"Helper",
"to",
"kernel",
"which",
"computes",
"the",
"log",
"acceptance",
"-",
"correction",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/hmc.py#L1052-L1145
|
[
"def",
"_compute_log_acceptance_correction",
"(",
"current_momentums",
",",
"proposed_momentums",
",",
"independent_chain_ndims",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'compute_log_acceptance_correction'",
",",
"[",
"independent_chain_ndims",
",",
"current_momentums",
",",
"proposed_momentums",
"]",
")",
":",
"log_current_kinetic",
",",
"log_proposed_kinetic",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"current_momentum",
",",
"proposed_momentum",
"in",
"zip",
"(",
"current_momentums",
",",
"proposed_momentums",
")",
":",
"axis",
"=",
"tf",
".",
"range",
"(",
"independent_chain_ndims",
",",
"tf",
".",
"rank",
"(",
"current_momentum",
")",
")",
"log_current_kinetic",
".",
"append",
"(",
"_log_sum_sq",
"(",
"current_momentum",
",",
"axis",
")",
")",
"log_proposed_kinetic",
".",
"append",
"(",
"_log_sum_sq",
"(",
"proposed_momentum",
",",
"axis",
")",
")",
"current_kinetic",
"=",
"0.5",
"*",
"tf",
".",
"exp",
"(",
"tf",
".",
"reduce_logsumexp",
"(",
"input_tensor",
"=",
"tf",
".",
"stack",
"(",
"log_current_kinetic",
",",
"axis",
"=",
"-",
"1",
")",
",",
"axis",
"=",
"-",
"1",
")",
")",
"proposed_kinetic",
"=",
"0.5",
"*",
"tf",
".",
"exp",
"(",
"tf",
".",
"reduce_logsumexp",
"(",
"input_tensor",
"=",
"tf",
".",
"stack",
"(",
"log_proposed_kinetic",
",",
"axis",
"=",
"-",
"1",
")",
",",
"axis",
"=",
"-",
"1",
")",
")",
"return",
"mcmc_util",
".",
"safe_sum",
"(",
"[",
"current_kinetic",
",",
"-",
"proposed_kinetic",
"]",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_prepare_args
|
Helper which processes input args to meet list-like assumptions.
|
tensorflow_probability/python/mcmc/hmc.py
|
def _prepare_args(target_log_prob_fn,
state,
step_size,
target_log_prob=None,
grads_target_log_prob=None,
maybe_expand=False,
state_gradients_are_stopped=False):
"""Helper which processes input args to meet list-like assumptions."""
state_parts = list(state) if mcmc_util.is_list_like(state) else [state]
state_parts = [
tf.convert_to_tensor(value=s, name='current_state') for s in state_parts
]
if state_gradients_are_stopped:
state_parts = [tf.stop_gradient(x) for x in state_parts]
target_log_prob, grads_target_log_prob = mcmc_util.maybe_call_fn_and_grads(
target_log_prob_fn,
state_parts,
target_log_prob,
grads_target_log_prob)
step_sizes = (list(step_size) if mcmc_util.is_list_like(step_size)
else [step_size])
step_sizes = [
tf.convert_to_tensor(
value=s, name='step_size', dtype=target_log_prob.dtype)
for s in step_sizes
]
if len(step_sizes) == 1:
step_sizes *= len(state_parts)
if len(state_parts) != len(step_sizes):
raise ValueError('There should be exactly one `step_size` or it should '
'have same length as `current_state`.')
def maybe_flatten(x):
return x if maybe_expand or mcmc_util.is_list_like(state) else x[0]
return [
maybe_flatten(state_parts),
maybe_flatten(step_sizes),
target_log_prob,
grads_target_log_prob,
]
|
def _prepare_args(target_log_prob_fn,
state,
step_size,
target_log_prob=None,
grads_target_log_prob=None,
maybe_expand=False,
state_gradients_are_stopped=False):
"""Helper which processes input args to meet list-like assumptions."""
state_parts = list(state) if mcmc_util.is_list_like(state) else [state]
state_parts = [
tf.convert_to_tensor(value=s, name='current_state') for s in state_parts
]
if state_gradients_are_stopped:
state_parts = [tf.stop_gradient(x) for x in state_parts]
target_log_prob, grads_target_log_prob = mcmc_util.maybe_call_fn_and_grads(
target_log_prob_fn,
state_parts,
target_log_prob,
grads_target_log_prob)
step_sizes = (list(step_size) if mcmc_util.is_list_like(step_size)
else [step_size])
step_sizes = [
tf.convert_to_tensor(
value=s, name='step_size', dtype=target_log_prob.dtype)
for s in step_sizes
]
if len(step_sizes) == 1:
step_sizes *= len(state_parts)
if len(state_parts) != len(step_sizes):
raise ValueError('There should be exactly one `step_size` or it should '
'have same length as `current_state`.')
def maybe_flatten(x):
return x if maybe_expand or mcmc_util.is_list_like(state) else x[0]
return [
maybe_flatten(state_parts),
maybe_flatten(step_sizes),
target_log_prob,
grads_target_log_prob,
]
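A tiny illustrative sketch (editor's addition; the names are hypothetical) of the list-like normalization performed above: a bare `Tensor` state becomes a one-element list, and a single scalar step size is repeated once per state part.

```python
import tensorflow as tf

state = [tf.zeros(3), tf.zeros(2)]   # two state parts
step_size = 0.1                      # one scalar step size shared by both parts

state_parts = list(state) if isinstance(state, (list, tuple)) else [state]
step_sizes = [step_size] * len(state_parts)   # broadcast, as in the helper
assert len(step_sizes) == len(state_parts)
```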
|
[
"Helper",
"which",
"processes",
"input",
"args",
"to",
"meet",
"list",
"-",
"like",
"assumptions",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/hmc.py#L1148-L1186
|
[
"def",
"_prepare_args",
"(",
"target_log_prob_fn",
",",
"state",
",",
"step_size",
",",
"target_log_prob",
"=",
"None",
",",
"grads_target_log_prob",
"=",
"None",
",",
"maybe_expand",
"=",
"False",
",",
"state_gradients_are_stopped",
"=",
"False",
")",
":",
"state_parts",
"=",
"list",
"(",
"state",
")",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"state",
")",
"else",
"[",
"state",
"]",
"state_parts",
"=",
"[",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"s",
",",
"name",
"=",
"'current_state'",
")",
"for",
"s",
"in",
"state_parts",
"]",
"if",
"state_gradients_are_stopped",
":",
"state_parts",
"=",
"[",
"tf",
".",
"stop_gradient",
"(",
"x",
")",
"for",
"x",
"in",
"state_parts",
"]",
"target_log_prob",
",",
"grads_target_log_prob",
"=",
"mcmc_util",
".",
"maybe_call_fn_and_grads",
"(",
"target_log_prob_fn",
",",
"state_parts",
",",
"target_log_prob",
",",
"grads_target_log_prob",
")",
"step_sizes",
"=",
"(",
"list",
"(",
"step_size",
")",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"step_size",
")",
"else",
"[",
"step_size",
"]",
")",
"step_sizes",
"=",
"[",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"s",
",",
"name",
"=",
"'step_size'",
",",
"dtype",
"=",
"target_log_prob",
".",
"dtype",
")",
"for",
"s",
"in",
"step_sizes",
"]",
"if",
"len",
"(",
"step_sizes",
")",
"==",
"1",
":",
"step_sizes",
"*=",
"len",
"(",
"state_parts",
")",
"if",
"len",
"(",
"state_parts",
")",
"!=",
"len",
"(",
"step_sizes",
")",
":",
"raise",
"ValueError",
"(",
"'There should be exactly one `step_size` or it should '",
"'have same length as `current_state`.'",
")",
"def",
"maybe_flatten",
"(",
"x",
")",
":",
"return",
"x",
"if",
"maybe_expand",
"or",
"mcmc_util",
".",
"is_list_like",
"(",
"state",
")",
"else",
"x",
"[",
"0",
"]",
"return",
"[",
"maybe_flatten",
"(",
"state_parts",
")",
",",
"maybe_flatten",
"(",
"step_sizes",
")",
",",
"target_log_prob",
",",
"grads_target_log_prob",
",",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_log_sum_sq
|
Computes log(sum(x**2)).
|
tensorflow_probability/python/mcmc/hmc.py
|
def _log_sum_sq(x, axis=None):
"""Computes log(sum(x**2))."""
return tf.reduce_logsumexp(
input_tensor=2. * tf.math.log(tf.abs(x)), axis=axis)
|
def _log_sum_sq(x, axis=None):
"""Computes log(sum(x**2))."""
return tf.reduce_logsumexp(
input_tensor=2. * tf.math.log(tf.abs(x)), axis=axis)
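A short check (editor's addition, not from the source) of the identity used above, `log(sum(x**2)) == logsumexp(2 * log|x|)`; the log-sum-exp form stays finite even when `x**2` itself would overflow.

```python
import numpy as np
import tensorflow as tf

x = np.array([0.3, -1.7, 2.2])
stable = tf.reduce_logsumexp(input_tensor=2. * tf.math.log(tf.abs(x)))
naive = np.log(np.sum(x ** 2))          # agrees with `stable` to float precision

big = np.array([1e200, 3e200])          # big**2 overflows to inf in float64
stable_big = tf.reduce_logsumexp(input_tensor=2. * tf.math.log(tf.abs(big)))
# stable_big is finite (about 923.3) even though np.log(np.sum(big**2)) is inf.
```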
|
[
"Computes",
"log",
"(",
"sum",
"(",
"x",
"**",
"2",
"))",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/hmc.py#L1189-L1192
|
[
"def",
"_log_sum_sq",
"(",
"x",
",",
"axis",
"=",
"None",
")",
":",
"return",
"tf",
".",
"reduce_logsumexp",
"(",
"input_tensor",
"=",
"2.",
"*",
"tf",
".",
"math",
".",
"log",
"(",
"tf",
".",
"abs",
"(",
"x",
")",
")",
",",
"axis",
"=",
"axis",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
HamiltonianMonteCarlo.one_step
|
Runs one iteration of Hamiltonian Monte Carlo.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
previous_kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from previous calls to this function (or from the
`bootstrap_results` function.)
Returns:
next_state: Tensor or Python list of `Tensor`s representing the state(s)
of the Markov chain(s) after taking exactly one step. Has same type and
shape as `current_state`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
Raises:
  ValueError: if there isn't exactly one `step_size` or a list with the same length as
`current_state`.
|
tensorflow_probability/python/mcmc/hmc.py
|
def one_step(self, current_state, previous_kernel_results):
"""Runs one iteration of Hamiltonian Monte Carlo.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
previous_kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from previous calls to this function (or from the
`bootstrap_results` function.)
Returns:
next_state: Tensor or Python list of `Tensor`s representing the state(s)
of the Markov chain(s) after taking exactly one step. Has same type and
shape as `current_state`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
Raises:
      ValueError: if there isn't exactly one `step_size` or a list with the same length as
`current_state`.
"""
previous_step_size_assign = (
[] if self.step_size_update_fn is None
else (previous_kernel_results.extra.step_size_assign
if mcmc_util.is_list_like(
previous_kernel_results.extra.step_size_assign)
else [previous_kernel_results.extra.step_size_assign]))
with tf.control_dependencies(previous_step_size_assign):
next_state, kernel_results = self._impl.one_step(
current_state, previous_kernel_results)
if self.step_size_update_fn is not None:
step_size_assign = self.step_size_update_fn( # pylint: disable=not-callable
self.step_size, kernel_results)
kernel_results = kernel_results._replace(
extra=HamiltonianMonteCarloExtraKernelResults(
step_size_assign=step_size_assign))
return next_state, kernel_results
|
def one_step(self, current_state, previous_kernel_results):
"""Runs one iteration of Hamiltonian Monte Carlo.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
previous_kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from previous calls to this function (or from the
`bootstrap_results` function.)
Returns:
next_state: Tensor or Python list of `Tensor`s representing the state(s)
of the Markov chain(s) after taking exactly one step. Has same type and
shape as `current_state`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
Raises:
      ValueError: if there isn't exactly one `step_size` or a list with the same length as
`current_state`.
"""
previous_step_size_assign = (
[] if self.step_size_update_fn is None
else (previous_kernel_results.extra.step_size_assign
if mcmc_util.is_list_like(
previous_kernel_results.extra.step_size_assign)
else [previous_kernel_results.extra.step_size_assign]))
with tf.control_dependencies(previous_step_size_assign):
next_state, kernel_results = self._impl.one_step(
current_state, previous_kernel_results)
if self.step_size_update_fn is not None:
step_size_assign = self.step_size_update_fn( # pylint: disable=not-callable
self.step_size, kernel_results)
kernel_results = kernel_results._replace(
extra=HamiltonianMonteCarloExtraKernelResults(
step_size_assign=step_size_assign))
return next_state, kernel_results
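A minimal sketch (editor's addition; assumed usage with a hypothetical standard-normal target) of driving the kernel by hand: `bootstrap_results` seeds the kernel results once and `one_step` is then applied repeatedly, which is the loop that `tfp.mcmc.sample_chain` runs internally.

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

hmc = tfp.mcmc.HamiltonianMonteCarlo(
    target_log_prob_fn=tfd.Normal(loc=0., scale=1.).log_prob,
    step_size=0.5,
    num_leapfrog_steps=3)

state = tf.zeros([])                            # scalar chain state
kernel_results = hmc.bootstrap_results(state)   # initial `previous_kernel_results`
for _ in range(10):
  state, kernel_results = hmc.one_step(state, kernel_results)
```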
|
[
"Runs",
"one",
"iteration",
"of",
"Hamiltonian",
"Monte",
"Carlo",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/hmc.py#L516-L554
|
[
"def",
"one_step",
"(",
"self",
",",
"current_state",
",",
"previous_kernel_results",
")",
":",
"previous_step_size_assign",
"=",
"(",
"[",
"]",
"if",
"self",
".",
"step_size_update_fn",
"is",
"None",
"else",
"(",
"previous_kernel_results",
".",
"extra",
".",
"step_size_assign",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"previous_kernel_results",
".",
"extra",
".",
"step_size_assign",
")",
"else",
"[",
"previous_kernel_results",
".",
"extra",
".",
"step_size_assign",
"]",
")",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"previous_step_size_assign",
")",
":",
"next_state",
",",
"kernel_results",
"=",
"self",
".",
"_impl",
".",
"one_step",
"(",
"current_state",
",",
"previous_kernel_results",
")",
"if",
"self",
".",
"step_size_update_fn",
"is",
"not",
"None",
":",
"step_size_assign",
"=",
"self",
".",
"step_size_update_fn",
"(",
"# pylint: disable=not-callable",
"self",
".",
"step_size",
",",
"kernel_results",
")",
"kernel_results",
"=",
"kernel_results",
".",
"_replace",
"(",
"extra",
"=",
"HamiltonianMonteCarloExtraKernelResults",
"(",
"step_size_assign",
"=",
"step_size_assign",
")",
")",
"return",
"next_state",
",",
"kernel_results"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
HamiltonianMonteCarlo.bootstrap_results
|
Creates initial `previous_kernel_results` using a supplied `state`.
|
tensorflow_probability/python/mcmc/hmc.py
|
def bootstrap_results(self, init_state):
"""Creates initial `previous_kernel_results` using a supplied `state`."""
kernel_results = self._impl.bootstrap_results(init_state)
if self.step_size_update_fn is not None:
step_size_assign = self.step_size_update_fn(self.step_size, None) # pylint: disable=not-callable
kernel_results = kernel_results._replace(
extra=HamiltonianMonteCarloExtraKernelResults(
step_size_assign=step_size_assign))
return kernel_results
|
def bootstrap_results(self, init_state):
"""Creates initial `previous_kernel_results` using a supplied `state`."""
kernel_results = self._impl.bootstrap_results(init_state)
if self.step_size_update_fn is not None:
step_size_assign = self.step_size_update_fn(self.step_size, None) # pylint: disable=not-callable
kernel_results = kernel_results._replace(
extra=HamiltonianMonteCarloExtraKernelResults(
step_size_assign=step_size_assign))
return kernel_results
|
[
"Creates",
"initial",
"previous_kernel_results",
"using",
"a",
"supplied",
"state",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/hmc.py#L556-L564
|
[
"def",
"bootstrap_results",
"(",
"self",
",",
"init_state",
")",
":",
"kernel_results",
"=",
"self",
".",
"_impl",
".",
"bootstrap_results",
"(",
"init_state",
")",
"if",
"self",
".",
"step_size_update_fn",
"is",
"not",
"None",
":",
"step_size_assign",
"=",
"self",
".",
"step_size_update_fn",
"(",
"self",
".",
"step_size",
",",
"None",
")",
"# pylint: disable=not-callable",
"kernel_results",
"=",
"kernel_results",
".",
"_replace",
"(",
"extra",
"=",
"HamiltonianMonteCarloExtraKernelResults",
"(",
"step_size_assign",
"=",
"step_size_assign",
")",
")",
"return",
"kernel_results"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
bayesian_resnet
|
Constructs a ResNet18 model.
Args:
input_shape: A `tuple` indicating the Tensor shape.
num_classes: `int` representing the number of class labels.
kernel_posterior_scale_mean: Python `int` number for the kernel
      posterior's scale (log variance) mean. The smaller the mean, the closer
      the initialization is to a deterministic network.
kernel_posterior_scale_stddev: Python `float` number for the initial kernel
posterior's scale stddev.
```
q(W|x) ~ N(mu, var),
log_var ~ N(kernel_posterior_scale_mean, kernel_posterior_scale_stddev)
```
kernel_posterior_scale_constraint: Python `float` number for the log value
to constrain the log variance throughout training.
i.e. log_var <= log(kernel_posterior_scale_constraint).
Returns:
tf.keras.Model.
|
tensorflow_probability/examples/models/bayesian_resnet.py
|
def bayesian_resnet(input_shape,
num_classes=10,
kernel_posterior_scale_mean=-9.0,
kernel_posterior_scale_stddev=0.1,
kernel_posterior_scale_constraint=0.2):
"""Constructs a ResNet18 model.
Args:
input_shape: A `tuple` indicating the Tensor shape.
num_classes: `int` representing the number of class labels.
kernel_posterior_scale_mean: Python `int` number for the kernel
      posterior's scale (log variance) mean. The smaller the mean, the closer
      the initialization is to a deterministic network.
kernel_posterior_scale_stddev: Python `float` number for the initial kernel
posterior's scale stddev.
```
q(W|x) ~ N(mu, var),
log_var ~ N(kernel_posterior_scale_mean, kernel_posterior_scale_stddev)
```
kernel_posterior_scale_constraint: Python `float` number for the log value
to constrain the log variance throughout training.
i.e. log_var <= log(kernel_posterior_scale_constraint).
Returns:
tf.keras.Model.
"""
filters = [64, 128, 256, 512]
kernels = [3, 3, 3, 3]
strides = [1, 2, 2, 2]
def _untransformed_scale_constraint(t):
return tf.clip_by_value(t, -1000,
tf.math.log(kernel_posterior_scale_constraint))
kernel_posterior_fn = tfp.layers.default_mean_field_normal_fn(
untransformed_scale_initializer=tf.compat.v1.initializers.random_normal(
mean=kernel_posterior_scale_mean,
stddev=kernel_posterior_scale_stddev),
untransformed_scale_constraint=_untransformed_scale_constraint)
image = tf.keras.layers.Input(shape=input_shape, dtype='float32')
x = tfp.layers.Convolution2DFlipout(
64,
3,
strides=1,
padding='same',
kernel_posterior_fn=kernel_posterior_fn)(image)
for i in range(len(kernels)):
x = _resnet_block(
x,
filters[i],
kernels[i],
strides[i],
kernel_posterior_fn)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.AveragePooling2D(4, 1)(x)
x = tf.keras.layers.Flatten()(x)
x = tfp.layers.DenseFlipout(
num_classes,
kernel_posterior_fn=kernel_posterior_fn)(x)
model = tf.keras.Model(inputs=image, outputs=x, name='resnet18')
return model
|
def bayesian_resnet(input_shape,
num_classes=10,
kernel_posterior_scale_mean=-9.0,
kernel_posterior_scale_stddev=0.1,
kernel_posterior_scale_constraint=0.2):
"""Constructs a ResNet18 model.
Args:
input_shape: A `tuple` indicating the Tensor shape.
num_classes: `int` representing the number of class labels.
kernel_posterior_scale_mean: Python `float` number for the kernel
posterior's scale (log variance) mean. The smaller the mean, the closer
the initialization is to a deterministic network.
kernel_posterior_scale_stddev: Python `float` number for the initial kernel
posterior's scale stddev.
```
q(W|x) ~ N(mu, var),
log_var ~ N(kernel_posterior_scale_mean, kernel_posterior_scale_stddev)
```
kernel_posterior_scale_constraint: Python `float` number used to constrain
the log variance throughout training,
i.e. log_var <= log(kernel_posterior_scale_constraint).
Returns:
tf.keras.Model.
"""
filters = [64, 128, 256, 512]
kernels = [3, 3, 3, 3]
strides = [1, 2, 2, 2]
def _untransformed_scale_constraint(t):
return tf.clip_by_value(t, -1000,
tf.math.log(kernel_posterior_scale_constraint))
kernel_posterior_fn = tfp.layers.default_mean_field_normal_fn(
untransformed_scale_initializer=tf.compat.v1.initializers.random_normal(
mean=kernel_posterior_scale_mean,
stddev=kernel_posterior_scale_stddev),
untransformed_scale_constraint=_untransformed_scale_constraint)
image = tf.keras.layers.Input(shape=input_shape, dtype='float32')
x = tfp.layers.Convolution2DFlipout(
64,
3,
strides=1,
padding='same',
kernel_posterior_fn=kernel_posterior_fn)(image)
for i in range(len(kernels)):
x = _resnet_block(
x,
filters[i],
kernels[i],
strides[i],
kernel_posterior_fn)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.AveragePooling2D(4, 1)(x)
x = tf.keras.layers.Flatten()(x)
x = tfp.layers.DenseFlipout(
num_classes,
kernel_posterior_fn=kernel_posterior_fn)(x)
model = tf.keras.Model(inputs=image, outputs=x, name='resnet18')
return model
|
[
"Constructs",
"a",
"ResNet18",
"model",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/models/bayesian_resnet.py#L25-L92
|
[
"def",
"bayesian_resnet",
"(",
"input_shape",
",",
"num_classes",
"=",
"10",
",",
"kernel_posterior_scale_mean",
"=",
"-",
"9.0",
",",
"kernel_posterior_scale_stddev",
"=",
"0.1",
",",
"kernel_posterior_scale_constraint",
"=",
"0.2",
")",
":",
"filters",
"=",
"[",
"64",
",",
"128",
",",
"256",
",",
"512",
"]",
"kernels",
"=",
"[",
"3",
",",
"3",
",",
"3",
",",
"3",
"]",
"strides",
"=",
"[",
"1",
",",
"2",
",",
"2",
",",
"2",
"]",
"def",
"_untransformed_scale_constraint",
"(",
"t",
")",
":",
"return",
"tf",
".",
"clip_by_value",
"(",
"t",
",",
"-",
"1000",
",",
"tf",
".",
"math",
".",
"log",
"(",
"kernel_posterior_scale_constraint",
")",
")",
"kernel_posterior_fn",
"=",
"tfp",
".",
"layers",
".",
"default_mean_field_normal_fn",
"(",
"untransformed_scale_initializer",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"initializers",
".",
"random_normal",
"(",
"mean",
"=",
"kernel_posterior_scale_mean",
",",
"stddev",
"=",
"kernel_posterior_scale_stddev",
")",
",",
"untransformed_scale_constraint",
"=",
"_untransformed_scale_constraint",
")",
"image",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"Input",
"(",
"shape",
"=",
"input_shape",
",",
"dtype",
"=",
"'float32'",
")",
"x",
"=",
"tfp",
".",
"layers",
".",
"Convolution2DFlipout",
"(",
"64",
",",
"3",
",",
"strides",
"=",
"1",
",",
"padding",
"=",
"'same'",
",",
"kernel_posterior_fn",
"=",
"kernel_posterior_fn",
")",
"(",
"image",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"kernels",
")",
")",
":",
"x",
"=",
"_resnet_block",
"(",
"x",
",",
"filters",
"[",
"i",
"]",
",",
"kernels",
"[",
"i",
"]",
",",
"strides",
"[",
"i",
"]",
",",
"kernel_posterior_fn",
")",
"x",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"BatchNormalization",
"(",
")",
"(",
"x",
")",
"x",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"Activation",
"(",
"'relu'",
")",
"(",
"x",
")",
"x",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"AveragePooling2D",
"(",
"4",
",",
"1",
")",
"(",
"x",
")",
"x",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"Flatten",
"(",
")",
"(",
"x",
")",
"x",
"=",
"tfp",
".",
"layers",
".",
"DenseFlipout",
"(",
"num_classes",
",",
"kernel_posterior_fn",
"=",
"kernel_posterior_fn",
")",
"(",
"x",
")",
"model",
"=",
"tf",
".",
"keras",
".",
"Model",
"(",
"inputs",
"=",
"image",
",",
"outputs",
"=",
"x",
",",
"name",
"=",
"'resnet18'",
")",
"return",
"model"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
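A hypothetical usage sketch for `bayesian_resnet` above; it assumes the example module (and its `_resnet_block` helper) is importable, and the input shape and class count are illustrative:

```python
import tensorflow as tf
# Assumed import path, mirroring the file location in the repository.
from tensorflow_probability.examples.models.bayesian_resnet import bayesian_resnet

# Build the Bayesian ResNet-18 for 32x32 RGB inputs (e.g. CIFAR-10-sized data).
model = bayesian_resnet(input_shape=(32, 32, 3), num_classes=10)
model.summary()

# Each Flipout layer registers its KL divergence as a layer loss; the total
# KL penalty would be added to the negative log-likelihood during training.
kl_penalty = tf.add_n(model.losses)
```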
|
test
|
_resnet_block
|
Network block for ResNet.
|
tensorflow_probability/examples/models/bayesian_resnet.py
|
def _resnet_block(x, filters, kernel, stride, kernel_posterior_fn):
"""Network block for ResNet."""
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)
if stride != 1 or filters != x.shape[1]:
shortcut = _projection_shortcut(x, filters, stride, kernel_posterior_fn)
else:
shortcut = x
x = tfp.layers.Convolution2DFlipout(
filters,
kernel,
strides=stride,
padding='same',
kernel_posterior_fn=kernel_posterior_fn)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)
x = tfp.layers.Convolution2DFlipout(
filters,
kernel,
strides=1,
padding='same',
kernel_posterior_fn=kernel_posterior_fn)(x)
x = tf.keras.layers.add([x, shortcut])
return x
|
def _resnet_block(x, filters, kernel, stride, kernel_posterior_fn):
"""Network block for ResNet."""
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)
if stride != 1 or filters != x.shape[1]:
shortcut = _projection_shortcut(x, filters, stride, kernel_posterior_fn)
else:
shortcut = x
x = tfp.layers.Convolution2DFlipout(
filters,
kernel,
strides=stride,
padding='same',
kernel_posterior_fn=kernel_posterior_fn)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)
x = tfp.layers.Convolution2DFlipout(
filters,
kernel,
strides=1,
padding='same',
kernel_posterior_fn=kernel_posterior_fn)(x)
x = tf.keras.layers.add([x, shortcut])
return x
|
[
"Network",
"block",
"for",
"ResNet",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/models/bayesian_resnet.py#L95-L121
|
[
"def",
"_resnet_block",
"(",
"x",
",",
"filters",
",",
"kernel",
",",
"stride",
",",
"kernel_posterior_fn",
")",
":",
"x",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"BatchNormalization",
"(",
")",
"(",
"x",
")",
"x",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"Activation",
"(",
"'relu'",
")",
"(",
"x",
")",
"if",
"stride",
"!=",
"1",
"or",
"filters",
"!=",
"x",
".",
"shape",
"[",
"1",
"]",
":",
"shortcut",
"=",
"_projection_shortcut",
"(",
"x",
",",
"filters",
",",
"stride",
",",
"kernel_posterior_fn",
")",
"else",
":",
"shortcut",
"=",
"x",
"x",
"=",
"tfp",
".",
"layers",
".",
"Convolution2DFlipout",
"(",
"filters",
",",
"kernel",
",",
"strides",
"=",
"stride",
",",
"padding",
"=",
"'same'",
",",
"kernel_posterior_fn",
"=",
"kernel_posterior_fn",
")",
"(",
"x",
")",
"x",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"BatchNormalization",
"(",
")",
"(",
"x",
")",
"x",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"Activation",
"(",
"'relu'",
")",
"(",
"x",
")",
"x",
"=",
"tfp",
".",
"layers",
".",
"Convolution2DFlipout",
"(",
"filters",
",",
"kernel",
",",
"strides",
"=",
"1",
",",
"padding",
"=",
"'same'",
",",
"kernel_posterior_fn",
"=",
"kernel_posterior_fn",
")",
"(",
"x",
")",
"x",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"add",
"(",
"[",
"x",
",",
"shortcut",
"]",
")",
"return",
"x"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
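A small illustrative sketch of applying one `_resnet_block` to a dummy feature map; it assumes `_resnet_block` (and the `_projection_shortcut` helper it calls) is in scope from the example module, and the shapes and posterior settings here are made up for the example:

```python
import tensorflow as tf
import tensorflow_probability as tfp

# Mean-field normal posterior for the convolution kernels (defaults only;
# the real example configures the initializer and constraint explicitly).
kernel_posterior_fn = tfp.layers.default_mean_field_normal_fn()

inputs = tf.keras.layers.Input(shape=(8, 8, 64))
# Stride 2 changes the spatial resolution, so the block falls back to the
# projection shortcut rather than the identity shortcut.
outputs = _resnet_block(inputs, filters=128, kernel=3, stride=2,
                        kernel_posterior_fn=kernel_posterior_fn)
block = tf.keras.Model(inputs=inputs, outputs=outputs)
```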
|
test
|
make_encoder
|
Create the encoder function.
Args:
activation: Activation function to use.
num_topics: The number of topics.
layer_sizes: The number of hidden units per layer in the encoder.
Returns:
encoder: A `callable` mapping a bag-of-words `Tensor` to a
`tfd.Distribution` instance over topics.
|
tensorflow_probability/examples/latent_dirichlet_allocation_distributions.py
|
def make_encoder(activation, num_topics, layer_sizes):
"""Create the encoder function.
Args:
activation: Activation function to use.
num_topics: The number of topics.
layer_sizes: The number of hidden units per layer in the encoder.
Returns:
encoder: A `callable` mapping a bag-of-words `Tensor` to a
`tfd.Distribution` instance over topics.
"""
encoder_net = tf.keras.Sequential()
for num_hidden_units in layer_sizes:
encoder_net.add(
tf.keras.layers.Dense(
num_hidden_units,
activation=activation,
kernel_initializer=tf.compat.v1.glorot_normal_initializer()))
encoder_net.add(
tf.keras.layers.Dense(
num_topics,
activation=tf.nn.softplus,
kernel_initializer=tf.compat.v1.glorot_normal_initializer()))
def encoder(bag_of_words):
net = _clip_dirichlet_parameters(encoder_net(bag_of_words))
return tfd.Dirichlet(concentration=net,
name="topics_posterior")
return encoder
|
def make_encoder(activation, num_topics, layer_sizes):
"""Create the encoder function.
Args:
activation: Activation function to use.
num_topics: The number of topics.
layer_sizes: The number of hidden units per layer in the encoder.
Returns:
encoder: A `callable` mapping a bag-of-words `Tensor` to a
`tfd.Distribution` instance over topics.
"""
encoder_net = tf.keras.Sequential()
for num_hidden_units in layer_sizes:
encoder_net.add(
tf.keras.layers.Dense(
num_hidden_units,
activation=activation,
kernel_initializer=tf.compat.v1.glorot_normal_initializer()))
encoder_net.add(
tf.keras.layers.Dense(
num_topics,
activation=tf.nn.softplus,
kernel_initializer=tf.compat.v1.glorot_normal_initializer()))
def encoder(bag_of_words):
net = _clip_dirichlet_parameters(encoder_net(bag_of_words))
return tfd.Dirichlet(concentration=net,
name="topics_posterior")
return encoder
|
[
"Create",
"the",
"encoder",
"function",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/latent_dirichlet_allocation_distributions.py#L164-L194
|
[
"def",
"make_encoder",
"(",
"activation",
",",
"num_topics",
",",
"layer_sizes",
")",
":",
"encoder_net",
"=",
"tf",
".",
"keras",
".",
"Sequential",
"(",
")",
"for",
"num_hidden_units",
"in",
"layer_sizes",
":",
"encoder_net",
".",
"add",
"(",
"tf",
".",
"keras",
".",
"layers",
".",
"Dense",
"(",
"num_hidden_units",
",",
"activation",
"=",
"activation",
",",
"kernel_initializer",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"glorot_normal_initializer",
"(",
")",
")",
")",
"encoder_net",
".",
"add",
"(",
"tf",
".",
"keras",
".",
"layers",
".",
"Dense",
"(",
"num_topics",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"softplus",
",",
"kernel_initializer",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"glorot_normal_initializer",
"(",
")",
")",
")",
"def",
"encoder",
"(",
"bag_of_words",
")",
":",
"net",
"=",
"_clip_dirichlet_parameters",
"(",
"encoder_net",
"(",
"bag_of_words",
")",
")",
"return",
"tfd",
".",
"Dirichlet",
"(",
"concentration",
"=",
"net",
",",
"name",
"=",
"\"topics_posterior\"",
")",
"return",
"encoder"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
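A hypothetical sketch of using `make_encoder` to map a batch of bag-of-words counts to a Dirichlet posterior over topics; the vocabulary size, layer sizes, and batch are illustrative, and `_clip_dirichlet_parameters` from the same example file is assumed to be in scope:

```python
import tensorflow as tf

num_topics = 50
vocab_size = 1000

encoder = make_encoder(activation=tf.nn.relu,
                       num_topics=num_topics,
                       layer_sizes=[300, 300, 300])

# A batch of 8 documents represented as (float) word counts.
bag_of_words = tf.random.uniform([8, vocab_size], maxval=5., dtype=tf.float32)
topics_posterior = encoder(bag_of_words)   # tfd.Dirichlet, batch shape [8]
topic_samples = topics_posterior.sample()  # shape [8, num_topics]
```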
|
test
|
make_decoder
|
Create the decoder function.
Args:
num_topics: The number of topics.
num_words: The number of words.
Returns:
decoder: A `callable` mapping a `Tensor` of encodings to a
`tfd.Distribution` instance over words.
topics_words: A `Tensor` of per-topic word probabilities, with shape
`[num_topics, num_words]`.
|
tensorflow_probability/examples/latent_dirichlet_allocation_distributions.py
|
def make_decoder(num_topics, num_words):
"""Create the decoder function.
Args:
num_topics: The number of topics.
num_words: The number of words.
Returns:
decoder: A `callable` mapping a `Tensor` of encodings to a
`tfd.Distribution` instance over words.
topics_words: A `Tensor` of per-topic word probabilities, with shape
`[num_topics, num_words]`.
"""
topics_words_logits = tf.compat.v1.get_variable(
"topics_words_logits",
shape=[num_topics, num_words],
initializer=tf.compat.v1.glorot_normal_initializer())
topics_words = tf.nn.softmax(topics_words_logits, axis=-1)
def decoder(topics):
word_probs = tf.matmul(topics, topics_words)
# The observations are bag of words and therefore not one-hot. However,
# log_prob of OneHotCategorical computes the probability correctly in
# this case.
return tfd.OneHotCategorical(probs=word_probs,
name="bag_of_words")
return decoder, topics_words
|
def make_decoder(num_topics, num_words):
"""Create the decoder function.
Args:
num_topics: The number of topics.
num_words: The number of words.
Returns:
decoder: A `callable` mapping a `Tensor` of encodings to a
`tfd.Distribution` instance over words.
topics_words: A `Tensor` of per-topic word probabilities, with shape
`[num_topics, num_words]`.
"""
topics_words_logits = tf.compat.v1.get_variable(
"topics_words_logits",
shape=[num_topics, num_words],
initializer=tf.compat.v1.glorot_normal_initializer())
topics_words = tf.nn.softmax(topics_words_logits, axis=-1)
def decoder(topics):
word_probs = tf.matmul(topics, topics_words)
# The observations are bag of words and therefore not one-hot. However,
# log_prob of OneHotCategorical computes the probability correctly in
# this case.
return tfd.OneHotCategorical(probs=word_probs,
name="bag_of_words")
return decoder, topics_words
|
[
"Create",
"the",
"decoder",
"function",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/latent_dirichlet_allocation_distributions.py#L197-L222
|
[
"def",
"make_decoder",
"(",
"num_topics",
",",
"num_words",
")",
":",
"topics_words_logits",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"get_variable",
"(",
"\"topics_words_logits\"",
",",
"shape",
"=",
"[",
"num_topics",
",",
"num_words",
"]",
",",
"initializer",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"glorot_normal_initializer",
"(",
")",
")",
"topics_words",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"topics_words_logits",
",",
"axis",
"=",
"-",
"1",
")",
"def",
"decoder",
"(",
"topics",
")",
":",
"word_probs",
"=",
"tf",
".",
"matmul",
"(",
"topics",
",",
"topics_words",
")",
"# The observations are bag of words and therefore not one-hot. However,",
"# log_prob of OneHotCategorical computes the probability correctly in",
"# this case.",
"return",
"tfd",
".",
"OneHotCategorical",
"(",
"probs",
"=",
"word_probs",
",",
"name",
"=",
"\"bag_of_words\"",
")",
"return",
"decoder",
",",
"topics_words"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
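A sketch of how the decoder pairs with the encoder above. Because `make_decoder` creates its `topics_words_logits` variable with `tf.compat.v1.get_variable`, it is intended to run in graph mode (for example inside the Estimator `model_fn` later in this file); the sizes here are illustrative:

```python
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # the decoder builds v1-style variables

num_topics, num_words = 50, 1000
# Builds the trainable topic-word matrix and returns the decoding callable.
decoder, topics_words = make_decoder(num_topics, num_words)

# A uniform mixture over topics for a batch of 8 documents.
topics = tf.fill([8, num_topics], 1.0 / num_topics)
bag_of_words_dist = decoder(topics)  # tfd.OneHotCategorical over the vocabulary
```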
|
test
|
make_prior
|
Create the prior distribution.
Args:
num_topics: Number of topics.
initial_value: The starting value for the prior parameters.
Returns:
prior: A `callable` that returns a `tfd.Distribution`
instance, the prior distribution.
prior_variables: A `list` of `Variable` objects, the trainable parameters
of the prior.
|
tensorflow_probability/examples/latent_dirichlet_allocation_distributions.py
|
def make_prior(num_topics, initial_value):
"""Create the prior distribution.
Args:
num_topics: Number of topics.
initial_value: The starting value for the prior parameters.
Returns:
prior: A `callable` that returns a `tfd.Distribution`
instance, the prior distribution.
prior_variables: A `list` of `Variable` objects, the trainable parameters
of the prior.
"""
def _softplus_inverse(x):
return np.log(np.expm1(x))
logit_concentration = tf.compat.v1.get_variable(
"logit_concentration",
shape=[1, num_topics],
initializer=tf.compat.v1.initializers.constant(
_softplus_inverse(initial_value)))
concentration = _clip_dirichlet_parameters(
tf.nn.softplus(logit_concentration))
def prior():
return tfd.Dirichlet(concentration=concentration,
name="topics_prior")
prior_variables = [logit_concentration]
return prior, prior_variables
|
def make_prior(num_topics, initial_value):
"""Create the prior distribution.
Args:
num_topics: Number of topics.
initial_value: The starting value for the prior parameters.
Returns:
prior: A `callable` that returns a `tfd.Distribution`
instance, the prior distribution.
prior_variables: A `list` of `Variable` objects, the trainable parameters
of the prior.
"""
def _softplus_inverse(x):
return np.log(np.expm1(x))
logit_concentration = tf.compat.v1.get_variable(
"logit_concentration",
shape=[1, num_topics],
initializer=tf.compat.v1.initializers.constant(
_softplus_inverse(initial_value)))
concentration = _clip_dirichlet_parameters(
tf.nn.softplus(logit_concentration))
def prior():
return tfd.Dirichlet(concentration=concentration,
name="topics_prior")
prior_variables = [logit_concentration]
return prior, prior_variables
|
[
"Create",
"the",
"prior",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/latent_dirichlet_allocation_distributions.py#L225-L255
|
[
"def",
"make_prior",
"(",
"num_topics",
",",
"initial_value",
")",
":",
"def",
"_softplus_inverse",
"(",
"x",
")",
":",
"return",
"np",
".",
"log",
"(",
"np",
".",
"expm1",
"(",
"x",
")",
")",
"logit_concentration",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"get_variable",
"(",
"\"logit_concentration\"",
",",
"shape",
"=",
"[",
"1",
",",
"num_topics",
"]",
",",
"initializer",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"initializers",
".",
"constant",
"(",
"_softplus_inverse",
"(",
"initial_value",
")",
")",
")",
"concentration",
"=",
"_clip_dirichlet_parameters",
"(",
"tf",
".",
"nn",
".",
"softplus",
"(",
"logit_concentration",
")",
")",
"def",
"prior",
"(",
")",
":",
"return",
"tfd",
".",
"Dirichlet",
"(",
"concentration",
"=",
"concentration",
",",
"name",
"=",
"\"topics_prior\"",
")",
"prior_variables",
"=",
"[",
"logit_concentration",
"]",
"return",
"prior",
",",
"prior_variables"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
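A brief sketch of `make_prior`; as with the decoder, the variable is created through `tf.compat.v1.get_variable`, so this assumes v1 graph mode (e.g. after `tf.compat.v1.disable_eager_execution()`), and the initial value is illustrative:

```python
# Dirichlet prior over 50 topics, with a trainable concentration initialized
# near 0.7 (stored via its softplus inverse); in the example it is only
# updated after the prior burn-in phase handled by `model_fn`.
prior, prior_variables = make_prior(num_topics=50, initial_value=0.7)

topics_prior = prior()  # tfd.Dirichlet with concentration of shape [1, 50]
concentration = topics_prior.concentration
```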
|
test
|
model_fn
|
Build the model function for use in an estimator.
Arguments:
features: The input features for the estimator.
labels: The labels, unused here.
mode: Signifies whether it is train or test or predict.
params: Some hyperparameters as a dictionary.
config: The RunConfig, unused here.
Returns:
EstimatorSpec: A tf.estimator.EstimatorSpec instance.
|
tensorflow_probability/examples/latent_dirichlet_allocation_distributions.py
|
def model_fn(features, labels, mode, params, config):
"""Build the model function for use in an estimator.
Arguments:
features: The input features for the estimator.
labels: The labels, unused here.
mode: Signifies whether it is train or test or predict.
params: Some hyperparameters as a dictionary.
config: The RunConfig, unused here.
Returns:
EstimatorSpec: A tf.estimator.EstimatorSpec instance.
"""
del labels, config
encoder = make_encoder(params["activation"],
params["num_topics"],
params["layer_sizes"])
decoder, topics_words = make_decoder(params["num_topics"],
features.shape[1])
prior, prior_variables = make_prior(params["num_topics"],
params["prior_initial_value"])
topics_prior = prior()
alpha = topics_prior.concentration
topics_posterior = encoder(features)
topics = topics_posterior.sample()
random_reconstruction = decoder(topics)
reconstruction = random_reconstruction.log_prob(features)
tf.compat.v1.summary.scalar("reconstruction",
tf.reduce_mean(input_tensor=reconstruction))
# Compute the KL-divergence between two Dirichlets analytically.
# The sampled KL does not work well for "sparse" distributions
# (see Appendix D of [2]).
kl = tfd.kl_divergence(topics_posterior, topics_prior)
tf.compat.v1.summary.scalar("kl", tf.reduce_mean(input_tensor=kl))
# Ensure that the KL is non-negative (up to a very small slack).
# Negative KL can happen due to numerical instability.
with tf.control_dependencies(
[tf.compat.v1.assert_greater(kl, -1e-3, message="kl")]):
kl = tf.identity(kl)
elbo = reconstruction - kl
avg_elbo = tf.reduce_mean(input_tensor=elbo)
tf.compat.v1.summary.scalar("elbo", avg_elbo)
loss = -avg_elbo
# Perform variational inference by minimizing the -ELBO.
global_step = tf.compat.v1.train.get_or_create_global_step()
optimizer = tf.compat.v1.train.AdamOptimizer(params["learning_rate"])
# This implements the "burn-in" for prior parameters (see Appendix D of [2]).
# For the first prior_burn_in_steps steps they are fixed, and then trained
# jointly with the other parameters.
grads_and_vars = optimizer.compute_gradients(loss)
grads_and_vars_except_prior = [
x for x in grads_and_vars if x[1] not in prior_variables]
def train_op_except_prior():
return optimizer.apply_gradients(
grads_and_vars_except_prior,
global_step=global_step)
def train_op_all():
return optimizer.apply_gradients(
grads_and_vars,
global_step=global_step)
train_op = tf.cond(
pred=global_step < params["prior_burn_in_steps"],
true_fn=train_op_except_prior,
false_fn=train_op_all)
# The perplexity is an exponent of the average negative ELBO per word.
words_per_document = tf.reduce_sum(input_tensor=features, axis=1)
log_perplexity = -elbo / words_per_document
tf.compat.v1.summary.scalar(
"perplexity", tf.exp(tf.reduce_mean(input_tensor=log_perplexity)))
(log_perplexity_tensor,
log_perplexity_update) = tf.compat.v1.metrics.mean(log_perplexity)
perplexity_tensor = tf.exp(log_perplexity_tensor)
# Obtain the topics summary. Implemented as a py_func for simplicity.
topics = tf.compat.v1.py_func(
functools.partial(get_topics_strings, vocabulary=params["vocabulary"]),
[topics_words, alpha],
tf.string,
stateful=False)
tf.compat.v1.summary.text("topics", topics)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops={
"elbo": tf.compat.v1.metrics.mean(elbo),
"reconstruction": tf.compat.v1.metrics.mean(reconstruction),
"kl": tf.compat.v1.metrics.mean(kl),
"perplexity": (perplexity_tensor, log_perplexity_update),
"topics": (topics, tf.no_op()),
},
)
|
def model_fn(features, labels, mode, params, config):
"""Build the model function for use in an estimator.
Arguments:
features: The input features for the estimator.
labels: The labels, unused here.
mode: Signifies whether it is train or test or predict.
params: Some hyperparameters as a dictionary.
config: The RunConfig, unused here.
Returns:
EstimatorSpec: A tf.estimator.EstimatorSpec instance.
"""
del labels, config
encoder = make_encoder(params["activation"],
params["num_topics"],
params["layer_sizes"])
decoder, topics_words = make_decoder(params["num_topics"],
features.shape[1])
prior, prior_variables = make_prior(params["num_topics"],
params["prior_initial_value"])
topics_prior = prior()
alpha = topics_prior.concentration
topics_posterior = encoder(features)
topics = topics_posterior.sample()
random_reconstruction = decoder(topics)
reconstruction = random_reconstruction.log_prob(features)
tf.compat.v1.summary.scalar("reconstruction",
tf.reduce_mean(input_tensor=reconstruction))
# Compute the KL-divergence between two Dirichlets analytically.
# The sampled KL does not work well for "sparse" distributions
# (see Appendix D of [2]).
kl = tfd.kl_divergence(topics_posterior, topics_prior)
tf.compat.v1.summary.scalar("kl", tf.reduce_mean(input_tensor=kl))
# Ensure that the KL is non-negative (up to a very small slack).
# Negative KL can happen due to numerical instability.
with tf.control_dependencies(
[tf.compat.v1.assert_greater(kl, -1e-3, message="kl")]):
kl = tf.identity(kl)
elbo = reconstruction - kl
avg_elbo = tf.reduce_mean(input_tensor=elbo)
tf.compat.v1.summary.scalar("elbo", avg_elbo)
loss = -avg_elbo
# Perform variational inference by minimizing the -ELBO.
global_step = tf.compat.v1.train.get_or_create_global_step()
optimizer = tf.compat.v1.train.AdamOptimizer(params["learning_rate"])
# This implements the "burn-in" for prior parameters (see Appendix D of [2]).
# For the first prior_burn_in_steps steps they are fixed, and then trained
# jointly with the other parameters.
grads_and_vars = optimizer.compute_gradients(loss)
grads_and_vars_except_prior = [
x for x in grads_and_vars if x[1] not in prior_variables]
def train_op_except_prior():
return optimizer.apply_gradients(
grads_and_vars_except_prior,
global_step=global_step)
def train_op_all():
return optimizer.apply_gradients(
grads_and_vars,
global_step=global_step)
train_op = tf.cond(
pred=global_step < params["prior_burn_in_steps"],
true_fn=train_op_except_prior,
false_fn=train_op_all)
# The perplexity is an exponent of the average negative ELBO per word.
words_per_document = tf.reduce_sum(input_tensor=features, axis=1)
log_perplexity = -elbo / words_per_document
tf.compat.v1.summary.scalar(
"perplexity", tf.exp(tf.reduce_mean(input_tensor=log_perplexity)))
(log_perplexity_tensor,
log_perplexity_update) = tf.compat.v1.metrics.mean(log_perplexity)
perplexity_tensor = tf.exp(log_perplexity_tensor)
# Obtain the topics summary. Implemented as a py_func for simplicity.
topics = tf.compat.v1.py_func(
functools.partial(get_topics_strings, vocabulary=params["vocabulary"]),
[topics_words, alpha],
tf.string,
stateful=False)
tf.compat.v1.summary.text("topics", topics)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops={
"elbo": tf.compat.v1.metrics.mean(elbo),
"reconstruction": tf.compat.v1.metrics.mean(reconstruction),
"kl": tf.compat.v1.metrics.mean(kl),
"perplexity": (perplexity_tensor, log_perplexity_update),
"topics": (topics, tf.no_op()),
},
)
|
[
"Build",
"the",
"model",
"function",
"for",
"use",
"in",
"an",
"estimator",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/latent_dirichlet_allocation_distributions.py#L258-L362
|
[
"def",
"model_fn",
"(",
"features",
",",
"labels",
",",
"mode",
",",
"params",
",",
"config",
")",
":",
"del",
"labels",
",",
"config",
"encoder",
"=",
"make_encoder",
"(",
"params",
"[",
"\"activation\"",
"]",
",",
"params",
"[",
"\"num_topics\"",
"]",
",",
"params",
"[",
"\"layer_sizes\"",
"]",
")",
"decoder",
",",
"topics_words",
"=",
"make_decoder",
"(",
"params",
"[",
"\"num_topics\"",
"]",
",",
"features",
".",
"shape",
"[",
"1",
"]",
")",
"prior",
",",
"prior_variables",
"=",
"make_prior",
"(",
"params",
"[",
"\"num_topics\"",
"]",
",",
"params",
"[",
"\"prior_initial_value\"",
"]",
")",
"topics_prior",
"=",
"prior",
"(",
")",
"alpha",
"=",
"topics_prior",
".",
"concentration",
"topics_posterior",
"=",
"encoder",
"(",
"features",
")",
"topics",
"=",
"topics_posterior",
".",
"sample",
"(",
")",
"random_reconstruction",
"=",
"decoder",
"(",
"topics",
")",
"reconstruction",
"=",
"random_reconstruction",
".",
"log_prob",
"(",
"features",
")",
"tf",
".",
"compat",
".",
"v1",
".",
"summary",
".",
"scalar",
"(",
"\"reconstruction\"",
",",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"reconstruction",
")",
")",
"# Compute the KL-divergence between two Dirichlets analytically.",
"# The sampled KL does not work well for \"sparse\" distributions",
"# (see Appendix D of [2]).",
"kl",
"=",
"tfd",
".",
"kl_divergence",
"(",
"topics_posterior",
",",
"topics_prior",
")",
"tf",
".",
"compat",
".",
"v1",
".",
"summary",
".",
"scalar",
"(",
"\"kl\"",
",",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"kl",
")",
")",
"# Ensure that the KL is non-negative (up to a very small slack).",
"# Negative KL can happen due to numerical instability.",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_greater",
"(",
"kl",
",",
"-",
"1e-3",
",",
"message",
"=",
"\"kl\"",
")",
"]",
")",
":",
"kl",
"=",
"tf",
".",
"identity",
"(",
"kl",
")",
"elbo",
"=",
"reconstruction",
"-",
"kl",
"avg_elbo",
"=",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"elbo",
")",
"tf",
".",
"compat",
".",
"v1",
".",
"summary",
".",
"scalar",
"(",
"\"elbo\"",
",",
"avg_elbo",
")",
"loss",
"=",
"-",
"avg_elbo",
"# Perform variational inference by minimizing the -ELBO.",
"global_step",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"train",
".",
"get_or_create_global_step",
"(",
")",
"optimizer",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"train",
".",
"AdamOptimizer",
"(",
"params",
"[",
"\"learning_rate\"",
"]",
")",
"# This implements the \"burn-in\" for prior parameters (see Appendix D of [2]).",
"# For the first prior_burn_in_steps steps they are fixed, and then trained",
"# jointly with the other parameters.",
"grads_and_vars",
"=",
"optimizer",
".",
"compute_gradients",
"(",
"loss",
")",
"grads_and_vars_except_prior",
"=",
"[",
"x",
"for",
"x",
"in",
"grads_and_vars",
"if",
"x",
"[",
"1",
"]",
"not",
"in",
"prior_variables",
"]",
"def",
"train_op_except_prior",
"(",
")",
":",
"return",
"optimizer",
".",
"apply_gradients",
"(",
"grads_and_vars_except_prior",
",",
"global_step",
"=",
"global_step",
")",
"def",
"train_op_all",
"(",
")",
":",
"return",
"optimizer",
".",
"apply_gradients",
"(",
"grads_and_vars",
",",
"global_step",
"=",
"global_step",
")",
"train_op",
"=",
"tf",
".",
"cond",
"(",
"pred",
"=",
"global_step",
"<",
"params",
"[",
"\"prior_burn_in_steps\"",
"]",
",",
"true_fn",
"=",
"train_op_except_prior",
",",
"false_fn",
"=",
"train_op_all",
")",
"# The perplexity is an exponent of the average negative ELBO per word.",
"words_per_document",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"features",
",",
"axis",
"=",
"1",
")",
"log_perplexity",
"=",
"-",
"elbo",
"/",
"words_per_document",
"tf",
".",
"compat",
".",
"v1",
".",
"summary",
".",
"scalar",
"(",
"\"perplexity\"",
",",
"tf",
".",
"exp",
"(",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"log_perplexity",
")",
")",
")",
"(",
"log_perplexity_tensor",
",",
"log_perplexity_update",
")",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"metrics",
".",
"mean",
"(",
"log_perplexity",
")",
"perplexity_tensor",
"=",
"tf",
".",
"exp",
"(",
"log_perplexity_tensor",
")",
"# Obtain the topics summary. Implemented as a py_func for simplicity.",
"topics",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"py_func",
"(",
"functools",
".",
"partial",
"(",
"get_topics_strings",
",",
"vocabulary",
"=",
"params",
"[",
"\"vocabulary\"",
"]",
")",
",",
"[",
"topics_words",
",",
"alpha",
"]",
",",
"tf",
".",
"string",
",",
"stateful",
"=",
"False",
")",
"tf",
".",
"compat",
".",
"v1",
".",
"summary",
".",
"text",
"(",
"\"topics\"",
",",
"topics",
")",
"return",
"tf",
".",
"estimator",
".",
"EstimatorSpec",
"(",
"mode",
"=",
"mode",
",",
"loss",
"=",
"loss",
",",
"train_op",
"=",
"train_op",
",",
"eval_metric_ops",
"=",
"{",
"\"elbo\"",
":",
"tf",
".",
"compat",
".",
"v1",
".",
"metrics",
".",
"mean",
"(",
"elbo",
")",
",",
"\"reconstruction\"",
":",
"tf",
".",
"compat",
".",
"v1",
".",
"metrics",
".",
"mean",
"(",
"reconstruction",
")",
",",
"\"kl\"",
":",
"tf",
".",
"compat",
".",
"v1",
".",
"metrics",
".",
"mean",
"(",
"kl",
")",
",",
"\"perplexity\"",
":",
"(",
"perplexity_tensor",
",",
"log_perplexity_update",
")",
",",
"\"topics\"",
":",
"(",
"topics",
",",
"tf",
".",
"no_op",
"(",
")",
")",
",",
"}",
",",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
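A hypothetical sketch of wiring `model_fn` into a `tf.estimator.Estimator`. The `params` keys mirror the ones read inside `model_fn`; every concrete value, the model directory, the vocabulary stand-in, and the input function are illustrative assumptions:

```python
import tensorflow as tf

vocabulary = ["the", "of", "and"]  # illustrative stand-in for the real vocabulary

params = {
    "activation": tf.nn.relu,
    "num_topics": 50,
    "layer_sizes": [300, 300, 300],
    "learning_rate": 3e-4,
    "prior_initial_value": 0.7,
    "prior_burn_in_steps": 120000,
    "vocabulary": vocabulary,
}

estimator = tf.estimator.Estimator(
    model_fn=model_fn,
    params=params,
    config=tf.estimator.RunConfig(model_dir="/tmp/lda"),
)
# estimator.train(input_fn=train_input_fn, steps=1000)  # train_input_fn assumed
```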
|
test
|
sample_chain
|
Implements Markov chain Monte Carlo via repeated `TransitionKernel` steps.
This function samples from a Markov chain whose initial state is
`current_state` and whose stationary distribution is governed by the supplied
`TransitionKernel` instance (`kernel`).
This function can sample from multiple chains, in parallel. (Whether or not
there are multiple chains is dictated by the `kernel`.)
The `current_state` can be represented as a single `Tensor` or a `list` of
`Tensors` which collectively represent the current state.
Since MCMC states are correlated, it is sometimes desirable to produce
additional intermediate states, and then discard them, ending up with a set of
states with decreased autocorrelation. See [Owen (2017)][1]. Such "thinning"
is made possible by setting `num_steps_between_results > 0`. The chain then
takes `num_steps_between_results` extra steps between the steps that make it
into the results. The extra steps are never materialized (in calls to
`sess.run`), and thus do not increase memory requirements.
Warning: when setting a `seed` in the `kernel`, ensure that `sample_chain`'s
`parallel_iterations=1`, otherwise results will not be reproducible.
In addition to returning the chain state, this function supports tracing of
auxiliary variables used by the kernel. The traced values are selected by
specifying `trace_fn`. By default, all kernel results are traced but in the
future the default will be changed to no results being traced, so plan
accordingly. See below for some examples of this feature.
Args:
num_results: Integer number of Markov chain draws.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s
representing internal calculations made within the previous call to this
function (or as returned by `bootstrap_results`).
kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
of the Markov chain.
num_burnin_steps: Integer number of chain steps to take before starting to
collect results.
Default value: 0 (i.e., no burn-in).
num_steps_between_results: Integer number of chain steps between collecting
a result. Only one out of every `num_steps_between_results + 1` steps is
included in the returned results. The number of returned chain states is
still equal to `num_results`. Default value: 0 (i.e., no thinning).
trace_fn: A callable that takes in the current chain state and the previous
kernel results and returns a `Tensor` or a nested collection of `Tensor`s
that is then traced along with the chain state.
return_final_kernel_results: If `True`, then the final kernel results are
returned alongside the chain state and the trace specified by the
`trace_fn`.
parallel_iterations: The number of iterations allowed to run in parallel. It
must be a positive integer. See `tf.while_loop` for more details.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "mcmc_sample_chain").
Returns:
checkpointable_states_and_trace: if `return_final_kernel_results` is
`True`. The return value is an instance of
`CheckpointableStatesAndTrace`.
all_states: if `return_final_kernel_results` is `False` and `trace_fn` is
`None`. The return value is a `Tensor` or Python list of `Tensor`s
representing the state(s) of the Markov chain(s) at each result step. Has
same shape as input `current_state` but with a prepended
`num_results`-size dimension.
states_and_trace: if `return_final_kernel_results` is `False` and
`trace_fn` is not `None`. The return value is an instance of
`StatesAndTrace`.
#### Examples
##### Sample from a diagonal-variance Gaussian.
I.e.,
```none
for i=1..n:
x[i] ~ MultivariateNormal(loc=0, scale=diag(true_stddev)) # likelihood
```
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
dims = 10
true_stddev = np.sqrt(np.linspace(1., 3., dims))
likelihood = tfd.MultivariateNormalDiag(loc=0., scale_diag=true_stddev)
states = tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=500,
current_state=tf.zeros(dims),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=likelihood.log_prob,
step_size=0.5,
num_leapfrog_steps=2),
trace_fn=None)
sample_mean = tf.reduce_mean(states, axis=0)
# ==> approx all zeros
sample_stddev = tf.sqrt(tf.reduce_mean(
tf.squared_difference(states, sample_mean),
axis=0))
# ==> approx equal true_stddev
```
##### Sampling from factor-analysis posteriors with known factors.
I.e.,
```none
# prior
w ~ MultivariateNormal(loc=0, scale=eye(d))
for i=1..n:
# likelihood
x[i] ~ Normal(loc=w^T F[i], scale=1)
```
where `F` denotes factors.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
# Specify model.
def make_prior(dims):
return tfd.MultivariateNormalDiag(
loc=tf.zeros(dims))
def make_likelihood(weights, factors):
return tfd.MultivariateNormalDiag(
loc=tf.matmul(weights, factors, adjoint_b=True))
def joint_log_prob(num_weights, factors, x, w):
return (make_prior(num_weights).log_prob(w) +
make_likelihood(w, factors).log_prob(x))
def unnormalized_log_posterior(w):
# Posterior is proportional to: `p(W, X=x | factors)`.
return joint_log_prob(num_weights, factors, x, w)
# Setup data.
num_weights = 10 # == d
num_factors = 40 # == n
num_chains = 100
weights = make_prior(num_weights).sample(1)
factors = tf.random_normal([num_factors, num_weights])
x = make_likelihood(weights, factors).sample()
# Sample from Hamiltonian Monte Carlo Markov Chain.
# Get `num_results` samples from `num_chains` independent chains.
chains_states, kernels_results = tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=500,
current_state=tf.zeros([num_chains, num_weights], name='init_weights'),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_log_posterior,
step_size=0.1,
num_leapfrog_steps=2))
# Compute sample stats.
sample_mean = tf.reduce_mean(chains_states, axis=[0, 1])
# ==> approx equal to weights
sample_var = tf.reduce_mean(
tf.squared_difference(chains_states, sample_mean),
axis=[0, 1])
# ==> less than 1
```
##### Custom tracing functions.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
likelihood = tfd.Normal(loc=0., scale=1.)
def sample_chain(trace_fn):
return tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=500,
current_state=0.,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=likelihood.log_prob,
step_size=0.5,
num_leapfrog_steps=2),
trace_fn=trace_fn)
def trace_log_accept_ratio(states, previous_kernel_results):
return previous_kernel_results.log_accept_ratio
def trace_everything(states, previous_kernel_results):
return previous_kernel_results
_, log_accept_ratio = sample_chain(trace_fn=trace_log_accept_ratio)
_, kernel_results = sample_chain(trace_fn=trace_everything)
acceptance_prob = tf.exp(tf.minimum(log_accept_ratio, 0.))
# Equivalent to, but more efficient than:
acceptance_prob = tf.exp(tf.minimum(kernel_results.log_accept_ratio, 0.))
```
#### References
[1]: Art B. Owen. Statistically efficient thinning of a Markov chain sampler.
_Technical Report_, 2017.
http://statweb.stanford.edu/~owen/reports/bestthinning.pdf
|
tensorflow_probability/python/mcmc/sample.py
|
def sample_chain(
num_results,
current_state,
previous_kernel_results=None,
kernel=None,
num_burnin_steps=0,
num_steps_between_results=0,
trace_fn=lambda current_state, kernel_results: kernel_results,
return_final_kernel_results=False,
parallel_iterations=10,
name=None,
):
"""Implements Markov chain Monte Carlo via repeated `TransitionKernel` steps.
This function samples from a Markov chain whose initial state is
`current_state` and whose stationary distribution is governed by the supplied
`TransitionKernel` instance (`kernel`).
This function can sample from multiple chains, in parallel. (Whether or not
there are multiple chains is dictated by the `kernel`.)
The `current_state` can be represented as a single `Tensor` or a `list` of
`Tensors` which collectively represent the current state.
Since MCMC states are correlated, it is sometimes desirable to produce
additional intermediate states, and then discard them, ending up with a set of
states with decreased autocorrelation. See [Owen (2017)][1]. Such "thinning"
is made possible by setting `num_steps_between_results > 0`. The chain then
takes `num_steps_between_results` extra steps between the steps that make it
into the results. The extra steps are never materialized (in calls to
`sess.run`), and thus do not increase memory requirements.
Warning: when setting a `seed` in the `kernel`, ensure that `sample_chain`'s
`parallel_iterations=1`, otherwise results will not be reproducible.
In addition to returning the chain state, this function supports tracing of
auxiliary variables used by the kernel. The traced values are selected by
specifying `trace_fn`. By default, all kernel results are traced but in the
future the default will be changed to no results being traced, so plan
accordingly. See below for some examples of this feature.
Args:
num_results: Integer number of Markov chain draws.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s
representing internal calculations made within the previous call to this
function (or as returned by `bootstrap_results`).
kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
of the Markov chain.
num_burnin_steps: Integer number of chain steps to take before starting to
collect results.
Default value: 0 (i.e., no burn-in).
num_steps_between_results: Integer number of chain steps between collecting
a result. Only one out of every `num_steps_between_results + 1` steps is
included in the returned results. The number of returned chain states is
still equal to `num_results`. Default value: 0 (i.e., no thinning).
trace_fn: A callable that takes in the current chain state and the previous
kernel results and returns a `Tensor` or a nested collection of `Tensor`s
that is then traced along with the chain state.
return_final_kernel_results: If `True`, then the final kernel results are
returned alongside the chain state and the trace specified by the
`trace_fn`.
parallel_iterations: The number of iterations allowed to run in parallel. It
must be a positive integer. See `tf.while_loop` for more details.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "mcmc_sample_chain").
Returns:
checkpointable_states_and_trace: if `return_final_kernel_results` is
`True`. The return value is an instance of
`CheckpointableStatesAndTrace`.
all_states: if `return_final_kernel_results` is `False` and `trace_fn` is
`None`. The return value is a `Tensor` or Python list of `Tensor`s
representing the state(s) of the Markov chain(s) at each result step. Has
same shape as input `current_state` but with a prepended
`num_results`-size dimension.
states_and_trace: if `return_final_kernel_results` is `False` and
`trace_fn` is not `None`. The return value is an instance of
`StatesAndTrace`.
#### Examples
##### Sample from a diagonal-variance Gaussian.
I.e.,
```none
for i=1..n:
x[i] ~ MultivariateNormal(loc=0, scale=diag(true_stddev)) # likelihood
```
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
dims = 10
true_stddev = np.sqrt(np.linspace(1., 3., dims))
likelihood = tfd.MultivariateNormalDiag(loc=0., scale_diag=true_stddev)
states = tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=500,
current_state=tf.zeros(dims),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=likelihood.log_prob,
step_size=0.5,
num_leapfrog_steps=2),
trace_fn=None)
sample_mean = tf.reduce_mean(states, axis=0)
# ==> approx all zeros
sample_stddev = tf.sqrt(tf.reduce_mean(
tf.squared_difference(states, sample_mean),
axis=0))
# ==> approx equal true_stddev
```
##### Sampling from factor-analysis posteriors with known factors.
I.e.,
```none
# prior
w ~ MultivariateNormal(loc=0, scale=eye(d))
for i=1..n:
# likelihood
x[i] ~ Normal(loc=w^T F[i], scale=1)
```
where `F` denotes factors.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
# Specify model.
def make_prior(dims):
return tfd.MultivariateNormalDiag(
loc=tf.zeros(dims))
def make_likelihood(weights, factors):
return tfd.MultivariateNormalDiag(
loc=tf.matmul(weights, factors, adjoint_b=True))
def joint_log_prob(num_weights, factors, x, w):
return (make_prior(num_weights).log_prob(w) +
make_likelihood(w, factors).log_prob(x))
def unnormalized_log_posterior(w):
# Posterior is proportional to: `p(W, X=x | factors)`.
return joint_log_prob(num_weights, factors, x, w)
# Setup data.
num_weights = 10 # == d
num_factors = 40 # == n
num_chains = 100
weights = make_prior(num_weights).sample(1)
factors = tf.random_normal([num_factors, num_weights])
x = make_likelihood(weights, factors).sample()
# Sample from Hamiltonian Monte Carlo Markov Chain.
# Get `num_results` samples from `num_chains` independent chains.
chains_states, kernels_results = tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=500,
current_state=tf.zeros([num_chains, num_weights], name='init_weights'),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_log_posterior,
step_size=0.1,
num_leapfrog_steps=2))
# Compute sample stats.
sample_mean = tf.reduce_mean(chains_states, axis=[0, 1])
# ==> approx equal to weights
sample_var = tf.reduce_mean(
tf.squared_difference(chains_states, sample_mean),
axis=[0, 1])
# ==> less than 1
```
##### Custom tracing functions.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
likelihood = tfd.Normal(loc=0., scale=1.)
def sample_chain(trace_fn):
return tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=500,
current_state=0.,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=likelihood.log_prob,
step_size=0.5,
num_leapfrog_steps=2),
trace_fn=trace_fn)
def trace_log_accept_ratio(states, previous_kernel_results):
return previous_kernel_results.log_accept_ratio
def trace_everything(states, previous_kernel_results):
return previous_kernel_results
_, log_accept_ratio = sample_chain(trace_fn=trace_log_accept_ratio)
_, kernel_results = sample_chain(trace_fn=trace_everything)
acceptance_prob = tf.exp(tf.minimum(log_accept_ratio, 0.))
# Equivalent to, but more efficient than:
acceptance_prob = tf.exp(tf.minimum(kernel_results.log_accept_ratio, 0.))
```
#### References
[1]: Art B. Owen. Statistically efficient thinning of a Markov chain sampler.
_Technical Report_, 2017.
http://statweb.stanford.edu/~owen/reports/bestthinning.pdf
"""
if not kernel.is_calibrated:
warnings.warn("supplied `TransitionKernel` is not calibrated. Markov "
"chain may not converge to intended target distribution.")
with tf.compat.v1.name_scope(
name, "mcmc_sample_chain",
[num_results, num_burnin_steps, num_steps_between_results]):
num_results = tf.convert_to_tensor(
value=num_results, dtype=tf.int32, name="num_results")
num_burnin_steps = tf.convert_to_tensor(
value=num_burnin_steps, dtype=tf.int32, name="num_burnin_steps")
num_steps_between_results = tf.convert_to_tensor(
value=num_steps_between_results,
dtype=tf.int32,
name="num_steps_between_results")
current_state = tf.nest.map_structure(
lambda x: tf.convert_to_tensor(value=x, name="current_state"),
current_state)
if previous_kernel_results is None:
previous_kernel_results = kernel.bootstrap_results(current_state)
if trace_fn is None:
# It simplifies the logic to use a dummy function here.
trace_fn = lambda *args: ()
no_trace = True
else:
no_trace = False
if trace_fn is sample_chain.__defaults__[4]:
warnings.warn("Tracing all kernel results by default is deprecated. Set "
"the `trace_fn` argument to None (the future default "
"value) or an explicit callback that traces the values "
"you are interested in.")
def _trace_scan_fn(state_and_results, num_steps):
next_state, current_kernel_results = mcmc_util.smart_for_loop(
loop_num_iter=num_steps,
body_fn=kernel.one_step,
initial_loop_vars=list(state_and_results),
parallel_iterations=parallel_iterations)
return next_state, current_kernel_results
(_, final_kernel_results), (all_states, trace) = mcmc_util.trace_scan(
loop_fn=_trace_scan_fn,
initial_state=(current_state, previous_kernel_results),
elems=tf.one_hot(
indices=0,
depth=num_results,
on_value=1 + num_burnin_steps,
off_value=1 + num_steps_between_results,
dtype=tf.int32),
# pylint: disable=g-long-lambda
trace_fn=lambda state_and_results: (state_and_results[0],
trace_fn(*state_and_results)),
# pylint: enable=g-long-lambda
parallel_iterations=parallel_iterations)
if return_final_kernel_results:
return CheckpointableStatesAndTrace(
all_states=all_states,
trace=trace,
final_kernel_results=final_kernel_results)
else:
if no_trace:
return all_states
else:
return StatesAndTrace(all_states=all_states, trace=trace)
|
def sample_chain(
num_results,
current_state,
previous_kernel_results=None,
kernel=None,
num_burnin_steps=0,
num_steps_between_results=0,
trace_fn=lambda current_state, kernel_results: kernel_results,
return_final_kernel_results=False,
parallel_iterations=10,
name=None,
):
"""Implements Markov chain Monte Carlo via repeated `TransitionKernel` steps.
This function samples from a Markov chain whose initial state is
`current_state` and whose stationary distribution is governed by the supplied
`TransitionKernel` instance (`kernel`).
This function can sample from multiple chains, in parallel. (Whether or not
there are multiple chains is dictated by the `kernel`.)
The `current_state` can be represented as a single `Tensor` or a `list` of
`Tensors` which collectively represent the current state.
Since MCMC states are correlated, it is sometimes desirable to produce
additional intermediate states, and then discard them, ending up with a set of
states with decreased autocorrelation. See [Owen (2017)][1]. Such "thinning"
is made possible by setting `num_steps_between_results > 0`. The chain then
takes `num_steps_between_results` extra steps between the steps that make it
into the results. The extra steps are never materialized (in calls to
`sess.run`), and thus do not increase memory requirements.
Warning: when setting a `seed` in the `kernel`, ensure that `sample_chain`'s
`parallel_iterations=1`, otherwise results will not be reproducible.
In addition to returning the chain state, this function supports tracing of
auxiliary variables used by the kernel. The traced values are selected by
specifying `trace_fn`. By default, all kernel results are traced but in the
future the default will be changed to no results being traced, so plan
accordingly. See below for some examples of this feature.
Args:
num_results: Integer number of Markov chain draws.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s
representing internal calculations made within the previous call to this
function (or as returned by `bootstrap_results`).
kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
of the Markov chain.
num_burnin_steps: Integer number of chain steps to take before starting to
collect results.
Default value: 0 (i.e., no burn-in).
num_steps_between_results: Integer number of chain steps between collecting
a result. Only one out of every `num_steps_between_results + 1` steps is
included in the returned results. The number of returned chain states is
still equal to `num_results`. Default value: 0 (i.e., no thinning).
trace_fn: A callable that takes in the current chain state and the previous
kernel results and returns a `Tensor` or a nested collection of `Tensor`s
that is then traced along with the chain state.
return_final_kernel_results: If `True`, then the final kernel results are
returned alongside the chain state and the trace specified by the
`trace_fn`.
parallel_iterations: The number of iterations allowed to run in parallel. It
must be a positive integer. See `tf.while_loop` for more details.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "mcmc_sample_chain").
Returns:
checkpointable_states_and_trace: if `return_final_kernel_results` is
`True`. The return value is an instance of
`CheckpointableStatesAndTrace`.
all_states: if `return_final_kernel_results` is `False` and `trace_fn` is
`None`. The return value is a `Tensor` or Python list of `Tensor`s
representing the state(s) of the Markov chain(s) at each result step. Has
same shape as input `current_state` but with a prepended
`num_results`-size dimension.
states_and_trace: if `return_final_kernel_results` is `False` and
`trace_fn` is not `None`. The return value is an instance of
`StatesAndTrace`.
#### Examples
##### Sample from a diagonal-variance Gaussian.
I.e.,
```none
for i=1..n:
x[i] ~ MultivariateNormal(loc=0, scale=diag(true_stddev)) # likelihood
```
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
dims = 10
true_stddev = np.sqrt(np.linspace(1., 3., dims))
likelihood = tfd.MultivariateNormalDiag(loc=0., scale_diag=true_stddev)
states = tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=500,
current_state=tf.zeros(dims),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=likelihood.log_prob,
step_size=0.5,
num_leapfrog_steps=2),
trace_fn=None)
sample_mean = tf.reduce_mean(states, axis=0)
# ==> approx all zeros
sample_stddev = tf.sqrt(tf.reduce_mean(
tf.squared_difference(states, sample_mean),
axis=0))
# ==> approx equal true_stddev
```
##### Sampling from factor-analysis posteriors with known factors.
I.e.,
```none
# prior
w ~ MultivariateNormal(loc=0, scale=eye(d))
for i=1..n:
# likelihood
x[i] ~ Normal(loc=w^T F[i], scale=1)
```
where `F` denotes factors.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
# Specify model.
def make_prior(dims):
return tfd.MultivariateNormalDiag(
loc=tf.zeros(dims))
def make_likelihood(weights, factors):
return tfd.MultivariateNormalDiag(
loc=tf.matmul(weights, factors, adjoint_b=True))
def joint_log_prob(num_weights, factors, x, w):
return (make_prior(num_weights).log_prob(w) +
make_likelihood(w, factors).log_prob(x))
def unnormalized_log_posterior(w):
# Posterior is proportional to: `p(W, X=x | factors)`.
return joint_log_prob(num_weights, factors, x, w)
# Setup data.
num_weights = 10 # == d
num_factors = 40 # == n
num_chains = 100
weights = make_prior(num_weights).sample(1)
factors = tf.random_normal([num_factors, num_weights])
x = make_likelihood(weights, factors).sample()
# Sample from Hamiltonian Monte Carlo Markov Chain.
# Get `num_results` samples from `num_chains` independent chains.
chains_states, kernels_results = tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=500,
current_state=tf.zeros([num_chains, num_weights], name='init_weights'),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_log_posterior,
step_size=0.1,
num_leapfrog_steps=2))
# Compute sample stats.
sample_mean = tf.reduce_mean(chains_states, axis=[0, 1])
# ==> approx equal to weights
sample_var = tf.reduce_mean(
tf.squared_difference(chains_states, sample_mean),
axis=[0, 1])
# ==> less than 1
```
##### Custom tracing functions.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
likelihood = tfd.Normal(loc=0., scale=1.)
def sample_chain(trace_fn):
return tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=500,
current_state=0.,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=likelihood.log_prob,
step_size=0.5,
num_leapfrog_steps=2),
trace_fn=trace_fn)
def trace_log_accept_ratio(states, previous_kernel_results):
return previous_kernel_results.log_accept_ratio
def trace_everything(states, previous_kernel_results):
return previous_kernel_results
_, log_accept_ratio = sample_chain(trace_fn=trace_log_accept_ratio)
_, kernel_results = sample_chain(trace_fn=trace_everything)
acceptance_prob = tf.exp(tf.minimum(log_accept_ratio, 0.))
# Equivalent to, but more efficient than:
acceptance_prob = tf.exp(tf.minimum(kernel_results.log_accept_ratio, 0.))
```
#### References
[1]: Art B. Owen. Statistically efficient thinning of a Markov chain sampler.
_Technical Report_, 2017.
http://statweb.stanford.edu/~owen/reports/bestthinning.pdf
"""
if not kernel.is_calibrated:
warnings.warn("supplied `TransitionKernel` is not calibrated. Markov "
"chain may not converge to intended target distribution.")
with tf.compat.v1.name_scope(
name, "mcmc_sample_chain",
[num_results, num_burnin_steps, num_steps_between_results]):
num_results = tf.convert_to_tensor(
value=num_results, dtype=tf.int32, name="num_results")
num_burnin_steps = tf.convert_to_tensor(
value=num_burnin_steps, dtype=tf.int32, name="num_burnin_steps")
num_steps_between_results = tf.convert_to_tensor(
value=num_steps_between_results,
dtype=tf.int32,
name="num_steps_between_results")
current_state = tf.nest.map_structure(
lambda x: tf.convert_to_tensor(value=x, name="current_state"),
current_state)
if previous_kernel_results is None:
previous_kernel_results = kernel.bootstrap_results(current_state)
if trace_fn is None:
# It simplifies the logic to use a dummy function here.
trace_fn = lambda *args: ()
no_trace = True
else:
no_trace = False
if trace_fn is sample_chain.__defaults__[4]:
warnings.warn("Tracing all kernel results by default is deprecated. Set "
"the `trace_fn` argument to None (the future default "
"value) or an explicit callback that traces the values "
"you are interested in.")
def _trace_scan_fn(state_and_results, num_steps):
next_state, current_kernel_results = mcmc_util.smart_for_loop(
loop_num_iter=num_steps,
body_fn=kernel.one_step,
initial_loop_vars=list(state_and_results),
parallel_iterations=parallel_iterations)
return next_state, current_kernel_results
(_, final_kernel_results), (all_states, trace) = mcmc_util.trace_scan(
loop_fn=_trace_scan_fn,
initial_state=(current_state, previous_kernel_results),
elems=tf.one_hot(
indices=0,
depth=num_results,
on_value=1 + num_burnin_steps,
off_value=1 + num_steps_between_results,
dtype=tf.int32),
# pylint: disable=g-long-lambda
trace_fn=lambda state_and_results: (state_and_results[0],
trace_fn(*state_and_results)),
# pylint: enable=g-long-lambda
parallel_iterations=parallel_iterations)
if return_final_kernel_results:
return CheckpointableStatesAndTrace(
all_states=all_states,
trace=trace,
final_kernel_results=final_kernel_results)
else:
if no_trace:
return all_states
else:
return StatesAndTrace(all_states=all_states, trace=trace)
|
[
"Implements",
"Markov",
"chain",
"Monte",
"Carlo",
"via",
"repeated",
"TransitionKernel",
"steps",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/sample.py#L81-L372
|
[
"def",
"sample_chain",
"(",
"num_results",
",",
"current_state",
",",
"previous_kernel_results",
"=",
"None",
",",
"kernel",
"=",
"None",
",",
"num_burnin_steps",
"=",
"0",
",",
"num_steps_between_results",
"=",
"0",
",",
"trace_fn",
"=",
"lambda",
"current_state",
",",
"kernel_results",
":",
"kernel_results",
",",
"return_final_kernel_results",
"=",
"False",
",",
"parallel_iterations",
"=",
"10",
",",
"name",
"=",
"None",
",",
")",
":",
"if",
"not",
"kernel",
".",
"is_calibrated",
":",
"warnings",
".",
"warn",
"(",
"\"supplied `TransitionKernel` is not calibrated. Markov \"",
"\"chain may not converge to intended target distribution.\"",
")",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"\"mcmc_sample_chain\"",
",",
"[",
"num_results",
",",
"num_burnin_steps",
",",
"num_steps_between_results",
"]",
")",
":",
"num_results",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"num_results",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"\"num_results\"",
")",
"num_burnin_steps",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"num_burnin_steps",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"\"num_burnin_steps\"",
")",
"num_steps_between_results",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"num_steps_between_results",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"\"num_steps_between_results\"",
")",
"current_state",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"lambda",
"x",
":",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"current_state\"",
")",
",",
"current_state",
")",
"if",
"previous_kernel_results",
"is",
"None",
":",
"previous_kernel_results",
"=",
"kernel",
".",
"bootstrap_results",
"(",
"current_state",
")",
"if",
"trace_fn",
"is",
"None",
":",
"# It simplifies the logic to use a dummy function here.",
"trace_fn",
"=",
"lambda",
"*",
"args",
":",
"(",
")",
"no_trace",
"=",
"True",
"else",
":",
"no_trace",
"=",
"False",
"if",
"trace_fn",
"is",
"sample_chain",
".",
"__defaults__",
"[",
"4",
"]",
":",
"warnings",
".",
"warn",
"(",
"\"Tracing all kernel results by default is deprecated. Set \"",
"\"the `trace_fn` argument to None (the future default \"",
"\"value) or an explicit callback that traces the values \"",
"\"you are interested in.\"",
")",
"def",
"_trace_scan_fn",
"(",
"state_and_results",
",",
"num_steps",
")",
":",
"next_state",
",",
"current_kernel_results",
"=",
"mcmc_util",
".",
"smart_for_loop",
"(",
"loop_num_iter",
"=",
"num_steps",
",",
"body_fn",
"=",
"kernel",
".",
"one_step",
",",
"initial_loop_vars",
"=",
"list",
"(",
"state_and_results",
")",
",",
"parallel_iterations",
"=",
"parallel_iterations",
")",
"return",
"next_state",
",",
"current_kernel_results",
"(",
"_",
",",
"final_kernel_results",
")",
",",
"(",
"all_states",
",",
"trace",
")",
"=",
"mcmc_util",
".",
"trace_scan",
"(",
"loop_fn",
"=",
"_trace_scan_fn",
",",
"initial_state",
"=",
"(",
"current_state",
",",
"previous_kernel_results",
")",
",",
"elems",
"=",
"tf",
".",
"one_hot",
"(",
"indices",
"=",
"0",
",",
"depth",
"=",
"num_results",
",",
"on_value",
"=",
"1",
"+",
"num_burnin_steps",
",",
"off_value",
"=",
"1",
"+",
"num_steps_between_results",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"# pylint: disable=g-long-lambda",
"trace_fn",
"=",
"lambda",
"state_and_results",
":",
"(",
"state_and_results",
"[",
"0",
"]",
",",
"trace_fn",
"(",
"*",
"state_and_results",
")",
")",
",",
"# pylint: enable=g-long-lambda",
"parallel_iterations",
"=",
"parallel_iterations",
")",
"if",
"return_final_kernel_results",
":",
"return",
"CheckpointableStatesAndTrace",
"(",
"all_states",
"=",
"all_states",
",",
"trace",
"=",
"trace",
",",
"final_kernel_results",
"=",
"final_kernel_results",
")",
"else",
":",
"if",
"no_trace",
":",
"return",
"all_states",
"else",
":",
"return",
"StatesAndTrace",
"(",
"all_states",
"=",
"all_states",
",",
"trace",
"=",
"trace",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
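A minimal sketch (the values for `num_results`, `num_burnin_steps`, and `num_steps_between_results` are illustrative, not taken from the record above) of how `sample_chain` builds the per-result step counts with `tf.one_hot`: the first retained sample absorbs the burn-in, and every later one adds the thinning interval.

```python
import tensorflow as tf

num_results = 5                 # illustrative values
num_burnin_steps = 500
num_steps_between_results = 2

steps_per_result = tf.one_hot(
    indices=0,
    depth=num_results,
    on_value=1 + num_burnin_steps,
    off_value=1 + num_steps_between_results,
    dtype=tf.int32)
# ==> [501, 3, 3, 3, 3]; each entry is the number of `kernel.one_step` calls
# `smart_for_loop` runs before the corresponding state is recorded.
```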
|
test
|
deep_exponential_family
|
A multi-layered topic model over a documents-by-terms matrix.
|
tensorflow_probability/examples/deep_exponential_family.py
|
def deep_exponential_family(data_size, feature_size, units, shape):
"""A multi-layered topic model over a documents-by-terms matrix."""
w2 = ed.Gamma(0.1, 0.3, sample_shape=[units[2], units[1]], name="w2")
w1 = ed.Gamma(0.1, 0.3, sample_shape=[units[1], units[0]], name="w1")
w0 = ed.Gamma(0.1, 0.3, sample_shape=[units[0], feature_size], name="w0")
z2 = ed.Gamma(0.1, 0.1, sample_shape=[data_size, units[2]], name="z2")
z1 = ed.Gamma(shape, shape / tf.matmul(z2, w2), name="z1")
z0 = ed.Gamma(shape, shape / tf.matmul(z1, w1), name="z0")
x = ed.Poisson(tf.matmul(z0, w0), name="x")
return x
|
def deep_exponential_family(data_size, feature_size, units, shape):
"""A multi-layered topic model over a documents-by-terms matrix."""
w2 = ed.Gamma(0.1, 0.3, sample_shape=[units[2], units[1]], name="w2")
w1 = ed.Gamma(0.1, 0.3, sample_shape=[units[1], units[0]], name="w1")
w0 = ed.Gamma(0.1, 0.3, sample_shape=[units[0], feature_size], name="w0")
z2 = ed.Gamma(0.1, 0.1, sample_shape=[data_size, units[2]], name="z2")
z1 = ed.Gamma(shape, shape / tf.matmul(z2, w2), name="z1")
z0 = ed.Gamma(shape, shape / tf.matmul(z1, w1), name="z0")
x = ed.Poisson(tf.matmul(z0, w0), name="x")
return x
|
[
"A",
"multi",
"-",
"layered",
"topic",
"model",
"over",
"a",
"documents",
"-",
"by",
"-",
"terms",
"matrix",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/deep_exponential_family.py#L105-L115
|
[
"def",
"deep_exponential_family",
"(",
"data_size",
",",
"feature_size",
",",
"units",
",",
"shape",
")",
":",
"w2",
"=",
"ed",
".",
"Gamma",
"(",
"0.1",
",",
"0.3",
",",
"sample_shape",
"=",
"[",
"units",
"[",
"2",
"]",
",",
"units",
"[",
"1",
"]",
"]",
",",
"name",
"=",
"\"w2\"",
")",
"w1",
"=",
"ed",
".",
"Gamma",
"(",
"0.1",
",",
"0.3",
",",
"sample_shape",
"=",
"[",
"units",
"[",
"1",
"]",
",",
"units",
"[",
"0",
"]",
"]",
",",
"name",
"=",
"\"w1\"",
")",
"w0",
"=",
"ed",
".",
"Gamma",
"(",
"0.1",
",",
"0.3",
",",
"sample_shape",
"=",
"[",
"units",
"[",
"0",
"]",
",",
"feature_size",
"]",
",",
"name",
"=",
"\"w0\"",
")",
"z2",
"=",
"ed",
".",
"Gamma",
"(",
"0.1",
",",
"0.1",
",",
"sample_shape",
"=",
"[",
"data_size",
",",
"units",
"[",
"2",
"]",
"]",
",",
"name",
"=",
"\"z2\"",
")",
"z1",
"=",
"ed",
".",
"Gamma",
"(",
"shape",
",",
"shape",
"/",
"tf",
".",
"matmul",
"(",
"z2",
",",
"w2",
")",
",",
"name",
"=",
"\"z1\"",
")",
"z0",
"=",
"ed",
".",
"Gamma",
"(",
"shape",
",",
"shape",
"/",
"tf",
".",
"matmul",
"(",
"z1",
",",
"w1",
")",
",",
"name",
"=",
"\"z0\"",
")",
"x",
"=",
"ed",
".",
"Poisson",
"(",
"tf",
".",
"matmul",
"(",
"z0",
",",
"w0",
")",
",",
"name",
"=",
"\"x\"",
")",
"return",
"x"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
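A minimal usage sketch of `deep_exponential_family`, assuming the module's own `ed`/`tf` imports and illustrative sizes for `data_size`, `feature_size`, `units`, and `shape`, to make the shape flow explicit.

```python
data_size, feature_size = 100, 500   # documents x vocabulary (illustrative)
units = [50, 20, 5]                  # layer widths; units[2] is the top layer
shape = 0.1                          # Gamma shape for the latent layers

x = deep_exponential_family(data_size, feature_size, units, shape)
# Shapes: z2 [100, 5]  @ w2 [5, 20]   -> z1 [100, 20]
#         z1 [100, 20] @ w1 [20, 50]  -> z0 [100, 50]
#         z0 [100, 50] @ w0 [50, 500] -> Poisson rate for x [100, 500]
```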
|
test
|
trainable_positive_deterministic
|
Learnable Deterministic distribution over positive reals.
|
tensorflow_probability/examples/deep_exponential_family.py
|
def trainable_positive_deterministic(shape, min_loc=1e-3, name=None):
"""Learnable Deterministic distribution over positive reals."""
with tf.compat.v1.variable_scope(
None, default_name="trainable_positive_deterministic"):
unconstrained_loc = tf.compat.v1.get_variable("unconstrained_loc", shape)
loc = tf.maximum(tf.nn.softplus(unconstrained_loc), min_loc)
rv = ed.Deterministic(loc=loc, name=name)
return rv
|
def trainable_positive_deterministic(shape, min_loc=1e-3, name=None):
"""Learnable Deterministic distribution over positive reals."""
with tf.compat.v1.variable_scope(
None, default_name="trainable_positive_deterministic"):
unconstrained_loc = tf.compat.v1.get_variable("unconstrained_loc", shape)
loc = tf.maximum(tf.nn.softplus(unconstrained_loc), min_loc)
rv = ed.Deterministic(loc=loc, name=name)
return rv
|
[
"Learnable",
"Deterministic",
"distribution",
"over",
"positive",
"reals",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/deep_exponential_family.py#L118-L125
|
[
"def",
"trainable_positive_deterministic",
"(",
"shape",
",",
"min_loc",
"=",
"1e-3",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"variable_scope",
"(",
"None",
",",
"default_name",
"=",
"\"trainable_positive_deterministic\"",
")",
":",
"unconstrained_loc",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"get_variable",
"(",
"\"unconstrained_loc\"",
",",
"shape",
")",
"loc",
"=",
"tf",
".",
"maximum",
"(",
"tf",
".",
"nn",
".",
"softplus",
"(",
"unconstrained_loc",
")",
",",
"min_loc",
")",
"rv",
"=",
"ed",
".",
"Deterministic",
"(",
"loc",
"=",
"loc",
",",
"name",
"=",
"name",
")",
"return",
"rv"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
trainable_gamma
|
Learnable Gamma via concentration and scale parameterization.
|
tensorflow_probability/examples/deep_exponential_family.py
|
def trainable_gamma(shape, min_concentration=1e-3, min_scale=1e-5, name=None):
"""Learnable Gamma via concentration and scale parameterization."""
with tf.compat.v1.variable_scope(None, default_name="trainable_gamma"):
unconstrained_concentration = tf.compat.v1.get_variable(
"unconstrained_concentration",
shape,
initializer=tf.compat.v1.initializers.random_normal(
mean=0.5, stddev=0.1))
unconstrained_scale = tf.compat.v1.get_variable(
"unconstrained_scale",
shape,
initializer=tf.compat.v1.initializers.random_normal(stddev=0.1))
concentration = tf.maximum(tf.nn.softplus(unconstrained_concentration),
min_concentration)
rate = tf.maximum(1. / tf.nn.softplus(unconstrained_scale), 1. / min_scale)
rv = ed.Gamma(concentration=concentration, rate=rate, name=name)
return rv
|
def trainable_gamma(shape, min_concentration=1e-3, min_scale=1e-5, name=None):
"""Learnable Gamma via concentration and scale parameterization."""
with tf.compat.v1.variable_scope(None, default_name="trainable_gamma"):
unconstrained_concentration = tf.compat.v1.get_variable(
"unconstrained_concentration",
shape,
initializer=tf.compat.v1.initializers.random_normal(
mean=0.5, stddev=0.1))
unconstrained_scale = tf.compat.v1.get_variable(
"unconstrained_scale",
shape,
initializer=tf.compat.v1.initializers.random_normal(stddev=0.1))
concentration = tf.maximum(tf.nn.softplus(unconstrained_concentration),
min_concentration)
rate = tf.maximum(1. / tf.nn.softplus(unconstrained_scale), 1. / min_scale)
rv = ed.Gamma(concentration=concentration, rate=rate, name=name)
return rv
|
[
"Learnable",
"Gamma",
"via",
"concentration",
"and",
"scale",
"parameterization",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/deep_exponential_family.py#L128-L144
|
[
"def",
"trainable_gamma",
"(",
"shape",
",",
"min_concentration",
"=",
"1e-3",
",",
"min_scale",
"=",
"1e-5",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"variable_scope",
"(",
"None",
",",
"default_name",
"=",
"\"trainable_gamma\"",
")",
":",
"unconstrained_concentration",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"get_variable",
"(",
"\"unconstrained_concentration\"",
",",
"shape",
",",
"initializer",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"initializers",
".",
"random_normal",
"(",
"mean",
"=",
"0.5",
",",
"stddev",
"=",
"0.1",
")",
")",
"unconstrained_scale",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"get_variable",
"(",
"\"unconstrained_scale\"",
",",
"shape",
",",
"initializer",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"initializers",
".",
"random_normal",
"(",
"stddev",
"=",
"0.1",
")",
")",
"concentration",
"=",
"tf",
".",
"maximum",
"(",
"tf",
".",
"nn",
".",
"softplus",
"(",
"unconstrained_concentration",
")",
",",
"min_concentration",
")",
"rate",
"=",
"tf",
".",
"maximum",
"(",
"1.",
"/",
"tf",
".",
"nn",
".",
"softplus",
"(",
"unconstrained_scale",
")",
",",
"1.",
"/",
"min_scale",
")",
"rv",
"=",
"ed",
".",
"Gamma",
"(",
"concentration",
"=",
"concentration",
",",
"rate",
"=",
"rate",
",",
"name",
"=",
"name",
")",
"return",
"rv"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
deep_exponential_family_variational
|
Posterior approx. for deep exponential family p(w{0,1,2}, z{1,2,3} | x).
|
tensorflow_probability/examples/deep_exponential_family.py
|
def deep_exponential_family_variational(data_size, feature_size, units):
"""Posterior approx. for deep exponential family p(w{0,1,2}, z{1,2,3} | x)."""
qw2 = trainable_positive_deterministic([units[2], units[1]], name="qw2")
qw1 = trainable_positive_deterministic([units[1], units[0]], name="qw1")
qw0 = trainable_positive_deterministic([units[0], feature_size], name="qw0")
qz2 = trainable_gamma([data_size, units[2]], name="qz2")
qz1 = trainable_gamma([data_size, units[1]], name="qz1")
qz0 = trainable_gamma([data_size, units[0]], name="qz0")
return qw2, qw1, qw0, qz2, qz1, qz0
|
def deep_exponential_family_variational(data_size, feature_size, units):
"""Posterior approx. for deep exponential family p(w{0,1,2}, z{1,2,3} | x)."""
qw2 = trainable_positive_deterministic([units[2], units[1]], name="qw2")
qw1 = trainable_positive_deterministic([units[1], units[0]], name="qw1")
qw0 = trainable_positive_deterministic([units[0], feature_size], name="qw0")
qz2 = trainable_gamma([data_size, units[2]], name="qz2")
qz1 = trainable_gamma([data_size, units[1]], name="qz1")
qz0 = trainable_gamma([data_size, units[0]], name="qz0")
return qw2, qw1, qw0, qz2, qz1, qz0
|
[
"Posterior",
"approx",
".",
"for",
"deep",
"exponential",
"family",
"p",
"(",
"w",
"{",
"0",
"1",
"2",
"}",
"z",
"{",
"1",
"2",
"3",
"}",
"|",
"x",
")",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/deep_exponential_family.py#L147-L155
|
[
"def",
"deep_exponential_family_variational",
"(",
"data_size",
",",
"feature_size",
",",
"units",
")",
":",
"qw2",
"=",
"trainable_positive_deterministic",
"(",
"[",
"units",
"[",
"2",
"]",
",",
"units",
"[",
"1",
"]",
"]",
",",
"name",
"=",
"\"qw2\"",
")",
"qw1",
"=",
"trainable_positive_deterministic",
"(",
"[",
"units",
"[",
"1",
"]",
",",
"units",
"[",
"0",
"]",
"]",
",",
"name",
"=",
"\"qw1\"",
")",
"qw0",
"=",
"trainable_positive_deterministic",
"(",
"[",
"units",
"[",
"0",
"]",
",",
"feature_size",
"]",
",",
"name",
"=",
"\"qw0\"",
")",
"qz2",
"=",
"trainable_gamma",
"(",
"[",
"data_size",
",",
"units",
"[",
"2",
"]",
"]",
",",
"name",
"=",
"\"qz2\"",
")",
"qz1",
"=",
"trainable_gamma",
"(",
"[",
"data_size",
",",
"units",
"[",
"1",
"]",
"]",
",",
"name",
"=",
"\"qz1\"",
")",
"qz0",
"=",
"trainable_gamma",
"(",
"[",
"data_size",
",",
"units",
"[",
"0",
"]",
"]",
",",
"name",
"=",
"\"qz0\"",
")",
"return",
"qw2",
",",
"qw1",
",",
"qw0",
",",
"qz2",
",",
"qz1",
",",
"qz0"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
load_nips2011_papers
|
Loads NIPS 2011 conference papers.
The NIPS 1987-2015 data set is in the form of an 11,463 x 5,812 matrix of
per-paper word counts, containing 11,463 words and 5,811 NIPS conference
papers (Perrone et al., 2016). We subset to papers in 2011 and words appearing
in at least two documents and having a total word count of at least 10.
Built from the Observations Python package.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there. Filename is `NIPS_1987-2015.csv`.
Returns:
bag_of_words: np.ndarray of shape [num_documents, num_words]. Each element
denotes the number of occurrences of a specific word in a specific
document.
words: List of strings, denoting the words for `bag_of_words`'s columns.
|
tensorflow_probability/examples/deep_exponential_family.py
|
def load_nips2011_papers(path):
"""Loads NIPS 2011 conference papers.
The NIPS 1987-2015 data set is in the form of an 11,463 x 5,812 matrix of
per-paper word counts, containing 11,463 words and 5,811 NIPS conference
papers (Perrone et al., 2016). We subset to papers in 2011 and words appearing
in at least two documents and having a total word count of at least 10.
Built from the Observations Python package.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there. Filename is `NIPS_1987-2015.csv`.
Returns:
bag_of_words: np.ndarray of shape [num_documents, num_words]. Each element
denotes the number of occurrences of a specific word in a specific
document.
words: List of strings, denoting the words for `bag_of_words`'s columns.
"""
path = os.path.expanduser(path)
filename = "NIPS_1987-2015.csv"
filepath = os.path.join(path, filename)
if not os.path.exists(filepath):
url = ("https://archive.ics.uci.edu/ml/machine-learning-databases/"
"00371/NIPS_1987-2015.csv")
if not tf.io.gfile.exists(path):
tf.io.gfile.makedirs(path)
print("Downloading %s to %s" % (url, filepath))
urllib.request.urlretrieve(url, filepath)
with open(filepath) as f:
iterator = csv.reader(f)
documents = next(iterator)[1:]
words = []
x_train = []
for row in iterator:
words.append(row[0])
x_train.append(row[1:])
x_train = np.array(x_train, dtype=np.int)
# Subset to documents in 2011 and words appearing in at least two documents
# and have a total word count of at least 10.
doc_idx = [i for i, document in enumerate(documents)
if document.startswith("2011")]
documents = [documents[doc] for doc in doc_idx]
x_train = x_train[:, doc_idx]
word_idx = np.logical_and(np.sum(x_train != 0, 1) >= 2,
np.sum(x_train, 1) >= 10)
words = [word for word, idx in zip(words, word_idx) if idx]
bag_of_words = x_train[word_idx, :].T
return bag_of_words, words
|
def load_nips2011_papers(path):
"""Loads NIPS 2011 conference papers.
The NIPS 1987-2015 data set is in the form of an 11,463 x 5,812 matrix of
per-paper word counts, containing 11,463 words and 5,811 NIPS conference
papers (Perrone et al., 2016). We subset to papers in 2011 and words appearing
in at least two documents and having a total word count of at least 10.
Built from the Observations Python package.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there. Filename is `NIPS_1987-2015.csv`.
Returns:
bag_of_words: np.ndarray of shape [num_documents, num_words]. Each element
denotes the number of occurrences of a specific word in a specific
document.
words: List of strings, denoting the words for `bag_of_words`'s columns.
"""
path = os.path.expanduser(path)
filename = "NIPS_1987-2015.csv"
filepath = os.path.join(path, filename)
if not os.path.exists(filepath):
url = ("https://archive.ics.uci.edu/ml/machine-learning-databases/"
"00371/NIPS_1987-2015.csv")
if not tf.io.gfile.exists(path):
tf.io.gfile.makedirs(path)
print("Downloading %s to %s" % (url, filepath))
urllib.request.urlretrieve(url, filepath)
with open(filepath) as f:
iterator = csv.reader(f)
documents = next(iterator)[1:]
words = []
x_train = []
for row in iterator:
words.append(row[0])
x_train.append(row[1:])
x_train = np.array(x_train, dtype=np.int)
# Subset to documents in 2011 and words appearing in at least two documents
# and have a total word count of at least 10.
doc_idx = [i for i, document in enumerate(documents)
if document.startswith("2011")]
documents = [documents[doc] for doc in doc_idx]
x_train = x_train[:, doc_idx]
word_idx = np.logical_and(np.sum(x_train != 0, 1) >= 2,
np.sum(x_train, 1) >= 10)
words = [word for word, idx in zip(words, word_idx) if idx]
bag_of_words = x_train[word_idx, :].T
return bag_of_words, words
|
[
"Loads",
"NIPS",
"2011",
"conference",
"papers",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/deep_exponential_family.py#L178-L231
|
[
"def",
"load_nips2011_papers",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
"filename",
"=",
"\"NIPS_1987-2015.csv\"",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"filename",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filepath",
")",
":",
"url",
"=",
"(",
"\"https://archive.ics.uci.edu/ml/machine-learning-databases/\"",
"\"00371/NIPS_1987-2015.csv\"",
")",
"if",
"not",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"path",
")",
":",
"tf",
".",
"io",
".",
"gfile",
".",
"makedirs",
"(",
"path",
")",
"print",
"(",
"\"Downloading %s to %s\"",
"%",
"(",
"url",
",",
"filepath",
")",
")",
"urllib",
".",
"request",
".",
"urlretrieve",
"(",
"url",
",",
"filepath",
")",
"with",
"open",
"(",
"filepath",
")",
"as",
"f",
":",
"iterator",
"=",
"csv",
".",
"reader",
"(",
"f",
")",
"documents",
"=",
"next",
"(",
"iterator",
")",
"[",
"1",
":",
"]",
"words",
"=",
"[",
"]",
"x_train",
"=",
"[",
"]",
"for",
"row",
"in",
"iterator",
":",
"words",
".",
"append",
"(",
"row",
"[",
"0",
"]",
")",
"x_train",
".",
"append",
"(",
"row",
"[",
"1",
":",
"]",
")",
"x_train",
"=",
"np",
".",
"array",
"(",
"x_train",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"# Subset to documents in 2011 and words appearing in at least two documents",
"# and have a total word count of at least 10.",
"doc_idx",
"=",
"[",
"i",
"for",
"i",
",",
"document",
"in",
"enumerate",
"(",
"documents",
")",
"if",
"document",
".",
"startswith",
"(",
"\"2011\"",
")",
"]",
"documents",
"=",
"[",
"documents",
"[",
"doc",
"]",
"for",
"doc",
"in",
"doc_idx",
"]",
"x_train",
"=",
"x_train",
"[",
":",
",",
"doc_idx",
"]",
"word_idx",
"=",
"np",
".",
"logical_and",
"(",
"np",
".",
"sum",
"(",
"x_train",
"!=",
"0",
",",
"1",
")",
">=",
"2",
",",
"np",
".",
"sum",
"(",
"x_train",
",",
"1",
")",
">=",
"10",
")",
"words",
"=",
"[",
"word",
"for",
"word",
",",
"idx",
"in",
"zip",
"(",
"words",
",",
"word_idx",
")",
"if",
"idx",
"]",
"bag_of_words",
"=",
"x_train",
"[",
"word_idx",
",",
":",
"]",
".",
"T",
"return",
"bag_of_words",
",",
"words"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
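A small NumPy sketch, with made-up counts, of the word filter above: a word is kept only if it appears in at least two documents and its total count is at least 10.

```python
import numpy as np

x_train = np.array([[0, 5, 7],    # 2 documents, total 12 -> kept
                    [9, 0, 0],    # 1 document            -> dropped
                    [1, 1, 1]])   # total 3               -> dropped
word_idx = np.logical_and(np.sum(x_train != 0, 1) >= 2,
                          np.sum(x_train, 1) >= 10)
# ==> array([ True, False, False])
```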
|
test
|
_AmplitudeLengthScaleMixin._init_params
|
Shared init logic for `amplitude` and `length_scale` params.
Args:
amplitude: `Tensor` (or convertible) or `None` to convert, validate.
length_scale: `Tensor` (or convertible) or `None` to convert, validate.
validate_args: If `True`, parameters are checked for validity despite
possibly degrading runtime performance
Returns:
dtype: The common `DType` of the parameters.
|
tensorflow_probability/python/positive_semidefinite_kernels/matern.py
|
def _init_params(self, amplitude, length_scale, validate_args):
"""Shared init logic for `amplitude` and `length_scale` params.
Args:
amplitude: `Tensor` (or convertible) or `None` to convert, validate.
length_scale: `Tensor` (or convertible) or `None` to convert, validate.
validate_args: If `True`, parameters are checked for validity despite
possibly degrading runtime performance
Returns:
dtype: The common `DType` of the parameters.
"""
dtype = util.maybe_get_common_dtype(
[amplitude, length_scale])
if amplitude is not None:
amplitude = tf.convert_to_tensor(
value=amplitude, name='amplitude', dtype=dtype)
self._amplitude = _validate_arg_if_not_none(
amplitude, tf.compat.v1.assert_positive, validate_args)
if length_scale is not None:
length_scale = tf.convert_to_tensor(
value=length_scale, name='length_scale', dtype=dtype)
self._length_scale = _validate_arg_if_not_none(
length_scale, tf.compat.v1.assert_positive, validate_args)
return dtype
|
def _init_params(self, amplitude, length_scale, validate_args):
"""Shared init logic for `amplitude` and `length_scale` params.
Args:
amplitude: `Tensor` (or convertible) or `None` to convert, validate.
length_scale: `Tensor` (or convertible) or `None` to convert, validate.
validate_args: If `True`, parameters are checked for validity despite
possibly degrading runtime performance
Returns:
dtype: The common `DType` of the parameters.
"""
dtype = util.maybe_get_common_dtype(
[amplitude, length_scale])
if amplitude is not None:
amplitude = tf.convert_to_tensor(
value=amplitude, name='amplitude', dtype=dtype)
self._amplitude = _validate_arg_if_not_none(
amplitude, tf.compat.v1.assert_positive, validate_args)
if length_scale is not None:
length_scale = tf.convert_to_tensor(
value=length_scale, name='length_scale', dtype=dtype)
self._length_scale = _validate_arg_if_not_none(
length_scale, tf.compat.v1.assert_positive, validate_args)
return dtype
|
[
"Shared",
"init",
"logic",
"for",
"amplitude",
"and",
"length_scale",
"params",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/positive_semidefinite_kernels/matern.py#L44-L68
|
[
"def",
"_init_params",
"(",
"self",
",",
"amplitude",
",",
"length_scale",
",",
"validate_args",
")",
":",
"dtype",
"=",
"util",
".",
"maybe_get_common_dtype",
"(",
"[",
"amplitude",
",",
"length_scale",
"]",
")",
"if",
"amplitude",
"is",
"not",
"None",
":",
"amplitude",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"amplitude",
",",
"name",
"=",
"'amplitude'",
",",
"dtype",
"=",
"dtype",
")",
"self",
".",
"_amplitude",
"=",
"_validate_arg_if_not_none",
"(",
"amplitude",
",",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_positive",
",",
"validate_args",
")",
"if",
"length_scale",
"is",
"not",
"None",
":",
"length_scale",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"length_scale",
",",
"name",
"=",
"'length_scale'",
",",
"dtype",
"=",
"dtype",
")",
"self",
".",
"_length_scale",
"=",
"_validate_arg_if_not_none",
"(",
"length_scale",
",",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_positive",
",",
"validate_args",
")",
"return",
"dtype"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_registered_kl
|
Get the KL function registered for classes a and b.
|
tensorflow_probability/python/distributions/kullback_leibler.py
|
def _registered_kl(type_a, type_b):
"""Get the KL function registered for classes a and b."""
hierarchy_a = tf_inspect.getmro(type_a)
hierarchy_b = tf_inspect.getmro(type_b)
dist_to_children = None
kl_fn = None
for mro_to_a, parent_a in enumerate(hierarchy_a):
for mro_to_b, parent_b in enumerate(hierarchy_b):
candidate_dist = mro_to_a + mro_to_b
candidate_kl_fn = _DIVERGENCES.get((parent_a, parent_b), None)
if not kl_fn or (candidate_kl_fn and candidate_dist < dist_to_children):
dist_to_children = candidate_dist
kl_fn = candidate_kl_fn
return kl_fn
|
def _registered_kl(type_a, type_b):
"""Get the KL function registered for classes a and b."""
hierarchy_a = tf_inspect.getmro(type_a)
hierarchy_b = tf_inspect.getmro(type_b)
dist_to_children = None
kl_fn = None
for mro_to_a, parent_a in enumerate(hierarchy_a):
for mro_to_b, parent_b in enumerate(hierarchy_b):
candidate_dist = mro_to_a + mro_to_b
candidate_kl_fn = _DIVERGENCES.get((parent_a, parent_b), None)
if not kl_fn or (candidate_kl_fn and candidate_dist < dist_to_children):
dist_to_children = candidate_dist
kl_fn = candidate_kl_fn
return kl_fn
|
[
"Get",
"the",
"KL",
"function",
"registered",
"for",
"classes",
"a",
"and",
"b",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/kullback_leibler.py#L34-L47
|
[
"def",
"_registered_kl",
"(",
"type_a",
",",
"type_b",
")",
":",
"hierarchy_a",
"=",
"tf_inspect",
".",
"getmro",
"(",
"type_a",
")",
"hierarchy_b",
"=",
"tf_inspect",
".",
"getmro",
"(",
"type_b",
")",
"dist_to_children",
"=",
"None",
"kl_fn",
"=",
"None",
"for",
"mro_to_a",
",",
"parent_a",
"in",
"enumerate",
"(",
"hierarchy_a",
")",
":",
"for",
"mro_to_b",
",",
"parent_b",
"in",
"enumerate",
"(",
"hierarchy_b",
")",
":",
"candidate_dist",
"=",
"mro_to_a",
"+",
"mro_to_b",
"candidate_kl_fn",
"=",
"_DIVERGENCES",
".",
"get",
"(",
"(",
"parent_a",
",",
"parent_b",
")",
",",
"None",
")",
"if",
"not",
"kl_fn",
"or",
"(",
"candidate_kl_fn",
"and",
"candidate_dist",
"<",
"dist_to_children",
")",
":",
"dist_to_children",
"=",
"candidate_dist",
"kl_fn",
"=",
"candidate_kl_fn",
"return",
"kl_fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
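A toy sketch of the selection rule `_registered_kl` applies, using hypothetical classes and a stand-alone re-implementation of the loop: among all registered (parent_a, parent_b) pairs, the one with the smallest summed MRO distance to the query types wins, with ties going to the pair found first (i.e., closer to `type_a`).

```python
import inspect

class Base: pass
class Mid(Base): pass
class Leaf(Mid): pass

registry = {
    (Base, Base): "kl_base_base",   # distance 2 + 2 = 4 from (Leaf, Leaf)
    (Mid, Base): "kl_mid_base",     # distance 1 + 2 = 3
}

def pick_kl(type_a, type_b, registry):
  """Mirrors the shortest-summed-MRO-distance rule (sketch only)."""
  best_dist, best_fn = None, None
  for i, parent_a in enumerate(inspect.getmro(type_a)):
    for j, parent_b in enumerate(inspect.getmro(type_b)):
      fn = registry.get((parent_a, parent_b))
      if fn is not None and (best_fn is None or i + j < best_dist):
        best_dist, best_fn = i + j, fn
  return best_fn

print(pick_kl(Leaf, Leaf, registry))  # ==> kl_mid_base
```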
|
test
|
kl_divergence
|
Get the KL-divergence KL(distribution_a || distribution_b).
If there is no KL method registered specifically for `type(distribution_a)`
and `type(distribution_b)`, then the class hierarchies of these types are
searched.
If one KL method is registered between any pairs of classes in these two
parent hierarchies, it is used.
If more than one such registered method exists, the method whose registered
classes have the shortest sum MRO paths to the input types is used.
If more than one such shortest path exists, the first method
identified in the search is used (favoring a shorter MRO distance to
`type(distribution_a)`).
Args:
distribution_a: The first distribution.
distribution_b: The second distribution.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Returns:
A Tensor with the batchwise KL-divergence between `distribution_a`
and `distribution_b`.
Raises:
NotImplementedError: If no KL method is defined for distribution types
of `distribution_a` and `distribution_b`.
|
tensorflow_probability/python/distributions/kullback_leibler.py
|
def kl_divergence(distribution_a, distribution_b,
allow_nan_stats=True, name=None):
"""Get the KL-divergence KL(distribution_a || distribution_b).
If there is no KL method registered specifically for `type(distribution_a)`
and `type(distribution_b)`, then the class hierarchies of these types are
searched.
If one KL method is registered between any pairs of classes in these two
parent hierarchies, it is used.
If more than one such registered method exists, the method whose registered
classes have the shortest sum MRO paths to the input types is used.
If more than one such shortest path exists, the first method
identified in the search is used (favoring a shorter MRO distance to
`type(distribution_a)`).
Args:
distribution_a: The first distribution.
distribution_b: The second distribution.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Returns:
A Tensor with the batchwise KL-divergence between `distribution_a`
and `distribution_b`.
Raises:
NotImplementedError: If no KL method is defined for distribution types
of `distribution_a` and `distribution_b`.
"""
kl_fn = _registered_kl(type(distribution_a), type(distribution_b))
if kl_fn is None:
raise NotImplementedError(
"No KL(distribution_a || distribution_b) registered for distribution_a "
"type {} and distribution_b type {}".format(
type(distribution_a).__name__, type(distribution_b).__name__))
with tf.name_scope("KullbackLeibler"):
kl_t = kl_fn(distribution_a, distribution_b, name=name)
if allow_nan_stats:
return kl_t
# Check KL for NaNs
kl_t = tf.identity(kl_t, name="kl")
with tf.control_dependencies([
tf.Assert(
tf.logical_not(tf.reduce_any(input_tensor=tf.math.is_nan(kl_t))),
[
("KL calculation between {} and {} returned NaN values "
"(and was called with allow_nan_stats=False). Values:".format(
distribution_a.name, distribution_b.name)),
kl_t
])
]):
return tf.identity(kl_t, name="checked_kl")
|
def kl_divergence(distribution_a, distribution_b,
allow_nan_stats=True, name=None):
"""Get the KL-divergence KL(distribution_a || distribution_b).
If there is no KL method registered specifically for `type(distribution_a)`
and `type(distribution_b)`, then the class hierarchies of these types are
searched.
If one KL method is registered between any pairs of classes in these two
parent hierarchies, it is used.
If more than one such registered method exists, the method whose registered
classes have the shortest sum MRO paths to the input types is used.
If more than one such shortest path exists, the first method
identified in the search is used (favoring a shorter MRO distance to
`type(distribution_a)`).
Args:
distribution_a: The first distribution.
distribution_b: The second distribution.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Returns:
A Tensor with the batchwise KL-divergence between `distribution_a`
and `distribution_b`.
Raises:
NotImplementedError: If no KL method is defined for distribution types
of `distribution_a` and `distribution_b`.
"""
kl_fn = _registered_kl(type(distribution_a), type(distribution_b))
if kl_fn is None:
raise NotImplementedError(
"No KL(distribution_a || distribution_b) registered for distribution_a "
"type {} and distribution_b type {}".format(
type(distribution_a).__name__, type(distribution_b).__name__))
with tf.name_scope("KullbackLeibler"):
kl_t = kl_fn(distribution_a, distribution_b, name=name)
if allow_nan_stats:
return kl_t
# Check KL for NaNs
kl_t = tf.identity(kl_t, name="kl")
with tf.control_dependencies([
tf.Assert(
tf.logical_not(tf.reduce_any(input_tensor=tf.math.is_nan(kl_t))),
[
("KL calculation between {} and {} returned NaN values "
"(and was called with allow_nan_stats=False). Values:".format(
distribution_a.name, distribution_b.name)),
kl_t
])
]):
return tf.identity(kl_t, name="checked_kl")
|
[
"Get",
"the",
"KL",
"-",
"divergence",
"KL",
"(",
"distribution_a",
"||",
"distribution_b",
")",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/kullback_leibler.py#L50-L110
|
[
"def",
"kl_divergence",
"(",
"distribution_a",
",",
"distribution_b",
",",
"allow_nan_stats",
"=",
"True",
",",
"name",
"=",
"None",
")",
":",
"kl_fn",
"=",
"_registered_kl",
"(",
"type",
"(",
"distribution_a",
")",
",",
"type",
"(",
"distribution_b",
")",
")",
"if",
"kl_fn",
"is",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"No KL(distribution_a || distribution_b) registered for distribution_a \"",
"\"type {} and distribution_b type {}\"",
".",
"format",
"(",
"type",
"(",
"distribution_a",
")",
".",
"__name__",
",",
"type",
"(",
"distribution_b",
")",
".",
"__name__",
")",
")",
"with",
"tf",
".",
"name_scope",
"(",
"\"KullbackLeibler\"",
")",
":",
"kl_t",
"=",
"kl_fn",
"(",
"distribution_a",
",",
"distribution_b",
",",
"name",
"=",
"name",
")",
"if",
"allow_nan_stats",
":",
"return",
"kl_t",
"# Check KL for NaNs",
"kl_t",
"=",
"tf",
".",
"identity",
"(",
"kl_t",
",",
"name",
"=",
"\"kl\"",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"tf",
".",
"Assert",
"(",
"tf",
".",
"logical_not",
"(",
"tf",
".",
"reduce_any",
"(",
"input_tensor",
"=",
"tf",
".",
"math",
".",
"is_nan",
"(",
"kl_t",
")",
")",
")",
",",
"[",
"(",
"\"KL calculation between {} and {} returned NaN values \"",
"\"(and was called with allow_nan_stats=False). Values:\"",
".",
"format",
"(",
"distribution_a",
".",
"name",
",",
"distribution_b",
".",
"name",
")",
")",
",",
"kl_t",
"]",
")",
"]",
")",
":",
"return",
"tf",
".",
"identity",
"(",
"kl_t",
",",
"name",
"=",
"\"checked_kl\"",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
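A minimal usage sketch, assuming the function is exposed as `tfp.distributions.kl_divergence` and using two arbitrary Normal distributions.

```python
import tensorflow_probability as tfp
tfd = tfp.distributions

p = tfd.Normal(loc=0., scale=1.)
q = tfd.Normal(loc=1., scale=2.)
kl = tfd.kl_divergence(p, q)
# Closed form for Normals:
#   log(s_q / s_p) + (s_p**2 + (m_p - m_q)**2) / (2 * s_q**2) - 0.5
# ==> log(2) + 2/8 - 0.5 ≈ 0.4431
```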
|
test
|
cross_entropy
|
Computes the (Shannon) cross entropy.
Denote two distributions by `P` (`ref`) and `Q` (`other`). Assuming `P, Q`
are absolutely continuous with respect to one another and permit densities
`p(x) dr(x)` and `q(x) dr(x)`, (Shannon) cross entropy is defined as:
```none
H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
```
where `F` denotes the support of the random variable `X ~ P`.
Args:
ref: `tfd.Distribution` instance.
other: `tfd.Distribution` instance.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` prepended to names of ops created by this function.
Returns:
cross_entropy: `ref.dtype` `Tensor` with shape `[B1, ..., Bn]`
representing `n` different calculations of (Shannon) cross entropy.
|
tensorflow_probability/python/distributions/kullback_leibler.py
|
def cross_entropy(ref, other,
allow_nan_stats=True, name=None):
"""Computes the (Shannon) cross entropy.
Denote two distributions by `P` (`ref`) and `Q` (`other`). Assuming `P, Q`
are absolutely continuous with respect to one another and permit densities
`p(x) dr(x)` and `q(x) dr(x)`, (Shannon) cross entropy is defined as:
```none
H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
```
where `F` denotes the support of the random variable `X ~ P`.
Args:
ref: `tfd.Distribution` instance.
other: `tfd.Distribution` instance.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` prepended to names of ops created by this function.
Returns:
cross_entropy: `ref.dtype` `Tensor` with shape `[B1, ..., Bn]`
representing `n` different calculations of (Shannon) cross entropy.
"""
with tf.name_scope(name or "cross_entropy"):
return ref.entropy() + kl_divergence(
ref, other, allow_nan_stats=allow_nan_stats)
|
def cross_entropy(ref, other,
allow_nan_stats=True, name=None):
"""Computes the (Shannon) cross entropy.
Denote two distributions by `P` (`ref`) and `Q` (`other`). Assuming `P, Q`
are absolutely continuous with respect to one another and permit densities
`p(x) dr(x)` and `q(x) dr(x)`, (Shannon) cross entropy is defined as:
```none
H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
```
where `F` denotes the support of the random variable `X ~ P`.
Args:
ref: `tfd.Distribution` instance.
other: `tfd.Distribution` instance.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` prepended to names of ops created by this function.
Returns:
cross_entropy: `ref.dtype` `Tensor` with shape `[B1, ..., Bn]`
representing `n` different calculations of (Shannon) cross entropy.
"""
with tf.name_scope(name or "cross_entropy"):
return ref.entropy() + kl_divergence(
ref, other, allow_nan_stats=allow_nan_stats)
|
[
"Computes",
"the",
"(",
"Shannon",
")",
"cross",
"entropy",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/kullback_leibler.py#L113-L142
|
[
"def",
"cross_entropy",
"(",
"ref",
",",
"other",
",",
"allow_nan_stats",
"=",
"True",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"cross_entropy\"",
")",
":",
"return",
"ref",
".",
"entropy",
"(",
")",
"+",
"kl_divergence",
"(",
"ref",
",",
"other",
",",
"allow_nan_stats",
"=",
"allow_nan_stats",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
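A sketch of the identity the function implements, `H[P, Q] = H[P] + KL(P || Q)`, checked numerically on two assumed Normals (and assuming the `Distribution.cross_entropy` method is exposed).

```python
import tensorflow_probability as tfp
tfd = tfp.distributions

p = tfd.Normal(loc=0., scale=1.)
q = tfd.Normal(loc=1., scale=2.)
h_p = p.entropy()                 # 0.5 * log(2 * pi * e) ≈ 1.4189
kl_pq = tfd.kl_divergence(p, q)   # ≈ 0.4431
h_pq = p.cross_entropy(q)         # ≈ 1.4189 + 0.4431 ≈ 1.8621
```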
|
test
|
read_image
|
Returns an image tensor.
|
tensorflow_probability/examples/sprites_dataset.py
|
def read_image(filepath):
"""Returns an image tensor."""
im_bytes = tf.io.read_file(filepath)
im = tf.image.decode_image(im_bytes, channels=CHANNELS)
im = tf.image.convert_image_dtype(im, tf.float32)
return im
|
def read_image(filepath):
"""Returns an image tensor."""
im_bytes = tf.io.read_file(filepath)
im = tf.image.decode_image(im_bytes, channels=CHANNELS)
im = tf.image.convert_image_dtype(im, tf.float32)
return im
|
[
"Returns",
"an",
"image",
"tensor",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/sprites_dataset.py#L113-L118
|
[
"def",
"read_image",
"(",
"filepath",
")",
":",
"im_bytes",
"=",
"tf",
".",
"io",
".",
"read_file",
"(",
"filepath",
")",
"im",
"=",
"tf",
".",
"image",
".",
"decode_image",
"(",
"im_bytes",
",",
"channels",
"=",
"CHANNELS",
")",
"im",
"=",
"tf",
".",
"image",
".",
"convert_image_dtype",
"(",
"im",
",",
"tf",
".",
"float32",
")",
"return",
"im"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
download_sprites
|
Downloads the sprites data and returns the saved filepath.
|
tensorflow_probability/examples/sprites_dataset.py
|
def download_sprites():
"""Downloads the sprites data and returns the saved filepath."""
filepath = os.path.join(FLAGS.data_dir, DATA_SPRITES_DIR)
if not tf.io.gfile.exists(filepath):
if not tf.io.gfile.exists(FLAGS.data_dir):
tf.io.gfile.makedirs(FLAGS.data_dir)
zip_name = "{}.zip".format(filepath)
urllib.request.urlretrieve(DATA_SPRITES_URL, zip_name)
with zipfile.ZipFile(zip_name, "r") as zip_file:
zip_file.extractall(FLAGS.data_dir)
tf.io.gfile.remove(zip_name)
return filepath
|
def download_sprites():
"""Downloads the sprites data and returns the saved filepath."""
filepath = os.path.join(FLAGS.data_dir, DATA_SPRITES_DIR)
if not tf.io.gfile.exists(filepath):
if not tf.io.gfile.exists(FLAGS.data_dir):
tf.io.gfile.makedirs(FLAGS.data_dir)
zip_name = "{}.zip".format(filepath)
urllib.request.urlretrieve(DATA_SPRITES_URL, zip_name)
with zipfile.ZipFile(zip_name, "r") as zip_file:
zip_file.extractall(FLAGS.data_dir)
tf.io.gfile.remove(zip_name)
return filepath
|
[
"Downloads",
"the",
"sprites",
"data",
"and",
"returns",
"the",
"saved",
"filepath",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/sprites_dataset.py#L126-L137
|
[
"def",
"download_sprites",
"(",
")",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"FLAGS",
".",
"data_dir",
",",
"DATA_SPRITES_DIR",
")",
"if",
"not",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"filepath",
")",
":",
"if",
"not",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"FLAGS",
".",
"data_dir",
")",
":",
"tf",
".",
"io",
".",
"gfile",
".",
"makedirs",
"(",
"FLAGS",
".",
"data_dir",
")",
"zip_name",
"=",
"\"{}.zip\"",
".",
"format",
"(",
"filepath",
")",
"urllib",
".",
"request",
".",
"urlretrieve",
"(",
"DATA_SPRITES_URL",
",",
"zip_name",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"zip_name",
",",
"\"r\"",
")",
"as",
"zip_file",
":",
"zip_file",
".",
"extractall",
"(",
"FLAGS",
".",
"data_dir",
")",
"tf",
".",
"io",
".",
"gfile",
".",
"remove",
"(",
"zip_name",
")",
"return",
"filepath"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
create_character
|
Creates a character sprite from a set of attribute sprites.
|
tensorflow_probability/examples/sprites_dataset.py
|
def create_character(skin, hair, top, pants):
"""Creates a character sprite from a set of attribute sprites."""
dtype = skin.dtype
hair_mask = tf.cast(hair[..., -1:] <= 0, dtype)
top_mask = tf.cast(top[..., -1:] <= 0, dtype)
pants_mask = tf.cast(pants[..., -1:] <= 0, dtype)
char = (skin * hair_mask) + hair
char = (char * top_mask) + top
char = (char * pants_mask) + pants
return char
|
def create_character(skin, hair, top, pants):
"""Creates a character sprite from a set of attribute sprites."""
dtype = skin.dtype
hair_mask = tf.cast(hair[..., -1:] <= 0, dtype)
top_mask = tf.cast(top[..., -1:] <= 0, dtype)
pants_mask = tf.cast(pants[..., -1:] <= 0, dtype)
char = (skin * hair_mask) + hair
char = (char * top_mask) + top
char = (char * pants_mask) + pants
return char
|
[
"Creates",
"a",
"character",
"sprite",
"from",
"a",
"set",
"of",
"attribute",
"sprites",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/sprites_dataset.py#L140-L149
|
[
"def",
"create_character",
"(",
"skin",
",",
"hair",
",",
"top",
",",
"pants",
")",
":",
"dtype",
"=",
"skin",
".",
"dtype",
"hair_mask",
"=",
"tf",
".",
"cast",
"(",
"hair",
"[",
"...",
",",
"-",
"1",
":",
"]",
"<=",
"0",
",",
"dtype",
")",
"top_mask",
"=",
"tf",
".",
"cast",
"(",
"top",
"[",
"...",
",",
"-",
"1",
":",
"]",
"<=",
"0",
",",
"dtype",
")",
"pants_mask",
"=",
"tf",
".",
"cast",
"(",
"pants",
"[",
"...",
",",
"-",
"1",
":",
"]",
"<=",
"0",
",",
"dtype",
")",
"char",
"=",
"(",
"skin",
"*",
"hair_mask",
")",
"+",
"hair",
"char",
"=",
"(",
"char",
"*",
"top_mask",
")",
"+",
"top",
"char",
"=",
"(",
"char",
"*",
"pants_mask",
")",
"+",
"pants",
"return",
"char"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
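A toy NumPy sketch, using single-pixel RGBA "images", of the alpha-mask compositing in `create_character`: each layer overwrites the pixels where its alpha channel is positive and leaves the rest untouched.

```python
import numpy as np

skin  = np.array([[[0.8, 0.6, 0.4, 1.0]]])   # opaque skin pixel (RGBA)
hair  = np.array([[[0.1, 0.1, 0.1, 1.0]]])   # opaque hair pixel
empty = np.array([[[0.0, 0.0, 0.0, 0.0]]])   # fully transparent layer

hair_mask  = (hair[..., -1:] <= 0).astype(skin.dtype)   # 0 where hair is drawn
empty_mask = (empty[..., -1:] <= 0).astype(skin.dtype)  # 1 where layer is empty

char = skin * hair_mask + hair     # hair replaces the skin pixel
char = char * empty_mask + empty   # transparent layer leaves it unchanged
# char ==> [[[0.1, 0.1, 0.1, 1.0]]]
```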
|
test
|
create_seq
|
Creates a sequence.
Args:
character: A character sprite tensor.
action_metadata: An action metadata tuple.
direction: An integer representing the direction, i.e., the row
offset within each action group corresponding to a particular
direction.
length: Desired length of the sequence. If this is longer than
the number of available frames, it will roll over to the
beginning.
start: Index of possible frames at which to start the sequence.
Returns:
A sequence tensor.
|
tensorflow_probability/examples/sprites_dataset.py
|
def create_seq(character, action_metadata, direction, length=8, start=0):
"""Creates a sequence.
Args:
character: A character sprite tensor.
action_metadata: An action metadata tuple.
direction: An integer representing the direction, i.e., the row
offset within each action group corresponding to a particular
direction.
length: Desired length of the sequence. If this is longer than
the number of available frames, it will roll over to the
beginning.
start: Index of possible frames at which to start the sequence.
Returns:
A sequence tensor.
"""
sprite_start = (action_metadata[0]+direction) * FRAME_SIZE
sprite_end = (action_metadata[0]+direction+1) * FRAME_SIZE
sprite_line = character[sprite_start:sprite_end, ...]
# Extract 64x64 patches that are side-by-side in the sprite, and limit
# to the actual number of frames for the given action.
frames = tf.stack(tf.split(sprite_line, 13, axis=1)) # 13 is a hack
frames = frames[0:action_metadata[1]]
# Extract a slice of the desired length.
# NOTE: Length could be longer than the number of frames, so tile as needed.
frames = tf.roll(frames, shift=-start, axis=0)
frames = tf.tile(frames, [2, 1, 1, 1]) # 2 is a hack
frames = frames[:length]
frames = tf.cast(frames, dtype=tf.float32)
frames.set_shape([length, FRAME_SIZE, FRAME_SIZE, CHANNELS])
return frames
|
def create_seq(character, action_metadata, direction, length=8, start=0):
"""Creates a sequence.
Args:
character: A character sprite tensor.
action_metadata: An action metadata tuple.
direction: An integer representing the direction, i.e., the row
offset within each action group corresponding to a particular
direction.
length: Desired length of the sequence. If this is longer than
the number of available frames, it will roll over to the
beginning.
start: Index of possible frames at which to start the sequence.
Returns:
A sequence tensor.
"""
sprite_start = (action_metadata[0]+direction) * FRAME_SIZE
sprite_end = (action_metadata[0]+direction+1) * FRAME_SIZE
sprite_line = character[sprite_start:sprite_end, ...]
# Extract 64x64 patches that are side-by-side in the sprite, and limit
# to the actual number of frames for the given action.
frames = tf.stack(tf.split(sprite_line, 13, axis=1)) # 13 is a hack
frames = frames[0:action_metadata[1]]
# Extract a slice of the desired length.
# NOTE: Length could be longer than the number of frames, so tile as needed.
frames = tf.roll(frames, shift=-start, axis=0)
frames = tf.tile(frames, [2, 1, 1, 1]) # 2 is a hack
frames = frames[:length]
frames = tf.cast(frames, dtype=tf.float32)
frames.set_shape([length, FRAME_SIZE, FRAME_SIZE, CHANNELS])
return frames
|
[
"Creates",
"a",
"sequence",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/sprites_dataset.py#L152-L185
|
[
"def",
"create_seq",
"(",
"character",
",",
"action_metadata",
",",
"direction",
",",
"length",
"=",
"8",
",",
"start",
"=",
"0",
")",
":",
"sprite_start",
"=",
"(",
"action_metadata",
"[",
"0",
"]",
"+",
"direction",
")",
"*",
"FRAME_SIZE",
"sprite_end",
"=",
"(",
"action_metadata",
"[",
"0",
"]",
"+",
"direction",
"+",
"1",
")",
"*",
"FRAME_SIZE",
"sprite_line",
"=",
"character",
"[",
"sprite_start",
":",
"sprite_end",
",",
"...",
"]",
"# Extract 64x64 patches that are side-by-side in the sprite, and limit",
"# to the actual number of frames for the given action.",
"frames",
"=",
"tf",
".",
"stack",
"(",
"tf",
".",
"split",
"(",
"sprite_line",
",",
"13",
",",
"axis",
"=",
"1",
")",
")",
"# 13 is a hack",
"frames",
"=",
"frames",
"[",
"0",
":",
"action_metadata",
"[",
"1",
"]",
"]",
"# Extract a slice of the desired length.",
"# NOTE: Length could be longer than the number of frames, so tile as needed.",
"frames",
"=",
"tf",
".",
"roll",
"(",
"frames",
",",
"shift",
"=",
"-",
"start",
",",
"axis",
"=",
"0",
")",
"frames",
"=",
"tf",
".",
"tile",
"(",
"frames",
",",
"[",
"2",
",",
"1",
",",
"1",
",",
"1",
"]",
")",
"# 2 is a hack",
"frames",
"=",
"frames",
"[",
":",
"length",
"]",
"frames",
"=",
"tf",
".",
"cast",
"(",
"frames",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"frames",
".",
"set_shape",
"(",
"[",
"length",
",",
"FRAME_SIZE",
",",
"FRAME_SIZE",
",",
"CHANNELS",
"]",
")",
"return",
"frames"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
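A NumPy sketch, with toy frame indices, of the wrap-around slicing in `create_seq`: roll to the start frame, tile so short actions repeat, then cut to the requested length.

```python
import numpy as np

frames = np.array([0, 1, 2, 3, 4])   # an action with 5 frames
start, length = 3, 8

seq = np.roll(frames, -start)        # [3, 4, 0, 1, 2]
seq = np.tile(seq, 2)                # [3, 4, 0, 1, 2, 3, 4, 0, 1, 2]
seq = seq[:length]                   # [3, 4, 0, 1, 2, 3, 4, 0]
```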
|
test
|
create_random_seq
|
Creates a random sequence.
|
tensorflow_probability/examples/sprites_dataset.py
|
def create_random_seq(character, action_metadata, direction, length=8):
"""Creates a random sequence."""
start = tf.random.uniform([], maxval=action_metadata[1], dtype=tf.int32)
return create_seq(character, action_metadata, direction, length, start)
|
def create_random_seq(character, action_metadata, direction, length=8):
"""Creates a random sequence."""
start = tf.random.uniform([], maxval=action_metadata[1], dtype=tf.int32)
return create_seq(character, action_metadata, direction, length, start)
|
[
"Creates",
"a",
"random",
"sequence",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/sprites_dataset.py#L188-L191
|
[
"def",
"create_random_seq",
"(",
"character",
",",
"action_metadata",
",",
"direction",
",",
"length",
"=",
"8",
")",
":",
"start",
"=",
"tf",
".",
"random",
".",
"uniform",
"(",
"[",
"]",
",",
"maxval",
"=",
"action_metadata",
"[",
"1",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"return",
"create_seq",
"(",
"character",
",",
"action_metadata",
",",
"direction",
",",
"length",
",",
"start",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
create_sprites_dataset
|
Creates a tf.data pipeline for the sprites dataset.
Args:
characters: A list of (skin, hair, top, pants) tuples containing
relative paths to the sprite png image for each attribute.
actions: A list of Actions.
directions: A list of Directions.
channels: Number of image channels to yield.
length: Desired length of the sequences.
shuffle: Whether or not to shuffle the characters and sequences
start frame.
fake_data: Boolean for whether or not to yield synthetic data.
Returns:
A tf.data.Dataset yielding (seq, skin label index, hair label index,
top label index, pants label index, action label index, skin label
name, hair label_name, top label name, pants label name, action
label name) tuples.
|
tensorflow_probability/examples/sprites_dataset.py
|
def create_sprites_dataset(characters, actions, directions, channels=3,
length=8, shuffle=False, fake_data=False):
"""Creates a tf.data pipeline for the sprites dataset.
Args:
characters: A list of (skin, hair, top, pants) tuples containing
relative paths to the sprite png image for each attribute.
actions: A list of Actions.
directions: A list of Directions.
channels: Number of image channels to yield.
length: Desired length of the sequences.
shuffle: Whether or not to shuffle the characters and sequences
start frame.
fake_data: Boolean for whether or not to yield synthetic data.
Returns:
A tf.data.Dataset yielding (seq, skin label index, hair label index,
top label index, pants label index, action label index, skin label
name, hair label_name, top label name, pants label name, action
label name) tuples.
"""
if fake_data:
dummy_image = tf.random.normal([HEIGHT, WIDTH, CHANNELS])
else:
basedir = download_sprites()
action_names = [action.name for action in actions]
action_metadata = [(action.start_row, action.frames) for action in actions]
direction_rows = [direction.row_offset for direction in directions]
chars = tf.data.Dataset.from_tensor_slices(characters)
act_names = tf.data.Dataset.from_tensor_slices(action_names).repeat()
acts_metadata = tf.data.Dataset.from_tensor_slices(action_metadata).repeat()
dir_rows = tf.data.Dataset.from_tensor_slices(direction_rows).repeat()
if shuffle:
chars = chars.shuffle(len(characters))
dataset = tf.data.Dataset.zip((chars, act_names, acts_metadata, dir_rows))
skin_table = tf.contrib.lookup.index_table_from_tensor(sorted(SKIN_COLORS))
hair_table = tf.contrib.lookup.index_table_from_tensor(sorted(HAIRSTYLES))
top_table = tf.contrib.lookup.index_table_from_tensor(sorted(TOPS))
pants_table = tf.contrib.lookup.index_table_from_tensor(sorted(PANTS))
action_table = tf.contrib.lookup.index_table_from_tensor(sorted(action_names))
def process_example(attrs, act_name, act_metadata, dir_row_offset):
"""Processes a dataset row."""
skin_name = attrs[0]
hair_name = attrs[1]
top_name = attrs[2]
pants_name = attrs[3]
if fake_data:
char = dummy_image
else:
skin = read_image(basedir + os.sep + skin_name)
hair = read_image(basedir + os.sep + hair_name)
top = read_image(basedir + os.sep + top_name)
pants = read_image(basedir + os.sep + pants_name)
char = create_character(skin, hair, top, pants)
if shuffle:
seq = create_random_seq(char, act_metadata, dir_row_offset, length)
else:
seq = create_seq(char, act_metadata, dir_row_offset, length)
seq = seq[..., :channels] # limit output channels
skin_idx = skin_table.lookup(skin_name)
hair_idx = hair_table.lookup(hair_name)
top_idx = top_table.lookup(top_name)
pants_idx = pants_table.lookup(pants_name)
act_idx = action_table.lookup(act_name)
return (seq, skin_idx, hair_idx, top_idx, pants_idx, act_idx,
skin_name, hair_name, top_name, pants_name, act_name)
dataset = dataset.map(process_example)
return dataset
|
def create_sprites_dataset(characters, actions, directions, channels=3,
length=8, shuffle=False, fake_data=False):
"""Creates a tf.data pipeline for the sprites dataset.
Args:
characters: A list of (skin, hair, top, pants) tuples containing
relative paths to the sprite png image for each attribute.
actions: A list of Actions.
directions: A list of Directions.
channels: Number of image channels to yield.
length: Desired length of the sequences.
shuffle: Whether or not to shuffle the characters and sequences
start frame.
fake_data: Boolean for whether or not to yield synthetic data.
Returns:
A tf.data.Dataset yielding (seq, skin label index, hair label index,
top label index, pants label index, action label index, skin label
name, hair label_name, top label name, pants label name, action
label name) tuples.
"""
if fake_data:
dummy_image = tf.random.normal([HEIGHT, WIDTH, CHANNELS])
else:
basedir = download_sprites()
action_names = [action.name for action in actions]
action_metadata = [(action.start_row, action.frames) for action in actions]
direction_rows = [direction.row_offset for direction in directions]
chars = tf.data.Dataset.from_tensor_slices(characters)
act_names = tf.data.Dataset.from_tensor_slices(action_names).repeat()
acts_metadata = tf.data.Dataset.from_tensor_slices(action_metadata).repeat()
dir_rows = tf.data.Dataset.from_tensor_slices(direction_rows).repeat()
if shuffle:
chars = chars.shuffle(len(characters))
dataset = tf.data.Dataset.zip((chars, act_names, acts_metadata, dir_rows))
skin_table = tf.contrib.lookup.index_table_from_tensor(sorted(SKIN_COLORS))
hair_table = tf.contrib.lookup.index_table_from_tensor(sorted(HAIRSTYLES))
top_table = tf.contrib.lookup.index_table_from_tensor(sorted(TOPS))
pants_table = tf.contrib.lookup.index_table_from_tensor(sorted(PANTS))
action_table = tf.contrib.lookup.index_table_from_tensor(sorted(action_names))
def process_example(attrs, act_name, act_metadata, dir_row_offset):
"""Processes a dataset row."""
skin_name = attrs[0]
hair_name = attrs[1]
top_name = attrs[2]
pants_name = attrs[3]
if fake_data:
char = dummy_image
else:
skin = read_image(basedir + os.sep + skin_name)
hair = read_image(basedir + os.sep + hair_name)
top = read_image(basedir + os.sep + top_name)
pants = read_image(basedir + os.sep + pants_name)
char = create_character(skin, hair, top, pants)
if shuffle:
seq = create_random_seq(char, act_metadata, dir_row_offset, length)
else:
seq = create_seq(char, act_metadata, dir_row_offset, length)
seq = seq[..., :channels] # limit output channels
skin_idx = skin_table.lookup(skin_name)
hair_idx = hair_table.lookup(hair_name)
top_idx = top_table.lookup(top_name)
pants_idx = pants_table.lookup(pants_name)
act_idx = action_table.lookup(act_name)
return (seq, skin_idx, hair_idx, top_idx, pants_idx, act_idx,
skin_name, hair_name, top_name, pants_name, act_name)
dataset = dataset.map(process_example)
return dataset
|
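A minimal, hypothetical usage sketch for `create_sprites_dataset` above. The stand-in `Action`/`Direction` containers, attribute paths, and frame numbers are illustrative assumptions rather than values from `sprites_dataset.py`; only the attributes the pipeline actually reads are supplied.

```python
import collections
import tensorflow as tf

# Stand-ins for the script's own Action/Direction types; the pipeline only
# reads .name/.start_row/.frames and .row_offset.
Action = collections.namedtuple('Action', ['name', 'start_row', 'frames'])
Direction = collections.namedtuple('Direction', ['row_offset'])

walk = Action(name='walk', start_row=8, frames=9)      # made-up sheet layout
down = Direction(row_offset=2)
characters = [('skin.png', 'hair.png', 'top.png', 'pants.png')]  # placeholder paths

# fake_data=True replaces the downloaded sprite sheets with a random image, so
# the pipeline can be smoke-tested without any assets.
dataset = create_sprites_dataset(characters, actions=[walk], directions=[down],
                                 channels=3, length=8, fake_data=True)
```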
[
"Creates",
"a",
"tf",
".",
"data",
"pipeline",
"for",
"the",
"sprites",
"dataset",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/sprites_dataset.py#L194-L273
|
[
"def",
"create_sprites_dataset",
"(",
"characters",
",",
"actions",
",",
"directions",
",",
"channels",
"=",
"3",
",",
"length",
"=",
"8",
",",
"shuffle",
"=",
"False",
",",
"fake_data",
"=",
"False",
")",
":",
"if",
"fake_data",
":",
"dummy_image",
"=",
"tf",
".",
"random",
".",
"normal",
"(",
"[",
"HEIGHT",
",",
"WIDTH",
",",
"CHANNELS",
"]",
")",
"else",
":",
"basedir",
"=",
"download_sprites",
"(",
")",
"action_names",
"=",
"[",
"action",
".",
"name",
"for",
"action",
"in",
"actions",
"]",
"action_metadata",
"=",
"[",
"(",
"action",
".",
"start_row",
",",
"action",
".",
"frames",
")",
"for",
"action",
"in",
"actions",
"]",
"direction_rows",
"=",
"[",
"direction",
".",
"row_offset",
"for",
"direction",
"in",
"directions",
"]",
"chars",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"characters",
")",
"act_names",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"action_names",
")",
".",
"repeat",
"(",
")",
"acts_metadata",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"action_metadata",
")",
".",
"repeat",
"(",
")",
"dir_rows",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"direction_rows",
")",
".",
"repeat",
"(",
")",
"if",
"shuffle",
":",
"chars",
"=",
"chars",
".",
"shuffle",
"(",
"len",
"(",
"characters",
")",
")",
"dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"zip",
"(",
"(",
"chars",
",",
"act_names",
",",
"acts_metadata",
",",
"dir_rows",
")",
")",
"skin_table",
"=",
"tf",
".",
"contrib",
".",
"lookup",
".",
"index_table_from_tensor",
"(",
"sorted",
"(",
"SKIN_COLORS",
")",
")",
"hair_table",
"=",
"tf",
".",
"contrib",
".",
"lookup",
".",
"index_table_from_tensor",
"(",
"sorted",
"(",
"HAIRSTYLES",
")",
")",
"top_table",
"=",
"tf",
".",
"contrib",
".",
"lookup",
".",
"index_table_from_tensor",
"(",
"sorted",
"(",
"TOPS",
")",
")",
"pants_table",
"=",
"tf",
".",
"contrib",
".",
"lookup",
".",
"index_table_from_tensor",
"(",
"sorted",
"(",
"PANTS",
")",
")",
"action_table",
"=",
"tf",
".",
"contrib",
".",
"lookup",
".",
"index_table_from_tensor",
"(",
"sorted",
"(",
"action_names",
")",
")",
"def",
"process_example",
"(",
"attrs",
",",
"act_name",
",",
"act_metadata",
",",
"dir_row_offset",
")",
":",
"\"\"\"Processes a dataset row.\"\"\"",
"skin_name",
"=",
"attrs",
"[",
"0",
"]",
"hair_name",
"=",
"attrs",
"[",
"1",
"]",
"top_name",
"=",
"attrs",
"[",
"2",
"]",
"pants_name",
"=",
"attrs",
"[",
"3",
"]",
"if",
"fake_data",
":",
"char",
"=",
"dummy_image",
"else",
":",
"skin",
"=",
"read_image",
"(",
"basedir",
"+",
"os",
".",
"sep",
"+",
"skin_name",
")",
"hair",
"=",
"read_image",
"(",
"basedir",
"+",
"os",
".",
"sep",
"+",
"hair_name",
")",
"top",
"=",
"read_image",
"(",
"basedir",
"+",
"os",
".",
"sep",
"+",
"top_name",
")",
"pants",
"=",
"read_image",
"(",
"basedir",
"+",
"os",
".",
"sep",
"+",
"pants_name",
")",
"char",
"=",
"create_character",
"(",
"skin",
",",
"hair",
",",
"top",
",",
"pants",
")",
"if",
"shuffle",
":",
"seq",
"=",
"create_random_seq",
"(",
"char",
",",
"act_metadata",
",",
"dir_row_offset",
",",
"length",
")",
"else",
":",
"seq",
"=",
"create_seq",
"(",
"char",
",",
"act_metadata",
",",
"dir_row_offset",
",",
"length",
")",
"seq",
"=",
"seq",
"[",
"...",
",",
":",
"channels",
"]",
"# limit output channels",
"skin_idx",
"=",
"skin_table",
".",
"lookup",
"(",
"skin_name",
")",
"hair_idx",
"=",
"hair_table",
".",
"lookup",
"(",
"hair_name",
")",
"top_idx",
"=",
"top_table",
".",
"lookup",
"(",
"top_name",
")",
"pants_idx",
"=",
"pants_table",
".",
"lookup",
"(",
"pants_name",
")",
"act_idx",
"=",
"action_table",
".",
"lookup",
"(",
"act_name",
")",
"return",
"(",
"seq",
",",
"skin_idx",
",",
"hair_idx",
",",
"top_idx",
",",
"pants_idx",
",",
"act_idx",
",",
"skin_name",
",",
"hair_name",
",",
"top_name",
",",
"pants_name",
",",
"act_name",
")",
"dataset",
"=",
"dataset",
".",
"map",
"(",
"process_example",
")",
"return",
"dataset"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_maybe_validate_distributions
|
Checks that `distributions` satisfies all assumptions.
|
tensorflow_probability/python/distributions/blockwise.py
|
def _maybe_validate_distributions(distributions, dtype_override, validate_args):
"""Checks that `distributions` satisfies all assumptions."""
assertions = []
if not _is_iterable(distributions) or not distributions:
raise ValueError('`distributions` must be a list of one or more '
'distributions.')
if dtype_override is None:
dts = [
dtype_util.base_dtype(d.dtype)
for d in distributions
if d.dtype is not None
]
if dts[1:] != dts[:-1]:
raise TypeError('Distributions must have same dtype; found: {}.'.format(
set(dtype_util.name(dt) for dt in dts)))
# Validate event_ndims.
for d in distributions:
if tensorshape_util.rank(d.event_shape) is not None:
if tensorshape_util.rank(d.event_shape) != 1:
raise ValueError('`Distribution` must be vector variate, '
                         'found event ndims: {}.'.format(
tensorshape_util.rank(d.event_shape)))
elif validate_args:
assertions.append(
assert_util.assert_equal(
1, tf.size(input=d.event_shape_tensor()),
message='`Distribution` must be vector variate.'))
batch_shapes = [d.batch_shape for d in distributions]
if all(tensorshape_util.is_fully_defined(b) for b in batch_shapes):
if batch_shapes[1:] != batch_shapes[:-1]:
raise ValueError('Distributions must have the same `batch_shape`; '
'found: {}.'.format(batch_shapes))
elif validate_args:
batch_shapes = [
tensorshape_util.as_list(d.batch_shape) # pylint: disable=g-complex-comprehension
if tensorshape_util.is_fully_defined(d.batch_shape) else
d.batch_shape_tensor() for d in distributions
]
assertions.extend(
assert_util.assert_equal( # pylint: disable=g-complex-comprehension
b1, b2,
message='Distribution `batch_shape`s must be identical.')
for b1, b2 in zip(batch_shapes[1:], batch_shapes[:-1]))
return assertions
|
def _maybe_validate_distributions(distributions, dtype_override, validate_args):
"""Checks that `distributions` satisfies all assumptions."""
assertions = []
if not _is_iterable(distributions) or not distributions:
raise ValueError('`distributions` must be a list of one or more '
'distributions.')
if dtype_override is None:
dts = [
dtype_util.base_dtype(d.dtype)
for d in distributions
if d.dtype is not None
]
if dts[1:] != dts[:-1]:
raise TypeError('Distributions must have same dtype; found: {}.'.format(
set(dtype_util.name(dt) for dt in dts)))
# Validate event_ndims.
for d in distributions:
if tensorshape_util.rank(d.event_shape) is not None:
if tensorshape_util.rank(d.event_shape) != 1:
raise ValueError('`Distribution` must be vector variate, '
                         'found event ndims: {}.'.format(
tensorshape_util.rank(d.event_shape)))
elif validate_args:
assertions.append(
assert_util.assert_equal(
1, tf.size(input=d.event_shape_tensor()),
message='`Distribution` must be vector variate.'))
batch_shapes = [d.batch_shape for d in distributions]
if all(tensorshape_util.is_fully_defined(b) for b in batch_shapes):
if batch_shapes[1:] != batch_shapes[:-1]:
raise ValueError('Distributions must have the same `batch_shape`; '
'found: {}.'.format(batch_shapes))
elif validate_args:
batch_shapes = [
tensorshape_util.as_list(d.batch_shape) # pylint: disable=g-complex-comprehension
if tensorshape_util.is_fully_defined(d.batch_shape) else
d.batch_shape_tensor() for d in distributions
]
assertions.extend(
assert_util.assert_equal( # pylint: disable=g-complex-comprehension
b1, b2,
message='Distribution `batch_shape`s must be identical.')
for b1, b2 in zip(batch_shapes[1:], batch_shapes[:-1]))
return assertions
|
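A hedged sketch of how `_maybe_validate_distributions` behaves, assuming it is called from within `blockwise.py` (the helper is module-private). Two vector-variate distributions with matching dtypes and batch shapes pass the static checks; with `validate_args=True` only runtime assertion ops are returned.

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
dists = [tfd.MultivariateNormalDiag(loc=tf.zeros([3])),   # event_shape [3]
         tfd.Dirichlet(concentration=tf.ones([4]))]       # event_shape [4]

# Same float32 dtype and scalar batch shape, so no exception is raised; the
# returned list holds whatever runtime assertions still need to be checked.
assertions = _maybe_validate_distributions(
    dists, dtype_override=None, validate_args=True)
```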
[
"Checks",
"that",
"distributions",
"satisfies",
"all",
"assumptions",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/blockwise.py#L177-L225
|
[
"def",
"_maybe_validate_distributions",
"(",
"distributions",
",",
"dtype_override",
",",
"validate_args",
")",
":",
"assertions",
"=",
"[",
"]",
"if",
"not",
"_is_iterable",
"(",
"distributions",
")",
"or",
"not",
"distributions",
":",
"raise",
"ValueError",
"(",
"'`distributions` must be a list of one or more '",
"'distributions.'",
")",
"if",
"dtype_override",
"is",
"None",
":",
"dts",
"=",
"[",
"dtype_util",
".",
"base_dtype",
"(",
"d",
".",
"dtype",
")",
"for",
"d",
"in",
"distributions",
"if",
"d",
".",
"dtype",
"is",
"not",
"None",
"]",
"if",
"dts",
"[",
"1",
":",
"]",
"!=",
"dts",
"[",
":",
"-",
"1",
"]",
":",
"raise",
"TypeError",
"(",
"'Distributions must have same dtype; found: {}.'",
".",
"format",
"(",
"set",
"(",
"dtype_util",
".",
"name",
"(",
"dt",
")",
"for",
"dt",
"in",
"dts",
")",
")",
")",
"# Validate event_ndims.",
"for",
"d",
"in",
"distributions",
":",
"if",
"tensorshape_util",
".",
"rank",
"(",
"d",
".",
"event_shape",
")",
"is",
"not",
"None",
":",
"if",
"tensorshape_util",
".",
"rank",
"(",
"d",
".",
"event_shape",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'`Distribution` must be vector variate, '",
"'found event nimds: {}.'",
".",
"format",
"(",
"tensorshape_util",
".",
"rank",
"(",
"d",
".",
"event_shape",
")",
")",
")",
"elif",
"validate_args",
":",
"assertions",
".",
"append",
"(",
"assert_util",
".",
"assert_equal",
"(",
"1",
",",
"tf",
".",
"size",
"(",
"input",
"=",
"d",
".",
"event_shape_tensor",
"(",
")",
")",
",",
"message",
"=",
"'`Distribution` must be vector variate.'",
")",
")",
"batch_shapes",
"=",
"[",
"d",
".",
"batch_shape",
"for",
"d",
"in",
"distributions",
"]",
"if",
"all",
"(",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"b",
")",
"for",
"b",
"in",
"batch_shapes",
")",
":",
"if",
"batch_shapes",
"[",
"1",
":",
"]",
"!=",
"batch_shapes",
"[",
":",
"-",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"'Distributions must have the same `batch_shape`; '",
"'found: {}.'",
".",
"format",
"(",
"batch_shapes",
")",
")",
"elif",
"validate_args",
":",
"batch_shapes",
"=",
"[",
"tensorshape_util",
".",
"as_list",
"(",
"d",
".",
"batch_shape",
")",
"# pylint: disable=g-complex-comprehension",
"if",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"d",
".",
"batch_shape",
")",
"else",
"d",
".",
"batch_shape_tensor",
"(",
")",
"for",
"d",
"in",
"distributions",
"]",
"assertions",
".",
"extend",
"(",
"assert_util",
".",
"assert_equal",
"(",
"# pylint: disable=g-complex-comprehension",
"b1",
",",
"b2",
",",
"message",
"=",
"'Distribution `batch_shape`s must be identical.'",
")",
"for",
"b1",
",",
"b2",
"in",
"zip",
"(",
"batch_shapes",
"[",
"1",
":",
"]",
",",
"batch_shapes",
"[",
":",
"-",
"1",
"]",
")",
")",
"return",
"assertions"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_kl_blockwise_blockwise
|
Calculate the batched KL divergence KL(b0 || b1) with b0 and b1 Blockwise distributions.
Args:
b0: instance of a Blockwise distribution object.
b1: instance of a Blockwise distribution object.
name: (optional) Name to use for created operations. Default is
"kl_blockwise_blockwise".
Returns:
kl_blockwise_blockwise: `Tensor`. The batchwise KL(b0 || b1).
|
tensorflow_probability/python/distributions/blockwise.py
|
def _kl_blockwise_blockwise(b0, b1, name=None):
"""Calculate the batched KL divergence KL(b0 || b1) with b0 and b1 Blockwise distributions.
Args:
b0: instance of a Blockwise distribution object.
b1: instance of a Blockwise distribution object.
name: (optional) Name to use for created operations. Default is
"kl_blockwise_blockwise".
Returns:
kl_blockwise_blockwise: `Tensor`. The batchwise KL(b0 || b1).
"""
if len(b0.distributions) != len(b1.distributions):
raise ValueError(
'Can only compute KL divergence between Blockwise distributions with '
'the same number of component distributions.')
# We also need to check that the event shapes match for each one.
b0_event_sizes = [_event_size(d) for d in b0.distributions]
b1_event_sizes = [_event_size(d) for d in b1.distributions]
assertions = []
message = ('Can only compute KL divergence between Blockwise distributions '
'with the same pairwise event shapes.')
if (all(isinstance(event_size, int) for event_size in b0_event_sizes) and
all(isinstance(event_size, int) for event_size in b1_event_sizes)):
if b0_event_sizes != b1_event_sizes:
raise ValueError(message)
else:
if b0.validate_args or b1.validate_args:
assertions.extend(
assert_util.assert_equal( # pylint: disable=g-complex-comprehension
e1, e2, message=message)
for e1, e2 in zip(b0_event_sizes, b1_event_sizes))
with tf.name_scope(name or 'kl_blockwise_blockwise'):
with tf.control_dependencies(assertions):
return sum([
kullback_leibler.kl_divergence(d1, d2) for d1, d2 in zip(
b0.distributions, b1.distributions)])
|
def _kl_blockwise_blockwise(b0, b1, name=None):
"""Calculate the batched KL divergence KL(b0 || b1) with b0 and b1 Blockwise distributions.
Args:
b0: instance of a Blockwise distribution object.
b1: instance of a Blockwise distribution object.
name: (optional) Name to use for created operations. Default is
"kl_blockwise_blockwise".
Returns:
kl_blockwise_blockwise: `Tensor`. The batchwise KL(b0 || b1).
"""
if len(b0.distributions) != len(b1.distributions):
raise ValueError(
'Can only compute KL divergence between Blockwise distributions with '
'the same number of component distributions.')
# We also need to check that the event shapes match for each one.
b0_event_sizes = [_event_size(d) for d in b0.distributions]
b1_event_sizes = [_event_size(d) for d in b1.distributions]
assertions = []
message = ('Can only compute KL divergence between Blockwise distributions '
'with the same pairwise event shapes.')
if (all(isinstance(event_size, int) for event_size in b0_event_sizes) and
all(isinstance(event_size, int) for event_size in b1_event_sizes)):
if b0_event_sizes != b1_event_sizes:
raise ValueError(message)
else:
if b0.validate_args or b1.validate_args:
assertions.extend(
assert_util.assert_equal( # pylint: disable=g-complex-comprehension
e1, e2, message=message)
for e1, e2 in zip(b0_event_sizes, b1_event_sizes))
with tf.name_scope(name or 'kl_blockwise_blockwise'):
with tf.control_dependencies(assertions):
return sum([
kullback_leibler.kl_divergence(d1, d2) for d1, d2 in zip(
b0.distributions, b1.distributions)])
|
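An illustrative sketch (not from `blockwise.py`): because the KL is registered for `Blockwise` pairs, `tfd.kl_divergence` on two compatible `Blockwise` distributions reduces to the sum of component-wise KLs computed above.

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
b0 = tfd.Blockwise([tfd.MultivariateNormalDiag(loc=tf.zeros([2])),
                    tfd.Dirichlet(concentration=tf.ones([3]))])
b1 = tfd.Blockwise([tfd.MultivariateNormalDiag(loc=tf.ones([2])),
                    tfd.Dirichlet(concentration=2. * tf.ones([3]))])

# Dispatches to _kl_blockwise_blockwise:
# KL(MVN || MVN) + KL(Dirichlet || Dirichlet).
kl = tfd.kl_divergence(b0, b1)
```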
[
"Calculate",
"the",
"batched",
"KL",
"divergence",
"KL",
"(",
"b0",
"||",
"b1",
")",
"with",
"b0",
"and",
"b1",
"Blockwise",
"distributions",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/blockwise.py#L229-L269
|
[
"def",
"_kl_blockwise_blockwise",
"(",
"b0",
",",
"b1",
",",
"name",
"=",
"None",
")",
":",
"if",
"len",
"(",
"b0",
".",
"distributions",
")",
"!=",
"len",
"(",
"b1",
".",
"distributions",
")",
":",
"raise",
"ValueError",
"(",
"'Can only compute KL divergence between Blockwise distributions with '",
"'the same number of component distributions.'",
")",
"# We also need to check that the event shapes match for each one.",
"b0_event_sizes",
"=",
"[",
"_event_size",
"(",
"d",
")",
"for",
"d",
"in",
"b0",
".",
"distributions",
"]",
"b1_event_sizes",
"=",
"[",
"_event_size",
"(",
"d",
")",
"for",
"d",
"in",
"b1",
".",
"distributions",
"]",
"assertions",
"=",
"[",
"]",
"message",
"=",
"(",
"'Can only compute KL divergence between Blockwise distributions '",
"'with the same pairwise event shapes.'",
")",
"if",
"(",
"all",
"(",
"isinstance",
"(",
"event_size",
",",
"int",
")",
"for",
"event_size",
"in",
"b0_event_sizes",
")",
"and",
"all",
"(",
"isinstance",
"(",
"event_size",
",",
"int",
")",
"for",
"event_size",
"in",
"b1_event_sizes",
")",
")",
":",
"if",
"b0_event_sizes",
"!=",
"b1_event_sizes",
":",
"raise",
"ValueError",
"(",
"message",
")",
"else",
":",
"if",
"b0",
".",
"validate_args",
"or",
"b1",
".",
"validate_args",
":",
"assertions",
".",
"extend",
"(",
"assert_util",
".",
"assert_equal",
"(",
"# pylint: disable=g-complex-comprehension",
"e1",
",",
"e2",
",",
"message",
"=",
"message",
")",
"for",
"e1",
",",
"e2",
"in",
"zip",
"(",
"b0_event_sizes",
",",
"b1_event_sizes",
")",
")",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"'kl_blockwise_blockwise'",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"assertions",
")",
":",
"return",
"sum",
"(",
"[",
"kullback_leibler",
".",
"kl_divergence",
"(",
"d1",
",",
"d2",
")",
"for",
"d1",
",",
"d2",
"in",
"zip",
"(",
"b0",
".",
"distributions",
",",
"b1",
".",
"distributions",
")",
"]",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_kl_half_normal_half_normal
|
Calculate the batched KL divergence KL(a || b) with a and b `HalfNormal`.
Args:
a: Instance of a `HalfNormal` distribution object.
b: Instance of a `HalfNormal` distribution object.
name: (optional) Name to use for created operations.
default is "kl_half_normal_half_normal".
Returns:
Batchwise KL(a || b)
|
tensorflow_probability/python/distributions/half_normal.py
|
def _kl_half_normal_half_normal(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b `HalfNormal`.
Args:
a: Instance of a `HalfNormal` distribution object.
b: Instance of a `HalfNormal` distribution object.
name: (optional) Name to use for created operations.
default is "kl_half_normal_half_normal".
Returns:
Batchwise KL(a || b)
"""
with tf.name_scope(name or "kl_half_normal_half_normal"):
# Consistent with
# http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 119
return (tf.math.log(b.scale) - tf.math.log(a.scale) +
(a.scale**2 - b.scale**2) / (2 * b.scale**2))
|
def _kl_half_normal_half_normal(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b `HalfNormal`.
Args:
a: Instance of a `HalfNormal` distribution object.
b: Instance of a `HalfNormal` distribution object.
name: (optional) Name to use for created operations.
default is "kl_half_normal_half_normal".
Returns:
Batchwise KL(a || b)
"""
with tf.name_scope(name or "kl_half_normal_half_normal"):
# Consistent with
# http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 119
return (tf.math.log(b.scale) - tf.math.log(a.scale) +
(a.scale**2 - b.scale**2) / (2 * b.scale**2))
|
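A small numerical sanity sketch, assuming the registered KL is reachable through `tfd.kl_divergence`; the value follows the closed form `log(b.scale / a.scale) + (a.scale**2 - b.scale**2) / (2 * b.scale**2)` used above.

```python
import tensorflow_probability as tfp

tfd = tfp.distributions
a = tfd.HalfNormal(scale=1.)
b = tfd.HalfNormal(scale=2.)

# log(2) + (1 - 4) / 8 = 0.6931 - 0.375 ~= 0.3181; it is exactly 0 when the
# two scales match.
kl = tfd.kl_divergence(a, b)
```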
[
"Calculate",
"the",
"batched",
"KL",
"divergence",
"KL",
"(",
"a",
"||",
"b",
")",
"with",
"a",
"and",
"b",
"HalfNormal",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/half_normal.py#L180-L196
|
[
"def",
"_kl_half_normal_half_normal",
"(",
"a",
",",
"b",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"kl_half_normal_half_normal\"",
")",
":",
"# Consistent with",
"# http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 119",
"return",
"(",
"tf",
".",
"math",
".",
"log",
"(",
"b",
".",
"scale",
")",
"-",
"tf",
".",
"math",
".",
"log",
"(",
"a",
".",
"scale",
")",
"+",
"(",
"a",
".",
"scale",
"**",
"2",
"-",
"b",
".",
"scale",
"**",
"2",
")",
"/",
"(",
"2",
"*",
"b",
".",
"scale",
"**",
"2",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_flatten_summand_list
|
Flatten a list of kernels which may contain _SumKernel instances.
Args:
kernels: Python list of `PositiveSemidefiniteKernel` instances
Returns:
Python list containing the elements of kernels, with any _SumKernel
instances replaced by their `kernels` property contents.
|
tensorflow_probability/python/positive_semidefinite_kernels/positive_semidefinite_kernel.py
|
def _flatten_summand_list(kernels):
"""Flatten a list of kernels which may contain _SumKernel instances.
Args:
kernels: Python list of `PositiveSemidefiniteKernel` instances
Returns:
Python list containing the elements of kernels, with any _SumKernel
instances replaced by their `kernels` property contents.
"""
flattened = []
for k in kernels:
if isinstance(k, _SumKernel):
flattened += k.kernels
else:
flattened.append(k)
return flattened
|
def _flatten_summand_list(kernels):
"""Flatten a list of kernels which may contain _SumKernel instances.
Args:
kernels: Python list of `PositiveSemidefiniteKernel` instances
Returns:
Python list containing the elements of kernels, with any _SumKernel
instances replaced by their `kernels` property contents.
"""
flattened = []
for k in kernels:
if isinstance(k, _SumKernel):
flattened += k.kernels
else:
flattened.append(k)
return flattened
|
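A behavioural sketch, assuming it runs inside `positive_semidefinite_kernel.py` (the helper and `_SumKernel` are module-private) and that `k1 + k2` builds a `_SumKernel`, as the kernel `__add__` overload in that file does.

```python
import tensorflow_probability as tfp

tfpk = tfp.positive_semidefinite_kernels
k1, k2, k3 = (tfpk.ExponentiatedQuadratic() for _ in range(3))

summed = k1 + k2                              # a _SumKernel over [k1, k2]
flat = _flatten_summand_list([summed, k3])    # == [k1, k2, k3]
# Nested sums are spliced into one flat list instead of staying wrapped, which
# keeps chained `+` from building deeply nested kernel objects.
```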
[
"Flatten",
"a",
"list",
"of",
"kernels",
"which",
"may",
"contain",
"_SumKernel",
"instances",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/positive_semidefinite_kernels/positive_semidefinite_kernel.py#L608-L624
|
[
"def",
"_flatten_summand_list",
"(",
"kernels",
")",
":",
"flattened",
"=",
"[",
"]",
"for",
"k",
"in",
"kernels",
":",
"if",
"isinstance",
"(",
"k",
",",
"_SumKernel",
")",
":",
"flattened",
"+=",
"k",
".",
"kernels",
"else",
":",
"flattened",
".",
"append",
"(",
"k",
")",
"return",
"flattened"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_flatten_multiplicand_list
|
Flatten a list of kernels which may contain _ProductKernel instances.
Args:
kernels: Python list of `PositiveSemidefiniteKernel` instances
Returns:
Python list containing the elements of kernels, with any _ProductKernel
instances replaced by their `kernels` property contents.
|
tensorflow_probability/python/positive_semidefinite_kernels/positive_semidefinite_kernel.py
|
def _flatten_multiplicand_list(kernels):
"""Flatten a list of kernels which may contain _ProductKernel instances.
Args:
kernels: Python list of `PositiveSemidefiniteKernel` instances
Returns:
Python list containing the elements of kernels, with any _ProductKernel
instances replaced by their `kernels` property contents.
"""
flattened = []
for k in kernels:
if isinstance(k, _ProductKernel):
flattened += k.kernels
else:
flattened.append(k)
return flattened
|
def _flatten_multiplicand_list(kernels):
"""Flatten a list of kernels which may contain _ProductKernel instances.
Args:
kernels: Python list of `PositiveSemidefiniteKernel` instances
Returns:
Python list containing the elements of kernels, with any _ProductKernel
instances replaced by their `kernels` property contents.
"""
flattened = []
for k in kernels:
if isinstance(k, _ProductKernel):
flattened += k.kernels
else:
flattened.append(k)
return flattened
|
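The product-side counterpart of the sketch above, under the same assumptions; `k1 * k2` is taken to build a `_ProductKernel` via the kernel `__mul__` overload.

```python
import tensorflow_probability as tfp

tfpk = tfp.positive_semidefinite_kernels
k1, k2, k3 = (tfpk.ExponentiatedQuadratic() for _ in range(3))

flat = _flatten_multiplicand_list([k1 * k2, k3])   # == [k1, k2, k3]
```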
[
"Flatten",
"a",
"list",
"of",
"kernels",
"which",
"may",
"contain",
"_ProductKernel",
"instances",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/positive_semidefinite_kernels/positive_semidefinite_kernel.py#L627-L643
|
[
"def",
"_flatten_multiplicand_list",
"(",
"kernels",
")",
":",
"flattened",
"=",
"[",
"]",
"for",
"k",
"in",
"kernels",
":",
"if",
"isinstance",
"(",
"k",
",",
"_ProductKernel",
")",
":",
"flattened",
"+=",
"k",
".",
"kernels",
"else",
":",
"flattened",
".",
"append",
"(",
"k",
")",
"return",
"flattened"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
build_input_pipeline
|
Build an Iterator switching between train and heldout data.
|
tensorflow_probability/examples/cifar10_bnn.py
|
def build_input_pipeline(x_train, x_test, y_train, y_test,
batch_size, valid_size):
"""Build an Iterator switching between train and heldout data."""
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
y_train = y_train.flatten()
y_test = y_test.flatten()
if FLAGS.subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print("x_train shape:" + str(x_train.shape))
print(str(x_train.shape[0]) + " train samples")
print(str(x_test.shape[0]) + " test samples")
# Build an iterator over training batches.
training_dataset = tf.data.Dataset.from_tensor_slices(
(x_train, np.int32(y_train)))
training_batches = training_dataset.shuffle(
50000, reshuffle_each_iteration=True).repeat().batch(batch_size)
training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)
  # Build an iterator over the heldout set with batch_size=heldout_size,
# i.e., return the entire heldout set as a constant.
heldout_dataset = tf.data.Dataset.from_tensor_slices(
(x_test, np.int32(y_test)))
heldout_batches = heldout_dataset.repeat().batch(valid_size)
heldout_iterator = tf.compat.v1.data.make_one_shot_iterator(heldout_batches)
# Combine these into a feedable iterator that can switch between training
# and validation inputs.
handle = tf.compat.v1.placeholder(tf.string, shape=[])
feedable_iterator = tf.compat.v1.data.Iterator.from_string_handle(
handle, training_batches.output_types, training_batches.output_shapes)
images, labels = feedable_iterator.get_next()
return images, labels, handle, training_iterator, heldout_iterator
|
def build_input_pipeline(x_train, x_test, y_train, y_test,
batch_size, valid_size):
"""Build an Iterator switching between train and heldout data."""
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
y_train = y_train.flatten()
y_test = y_test.flatten()
if FLAGS.subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print("x_train shape:" + str(x_train.shape))
print(str(x_train.shape[0]) + " train samples")
print(str(x_test.shape[0]) + " test samples")
# Build an iterator over training batches.
training_dataset = tf.data.Dataset.from_tensor_slices(
(x_train, np.int32(y_train)))
training_batches = training_dataset.shuffle(
50000, reshuffle_each_iteration=True).repeat().batch(batch_size)
training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)
  # Build an iterator over the heldout set with batch_size=heldout_size,
# i.e., return the entire heldout set as a constant.
heldout_dataset = tf.data.Dataset.from_tensor_slices(
(x_test, np.int32(y_test)))
heldout_batches = heldout_dataset.repeat().batch(valid_size)
heldout_iterator = tf.compat.v1.data.make_one_shot_iterator(heldout_batches)
# Combine these into a feedable iterator that can switch between training
# and validation inputs.
handle = tf.compat.v1.placeholder(tf.string, shape=[])
feedable_iterator = tf.compat.v1.data.Iterator.from_string_handle(
handle, training_batches.output_types, training_batches.output_shapes)
images, labels = feedable_iterator.get_next()
return images, labels, handle, training_iterator, heldout_iterator
|
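A hypothetical TF1-style driver loop for the pipeline above (not taken from `cifar10_bnn.py`); it assumes the script's flags (e.g. `subtract_pixel_mean`) have been parsed, that graph mode is in use as in the rest of the example, and it reuses `build_fake_data` from the same file for inputs.

```python
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = build_fake_data()
images, labels, handle, train_it, heldout_it = build_input_pipeline(
    x_train, x_test, y_train, y_test, batch_size=4, valid_size=x_test.shape[0])

with tf.compat.v1.Session() as sess:
  train_handle = sess.run(train_it.string_handle())
  heldout_handle = sess.run(heldout_it.string_handle())
  # The same (images, labels) tensors read from either split, depending on
  # which string handle is fed in.
  train_batch = sess.run([images, labels], feed_dict={handle: train_handle})
  heldout_batch = sess.run([images, labels], feed_dict={handle: heldout_handle})
```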
[
"Build",
"an",
"Iterator",
"switching",
"between",
"train",
"and",
"heldout",
"data",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/cifar10_bnn.py#L109-L152
|
[
"def",
"build_input_pipeline",
"(",
"x_train",
",",
"x_test",
",",
"y_train",
",",
"y_test",
",",
"batch_size",
",",
"valid_size",
")",
":",
"x_train",
"=",
"x_train",
".",
"astype",
"(",
"\"float32\"",
")",
"x_test",
"=",
"x_test",
".",
"astype",
"(",
"\"float32\"",
")",
"x_train",
"/=",
"255",
"x_test",
"/=",
"255",
"y_train",
"=",
"y_train",
".",
"flatten",
"(",
")",
"y_test",
"=",
"y_test",
".",
"flatten",
"(",
")",
"if",
"FLAGS",
".",
"subtract_pixel_mean",
":",
"x_train_mean",
"=",
"np",
".",
"mean",
"(",
"x_train",
",",
"axis",
"=",
"0",
")",
"x_train",
"-=",
"x_train_mean",
"x_test",
"-=",
"x_train_mean",
"print",
"(",
"\"x_train shape:\"",
"+",
"str",
"(",
"x_train",
".",
"shape",
")",
")",
"print",
"(",
"str",
"(",
"x_train",
".",
"shape",
"[",
"0",
"]",
")",
"+",
"\" train samples\"",
")",
"print",
"(",
"str",
"(",
"x_test",
".",
"shape",
"[",
"0",
"]",
")",
"+",
"\" test samples\"",
")",
"# Build an iterator over training batches.",
"training_dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"(",
"x_train",
",",
"np",
".",
"int32",
"(",
"y_train",
")",
")",
")",
"training_batches",
"=",
"training_dataset",
".",
"shuffle",
"(",
"50000",
",",
"reshuffle_each_iteration",
"=",
"True",
")",
".",
"repeat",
"(",
")",
".",
"batch",
"(",
"batch_size",
")",
"training_iterator",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
".",
"make_one_shot_iterator",
"(",
"training_batches",
")",
"# Build a iterator over the heldout set with batch_size=heldout_size,",
"# i.e., return the entire heldout set as a constant.",
"heldout_dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"(",
"x_test",
",",
"np",
".",
"int32",
"(",
"y_test",
")",
")",
")",
"heldout_batches",
"=",
"heldout_dataset",
".",
"repeat",
"(",
")",
".",
"batch",
"(",
"valid_size",
")",
"heldout_iterator",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
".",
"make_one_shot_iterator",
"(",
"heldout_batches",
")",
"# Combine these into a feedable iterator that can switch between training",
"# and validation inputs.",
"handle",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"placeholder",
"(",
"tf",
".",
"string",
",",
"shape",
"=",
"[",
"]",
")",
"feedable_iterator",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
".",
"Iterator",
".",
"from_string_handle",
"(",
"handle",
",",
"training_batches",
".",
"output_types",
",",
"training_batches",
".",
"output_shapes",
")",
"images",
",",
"labels",
"=",
"feedable_iterator",
".",
"get_next",
"(",
")",
"return",
"images",
",",
"labels",
",",
"handle",
",",
"training_iterator",
",",
"heldout_iterator"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
build_fake_data
|
Build fake CIFAR10-style data for unit testing.
|
tensorflow_probability/examples/cifar10_bnn.py
|
def build_fake_data():
"""Build fake CIFAR10-style data for unit testing."""
num_examples = 10
x_train = np.random.rand(num_examples, *IMAGE_SHAPE).astype(np.float32)
y_train = np.random.permutation(np.arange(num_examples)).astype(np.int32)
x_test = np.random.rand(num_examples, *IMAGE_SHAPE).astype(np.float32)
y_test = np.random.permutation(np.arange(num_examples)).astype(np.int32)
return (x_train, y_train), (x_test, y_test)
|
def build_fake_data():
"""Build fake CIFAR10-style data for unit testing."""
num_examples = 10
x_train = np.random.rand(num_examples, *IMAGE_SHAPE).astype(np.float32)
y_train = np.random.permutation(np.arange(num_examples)).astype(np.int32)
x_test = np.random.rand(num_examples, *IMAGE_SHAPE).astype(np.float32)
y_test = np.random.permutation(np.arange(num_examples)).astype(np.int32)
return (x_train, y_train), (x_test, y_test)
|
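What the fake data looks like, assuming `IMAGE_SHAPE` is the module-level CIFAR-10 shape (e.g. `[32, 32, 3]`):

```python
(x_train, y_train), (x_test, y_test) = build_fake_data()
# x_train.shape == (10,) + tuple(IMAGE_SHAPE), values uniform in [0, 1);
# y_train is a permutation of 0..9, so each fake class appears exactly once.
```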
[
"Build",
"fake",
"CIFAR10",
"-",
"style",
"data",
"for",
"unit",
"testing",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/cifar10_bnn.py#L155-L162
|
[
"def",
"build_fake_data",
"(",
")",
":",
"num_examples",
"=",
"10",
"x_train",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"num_examples",
",",
"*",
"IMAGE_SHAPE",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"y_train",
"=",
"np",
".",
"random",
".",
"permutation",
"(",
"np",
".",
"arange",
"(",
"num_examples",
")",
")",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"x_test",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"num_examples",
",",
"*",
"IMAGE_SHAPE",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"y_test",
"=",
"np",
".",
"random",
".",
"permutation",
"(",
"np",
".",
"arange",
"(",
"num_examples",
")",
")",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"return",
"(",
"x_train",
",",
"y_train",
")",
",",
"(",
"x_test",
",",
"y_test",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
count_integers
|
Counts the number of occurrences of each value in an integer array `arr`.
Works like `tf.math.bincount`, but provides an `axis` kwarg that specifies
dimensions to reduce over. With
`~axis = [i for i in range(arr.ndim) if i not in axis]`,
this function returns a `Tensor` of shape `[K] + arr.shape[~axis]`.
If `minlength` and `maxlength` are not given, `K = tf.reduce_max(arr) + 1`
if `arr` is non-empty, and 0 otherwise.
If `weights` are non-None, then index `i` of the output stores the sum of the
value in `weights` at each index where the corresponding value in `arr` is
`i`.
Args:
arr: An `int32` `Tensor` of non-negative values.
weights: If non-None, must be the same shape as arr. For each value in
`arr`, the bin will be incremented by the corresponding weight instead of
1.
minlength: If given, ensures the output has length at least `minlength`,
padding with zeros at the end if necessary.
maxlength: If given, skips values in `arr` that are equal or greater than
`maxlength`, ensuring that the output has length at most `maxlength`.
axis: A `0-D` or `1-D` `int32` `Tensor` (with static values) designating
dimensions in `arr` to reduce over.
`Default value:` `None`, meaning reduce over all dimensions.
dtype: If `weights` is None, determines the type of the output bins.
name: A name scope for the associated operations (optional).
Returns:
A vector with the same dtype as `weights` or the given `dtype`. The bin
values.
|
tensorflow_probability/python/stats/quantiles.py
|
def count_integers(arr,
weights=None,
minlength=None,
maxlength=None,
axis=None,
dtype=tf.int32,
name=None):
"""Counts the number of occurrences of each value in an integer array `arr`.
Works like `tf.math.bincount`, but provides an `axis` kwarg that specifies
dimensions to reduce over. With
`~axis = [i for i in range(arr.ndim) if i not in axis]`,
this function returns a `Tensor` of shape `[K] + arr.shape[~axis]`.
If `minlength` and `maxlength` are not given, `K = tf.reduce_max(arr) + 1`
if `arr` is non-empty, and 0 otherwise.
If `weights` are non-None, then index `i` of the output stores the sum of the
value in `weights` at each index where the corresponding value in `arr` is
`i`.
Args:
arr: An `int32` `Tensor` of non-negative values.
weights: If non-None, must be the same shape as arr. For each value in
`arr`, the bin will be incremented by the corresponding weight instead of
1.
minlength: If given, ensures the output has length at least `minlength`,
padding with zeros at the end if necessary.
maxlength: If given, skips values in `arr` that are equal or greater than
`maxlength`, ensuring that the output has length at most `maxlength`.
axis: A `0-D` or `1-D` `int32` `Tensor` (with static values) designating
dimensions in `arr` to reduce over.
`Default value:` `None`, meaning reduce over all dimensions.
dtype: If `weights` is None, determines the type of the output bins.
name: A name scope for the associated operations (optional).
Returns:
A vector with the same dtype as `weights` or the given `dtype`. The bin
values.
"""
with tf.compat.v1.name_scope(
name, 'count_integers', values=[arr, weights, minlength, maxlength,
axis]):
if axis is None:
return tf.math.bincount(
arr,
weights=weights,
minlength=minlength,
maxlength=maxlength,
dtype=dtype)
arr = tf.convert_to_tensor(value=arr, dtype=tf.int32, name='arr')
arr_ndims = _get_static_ndims(arr, expect_static=True)
axis = _make_static_axis_non_negative_list(axis, arr_ndims)
# ~axis from docstring. Dims in arr that are not in axis.
not_axis = sorted(set(range(arr_ndims)).difference(axis))
# If we're reducing over everything, just use standard bincount.
if not not_axis:
return tf.math.bincount(
arr,
weights=weights,
minlength=minlength,
maxlength=maxlength,
dtype=dtype)
# Move dims in ~axis to the left, so we can tf.map_fn bincount over them,
# Producing counts for every index I in ~axis.
# Thus, flat_arr is not totally flat, it just has the dims in ~axis
# flattened.
flat_arr = _move_dims_to_flat_end(arr, not_axis, arr_ndims, right_end=False)
# tf.map_fn over dim 0.
if weights is None:
def one_bincount(arr_slice):
return tf.math.bincount(
arr_slice,
weights=None,
minlength=minlength,
maxlength=maxlength,
dtype=dtype)
flat_counts = tf.map_fn(one_bincount, elems=flat_arr, dtype=dtype)
else:
weights = tf.convert_to_tensor(value=weights, name='weights')
_get_static_ndims(weights, expect_static=True, expect_ndims=arr_ndims)
flat_weights = _move_dims_to_flat_end(
weights, not_axis, arr_ndims, right_end=False)
def one_bincount(arr_and_weights_slices):
arr_slice, weights_slice = arr_and_weights_slices
return tf.math.bincount(
arr_slice,
weights=weights_slice,
minlength=minlength,
maxlength=maxlength,
dtype=dtype)
flat_counts = tf.map_fn(
one_bincount, elems=[flat_arr, flat_weights], dtype=weights.dtype)
# flat_counts.shape = [prod(~axis), K], because map_fn stacked on axis 0.
# bincount needs to have the K bins in axis 0, so transpose...
flat_counts_t = tf.transpose(a=flat_counts, perm=[1, 0])
# Throw in this assert, to ensure shape assumptions are correct.
_get_static_ndims(flat_counts_t, expect_ndims=2, expect_static=True)
# not_axis_shape = arr.shape[~axis]
not_axis_shape = tf.gather(tf.shape(input=arr), indices=not_axis)
# The first index of flat_counts_t indexes bins 0,..,K-1, the rest are ~axis
out_shape = tf.concat([[-1], not_axis_shape], axis=0)
return tf.reshape(flat_counts_t, out_shape)
|
def count_integers(arr,
weights=None,
minlength=None,
maxlength=None,
axis=None,
dtype=tf.int32,
name=None):
"""Counts the number of occurrences of each value in an integer array `arr`.
Works like `tf.math.bincount`, but provides an `axis` kwarg that specifies
dimensions to reduce over. With
`~axis = [i for i in range(arr.ndim) if i not in axis]`,
this function returns a `Tensor` of shape `[K] + arr.shape[~axis]`.
If `minlength` and `maxlength` are not given, `K = tf.reduce_max(arr) + 1`
if `arr` is non-empty, and 0 otherwise.
If `weights` are non-None, then index `i` of the output stores the sum of the
value in `weights` at each index where the corresponding value in `arr` is
`i`.
Args:
arr: An `int32` `Tensor` of non-negative values.
weights: If non-None, must be the same shape as arr. For each value in
`arr`, the bin will be incremented by the corresponding weight instead of
1.
minlength: If given, ensures the output has length at least `minlength`,
padding with zeros at the end if necessary.
maxlength: If given, skips values in `arr` that are equal or greater than
`maxlength`, ensuring that the output has length at most `maxlength`.
axis: A `0-D` or `1-D` `int32` `Tensor` (with static values) designating
dimensions in `arr` to reduce over.
`Default value:` `None`, meaning reduce over all dimensions.
dtype: If `weights` is None, determines the type of the output bins.
name: A name scope for the associated operations (optional).
Returns:
A vector with the same dtype as `weights` or the given `dtype`. The bin
values.
"""
with tf.compat.v1.name_scope(
name, 'count_integers', values=[arr, weights, minlength, maxlength,
axis]):
if axis is None:
return tf.math.bincount(
arr,
weights=weights,
minlength=minlength,
maxlength=maxlength,
dtype=dtype)
arr = tf.convert_to_tensor(value=arr, dtype=tf.int32, name='arr')
arr_ndims = _get_static_ndims(arr, expect_static=True)
axis = _make_static_axis_non_negative_list(axis, arr_ndims)
# ~axis from docstring. Dims in arr that are not in axis.
not_axis = sorted(set(range(arr_ndims)).difference(axis))
# If we're reducing over everything, just use standard bincount.
if not not_axis:
return tf.math.bincount(
arr,
weights=weights,
minlength=minlength,
maxlength=maxlength,
dtype=dtype)
# Move dims in ~axis to the left, so we can tf.map_fn bincount over them,
# Producing counts for every index I in ~axis.
# Thus, flat_arr is not totally flat, it just has the dims in ~axis
# flattened.
flat_arr = _move_dims_to_flat_end(arr, not_axis, arr_ndims, right_end=False)
# tf.map_fn over dim 0.
if weights is None:
def one_bincount(arr_slice):
return tf.math.bincount(
arr_slice,
weights=None,
minlength=minlength,
maxlength=maxlength,
dtype=dtype)
flat_counts = tf.map_fn(one_bincount, elems=flat_arr, dtype=dtype)
else:
weights = tf.convert_to_tensor(value=weights, name='weights')
_get_static_ndims(weights, expect_static=True, expect_ndims=arr_ndims)
flat_weights = _move_dims_to_flat_end(
weights, not_axis, arr_ndims, right_end=False)
def one_bincount(arr_and_weights_slices):
arr_slice, weights_slice = arr_and_weights_slices
return tf.math.bincount(
arr_slice,
weights=weights_slice,
minlength=minlength,
maxlength=maxlength,
dtype=dtype)
flat_counts = tf.map_fn(
one_bincount, elems=[flat_arr, flat_weights], dtype=weights.dtype)
# flat_counts.shape = [prod(~axis), K], because map_fn stacked on axis 0.
# bincount needs to have the K bins in axis 0, so transpose...
flat_counts_t = tf.transpose(a=flat_counts, perm=[1, 0])
# Throw in this assert, to ensure shape assumptions are correct.
_get_static_ndims(flat_counts_t, expect_ndims=2, expect_static=True)
# not_axis_shape = arr.shape[~axis]
not_axis_shape = tf.gather(tf.shape(input=arr), indices=not_axis)
# The first index of flat_counts_t indexes bins 0,..,K-1, the rest are ~axis
out_shape = tf.concat([[-1], not_axis_shape], axis=0)
return tf.reshape(flat_counts_t, out_shape)
|
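A small worked example, assuming the function is exported as `tfp.stats.count_integers`; with `axis=[0]` each column is binned independently, giving shape `[K] + arr.shape[1:]`.

```python
import tensorflow as tf
import tensorflow_probability as tfp

arr = tf.constant([[0, 1],
                   [0, 2],
                   [1, 2]], dtype=tf.int32)

counts = tfp.stats.count_integers(arr, axis=[0])  # shape [3, 2]: K=3 bins, 2 columns
# counts[:, 0] == [2, 1, 0]   column 0 holds two 0s and one 1
# counts[:, 1] == [0, 1, 2]   column 1 holds one 1 and two 2s
```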
[
"Counts",
"the",
"number",
"of",
"occurrences",
"of",
"each",
"value",
"in",
"an",
"integer",
"array",
"arr",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L39-L155
|
[
"def",
"count_integers",
"(",
"arr",
",",
"weights",
"=",
"None",
",",
"minlength",
"=",
"None",
",",
"maxlength",
"=",
"None",
",",
"axis",
"=",
"None",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'count_integers'",
",",
"values",
"=",
"[",
"arr",
",",
"weights",
",",
"minlength",
",",
"maxlength",
",",
"axis",
"]",
")",
":",
"if",
"axis",
"is",
"None",
":",
"return",
"tf",
".",
"math",
".",
"bincount",
"(",
"arr",
",",
"weights",
"=",
"weights",
",",
"minlength",
"=",
"minlength",
",",
"maxlength",
"=",
"maxlength",
",",
"dtype",
"=",
"dtype",
")",
"arr",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"arr",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"'arr'",
")",
"arr_ndims",
"=",
"_get_static_ndims",
"(",
"arr",
",",
"expect_static",
"=",
"True",
")",
"axis",
"=",
"_make_static_axis_non_negative_list",
"(",
"axis",
",",
"arr_ndims",
")",
"# ~axis from docstring. Dims in arr that are not in axis.",
"not_axis",
"=",
"sorted",
"(",
"set",
"(",
"range",
"(",
"arr_ndims",
")",
")",
".",
"difference",
"(",
"axis",
")",
")",
"# If we're reducing over everything, just use standard bincount.",
"if",
"not",
"not_axis",
":",
"return",
"tf",
".",
"math",
".",
"bincount",
"(",
"arr",
",",
"weights",
"=",
"weights",
",",
"minlength",
"=",
"minlength",
",",
"maxlength",
"=",
"maxlength",
",",
"dtype",
"=",
"dtype",
")",
"# Move dims in ~axis to the left, so we can tf.map_fn bincount over them,",
"# Producing counts for every index I in ~axis.",
"# Thus, flat_arr is not totally flat, it just has the dims in ~axis",
"# flattened.",
"flat_arr",
"=",
"_move_dims_to_flat_end",
"(",
"arr",
",",
"not_axis",
",",
"arr_ndims",
",",
"right_end",
"=",
"False",
")",
"# tf.map_fn over dim 0.",
"if",
"weights",
"is",
"None",
":",
"def",
"one_bincount",
"(",
"arr_slice",
")",
":",
"return",
"tf",
".",
"math",
".",
"bincount",
"(",
"arr_slice",
",",
"weights",
"=",
"None",
",",
"minlength",
"=",
"minlength",
",",
"maxlength",
"=",
"maxlength",
",",
"dtype",
"=",
"dtype",
")",
"flat_counts",
"=",
"tf",
".",
"map_fn",
"(",
"one_bincount",
",",
"elems",
"=",
"flat_arr",
",",
"dtype",
"=",
"dtype",
")",
"else",
":",
"weights",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"weights",
",",
"name",
"=",
"'weights'",
")",
"_get_static_ndims",
"(",
"weights",
",",
"expect_static",
"=",
"True",
",",
"expect_ndims",
"=",
"arr_ndims",
")",
"flat_weights",
"=",
"_move_dims_to_flat_end",
"(",
"weights",
",",
"not_axis",
",",
"arr_ndims",
",",
"right_end",
"=",
"False",
")",
"def",
"one_bincount",
"(",
"arr_and_weights_slices",
")",
":",
"arr_slice",
",",
"weights_slice",
"=",
"arr_and_weights_slices",
"return",
"tf",
".",
"math",
".",
"bincount",
"(",
"arr_slice",
",",
"weights",
"=",
"weights_slice",
",",
"minlength",
"=",
"minlength",
",",
"maxlength",
"=",
"maxlength",
",",
"dtype",
"=",
"dtype",
")",
"flat_counts",
"=",
"tf",
".",
"map_fn",
"(",
"one_bincount",
",",
"elems",
"=",
"[",
"flat_arr",
",",
"flat_weights",
"]",
",",
"dtype",
"=",
"weights",
".",
"dtype",
")",
"# flat_counts.shape = [prod(~axis), K], because map_fn stacked on axis 0.",
"# bincount needs to have the K bins in axis 0, so transpose...",
"flat_counts_t",
"=",
"tf",
".",
"transpose",
"(",
"a",
"=",
"flat_counts",
",",
"perm",
"=",
"[",
"1",
",",
"0",
"]",
")",
"# Throw in this assert, to ensure shape assumptions are correct.",
"_get_static_ndims",
"(",
"flat_counts_t",
",",
"expect_ndims",
"=",
"2",
",",
"expect_static",
"=",
"True",
")",
"# not_axis_shape = arr.shape[~axis]",
"not_axis_shape",
"=",
"tf",
".",
"gather",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"arr",
")",
",",
"indices",
"=",
"not_axis",
")",
"# The first index of flat_counts_t indexes bins 0,..,K-1, the rest are ~axis",
"out_shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"[",
"-",
"1",
"]",
",",
"not_axis_shape",
"]",
",",
"axis",
"=",
"0",
")",
"return",
"tf",
".",
"reshape",
"(",
"flat_counts_t",
",",
"out_shape",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
find_bins
|
Bin values into discrete intervals.
Given `edges = [c0, ..., cK]`, defining intervals
`I0 = [c0, c1)`, `I1 = [c1, c2)`, ..., `I_{K-1} = [c_{K-1}, cK]`,
This function returns `bins`, such that:
`edges[bins[i]] <= x[i] < edges[bins[i] + 1]`.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`.
edges: `Tensor` of same `dtype` as `x`. The first dimension indexes edges
of intervals. Must either be `1-D` or have
`x.shape[1:] == edges.shape[1:]`. If `rank(edges) > 1`, `edges[k]`
designates a shape `edges.shape[1:]` `Tensor` of bin edges for the
corresponding dimensions of `x`.
extend_lower_interval: Python `bool`. If `True`, extend the lowest
interval `I0` to `(-inf, c1]`.
extend_upper_interval: Python `bool`. If `True`, extend the upper
interval `I_{K-1}` to `[c_{K-1}, +inf)`.
dtype: The output type (`int32` or `int64`). `Default value:` `x.dtype`.
    This affects the output values when `x` is below/above the intervals,
which will be `-1/K+1` for `int` types and `NaN` for `float`s.
At indices where `x` is `NaN`, the output values will be `0` for `int`
types and `NaN` for floats.
name: A Python string name to prepend to created ops. Default: 'find_bins'
Returns:
bins: `Tensor` with same `shape` as `x` and `dtype`.
Has whole number values. `bins[i] = k` means the `x[i]` falls into the
`kth` bin, ie, `edges[bins[i]] <= x[i] < edges[bins[i] + 1]`.
Raises:
ValueError: If `edges.shape[0]` is determined to be less than 2.
#### Examples
Cut a `1-D` array
```python
x = [0., 5., 6., 10., 20.]
edges = [0., 5., 10.]
tfp.stats.find_bins(x, edges)
==> [0., 0., 1., 1., np.nan]
```
Cut `x` into its deciles
```python
x = tf.random_uniform(shape=(100, 200))
decile_edges = tfp.stats.quantiles(x, num_quantiles=10)
bins = tfp.stats.find_bins(x, edges=decile_edges)
bins.shape
==> (100, 200)
tf.reduce_mean(bins == 0.)
==> approximately 0.1
tf.reduce_mean(bins == 1.)
==> approximately 0.1
```
|
tensorflow_probability/python/stats/quantiles.py
|
def find_bins(x,
edges,
extend_lower_interval=False,
extend_upper_interval=False,
dtype=None,
name=None):
"""Bin values into discrete intervals.
Given `edges = [c0, ..., cK]`, defining intervals
`I0 = [c0, c1)`, `I1 = [c1, c2)`, ..., `I_{K-1} = [c_{K-1}, cK]`,
This function returns `bins`, such that:
`edges[bins[i]] <= x[i] < edges[bins[i] + 1]`.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`.
edges: `Tensor` of same `dtype` as `x`. The first dimension indexes edges
of intervals. Must either be `1-D` or have
`x.shape[1:] == edges.shape[1:]`. If `rank(edges) > 1`, `edges[k]`
designates a shape `edges.shape[1:]` `Tensor` of bin edges for the
corresponding dimensions of `x`.
extend_lower_interval: Python `bool`. If `True`, extend the lowest
interval `I0` to `(-inf, c1]`.
extend_upper_interval: Python `bool`. If `True`, extend the upper
interval `I_{K-1}` to `[c_{K-1}, +inf)`.
dtype: The output type (`int32` or `int64`). `Default value:` `x.dtype`.
      This affects the output values when `x` is below/above the intervals,
which will be `-1/K+1` for `int` types and `NaN` for `float`s.
At indices where `x` is `NaN`, the output values will be `0` for `int`
types and `NaN` for floats.
name: A Python string name to prepend to created ops. Default: 'find_bins'
Returns:
bins: `Tensor` with same `shape` as `x` and `dtype`.
Has whole number values. `bins[i] = k` means the `x[i]` falls into the
`kth` bin, ie, `edges[bins[i]] <= x[i] < edges[bins[i] + 1]`.
Raises:
ValueError: If `edges.shape[0]` is determined to be less than 2.
#### Examples
Cut a `1-D` array
```python
x = [0., 5., 6., 10., 20.]
edges = [0., 5., 10.]
tfp.stats.find_bins(x, edges)
==> [0., 0., 1., 1., np.nan]
```
Cut `x` into its deciles
```python
x = tf.random_uniform(shape=(100, 200))
decile_edges = tfp.stats.quantiles(x, num_quantiles=10)
bins = tfp.stats.find_bins(x, edges=decile_edges)
bins.shape
==> (100, 200)
tf.reduce_mean(bins == 0.)
==> approximately 0.1
tf.reduce_mean(bins == 1.)
==> approximately 0.1
```
"""
# TFP users may be surprised to see the "action" in the leftmost dim of
# edges, rather than the rightmost (event) dim. Why?
# 1. Most likely you created edges by getting quantiles over samples, and
# quantile/percentile return these edges in the leftmost (sample) dim.
# 2. Say you have event_shape = [5], then we expect the bin will be different
# for all 5 events, so the index of the bin should not be in the event dim.
with tf.compat.v1.name_scope(
name, default_name='find_bins', values=[x, edges]):
in_type = dtype_util.common_dtype([x, edges],
preferred_dtype=tf.float32)
edges = tf.convert_to_tensor(value=edges, name='edges', dtype=in_type)
x = tf.convert_to_tensor(value=x, name='x', dtype=in_type)
if (tf.compat.dimension_value(edges.shape[0]) is not None and
tf.compat.dimension_value(edges.shape[0]) < 2):
raise ValueError(
'First dimension of `edges` must have length > 1 to index 1 or '
'more bin. Found: {}'.format(edges.shape))
flattening_x = edges.shape.ndims == 1 and x.shape.ndims > 1
if flattening_x:
x_orig_shape = tf.shape(input=x)
x = tf.reshape(x, [-1])
if dtype is None:
dtype = in_type
dtype = tf.as_dtype(dtype)
# Move first dims into the rightmost.
x_permed = distribution_util.rotate_transpose(x, shift=-1)
edges_permed = distribution_util.rotate_transpose(edges, shift=-1)
# If...
# x_permed = [0, 1, 6., 10]
# edges = [0, 5, 10.]
# ==> almost_output = [0, 1, 2, 2]
searchsorted_type = dtype if dtype in [tf.int32, tf.int64] else None
almost_output_permed = tf.searchsorted(
sorted_sequence=edges_permed,
values=x_permed,
side='right',
out_type=searchsorted_type)
# Move the rightmost dims back to the leftmost.
almost_output = tf.cast(
distribution_util.rotate_transpose(almost_output_permed, shift=1),
dtype)
# In above example, we want [0, 0, 1, 1], so correct this here.
bins = tf.clip_by_value(almost_output - 1, tf.cast(0, dtype),
tf.cast(tf.shape(input=edges)[0] - 2, dtype))
if not extend_lower_interval:
low_fill = np.nan if dtype.is_floating else -1
bins = tf.where(x < tf.expand_dims(edges[0], 0),
tf.fill(tf.shape(input=x), tf.cast(low_fill, dtype)),
bins)
if not extend_upper_interval:
up_fill = np.nan if dtype.is_floating else tf.shape(input=edges)[0] - 1
bins = tf.where(x > tf.expand_dims(edges[-1], 0),
tf.fill(tf.shape(input=x), tf.cast(up_fill, dtype)), bins)
if flattening_x:
bins = tf.reshape(bins, x_orig_shape)
return bins
|
def find_bins(x,
edges,
extend_lower_interval=False,
extend_upper_interval=False,
dtype=None,
name=None):
"""Bin values into discrete intervals.
Given `edges = [c0, ..., cK]`, defining intervals
`I0 = [c0, c1)`, `I1 = [c1, c2)`, ..., `I_{K-1} = [c_{K-1}, cK]`,
This function returns `bins`, such that:
`edges[bins[i]] <= x[i] < edges[bins[i] + 1]`.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`.
edges: `Tensor` of same `dtype` as `x`. The first dimension indexes edges
of intervals. Must either be `1-D` or have
`x.shape[1:] == edges.shape[1:]`. If `rank(edges) > 1`, `edges[k]`
designates a shape `edges.shape[1:]` `Tensor` of bin edges for the
corresponding dimensions of `x`.
extend_lower_interval: Python `bool`. If `True`, extend the lowest
interval `I0` to `(-inf, c1]`.
extend_upper_interval: Python `bool`. If `True`, extend the upper
interval `I_{K-1}` to `[c_{K-1}, +inf)`.
dtype: The output type (`int32` or `int64`). `Default value:` `x.dtype`.
      This affects the output values when `x` is below/above the intervals,
which will be `-1/K+1` for `int` types and `NaN` for `float`s.
At indices where `x` is `NaN`, the output values will be `0` for `int`
types and `NaN` for floats.
name: A Python string name to prepend to created ops. Default: 'find_bins'
Returns:
bins: `Tensor` with same `shape` as `x` and `dtype`.
Has whole number values. `bins[i] = k` means the `x[i]` falls into the
`kth` bin, ie, `edges[bins[i]] <= x[i] < edges[bins[i] + 1]`.
Raises:
ValueError: If `edges.shape[0]` is determined to be less than 2.
#### Examples
Cut a `1-D` array
```python
x = [0., 5., 6., 10., 20.]
edges = [0., 5., 10.]
tfp.stats.find_bins(x, edges)
==> [0., 0., 1., 1., np.nan]
```
Cut `x` into its deciles
```python
x = tf.random_uniform(shape=(100, 200))
decile_edges = tfp.stats.quantiles(x, num_quantiles=10)
bins = tfp.stats.find_bins(x, edges=decile_edges)
bins.shape
==> (100, 200)
tf.reduce_mean(bins == 0.)
==> approximately 0.1
tf.reduce_mean(bins == 1.)
==> approximately 0.1
```
"""
# TFP users may be surprised to see the "action" in the leftmost dim of
# edges, rather than the rightmost (event) dim. Why?
# 1. Most likely you created edges by getting quantiles over samples, and
# quantile/percentile return these edges in the leftmost (sample) dim.
# 2. Say you have event_shape = [5], then we expect the bin will be different
# for all 5 events, so the index of the bin should not be in the event dim.
with tf.compat.v1.name_scope(
name, default_name='find_bins', values=[x, edges]):
in_type = dtype_util.common_dtype([x, edges],
preferred_dtype=tf.float32)
edges = tf.convert_to_tensor(value=edges, name='edges', dtype=in_type)
x = tf.convert_to_tensor(value=x, name='x', dtype=in_type)
if (tf.compat.dimension_value(edges.shape[0]) is not None and
tf.compat.dimension_value(edges.shape[0]) < 2):
raise ValueError(
'First dimension of `edges` must have length > 1 to index 1 or '
'more bin. Found: {}'.format(edges.shape))
flattening_x = edges.shape.ndims == 1 and x.shape.ndims > 1
if flattening_x:
x_orig_shape = tf.shape(input=x)
x = tf.reshape(x, [-1])
if dtype is None:
dtype = in_type
dtype = tf.as_dtype(dtype)
# Move first dims into the rightmost.
x_permed = distribution_util.rotate_transpose(x, shift=-1)
edges_permed = distribution_util.rotate_transpose(edges, shift=-1)
# If...
# x_permed = [0, 1, 6., 10]
# edges = [0, 5, 10.]
# ==> almost_output = [0, 1, 2, 2]
searchsorted_type = dtype if dtype in [tf.int32, tf.int64] else None
almost_output_permed = tf.searchsorted(
sorted_sequence=edges_permed,
values=x_permed,
side='right',
out_type=searchsorted_type)
# Move the rightmost dims back to the leftmost.
almost_output = tf.cast(
distribution_util.rotate_transpose(almost_output_permed, shift=1),
dtype)
# In above example, we want [0, 0, 1, 1], so correct this here.
bins = tf.clip_by_value(almost_output - 1, tf.cast(0, dtype),
tf.cast(tf.shape(input=edges)[0] - 2, dtype))
if not extend_lower_interval:
low_fill = np.nan if dtype.is_floating else -1
bins = tf.where(x < tf.expand_dims(edges[0], 0),
tf.fill(tf.shape(input=x), tf.cast(low_fill, dtype)),
bins)
if not extend_upper_interval:
up_fill = np.nan if dtype.is_floating else tf.shape(input=edges)[0] - 1
bins = tf.where(x > tf.expand_dims(edges[-1], 0),
tf.fill(tf.shape(input=x), tf.cast(up_fill, dtype)), bins)
if flattening_x:
bins = tf.reshape(bins, x_orig_shape)
return bins
|
[
"Bin",
"values",
"into",
"discrete",
"intervals",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L158-L289
|
[
"def",
"find_bins",
"(",
"x",
",",
"edges",
",",
"extend_lower_interval",
"=",
"False",
",",
"extend_upper_interval",
"=",
"False",
",",
"dtype",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"# TFP users may be surprised to see the \"action\" in the leftmost dim of",
"# edges, rather than the rightmost (event) dim. Why?",
"# 1. Most likely you created edges by getting quantiles over samples, and",
"# quantile/percentile return these edges in the leftmost (sample) dim.",
"# 2. Say you have event_shape = [5], then we expect the bin will be different",
"# for all 5 events, so the index of the bin should not be in the event dim.",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"default_name",
"=",
"'find_bins'",
",",
"values",
"=",
"[",
"x",
",",
"edges",
"]",
")",
":",
"in_type",
"=",
"dtype_util",
".",
"common_dtype",
"(",
"[",
"x",
",",
"edges",
"]",
",",
"preferred_dtype",
"=",
"tf",
".",
"float32",
")",
"edges",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"edges",
",",
"name",
"=",
"'edges'",
",",
"dtype",
"=",
"in_type",
")",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"'x'",
",",
"dtype",
"=",
"in_type",
")",
"if",
"(",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"edges",
".",
"shape",
"[",
"0",
"]",
")",
"is",
"not",
"None",
"and",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"edges",
".",
"shape",
"[",
"0",
"]",
")",
"<",
"2",
")",
":",
"raise",
"ValueError",
"(",
"'First dimension of `edges` must have length > 1 to index 1 or '",
"'more bin. Found: {}'",
".",
"format",
"(",
"edges",
".",
"shape",
")",
")",
"flattening_x",
"=",
"edges",
".",
"shape",
".",
"ndims",
"==",
"1",
"and",
"x",
".",
"shape",
".",
"ndims",
">",
"1",
"if",
"flattening_x",
":",
"x_orig_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"[",
"-",
"1",
"]",
")",
"if",
"dtype",
"is",
"None",
":",
"dtype",
"=",
"in_type",
"dtype",
"=",
"tf",
".",
"as_dtype",
"(",
"dtype",
")",
"# Move first dims into the rightmost.",
"x_permed",
"=",
"distribution_util",
".",
"rotate_transpose",
"(",
"x",
",",
"shift",
"=",
"-",
"1",
")",
"edges_permed",
"=",
"distribution_util",
".",
"rotate_transpose",
"(",
"edges",
",",
"shift",
"=",
"-",
"1",
")",
"# If...",
"# x_permed = [0, 1, 6., 10]",
"# edges = [0, 5, 10.]",
"# ==> almost_output = [0, 1, 2, 2]",
"searchsorted_type",
"=",
"dtype",
"if",
"dtype",
"in",
"[",
"tf",
".",
"int32",
",",
"tf",
".",
"int64",
"]",
"else",
"None",
"almost_output_permed",
"=",
"tf",
".",
"searchsorted",
"(",
"sorted_sequence",
"=",
"edges_permed",
",",
"values",
"=",
"x_permed",
",",
"side",
"=",
"'right'",
",",
"out_type",
"=",
"searchsorted_type",
")",
"# Move the rightmost dims back to the leftmost.",
"almost_output",
"=",
"tf",
".",
"cast",
"(",
"distribution_util",
".",
"rotate_transpose",
"(",
"almost_output_permed",
",",
"shift",
"=",
"1",
")",
",",
"dtype",
")",
"# In above example, we want [0, 0, 1, 1], so correct this here.",
"bins",
"=",
"tf",
".",
"clip_by_value",
"(",
"almost_output",
"-",
"1",
",",
"tf",
".",
"cast",
"(",
"0",
",",
"dtype",
")",
",",
"tf",
".",
"cast",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"edges",
")",
"[",
"0",
"]",
"-",
"2",
",",
"dtype",
")",
")",
"if",
"not",
"extend_lower_interval",
":",
"low_fill",
"=",
"np",
".",
"nan",
"if",
"dtype",
".",
"is_floating",
"else",
"-",
"1",
"bins",
"=",
"tf",
".",
"where",
"(",
"x",
"<",
"tf",
".",
"expand_dims",
"(",
"edges",
"[",
"0",
"]",
",",
"0",
")",
",",
"tf",
".",
"fill",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
",",
"tf",
".",
"cast",
"(",
"low_fill",
",",
"dtype",
")",
")",
",",
"bins",
")",
"if",
"not",
"extend_upper_interval",
":",
"up_fill",
"=",
"np",
".",
"nan",
"if",
"dtype",
".",
"is_floating",
"else",
"tf",
".",
"shape",
"(",
"input",
"=",
"edges",
")",
"[",
"0",
"]",
"-",
"1",
"bins",
"=",
"tf",
".",
"where",
"(",
"x",
">",
"tf",
".",
"expand_dims",
"(",
"edges",
"[",
"-",
"1",
"]",
",",
"0",
")",
",",
"tf",
".",
"fill",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
",",
"tf",
".",
"cast",
"(",
"up_fill",
",",
"dtype",
")",
")",
",",
"bins",
")",
"if",
"flattening_x",
":",
"bins",
"=",
"tf",
".",
"reshape",
"(",
"bins",
",",
"x_orig_shape",
")",
"return",
"bins"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
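For quick reference, the binning rule documented in the `find_bins` row above can be reproduced with plain NumPy. This is an illustrative sketch, not the TFP implementation: the helper name `find_bins_np` and the sample values are made up, and it only handles `1-D` `edges` (no batched edges, no `rotate_transpose` machinery, no explicit output `dtype`).
```python
import numpy as np

def find_bins_np(x, edges, extend_lower_interval=False, extend_upper_interval=False):
  """NumPy sketch of `edges[bins[i]] <= x[i] < edges[bins[i] + 1]`."""
  x = np.asarray(x, dtype=np.float64)
  edges = np.asarray(edges, dtype=np.float64)
  # searchsorted(side='right'), shift down by one, clip into [0, K - 2]; this
  # mirrors the tf.searchsorted / clip_by_value steps in the code field above.
  bins = np.clip(np.searchsorted(edges, x, side='right') - 1.0, 0, len(edges) - 2)
  if not extend_lower_interval:
    bins = np.where(x < edges[0], np.nan, bins)
  if not extend_upper_interval:
    bins = np.where(x > edges[-1], np.nan, bins)
  return bins

print(find_bins_np([-1., 0., 3., 9.5, 12.], [0., 5., 10.]))
# ==> [nan  0.  0.  1. nan]
```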
test
|
histogram
|
Count how often `x` falls in intervals defined by `edges`.
Given `edges = [c0, ..., cK]`, defining intervals
`I0 = [c0, c1)`, `I1 = [c1, c2)`, ..., `I_{K-1} = [c_{K-1}, cK]`,
This function counts how often `x` falls into each interval.
Values of `x` outside of the intervals cause errors. Consider using
`extend_lower_interval`, `extend_upper_interval` to deal with this.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not
`None`, must have statically known number of dimensions. The
`axis` kwarg determines which dimensions index iid samples.
Other dimensions of `x` index "events" for which we will compute different
histograms.
edges: `Tensor` of same `dtype` as `x`. The first dimension indexes edges
of intervals. Must either be `1-D` or have `edges.shape[1:]` the same
as the dimensions of `x` excluding `axis`.
If `rank(edges) > 1`, `edges[k]` designates a shape `edges.shape[1:]`
`Tensor` of interval edges for the corresponding dimensions of `x`.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant
values. The axis in `x` that index iid samples.
`Default value:` `None` (treat every dimension as sample dimension).
extend_lower_interval: Python `bool`. If `True`, extend the lowest
interval `I0` to `(-inf, c1]`.
extend_upper_interval: Python `bool`. If `True`, extend the upper
interval `I_{K-1}` to `[c_{K-1}, +inf)`.
dtype: The output type (`int32` or `int64`). `Default value:` `x.dtype`.
name: A Python string name to prepend to created ops.
`Default value:` 'histogram'
Returns:
counts: `Tensor` of type `dtype` and, with
`~axis = [i for i in range(arr.ndim) if i not in axis]`,
`counts.shape = [edges.shape[0]] + x.shape[~axis]`.
With `I` a multi-index into `~axis`, `counts[k][I]` is the number of times
event(s) fell into the `kth` interval of `edges`.
#### Examples
```python
# x.shape = [1000, 2]
# x[:, 0] ~ Uniform(0, 1), x[:, 1] ~ Uniform(1, 2).
x = tf.stack([tf.random_uniform([1000]), 1 + tf.random_uniform([1000])],
axis=-1)
# edges ==> bins [0, 0.5), [0.5, 1.0), [1.0, 1.5), [1.5, 2.0].
edges = [0., 0.5, 1.0, 1.5, 2.0]
tfp.stats.histogram(x, edges)
==> approximately [500, 500, 500, 500]
tfp.stats.histogram(x, edges, axis=0)
==> approximately [[500, 500, 0, 0], [0, 0, 500, 500]]
```
|
tensorflow_probability/python/stats/quantiles.py
|
def histogram(x,
edges,
axis=None,
extend_lower_interval=False,
extend_upper_interval=False,
dtype=None,
name=None):
"""Count how often `x` falls in intervals defined by `edges`.
Given `edges = [c0, ..., cK]`, defining intervals
`I0 = [c0, c1)`, `I1 = [c1, c2)`, ..., `I_{K-1} = [c_{K-1}, cK]`,
This function counts how often `x` falls into each interval.
Values of `x` outside of the intervals cause errors. Consider using
`extend_lower_interval`, `extend_upper_interval` to deal with this.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not
`None`, must have statically known number of dimensions. The
`axis` kwarg determines which dimensions index iid samples.
Other dimensions of `x` index "events" for which we will compute different
histograms.
edges: `Tensor` of same `dtype` as `x`. The first dimension indexes edges
of intervals. Must either be `1-D` or have `edges.shape[1:]` the same
as the dimensions of `x` excluding `axis`.
If `rank(edges) > 1`, `edges[k]` designates a shape `edges.shape[1:]`
`Tensor` of interval edges for the corresponding dimensions of `x`.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant
values. The axis in `x` that index iid samples.
`Default value:` `None` (treat every dimension as sample dimension).
extend_lower_interval: Python `bool`. If `True`, extend the lowest
interval `I0` to `(-inf, c1]`.
extend_upper_interval: Python `bool`. If `True`, extend the upper
interval `I_{K-1}` to `[c_{K-1}, +inf)`.
dtype: The output type (`int32` or `int64`). `Default value:` `x.dtype`.
name: A Python string name to prepend to created ops.
`Default value:` 'histogram'
Returns:
counts: `Tensor` of type `dtype` and, with
`~axis = [i for i in range(arr.ndim) if i not in axis]`,
`counts.shape = [edges.shape[0]] + x.shape[~axis]`.
With `I` a multi-index into `~axis`, `counts[k][I]` is the number of times
event(s) fell into the `kth` interval of `edges`.
#### Examples
```python
# x.shape = [1000, 2]
# x[:, 0] ~ Uniform(0, 1), x[:, 1] ~ Uniform(1, 2).
x = tf.stack([tf.random_uniform([1000]), 1 + tf.random_uniform([1000])],
axis=-1)
# edges ==> bins [0, 0.5), [0.5, 1.0), [1.0, 1.5), [1.5, 2.0].
edges = [0., 0.5, 1.0, 1.5, 2.0]
tfp.stats.histogram(x, edges)
==> approximately [500, 500, 500, 500]
tfp.stats.histogram(x, edges, axis=0)
==> approximately [[500, 500, 0, 0], [0, 0, 500, 500]]
```
"""
with tf.compat.v1.name_scope(name, 'histogram', values=[x, edges, axis]):
# Tensor conversions.
in_dtype = dtype_util.common_dtype([x, edges], preferred_dtype=tf.float32)
x = tf.convert_to_tensor(value=x, name='x', dtype=in_dtype)
edges = tf.convert_to_tensor(value=edges, name='edges', dtype=in_dtype)
# Move dims in axis to the left end as one flattened dim.
# After this, x.shape = [n_samples] + E.
if axis is None:
x = tf.reshape(x, shape=[-1])
else:
x_ndims = _get_static_ndims(
x, expect_static=True, expect_ndims_at_least=1)
axis = _make_static_axis_non_negative_list(axis, x_ndims)
if not axis:
raise ValueError('`axis` cannot be empty. Found: {}'.format(axis))
x = _move_dims_to_flat_end(x, axis, x_ndims, right_end=False)
# bins.shape = x.shape = [n_samples] + E,
# and bins[i] is a shape E Tensor of the bins that sample `i` fell into.
# E is the "event shape", which is [] if axis is None.
bins = find_bins(
x,
edges=edges,
# If not extending intervals, then values outside the edges will return
# -1, which gives an error when fed to bincount.
extend_lower_interval=extend_lower_interval,
extend_upper_interval=extend_upper_interval,
dtype=tf.int32)
# TODO(b/124015136) Use standard tf.math.bincount once it supports `axis`.
counts = count_integers(
bins,
# Ensure we get correct output, even if x did not fall into every bin
minlength=tf.shape(input=edges)[0] - 1,
maxlength=tf.shape(input=edges)[0] - 1,
axis=0,
dtype=dtype or in_dtype)
n_edges = tf.compat.dimension_value(edges.shape[0])
if n_edges is not None:
counts.set_shape(
tf.TensorShape([n_edges - 1]).concatenate(counts.shape[1:]))
return counts
|
def histogram(x,
edges,
axis=None,
extend_lower_interval=False,
extend_upper_interval=False,
dtype=None,
name=None):
"""Count how often `x` falls in intervals defined by `edges`.
Given `edges = [c0, ..., cK]`, defining intervals
`I0 = [c0, c1)`, `I1 = [c1, c2)`, ..., `I_{K-1} = [c_{K-1}, cK]`,
This function counts how often `x` falls into each interval.
Values of `x` outside of the intervals cause errors. Consider using
`extend_lower_interval`, `extend_upper_interval` to deal with this.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not
`None`, must have statically known number of dimensions. The
`axis` kwarg determines which dimensions index iid samples.
Other dimensions of `x` index "events" for which we will compute different
histograms.
edges: `Tensor` of same `dtype` as `x`. The first dimension indexes edges
of intervals. Must either be `1-D` or have `edges.shape[1:]` the same
as the dimensions of `x` excluding `axis`.
If `rank(edges) > 1`, `edges[k]` designates a shape `edges.shape[1:]`
`Tensor` of interval edges for the corresponding dimensions of `x`.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant
values. The axis in `x` that index iid samples.
`Default value:` `None` (treat every dimension as sample dimension).
extend_lower_interval: Python `bool`. If `True`, extend the lowest
interval `I0` to `(-inf, c1]`.
extend_upper_interval: Python `bool`. If `True`, extend the upper
interval `I_{K-1}` to `[c_{K-1}, +inf)`.
dtype: The output type (`int32` or `int64`). `Default value:` `x.dtype`.
name: A Python string name to prepend to created ops.
`Default value:` 'histogram'
Returns:
counts: `Tensor` of type `dtype` and, with
`~axis = [i for i in range(arr.ndim) if i not in axis]`,
`counts.shape = [edges.shape[0]] + x.shape[~axis]`.
With `I` a multi-index into `~axis`, `counts[k][I]` is the number of times
event(s) fell into the `kth` interval of `edges`.
#### Examples
```python
# x.shape = [1000, 2]
# x[:, 0] ~ Uniform(0, 1), x[:, 1] ~ Uniform(1, 2).
x = tf.stack([tf.random_uniform([1000]), 1 + tf.random_uniform([1000])],
axis=-1)
# edges ==> bins [0, 0.5), [0.5, 1.0), [1.0, 1.5), [1.5, 2.0].
edges = [0., 0.5, 1.0, 1.5, 2.0]
tfp.stats.histogram(x, edges)
==> approximately [500, 500, 500, 500]
tfp.stats.histogram(x, edges, axis=0)
==> approximately [[500, 500, 0, 0], [0, 0, 500, 500]]
```
"""
with tf.compat.v1.name_scope(name, 'histogram', values=[x, edges, axis]):
# Tensor conversions.
in_dtype = dtype_util.common_dtype([x, edges], preferred_dtype=tf.float32)
x = tf.convert_to_tensor(value=x, name='x', dtype=in_dtype)
edges = tf.convert_to_tensor(value=edges, name='edges', dtype=in_dtype)
# Move dims in axis to the left end as one flattened dim.
# After this, x.shape = [n_samples] + E.
if axis is None:
x = tf.reshape(x, shape=[-1])
else:
x_ndims = _get_static_ndims(
x, expect_static=True, expect_ndims_at_least=1)
axis = _make_static_axis_non_negative_list(axis, x_ndims)
if not axis:
raise ValueError('`axis` cannot be empty. Found: {}'.format(axis))
x = _move_dims_to_flat_end(x, axis, x_ndims, right_end=False)
# bins.shape = x.shape = [n_samples] + E,
# and bins[i] is a shape E Tensor of the bins that sample `i` fell into.
# E is the "event shape", which is [] if axis is None.
bins = find_bins(
x,
edges=edges,
# If not extending intervals, then values outside the edges will return
# -1, which gives an error when fed to bincount.
extend_lower_interval=extend_lower_interval,
extend_upper_interval=extend_upper_interval,
dtype=tf.int32)
# TODO(b/124015136) Use standard tf.math.bincount once it supports `axis`.
counts = count_integers(
bins,
# Ensure we get correct output, even if x did not fall into every bin
minlength=tf.shape(input=edges)[0] - 1,
maxlength=tf.shape(input=edges)[0] - 1,
axis=0,
dtype=dtype or in_dtype)
n_edges = tf.compat.dimension_value(edges.shape[0])
if n_edges is not None:
counts.set_shape(
tf.TensorShape([n_edges - 1]).concatenate(counts.shape[1:]))
return counts
|
[
"Count",
"how",
"often",
"x",
"falls",
"in",
"intervals",
"defined",
"by",
"edges",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L292-L400
|
[
"def",
"histogram",
"(",
"x",
",",
"edges",
",",
"axis",
"=",
"None",
",",
"extend_lower_interval",
"=",
"False",
",",
"extend_upper_interval",
"=",
"False",
",",
"dtype",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'histogram'",
",",
"values",
"=",
"[",
"x",
",",
"edges",
",",
"axis",
"]",
")",
":",
"# Tensor conversions.",
"in_dtype",
"=",
"dtype_util",
".",
"common_dtype",
"(",
"[",
"x",
",",
"edges",
"]",
",",
"preferred_dtype",
"=",
"tf",
".",
"float32",
")",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"'x'",
",",
"dtype",
"=",
"in_dtype",
")",
"edges",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"edges",
",",
"name",
"=",
"'edges'",
",",
"dtype",
"=",
"in_dtype",
")",
"# Move dims in axis to the left end as one flattened dim.",
"# After this, x.shape = [n_samples] + E.",
"if",
"axis",
"is",
"None",
":",
"x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"shape",
"=",
"[",
"-",
"1",
"]",
")",
"else",
":",
"x_ndims",
"=",
"_get_static_ndims",
"(",
"x",
",",
"expect_static",
"=",
"True",
",",
"expect_ndims_at_least",
"=",
"1",
")",
"axis",
"=",
"_make_static_axis_non_negative_list",
"(",
"axis",
",",
"x_ndims",
")",
"if",
"not",
"axis",
":",
"raise",
"ValueError",
"(",
"'`axis` cannot be empty. Found: {}'",
".",
"format",
"(",
"axis",
")",
")",
"x",
"=",
"_move_dims_to_flat_end",
"(",
"x",
",",
"axis",
",",
"x_ndims",
",",
"right_end",
"=",
"False",
")",
"# bins.shape = x.shape = [n_samples] + E,",
"# and bins[i] is a shape E Tensor of the bins that sample `i` fell into.",
"# E is the \"event shape\", which is [] if axis is None.",
"bins",
"=",
"find_bins",
"(",
"x",
",",
"edges",
"=",
"edges",
",",
"# If not extending intervals, then values outside the edges will return",
"# -1, which gives an error when fed to bincount.",
"extend_lower_interval",
"=",
"extend_lower_interval",
",",
"extend_upper_interval",
"=",
"extend_upper_interval",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"# TODO(b/124015136) Use standard tf.math.bincount once it supports `axis`.",
"counts",
"=",
"count_integers",
"(",
"bins",
",",
"# Ensure we get correct output, even if x did not fall into every bin",
"minlength",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"edges",
")",
"[",
"0",
"]",
"-",
"1",
",",
"maxlength",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"edges",
")",
"[",
"0",
"]",
"-",
"1",
",",
"axis",
"=",
"0",
",",
"dtype",
"=",
"dtype",
"or",
"in_dtype",
")",
"n_edges",
"=",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"edges",
".",
"shape",
"[",
"0",
"]",
")",
"if",
"n_edges",
"is",
"not",
"None",
":",
"counts",
".",
"set_shape",
"(",
"tf",
".",
"TensorShape",
"(",
"[",
"n_edges",
"-",
"1",
"]",
")",
".",
"concatenate",
"(",
"counts",
".",
"shape",
"[",
"1",
":",
"]",
")",
")",
"return",
"counts"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
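The two-stage structure of `histogram` (assign bins, then count them) can be sketched for the `1-D` case with NumPy alone. The sample sizes and edges below are hypothetical, and the real function's `axis` handling, batched `edges`, and `count_integers` helper are omitted.
```python
import numpy as np

x = np.concatenate([np.random.uniform(0., 1., 1000),
                    np.random.uniform(1., 2., 1000)])
edges = np.array([0., 0.5, 1.0, 1.5, 2.0])
# Stage 1: bin ids, using the same searchsorted-and-clip rule as find_bins.
bins = np.clip(np.searchsorted(edges, x, side='right') - 1, 0, len(edges) - 2)
# Stage 2: count occurrences of each bin id; minlength keeps empty bins.
counts = np.bincount(bins, minlength=len(edges) - 1)
print(counts)
# ==> approximately [500 500 500 500]
```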
test
|
percentile
|
Compute the `q`-th percentile(s) of `x`.
Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
way from the minimum to the maximum in a sorted copy of `x`.
The values and distances of the two nearest neighbors as well as the
`interpolation` parameter will determine the percentile if the normalized
ranking does not match the location of `q` exactly.
This function is the same as the median if `q = 50`, the same as the minimum
if `q = 0` and the same as the maximum if `q = 100`.
Multiple percentiles can be computed at once by using `1-D` vector `q`.
Dimension zero of the returned `Tensor` will index the different percentiles.
Compare to `numpy.percentile`.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,
`x` must have statically known number of dimensions.
q: Scalar or vector `Tensor` with values in `[0, 100]`. The percentile(s).
axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. The
axis that index independent samples over which to return the desired
percentile. If `None` (the default), treat every dimension as a sample
dimension, returning a scalar.
interpolation : {'nearest', 'linear', 'lower', 'higher', 'midpoint'}.
Default value: 'nearest'. This specifies the interpolation method to
use when the desired quantile lies between two data points `i < j`:
* linear: i + (j - i) * fraction, where fraction is the fractional part
of the index surrounded by i and j.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j`, whichever is nearest.
* midpoint: (i + j) / 2.
`linear` and `midpoint` interpolation do not work with integer dtypes.
keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1
If `False`, the last dimension is removed from the output shape.
validate_args: Whether to add runtime checks of argument validity. If
False, and arguments are incorrect, correct behavior is not guaranteed.
preserve_gradients: Python `bool`. If `True`, ensure that gradient w.r.t
the percentile `q` is preserved in the case of linear interpolation.
If `False`, the gradient will be (incorrectly) zero when `q` corresponds
to a point in `x`.
name: A Python string name to give this `Op`. Default is 'percentile'
Returns:
A `(rank(q) + N - len(axis))` dimensional `Tensor` of same dtype as `x`, or,
if `axis` is `None`, a `rank(q)` `Tensor`. The first `rank(q)` dimensions
index quantiles for different values of `q`.
Raises:
ValueError: If argument 'interpolation' is not an allowed type.
ValueError: If interpolation type not compatible with `dtype`.
#### Examples
```python
# Get 30th percentile with default ('nearest') interpolation.
x = [1., 2., 3., 4.]
tfp.stats.percentile(x, q=30.)
==> 2.0
# Get 30th percentile with 'linear' interpolation.
x = [1., 2., 3., 4.]
tfp.stats.percentile(x, q=30., interpolation='linear')
==> 1.9
# Get 30th and 70th percentiles with 'lower' interpolation
x = [1., 2., 3., 4.]
tfp.stats.percentile(x, q=[30., 70.], interpolation='lower')
==> [1., 3.]
# Get 100th percentile (maximum). By default, this is computed over every dim
x = [[1., 2.]
[3., 4.]]
tfp.stats.percentile(x, q=100.)
==> 4.
# Treat the leading dim as indexing samples, and find the 100th quantile (max)
# over all such samples.
x = [[1., 2.]
[3., 4.]]
tfp.stats.percentile(x, q=100., axis=[0])
==> [3., 4.]
```
|
tensorflow_probability/python/stats/quantiles.py
|
def percentile(x,
q,
axis=None,
interpolation=None,
keep_dims=False,
validate_args=False,
preserve_gradients=True,
name=None):
"""Compute the `q`-th percentile(s) of `x`.
Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
way from the minimum to the maximum in a sorted copy of `x`.
The values and distances of the two nearest neighbors as well as the
`interpolation` parameter will determine the percentile if the normalized
ranking does not match the location of `q` exactly.
This function is the same as the median if `q = 50`, the same as the minimum
if `q = 0` and the same as the maximum if `q = 100`.
Multiple percentiles can be computed at once by using `1-D` vector `q`.
Dimension zero of the returned `Tensor` will index the different percentiles.
Compare to `numpy.percentile`.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,
`x` must have statically known number of dimensions.
q: Scalar or vector `Tensor` with values in `[0, 100]`. The percentile(s).
axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. The
axis that index independent samples over which to return the desired
percentile. If `None` (the default), treat every dimension as a sample
dimension, returning a scalar.
interpolation : {'nearest', 'linear', 'lower', 'higher', 'midpoint'}.
Default value: 'nearest'. This specifies the interpolation method to
use when the desired quantile lies between two data points `i < j`:
* linear: i + (j - i) * fraction, where fraction is the fractional part
of the index surrounded by i and j.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j`, whichever is nearest.
* midpoint: (i + j) / 2.
`linear` and `midpoint` interpolation do not work with integer dtypes.
keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1
If `False`, the last dimension is removed from the output shape.
validate_args: Whether to add runtime checks of argument validity. If
False, and arguments are incorrect, correct behavior is not guaranteed.
preserve_gradients: Python `bool`. If `True`, ensure that gradient w.r.t
the percentile `q` is preserved in the case of linear interpolation.
If `False`, the gradient will be (incorrectly) zero when `q` corresponds
to a point in `x`.
name: A Python string name to give this `Op`. Default is 'percentile'
Returns:
A `(rank(q) + N - len(axis))` dimensional `Tensor` of same dtype as `x`, or,
if `axis` is `None`, a `rank(q)` `Tensor`. The first `rank(q)` dimensions
index quantiles for different values of `q`.
Raises:
ValueError: If argument 'interpolation' is not an allowed type.
ValueError: If interpolation type not compatible with `dtype`.
#### Examples
```python
# Get 30th percentile with default ('nearest') interpolation.
x = [1., 2., 3., 4.]
tfp.stats.percentile(x, q=30.)
==> 2.0
# Get 30th percentile with 'linear' interpolation.
x = [1., 2., 3., 4.]
tfp.stats.percentile(x, q=30., interpolation='linear')
==> 1.9
# Get 30th and 70th percentiles with 'lower' interpolation
x = [1., 2., 3., 4.]
tfp.stats.percentile(x, q=[30., 70.], interpolation='lower')
==> [1., 3.]
# Get 100th percentile (maximum). By default, this is computed over every dim
x = [[1., 2.]
[3., 4.]]
tfp.stats.percentile(x, q=100.)
==> 4.
# Treat the leading dim as indexing samples, and find the 100th quantile (max)
# over all such samples.
x = [[1., 2.]
[3., 4.]]
tfp.stats.percentile(x, q=100., axis=[0])
==> [3., 4.]
```
"""
name = name or 'percentile'
allowed_interpolations = {'linear', 'lower', 'higher', 'nearest', 'midpoint'}
if interpolation is None:
interpolation = 'nearest'
else:
if interpolation not in allowed_interpolations:
raise ValueError('Argument `interpolation` must be in %s. Found %s' %
(allowed_interpolations, interpolation))
with tf.compat.v1.name_scope(name, values=[x, q]):
x = tf.convert_to_tensor(value=x, name='x')
if interpolation in {'linear', 'midpoint'} and x.dtype.is_integer:
raise TypeError('{} interpolation not allowed with dtype {}'.format(
interpolation, x.dtype))
# Double is needed here and below, else we get the wrong index if the array
# is huge along axis.
q = tf.cast(q, tf.float64)
_get_static_ndims(q, expect_ndims_no_more_than=1)
if validate_args:
q = distribution_util.with_dependencies([
tf.compat.v1.assert_rank_in(q, [0, 1]),
tf.compat.v1.assert_greater_equal(q, tf.cast(0., tf.float64)),
tf.compat.v1.assert_less_equal(q, tf.cast(100., tf.float64))
], q)
# Move `axis` dims of `x` to the rightmost, call it `y`.
if axis is None:
y = tf.reshape(x, [-1])
else:
x_ndims = _get_static_ndims(
x, expect_static=True, expect_ndims_at_least=1)
axis = _make_static_axis_non_negative_list(axis, x_ndims)
y = _move_dims_to_flat_end(x, axis, x_ndims, right_end=True)
frac_at_q_or_above = 1. - q / 100.
# Sort everything, not just the top 'k' entries, which allows multiple calls
# to sort only once (under the hood) and use CSE.
sorted_y = _sort_tensor(y)
d = tf.cast(tf.shape(input=y)[-1], tf.float64)
def _get_indices(interp_type):
"""Get values of y at the indices implied by interp_type."""
# Note `lower` <--> ceiling. Confusing, huh? Due to the fact that
# _sort_tensor sorts highest to lowest, tf.ceil corresponds to the higher
# index, but the lower value of y!
if interp_type == 'lower':
indices = tf.math.ceil((d - 1) * frac_at_q_or_above)
elif interp_type == 'higher':
indices = tf.floor((d - 1) * frac_at_q_or_above)
elif interp_type == 'nearest':
indices = tf.round((d - 1) * frac_at_q_or_above)
# d - 1 will be distinct from d in int32, but not necessarily double.
# So clip to avoid out of bounds errors.
return tf.clip_by_value(
tf.cast(indices, tf.int32), 0,
tf.shape(input=y)[-1] - 1)
if interpolation in ['nearest', 'lower', 'higher']:
gathered_y = tf.gather(sorted_y, _get_indices(interpolation), axis=-1)
elif interpolation == 'midpoint':
gathered_y = 0.5 * (
tf.gather(sorted_y, _get_indices('lower'), axis=-1) +
tf.gather(sorted_y, _get_indices('higher'), axis=-1))
elif interpolation == 'linear':
# Copy-paste of docstring on interpolation:
# linear: i + (j - i) * fraction, where fraction is the fractional part
# of the index surrounded by i and j.
larger_y_idx = _get_indices('lower')
exact_idx = (d - 1) * frac_at_q_or_above
if preserve_gradients:
# If q corresponds to a point in x, we will initially have
# larger_y_idx == smaller_y_idx.
# This results in the gradient w.r.t. fraction being zero (recall `q`
# enters only through `fraction`...and see that things cancel).
# The fix is to ensure that smaller_y_idx and larger_y_idx are always
# separated by exactly 1.
smaller_y_idx = tf.maximum(larger_y_idx - 1, 0)
larger_y_idx = tf.minimum(smaller_y_idx + 1, tf.shape(input=y)[-1] - 1)
fraction = tf.cast(larger_y_idx, tf.float64) - exact_idx
else:
smaller_y_idx = _get_indices('higher')
fraction = tf.math.ceil((d - 1) * frac_at_q_or_above) - exact_idx
fraction = tf.cast(fraction, y.dtype)
gathered_y = (
tf.gather(sorted_y, larger_y_idx, axis=-1) * (1 - fraction) +
tf.gather(sorted_y, smaller_y_idx, axis=-1) * fraction)
# Propagate NaNs
if x.dtype in (tf.bfloat16, tf.float16, tf.float32, tf.float64):
# Apparently tf.is_nan doesn't like other dtypes
nan_batch_members = tf.reduce_any(
input_tensor=tf.math.is_nan(x), axis=axis)
right_rank_matched_shape = tf.pad(
tensor=tf.shape(input=nan_batch_members),
paddings=[[0, tf.rank(input=q)]],
constant_values=1)
nan_batch_members = tf.reshape(
nan_batch_members, shape=right_rank_matched_shape)
shape_gathered_y = tf.shape(input=gathered_y)
nan = np.array(np.nan, gathered_y.dtype.as_numpy_dtype)
gathered_y = tf.where(
tf.broadcast_to(nan_batch_members, shape_gathered_y),
tf.fill(shape_gathered_y, nan),
gathered_y)
# Expand dimensions if requested
if keep_dims:
if axis is None:
ones_vec = tf.ones(
shape=[_get_best_effort_ndims(x) + _get_best_effort_ndims(q)],
dtype=tf.int32)
gathered_y *= tf.ones(ones_vec, dtype=x.dtype)
else:
gathered_y = _insert_back_keep_dims(gathered_y, axis)
# If q is a scalar, then result has the right shape.
# If q is a vector, then result has trailing dim of shape q.shape, which
# needs to be rotated to dim 0.
return distribution_util.rotate_transpose(gathered_y, tf.rank(q))
|
def percentile(x,
q,
axis=None,
interpolation=None,
keep_dims=False,
validate_args=False,
preserve_gradients=True,
name=None):
"""Compute the `q`-th percentile(s) of `x`.
Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
way from the minimum to the maximum in a sorted copy of `x`.
The values and distances of the two nearest neighbors as well as the
`interpolation` parameter will determine the percentile if the normalized
ranking does not match the location of `q` exactly.
This function is the same as the median if `q = 50`, the same as the minimum
if `q = 0` and the same as the maximum if `q = 100`.
Multiple percentiles can be computed at once by using `1-D` vector `q`.
Dimension zero of the returned `Tensor` will index the different percentiles.
Compare to `numpy.percentile`.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,
`x` must have statically known number of dimensions.
q: Scalar or vector `Tensor` with values in `[0, 100]`. The percentile(s).
axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. The
axis that index independent samples over which to return the desired
percentile. If `None` (the default), treat every dimension as a sample
dimension, returning a scalar.
interpolation : {'nearest', 'linear', 'lower', 'higher', 'midpoint'}.
Default value: 'nearest'. This specifies the interpolation method to
use when the desired quantile lies between two data points `i < j`:
* linear: i + (j - i) * fraction, where fraction is the fractional part
of the index surrounded by i and j.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j`, whichever is nearest.
* midpoint: (i + j) / 2.
`linear` and `midpoint` interpolation do not work with integer dtypes.
keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1
If `False`, the last dimension is removed from the output shape.
validate_args: Whether to add runtime checks of argument validity. If
False, and arguments are incorrect, correct behavior is not guaranteed.
preserve_gradients: Python `bool`. If `True`, ensure that gradient w.r.t
the percentile `q` is preserved in the case of linear interpolation.
If `False`, the gradient will be (incorrectly) zero when `q` corresponds
to a point in `x`.
name: A Python string name to give this `Op`. Default is 'percentile'
Returns:
A `(rank(q) + N - len(axis))` dimensional `Tensor` of same dtype as `x`, or,
if `axis` is `None`, a `rank(q)` `Tensor`. The first `rank(q)` dimensions
index quantiles for different values of `q`.
Raises:
ValueError: If argument 'interpolation' is not an allowed type.
ValueError: If interpolation type not compatible with `dtype`.
#### Examples
```python
# Get 30th percentile with default ('nearest') interpolation.
x = [1., 2., 3., 4.]
tfp.stats.percentile(x, q=30.)
==> 2.0
# Get 30th percentile with 'linear' interpolation.
x = [1., 2., 3., 4.]
tfp.stats.percentile(x, q=30., interpolation='linear')
==> 1.9
# Get 30th and 70th percentiles with 'lower' interpolation
x = [1., 2., 3., 4.]
tfp.stats.percentile(x, q=[30., 70.], interpolation='lower')
==> [1., 3.]
# Get 100th percentile (maximum). By default, this is computed over every dim
x = [[1., 2.]
[3., 4.]]
tfp.stats.percentile(x, q=100.)
==> 4.
# Treat the leading dim as indexing samples, and find the 100th quantile (max)
# over all such samples.
x = [[1., 2.]
[3., 4.]]
tfp.stats.percentile(x, q=100., axis=[0])
==> [3., 4.]
```
"""
name = name or 'percentile'
allowed_interpolations = {'linear', 'lower', 'higher', 'nearest', 'midpoint'}
if interpolation is None:
interpolation = 'nearest'
else:
if interpolation not in allowed_interpolations:
raise ValueError('Argument `interpolation` must be in %s. Found %s' %
(allowed_interpolations, interpolation))
with tf.compat.v1.name_scope(name, values=[x, q]):
x = tf.convert_to_tensor(value=x, name='x')
if interpolation in {'linear', 'midpoint'} and x.dtype.is_integer:
raise TypeError('{} interpolation not allowed with dtype {}'.format(
interpolation, x.dtype))
# Double is needed here and below, else we get the wrong index if the array
# is huge along axis.
q = tf.cast(q, tf.float64)
_get_static_ndims(q, expect_ndims_no_more_than=1)
if validate_args:
q = distribution_util.with_dependencies([
tf.compat.v1.assert_rank_in(q, [0, 1]),
tf.compat.v1.assert_greater_equal(q, tf.cast(0., tf.float64)),
tf.compat.v1.assert_less_equal(q, tf.cast(100., tf.float64))
], q)
# Move `axis` dims of `x` to the rightmost, call it `y`.
if axis is None:
y = tf.reshape(x, [-1])
else:
x_ndims = _get_static_ndims(
x, expect_static=True, expect_ndims_at_least=1)
axis = _make_static_axis_non_negative_list(axis, x_ndims)
y = _move_dims_to_flat_end(x, axis, x_ndims, right_end=True)
frac_at_q_or_above = 1. - q / 100.
# Sort everything, not just the top 'k' entries, which allows multiple calls
# to sort only once (under the hood) and use CSE.
sorted_y = _sort_tensor(y)
d = tf.cast(tf.shape(input=y)[-1], tf.float64)
def _get_indices(interp_type):
"""Get values of y at the indices implied by interp_type."""
# Note `lower` <--> ceiling. Confusing, huh? Due to the fact that
# _sort_tensor sorts highest to lowest, tf.ceil corresponds to the higher
# index, but the lower value of y!
if interp_type == 'lower':
indices = tf.math.ceil((d - 1) * frac_at_q_or_above)
elif interp_type == 'higher':
indices = tf.floor((d - 1) * frac_at_q_or_above)
elif interp_type == 'nearest':
indices = tf.round((d - 1) * frac_at_q_or_above)
# d - 1 will be distinct from d in int32, but not necessarily double.
# So clip to avoid out of bounds errors.
return tf.clip_by_value(
tf.cast(indices, tf.int32), 0,
tf.shape(input=y)[-1] - 1)
if interpolation in ['nearest', 'lower', 'higher']:
gathered_y = tf.gather(sorted_y, _get_indices(interpolation), axis=-1)
elif interpolation == 'midpoint':
gathered_y = 0.5 * (
tf.gather(sorted_y, _get_indices('lower'), axis=-1) +
tf.gather(sorted_y, _get_indices('higher'), axis=-1))
elif interpolation == 'linear':
# Copy-paste of docstring on interpolation:
# linear: i + (j - i) * fraction, where fraction is the fractional part
# of the index surrounded by i and j.
larger_y_idx = _get_indices('lower')
exact_idx = (d - 1) * frac_at_q_or_above
if preserve_gradients:
# If q corresponds to a point in x, we will initially have
# larger_y_idx == smaller_y_idx.
# This results in the gradient w.r.t. fraction being zero (recall `q`
# enters only through `fraction`...and see that things cancel).
# The fix is to ensure that smaller_y_idx and larger_y_idx are always
# separated by exactly 1.
smaller_y_idx = tf.maximum(larger_y_idx - 1, 0)
larger_y_idx = tf.minimum(smaller_y_idx + 1, tf.shape(input=y)[-1] - 1)
fraction = tf.cast(larger_y_idx, tf.float64) - exact_idx
else:
smaller_y_idx = _get_indices('higher')
fraction = tf.math.ceil((d - 1) * frac_at_q_or_above) - exact_idx
fraction = tf.cast(fraction, y.dtype)
gathered_y = (
tf.gather(sorted_y, larger_y_idx, axis=-1) * (1 - fraction) +
tf.gather(sorted_y, smaller_y_idx, axis=-1) * fraction)
# Propagate NaNs
if x.dtype in (tf.bfloat16, tf.float16, tf.float32, tf.float64):
# Apparently tf.is_nan doesn't like other dtypes
nan_batch_members = tf.reduce_any(
input_tensor=tf.math.is_nan(x), axis=axis)
right_rank_matched_shape = tf.pad(
tensor=tf.shape(input=nan_batch_members),
paddings=[[0, tf.rank(input=q)]],
constant_values=1)
nan_batch_members = tf.reshape(
nan_batch_members, shape=right_rank_matched_shape)
shape_gathered_y = tf.shape(input=gathered_y)
nan = np.array(np.nan, gathered_y.dtype.as_numpy_dtype)
gathered_y = tf.where(
tf.broadcast_to(nan_batch_members, shape_gathered_y),
tf.fill(shape_gathered_y, nan),
gathered_y)
# Expand dimensions if requested
if keep_dims:
if axis is None:
ones_vec = tf.ones(
shape=[_get_best_effort_ndims(x) + _get_best_effort_ndims(q)],
dtype=tf.int32)
gathered_y *= tf.ones(ones_vec, dtype=x.dtype)
else:
gathered_y = _insert_back_keep_dims(gathered_y, axis)
# If q is a scalar, then result has the right shape.
# If q is a vector, then result has trailing dim of shape q.shape, which
# needs to be rotated to dim 0.
return distribution_util.rotate_transpose(gathered_y, tf.rank(q))
|
[
"Compute",
"the",
"q",
"-",
"th",
"percentile",
"(",
"s",
")",
"of",
"x",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L403-L623
|
[
"def",
"percentile",
"(",
"x",
",",
"q",
",",
"axis",
"=",
"None",
",",
"interpolation",
"=",
"None",
",",
"keep_dims",
"=",
"False",
",",
"validate_args",
"=",
"False",
",",
"preserve_gradients",
"=",
"True",
",",
"name",
"=",
"None",
")",
":",
"name",
"=",
"name",
"or",
"'percentile'",
"allowed_interpolations",
"=",
"{",
"'linear'",
",",
"'lower'",
",",
"'higher'",
",",
"'nearest'",
",",
"'midpoint'",
"}",
"if",
"interpolation",
"is",
"None",
":",
"interpolation",
"=",
"'nearest'",
"else",
":",
"if",
"interpolation",
"not",
"in",
"allowed_interpolations",
":",
"raise",
"ValueError",
"(",
"'Argument `interpolation` must be in %s. Found %s'",
"%",
"(",
"allowed_interpolations",
",",
"interpolation",
")",
")",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"values",
"=",
"[",
"x",
",",
"q",
"]",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"'x'",
")",
"if",
"interpolation",
"in",
"{",
"'linear'",
",",
"'midpoint'",
"}",
"and",
"x",
".",
"dtype",
".",
"is_integer",
":",
"raise",
"TypeError",
"(",
"'{} interpolation not allowed with dtype {}'",
".",
"format",
"(",
"interpolation",
",",
"x",
".",
"dtype",
")",
")",
"# Double is needed here and below, else we get the wrong index if the array",
"# is huge along axis.",
"q",
"=",
"tf",
".",
"cast",
"(",
"q",
",",
"tf",
".",
"float64",
")",
"_get_static_ndims",
"(",
"q",
",",
"expect_ndims_no_more_than",
"=",
"1",
")",
"if",
"validate_args",
":",
"q",
"=",
"distribution_util",
".",
"with_dependencies",
"(",
"[",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_rank_in",
"(",
"q",
",",
"[",
"0",
",",
"1",
"]",
")",
",",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_greater_equal",
"(",
"q",
",",
"tf",
".",
"cast",
"(",
"0.",
",",
"tf",
".",
"float64",
")",
")",
",",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_less_equal",
"(",
"q",
",",
"tf",
".",
"cast",
"(",
"100.",
",",
"tf",
".",
"float64",
")",
")",
"]",
",",
"q",
")",
"# Move `axis` dims of `x` to the rightmost, call it `y`.",
"if",
"axis",
"is",
"None",
":",
"y",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"[",
"-",
"1",
"]",
")",
"else",
":",
"x_ndims",
"=",
"_get_static_ndims",
"(",
"x",
",",
"expect_static",
"=",
"True",
",",
"expect_ndims_at_least",
"=",
"1",
")",
"axis",
"=",
"_make_static_axis_non_negative_list",
"(",
"axis",
",",
"x_ndims",
")",
"y",
"=",
"_move_dims_to_flat_end",
"(",
"x",
",",
"axis",
",",
"x_ndims",
",",
"right_end",
"=",
"True",
")",
"frac_at_q_or_above",
"=",
"1.",
"-",
"q",
"/",
"100.",
"# Sort everything, not just the top 'k' entries, which allows multiple calls",
"# to sort only once (under the hood) and use CSE.",
"sorted_y",
"=",
"_sort_tensor",
"(",
"y",
")",
"d",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"y",
")",
"[",
"-",
"1",
"]",
",",
"tf",
".",
"float64",
")",
"def",
"_get_indices",
"(",
"interp_type",
")",
":",
"\"\"\"Get values of y at the indices implied by interp_type.\"\"\"",
"# Note `lower` <--> ceiling. Confusing, huh? Due to the fact that",
"# _sort_tensor sorts highest to lowest, tf.ceil corresponds to the higher",
"# index, but the lower value of y!",
"if",
"interp_type",
"==",
"'lower'",
":",
"indices",
"=",
"tf",
".",
"math",
".",
"ceil",
"(",
"(",
"d",
"-",
"1",
")",
"*",
"frac_at_q_or_above",
")",
"elif",
"interp_type",
"==",
"'higher'",
":",
"indices",
"=",
"tf",
".",
"floor",
"(",
"(",
"d",
"-",
"1",
")",
"*",
"frac_at_q_or_above",
")",
"elif",
"interp_type",
"==",
"'nearest'",
":",
"indices",
"=",
"tf",
".",
"round",
"(",
"(",
"d",
"-",
"1",
")",
"*",
"frac_at_q_or_above",
")",
"# d - 1 will be distinct from d in int32, but not necessarily double.",
"# So clip to avoid out of bounds errors.",
"return",
"tf",
".",
"clip_by_value",
"(",
"tf",
".",
"cast",
"(",
"indices",
",",
"tf",
".",
"int32",
")",
",",
"0",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"y",
")",
"[",
"-",
"1",
"]",
"-",
"1",
")",
"if",
"interpolation",
"in",
"[",
"'nearest'",
",",
"'lower'",
",",
"'higher'",
"]",
":",
"gathered_y",
"=",
"tf",
".",
"gather",
"(",
"sorted_y",
",",
"_get_indices",
"(",
"interpolation",
")",
",",
"axis",
"=",
"-",
"1",
")",
"elif",
"interpolation",
"==",
"'midpoint'",
":",
"gathered_y",
"=",
"0.5",
"*",
"(",
"tf",
".",
"gather",
"(",
"sorted_y",
",",
"_get_indices",
"(",
"'lower'",
")",
",",
"axis",
"=",
"-",
"1",
")",
"+",
"tf",
".",
"gather",
"(",
"sorted_y",
",",
"_get_indices",
"(",
"'higher'",
")",
",",
"axis",
"=",
"-",
"1",
")",
")",
"elif",
"interpolation",
"==",
"'linear'",
":",
"# Copy-paste of docstring on interpolation:",
"# linear: i + (j - i) * fraction, where fraction is the fractional part",
"# of the index surrounded by i and j.",
"larger_y_idx",
"=",
"_get_indices",
"(",
"'lower'",
")",
"exact_idx",
"=",
"(",
"d",
"-",
"1",
")",
"*",
"frac_at_q_or_above",
"if",
"preserve_gradients",
":",
"# If q corresponds to a point in x, we will initially have",
"# larger_y_idx == smaller_y_idx.",
"# This results in the gradient w.r.t. fraction being zero (recall `q`",
"# enters only through `fraction`...and see that things cancel).",
"# The fix is to ensure that smaller_y_idx and larger_y_idx are always",
"# separated by exactly 1.",
"smaller_y_idx",
"=",
"tf",
".",
"maximum",
"(",
"larger_y_idx",
"-",
"1",
",",
"0",
")",
"larger_y_idx",
"=",
"tf",
".",
"minimum",
"(",
"smaller_y_idx",
"+",
"1",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"y",
")",
"[",
"-",
"1",
"]",
"-",
"1",
")",
"fraction",
"=",
"tf",
".",
"cast",
"(",
"larger_y_idx",
",",
"tf",
".",
"float64",
")",
"-",
"exact_idx",
"else",
":",
"smaller_y_idx",
"=",
"_get_indices",
"(",
"'higher'",
")",
"fraction",
"=",
"tf",
".",
"math",
".",
"ceil",
"(",
"(",
"d",
"-",
"1",
")",
"*",
"frac_at_q_or_above",
")",
"-",
"exact_idx",
"fraction",
"=",
"tf",
".",
"cast",
"(",
"fraction",
",",
"y",
".",
"dtype",
")",
"gathered_y",
"=",
"(",
"tf",
".",
"gather",
"(",
"sorted_y",
",",
"larger_y_idx",
",",
"axis",
"=",
"-",
"1",
")",
"*",
"(",
"1",
"-",
"fraction",
")",
"+",
"tf",
".",
"gather",
"(",
"sorted_y",
",",
"smaller_y_idx",
",",
"axis",
"=",
"-",
"1",
")",
"*",
"fraction",
")",
"# Propagate NaNs",
"if",
"x",
".",
"dtype",
"in",
"(",
"tf",
".",
"bfloat16",
",",
"tf",
".",
"float16",
",",
"tf",
".",
"float32",
",",
"tf",
".",
"float64",
")",
":",
"# Apparently tf.is_nan doesn't like other dtypes",
"nan_batch_members",
"=",
"tf",
".",
"reduce_any",
"(",
"input_tensor",
"=",
"tf",
".",
"math",
".",
"is_nan",
"(",
"x",
")",
",",
"axis",
"=",
"axis",
")",
"right_rank_matched_shape",
"=",
"tf",
".",
"pad",
"(",
"tensor",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"nan_batch_members",
")",
",",
"paddings",
"=",
"[",
"[",
"0",
",",
"tf",
".",
"rank",
"(",
"input",
"=",
"q",
")",
"]",
"]",
",",
"constant_values",
"=",
"1",
")",
"nan_batch_members",
"=",
"tf",
".",
"reshape",
"(",
"nan_batch_members",
",",
"shape",
"=",
"right_rank_matched_shape",
")",
"shape_gathered_y",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"gathered_y",
")",
"nan",
"=",
"np",
".",
"array",
"(",
"np",
".",
"nan",
",",
"gathered_y",
".",
"dtype",
".",
"as_numpy_dtype",
")",
"gathered_y",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"broadcast_to",
"(",
"nan_batch_members",
",",
"shape_gathered_y",
")",
",",
"tf",
".",
"fill",
"(",
"shape_gathered_y",
",",
"nan",
")",
",",
"gathered_y",
")",
"# Expand dimensions if requested",
"if",
"keep_dims",
":",
"if",
"axis",
"is",
"None",
":",
"ones_vec",
"=",
"tf",
".",
"ones",
"(",
"shape",
"=",
"[",
"_get_best_effort_ndims",
"(",
"x",
")",
"+",
"_get_best_effort_ndims",
"(",
"q",
")",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"gathered_y",
"*=",
"tf",
".",
"ones",
"(",
"ones_vec",
",",
"dtype",
"=",
"x",
".",
"dtype",
")",
"else",
":",
"gathered_y",
"=",
"_insert_back_keep_dims",
"(",
"gathered_y",
",",
"axis",
")",
"# If q is a scalar, then result has the right shape.",
"# If q is a vector, then result has trailing dim of shape q.shape, which",
"# needs to be rotated to dim 0.",
"return",
"distribution_util",
".",
"rotate_transpose",
"(",
"gathered_y",
",",
"tf",
".",
"rank",
"(",
"q",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
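The index arithmetic behind `percentile` is easy to check by hand for a `1-D` float `x`. The sketch below reproduces the `'nearest'` rule on the docstring's example values and compares against NumPy's default `'linear'` rule; it is illustrative only and skips the descending sort helper, batching, NaN propagation, and `preserve_gradients` logic of the row above.
```python
import numpy as np

x = np.array([1., 2., 3., 4.])
q = 30.
d = x.size
# Fraction of mass at or above q, mirroring `frac_at_q_or_above` above.
frac_at_q_or_above = 1. - q / 100.
sorted_desc = np.sort(x)[::-1]  # the TF helper sorts highest to lowest
idx = int(np.round((d - 1) * frac_at_q_or_above))
print(sorted_desc[idx])     # ==> 2.0  ('nearest', matches the docstring)
print(np.percentile(x, q))  # ==> 1.9  (NumPy's 'linear' rule, also as documented)
```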
test
|
quantiles
|
Compute quantiles of `x` along `axis`.
The quantiles of a distribution are cut points dividing the range into
intervals with equal probabilities.
Given a vector `x` of samples, this function estimates the cut points by
returning `num_quantiles + 1` cut points, `(c0, ..., cn)`, such that, roughly
speaking, equal number of sample points lie in the `num_quantiles` intervals
`[c0, c1), [c1, c2), ..., [c_{n-1}, cn]`. That is,
* About `1 / n` fraction of the data lies in `[c_{k-1}, c_k)`, `k = 1, ..., n`
* About `k / n` fraction of the data lies below `c_k`.
* `c0` is the sample minimum and `cn` is the maximum.
The exact number of data points in each interval depends on the size of
`x` (e.g. whether the size is divisible by `n`) and the `interpolation` kwarg.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,
`x` must have statically known number of dimensions.
num_quantiles: Scalar `integer` `Tensor`. The number of intervals the
returned `num_quantiles + 1` cut points divide the range into.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. The
axis that index independent samples over which to return the desired
percentile. If `None` (the default), treat every dimension as a sample
dimension, returning a scalar.
interpolation : {'nearest', 'linear', 'lower', 'higher', 'midpoint'}.
Default value: 'nearest'. This specifies the interpolation method to
use when the fractions `k / n` lie between two data points `i < j`:
* linear: i + (j - i) * fraction, where fraction is the fractional part
of the index surrounded by i and j.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j`, whichever is nearest.
* midpoint: (i + j) / 2. `linear` and `midpoint` interpolation do not
work with integer dtypes.
keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1
If `False`, the last dimension is removed from the output shape.
validate_args: Whether to add runtime checks of argument validity. If
False, and arguments are incorrect, correct behavior is not guaranteed.
  name: A Python string name to give this `Op`. Default is 'quantiles'
Returns:
cut_points: A `rank(x) + 1 - len(axis)` dimensional `Tensor` with same
`dtype` as `x` and shape `[num_quantiles + 1, ...]` where the trailing shape
is that of `x` without the dimensions in `axis` (unless `keep_dims is True`)
Raises:
ValueError: If argument 'interpolation' is not an allowed type.
ValueError: If interpolation type not compatible with `dtype`.
#### Examples
```python
# Get quartiles of x with various interpolation choices.
x = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
tfp.stats.quantiles(x, num_quantiles=4, interpolation='nearest')
==> [ 0., 2., 5., 8., 10.]
tfp.stats.quantiles(x, num_quantiles=4, interpolation='linear')
==> [ 0. , 2.5, 5. , 7.5, 10. ]
tfp.stats.quantiles(x, num_quantiles=4, interpolation='lower')
==> [ 0., 2., 5., 7., 10.]
# Get deciles of columns of an R x C data set.
data = load_my_columnar_data(...)
tfp.stats.quantiles(data, num_quantiles=10)
==> Shape [11, C] Tensor
```
|
tensorflow_probability/python/stats/quantiles.py
|
def quantiles(x,
num_quantiles,
axis=None,
interpolation=None,
keep_dims=False,
validate_args=False,
name=None):
"""Compute quantiles of `x` along `axis`.
The quantiles of a distribution are cut points dividing the range into
intervals with equal probabilities.
Given a vector `x` of samples, this function estimates the cut points by
returning `num_quantiles + 1` cut points, `(c0, ..., cn)`, such that, roughly
speaking, equal number of sample points lie in the `num_quantiles` intervals
`[c0, c1), [c1, c2), ..., [c_{n-1}, cn]`. That is,
* About `1 / n` fraction of the data lies in `[c_{k-1}, c_k)`, `k = 1, ..., n`
* About `k / n` fraction of the data lies below `c_k`.
* `c0` is the sample minimum and `cn` is the maximum.
The exact number of data points in each interval depends on the size of
`x` (e.g. whether the size is divisible by `n`) and the `interpolation` kwarg.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,
`x` must have statically known number of dimensions.
num_quantiles: Scalar `integer` `Tensor`. The number of intervals the
returned `num_quantiles + 1` cut points divide the range into.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. The
axis that index independent samples over which to return the desired
percentile. If `None` (the default), treat every dimension as a sample
dimension, returning a scalar.
interpolation : {'nearest', 'linear', 'lower', 'higher', 'midpoint'}.
Default value: 'nearest'. This specifies the interpolation method to
use when the fractions `k / n` lie between two data points `i < j`:
* linear: i + (j - i) * fraction, where fraction is the fractional part
of the index surrounded by i and j.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j`, whichever is nearest.
* midpoint: (i + j) / 2. `linear` and `midpoint` interpolation do not
work with integer dtypes.
keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1
If `False`, the last dimension is removed from the output shape.
validate_args: Whether to add runtime checks of argument validity. If
False, and arguments are incorrect, correct behavior is not guaranteed.
    name: A Python string name to give this `Op`. Default is 'quantiles'
Returns:
cut_points: A `rank(x) + 1 - len(axis)` dimensional `Tensor` with same
`dtype` as `x` and shape `[num_quantiles + 1, ...]` where the trailing shape
is that of `x` without the dimensions in `axis` (unless `keep_dims is True`)
Raises:
ValueError: If argument 'interpolation' is not an allowed type.
ValueError: If interpolation type not compatible with `dtype`.
#### Examples
```python
# Get quartiles of x with various interpolation choices.
x = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
tfp.stats.quantiles(x, num_quantiles=4, interpolation='nearest')
==> [ 0., 2., 5., 8., 10.]
tfp.stats.quantiles(x, num_quantiles=4, interpolation='linear')
==> [ 0. , 2.5, 5. , 7.5, 10. ]
tfp.stats.quantiles(x, num_quantiles=4, interpolation='lower')
==> [ 0., 2., 5., 7., 10.]
# Get deciles of columns of an R x C data set.
data = load_my_columnar_data(...)
tfp.stats.quantiles(data, num_quantiles=10)
==> Shape [11, C] Tensor
```
"""
with tf.compat.v1.name_scope(
name, 'quantiles', values=[x, num_quantiles, axis]):
x = tf.convert_to_tensor(value=x, name='x')
return percentile(
x,
q=tf.linspace(
# percentile casts q to float64 before using it...so may as well use
# float64 here. Note that using x.dtype won't work with linspace
            # if x is integral type (which is another motivation for hard-coding
# float64).
tf.convert_to_tensor(value=0, dtype=tf.float64),
tf.convert_to_tensor(value=100, dtype=tf.float64),
num=num_quantiles + 1),
axis=axis,
interpolation=interpolation,
keep_dims=keep_dims,
validate_args=validate_args,
preserve_gradients=False)
|
def quantiles(x,
num_quantiles,
axis=None,
interpolation=None,
keep_dims=False,
validate_args=False,
name=None):
"""Compute quantiles of `x` along `axis`.
The quantiles of a distribution are cut points dividing the range into
intervals with equal probabilities.
Given a vector `x` of samples, this function estimates the cut points by
returning `num_quantiles + 1` cut points, `(c0, ..., cn)`, such that, roughly
speaking, equal number of sample points lie in the `num_quantiles` intervals
`[c0, c1), [c1, c2), ..., [c_{n-1}, cn]`. That is,
* About `1 / n` fraction of the data lies in `[c_{k-1}, c_k)`, `k = 1, ..., n`
* About `k / n` fraction of the data lies below `c_k`.
* `c0` is the sample minimum and `cn` is the maximum.
The exact number of data points in each interval depends on the size of
`x` (e.g. whether the size is divisible by `n`) and the `interpolation` kwarg.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,
`x` must have statically known number of dimensions.
num_quantiles: Scalar `integer` `Tensor`. The number of intervals the
returned `num_quantiles + 1` cut points divide the range into.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. The
axis that index independent samples over which to return the desired
percentile. If `None` (the default), treat every dimension as a sample
dimension, returning a scalar.
interpolation : {'nearest', 'linear', 'lower', 'higher', 'midpoint'}.
Default value: 'nearest'. This specifies the interpolation method to
use when the fractions `k / n` lie between two data points `i < j`:
* linear: i + (j - i) * fraction, where fraction is the fractional part
of the index surrounded by i and j.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j`, whichever is nearest.
* midpoint: (i + j) / 2. `linear` and `midpoint` interpolation do not
work with integer dtypes.
keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1
If `False`, the last dimension is removed from the output shape.
validate_args: Whether to add runtime checks of argument validity. If
False, and arguments are incorrect, correct behavior is not guaranteed.
    name: A Python string name to give this `Op`. Default is 'quantiles'
Returns:
cut_points: A `rank(x) + 1 - len(axis)` dimensional `Tensor` with same
`dtype` as `x` and shape `[num_quantiles + 1, ...]` where the trailing shape
is that of `x` without the dimensions in `axis` (unless `keep_dims is True`)
Raises:
ValueError: If argument 'interpolation' is not an allowed type.
ValueError: If interpolation type not compatible with `dtype`.
#### Examples
```python
# Get quartiles of x with various interpolation choices.
x = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
tfp.stats.quantiles(x, num_quantiles=4, interpolation='nearest')
==> [ 0., 2., 5., 8., 10.]
tfp.stats.quantiles(x, num_quantiles=4, interpolation='linear')
==> [ 0. , 2.5, 5. , 7.5, 10. ]
tfp.stats.quantiles(x, num_quantiles=4, interpolation='lower')
==> [ 0., 2., 5., 7., 10.]
# Get deciles of columns of an R x C data set.
data = load_my_columnar_data(...)
tfp.stats.quantiles(data, num_quantiles=10)
==> Shape [11, C] Tensor
```
"""
with tf.compat.v1.name_scope(
name, 'quantiles', values=[x, num_quantiles, axis]):
x = tf.convert_to_tensor(value=x, name='x')
return percentile(
x,
q=tf.linspace(
# percentile casts q to float64 before using it...so may as well use
# float64 here. Note that using x.dtype won't work with linspace
            # if x is integral type (which is another motivation for hard-coding
# float64).
tf.convert_to_tensor(value=0, dtype=tf.float64),
tf.convert_to_tensor(value=100, dtype=tf.float64),
num=num_quantiles + 1),
axis=axis,
interpolation=interpolation,
keep_dims=keep_dims,
validate_args=validate_args,
preserve_gradients=False)
|
[
"Compute",
"quantiles",
"of",
"x",
"along",
"axis",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L626-L723
|
[
"def",
"quantiles",
"(",
"x",
",",
"num_quantiles",
",",
"axis",
"=",
"None",
",",
"interpolation",
"=",
"None",
",",
"keep_dims",
"=",
"False",
",",
"validate_args",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'quantiles'",
",",
"values",
"=",
"[",
"x",
",",
"num_quantiles",
",",
"axis",
"]",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"'x'",
")",
"return",
"percentile",
"(",
"x",
",",
"q",
"=",
"tf",
".",
"linspace",
"(",
"# percentile casts q to float64 before using it...so may as well use",
"# float64 here. Note that using x.dtype won't work with linspace",
"# if x is integral type (which is anothe motivation for hard-coding",
"# float64).",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"0",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
",",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"100",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
",",
"num",
"=",
"num_quantiles",
"+",
"1",
")",
",",
"axis",
"=",
"axis",
",",
"interpolation",
"=",
"interpolation",
",",
"keep_dims",
"=",
"keep_dims",
",",
"validate_args",
"=",
"validate_args",
",",
"preserve_gradients",
"=",
"False",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
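Illustrative aside (not part of the dataset record above): `quantiles` just evaluates `percentile` at `num_quantiles + 1` evenly spaced `q` values between 0 and 100. A minimal NumPy cross-check of the docstring's 'linear' example, assuming nothing beyond NumPy:

```python
import numpy as np

# Quartiles of 0..10: percentile at q = [0, 25, 50, 75, 100] with NumPy's
# default 'linear' interpolation, matching the 'linear' example above.
x = np.arange(11, dtype=np.float64)
q = np.linspace(0.0, 100.0, num=4 + 1)
print(np.percentile(x, q))  # [ 0.   2.5  5.   7.5 10. ]
```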
test
|
_get_static_ndims
|
Get static number of dimensions and assert that some expectations are met.
This function returns the number of dimensions 'ndims' of x, as a Python int.
The optional expect arguments are used to check the ndims of x, but this is
only done if the static ndims of x is not None.
Args:
x: A Tensor.
expect_static: Expect `x` to have statically defined `ndims`.
expect_ndims: Optional Python integer. If provided, assert that x has
number of dimensions equal to this.
expect_ndims_no_more_than: Optional Python integer. If provided, assert
that x has no more than this many dimensions.
expect_ndims_at_least: Optional Python integer. If provided, assert that x
has at least this many dimensions.
Returns:
ndims: A Python integer.
Raises:
ValueError: If any of the expectations above are violated.
|
tensorflow_probability/python/stats/quantiles.py
|
def _get_static_ndims(x,
expect_static=False,
expect_ndims=None,
expect_ndims_no_more_than=None,
expect_ndims_at_least=None):
"""Get static number of dimensions and assert that some expectations are met.
This function returns the number of dimensions 'ndims' of x, as a Python int.
The optional expect arguments are used to check the ndims of x, but this is
only done if the static ndims of x is not None.
Args:
x: A Tensor.
expect_static: Expect `x` to have statically defined `ndims`.
expect_ndims: Optional Python integer. If provided, assert that x has
number of dimensions equal to this.
expect_ndims_no_more_than: Optional Python integer. If provided, assert
that x has no more than this many dimensions.
expect_ndims_at_least: Optional Python integer. If provided, assert that x
has at least this many dimensions.
Returns:
ndims: A Python integer.
Raises:
ValueError: If any of the expectations above are violated.
"""
ndims = x.shape.ndims
if ndims is None:
shape_const = tf.get_static_value(tf.shape(input=x))
if shape_const is not None:
ndims = shape_const.ndim
if ndims is None:
if expect_static:
raise ValueError(
          'Expected argument `x` to have statically defined `ndims`. Found: %s' %
x)
return
if expect_ndims is not None:
ndims_message = ('Expected argument `x` to have ndims %s. Found tensor %s'
% (expect_ndims, x))
if ndims != expect_ndims:
raise ValueError(ndims_message)
if expect_ndims_at_least is not None:
ndims_at_least_message = (
'Expected argument `x` to have ndims >= %d. Found tensor %s' %
(expect_ndims_at_least, x))
if ndims < expect_ndims_at_least:
raise ValueError(ndims_at_least_message)
if expect_ndims_no_more_than is not None:
ndims_no_more_than_message = (
'Expected argument `x` to have ndims <= %d. Found tensor %s' %
(expect_ndims_no_more_than, x))
if ndims > expect_ndims_no_more_than:
raise ValueError(ndims_no_more_than_message)
return ndims
|
def _get_static_ndims(x,
expect_static=False,
expect_ndims=None,
expect_ndims_no_more_than=None,
expect_ndims_at_least=None):
"""Get static number of dimensions and assert that some expectations are met.
This function returns the number of dimensions 'ndims' of x, as a Python int.
The optional expect arguments are used to check the ndims of x, but this is
only done if the static ndims of x is not None.
Args:
x: A Tensor.
expect_static: Expect `x` to have statically defined `ndims`.
expect_ndims: Optional Python integer. If provided, assert that x has
number of dimensions equal to this.
expect_ndims_no_more_than: Optional Python integer. If provided, assert
that x has no more than this many dimensions.
expect_ndims_at_least: Optional Python integer. If provided, assert that x
has at least this many dimensions.
Returns:
ndims: A Python integer.
Raises:
ValueError: If any of the expectations above are violated.
"""
ndims = x.shape.ndims
if ndims is None:
shape_const = tf.get_static_value(tf.shape(input=x))
if shape_const is not None:
ndims = shape_const.ndim
if ndims is None:
if expect_static:
raise ValueError(
          'Expected argument `x` to have statically defined `ndims`. Found: %s' %
x)
return
if expect_ndims is not None:
ndims_message = ('Expected argument `x` to have ndims %s. Found tensor %s'
% (expect_ndims, x))
if ndims != expect_ndims:
raise ValueError(ndims_message)
if expect_ndims_at_least is not None:
ndims_at_least_message = (
'Expected argument `x` to have ndims >= %d. Found tensor %s' %
(expect_ndims_at_least, x))
if ndims < expect_ndims_at_least:
raise ValueError(ndims_at_least_message)
if expect_ndims_no_more_than is not None:
ndims_no_more_than_message = (
'Expected argument `x` to have ndims <= %d. Found tensor %s' %
(expect_ndims_no_more_than, x))
if ndims > expect_ndims_no_more_than:
raise ValueError(ndims_no_more_than_message)
return ndims
|
[
"Get",
"static",
"number",
"of",
"dimensions",
"and",
"assert",
"that",
"some",
"expectations",
"are",
"met",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L726-L787
|
[
"def",
"_get_static_ndims",
"(",
"x",
",",
"expect_static",
"=",
"False",
",",
"expect_ndims",
"=",
"None",
",",
"expect_ndims_no_more_than",
"=",
"None",
",",
"expect_ndims_at_least",
"=",
"None",
")",
":",
"ndims",
"=",
"x",
".",
"shape",
".",
"ndims",
"if",
"ndims",
"is",
"None",
":",
"shape_const",
"=",
"tf",
".",
"get_static_value",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
")",
"if",
"shape_const",
"is",
"not",
"None",
":",
"ndims",
"=",
"shape_const",
".",
"ndim",
"if",
"ndims",
"is",
"None",
":",
"if",
"expect_static",
":",
"raise",
"ValueError",
"(",
"'Expected argument `x` to have statically defined `ndims`. Found: '",
"%",
"x",
")",
"return",
"if",
"expect_ndims",
"is",
"not",
"None",
":",
"ndims_message",
"=",
"(",
"'Expected argument `x` to have ndims %s. Found tensor %s'",
"%",
"(",
"expect_ndims",
",",
"x",
")",
")",
"if",
"ndims",
"!=",
"expect_ndims",
":",
"raise",
"ValueError",
"(",
"ndims_message",
")",
"if",
"expect_ndims_at_least",
"is",
"not",
"None",
":",
"ndims_at_least_message",
"=",
"(",
"'Expected argument `x` to have ndims >= %d. Found tensor %s'",
"%",
"(",
"expect_ndims_at_least",
",",
"x",
")",
")",
"if",
"ndims",
"<",
"expect_ndims_at_least",
":",
"raise",
"ValueError",
"(",
"ndims_at_least_message",
")",
"if",
"expect_ndims_no_more_than",
"is",
"not",
"None",
":",
"ndims_no_more_than_message",
"=",
"(",
"'Expected argument `x` to have ndims <= %d. Found tensor %s'",
"%",
"(",
"expect_ndims_no_more_than",
",",
"x",
")",
")",
"if",
"ndims",
">",
"expect_ndims_no_more_than",
":",
"raise",
"ValueError",
"(",
"ndims_no_more_than_message",
")",
"return",
"ndims"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
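Illustrative aside (not part of the record): `_get_static_ndims` only enforces its `expect_*` checks when the rank is statically known. A standalone sketch of the same validation rules applied to NumPy's always-static `ndim`; the helper name and signature here are made up for illustration:

```python
import numpy as np

def check_ndims(ndims, expect_ndims=None, at_least=None, no_more_than=None):
    # Same validation rules as `_get_static_ndims`, given a known rank.
    if expect_ndims is not None and ndims != expect_ndims:
        raise ValueError('Expected ndims == %d, found %d' % (expect_ndims, ndims))
    if at_least is not None and ndims < at_least:
        raise ValueError('Expected ndims >= %d, found %d' % (at_least, ndims))
    if no_more_than is not None and ndims > no_more_than:
        raise ValueError('Expected ndims <= %d, found %d' % (no_more_than, ndims))
    return ndims

x = np.zeros([3, 4])
print(check_ndims(x.ndim, at_least=1, no_more_than=2))  # 2
```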
test
|
_get_best_effort_ndims
|
Get static ndims if possible. Fallback on `tf.rank(x)`.
|
tensorflow_probability/python/stats/quantiles.py
|
def _get_best_effort_ndims(x,
expect_ndims=None,
expect_ndims_at_least=None,
expect_ndims_no_more_than=None):
"""Get static ndims if possible. Fallback on `tf.rank(x)`."""
ndims_static = _get_static_ndims(
x,
expect_ndims=expect_ndims,
expect_ndims_at_least=expect_ndims_at_least,
expect_ndims_no_more_than=expect_ndims_no_more_than)
if ndims_static is not None:
return ndims_static
return tf.rank(x)
|
def _get_best_effort_ndims(x,
expect_ndims=None,
expect_ndims_at_least=None,
expect_ndims_no_more_than=None):
"""Get static ndims if possible. Fallback on `tf.rank(x)`."""
ndims_static = _get_static_ndims(
x,
expect_ndims=expect_ndims,
expect_ndims_at_least=expect_ndims_at_least,
expect_ndims_no_more_than=expect_ndims_no_more_than)
if ndims_static is not None:
return ndims_static
return tf.rank(x)
|
[
"Get",
"static",
"ndims",
"if",
"possible",
".",
"Fallback",
"on",
"tf",
".",
"rank",
"(",
"x",
")",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L790-L802
|
[
"def",
"_get_best_effort_ndims",
"(",
"x",
",",
"expect_ndims",
"=",
"None",
",",
"expect_ndims_at_least",
"=",
"None",
",",
"expect_ndims_no_more_than",
"=",
"None",
")",
":",
"ndims_static",
"=",
"_get_static_ndims",
"(",
"x",
",",
"expect_ndims",
"=",
"expect_ndims",
",",
"expect_ndims_at_least",
"=",
"expect_ndims_at_least",
",",
"expect_ndims_no_more_than",
"=",
"expect_ndims_no_more_than",
")",
"if",
"ndims_static",
"is",
"not",
"None",
":",
"return",
"ndims_static",
"return",
"tf",
".",
"rank",
"(",
"x",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_insert_back_keep_dims
|
Insert the dims in `axis` back as singletons after being removed.
Args:
x: `Tensor`.
axis: Python list of integers.
Returns:
`Tensor` with same values as `x`, but additional singleton dimensions.
|
tensorflow_probability/python/stats/quantiles.py
|
def _insert_back_keep_dims(x, axis):
"""Insert the dims in `axis` back as singletons after being removed.
Args:
x: `Tensor`.
axis: Python list of integers.
Returns:
`Tensor` with same values as `x`, but additional singleton dimensions.
"""
for i in sorted(axis):
x = tf.expand_dims(x, axis=i)
return x
|
def _insert_back_keep_dims(x, axis):
"""Insert the dims in `axis` back as singletons after being removed.
Args:
x: `Tensor`.
axis: Python list of integers.
Returns:
`Tensor` with same values as `x`, but additional singleton dimensions.
"""
for i in sorted(axis):
x = tf.expand_dims(x, axis=i)
return x
|
[
"Insert",
"the",
"dims",
"in",
"axis",
"back",
"as",
"singletons",
"after",
"being",
"removed",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L805-L817
|
[
"def",
"_insert_back_keep_dims",
"(",
"x",
",",
"axis",
")",
":",
"for",
"i",
"in",
"sorted",
"(",
"axis",
")",
":",
"x",
"=",
"tf",
".",
"expand_dims",
"(",
"x",
",",
"axis",
"=",
"i",
")",
"return",
"x"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
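Illustrative aside (not part of the record): a NumPy sketch of the `keep_dims`-restoring idea used by `_insert_back_keep_dims`, reducing over some axes and then re-inserting them as singletons in ascending order, exactly as the loop above does:

```python
import numpy as np

x = np.random.randn(2, 3, 4)
axis = [0, 2]

reduced = x.mean(axis=tuple(axis))   # shape (3,)
for i in sorted(axis):               # re-insert reduced dims, smallest index first
    reduced = np.expand_dims(reduced, axis=i)
print(reduced.shape)                 # (1, 3, 1)
```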
test
|
_make_static_axis_non_negative_list
|
Convert possibly negatively indexed axis to non-negative list of ints.
Args:
axis: Integer Tensor.
ndims: Number of dimensions into which axis indexes.
Returns:
A list of non-negative Python integers.
Raises:
ValueError: If `axis` is not statically defined.
|
tensorflow_probability/python/stats/quantiles.py
|
def _make_static_axis_non_negative_list(axis, ndims):
"""Convert possibly negatively indexed axis to non-negative list of ints.
Args:
axis: Integer Tensor.
ndims: Number of dimensions into which axis indexes.
Returns:
A list of non-negative Python integers.
Raises:
ValueError: If `axis` is not statically defined.
"""
axis = distribution_util.make_non_negative_axis(axis, ndims)
axis_const = tf.get_static_value(axis)
if axis_const is None:
raise ValueError(
'Expected argument `axis` to be statically available. Found: %s' %
axis)
# Make at least 1-D.
axis = axis_const + np.zeros([1], dtype=axis_const.dtype)
return list(int(dim) for dim in axis)
|
def _make_static_axis_non_negative_list(axis, ndims):
"""Convert possibly negatively indexed axis to non-negative list of ints.
Args:
axis: Integer Tensor.
ndims: Number of dimensions into which axis indexes.
Returns:
A list of non-negative Python integers.
Raises:
ValueError: If `axis` is not statically defined.
"""
axis = distribution_util.make_non_negative_axis(axis, ndims)
axis_const = tf.get_static_value(axis)
if axis_const is None:
raise ValueError(
'Expected argument `axis` to be statically available. Found: %s' %
axis)
# Make at least 1-D.
axis = axis_const + np.zeros([1], dtype=axis_const.dtype)
return list(int(dim) for dim in axis)
|
[
"Convert",
"possibly",
"negatively",
"indexed",
"axis",
"to",
"non",
"-",
"negative",
"list",
"of",
"ints",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L820-L844
|
[
"def",
"_make_static_axis_non_negative_list",
"(",
"axis",
",",
"ndims",
")",
":",
"axis",
"=",
"distribution_util",
".",
"make_non_negative_axis",
"(",
"axis",
",",
"ndims",
")",
"axis_const",
"=",
"tf",
".",
"get_static_value",
"(",
"axis",
")",
"if",
"axis_const",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Expected argument `axis` to be statically available. Found: %s'",
"%",
"axis",
")",
"# Make at least 1-D.",
"axis",
"=",
"axis_const",
"+",
"np",
".",
"zeros",
"(",
"[",
"1",
"]",
",",
"dtype",
"=",
"axis_const",
".",
"dtype",
")",
"return",
"list",
"(",
"int",
"(",
"dim",
")",
"for",
"dim",
"in",
"axis",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
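Illustrative aside (not part of the record): the helper above additionally insists that `axis` be statically known and raises otherwise; the arithmetic itself is the usual negative-axis wrap-around. A plain-Python sketch with a made-up helper name:

```python
def non_negative_axis_list(axis, ndims):
    # Wrap negative axes into [0, ndims), mirroring the conversion above.
    axes = [axis] if isinstance(axis, int) else list(axis)
    return [a + ndims if a < 0 else a for a in axes]

print(non_negative_axis_list(-1, ndims=4))       # [3]
print(non_negative_axis_list([0, -2], ndims=4))  # [0, 2]
```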
test
|
_move_dims_to_flat_end
|
Move dims corresponding to `axis` in `x` to the end, then flatten.
Args:
x: `Tensor` with shape `[B0,B1,...,Bb]`.
axis: Python list of indices into dimensions of `x`.
x_ndims: Python integer holding number of dimensions in `x`.
right_end: Python bool. Whether to move dims to the right end (else left).
Returns:
`Tensor` with value from `x` and dims in `axis` moved to end into one single
dimension.
|
tensorflow_probability/python/stats/quantiles.py
|
def _move_dims_to_flat_end(x, axis, x_ndims, right_end=True):
"""Move dims corresponding to `axis` in `x` to the end, then flatten.
Args:
x: `Tensor` with shape `[B0,B1,...,Bb]`.
axis: Python list of indices into dimensions of `x`.
x_ndims: Python integer holding number of dimensions in `x`.
right_end: Python bool. Whether to move dims to the right end (else left).
Returns:
`Tensor` with value from `x` and dims in `axis` moved to end into one single
dimension.
"""
if not axis:
return x
# Suppose x.shape = [a, b, c, d]
# Suppose axis = [1, 3]
# other_dims = [0, 2] in example above.
other_dims = sorted(set(range(x_ndims)).difference(axis))
# x_permed.shape = [a, c, b, d]
perm = other_dims + list(axis) if right_end else list(axis) + other_dims
x_permed = tf.transpose(a=x, perm=perm)
if x.shape.is_fully_defined():
x_shape = x.shape.as_list()
# other_shape = [a, c], end_shape = [b * d]
other_shape = [x_shape[i] for i in other_dims]
end_shape = [np.prod([x_shape[i] for i in axis])]
full_shape = (
other_shape + end_shape if right_end else end_shape + other_shape)
else:
other_shape = tf.gather(tf.shape(input=x), other_dims)
full_shape = tf.concat(
[other_shape, [-1]] if right_end else [[-1], other_shape], axis=0)
return tf.reshape(x_permed, shape=full_shape)
|
def _move_dims_to_flat_end(x, axis, x_ndims, right_end=True):
"""Move dims corresponding to `axis` in `x` to the end, then flatten.
Args:
x: `Tensor` with shape `[B0,B1,...,Bb]`.
axis: Python list of indices into dimensions of `x`.
x_ndims: Python integer holding number of dimensions in `x`.
right_end: Python bool. Whether to move dims to the right end (else left).
Returns:
`Tensor` with value from `x` and dims in `axis` moved to end into one single
dimension.
"""
if not axis:
return x
# Suppose x.shape = [a, b, c, d]
# Suppose axis = [1, 3]
# other_dims = [0, 2] in example above.
other_dims = sorted(set(range(x_ndims)).difference(axis))
# x_permed.shape = [a, c, b, d]
perm = other_dims + list(axis) if right_end else list(axis) + other_dims
x_permed = tf.transpose(a=x, perm=perm)
if x.shape.is_fully_defined():
x_shape = x.shape.as_list()
# other_shape = [a, c], end_shape = [b * d]
other_shape = [x_shape[i] for i in other_dims]
end_shape = [np.prod([x_shape[i] for i in axis])]
full_shape = (
other_shape + end_shape if right_end else end_shape + other_shape)
else:
other_shape = tf.gather(tf.shape(input=x), other_dims)
full_shape = tf.concat(
[other_shape, [-1]] if right_end else [[-1], other_shape], axis=0)
return tf.reshape(x_permed, shape=full_shape)
|
[
"Move",
"dims",
"corresponding",
"to",
"axis",
"in",
"x",
"to",
"the",
"end",
"then",
"flatten",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L847-L884
|
[
"def",
"_move_dims_to_flat_end",
"(",
"x",
",",
"axis",
",",
"x_ndims",
",",
"right_end",
"=",
"True",
")",
":",
"if",
"not",
"axis",
":",
"return",
"x",
"# Suppose x.shape = [a, b, c, d]",
"# Suppose axis = [1, 3]",
"# other_dims = [0, 2] in example above.",
"other_dims",
"=",
"sorted",
"(",
"set",
"(",
"range",
"(",
"x_ndims",
")",
")",
".",
"difference",
"(",
"axis",
")",
")",
"# x_permed.shape = [a, c, b, d]",
"perm",
"=",
"other_dims",
"+",
"list",
"(",
"axis",
")",
"if",
"right_end",
"else",
"list",
"(",
"axis",
")",
"+",
"other_dims",
"x_permed",
"=",
"tf",
".",
"transpose",
"(",
"a",
"=",
"x",
",",
"perm",
"=",
"perm",
")",
"if",
"x",
".",
"shape",
".",
"is_fully_defined",
"(",
")",
":",
"x_shape",
"=",
"x",
".",
"shape",
".",
"as_list",
"(",
")",
"# other_shape = [a, c], end_shape = [b * d]",
"other_shape",
"=",
"[",
"x_shape",
"[",
"i",
"]",
"for",
"i",
"in",
"other_dims",
"]",
"end_shape",
"=",
"[",
"np",
".",
"prod",
"(",
"[",
"x_shape",
"[",
"i",
"]",
"for",
"i",
"in",
"axis",
"]",
")",
"]",
"full_shape",
"=",
"(",
"other_shape",
"+",
"end_shape",
"if",
"right_end",
"else",
"end_shape",
"+",
"other_shape",
")",
"else",
":",
"other_shape",
"=",
"tf",
".",
"gather",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
",",
"other_dims",
")",
"full_shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"other_shape",
",",
"[",
"-",
"1",
"]",
"]",
"if",
"right_end",
"else",
"[",
"[",
"-",
"1",
"]",
",",
"other_shape",
"]",
",",
"axis",
"=",
"0",
")",
"return",
"tf",
".",
"reshape",
"(",
"x_permed",
",",
"shape",
"=",
"full_shape",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
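Illustrative aside (not part of the record): the transpose-then-reshape trick in `_move_dims_to_flat_end` is easiest to see on a concrete shape. A NumPy sketch of the `right_end=True` case:

```python
import numpy as np

x = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)  # shape [a, b, c, d]
axis = [1, 3]                                     # flatten b and d into one dim

other = sorted(set(range(x.ndim)) - set(axis))    # [0, 2]
y = np.transpose(x, other + axis)                 # shape (2, 4, 3, 5)
y = y.reshape(y.shape[:len(other)] + (-1,))       # shape (2, 4, 15)
print(y.shape)                                    # (2, 4, 15)
```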
test
|
_sort_tensor
|
Use `top_k` to sort a `Tensor` along the last dimension.
|
tensorflow_probability/python/stats/quantiles.py
|
def _sort_tensor(tensor):
"""Use `top_k` to sort a `Tensor` along the last dimension."""
sorted_, _ = tf.nn.top_k(tensor, k=tf.shape(input=tensor)[-1])
sorted_.set_shape(tensor.shape)
return sorted_
|
def _sort_tensor(tensor):
"""Use `top_k` to sort a `Tensor` along the last dimension."""
sorted_, _ = tf.nn.top_k(tensor, k=tf.shape(input=tensor)[-1])
sorted_.set_shape(tensor.shape)
return sorted_
|
[
"Use",
"top_k",
"to",
"sort",
"a",
"Tensor",
"along",
"the",
"last",
"dimension",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L887-L891
|
[
"def",
"_sort_tensor",
"(",
"tensor",
")",
":",
"sorted_",
",",
"_",
"=",
"tf",
".",
"nn",
".",
"top_k",
"(",
"tensor",
",",
"k",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"tensor",
")",
"[",
"-",
"1",
"]",
")",
"sorted_",
".",
"set_shape",
"(",
"tensor",
".",
"shape",
")",
"return",
"sorted_"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
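Illustrative aside (not part of the record): `tf.nn.top_k` returns the k largest entries in descending order, so `_sort_tensor` is a descending sort along the last axis. A NumPy equivalent for reference:

```python
import numpy as np

x = np.array([[3., 1., 2.],
              [0., 5., 4.]])
# Descending sort along the last axis, matching top_k with k = last-dim size.
print(-np.sort(-x, axis=-1))  # [[3. 2. 1.]
                              #  [5. 4. 0.]]
```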
test
|
Sum.make_component_state_space_models
|
Build an ordered list of Distribution instances for component models.
Args:
num_timesteps: Python `int` number of timesteps to model.
param_vals: a list of `Tensor` parameter values in order corresponding to
`self.parameters`, or a dict mapping from parameter names to values.
initial_step: optional `int` specifying the initial timestep to model.
This is relevant when the model contains time-varying components,
e.g., holidays or seasonality.
Returns:
component_ssms: a Python list of `LinearGaussianStateSpaceModel`
Distribution objects, in order corresponding to `self.components`.
|
tensorflow_probability/python/sts/sum.py
|
def make_component_state_space_models(self,
num_timesteps,
param_vals,
initial_step=0):
"""Build an ordered list of Distribution instances for component models.
Args:
num_timesteps: Python `int` number of timesteps to model.
param_vals: a list of `Tensor` parameter values in order corresponding to
`self.parameters`, or a dict mapping from parameter names to values.
initial_step: optional `int` specifying the initial timestep to model.
This is relevant when the model contains time-varying components,
e.g., holidays or seasonality.
Returns:
component_ssms: a Python list of `LinearGaussianStateSpaceModel`
Distribution objects, in order corresponding to `self.components`.
"""
with tf.compat.v1.name_scope('make_component_state_space_models'):
# List the model parameters in canonical order
param_map = self._canonicalize_param_vals_as_map(param_vals)
param_vals_list = [param_map[p.name] for p in self.parameters]
# Build SSMs for each component model. We process the components in
# canonical order, extracting the parameters for each component from the
# (ordered) list of parameters.
remaining_param_vals = param_vals_list[1:]
component_ssms = []
for component in self.components:
num_parameters = len(component.parameters)
component_param_vals = remaining_param_vals[:num_parameters]
remaining_param_vals = remaining_param_vals[num_parameters:]
component_ssms.append(
component.make_state_space_model(
num_timesteps,
param_vals=component_param_vals,
initial_step=initial_step))
return component_ssms
|
def make_component_state_space_models(self,
num_timesteps,
param_vals,
initial_step=0):
"""Build an ordered list of Distribution instances for component models.
Args:
num_timesteps: Python `int` number of timesteps to model.
param_vals: a list of `Tensor` parameter values in order corresponding to
`self.parameters`, or a dict mapping from parameter names to values.
initial_step: optional `int` specifying the initial timestep to model.
This is relevant when the model contains time-varying components,
e.g., holidays or seasonality.
Returns:
component_ssms: a Python list of `LinearGaussianStateSpaceModel`
Distribution objects, in order corresponding to `self.components`.
"""
with tf.compat.v1.name_scope('make_component_state_space_models'):
# List the model parameters in canonical order
param_map = self._canonicalize_param_vals_as_map(param_vals)
param_vals_list = [param_map[p.name] for p in self.parameters]
# Build SSMs for each component model. We process the components in
# canonical order, extracting the parameters for each component from the
# (ordered) list of parameters.
remaining_param_vals = param_vals_list[1:]
component_ssms = []
for component in self.components:
num_parameters = len(component.parameters)
component_param_vals = remaining_param_vals[:num_parameters]
remaining_param_vals = remaining_param_vals[num_parameters:]
component_ssms.append(
component.make_state_space_model(
num_timesteps,
param_vals=component_param_vals,
initial_step=initial_step))
return component_ssms
|
[
"Build",
"an",
"ordered",
"list",
"of",
"Distribution",
"instances",
"for",
"component",
"models",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/sum.py#L475-L516
|
[
"def",
"make_component_state_space_models",
"(",
"self",
",",
"num_timesteps",
",",
"param_vals",
",",
"initial_step",
"=",
"0",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"'make_component_state_space_models'",
")",
":",
"# List the model parameters in canonical order",
"param_map",
"=",
"self",
".",
"_canonicalize_param_vals_as_map",
"(",
"param_vals",
")",
"param_vals_list",
"=",
"[",
"param_map",
"[",
"p",
".",
"name",
"]",
"for",
"p",
"in",
"self",
".",
"parameters",
"]",
"# Build SSMs for each component model. We process the components in",
"# canonical order, extracting the parameters for each component from the",
"# (ordered) list of parameters.",
"remaining_param_vals",
"=",
"param_vals_list",
"[",
"1",
":",
"]",
"component_ssms",
"=",
"[",
"]",
"for",
"component",
"in",
"self",
".",
"components",
":",
"num_parameters",
"=",
"len",
"(",
"component",
".",
"parameters",
")",
"component_param_vals",
"=",
"remaining_param_vals",
"[",
":",
"num_parameters",
"]",
"remaining_param_vals",
"=",
"remaining_param_vals",
"[",
"num_parameters",
":",
"]",
"component_ssms",
".",
"append",
"(",
"component",
".",
"make_state_space_model",
"(",
"num_timesteps",
",",
"param_vals",
"=",
"component_param_vals",
",",
"initial_step",
"=",
"initial_step",
")",
")",
"return",
"component_ssms"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
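Illustrative aside (not part of the record): the loop above walks a flat, canonically ordered parameter list, skips the first entry (the `Sum` model's own parameter, which the component state-space models do not need), and hands each component its slice. A plain-Python sketch of just that slicing; the names and parameter counts below are hypothetical:

```python
# Hypothetical flat parameter list: one Sum-level value followed by each
# component's parameters in canonical order.
param_vals_list = ['sum_level_param', 'c1_p1', 'c1_p2', 'c2_p1']
component_num_params = [2, 1]          # component 1 has 2 params, component 2 has 1

remaining = param_vals_list[1:]        # drop the Sum-level parameter
per_component = []
for n in component_num_params:
    per_component.append(remaining[:n])  # this component's slice
    remaining = remaining[n:]            # advance past it
print(per_component)                   # [['c1_p1', 'c1_p2'], ['c2_p1']]
```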
test
|
amari_alpha
|
The Amari-alpha Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the Amari-alpha Csiszar-function is:
```none
f(u) = { -log(u) + (u - 1), alpha = 0
{ u log(u) - (u - 1), alpha = 1
{ [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)), otherwise
```
When `self_normalized = False` the `(u - 1)` terms are omitted.
Warning: when `alpha != 0` and/or `self_normalized = True` this function makes
non-log-space calculations and may therefore be numerically unstable for
`|logu| >> 0`.
For more information, see:
  A. Cichocki and S. Amari. "Families of Alpha- Beta- and Gamma-Divergences:
Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp.
1532-1568, 2010.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
alpha: `float`-like Python scalar. (See Mathematical Details for meaning.)
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
amari_alpha_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
Raises:
TypeError: if `alpha` is `None` or a `Tensor`.
TypeError: if `self_normalized` is `None` or a `Tensor`.
|
tensorflow_probability/python/vi/csiszar_divergence.py
|
def amari_alpha(logu, alpha=1., self_normalized=False, name=None):
"""The Amari-alpha Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the Amari-alpha Csiszar-function is:
```none
f(u) = { -log(u) + (u - 1), alpha = 0
{ u log(u) - (u - 1), alpha = 1
{ [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)), otherwise
```
When `self_normalized = False` the `(u - 1)` terms are omitted.
Warning: when `alpha != 0` and/or `self_normalized = True` this function makes
non-log-space calculations and may therefore be numerically unstable for
`|logu| >> 0`.
For more information, see:
  A. Cichocki and S. Amari. "Families of Alpha- Beta- and Gamma-Divergences:
Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp.
1532-1568, 2010.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
alpha: `float`-like Python scalar. (See Mathematical Details for meaning.)
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
amari_alpha_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
Raises:
TypeError: if `alpha` is `None` or a `Tensor`.
TypeError: if `self_normalized` is `None` or a `Tensor`.
"""
with tf.compat.v1.name_scope(name, "amari_alpha", [logu]):
if alpha is None or tf.is_tensor(alpha):
raise TypeError("`alpha` cannot be `None` or `Tensor` type.")
if (self_normalized is None or tf.is_tensor(self_normalized)):
raise TypeError("`self_normalized` cannot be `None` or `Tensor` type.")
logu = tf.convert_to_tensor(value=logu, name="logu")
if alpha == 0.:
f = -logu
elif alpha == 1.:
f = tf.exp(logu) * logu
else:
f = tf.math.expm1(alpha * logu) / (alpha * (alpha - 1.))
if not self_normalized:
return f
if alpha == 0.:
return f + tf.math.expm1(logu)
elif alpha == 1.:
return f - tf.math.expm1(logu)
else:
return f - tf.math.expm1(logu) / (alpha - 1.)
|
def amari_alpha(logu, alpha=1., self_normalized=False, name=None):
"""The Amari-alpha Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the Amari-alpha Csiszar-function is:
```none
f(u) = { -log(u) + (u - 1), alpha = 0
{ u log(u) - (u - 1), alpha = 1
{ [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)), otherwise
```
When `self_normalized = False` the `(u - 1)` terms are omitted.
Warning: when `alpha != 0` and/or `self_normalized = True` this function makes
non-log-space calculations and may therefore be numerically unstable for
`|logu| >> 0`.
For more information, see:
  A. Cichocki and S. Amari. "Families of Alpha- Beta- and Gamma-Divergences:
Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp.
1532-1568, 2010.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
alpha: `float`-like Python scalar. (See Mathematical Details for meaning.)
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
amari_alpha_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
Raises:
TypeError: if `alpha` is `None` or a `Tensor`.
TypeError: if `self_normalized` is `None` or a `Tensor`.
"""
with tf.compat.v1.name_scope(name, "amari_alpha", [logu]):
if alpha is None or tf.is_tensor(alpha):
raise TypeError("`alpha` cannot be `None` or `Tensor` type.")
if (self_normalized is None or tf.is_tensor(self_normalized)):
raise TypeError("`self_normalized` cannot be `None` or `Tensor` type.")
logu = tf.convert_to_tensor(value=logu, name="logu")
if alpha == 0.:
f = -logu
elif alpha == 1.:
f = tf.exp(logu) * logu
else:
f = tf.math.expm1(alpha * logu) / (alpha * (alpha - 1.))
if not self_normalized:
return f
if alpha == 0.:
return f + tf.math.expm1(logu)
elif alpha == 1.:
return f - tf.math.expm1(logu)
else:
return f - tf.math.expm1(logu) / (alpha - 1.)
|
[
"The",
"Amari",
"-",
"alpha",
"Csiszar",
"-",
"function",
"in",
"log",
"-",
"space",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L51-L118
|
[
"def",
"amari_alpha",
"(",
"logu",
",",
"alpha",
"=",
"1.",
",",
"self_normalized",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"\"amari_alpha\"",
",",
"[",
"logu",
"]",
")",
":",
"if",
"alpha",
"is",
"None",
"or",
"tf",
".",
"is_tensor",
"(",
"alpha",
")",
":",
"raise",
"TypeError",
"(",
"\"`alpha` cannot be `None` or `Tensor` type.\"",
")",
"if",
"(",
"self_normalized",
"is",
"None",
"or",
"tf",
".",
"is_tensor",
"(",
"self_normalized",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"`self_normalized` cannot be `None` or `Tensor` type.\"",
")",
"logu",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"logu",
",",
"name",
"=",
"\"logu\"",
")",
"if",
"alpha",
"==",
"0.",
":",
"f",
"=",
"-",
"logu",
"elif",
"alpha",
"==",
"1.",
":",
"f",
"=",
"tf",
".",
"exp",
"(",
"logu",
")",
"*",
"logu",
"else",
":",
"f",
"=",
"tf",
".",
"math",
".",
"expm1",
"(",
"alpha",
"*",
"logu",
")",
"/",
"(",
"alpha",
"*",
"(",
"alpha",
"-",
"1.",
")",
")",
"if",
"not",
"self_normalized",
":",
"return",
"f",
"if",
"alpha",
"==",
"0.",
":",
"return",
"f",
"+",
"tf",
".",
"math",
".",
"expm1",
"(",
"logu",
")",
"elif",
"alpha",
"==",
"1.",
":",
"return",
"f",
"-",
"tf",
".",
"math",
".",
"expm1",
"(",
"logu",
")",
"else",
":",
"return",
"f",
"-",
"tf",
".",
"math",
".",
"expm1",
"(",
"logu",
")",
"/",
"(",
"alpha",
"-",
"1.",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
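Illustrative aside (not part of the record): a NumPy transcription of the piecewise formula in the `amari_alpha` docstring, useful for checking values by hand. This is a sketch of the math only, not the TFP implementation (which works from `logu` via `expm1`):

```python
import numpy as np

def amari_alpha_np(logu, alpha, self_normalized=True):
    # Piecewise f(u) from the docstring, evaluated at u = exp(logu).
    u = np.exp(logu)
    if alpha == 0.0:
        f = -np.log(u)
    elif alpha == 1.0:
        f = u * np.log(u)
    else:
        f = (u**alpha - 1.0) / (alpha * (alpha - 1.0))
    if self_normalized:                  # add/subtract the (u - 1) terms
        if alpha == 0.0:
            f = f + (u - 1.0)
        elif alpha == 1.0:
            f = f - (u - 1.0)
        else:
            f = f - (u - 1.0) / (alpha - 1.0)
    return f

print(amari_alpha_np(0.0, alpha=0.5) == 0.0)   # True: f(u = 1) == 0 for any alpha
print(amari_alpha_np(np.log(2.0), alpha=0.5))  # ~0.3431
```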
test
|
kl_reverse
|
The reverse Kullback-Leibler Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the KL-reverse Csiszar-function is:
```none
f(u) = -log(u) + (u - 1)
```
When `self_normalized = False` the `(u - 1)` term is omitted.
Observe that as an f-Divergence, this Csiszar-function implies:
```none
D_f[p, q] = KL[q, p]
```
The KL is "reverse" because in maximum likelihood we think of minimizing `q`
as in `KL[p, q]`.
  Warning: when `self_normalized = True` this function makes non-log-space
calculations and may therefore be numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
kl_reverse_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at
`u = exp(logu)`.
Raises:
TypeError: if `self_normalized` is `None` or a `Tensor`.
|
tensorflow_probability/python/vi/csiszar_divergence.py
|
def kl_reverse(logu, self_normalized=False, name=None):
"""The reverse Kullback-Leibler Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the KL-reverse Csiszar-function is:
```none
f(u) = -log(u) + (u - 1)
```
When `self_normalized = False` the `(u - 1)` term is omitted.
Observe that as an f-Divergence, this Csiszar-function implies:
```none
D_f[p, q] = KL[q, p]
```
The KL is "reverse" because in maximum likelihood we think of minimizing `q`
as in `KL[p, q]`.
  Warning: when `self_normalized = True` this function makes non-log-space
calculations and may therefore be numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
kl_reverse_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at
`u = exp(logu)`.
Raises:
TypeError: if `self_normalized` is `None` or a `Tensor`.
"""
with tf.compat.v1.name_scope(name, "kl_reverse", [logu]):
return amari_alpha(logu, alpha=0., self_normalized=self_normalized)
|
def kl_reverse(logu, self_normalized=False, name=None):
"""The reverse Kullback-Leibler Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the KL-reverse Csiszar-function is:
```none
f(u) = -log(u) + (u - 1)
```
When `self_normalized = False` the `(u - 1)` term is omitted.
Observe that as an f-Divergence, this Csiszar-function implies:
```none
D_f[p, q] = KL[q, p]
```
The KL is "reverse" because in maximum likelihood we think of minimizing `q`
as in `KL[p, q]`.
  Warning: when `self_normalized = True` this function makes non-log-space
calculations and may therefore be numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
kl_reverse_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at
`u = exp(logu)`.
Raises:
TypeError: if `self_normalized` is `None` or a `Tensor`.
"""
with tf.compat.v1.name_scope(name, "kl_reverse", [logu]):
return amari_alpha(logu, alpha=0., self_normalized=self_normalized)
|
[
"The",
"reverse",
"Kullback",
"-",
"Leibler",
"Csiszar",
"-",
"function",
"in",
"log",
"-",
"space",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L121-L166
|
[
"def",
"kl_reverse",
"(",
"logu",
",",
"self_normalized",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"\"kl_reverse\"",
",",
"[",
"logu",
"]",
")",
":",
"return",
"amari_alpha",
"(",
"logu",
",",
"alpha",
"=",
"0.",
",",
"self_normalized",
"=",
"self_normalized",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
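Illustrative aside (not part of the record): with `self_normalized=False`, `kl_reverse` is `f(u) = -log(u)`, and the induced f-divergence `E_q[f(p(x)/q(x))]` equals `KL[q, p]`, as the docstring states. A small Monte-Carlo sanity check with two unit-variance Gaussians, `q = N(0, 1)` and `p = N(1, 1)`, for which `KL[q, p] = 0.5`, using only NumPy:

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(0.0, 1.0, size=200_000)    # samples from q = N(0, 1)
logu = -0.5 * (x - 1.0)**2 + 0.5 * x**2   # log p(x) - log q(x); constants cancel
print(np.mean(-logu))                     # approx 0.5 == KL[q, p]
```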
test
|
jensen_shannon
|
The Jensen-Shannon Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the Jensen-Shannon Csiszar-function is:
```none
f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
```
When `self_normalized = False` the `(u + 1) log(2)` term is omitted.
Observe that as an f-Divergence, this Csiszar-function implies:
```none
D_f[p, q] = KL[p, m] + KL[q, m]
m(x) = 0.5 p(x) + 0.5 q(x)
```
In a sense, this divergence is the "reverse" of the Arithmetic-Geometric
f-Divergence.
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
For more information, see:
Lin, J. "Divergence measures based on the Shannon entropy." IEEE Trans.
Inf. Th., 37, 145-151, 1991.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
jensen_shannon_of_u: `float`-like `Tensor` of the Csiszar-function
evaluated at `u = exp(logu)`.
|
tensorflow_probability/python/vi/csiszar_divergence.py
|
def jensen_shannon(logu, self_normalized=False, name=None):
"""The Jensen-Shannon Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the Jensen-Shannon Csiszar-function is:
```none
f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
```
When `self_normalized = False` the `(u + 1) log(2)` term is omitted.
Observe that as an f-Divergence, this Csiszar-function implies:
```none
D_f[p, q] = KL[p, m] + KL[q, m]
m(x) = 0.5 p(x) + 0.5 q(x)
```
In a sense, this divergence is the "reverse" of the Arithmetic-Geometric
f-Divergence.
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
For more information, see:
Lin, J. "Divergence measures based on the Shannon entropy." IEEE Trans.
Inf. Th., 37, 145-151, 1991.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
jensen_shannon_of_u: `float`-like `Tensor` of the Csiszar-function
evaluated at `u = exp(logu)`.
"""
with tf.compat.v1.name_scope(name, "jensen_shannon", [logu]):
logu = tf.convert_to_tensor(value=logu, name="logu")
npdt = logu.dtype.as_numpy_dtype
y = tf.nn.softplus(logu)
if self_normalized:
y -= np.log(2).astype(npdt)
return tf.exp(logu) * logu - (1. + tf.exp(logu)) * y
|
def jensen_shannon(logu, self_normalized=False, name=None):
"""The Jensen-Shannon Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the Jensen-Shannon Csiszar-function is:
```none
f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
```
When `self_normalized = False` the `(u + 1) log(2)` term is omitted.
Observe that as an f-Divergence, this Csiszar-function implies:
```none
D_f[p, q] = KL[p, m] + KL[q, m]
m(x) = 0.5 p(x) + 0.5 q(x)
```
In a sense, this divergence is the "reverse" of the Arithmetic-Geometric
f-Divergence.
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
For more information, see:
Lin, J. "Divergence measures based on the Shannon entropy." IEEE Trans.
Inf. Th., 37, 145-151, 1991.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
jensen_shannon_of_u: `float`-like `Tensor` of the Csiszar-function
evaluated at `u = exp(logu)`.
"""
with tf.compat.v1.name_scope(name, "jensen_shannon", [logu]):
logu = tf.convert_to_tensor(value=logu, name="logu")
npdt = logu.dtype.as_numpy_dtype
y = tf.nn.softplus(logu)
if self_normalized:
y -= np.log(2).astype(npdt)
return tf.exp(logu) * logu - (1. + tf.exp(logu)) * y
|
[
"The",
"Jensen",
"-",
"Shannon",
"Csiszar",
"-",
"function",
"in",
"log",
"-",
"space",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L217-L272
|
[
"def",
"jensen_shannon",
"(",
"logu",
",",
"self_normalized",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"\"jensen_shannon\"",
",",
"[",
"logu",
"]",
")",
":",
"logu",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"logu",
",",
"name",
"=",
"\"logu\"",
")",
"npdt",
"=",
"logu",
".",
"dtype",
".",
"as_numpy_dtype",
"y",
"=",
"tf",
".",
"nn",
".",
"softplus",
"(",
"logu",
")",
"if",
"self_normalized",
":",
"y",
"-=",
"np",
".",
"log",
"(",
"2",
")",
".",
"astype",
"(",
"npdt",
")",
"return",
"tf",
".",
"exp",
"(",
"logu",
")",
"*",
"logu",
"-",
"(",
"1.",
"+",
"tf",
".",
"exp",
"(",
"logu",
")",
")",
"*",
"y"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
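Illustrative aside (not part of the record): the implementation works through `softplus(logu) = log(1 + u)`, and the self-normalized branch then matches the docstring formula exactly. A NumPy check at a single point:

```python
import numpy as np

u = 2.0
logu = np.log(u)

# Code path: u * logu - (1 + u) * (softplus(logu) - log 2)
f_code = u * logu - (1.0 + u) * (np.logaddexp(0.0, logu) - np.log(2.0))
# Docstring: f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
f_doc = u * np.log(u) - (1.0 + u) * np.log(1.0 + u) + (u + 1.0) * np.log(2.0)
print(np.isclose(f_code, f_doc))  # True
```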
test
|
pearson
|
The Pearson Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Pearson Csiszar-function is:
```none
f(u) = (u - 1)**2
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
pearson_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at
`u = exp(logu)`.
|
tensorflow_probability/python/vi/csiszar_divergence.py
|
def pearson(logu, name=None):
"""The Pearson Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Pearson Csiszar-function is:
```none
f(u) = (u - 1)**2
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
pearson_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at
`u = exp(logu)`.
"""
with tf.compat.v1.name_scope(name, "pearson", [logu]):
logu = tf.convert_to_tensor(value=logu, name="logu")
return tf.square(tf.math.expm1(logu))
|
def pearson(logu, name=None):
"""The Pearson Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Pearson Csiszar-function is:
```none
f(u) = (u - 1)**2
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
pearson_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at
`u = exp(logu)`.
"""
with tf.compat.v1.name_scope(name, "pearson", [logu]):
logu = tf.convert_to_tensor(value=logu, name="logu")
return tf.square(tf.math.expm1(logu))
|
[
"The",
"Pearson",
"Csiszar",
"-",
"function",
"in",
"log",
"-",
"space",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L360-L389
|
[
"def",
"pearson",
"(",
"logu",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"\"pearson\"",
",",
"[",
"logu",
"]",
")",
":",
"logu",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"logu",
",",
"name",
"=",
"\"logu\"",
")",
"return",
"tf",
".",
"square",
"(",
"tf",
".",
"math",
".",
"expm1",
"(",
"logu",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
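Illustrative aside (not part of the record): `expm1(logu)` is exactly `u - 1`, so the implementation above is a numerically friendlier spelling of the docstring's `f(u) = (u - 1)**2`. NumPy check:

```python
import numpy as np

logu = np.array([-1.0, 0.0, 0.5])
print(np.square(np.expm1(logu)))      # as implemented above
print(np.square(np.exp(logu) - 1.0))  # naive form of (u - 1)**2; same values
```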
test
|
squared_hellinger
|
The Squared-Hellinger Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Squared-Hellinger Csiszar-function is:
```none
f(u) = (sqrt(u) - 1)**2
```
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
squared_hellinger_of_u: `float`-like `Tensor` of the Csiszar-function
evaluated at `u = exp(logu)`.
|
tensorflow_probability/python/vi/csiszar_divergence.py
|
def squared_hellinger(logu, name=None):
"""The Squared-Hellinger Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Squared-Hellinger Csiszar-function is:
```none
f(u) = (sqrt(u) - 1)**2
```
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
squared_hellinger_of_u: `float`-like `Tensor` of the Csiszar-function
evaluated at `u = exp(logu)`.
"""
with tf.compat.v1.name_scope(name, "squared_hellinger", [logu]):
logu = tf.convert_to_tensor(value=logu, name="logu")
return pearson(0.5 * logu)
|
def squared_hellinger(logu, name=None):
"""The Squared-Hellinger Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Squared-Hellinger Csiszar-function is:
```none
f(u) = (sqrt(u) - 1)**2
```
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
squared_hellinger_of_u: `float`-like `Tensor` of the Csiszar-function
evaluated at `u = exp(logu)`.
"""
with tf.compat.v1.name_scope(name, "squared_hellinger", [logu]):
logu = tf.convert_to_tensor(value=logu, name="logu")
return pearson(0.5 * logu)
|
[
"The",
"Squared",
"-",
"Hellinger",
"Csiszar",
"-",
"function",
"in",
"log",
"-",
"space",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L392-L424
|
[
"def",
"squared_hellinger",
"(",
"logu",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"\"squared_hellinger\"",
",",
"[",
"logu",
"]",
")",
":",
"logu",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"logu",
",",
"name",
"=",
"\"logu\"",
")",
"return",
"pearson",
"(",
"0.5",
"*",
"logu",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
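Illustrative aside (not part of the record): halving `logu` turns `u` into `sqrt(u)`, so `pearson(0.5 * logu)` is exactly the docstring's `(sqrt(u) - 1)**2`. NumPy check:

```python
import numpy as np

logu = np.array([-2.0, 0.0, 1.0])
u = np.exp(logu)
print(np.square(np.sqrt(u) - 1.0))      # docstring form
print(np.square(np.expm1(0.5 * logu)))  # pearson(0.5 * logu); same values
```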