| partition (stringclasses, 3 values) | func_name (stringlengths 1-134) | docstring (stringlengths 1-46.9k) | path (stringlengths 4-223) | original_string (stringlengths 75-104k) | code (stringlengths 75-104k) | docstring_tokens (listlengths 1-1.97k) | repo (stringlengths 7-55) | language (stringclasses, 1 value) | url (stringlengths 87-315) | code_tokens (listlengths 19-28.4k) | sha (stringlengths 40) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
_create_input_order
|
Returns a degree vectors for the input.
|
tensorflow_probability/python/bijectors/masked_autoregressive.py
|
def _create_input_order(input_size, input_order="left-to-right"):
"""Returns a degree vectors for the input."""
if isinstance(input_order, six.string_types):
if input_order == "left-to-right":
return np.arange(start=1, stop=input_size + 1)
elif input_order == "right-to-left":
return np.arange(start=input_size, stop=0, step=-1)
elif input_order == "random":
ret = np.arange(start=1, stop=input_size + 1)
np.random.shuffle(ret)
return ret
elif np.all(np.sort(input_order) == np.arange(1, input_size + 1)):
return np.array(input_order)
raise ValueError("Invalid input order: '{}'.".format(input_order))
|
def _create_input_order(input_size, input_order="left-to-right"):
"""Returns a degree vectors for the input."""
if isinstance(input_order, six.string_types):
if input_order == "left-to-right":
return np.arange(start=1, stop=input_size + 1)
elif input_order == "right-to-left":
return np.arange(start=input_size, stop=0, step=-1)
elif input_order == "random":
ret = np.arange(start=1, stop=input_size + 1)
np.random.shuffle(ret)
return ret
elif np.all(np.sort(input_order) == np.arange(1, input_size + 1)):
return np.array(input_order)
raise ValueError("Invalid input order: '{}'.".format(input_order))
|
[
"Returns",
"a",
"degree",
"vectors",
"for",
"the",
"input",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/masked_autoregressive.py#L886-L900
|
[
"def",
"_create_input_order",
"(",
"input_size",
",",
"input_order",
"=",
"\"left-to-right\"",
")",
":",
"if",
"isinstance",
"(",
"input_order",
",",
"six",
".",
"string_types",
")",
":",
"if",
"input_order",
"==",
"\"left-to-right\"",
":",
"return",
"np",
".",
"arange",
"(",
"start",
"=",
"1",
",",
"stop",
"=",
"input_size",
"+",
"1",
")",
"elif",
"input_order",
"==",
"\"right-to-left\"",
":",
"return",
"np",
".",
"arange",
"(",
"start",
"=",
"input_size",
",",
"stop",
"=",
"0",
",",
"step",
"=",
"-",
"1",
")",
"elif",
"input_order",
"==",
"\"random\"",
":",
"ret",
"=",
"np",
".",
"arange",
"(",
"start",
"=",
"1",
",",
"stop",
"=",
"input_size",
"+",
"1",
")",
"np",
".",
"random",
".",
"shuffle",
"(",
"ret",
")",
"return",
"ret",
"elif",
"np",
".",
"all",
"(",
"np",
".",
"sort",
"(",
"input_order",
")",
"==",
"np",
".",
"arange",
"(",
"1",
",",
"input_size",
"+",
"1",
")",
")",
":",
"return",
"np",
".",
"array",
"(",
"input_order",
")",
"raise",
"ValueError",
"(",
"\"Invalid input order: '{}'.\"",
".",
"format",
"(",
"input_order",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
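A minimal numpy sketch of what the three string options to `_create_input_order` above produce, using an arbitrary `input_size = 4`:

```python
import numpy as np

# Degree vectors produced by each string option, for input_size = 4.
input_size = 4
left_to_right = np.arange(start=1, stop=input_size + 1)       # [1 2 3 4]
right_to_left = np.arange(start=input_size, stop=0, step=-1)  # [4 3 2 1]
random_order = np.arange(start=1, stop=input_size + 1)
np.random.shuffle(random_order)                               # some permutation of 1..4
print(left_to_right, right_to_left, random_order)
```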
test
|
_create_degrees
|
Returns a list of degree vectors, one for each input and hidden layer.
A unit with degree d can only receive input from units with degree < d. Output
units always have the same degree as their associated input unit.
Args:
input_size: Number of inputs.
hidden_units: list with the number of hidden units per layer. It does not
include the output layer. Each hidden unit size must be at least the size
of length (otherwise autoregressivity is not possible).
input_order: Order of degrees to the input units: 'random', 'left-to-right',
'right-to-left', or an array of an explicit order. For example,
'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_degrees: Method for assigning degrees to the hidden units:
'equal', 'random'. If 'equal', hidden units in each layer are allocated
equally (up to a remainder term) to each degree. Default: 'equal'.
Raises:
ValueError: invalid input order.
ValueError: invalid hidden degrees.
|
tensorflow_probability/python/bijectors/masked_autoregressive.py
|
def _create_degrees(input_size,
hidden_units=None,
input_order="left-to-right",
hidden_degrees="equal"):
"""Returns a list of degree vectors, one for each input and hidden layer.
A unit with degree d can only receive input from units with degree < d. Output
units always have the same degree as their associated input unit.
Args:
input_size: Number of inputs.
hidden_units: list with the number of hidden units per layer. It does not
include the output layer. Each hidden unit size must be at least the size
of length (otherwise autoregressivity is not possible).
input_order: Order of degrees to the input units: 'random', 'left-to-right',
'right-to-left', or an array of an explicit order. For example,
'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_degrees: Method for assigning degrees to the hidden units:
'equal', 'random'. If 'equal', hidden units in each layer are allocated
equally (up to a remainder term) to each degree. Default: 'equal'.
Raises:
ValueError: invalid input order.
ValueError: invalid hidden degrees.
"""
input_order = _create_input_order(input_size, input_order)
degrees = [input_order]
if hidden_units is None:
hidden_units = []
for units in hidden_units:
if isinstance(hidden_degrees, six.string_types):
if hidden_degrees == "random":
# samples from: [low, high)
degrees.append(
np.random.randint(low=min(np.min(degrees[-1]), input_size - 1),
high=input_size,
size=units))
elif hidden_degrees == "equal":
min_degree = min(np.min(degrees[-1]), input_size - 1)
degrees.append(np.maximum(
min_degree,
# Evenly divide the range `[1, input_size - 1]` in to `units + 1`
# segments, and pick the boundaries between the segments as degrees.
np.ceil(np.arange(1, units + 1)
* (input_size - 1) / float(units + 1)).astype(np.int32)))
else:
raise ValueError('Invalid hidden order: "{}".'.format(hidden_degrees))
return degrees
|
def _create_degrees(input_size,
hidden_units=None,
input_order="left-to-right",
hidden_degrees="equal"):
"""Returns a list of degree vectors, one for each input and hidden layer.
A unit with degree d can only receive input from units with degree < d. Output
units always have the same degree as their associated input unit.
Args:
input_size: Number of inputs.
hidden_units: list with the number of hidden units per layer. It does not
include the output layer. Each hidden unit size must be at least the size
of length (otherwise autoregressivity is not possible).
input_order: Order of degrees to the input units: 'random', 'left-to-right',
'right-to-left', or an array of an explicit order. For example,
'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_degrees: Method for assigning degrees to the hidden units:
'equal', 'random'. If 'equal', hidden units in each layer are allocated
equally (up to a remainder term) to each degree. Default: 'equal'.
Raises:
ValueError: invalid input order.
ValueError: invalid hidden degrees.
"""
input_order = _create_input_order(input_size, input_order)
degrees = [input_order]
if hidden_units is None:
hidden_units = []
for units in hidden_units:
if isinstance(hidden_degrees, six.string_types):
if hidden_degrees == "random":
# samples from: [low, high)
degrees.append(
np.random.randint(low=min(np.min(degrees[-1]), input_size - 1),
high=input_size,
size=units))
elif hidden_degrees == "equal":
min_degree = min(np.min(degrees[-1]), input_size - 1)
degrees.append(np.maximum(
min_degree,
# Evenly divide the range `[1, input_size - 1]` in to `units + 1`
# segments, and pick the boundaries between the segments as degrees.
np.ceil(np.arange(1, units + 1)
* (input_size - 1) / float(units + 1)).astype(np.int32)))
else:
raise ValueError('Invalid hidden order: "{}".'.format(hidden_degrees))
return degrees
|
[
"Returns",
"a",
"list",
"of",
"degree",
"vectors",
"one",
"for",
"each",
"input",
"and",
"hidden",
"layer",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/masked_autoregressive.py#L903-L954
|
[
"def",
"_create_degrees",
"(",
"input_size",
",",
"hidden_units",
"=",
"None",
",",
"input_order",
"=",
"\"left-to-right\"",
",",
"hidden_degrees",
"=",
"\"equal\"",
")",
":",
"input_order",
"=",
"_create_input_order",
"(",
"input_size",
",",
"input_order",
")",
"degrees",
"=",
"[",
"input_order",
"]",
"if",
"hidden_units",
"is",
"None",
":",
"hidden_units",
"=",
"[",
"]",
"for",
"units",
"in",
"hidden_units",
":",
"if",
"isinstance",
"(",
"hidden_degrees",
",",
"six",
".",
"string_types",
")",
":",
"if",
"hidden_degrees",
"==",
"\"random\"",
":",
"# samples from: [low, high)",
"degrees",
".",
"append",
"(",
"np",
".",
"random",
".",
"randint",
"(",
"low",
"=",
"min",
"(",
"np",
".",
"min",
"(",
"degrees",
"[",
"-",
"1",
"]",
")",
",",
"input_size",
"-",
"1",
")",
",",
"high",
"=",
"input_size",
",",
"size",
"=",
"units",
")",
")",
"elif",
"hidden_degrees",
"==",
"\"equal\"",
":",
"min_degree",
"=",
"min",
"(",
"np",
".",
"min",
"(",
"degrees",
"[",
"-",
"1",
"]",
")",
",",
"input_size",
"-",
"1",
")",
"degrees",
".",
"append",
"(",
"np",
".",
"maximum",
"(",
"min_degree",
",",
"# Evenly divide the range `[1, input_size - 1]` in to `units + 1`",
"# segments, and pick the boundaries between the segments as degrees.",
"np",
".",
"ceil",
"(",
"np",
".",
"arange",
"(",
"1",
",",
"units",
"+",
"1",
")",
"*",
"(",
"input_size",
"-",
"1",
")",
"/",
"float",
"(",
"units",
"+",
"1",
")",
")",
".",
"astype",
"(",
"np",
".",
"int32",
")",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid hidden order: \"{}\".'",
".",
"format",
"(",
"hidden_degrees",
")",
")",
"return",
"degrees"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
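A small numpy walkthrough of the `'equal'` rule in `_create_degrees` above, with hand-picked sizes (5 inputs, one hidden layer of 3 units):

```python
import numpy as np

# Hand-picked example: 5 inputs, one hidden layer of 3 units, left-to-right order.
input_size, units = 5, 3
input_order = np.arange(1, input_size + 1)               # input degrees [1 2 3 4 5]
min_degree = min(np.min(input_order), input_size - 1)    # 1
hidden = np.maximum(
    min_degree,
    # Boundaries of an even split of [1, input_size - 1] into units + 1 segments.
    np.ceil(np.arange(1, units + 1) * (input_size - 1) / float(units + 1)
            ).astype(np.int32))
print(hidden)  # [1 2 3]
```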
test
|
_create_masks
|
Returns a list of binary mask matrices enforcing autoregressivity.
|
tensorflow_probability/python/bijectors/masked_autoregressive.py
|
def _create_masks(degrees):
"""Returns a list of binary mask matrices enforcing autoregressivity."""
return [
# Create input->hidden and hidden->hidden masks.
inp[:, np.newaxis] <= out
for inp, out in zip(degrees[:-1], degrees[1:])
] + [
# Create hidden->output mask.
degrees[-1][:, np.newaxis] < degrees[0]
]
|
def _create_masks(degrees):
"""Returns a list of binary mask matrices enforcing autoregressivity."""
return [
# Create input->hidden and hidden->hidden masks.
inp[:, np.newaxis] <= out
for inp, out in zip(degrees[:-1], degrees[1:])
] + [
# Create hidden->output mask.
degrees[-1][:, np.newaxis] < degrees[0]
]
|
[
"Returns",
"a",
"list",
"of",
"binary",
"mask",
"matrices",
"enforcing",
"autoregressivity",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/masked_autoregressive.py#L957-L966
|
[
"def",
"_create_masks",
"(",
"degrees",
")",
":",
"return",
"[",
"# Create input->hidden and hidden->hidden masks.",
"inp",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"<=",
"out",
"for",
"inp",
",",
"out",
"in",
"zip",
"(",
"degrees",
"[",
":",
"-",
"1",
"]",
",",
"degrees",
"[",
"1",
":",
"]",
")",
"]",
"+",
"[",
"# Create hidden->output mask.",
"degrees",
"[",
"-",
"1",
"]",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"<",
"degrees",
"[",
"0",
"]",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
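A self-contained numpy check of the comparison used in `_create_masks` above, with hand-picked degrees (inputs `[1 2 3]`, one hidden layer `[1 2]`): unit j may feed unit k iff degree(j) <= degree(k), with a strict inequality for the hidden-to-output mask.

```python
import numpy as np

degrees = [np.array([1, 2, 3]),   # input degrees (hand-picked)
           np.array([1, 2])]      # hidden-layer degrees (hand-picked)
masks = [inp[:, np.newaxis] <= out            # input->hidden (and hidden->hidden)
         for inp, out in zip(degrees[:-1], degrees[1:])]
masks.append(degrees[-1][:, np.newaxis] < degrees[0])   # hidden->output, strict
print(masks[0].astype(int))  # [[1 1]
                             #  [0 1]
                             #  [0 0]]
print(masks[1].astype(int))  # [[0 1 1]
                             #  [0 0 1]]
```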
test
|
_make_masked_initializer
|
Returns a masked version of the given initializer.
|
tensorflow_probability/python/bijectors/masked_autoregressive.py
|
def _make_masked_initializer(mask, initializer):
"""Returns a masked version of the given initializer."""
initializer = tf.keras.initializers.get(initializer)
def masked_initializer(shape, dtype=None, partition_info=None):
# If no `partition_info` is given, then don't pass it to `initializer`, as
# `initializer` may be a `tf.compat.v2.initializers.Initializer` (which
# don't accept a `partition_info` argument).
if partition_info is None:
x = initializer(shape, dtype)
else:
x = initializer(shape, dtype, partition_info)
return tf.cast(mask, x.dtype) * x
return masked_initializer
|
def _make_masked_initializer(mask, initializer):
"""Returns a masked version of the given initializer."""
initializer = tf.keras.initializers.get(initializer)
def masked_initializer(shape, dtype=None, partition_info=None):
# If no `partition_info` is given, then don't pass it to `initializer`, as
# `initializer` may be a `tf.compat.v2.initializers.Initializer` (which
# don't accept a `partition_info` argument).
if partition_info is None:
x = initializer(shape, dtype)
else:
x = initializer(shape, dtype, partition_info)
return tf.cast(mask, x.dtype) * x
return masked_initializer
|
[
"Returns",
"a",
"masked",
"version",
"of",
"the",
"given",
"initializer",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/masked_autoregressive.py#L969-L981
|
[
"def",
"_make_masked_initializer",
"(",
"mask",
",",
"initializer",
")",
":",
"initializer",
"=",
"tf",
".",
"keras",
".",
"initializers",
".",
"get",
"(",
"initializer",
")",
"def",
"masked_initializer",
"(",
"shape",
",",
"dtype",
"=",
"None",
",",
"partition_info",
"=",
"None",
")",
":",
"# If no `partition_info` is given, then don't pass it to `initializer`, as",
"# `initializer` may be a `tf.compat.v2.initializers.Initializer` (which",
"# don't accept a `partition_info` argument).",
"if",
"partition_info",
"is",
"None",
":",
"x",
"=",
"initializer",
"(",
"shape",
",",
"dtype",
")",
"else",
":",
"x",
"=",
"initializer",
"(",
"shape",
",",
"dtype",
",",
"partition_info",
")",
"return",
"tf",
".",
"cast",
"(",
"mask",
",",
"x",
".",
"dtype",
")",
"*",
"x",
"return",
"masked_initializer"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
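A framework-free sketch of the same masking idea as `_make_masked_initializer` above, with plain numpy standing in for the `tf.keras` initializer machinery (the helper name and mask values are illustrative):

```python
import numpy as np

def make_masked_initializer(mask, initializer):
    """Wraps `initializer` so masked-out weights start at exactly zero."""
    def masked_initializer(shape):
        return mask.astype(np.float32) * initializer(shape)
    return masked_initializer

mask = np.array([[1, 1], [0, 1], [0, 0]])
init = make_masked_initializer(mask, lambda shape: np.random.normal(size=shape))
kernel = init((3, 2))
print(kernel * (1 - mask))  # all zeros: masked entries never receive a weight
```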
test
|
AutoregressiveLayer.build
|
See tfkl.Layer.build.
|
tensorflow_probability/python/bijectors/masked_autoregressive.py
|
def build(self, input_shape):
"""See tfkl.Layer.build."""
if self._event_shape is None:
# `event_shape` wasn't specied at __init__, so infer from `input_shape`.
self._event_shape = [tf.compat.dimension_value(input_shape[-1])]
self._event_size = self._event_shape[-1]
self._event_ndims = len(self._event_shape)
# Should we throw if input_shape has rank > 2?
if input_shape[-1] != self._event_shape[-1]:
raise ValueError("Invalid final dimension of `input_shape`. "
"Expected `{!r}`, but got `{!r}`".format(
self._event_shape[-1], input_shape[-1]))
# Construct the masks.
self._input_order = _create_input_order(
self._event_size, self._input_order_param)
self._masks = _create_masks(_create_degrees(
input_size=self._event_size,
hidden_units=self._hidden_units,
input_order=self._input_order,
hidden_degrees=self._hidden_degrees))
# In the final layer, we will produce `self._params` outputs for each of the
# `self._event_size` inputs to `AutoregressiveLayer`. But `masks[-1]` has
# shape `[self._hidden_units[-1], self._event_size]`. Thus, we need to
# expand the mask to `[hidden_units[-1], event_size * self._params]` such
# that all units for the same input are masked identically. In particular,
# we tile the mask so the j-th element of `tf.unstack(output, axis=-1)` is a
# tensor of the j-th parameter/unit for each input.
#
# NOTE: Other orderings of the output could be faster -- should benchmark.
self._masks[-1] = np.reshape(
np.tile(self._masks[-1][..., tf.newaxis], [1, 1, self._params]),
[self._masks[-1].shape[0], self._event_size * self._params])
self._network = tf.keras.Sequential([
# Starting this model with an `InputLayer` ensures that Keras will build
# and propagate our `dtype` to each layer we add.
tf.keras.layers.InputLayer((self._event_size,), dtype=self.dtype)
])
# Input-to-hidden, hidden-to-hidden, and hidden-to-output layers:
# [..., self._event_size] -> [..., self._hidden_units[0]].
# [..., self._hidden_units[k-1]] -> [..., self._hidden_units[k]].
# [..., self._hidden_units[-1]] -> [..., event_size * self._params].
layer_output_sizes = self._hidden_units + [self._event_size * self._params]
for k in range(len(self._masks)):
self._network.add(tf.keras.layers.Dense(
layer_output_sizes[k],
kernel_initializer=_make_masked_initializer(
self._masks[k], self._kernel_initializer),
kernel_constraint=_make_masked_constraint(self._masks[k]),
activation=self._activation if k + 1 < len(self._masks) else None,
use_bias=self._use_bias,
**self._kwargs))
# Record that the layer has been built.
super(AutoregressiveLayer, self).build(input_shape)
|
def build(self, input_shape):
"""See tfkl.Layer.build."""
if self._event_shape is None:
# `event_shape` wasn't specied at __init__, so infer from `input_shape`.
self._event_shape = [tf.compat.dimension_value(input_shape[-1])]
self._event_size = self._event_shape[-1]
self._event_ndims = len(self._event_shape)
# Should we throw if input_shape has rank > 2?
if input_shape[-1] != self._event_shape[-1]:
raise ValueError("Invalid final dimension of `input_shape`. "
"Expected `{!r}`, but got `{!r}`".format(
self._event_shape[-1], input_shape[-1]))
# Construct the masks.
self._input_order = _create_input_order(
self._event_size, self._input_order_param)
self._masks = _create_masks(_create_degrees(
input_size=self._event_size,
hidden_units=self._hidden_units,
input_order=self._input_order,
hidden_degrees=self._hidden_degrees))
# In the final layer, we will produce `self._params` outputs for each of the
# `self._event_size` inputs to `AutoregressiveLayer`. But `masks[-1]` has
# shape `[self._hidden_units[-1], self._event_size]`. Thus, we need to
# expand the mask to `[hidden_units[-1], event_size * self._params]` such
# that all units for the same input are masked identically. In particular,
# we tile the mask so the j-th element of `tf.unstack(output, axis=-1)` is a
# tensor of the j-th parameter/unit for each input.
#
# NOTE: Other orderings of the output could be faster -- should benchmark.
self._masks[-1] = np.reshape(
np.tile(self._masks[-1][..., tf.newaxis], [1, 1, self._params]),
[self._masks[-1].shape[0], self._event_size * self._params])
self._network = tf.keras.Sequential([
# Starting this model with an `InputLayer` ensures that Keras will build
# and propagate our `dtype` to each layer we add.
tf.keras.layers.InputLayer((self._event_size,), dtype=self.dtype)
])
# Input-to-hidden, hidden-to-hidden, and hidden-to-output layers:
# [..., self._event_size] -> [..., self._hidden_units[0]].
# [..., self._hidden_units[k-1]] -> [..., self._hidden_units[k]].
# [..., self._hidden_units[-1]] -> [..., event_size * self._params].
layer_output_sizes = self._hidden_units + [self._event_size * self._params]
for k in range(len(self._masks)):
self._network.add(tf.keras.layers.Dense(
layer_output_sizes[k],
kernel_initializer=_make_masked_initializer(
self._masks[k], self._kernel_initializer),
kernel_constraint=_make_masked_constraint(self._masks[k]),
activation=self._activation if k + 1 < len(self._masks) else None,
use_bias=self._use_bias,
**self._kwargs))
# Record that the layer has been built.
super(AutoregressiveLayer, self).build(input_shape)
|
[
"See",
"tfkl",
".",
"Layer",
".",
"build",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/masked_autoregressive.py#L794-L852
|
[
"def",
"build",
"(",
"self",
",",
"input_shape",
")",
":",
"if",
"self",
".",
"_event_shape",
"is",
"None",
":",
"# `event_shape` wasn't specied at __init__, so infer from `input_shape`.",
"self",
".",
"_event_shape",
"=",
"[",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"input_shape",
"[",
"-",
"1",
"]",
")",
"]",
"self",
".",
"_event_size",
"=",
"self",
".",
"_event_shape",
"[",
"-",
"1",
"]",
"self",
".",
"_event_ndims",
"=",
"len",
"(",
"self",
".",
"_event_shape",
")",
"# Should we throw if input_shape has rank > 2?",
"if",
"input_shape",
"[",
"-",
"1",
"]",
"!=",
"self",
".",
"_event_shape",
"[",
"-",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"\"Invalid final dimension of `input_shape`. \"",
"\"Expected `{!r}`, but got `{!r}`\"",
".",
"format",
"(",
"self",
".",
"_event_shape",
"[",
"-",
"1",
"]",
",",
"input_shape",
"[",
"-",
"1",
"]",
")",
")",
"# Construct the masks.",
"self",
".",
"_input_order",
"=",
"_create_input_order",
"(",
"self",
".",
"_event_size",
",",
"self",
".",
"_input_order_param",
")",
"self",
".",
"_masks",
"=",
"_create_masks",
"(",
"_create_degrees",
"(",
"input_size",
"=",
"self",
".",
"_event_size",
",",
"hidden_units",
"=",
"self",
".",
"_hidden_units",
",",
"input_order",
"=",
"self",
".",
"_input_order",
",",
"hidden_degrees",
"=",
"self",
".",
"_hidden_degrees",
")",
")",
"# In the final layer, we will produce `self._params` outputs for each of the",
"# `self._event_size` inputs to `AutoregressiveLayer`. But `masks[-1]` has",
"# shape `[self._hidden_units[-1], self._event_size]`. Thus, we need to",
"# expand the mask to `[hidden_units[-1], event_size * self._params]` such",
"# that all units for the same input are masked identically. In particular,",
"# we tile the mask so the j-th element of `tf.unstack(output, axis=-1)` is a",
"# tensor of the j-th parameter/unit for each input.",
"#",
"# NOTE: Other orderings of the output could be faster -- should benchmark.",
"self",
".",
"_masks",
"[",
"-",
"1",
"]",
"=",
"np",
".",
"reshape",
"(",
"np",
".",
"tile",
"(",
"self",
".",
"_masks",
"[",
"-",
"1",
"]",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
",",
"[",
"1",
",",
"1",
",",
"self",
".",
"_params",
"]",
")",
",",
"[",
"self",
".",
"_masks",
"[",
"-",
"1",
"]",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"_event_size",
"*",
"self",
".",
"_params",
"]",
")",
"self",
".",
"_network",
"=",
"tf",
".",
"keras",
".",
"Sequential",
"(",
"[",
"# Starting this model with an `InputLayer` ensures that Keras will build",
"# and propagate our `dtype` to each layer we add.",
"tf",
".",
"keras",
".",
"layers",
".",
"InputLayer",
"(",
"(",
"self",
".",
"_event_size",
",",
")",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
"]",
")",
"# Input-to-hidden, hidden-to-hidden, and hidden-to-output layers:",
"# [..., self._event_size] -> [..., self._hidden_units[0]].",
"# [..., self._hidden_units[k-1]] -> [..., self._hidden_units[k]].",
"# [..., self._hidden_units[-1]] -> [..., event_size * self._params].",
"layer_output_sizes",
"=",
"self",
".",
"_hidden_units",
"+",
"[",
"self",
".",
"_event_size",
"*",
"self",
".",
"_params",
"]",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"_masks",
")",
")",
":",
"self",
".",
"_network",
".",
"add",
"(",
"tf",
".",
"keras",
".",
"layers",
".",
"Dense",
"(",
"layer_output_sizes",
"[",
"k",
"]",
",",
"kernel_initializer",
"=",
"_make_masked_initializer",
"(",
"self",
".",
"_masks",
"[",
"k",
"]",
",",
"self",
".",
"_kernel_initializer",
")",
",",
"kernel_constraint",
"=",
"_make_masked_constraint",
"(",
"self",
".",
"_masks",
"[",
"k",
"]",
")",
",",
"activation",
"=",
"self",
".",
"_activation",
"if",
"k",
"+",
"1",
"<",
"len",
"(",
"self",
".",
"_masks",
")",
"else",
"None",
",",
"use_bias",
"=",
"self",
".",
"_use_bias",
",",
"*",
"*",
"self",
".",
"_kwargs",
")",
")",
"# Record that the layer has been built.",
"super",
"(",
"AutoregressiveLayer",
",",
"self",
")",
".",
"build",
"(",
"input_shape",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
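A numpy-only sketch of the final-mask expansion performed in `build` above: a `[hidden_units[-1], event_size]` mask is tiled to `[hidden_units[-1], event_size * params]` so every parameter of a given input is masked identically (`np.newaxis` stands in for `tf.newaxis`; the mask and `params = 2` are hand-picked):

```python
import numpy as np

params = 2
mask = np.array([[0, 1, 1],
                 [0, 0, 1]])          # [hidden_units[-1] = 2, event_size = 3]
expanded = np.reshape(
    np.tile(mask[..., np.newaxis], [1, 1, params]),
    [mask.shape[0], mask.shape[1] * params])
print(expanded)  # [[0 0 1 1 1 1]
                 #  [0 0 0 0 1 1]]
```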
test
|
AutoregressiveLayer.call
|
See tfkl.Layer.call.
|
tensorflow_probability/python/bijectors/masked_autoregressive.py
|
def call(self, x):
"""See tfkl.Layer.call."""
with tf.compat.v2.name_scope(self.name or "AutoregressiveLayer_call"):
x = tf.convert_to_tensor(value=x, dtype=self.dtype, name="x")
input_shape = tf.shape(input=x)
# TODO(b/67594795): Better support for dynamic shapes.
if tensorshape_util.rank(x.shape) == 1:
x = x[tf.newaxis, ...]
return tf.reshape(self._network(x),
tf.concat([input_shape, [self._params]], axis=0))
|
def call(self, x):
"""See tfkl.Layer.call."""
with tf.compat.v2.name_scope(self.name or "AutoregressiveLayer_call"):
x = tf.convert_to_tensor(value=x, dtype=self.dtype, name="x")
input_shape = tf.shape(input=x)
# TODO(b/67594795): Better support for dynamic shapes.
if tensorshape_util.rank(x.shape) == 1:
x = x[tf.newaxis, ...]
return tf.reshape(self._network(x),
tf.concat([input_shape, [self._params]], axis=0))
|
[
"See",
"tfkl",
".",
"Layer",
".",
"call",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/masked_autoregressive.py#L854-L863
|
[
"def",
"call",
"(",
"self",
",",
"x",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v2",
".",
"name_scope",
"(",
"self",
".",
"name",
"or",
"\"AutoregressiveLayer_call\"",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"dtype",
"=",
"self",
".",
"dtype",
",",
"name",
"=",
"\"x\"",
")",
"input_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"# TODO(b/67594795): Better support for dynamic shapes.",
"if",
"tensorshape_util",
".",
"rank",
"(",
"x",
".",
"shape",
")",
"==",
"1",
":",
"x",
"=",
"x",
"[",
"tf",
".",
"newaxis",
",",
"...",
"]",
"return",
"tf",
".",
"reshape",
"(",
"self",
".",
"_network",
"(",
"x",
")",
",",
"tf",
".",
"concat",
"(",
"[",
"input_shape",
",",
"[",
"self",
".",
"_params",
"]",
"]",
",",
"axis",
"=",
"0",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
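A shape-only numpy sketch of the reshape at the end of `call` above: the network emits `event_size * params` values per example, which are regrouped so the last axis indexes the per-input parameters (for a masked autoregressive flow, typically shift and log-scale); the sizes are arbitrary:

```python
import numpy as np

batch, event_size, params = 4, 3, 2
net_out = np.zeros([batch, event_size * params])        # stand-in for the network output
out = np.reshape(net_out, [batch, event_size, params])  # input_shape + [params]
print(out.shape)  # (4, 3, 2)
```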
test
|
draw_sample
|
Sample a multinomial.
The batch shape is given by broadcasting num_trials with
remove_last_dimension(logits).
Args:
num_samples: Python int or singleton integer Tensor: number of multinomial
samples to draw.
num_classes: Python int or singleton integer Tensor: number of classes.
logits: Floating Tensor with last dimension k, of (unnormalized) logit
probabilities per class.
num_trials: Tensor of number of categorical trials each multinomial consists
of. num_trials[..., tf.newaxis] must broadcast with logits.
dtype: dtype at which to emit samples.
seed: Random seed.
Returns:
samples: Tensor of given dtype and shape [n] + batch_shape + [k].
|
tensorflow_probability/python/distributions/multinomial.py
|
def draw_sample(num_samples, num_classes, logits, num_trials, dtype, seed):
"""Sample a multinomial.
The batch shape is given by broadcasting num_trials with
remove_last_dimension(logits).
Args:
num_samples: Python int or singleton integer Tensor: number of multinomial
samples to draw.
num_classes: Python int or singleton integer Tensor: number of classes.
logits: Floating Tensor with last dimension k, of (unnormalized) logit
probabilities per class.
num_trials: Tensor of number of categorical trials each multinomial consists
of. num_trials[..., tf.newaxis] must broadcast with logits.
dtype: dtype at which to emit samples.
seed: Random seed.
Returns:
samples: Tensor of given dtype and shape [n] + batch_shape + [k].
"""
with tf.name_scope("multinomial.draw_sample"):
# broadcast the num_trials and logits to same shape
num_trials = tf.ones_like(
logits[..., 0], dtype=num_trials.dtype) * num_trials
logits = tf.ones_like(
num_trials[..., tf.newaxis], dtype=logits.dtype) * logits
# flatten the total_count and logits
# flat_logits has shape [B1B2...Bm, num_classes]
flat_logits = tf.reshape(logits, [-1, num_classes])
flat_num_trials = num_samples * tf.reshape(num_trials, [-1]) # [B1B2...Bm]
# Computes each logits and num_trials situation by map_fn.
# Using just one batch tf.random.categorical call doesn't work because that
# requires num_trials to be the same across all members of the batch of
# logits. This restriction makes sense for tf.random.categorical because
# for it, num_trials is part of the returned shape. However, the
# multinomial sampler does not need that restriction, because it sums out
# exactly that dimension.
# One possibility would be to draw a batch categorical whose sample count is
# max(num_trials) and mask out the excess ones. However, if the elements of
# num_trials vary widely, this can be wasteful of memory.
# TODO(b/123763054, b/112152209): Revisit the possibility of writing this
# with a batch categorical followed by batch unsorted_segment_sum, once both
# of those work and are memory-efficient enough.
def _sample_one_batch_member(args):
logits, num_cat_samples = args[0], args[1] # [K], []
# x has shape [1, num_cat_samples = num_samples * num_trials]
x = tf.random.categorical(
logits[tf.newaxis, ...], num_cat_samples, seed=seed)
x = tf.reshape(x, shape=[num_samples, -1]) # [num_samples, num_trials]
x = tf.one_hot(
x, depth=num_classes) # [num_samples, num_trials, num_classes]
x = tf.reduce_sum(input_tensor=x, axis=-2) # [num_samples, num_classes]
return tf.cast(x, dtype=dtype)
x = tf.map_fn(
_sample_one_batch_member, [flat_logits, flat_num_trials],
dtype=dtype) # [B1B2...Bm, num_samples, num_classes]
# reshape the results to proper shape
x = tf.transpose(a=x, perm=[1, 0, 2])
final_shape = tf.concat([[num_samples],
tf.shape(input=num_trials), [num_classes]],
axis=0)
x = tf.reshape(x, final_shape)
return x
|
def draw_sample(num_samples, num_classes, logits, num_trials, dtype, seed):
"""Sample a multinomial.
The batch shape is given by broadcasting num_trials with
remove_last_dimension(logits).
Args:
num_samples: Python int or singleton integer Tensor: number of multinomial
samples to draw.
num_classes: Python int or singleton integer Tensor: number of classes.
logits: Floating Tensor with last dimension k, of (unnormalized) logit
probabilities per class.
num_trials: Tensor of number of categorical trials each multinomial consists
of. num_trials[..., tf.newaxis] must broadcast with logits.
dtype: dtype at which to emit samples.
seed: Random seed.
Returns:
samples: Tensor of given dtype and shape [n] + batch_shape + [k].
"""
with tf.name_scope("multinomial.draw_sample"):
# broadcast the num_trials and logits to same shape
num_trials = tf.ones_like(
logits[..., 0], dtype=num_trials.dtype) * num_trials
logits = tf.ones_like(
num_trials[..., tf.newaxis], dtype=logits.dtype) * logits
# flatten the total_count and logits
# flat_logits has shape [B1B2...Bm, num_classes]
flat_logits = tf.reshape(logits, [-1, num_classes])
flat_num_trials = num_samples * tf.reshape(num_trials, [-1]) # [B1B2...Bm]
# Computes each logits and num_trials situation by map_fn.
# Using just one batch tf.random.categorical call doesn't work because that
# requires num_trials to be the same across all members of the batch of
# logits. This restriction makes sense for tf.random.categorical because
# for it, num_trials is part of the returned shape. However, the
# multinomial sampler does not need that restriction, because it sums out
# exactly that dimension.
# One possibility would be to draw a batch categorical whose sample count is
# max(num_trials) and mask out the excess ones. However, if the elements of
# num_trials vary widely, this can be wasteful of memory.
# TODO(b/123763054, b/112152209): Revisit the possibility of writing this
# with a batch categorical followed by batch unsorted_segment_sum, once both
# of those work and are memory-efficient enough.
def _sample_one_batch_member(args):
logits, num_cat_samples = args[0], args[1] # [K], []
# x has shape [1, num_cat_samples = num_samples * num_trials]
x = tf.random.categorical(
logits[tf.newaxis, ...], num_cat_samples, seed=seed)
x = tf.reshape(x, shape=[num_samples, -1]) # [num_samples, num_trials]
x = tf.one_hot(
x, depth=num_classes) # [num_samples, num_trials, num_classes]
x = tf.reduce_sum(input_tensor=x, axis=-2) # [num_samples, num_classes]
return tf.cast(x, dtype=dtype)
x = tf.map_fn(
_sample_one_batch_member, [flat_logits, flat_num_trials],
dtype=dtype) # [B1B2...Bm, num_samples, num_classes]
# reshape the results to proper shape
x = tf.transpose(a=x, perm=[1, 0, 2])
final_shape = tf.concat([[num_samples],
tf.shape(input=num_trials), [num_classes]],
axis=0)
x = tf.reshape(x, final_shape)
return x
|
[
"Sample",
"a",
"multinomial",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/multinomial.py#L285-L355
|
[
"def",
"draw_sample",
"(",
"num_samples",
",",
"num_classes",
",",
"logits",
",",
"num_trials",
",",
"dtype",
",",
"seed",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"multinomial.draw_sample\"",
")",
":",
"# broadcast the num_trials and logits to same shape",
"num_trials",
"=",
"tf",
".",
"ones_like",
"(",
"logits",
"[",
"...",
",",
"0",
"]",
",",
"dtype",
"=",
"num_trials",
".",
"dtype",
")",
"*",
"num_trials",
"logits",
"=",
"tf",
".",
"ones_like",
"(",
"num_trials",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
",",
"dtype",
"=",
"logits",
".",
"dtype",
")",
"*",
"logits",
"# flatten the total_count and logits",
"# flat_logits has shape [B1B2...Bm, num_classes]",
"flat_logits",
"=",
"tf",
".",
"reshape",
"(",
"logits",
",",
"[",
"-",
"1",
",",
"num_classes",
"]",
")",
"flat_num_trials",
"=",
"num_samples",
"*",
"tf",
".",
"reshape",
"(",
"num_trials",
",",
"[",
"-",
"1",
"]",
")",
"# [B1B2...Bm]",
"# Computes each logits and num_trials situation by map_fn.",
"# Using just one batch tf.random.categorical call doesn't work because that",
"# requires num_trials to be the same across all members of the batch of",
"# logits. This restriction makes sense for tf.random.categorical because",
"# for it, num_trials is part of the returned shape. However, the",
"# multinomial sampler does not need that restriction, because it sums out",
"# exactly that dimension.",
"# One possibility would be to draw a batch categorical whose sample count is",
"# max(num_trials) and mask out the excess ones. However, if the elements of",
"# num_trials vary widely, this can be wasteful of memory.",
"# TODO(b/123763054, b/112152209): Revisit the possibility of writing this",
"# with a batch categorical followed by batch unsorted_segment_sum, once both",
"# of those work and are memory-efficient enough.",
"def",
"_sample_one_batch_member",
"(",
"args",
")",
":",
"logits",
",",
"num_cat_samples",
"=",
"args",
"[",
"0",
"]",
",",
"args",
"[",
"1",
"]",
"# [K], []",
"# x has shape [1, num_cat_samples = num_samples * num_trials]",
"x",
"=",
"tf",
".",
"random",
".",
"categorical",
"(",
"logits",
"[",
"tf",
".",
"newaxis",
",",
"...",
"]",
",",
"num_cat_samples",
",",
"seed",
"=",
"seed",
")",
"x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"shape",
"=",
"[",
"num_samples",
",",
"-",
"1",
"]",
")",
"# [num_samples, num_trials]",
"x",
"=",
"tf",
".",
"one_hot",
"(",
"x",
",",
"depth",
"=",
"num_classes",
")",
"# [num_samples, num_trials, num_classes]",
"x",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"x",
",",
"axis",
"=",
"-",
"2",
")",
"# [num_samples, num_classes]",
"return",
"tf",
".",
"cast",
"(",
"x",
",",
"dtype",
"=",
"dtype",
")",
"x",
"=",
"tf",
".",
"map_fn",
"(",
"_sample_one_batch_member",
",",
"[",
"flat_logits",
",",
"flat_num_trials",
"]",
",",
"dtype",
"=",
"dtype",
")",
"# [B1B2...Bm, num_samples, num_classes]",
"# reshape the results to proper shape",
"x",
"=",
"tf",
".",
"transpose",
"(",
"a",
"=",
"x",
",",
"perm",
"=",
"[",
"1",
",",
"0",
",",
"2",
"]",
")",
"final_shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"[",
"num_samples",
"]",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"num_trials",
")",
",",
"[",
"num_classes",
"]",
"]",
",",
"axis",
"=",
"0",
")",
"x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"final_shape",
")",
"return",
"x"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
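A numpy analogue of the per-batch-member step inside `draw_sample` above: draw `num_trials` categorical outcomes, one-hot encode them, and sum over the trial axis to obtain one multinomial count vector (the probabilities below are made up):

```python
import numpy as np

rng = np.random.default_rng(0)
num_trials, num_classes = 10, 3
probs = np.array([0.2, 0.3, 0.5])
cats = rng.choice(num_classes, size=num_trials, p=probs)  # [num_trials]
one_hot = np.eye(num_classes)[cats]                       # [num_trials, num_classes]
counts = one_hot.sum(axis=0)                              # [num_classes]
print(counts, counts.sum())                               # counts sum to num_trials
```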
test
|
_zero_dimensional_mvndiag
|
Build a zero-dimensional MVNDiag object.
|
tensorflow_probability/python/sts/regression.py
|
def _zero_dimensional_mvndiag(dtype):
"""Build a zero-dimensional MVNDiag object."""
dummy_mvndiag = tfd.MultivariateNormalDiag(
scale_diag=tf.ones([0], dtype=dtype))
dummy_mvndiag.covariance = lambda: dummy_mvndiag.variance()[..., tf.newaxis]
return dummy_mvndiag
|
def _zero_dimensional_mvndiag(dtype):
"""Build a zero-dimensional MVNDiag object."""
dummy_mvndiag = tfd.MultivariateNormalDiag(
scale_diag=tf.ones([0], dtype=dtype))
dummy_mvndiag.covariance = lambda: dummy_mvndiag.variance()[..., tf.newaxis]
return dummy_mvndiag
|
[
"Build",
"a",
"zero",
"-",
"dimensional",
"MVNDiag",
"object",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/regression.py#L32-L37
|
[
"def",
"_zero_dimensional_mvndiag",
"(",
"dtype",
")",
":",
"dummy_mvndiag",
"=",
"tfd",
".",
"MultivariateNormalDiag",
"(",
"scale_diag",
"=",
"tf",
".",
"ones",
"(",
"[",
"0",
"]",
",",
"dtype",
"=",
"dtype",
")",
")",
"dummy_mvndiag",
".",
"covariance",
"=",
"lambda",
":",
"dummy_mvndiag",
".",
"variance",
"(",
")",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"return",
"dummy_mvndiag"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_observe_timeseries_fn
|
Build an observation_noise_fn that observes a Tensor timeseries.
|
tensorflow_probability/python/sts/regression.py
|
def _observe_timeseries_fn(timeseries):
"""Build an observation_noise_fn that observes a Tensor timeseries."""
def observation_noise_fn(t):
current_slice = timeseries[..., t, :]
return tfd.MultivariateNormalDiag(
loc=current_slice,
scale_diag=tf.zeros_like(current_slice))
return observation_noise_fn
|
def _observe_timeseries_fn(timeseries):
"""Build an observation_noise_fn that observes a Tensor timeseries."""
def observation_noise_fn(t):
current_slice = timeseries[..., t, :]
return tfd.MultivariateNormalDiag(
loc=current_slice,
scale_diag=tf.zeros_like(current_slice))
return observation_noise_fn
|
[
"Build",
"an",
"observation_noise_fn",
"that",
"observes",
"a",
"Tensor",
"timeseries",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/regression.py#L40-L47
|
[
"def",
"_observe_timeseries_fn",
"(",
"timeseries",
")",
":",
"def",
"observation_noise_fn",
"(",
"t",
")",
":",
"current_slice",
"=",
"timeseries",
"[",
"...",
",",
"t",
",",
":",
"]",
"return",
"tfd",
".",
"MultivariateNormalDiag",
"(",
"loc",
"=",
"current_slice",
",",
"scale_diag",
"=",
"tf",
".",
"zeros_like",
"(",
"current_slice",
")",
")",
"return",
"observation_noise_fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
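A plain-numpy sketch of the closure pattern in `_observe_timeseries_fn` above: each call slices out timestep `t` and treats it as an exactly observed (zero-noise) value; the `(loc, scale_diag)` pair stands in for the `MultivariateNormalDiag` it would build.

```python
import numpy as np

timeseries = np.arange(12.).reshape(4, 3)   # 4 timesteps, 3-dim observations

def observe(t):
    current_slice = timeseries[..., t, :]
    return current_slice, np.zeros_like(current_slice)  # (loc, scale_diag)

loc, scale_diag = observe(2)
print(loc)         # [6. 7. 8.]
print(scale_diag)  # [0. 0. 0.]
```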
test
|
SparseLinearRegression.params_to_weights
|
Build regression weights from model parameters.
|
tensorflow_probability/python/sts/regression.py
|
def params_to_weights(self,
global_scale_variance,
global_scale_noncentered,
local_scale_variances,
local_scales_noncentered,
weights_noncentered):
"""Build regression weights from model parameters."""
global_scale = (global_scale_noncentered *
tf.sqrt(global_scale_variance) *
self.weights_prior_scale)
local_scales = local_scales_noncentered * tf.sqrt(local_scale_variances)
return weights_noncentered * local_scales * global_scale[..., tf.newaxis]
|
def params_to_weights(self,
global_scale_variance,
global_scale_noncentered,
local_scale_variances,
local_scales_noncentered,
weights_noncentered):
"""Build regression weights from model parameters."""
global_scale = (global_scale_noncentered *
tf.sqrt(global_scale_variance) *
self.weights_prior_scale)
local_scales = local_scales_noncentered * tf.sqrt(local_scale_variances)
return weights_noncentered * local_scales * global_scale[..., tf.newaxis]
|
[
"Build",
"regression",
"weights",
"from",
"model",
"parameters",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/regression.py#L474-L486
|
[
"def",
"params_to_weights",
"(",
"self",
",",
"global_scale_variance",
",",
"global_scale_noncentered",
",",
"local_scale_variances",
",",
"local_scales_noncentered",
",",
"weights_noncentered",
")",
":",
"global_scale",
"=",
"(",
"global_scale_noncentered",
"*",
"tf",
".",
"sqrt",
"(",
"global_scale_variance",
")",
"*",
"self",
".",
"weights_prior_scale",
")",
"local_scales",
"=",
"local_scales_noncentered",
"*",
"tf",
".",
"sqrt",
"(",
"local_scale_variances",
")",
"return",
"weights_noncentered",
"*",
"local_scales",
"*",
"global_scale",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
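A numpy sketch of the non-centered parameterization computed in `params_to_weights` above, with made-up values; the scalar `global_scale` here drops the `[..., tf.newaxis]` broadcast that the batched original needs:

```python
import numpy as np

weights_prior_scale = 0.1
global_scale_variance, global_scale_noncentered = 4.0, 0.5
local_scale_variances = np.array([1.0, 9.0])
local_scales_noncentered = np.array([2.0, 1.0])
weights_noncentered = np.array([1.0, -1.0])

global_scale = (global_scale_noncentered *
                np.sqrt(global_scale_variance) *
                weights_prior_scale)                                       # 0.1
local_scales = local_scales_noncentered * np.sqrt(local_scale_variances)  # [2. 3.]
print(weights_noncentered * local_scales * global_scale)                  # [ 0.2 -0.3]
```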
test
|
_depth
|
Computes the number of edges on longest path from node to root.
|
tensorflow_probability/python/distributions/joint_distribution_named.py
|
def _depth(g):
"""Computes the number of edges on longest path from node to root."""
def _explore(v):
if v.depth < 0:
v.depth = ((1 + max([-1] + [_explore(annotated_graph[u])
for u in v.parents]))
if v.parents else 0)
return v.depth
annotated_graph = {k: _Node(k, v) for k, v in g.items()}
for v in annotated_graph.values():
_explore(v)
return annotated_graph
|
def _depth(g):
"""Computes the number of edges on longest path from node to root."""
def _explore(v):
if v.depth < 0:
v.depth = ((1 + max([-1] + [_explore(annotated_graph[u])
for u in v.parents]))
if v.parents else 0)
return v.depth
annotated_graph = {k: _Node(k, v) for k, v in g.items()}
for v in annotated_graph.values():
_explore(v)
return annotated_graph
|
[
"Computes",
"the",
"number",
"of",
"edges",
"on",
"longest",
"path",
"from",
"node",
"to",
"root",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/joint_distribution_named.py#L204-L215
|
[
"def",
"_depth",
"(",
"g",
")",
":",
"def",
"_explore",
"(",
"v",
")",
":",
"if",
"v",
".",
"depth",
"<",
"0",
":",
"v",
".",
"depth",
"=",
"(",
"(",
"1",
"+",
"max",
"(",
"[",
"-",
"1",
"]",
"+",
"[",
"_explore",
"(",
"annotated_graph",
"[",
"u",
"]",
")",
"for",
"u",
"in",
"v",
".",
"parents",
"]",
")",
")",
"if",
"v",
".",
"parents",
"else",
"0",
")",
"return",
"v",
".",
"depth",
"annotated_graph",
"=",
"{",
"k",
":",
"_Node",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"g",
".",
"items",
"(",
")",
"}",
"for",
"v",
"in",
"annotated_graph",
".",
"values",
"(",
")",
":",
"_explore",
"(",
"v",
")",
"return",
"annotated_graph"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
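A self-contained version of `_depth` above with a minimal stand-in for the `_Node` class (the real `_Node` is not shown in this row); the example graph mirrors a simple `scale -> loc -> x` dependency chain:

```python
class Node(object):
    """Minimal stand-in for `_Node`: a name, its parents, and a depth flag."""
    def __init__(self, name, parents):
        self.name = name
        self.parents = parents
        self.depth = -1

def depth(g):
    def _explore(v):
        if v.depth < 0:
            v.depth = ((1 + max([-1] + [_explore(annotated[u])
                                        for u in v.parents]))
                       if v.parents else 0)
        return v.depth
    annotated = {k: Node(k, v) for k, v in g.items()}
    for v in annotated.values():
        _explore(v)
    return annotated

g = {'scale': None, 'loc': ['scale'], 'x': ['loc', 'scale']}
print({k: v.depth for k, v in depth(g).items()})  # {'scale': 0, 'loc': 1, 'x': 2}
```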
test
|
_best_order
|
Creates tuple of str tuple-str pairs representing resolved & sorted DAG.
|
tensorflow_probability/python/distributions/joint_distribution_named.py
|
def _best_order(g):
"""Creates tuple of str tuple-str pairs representing resolved & sorted DAG."""
def _explore(u):
"""Recursive function to ascend up through unvisited dependencies."""
if u.depth < 0:
return # Already visited.
if not u.parents:
result.append((u.name, u.parents))
u.depth = -1 # Mark visited.
return
b = (u.name, [])
result.append(b)
u.depth = -1 # Mark visited.
d = 0
for v in sorted((g.get(p) for p in u.parents), key=lambda v: v.depth):
n0 = len(result)
_explore(v)
n1 = len(result)
b[1].extend(['_']*d + [v.name])
d = n1 - n0 - 1
g = _depth(g)
result = []
for u in sorted(g.values(), key=lambda v: v.depth, reverse=True):
_explore(u)
return tuple(reversed(result))
|
def _best_order(g):
"""Creates tuple of str tuple-str pairs representing resolved & sorted DAG."""
def _explore(u):
"""Recursive function to ascend up through unvisited dependencies."""
if u.depth < 0:
return # Already visited.
if not u.parents:
result.append((u.name, u.parents))
u.depth = -1 # Mark visited.
return
b = (u.name, [])
result.append(b)
u.depth = -1 # Mark visited.
d = 0
for v in sorted((g.get(p) for p in u.parents), key=lambda v: v.depth):
n0 = len(result)
_explore(v)
n1 = len(result)
b[1].extend(['_']*d + [v.name])
d = n1 - n0 - 1
g = _depth(g)
result = []
for u in sorted(g.values(), key=lambda v: v.depth, reverse=True):
_explore(u)
return tuple(reversed(result))
|
[
"Creates",
"tuple",
"of",
"str",
"tuple",
"-",
"str",
"pairs",
"representing",
"resolved",
"&",
"sorted",
"DAG",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/joint_distribution_named.py#L218-L242
|
[
"def",
"_best_order",
"(",
"g",
")",
":",
"def",
"_explore",
"(",
"u",
")",
":",
"\"\"\"Recursive function to ascend up through unvisited dependencies.\"\"\"",
"if",
"u",
".",
"depth",
"<",
"0",
":",
"return",
"# Already visited.",
"if",
"not",
"u",
".",
"parents",
":",
"result",
".",
"append",
"(",
"(",
"u",
".",
"name",
",",
"u",
".",
"parents",
")",
")",
"u",
".",
"depth",
"=",
"-",
"1",
"# Mark visited.",
"return",
"b",
"=",
"(",
"u",
".",
"name",
",",
"[",
"]",
")",
"result",
".",
"append",
"(",
"b",
")",
"u",
".",
"depth",
"=",
"-",
"1",
"# Mark visited.",
"d",
"=",
"0",
"for",
"v",
"in",
"sorted",
"(",
"(",
"g",
".",
"get",
"(",
"p",
")",
"for",
"p",
"in",
"u",
".",
"parents",
")",
",",
"key",
"=",
"lambda",
"v",
":",
"v",
".",
"depth",
")",
":",
"n0",
"=",
"len",
"(",
"result",
")",
"_explore",
"(",
"v",
")",
"n1",
"=",
"len",
"(",
"result",
")",
"b",
"[",
"1",
"]",
".",
"extend",
"(",
"[",
"'_'",
"]",
"*",
"d",
"+",
"[",
"v",
".",
"name",
"]",
")",
"d",
"=",
"n1",
"-",
"n0",
"-",
"1",
"g",
"=",
"_depth",
"(",
"g",
")",
"result",
"=",
"[",
"]",
"for",
"u",
"in",
"sorted",
"(",
"g",
".",
"values",
"(",
")",
",",
"key",
"=",
"lambda",
"v",
":",
"v",
".",
"depth",
",",
"reverse",
"=",
"True",
")",
":",
"_explore",
"(",
"u",
")",
"return",
"tuple",
"(",
"reversed",
"(",
"result",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_prob_chain_rule_flatten
|
Creates lists of callables suitable for JDSeq.
|
tensorflow_probability/python/distributions/joint_distribution_named.py
|
def _prob_chain_rule_flatten(named_makers):
"""Creates lists of callables suitable for JDSeq."""
def _make(dist_fn, args):
if args is None:
return lambda *_: dist_fn
if not args:
return lambda *_: dist_fn()
def _fn(*xs):
kwargs = dict(zip(args, reversed(xs[-len(args):])))
kwargs.pop('_', None)
return dist_fn(**kwargs)
return _fn
named_makers = _convert_to_dict(named_makers)
g = {k: (None if distribution_util.is_distribution_instance(v)
else joint_distribution_sequential._get_required_args(v)) # pylint: disable=protected-access
for k, v in named_makers.items()}
g = _best_order(g)
dist_fn_name, dist_fn_args = zip(*g)
dist_fn_args = tuple(None if a is None else tuple(a) for a in dist_fn_args)
dist_fn_wrapped = tuple(_make(named_makers[name], parents)
for (name, parents) in g)
dist_fn = tuple(named_makers.get(n) for n in dist_fn_name)
return dist_fn, dist_fn_wrapped, dist_fn_args, dist_fn_name
|
def _prob_chain_rule_flatten(named_makers):
"""Creates lists of callables suitable for JDSeq."""
def _make(dist_fn, args):
if args is None:
return lambda *_: dist_fn
if not args:
return lambda *_: dist_fn()
def _fn(*xs):
kwargs = dict(zip(args, reversed(xs[-len(args):])))
kwargs.pop('_', None)
return dist_fn(**kwargs)
return _fn
named_makers = _convert_to_dict(named_makers)
g = {k: (None if distribution_util.is_distribution_instance(v)
else joint_distribution_sequential._get_required_args(v)) # pylint: disable=protected-access
for k, v in named_makers.items()}
g = _best_order(g)
dist_fn_name, dist_fn_args = zip(*g)
dist_fn_args = tuple(None if a is None else tuple(a) for a in dist_fn_args)
dist_fn_wrapped = tuple(_make(named_makers[name], parents)
for (name, parents) in g)
dist_fn = tuple(named_makers.get(n) for n in dist_fn_name)
return dist_fn, dist_fn_wrapped, dist_fn_args, dist_fn_name
|
[
"Creates",
"lists",
"of",
"callables",
"suitable",
"for",
"JDSeq",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/joint_distribution_named.py#L245-L267
|
[
"def",
"_prob_chain_rule_flatten",
"(",
"named_makers",
")",
":",
"def",
"_make",
"(",
"dist_fn",
",",
"args",
")",
":",
"if",
"args",
"is",
"None",
":",
"return",
"lambda",
"*",
"_",
":",
"dist_fn",
"if",
"not",
"args",
":",
"return",
"lambda",
"*",
"_",
":",
"dist_fn",
"(",
")",
"def",
"_fn",
"(",
"*",
"xs",
")",
":",
"kwargs",
"=",
"dict",
"(",
"zip",
"(",
"args",
",",
"reversed",
"(",
"xs",
"[",
"-",
"len",
"(",
"args",
")",
":",
"]",
")",
")",
")",
"kwargs",
".",
"pop",
"(",
"'_'",
",",
"None",
")",
"return",
"dist_fn",
"(",
"*",
"*",
"kwargs",
")",
"return",
"_fn",
"named_makers",
"=",
"_convert_to_dict",
"(",
"named_makers",
")",
"g",
"=",
"{",
"k",
":",
"(",
"None",
"if",
"distribution_util",
".",
"is_distribution_instance",
"(",
"v",
")",
"else",
"joint_distribution_sequential",
".",
"_get_required_args",
"(",
"v",
")",
")",
"# pylint: disable=protected-access",
"for",
"k",
",",
"v",
"in",
"named_makers",
".",
"items",
"(",
")",
"}",
"g",
"=",
"_best_order",
"(",
"g",
")",
"dist_fn_name",
",",
"dist_fn_args",
"=",
"zip",
"(",
"*",
"g",
")",
"dist_fn_args",
"=",
"tuple",
"(",
"None",
"if",
"a",
"is",
"None",
"else",
"tuple",
"(",
"a",
")",
"for",
"a",
"in",
"dist_fn_args",
")",
"dist_fn_wrapped",
"=",
"tuple",
"(",
"_make",
"(",
"named_makers",
"[",
"name",
"]",
",",
"parents",
")",
"for",
"(",
"name",
",",
"parents",
")",
"in",
"g",
")",
"dist_fn",
"=",
"tuple",
"(",
"named_makers",
".",
"get",
"(",
"n",
")",
"for",
"n",
"in",
"dist_fn_name",
")",
"return",
"dist_fn",
",",
"dist_fn_wrapped",
",",
"dist_fn_args",
",",
"dist_fn_name"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
JointDistributionNamed._build
|
Creates `dist_fn`, `dist_fn_wrapped`, `dist_fn_args`, `dist_fn_name`.
|
tensorflow_probability/python/distributions/joint_distribution_named.py
|
def _build(self, model):
"""Creates `dist_fn`, `dist_fn_wrapped`, `dist_fn_args`, `dist_fn_name`."""
if not _is_dict_like(model):
raise TypeError('`model` must be convertible to `dict` (saw: {}).'.format(
type(model).__name__))
[
self._dist_fn,
self._dist_fn_wrapped,
self._dist_fn_args,
self._dist_fn_name, # JointDistributionSequential doesn't have this.
] = _prob_chain_rule_flatten(model)
|
def _build(self, model):
"""Creates `dist_fn`, `dist_fn_wrapped`, `dist_fn_args`, `dist_fn_name`."""
if not _is_dict_like(model):
raise TypeError('`model` must be convertible to `dict` (saw: {}).'.format(
type(model).__name__))
[
self._dist_fn,
self._dist_fn_wrapped,
self._dist_fn_args,
self._dist_fn_name, # JointDistributionSequential doesn't have this.
] = _prob_chain_rule_flatten(model)
|
[
"Creates",
"dist_fn",
"dist_fn_wrapped",
"dist_fn_args",
"dist_fn_name",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/joint_distribution_named.py#L172-L182
|
[
"def",
"_build",
"(",
"self",
",",
"model",
")",
":",
"if",
"not",
"_is_dict_like",
"(",
"model",
")",
":",
"raise",
"TypeError",
"(",
"'`model` must be convertible to `dict` (saw: {}).'",
".",
"format",
"(",
"type",
"(",
"model",
")",
".",
"__name__",
")",
")",
"[",
"self",
".",
"_dist_fn",
",",
"self",
".",
"_dist_fn_wrapped",
",",
"self",
".",
"_dist_fn_args",
",",
"self",
".",
"_dist_fn_name",
",",
"# JointDistributionSequential doesn't have this.",
"]",
"=",
"_prob_chain_rule_flatten",
"(",
"model",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
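For context, a minimal usage sketch of the public `tfd.JointDistributionNamed` API that `_build` serves (assuming a TensorFlow 2.x plus TensorFlow Probability install): dict values may be distributions or callables whose argument names refer to other keys, and `_build` resolves them into a sampling order.

```python
import tensorflow_probability as tfp
tfd = tfp.distributions

model = tfd.JointDistributionNamed(dict(
    scale=tfd.HalfNormal(1.),
    loc=tfd.Normal(0., 1.),
    x=lambda loc, scale: tfd.Normal(loc, scale),
))
sample = model.sample()        # dict with keys 'scale', 'loc', 'x'
print(model.log_prob(sample))  # scalar joint log-density
```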
test
|
VariationalGaussianProcess.variational_loss
|
Variational loss for the VGP.
Given `observations` and `observation_index_points`, compute the
negative variational lower bound as specified in [Hensman, 2013][1].
Args:
observations: `float` `Tensor` representing collection, or batch of
collections, of observations corresponding to
`observation_index_points`. Shape has the form `[b1, ..., bB, e]`, which
must be brodcastable with the batch and example shapes of
`observation_index_points`. The batch shape `[b1, ..., bB]` must be
broadcastable with the shapes of all other batched parameters
(`kernel.batch_shape`, `observation_index_points`, etc.).
observation_index_points: `float` `Tensor` representing finite (batch of)
vector(s) of points where observations are defined. Shape has the
form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature
dimensions and must equal `kernel.feature_ndims` and `e1` is the number
(size) of index points in each batch (we denote it `e1` to distinguish
it from the numer of inducing index points, denoted `e2` below). If
set to `None` uses `index_points` as the origin for observations.
Default value: None.
kl_weight: Amount by which to scale the KL divergence loss between prior
and posterior.
Default value: 1.
name: Python `str` name prefixed to Ops created by this class.
Default value: "GaussianProcess".
Returns:
loss: Scalar tensor representing the negative variational lower bound.
Can be directly used in a `tf.Optimizer`.
Raises:
ValueError: if `mean_fn` is not `None` and is not callable.
#### References
[1]: Hensman, J., Lawrence, N. "Gaussian Processes for Big Data", 2013
https://arxiv.org/abs/1309.6835
|
tensorflow_probability/python/distributions/variational_gaussian_process.py
|
def variational_loss(self,
observations,
observation_index_points=None,
kl_weight=1.,
name='variational_loss'):
"""Variational loss for the VGP.
Given `observations` and `observation_index_points`, compute the
negative variational lower bound as specified in [Hensman, 2013][1].
Args:
observations: `float` `Tensor` representing collection, or batch of
collections, of observations corresponding to
`observation_index_points`. Shape has the form `[b1, ..., bB, e]`, which
must be brodcastable with the batch and example shapes of
`observation_index_points`. The batch shape `[b1, ..., bB]` must be
broadcastable with the shapes of all other batched parameters
(`kernel.batch_shape`, `observation_index_points`, etc.).
observation_index_points: `float` `Tensor` representing finite (batch of)
vector(s) of points where observations are defined. Shape has the
form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature
dimensions and must equal `kernel.feature_ndims` and `e1` is the number
(size) of index points in each batch (we denote it `e1` to distinguish
it from the numer of inducing index points, denoted `e2` below). If
set to `None` uses `index_points` as the origin for observations.
Default value: None.
kl_weight: Amount by which to scale the KL divergence loss between prior
and posterior.
Default value: 1.
name: Python `str` name prefixed to Ops created by this class.
Default value: "GaussianProcess".
Returns:
loss: Scalar tensor representing the negative variational lower bound.
Can be directly used in a `tf.Optimizer`.
Raises:
ValueError: if `mean_fn` is not `None` and is not callable.
#### References
[1]: Hensman, J., Lawrence, N. "Gaussian Processes for Big Data", 2013
https://arxiv.org/abs/1309.6835
"""
with tf.name_scope(name or 'variational_gp_loss'):
if observation_index_points is None:
observation_index_points = self._index_points
observation_index_points = tf.convert_to_tensor(
value=observation_index_points, dtype=self._dtype,
name='observation_index_points')
observations = tf.convert_to_tensor(
value=observations, dtype=self._dtype, name='observations')
kl_weight = tf.convert_to_tensor(
value=kl_weight, dtype=self._dtype,
name='kl_weight')
# The variational loss is a negative ELBO. The ELBO can be broken down
# into three terms:
# 1. a likelihood term
# 2. a trace term arising from the covariance of the posterior predictive
kzx = self.kernel.matrix(self._inducing_index_points,
observation_index_points)
kzx_linop = tf.linalg.LinearOperatorFullMatrix(kzx)
loc = (self._mean_fn(observation_index_points) +
kzx_linop.matvec(self._kzz_inv_varloc, adjoint=True))
likelihood = independent.Independent(
normal.Normal(
loc=loc,
scale=tf.sqrt(self._observation_noise_variance + self._jitter),
name='NormalLikelihood'),
reinterpreted_batch_ndims=1)
obs_ll = likelihood.log_prob(observations)
chol_kzz_linop = tf.linalg.LinearOperatorLowerTriangular(self._chol_kzz)
chol_kzz_inv_kzx = chol_kzz_linop.solve(kzx)
kzz_inv_kzx = chol_kzz_linop.solve(chol_kzz_inv_kzx, adjoint=True)
kxx_diag = tf.linalg.diag_part(
self.kernel.matrix(
observation_index_points, observation_index_points))
ktilde_trace_term = (
tf.reduce_sum(input_tensor=kxx_diag, axis=-1) -
tf.reduce_sum(input_tensor=chol_kzz_inv_kzx ** 2, axis=[-2, -1]))
# Tr(SB)
# where S = A A.T, A = variational_inducing_observations_scale
# and B = Kzz^-1 Kzx Kzx.T Kzz^-1
#
# Now Tr(SB) = Tr(A A.T Kzz^-1 Kzx Kzx.T Kzz^-1)
# = Tr(A.T Kzz^-1 Kzx Kzx.T Kzz^-1 A)
# = sum_ij (A.T Kzz^-1 Kzx)_{ij}^2
other_trace_term = tf.reduce_sum(
input_tensor=(
self._variational_inducing_observations_posterior.scale.matmul(
kzz_inv_kzx) ** 2),
axis=[-2, -1])
trace_term = (.5 * (ktilde_trace_term + other_trace_term) /
self._observation_noise_variance)
inducing_prior = gaussian_process.GaussianProcess(
kernel=self._kernel,
mean_fn=self._mean_fn,
index_points=self._inducing_index_points,
observation_noise_variance=self._observation_noise_variance)
kl_term = kl_weight * kullback_leibler.kl_divergence(
self._variational_inducing_observations_posterior,
inducing_prior)
lower_bound = (obs_ll - trace_term - kl_term)
return -tf.reduce_mean(input_tensor=lower_bound)
|
def variational_loss(self,
observations,
observation_index_points=None,
kl_weight=1.,
name='variational_loss'):
"""Variational loss for the VGP.
Given `observations` and `observation_index_points`, compute the
negative variational lower bound as specified in [Hensman, 2013][1].
Args:
observations: `float` `Tensor` representing collection, or batch of
collections, of observations corresponding to
`observation_index_points`. Shape has the form `[b1, ..., bB, e]`, which
must be broadcastable with the batch and example shapes of
`observation_index_points`. The batch shape `[b1, ..., bB]` must be
broadcastable with the shapes of all other batched parameters
(`kernel.batch_shape`, `observation_index_points`, etc.).
observation_index_points: `float` `Tensor` representing finite (batch of)
vector(s) of points where observations are defined. Shape has the
form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature
dimensions and must equal `kernel.feature_ndims` and `e1` is the number
(size) of index points in each batch (we denote it `e1` to distinguish
it from the number of inducing index points, denoted `e2` below). If
set to `None`, uses `index_points` as the origin for observations.
Default value: None.
kl_weight: Amount by which to scale the KL divergence loss between prior
and posterior.
Default value: 1.
name: Python `str` name prefixed to Ops created by this class.
Default value: "GaussianProcess".
Returns:
loss: Scalar tensor representing the negative variational lower bound.
Can be directly used in a `tf.Optimizer`.
Raises:
ValueError: if `mean_fn` is not `None` and is not callable.
#### References
[1]: Hensman, J., Lawrence, N. "Gaussian Processes for Big Data", 2013
https://arxiv.org/abs/1309.6835
"""
with tf.name_scope(name or 'variational_gp_loss'):
if observation_index_points is None:
observation_index_points = self._index_points
observation_index_points = tf.convert_to_tensor(
value=observation_index_points, dtype=self._dtype,
name='observation_index_points')
observations = tf.convert_to_tensor(
value=observations, dtype=self._dtype, name='observations')
kl_weight = tf.convert_to_tensor(
value=kl_weight, dtype=self._dtype,
name='kl_weight')
# The variational loss is a negative ELBO. The ELBO can be broken down
# into three terms:
# 1. a likelihood term
# 2. a trace term arising from the covariance of the posterior predictive
kzx = self.kernel.matrix(self._inducing_index_points,
observation_index_points)
kzx_linop = tf.linalg.LinearOperatorFullMatrix(kzx)
loc = (self._mean_fn(observation_index_points) +
kzx_linop.matvec(self._kzz_inv_varloc, adjoint=True))
likelihood = independent.Independent(
normal.Normal(
loc=loc,
scale=tf.sqrt(self._observation_noise_variance + self._jitter),
name='NormalLikelihood'),
reinterpreted_batch_ndims=1)
obs_ll = likelihood.log_prob(observations)
chol_kzz_linop = tf.linalg.LinearOperatorLowerTriangular(self._chol_kzz)
chol_kzz_inv_kzx = chol_kzz_linop.solve(kzx)
kzz_inv_kzx = chol_kzz_linop.solve(chol_kzz_inv_kzx, adjoint=True)
kxx_diag = tf.linalg.diag_part(
self.kernel.matrix(
observation_index_points, observation_index_points))
ktilde_trace_term = (
tf.reduce_sum(input_tensor=kxx_diag, axis=-1) -
tf.reduce_sum(input_tensor=chol_kzz_inv_kzx ** 2, axis=[-2, -1]))
# Tr(SB)
# where S = A A.T, A = variational_inducing_observations_scale
# and B = Kzz^-1 Kzx Kzx.T Kzz^-1
#
# Now Tr(SB) = Tr(A A.T Kzz^-1 Kzx Kzx.T Kzz^-1)
# = Tr(A.T Kzz^-1 Kzx Kzx.T Kzz^-1 A)
# = sum_ij (A.T Kzz^-1 Kzx)_{ij}^2
other_trace_term = tf.reduce_sum(
input_tensor=(
self._variational_inducing_observations_posterior.scale.matmul(
kzz_inv_kzx) ** 2),
axis=[-2, -1])
trace_term = (.5 * (ktilde_trace_term + other_trace_term) /
self._observation_noise_variance)
inducing_prior = gaussian_process.GaussianProcess(
kernel=self._kernel,
mean_fn=self._mean_fn,
index_points=self._inducing_index_points,
observation_noise_variance=self._observation_noise_variance)
kl_term = kl_weight * kullback_leibler.kl_divergence(
self._variational_inducing_observations_posterior,
inducing_prior)
lower_bound = (obs_ll - trace_term - kl_term)
return -tf.reduce_mean(input_tensor=lower_bound)
|
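For readers checking the trace identity quoted in the comments above, the following standalone NumPy sketch (not part of the TFP source; all array names and sizes are hypothetical) verifies that Tr(SB), with S = A A' and B = Kzz^-1 Kzx Kzx' Kzz^-1, equals the sum of squared entries of A' Kzz^-1 Kzx, which is exactly the quantity accumulated as `other_trace_term`:

import numpy as np

np.random.seed(0)
m, n = 4, 7                              # number of inducing points, observations
A = np.tril(np.random.randn(m, m))       # variational scale factor (lower triangular)
kzx = np.random.randn(m, n)              # hypothetical cross-covariance Kzx
q = np.random.randn(m, m)
kzz = q @ q.T + m * np.eye(m)            # symmetric positive-definite Kzz
kzz_inv = np.linalg.inv(kzz)

s = A @ A.T                              # S = A A'
b = kzz_inv @ kzx @ kzx.T @ kzz_inv      # B = Kzz^-1 Kzx Kzx' Kzz^-1
lhs = np.trace(s @ b)
rhs = np.sum((A.T @ kzz_inv @ kzx) ** 2) # sum_ij (A' Kzz^-1 Kzx)_{ij}^2
assert np.allclose(lhs, rhs)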
[
"Variational",
"loss",
"for",
"the",
"VGP",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/variational_gaussian_process.py#L728-L842
|
[
"def",
"variational_loss",
"(",
"self",
",",
"observations",
",",
"observation_index_points",
"=",
"None",
",",
"kl_weight",
"=",
"1.",
",",
"name",
"=",
"'variational_loss'",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"'variational_gp_loss'",
")",
":",
"if",
"observation_index_points",
"is",
"None",
":",
"observation_index_points",
"=",
"self",
".",
"_index_points",
"observation_index_points",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"observation_index_points",
",",
"dtype",
"=",
"self",
".",
"_dtype",
",",
"name",
"=",
"'observation_index_points'",
")",
"observations",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"observations",
",",
"dtype",
"=",
"self",
".",
"_dtype",
",",
"name",
"=",
"'observations'",
")",
"kl_weight",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"kl_weight",
",",
"dtype",
"=",
"self",
".",
"_dtype",
",",
"name",
"=",
"'kl_weight'",
")",
"# The variational loss is a negative ELBO. The ELBO can be broken down",
"# into three terms:",
"# 1. a likelihood term",
"# 2. a trace term arising from the covariance of the posterior predictive",
"kzx",
"=",
"self",
".",
"kernel",
".",
"matrix",
"(",
"self",
".",
"_inducing_index_points",
",",
"observation_index_points",
")",
"kzx_linop",
"=",
"tf",
".",
"linalg",
".",
"LinearOperatorFullMatrix",
"(",
"kzx",
")",
"loc",
"=",
"(",
"self",
".",
"_mean_fn",
"(",
"observation_index_points",
")",
"+",
"kzx_linop",
".",
"matvec",
"(",
"self",
".",
"_kzz_inv_varloc",
",",
"adjoint",
"=",
"True",
")",
")",
"likelihood",
"=",
"independent",
".",
"Independent",
"(",
"normal",
".",
"Normal",
"(",
"loc",
"=",
"loc",
",",
"scale",
"=",
"tf",
".",
"sqrt",
"(",
"self",
".",
"_observation_noise_variance",
"+",
"self",
".",
"_jitter",
")",
",",
"name",
"=",
"'NormalLikelihood'",
")",
",",
"reinterpreted_batch_ndims",
"=",
"1",
")",
"obs_ll",
"=",
"likelihood",
".",
"log_prob",
"(",
"observations",
")",
"chol_kzz_linop",
"=",
"tf",
".",
"linalg",
".",
"LinearOperatorLowerTriangular",
"(",
"self",
".",
"_chol_kzz",
")",
"chol_kzz_inv_kzx",
"=",
"chol_kzz_linop",
".",
"solve",
"(",
"kzx",
")",
"kzz_inv_kzx",
"=",
"chol_kzz_linop",
".",
"solve",
"(",
"chol_kzz_inv_kzx",
",",
"adjoint",
"=",
"True",
")",
"kxx_diag",
"=",
"tf",
".",
"linalg",
".",
"diag_part",
"(",
"self",
".",
"kernel",
".",
"matrix",
"(",
"observation_index_points",
",",
"observation_index_points",
")",
")",
"ktilde_trace_term",
"=",
"(",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"kxx_diag",
",",
"axis",
"=",
"-",
"1",
")",
"-",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"chol_kzz_inv_kzx",
"**",
"2",
",",
"axis",
"=",
"[",
"-",
"2",
",",
"-",
"1",
"]",
")",
")",
"# Tr(SB)",
"# where S = A A.T, A = variational_inducing_observations_scale",
"# and B = Kzz^-1 Kzx Kzx.T Kzz^-1",
"#",
"# Now Tr(SB) = Tr(A A.T Kzz^-1 Kzx Kzx.T Kzz^-1)",
"# = Tr(A.T Kzz^-1 Kzx Kzx.T Kzz^-1 A)",
"# = sum_ij (A.T Kzz^-1 Kzx)_{ij}^2",
"other_trace_term",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"(",
"self",
".",
"_variational_inducing_observations_posterior",
".",
"scale",
".",
"matmul",
"(",
"kzz_inv_kzx",
")",
"**",
"2",
")",
",",
"axis",
"=",
"[",
"-",
"2",
",",
"-",
"1",
"]",
")",
"trace_term",
"=",
"(",
".5",
"*",
"(",
"ktilde_trace_term",
"+",
"other_trace_term",
")",
"/",
"self",
".",
"_observation_noise_variance",
")",
"inducing_prior",
"=",
"gaussian_process",
".",
"GaussianProcess",
"(",
"kernel",
"=",
"self",
".",
"_kernel",
",",
"mean_fn",
"=",
"self",
".",
"_mean_fn",
",",
"index_points",
"=",
"self",
".",
"_inducing_index_points",
",",
"observation_noise_variance",
"=",
"self",
".",
"_observation_noise_variance",
")",
"kl_term",
"=",
"kl_weight",
"*",
"kullback_leibler",
".",
"kl_divergence",
"(",
"self",
".",
"_variational_inducing_observations_posterior",
",",
"inducing_prior",
")",
"lower_bound",
"=",
"(",
"obs_ll",
"-",
"trace_term",
"-",
"kl_term",
")",
"return",
"-",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"lower_bound",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
VariationalGaussianProcess.optimal_variational_posterior
|
Model selection for optimal variational hyperparameters.
Given the full training set (parameterized by `observations` and
`observation_index_points`), compute the optimal variational
location and scale for the VGP. This is based on the method suggested
in [Titsias, 2009][1].
Args:
kernel: `PositiveSemidefiniteKernel`-like instance representing the
GP's covariance function.
inducing_index_points: `float` `Tensor` of locations of inducing points in
the index set. Shape has the form `[b1, ..., bB, e2, f1, ..., fF]`, just
like `observation_index_points`. The batch shape components needn't be
identical to those of `observation_index_points`, but must be broadcast
compatible with them.
observation_index_points: `float` `Tensor` representing finite (batch of)
vector(s) of points where observations are defined. Shape has the
form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature
dimensions and must equal `kernel.feature_ndims` and `e1` is the number
(size) of index points in each batch (we denote it `e1` to distinguish
it from the number of inducing index points, denoted `e2` below).
observations: `float` `Tensor` representing collection, or batch of
collections, of observations corresponding to
`observation_index_points`. Shape has the form `[b1, ..., bB, e]`, which
must be broadcastable with the batch and example shapes of
`observation_index_points`. The batch shape `[b1, ..., bB]` must be
broadcastable with the shapes of all other batched parameters
(`kernel.batch_shape`, `observation_index_points`, etc.).
observation_noise_variance: `float` `Tensor` representing the variance
of the noise in the Normal likelihood distribution of the model. May be
batched, in which case the batch shape must be broadcastable with the
shapes of all other batched parameters (`kernel.batch_shape`,
`index_points`, etc.).
Default value: `0.`
mean_fn: Python `callable` that acts on index points to produce a (batch
of) vector(s) of mean values at those index points. Takes a `Tensor` of
shape `[b1, ..., bB, f1, ..., fF]` and returns a `Tensor` whose shape is
(broadcastable with) `[b1, ..., bB]`. Default value: `None` implies
constant zero function.
jitter: `float` scalar `Tensor` added to the diagonal of the covariance
matrix to ensure positive definiteness of the covariance matrix.
Default value: `1e-6`.
name: Python `str` name prefixed to Ops created by this class.
Default value: "optimal_variational_posterior".
Returns:
loc, scale: Tuple representing the variational location and scale.
Raises:
ValueError: if `mean_fn` is not `None` and is not callable.
#### References
[1]: Titsias, M. "Variational Model Selection for Sparse Gaussian Process
Regression", 2009.
http://proceedings.mlr.press/v5/titsias09a/titsias09a.pdf
|
tensorflow_probability/python/distributions/variational_gaussian_process.py
|
def optimal_variational_posterior(
kernel,
inducing_index_points,
observation_index_points,
observations,
observation_noise_variance,
mean_fn=None,
jitter=1e-6,
name=None):
"""Model selection for optimal variational hyperparameters.
Given the full training set (parameterized by `observations` and
`observation_index_points`), compute the optimal variational
location and scale for the VGP. This is based on the method suggested
in [Titsias, 2009][1].
Args:
kernel: `PositiveSemidefiniteKernel`-like instance representing the
GP's covariance function.
inducing_index_points: `float` `Tensor` of locations of inducing points in
the index set. Shape has the form `[b1, ..., bB, e2, f1, ..., fF]`, just
like `observation_index_points`. The batch shape components needn't be
identical to those of `observation_index_points`, but must be broadcast
compatible with them.
observation_index_points: `float` `Tensor` representing finite (batch of)
vector(s) of points where observations are defined. Shape has the
form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature
dimensions and must equal `kernel.feature_ndims` and `e1` is the number
(size) of index points in each batch (we denote it `e1` to distinguish
it from the number of inducing index points, denoted `e2` below).
observations: `float` `Tensor` representing collection, or batch of
collections, of observations corresponding to
`observation_index_points`. Shape has the form `[b1, ..., bB, e]`, which
must be broadcastable with the batch and example shapes of
`observation_index_points`. The batch shape `[b1, ..., bB]` must be
broadcastable with the shapes of all other batched parameters
(`kernel.batch_shape`, `observation_index_points`, etc.).
observation_noise_variance: `float` `Tensor` representing the variance
of the noise in the Normal likelihood distribution of the model. May be
batched, in which case the batch shape must be broadcastable with the
shapes of all other batched parameters (`kernel.batch_shape`,
`index_points`, etc.).
Default value: `0.`
mean_fn: Python `callable` that acts on index points to produce a (batch
of) vector(s) of mean values at those index points. Takes a `Tensor` of
shape `[b1, ..., bB, f1, ..., fF]` and returns a `Tensor` whose shape is
(broadcastable with) `[b1, ..., bB]`. Default value: `None` implies
constant zero function.
jitter: `float` scalar `Tensor` added to the diagonal of the covariance
matrix to ensure positive definiteness of the covariance matrix.
Default value: `1e-6`.
name: Python `str` name prefixed to Ops created by this class.
Default value: "optimal_variational_posterior".
Returns:
loc, scale: Tuple representing the variational location and scale.
Raises:
ValueError: if `mean_fn` is not `None` and is not callable.
#### References
[1]: Titsias, M. "Variational Model Selection for Sparse Gaussian Process
Regression", 2009.
http://proceedings.mlr.press/v5/titsias09a/titsias09a.pdf
"""
with tf.name_scope(name or 'optimal_variational_posterior'):
dtype = dtype_util.common_dtype(
[inducing_index_points,
observation_index_points,
observations,
observation_noise_variance,
jitter], tf.float32)
inducing_index_points = tf.convert_to_tensor(
value=inducing_index_points,
dtype=dtype, name='inducing_index_points')
observation_index_points = tf.convert_to_tensor(
value=observation_index_points, dtype=dtype,
name='observation_index_points')
observations = tf.convert_to_tensor(
value=observations, dtype=dtype, name='observations')
observation_noise_variance = tf.convert_to_tensor(
value=observation_noise_variance,
dtype=dtype,
name='observation_noise_variance')
jitter = tf.convert_to_tensor(
value=jitter, dtype=dtype, name='jitter')
# Default to a constant zero function.
if mean_fn is None:
mean_fn = lambda x: tf.zeros([1], dtype=dtype)
else:
if not callable(mean_fn):
raise ValueError('`mean_fn` must be a Python callable')
# z are the inducing points and x are the observation index points.
kzz = kernel.matrix(inducing_index_points, inducing_index_points)
kzx = kernel.matrix(inducing_index_points, observation_index_points)
noise_var_inv = tf.math.reciprocal(observation_noise_variance)
sigma_inv = _add_diagonal_shift(
kzz + noise_var_inv * tf.matmul(kzx, kzx, adjoint_b=True),
jitter)
chol_sigma_inv = tf.linalg.cholesky(sigma_inv)
kzx_lin_op = tf.linalg.LinearOperatorFullMatrix(kzx)
kzx_obs = kzx_lin_op.matvec(
observations - mean_fn(observation_index_points))
kzz_lin_op = tf.linalg.LinearOperatorFullMatrix(kzz)
loc = (mean_fn(inducing_index_points) +
noise_var_inv * kzz_lin_op.matvec(
_solve_cholesky_factored_system_vec(chol_sigma_inv, kzx_obs)))
chol_sigma_inv_lin_op = tf.linalg.LinearOperatorLowerTriangular(
chol_sigma_inv)
scale = chol_sigma_inv_lin_op.solve(kzz)
return loc, scale
|
def optimal_variational_posterior(
kernel,
inducing_index_points,
observation_index_points,
observations,
observation_noise_variance,
mean_fn=None,
jitter=1e-6,
name=None):
"""Model selection for optimal variational hyperparameters.
Given the full training set (parameterized by `observations` and
`observation_index_points`), compute the optimal variational
location and scale for the VGP. This is based on the method suggested
in [Titsias, 2009][1].
Args:
kernel: `PositiveSemidefiniteKernel`-like instance representing the
GP's covariance function.
inducing_index_points: `float` `Tensor` of locations of inducing points in
the index set. Shape has the form `[b1, ..., bB, e2, f1, ..., fF]`, just
like `observation_index_points`. The batch shape components needn't be
identical to those of `observation_index_points`, but must be broadcast
compatible with them.
observation_index_points: `float` `Tensor` representing finite (batch of)
vector(s) of points where observations are defined. Shape has the
form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature
dimensions and must equal `kernel.feature_ndims` and `e1` is the number
(size) of index points in each batch (we denote it `e1` to distinguish
it from the number of inducing index points, denoted `e2` below).
observations: `float` `Tensor` representing collection, or batch of
collections, of observations corresponding to
`observation_index_points`. Shape has the form `[b1, ..., bB, e]`, which
must be broadcastable with the batch and example shapes of
`observation_index_points`. The batch shape `[b1, ..., bB]` must be
broadcastable with the shapes of all other batched parameters
(`kernel.batch_shape`, `observation_index_points`, etc.).
observation_noise_variance: `float` `Tensor` representing the variance
of the noise in the Normal likelihood distribution of the model. May be
batched, in which case the batch shape must be broadcastable with the
shapes of all other batched parameters (`kernel.batch_shape`,
`index_points`, etc.).
Default value: `0.`
mean_fn: Python `callable` that acts on index points to produce a (batch
of) vector(s) of mean values at those index points. Takes a `Tensor` of
shape `[b1, ..., bB, f1, ..., fF]` and returns a `Tensor` whose shape is
(broadcastable with) `[b1, ..., bB]`. Default value: `None` implies
constant zero function.
jitter: `float` scalar `Tensor` added to the diagonal of the covariance
matrix to ensure positive definiteness of the covariance matrix.
Default value: `1e-6`.
name: Python `str` name prefixed to Ops created by this class.
Default value: "optimal_variational_posterior".
Returns:
loc, scale: Tuple representing the variational location and scale.
Raises:
ValueError: if `mean_fn` is not `None` and is not callable.
#### References
[1]: Titsias, M. "Variational Model Selection for Sparse Gaussian Process
Regression", 2009.
http://proceedings.mlr.press/v5/titsias09a/titsias09a.pdf
"""
with tf.name_scope(name or 'optimal_variational_posterior'):
dtype = dtype_util.common_dtype(
[inducing_index_points,
observation_index_points,
observations,
observation_noise_variance,
jitter], tf.float32)
inducing_index_points = tf.convert_to_tensor(
value=inducing_index_points,
dtype=dtype, name='inducing_index_points')
observation_index_points = tf.convert_to_tensor(
value=observation_index_points, dtype=dtype,
name='observation_index_points')
observations = tf.convert_to_tensor(
value=observations, dtype=dtype, name='observations')
observation_noise_variance = tf.convert_to_tensor(
value=observation_noise_variance,
dtype=dtype,
name='observation_noise_variance')
jitter = tf.convert_to_tensor(
value=jitter, dtype=dtype, name='jitter')
# Default to a constant zero function.
if mean_fn is None:
mean_fn = lambda x: tf.zeros([1], dtype=dtype)
else:
if not callable(mean_fn):
raise ValueError('`mean_fn` must be a Python callable')
# z are the inducing points and x are the observation index points.
kzz = kernel.matrix(inducing_index_points, inducing_index_points)
kzx = kernel.matrix(inducing_index_points, observation_index_points)
noise_var_inv = tf.math.reciprocal(observation_noise_variance)
sigma_inv = _add_diagonal_shift(
kzz + noise_var_inv * tf.matmul(kzx, kzx, adjoint_b=True),
jitter)
chol_sigma_inv = tf.linalg.cholesky(sigma_inv)
kzx_lin_op = tf.linalg.LinearOperatorFullMatrix(kzx)
kzx_obs = kzx_lin_op.matvec(
observations - mean_fn(observation_index_points))
kzz_lin_op = tf.linalg.LinearOperatorFullMatrix(kzz)
loc = (mean_fn(inducing_index_points) +
noise_var_inv * kzz_lin_op.matvec(
_solve_cholesky_factored_system_vec(chol_sigma_inv, kzx_obs)))
chol_sigma_inv_lin_op = tf.linalg.LinearOperatorLowerTriangular(
chol_sigma_inv)
scale = chol_sigma_inv_lin_op.solve(kzz)
return loc, scale
|
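As a rough illustration of the linear algebra performed above, here is a hedged NumPy-only sketch of the Titsias-style optimal variational mean and scale for a toy 1-D problem; the `rbf` kernel, data, noise level, and inducing points are all made up for the example and stand in for the real `PositiveSemidefiniteKernel` and inputs (a zero mean function is assumed):

import numpy as np

def rbf(x1, x2, length_scale=1.0):
  # Simple squared-exponential kernel; stands in for a PSD kernel's `matrix`.
  d = x1[:, None] - x2[None, :]
  return np.exp(-0.5 * (d / length_scale) ** 2)

np.random.seed(0)
x = np.linspace(-1., 1., 20)            # observation index points (1-D, for illustration)
y = np.sin(3. * x) + 0.1 * np.random.randn(20)
z = np.linspace(-1., 1., 5)             # inducing index points
noise_var, jitter = 0.05, 1e-6

kzz = rbf(z, z)
kzx = rbf(z, x)
sigma_inv = kzz + kzx @ kzx.T / noise_var + jitter * np.eye(len(z))
chol = np.linalg.cholesky(sigma_inv)
# Solve Sigma_inv^{-1} (Kzx y) via two triangular solves, mirroring the code above.
tmp = np.linalg.solve(chol, kzx @ y)
solved = np.linalg.solve(chol.T, tmp)
loc = kzz @ solved / noise_var          # optimal variational mean
scale = np.linalg.solve(chol, kzz)      # analogue of chol_sigma_inv_lin_op.solve(kzz)
print(loc.shape, scale.shape)           # -> (5,) (5, 5)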
[
"Model",
"selection",
"for",
"optimal",
"variational",
"hyperparameters",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/variational_gaussian_process.py#L845-L964
|
[
"def",
"optimal_variational_posterior",
"(",
"kernel",
",",
"inducing_index_points",
",",
"observation_index_points",
",",
"observations",
",",
"observation_noise_variance",
",",
"mean_fn",
"=",
"None",
",",
"jitter",
"=",
"1e-6",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"'optimal_variational_posterior'",
")",
":",
"dtype",
"=",
"dtype_util",
".",
"common_dtype",
"(",
"[",
"inducing_index_points",
",",
"observation_index_points",
",",
"observations",
",",
"observation_noise_variance",
",",
"jitter",
"]",
",",
"tf",
".",
"float32",
")",
"inducing_index_points",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"inducing_index_points",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'inducing_index_points'",
")",
"observation_index_points",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"observation_index_points",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'observation_index_points'",
")",
"observations",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"observations",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'observations'",
")",
"observation_noise_variance",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"observation_noise_variance",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'observation_noise_variance'",
")",
"jitter",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"jitter",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'jitter'",
")",
"# Default to a constant zero function.",
"if",
"mean_fn",
"is",
"None",
":",
"mean_fn",
"=",
"lambda",
"x",
":",
"tf",
".",
"zeros",
"(",
"[",
"1",
"]",
",",
"dtype",
"=",
"dtype",
")",
"else",
":",
"if",
"not",
"callable",
"(",
"mean_fn",
")",
":",
"raise",
"ValueError",
"(",
"'`mean_fn` must be a Python callable'",
")",
"# z are the inducing points and x are the observation index points.",
"kzz",
"=",
"kernel",
".",
"matrix",
"(",
"inducing_index_points",
",",
"inducing_index_points",
")",
"kzx",
"=",
"kernel",
".",
"matrix",
"(",
"inducing_index_points",
",",
"observation_index_points",
")",
"noise_var_inv",
"=",
"tf",
".",
"math",
".",
"reciprocal",
"(",
"observation_noise_variance",
")",
"sigma_inv",
"=",
"_add_diagonal_shift",
"(",
"kzz",
"+",
"noise_var_inv",
"*",
"tf",
".",
"matmul",
"(",
"kzx",
",",
"kzx",
",",
"adjoint_b",
"=",
"True",
")",
",",
"jitter",
")",
"chol_sigma_inv",
"=",
"tf",
".",
"linalg",
".",
"cholesky",
"(",
"sigma_inv",
")",
"kzx_lin_op",
"=",
"tf",
".",
"linalg",
".",
"LinearOperatorFullMatrix",
"(",
"kzx",
")",
"kzx_obs",
"=",
"kzx_lin_op",
".",
"matvec",
"(",
"observations",
"-",
"mean_fn",
"(",
"observation_index_points",
")",
")",
"kzz_lin_op",
"=",
"tf",
".",
"linalg",
".",
"LinearOperatorFullMatrix",
"(",
"kzz",
")",
"loc",
"=",
"(",
"mean_fn",
"(",
"inducing_index_points",
")",
"+",
"noise_var_inv",
"*",
"kzz_lin_op",
".",
"matvec",
"(",
"_solve_cholesky_factored_system_vec",
"(",
"chol_sigma_inv",
",",
"kzx_obs",
")",
")",
")",
"chol_sigma_inv_lin_op",
"=",
"tf",
".",
"linalg",
".",
"LinearOperatorLowerTriangular",
"(",
"chol_sigma_inv",
")",
"scale",
"=",
"chol_sigma_inv_lin_op",
".",
"solve",
"(",
"kzz",
")",
"return",
"loc",
",",
"scale"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
build_is_last_day_of_season
|
Build utility method to compute whether the season is changing.
|
tensorflow_probability/python/sts/seasonal.py
|
def build_is_last_day_of_season(num_steps_per_season):
"""Build utility method to compute whether the season is changing."""
num_steps_per_cycle = np.sum(num_steps_per_season)
changepoints = np.cumsum(np.ravel(num_steps_per_season)) - 1
def is_last_day_of_season(t):
t_ = dist_util.maybe_get_static_value(t)
if t_ is not None: # static case
step_in_cycle = t_ % num_steps_per_cycle
return any(step_in_cycle == changepoints)
else:
step_in_cycle = tf.math.floormod(t, num_steps_per_cycle)
return tf.reduce_any(
input_tensor=tf.equal(step_in_cycle, changepoints))
return is_last_day_of_season
|
def build_is_last_day_of_season(num_steps_per_season):
"""Build utility method to compute whether the season is changing."""
num_steps_per_cycle = np.sum(num_steps_per_season)
changepoints = np.cumsum(np.ravel(num_steps_per_season)) - 1
def is_last_day_of_season(t):
t_ = dist_util.maybe_get_static_value(t)
if t_ is not None: # static case
step_in_cycle = t_ % num_steps_per_cycle
return any(step_in_cycle == changepoints)
else:
step_in_cycle = tf.math.floormod(t, num_steps_per_cycle)
return tf.reduce_any(
input_tensor=tf.equal(step_in_cycle, changepoints))
return is_last_day_of_season
|
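The changepoint arithmetic above is easy to check in plain NumPy; the sketch below (season lengths are hypothetical) reproduces the static branch of the predicate:

import numpy as np

num_steps_per_season = np.array([31, 28, 31])       # hypothetical season lengths
num_steps_per_cycle = np.sum(num_steps_per_season)
changepoints = np.cumsum(num_steps_per_season) - 1  # last step index of each season

def is_last_day_of_season(t):
  # Static (NumPy) analogue of the predicate built above.
  return np.any(t % num_steps_per_cycle == changepoints)

print([t for t in range(95) if is_last_day_of_season(t)])
# -> [30, 58, 89]; the pattern repeats every num_steps_per_cycle = 90 steps.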
[
"Build",
"utility",
"method",
"to",
"compute",
"whether",
"the",
"season",
"is",
"changing",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/seasonal.py#L513-L526
|
[
"def",
"build_is_last_day_of_season",
"(",
"num_steps_per_season",
")",
":",
"num_steps_per_cycle",
"=",
"np",
".",
"sum",
"(",
"num_steps_per_season",
")",
"changepoints",
"=",
"np",
".",
"cumsum",
"(",
"np",
".",
"ravel",
"(",
"num_steps_per_season",
")",
")",
"-",
"1",
"def",
"is_last_day_of_season",
"(",
"t",
")",
":",
"t_",
"=",
"dist_util",
".",
"maybe_get_static_value",
"(",
"t",
")",
"if",
"t_",
"is",
"not",
"None",
":",
"# static case",
"step_in_cycle",
"=",
"t_",
"%",
"num_steps_per_cycle",
"return",
"any",
"(",
"step_in_cycle",
"==",
"changepoints",
")",
"else",
":",
"step_in_cycle",
"=",
"tf",
".",
"math",
".",
"floormod",
"(",
"t",
",",
"num_steps_per_cycle",
")",
"return",
"tf",
".",
"reduce_any",
"(",
"input_tensor",
"=",
"tf",
".",
"equal",
"(",
"step_in_cycle",
",",
"changepoints",
")",
")",
"return",
"is_last_day_of_season"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
build_effects_to_residuals_matrix
|
Build change-of-basis matrices for constrained seasonal effects.
This method builds the matrix that transforms seasonal effects into
effect residuals (differences from the mean effect), and additionally
projects these residuals onto the subspace where the mean effect is zero.
See `ConstrainedSeasonalStateSpaceModel` for mathematical details.
Args:
num_seasons: scalar `int` number of seasons.
dtype: TensorFlow `dtype` for the returned values.
Returns:
effects_to_residuals: `Tensor` of shape
`[num_seasons-1, num_seasons]`, such that `differences_from_mean_effect =
matmul(effects_to_residuals, seasonal_effects)`. In the
notation of `ConstrainedSeasonalStateSpaceModel`, this is
`effects_to_residuals = P * R`.
residuals_to_effects: the (pseudo)-inverse of the above; a
`Tensor` of shape `[num_seasons, num_seasons-1]`. In the
notation of `ConstrainedSeasonalStateSpaceModel`, this is
`residuals_to_effects = R^{-1} * P'`.
|
tensorflow_probability/python/sts/seasonal.py
|
def build_effects_to_residuals_matrix(num_seasons, dtype):
"""Build change-of-basis matrices for constrained seasonal effects.
This method builds the matrix that transforms seasonal effects into
effect residuals (differences from the mean effect), and additionally
projects these residuals onto the subspace where the mean effect is zero.
See `ConstrainedSeasonalStateSpaceModel` for mathematical details.
Args:
num_seasons: scalar `int` number of seasons.
dtype: TensorFlow `dtype` for the returned values.
Returns:
effects_to_residuals: `Tensor` of shape
`[num_seasons-1, num_seasons]`, such that `differences_from_mean_effect =
matmul(effects_to_residuals, seasonal_effects)`. In the
notation of `ConstrainedSeasonalStateSpaceModel`, this is
`effects_to_residuals = P * R`.
residuals_to_effects: the (pseudo)-inverse of the above; a
`Tensor` of shape `[num_seasons, num_seasons-1]`. In the
notation of `ConstrainedSeasonalStateSpaceModel`, this is
`residuals_to_effects = R^{-1} * P'`.
"""
# Build the matrix that converts effects `e_i` into differences from the mean
# effect `(e_i - sum(e_i)) / num_seasons`, with the mean effect in the last
# row so that the transformation is invertible.
effects_to_residuals_fullrank = np.eye(num_seasons) - 1./num_seasons
effects_to_residuals_fullrank[-1, :] = 1./num_seasons # compute mean effect
residuals_to_effects_fullrank = np.linalg.inv(effects_to_residuals_fullrank)
# Drop the final dimension, effectively setting the mean effect to zero.
effects_to_residuals = effects_to_residuals_fullrank[:-1, :]
residuals_to_effects = residuals_to_effects_fullrank[:, :-1]
# Return Tensor values of the specified dtype.
effects_to_residuals = tf.cast(
effects_to_residuals, dtype=dtype, name='effects_to_residuals')
residuals_to_effects = tf.cast(
residuals_to_effects, dtype=dtype, name='residuals_to_effects')
return effects_to_residuals, residuals_to_effects
|
def build_effects_to_residuals_matrix(num_seasons, dtype):
"""Build change-of-basis matrices for constrained seasonal effects.
This method builds the matrix that transforms seasonal effects into
effect residuals (differences from the mean effect), and additionally
projects these residuals onto the subspace where the mean effect is zero.
See `ConstrainedSeasonalStateSpaceModel` for mathematical details.
Args:
num_seasons: scalar `int` number of seasons.
dtype: TensorFlow `dtype` for the returned values.
Returns:
effects_to_residuals: `Tensor` of shape
`[num_seasons-1, num_seasons]`, such that `differences_from_mean_effect =
matmul(effects_to_residuals, seasonal_effects)`. In the
notation of `ConstrainedSeasonalStateSpaceModel`, this is
`effects_to_residuals = P * R`.
residuals_to_effects: the (pseudo)-inverse of the above; a
`Tensor` of shape `[num_seasons, num_seasons-1]`. In the
notation of `ConstrainedSeasonalStateSpaceModel`, this is
`residuals_to_effects = R^{-1} * P'`.
"""
# Build the matrix that converts effects `e_i` into differences from the mean
# effect `(e_i - sum(e_i)) / num_seasons`, with the mean effect in the last
# row so that the transformation is invertible.
effects_to_residuals_fullrank = np.eye(num_seasons) - 1./num_seasons
effects_to_residuals_fullrank[-1, :] = 1./num_seasons # compute mean effect
residuals_to_effects_fullrank = np.linalg.inv(effects_to_residuals_fullrank)
# Drop the final dimension, effectively setting the mean effect to zero.
effects_to_residuals = effects_to_residuals_fullrank[:-1, :]
residuals_to_effects = residuals_to_effects_fullrank[:, :-1]
# Return Tensor values of the specified dtype.
effects_to_residuals = tf.cast(
effects_to_residuals, dtype=dtype, name='effects_to_residuals')
residuals_to_effects = tf.cast(
residuals_to_effects, dtype=dtype, name='residuals_to_effects')
return effects_to_residuals, residuals_to_effects
|
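The following NumPy sketch (not from the source; the example effects are made up) checks the two properties the docstring promises: `effects_to_residuals` maps effects to their differences from the mean, and `residuals_to_effects` recovers the effects up to the removed mean:

import numpy as np

num_seasons = 4
e2r_full = np.eye(num_seasons) - 1. / num_seasons
e2r_full[-1, :] = 1. / num_seasons            # last row computes the mean effect
r2e_full = np.linalg.inv(e2r_full)
effects_to_residuals = e2r_full[:-1, :]       # drop the mean-effect row
residuals_to_effects = r2e_full[:, :-1]

effects = np.array([2., -1., 0.5, 3.])
residuals = effects_to_residuals @ effects    # differences from the mean effect
recovered = residuals_to_effects @ residuals  # effects with their mean removed
assert np.allclose(residuals, effects[:-1] - effects.mean())
assert np.allclose(recovered, effects - effects.mean())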
[
"Build",
"change",
"-",
"of",
"-",
"basis",
"matrices",
"for",
"constrained",
"seasonal",
"effects",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/seasonal.py#L529-L570
|
[
"def",
"build_effects_to_residuals_matrix",
"(",
"num_seasons",
",",
"dtype",
")",
":",
"# Build the matrix that converts effects `e_i` into differences from the mean",
"# effect `(e_i - sum(e_i)) / num_seasons`, with the mean effect in the last",
"# row so that the transformation is invertible.",
"effects_to_residuals_fullrank",
"=",
"np",
".",
"eye",
"(",
"num_seasons",
")",
"-",
"1.",
"/",
"num_seasons",
"effects_to_residuals_fullrank",
"[",
"-",
"1",
",",
":",
"]",
"=",
"1.",
"/",
"num_seasons",
"# compute mean effect",
"residuals_to_effects_fullrank",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"effects_to_residuals_fullrank",
")",
"# Drop the final dimension, effectively setting the mean effect to zero.",
"effects_to_residuals",
"=",
"effects_to_residuals_fullrank",
"[",
":",
"-",
"1",
",",
":",
"]",
"residuals_to_effects",
"=",
"residuals_to_effects_fullrank",
"[",
":",
",",
":",
"-",
"1",
"]",
"# Return Tensor values of the specified dtype.",
"effects_to_residuals",
"=",
"tf",
".",
"cast",
"(",
"effects_to_residuals",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'effects_to_residuals'",
")",
"residuals_to_effects",
"=",
"tf",
".",
"cast",
"(",
"residuals_to_effects",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'residuals_to_effects'",
")",
"return",
"effects_to_residuals",
",",
"residuals_to_effects"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
build_seasonal_transition_matrix
|
Build a function computing transitions for a seasonal effect model.
|
tensorflow_probability/python/sts/seasonal.py
|
def build_seasonal_transition_matrix(
num_seasons, is_last_day_of_season, dtype,
basis_change_matrix=None, basis_change_matrix_inv=None):
"""Build a function computing transitions for a seasonal effect model."""
with tf.compat.v1.name_scope('build_seasonal_transition_matrix'):
# If the season is changing, the transition matrix permutes the latent
# state to shift all seasons up by a dimension, and sends the current
# season's effect to the bottom.
seasonal_permutation = np.concatenate(
[np.arange(1, num_seasons), [0]], axis=0)
seasonal_permutation_matrix = tf.constant(
np.eye(num_seasons)[seasonal_permutation], dtype=dtype)
# Optionally transform the transition matrix into a reparameterized space,
# enforcing the zero-sum constraint for ConstrainedSeasonalStateSpaceModel.
if basis_change_matrix is not None:
seasonal_permutation_matrix = tf.matmul(
basis_change_matrix,
tf.matmul(seasonal_permutation_matrix, basis_change_matrix_inv))
identity_matrix = tf.eye(
tf.shape(input=seasonal_permutation_matrix)[-1], dtype=dtype)
def seasonal_transition_matrix(t):
return tf.linalg.LinearOperatorFullMatrix(
matrix=dist_util.pick_scalar_condition(
is_last_day_of_season(t),
seasonal_permutation_matrix,
identity_matrix))
return seasonal_transition_matrix
|
def build_seasonal_transition_matrix(
num_seasons, is_last_day_of_season, dtype,
basis_change_matrix=None, basis_change_matrix_inv=None):
"""Build a function computing transitions for a seasonal effect model."""
with tf.compat.v1.name_scope('build_seasonal_transition_matrix'):
# If the season is changing, the transition matrix permutes the latent
# state to shift all seasons up by a dimension, and sends the current
# season's effect to the bottom.
seasonal_permutation = np.concatenate(
[np.arange(1, num_seasons), [0]], axis=0)
seasonal_permutation_matrix = tf.constant(
np.eye(num_seasons)[seasonal_permutation], dtype=dtype)
# Optionally transform the transition matrix into a reparameterized space,
# enforcing the zero-sum constraint for ConstrainedSeasonalStateSpaceModel.
if basis_change_matrix is not None:
seasonal_permutation_matrix = tf.matmul(
basis_change_matrix,
tf.matmul(seasonal_permutation_matrix, basis_change_matrix_inv))
identity_matrix = tf.eye(
tf.shape(input=seasonal_permutation_matrix)[-1], dtype=dtype)
def seasonal_transition_matrix(t):
return tf.linalg.LinearOperatorFullMatrix(
matrix=dist_util.pick_scalar_condition(
is_last_day_of_season(t),
seasonal_permutation_matrix,
identity_matrix))
return seasonal_transition_matrix
|
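A quick NumPy illustration of the permutation used above (the effect values are hypothetical): at a season boundary the latent state rotates so that the just-finished season's effect moves to the bottom of the vector:

import numpy as np

num_seasons = 4
perm = np.concatenate([np.arange(1, num_seasons), [0]])
permutation_matrix = np.eye(num_seasons)[perm]

effects = np.array([10., 20., 30., 40.])  # current state: season 0's effect on top
print(permutation_matrix @ effects)       # -> [20. 30. 40. 10.]: seasons shift up,
                                          #    the finished season drops to the bottom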
[
"Build",
"a",
"function",
"computing",
"transitions",
"for",
"a",
"seasonal",
"effect",
"model",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/seasonal.py#L573-L604
|
[
"def",
"build_seasonal_transition_matrix",
"(",
"num_seasons",
",",
"is_last_day_of_season",
",",
"dtype",
",",
"basis_change_matrix",
"=",
"None",
",",
"basis_change_matrix_inv",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"'build_seasonal_transition_matrix'",
")",
":",
"# If the season is changing, the transition matrix permutes the latent",
"# state to shift all seasons up by a dimension, and sends the current",
"# season's effect to the bottom.",
"seasonal_permutation",
"=",
"np",
".",
"concatenate",
"(",
"[",
"np",
".",
"arange",
"(",
"1",
",",
"num_seasons",
")",
",",
"[",
"0",
"]",
"]",
",",
"axis",
"=",
"0",
")",
"seasonal_permutation_matrix",
"=",
"tf",
".",
"constant",
"(",
"np",
".",
"eye",
"(",
"num_seasons",
")",
"[",
"seasonal_permutation",
"]",
",",
"dtype",
"=",
"dtype",
")",
"# Optionally transform the transition matrix into a reparameterized space,",
"# enforcing the zero-sum constraint for ConstrainedSeasonalStateSpaceModel.",
"if",
"basis_change_matrix",
"is",
"not",
"None",
":",
"seasonal_permutation_matrix",
"=",
"tf",
".",
"matmul",
"(",
"basis_change_matrix",
",",
"tf",
".",
"matmul",
"(",
"seasonal_permutation_matrix",
",",
"basis_change_matrix_inv",
")",
")",
"identity_matrix",
"=",
"tf",
".",
"eye",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"seasonal_permutation_matrix",
")",
"[",
"-",
"1",
"]",
",",
"dtype",
"=",
"dtype",
")",
"def",
"seasonal_transition_matrix",
"(",
"t",
")",
":",
"return",
"tf",
".",
"linalg",
".",
"LinearOperatorFullMatrix",
"(",
"matrix",
"=",
"dist_util",
".",
"pick_scalar_condition",
"(",
"is_last_day_of_season",
"(",
"t",
")",
",",
"seasonal_permutation_matrix",
",",
"identity_matrix",
")",
")",
"return",
"seasonal_transition_matrix"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
build_seasonal_transition_noise
|
Build the transition noise model for a SeasonalStateSpaceModel.
|
tensorflow_probability/python/sts/seasonal.py
|
def build_seasonal_transition_noise(
drift_scale, num_seasons, is_last_day_of_season):
"""Build the transition noise model for a SeasonalStateSpaceModel."""
# If the current season has just ended, increase the variance of its effect
# following drift_scale. (the just-ended seasonal effect will always be the
# bottom element of the vector). Otherwise, do nothing.
drift_scale_diag = tf.stack(
[tf.zeros_like(drift_scale)] * (num_seasons - 1) + [drift_scale],
axis=-1)
def seasonal_transition_noise(t):
noise_scale_diag = dist_util.pick_scalar_condition(
is_last_day_of_season(t),
drift_scale_diag,
tf.zeros_like(drift_scale_diag))
return tfd.MultivariateNormalDiag(
loc=tf.zeros(num_seasons, dtype=drift_scale.dtype),
scale_diag=noise_scale_diag)
return seasonal_transition_noise
|
def build_seasonal_transition_noise(
drift_scale, num_seasons, is_last_day_of_season):
"""Build the transition noise model for a SeasonalStateSpaceModel."""
# If the current season has just ended, increase the variance of its effect
# following drift_scale. (the just-ended seasonal effect will always be the
# bottom element of the vector). Otherwise, do nothing.
drift_scale_diag = tf.stack(
[tf.zeros_like(drift_scale)] * (num_seasons - 1) + [drift_scale],
axis=-1)
def seasonal_transition_noise(t):
noise_scale_diag = dist_util.pick_scalar_condition(
is_last_day_of_season(t),
drift_scale_diag,
tf.zeros_like(drift_scale_diag))
return tfd.MultivariateNormalDiag(
loc=tf.zeros(num_seasons, dtype=drift_scale.dtype),
scale_diag=noise_scale_diag)
return seasonal_transition_noise
|
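A hedged plain-NumPy sketch of the same idea, with a made-up drift scale: the transition-noise scale is all zeros mid-season and places `drift_scale` on the bottom (just-ended) effect at a season boundary:

import numpy as np

drift_scale, num_seasons = 0.3, 4
drift_scale_diag = np.array([0.] * (num_seasons - 1) + [drift_scale])

def transition_noise_scale(t, is_last_day_of_season):
  # Zero noise mid-season; drift noise on the just-ended effect at a boundary.
  return drift_scale_diag if is_last_day_of_season(t) else np.zeros(num_seasons)

print(transition_noise_scale(0, lambda t: False))  # -> [0. 0. 0. 0.]
print(transition_noise_scale(6, lambda t: True))   # -> [0.  0.  0.  0.3]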
[
"Build",
"the",
"transition",
"noise",
"model",
"for",
"a",
"SeasonalStateSpaceModel",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/seasonal.py#L607-L625
|
[
"def",
"build_seasonal_transition_noise",
"(",
"drift_scale",
",",
"num_seasons",
",",
"is_last_day_of_season",
")",
":",
"# If the current season has just ended, increase the variance of its effect",
"# following drift_scale. (the just-ended seasonal effect will always be the",
"# bottom element of the vector). Otherwise, do nothing.",
"drift_scale_diag",
"=",
"tf",
".",
"stack",
"(",
"[",
"tf",
".",
"zeros_like",
"(",
"drift_scale",
")",
"]",
"*",
"(",
"num_seasons",
"-",
"1",
")",
"+",
"[",
"drift_scale",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"def",
"seasonal_transition_noise",
"(",
"t",
")",
":",
"noise_scale_diag",
"=",
"dist_util",
".",
"pick_scalar_condition",
"(",
"is_last_day_of_season",
"(",
"t",
")",
",",
"drift_scale_diag",
",",
"tf",
".",
"zeros_like",
"(",
"drift_scale_diag",
")",
")",
"return",
"tfd",
".",
"MultivariateNormalDiag",
"(",
"loc",
"=",
"tf",
".",
"zeros",
"(",
"num_seasons",
",",
"dtype",
"=",
"drift_scale",
".",
"dtype",
")",
",",
"scale_diag",
"=",
"noise_scale_diag",
")",
"return",
"seasonal_transition_noise"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
build_constrained_seasonal_transition_noise
|
Build transition noise distribution for a ConstrainedSeasonalSSM.
|
tensorflow_probability/python/sts/seasonal.py
|
def build_constrained_seasonal_transition_noise(
drift_scale, num_seasons, is_last_day_of_season):
"""Build transition noise distribution for a ConstrainedSeasonalSSM."""
# Conceptually, this method takes the noise covariance on effects L @ L'
# computed by `build_seasonal_transition_noise`, with scale factor
# L = [ 0, 0, ..., 0
# ...
# 0, 0, ..., drift_scale],
# and transforms it to act on the constrained-residual representation.
#
# The resulting noise covariance M @ M' is equivalent to
# M @ M' = effects_to_residuals @ LL' @ residuals_to_effects
# where `@` is matrix multiplication. However because this matrix is
# rank-deficient, we can't take its Cholesky decomposition directly, so we'll
# construct its lower-triangular scale factor `M` by hand instead.
#
# Concretely, let `M = P @ R @ L` be the scale factor in the
# transformed space, with matrices `R`, `P` applying the reparameterization
# and zero-mean constraint respectively as defined in the
# "Mathematical Details" section of `ConstrainedSeasonalStateSpaceModel`. It's
# easy to see (*) that the implied covariance
# `M @ M' = P @ R @ L @ L' @ R' @ P'` is just the constant matrix
# `M @ M' = [ 1, 1, ..., 1, 0
# 1, 1, ..., 1, 0
# ...
# 1, 1, ..., 1, 0
# 0, 0, ..., 0, 0] * (drift_scale / num_seasons)**2`
# with zeros in the final row and column. So we can directly construct
# the lower-triangular factor
# `Q = [ 1, 0, ... 0
# 1, 0, ..., 0
# ...
# 1, 0, ..., 0
# 0, 0, ..., 0 ] * drift_scale/num_seasons`
# such that Q @ Q' = M @ M'. In practice, we don't reify the final row and
# column full of zeroes, i.e., we construct
# `Q[:num_seasons-1, :num_seasons-1]` as the scale-TriL covariance factor.
#
# (*) Argument: `L` is zero everywhere but the last column, so `R @ L` will be
# too. Since the last column of `R` is the constant `-1/num_seasons`, `R @ L`
# is simply the matrix with constant `-drift_scale/num_seasons` in the final
# column (except the final row, which is negated) and zero in all other
# columns, and `M = P @ R @ L` additionally zeroes out the final row. Then
# M @ M' is just the outer product of that final column with itself (since all
# other columns are zero), which gives the matrix shown above.
drift_scale_tril_nonzeros = tf.concat([
tf.ones([num_seasons - 1, 1], dtype=drift_scale.dtype),
tf.zeros([num_seasons - 1, num_seasons - 2], dtype=drift_scale.dtype)],
axis=-1)
drift_scale_tril = (drift_scale_tril_nonzeros *
drift_scale[..., tf.newaxis, tf.newaxis] / num_seasons)
# Inject transition noise iff it is the last day of the season.
def seasonal_transition_noise(t):
noise_scale_tril = dist_util.pick_scalar_condition(
is_last_day_of_season(t),
drift_scale_tril,
tf.zeros_like(drift_scale_tril))
return tfd.MultivariateNormalTriL(
loc=tf.zeros(num_seasons-1, dtype=drift_scale.dtype),
scale_tril=noise_scale_tril)
return seasonal_transition_noise
|
def build_constrained_seasonal_transition_noise(
drift_scale, num_seasons, is_last_day_of_season):
"""Build transition noise distribution for a ConstrainedSeasonalSSM."""
# Conceptually, this method takes the noise covariance on effects L @ L'
# computed by `build_seasonal_transition_noise`, with scale factor
# L = [ 0, 0, ..., 0
# ...
# 0, 0, ..., drift_scale],
# and transforms it to act on the constrained-residual representation.
#
# The resulting noise covariance M @ M' is equivalent to
# M @ M' = effects_to_residuals @ LL' @ residuals_to_effects
# where `@` is matrix multiplication. However because this matrix is
# rank-deficient, we can't take its Cholesky decomposition directly, so we'll
# construct its lower-triangular scale factor `M` by hand instead.
#
# Concretely, let `M = P @ R @ L` be the scale factor in the
# transformed space, with matrices `R`, `P` applying the reparameterization
# and zero-mean constraint respectively as defined in the
# "Mathematical Details" section of `ConstrainedSeasonalStateSpaceModel`. It's
# easy to see (*) that the implied covariance
# `M @ M' = P @ R @ L @ L' @ R' @ P'` is just the constant matrix
# `M @ M' = [ 1, 1, ..., 1, 0
# 1, 1, ..., 1, 0
# ...
# 1, 1, ..., 1, 0
# 0, 0, ..., 0, 0] * (drift_scale / num_seasons)**2`
# with zeros in the final row and column. So we can directly construct
# the lower-triangular factor
# `Q = [ 1, 0, ... 0
# 1, 0, ..., 0
# ...
# 1, 0, ..., 0
# 0, 0, ..., 0 ] * drift_scale/num_seasons`
# such that Q @ Q' = M @ M'. In practice, we don't reify the final row and
# column full of zeroes, i.e., we construct
# `Q[:num_seasons-1, :num_seasons-1]` as the scale-TriL covariance factor.
#
# (*) Argument: `L` is zero everywhere but the last column, so `R @ L` will be
# too. Since the last column of `R` is the constant `-1/num_seasons`, `R @ L`
# is simply the matrix with constant `-drift_scale/num_seasons` in the final
# column (except the final row, which is negated) and zero in all other
# columns, and `M = P @ R @ L` additionally zeroes out the final row. Then
# M @ M' is just the outer product of that final column with itself (since all
# other columns are zero), which gives the matrix shown above.
drift_scale_tril_nonzeros = tf.concat([
tf.ones([num_seasons - 1, 1], dtype=drift_scale.dtype),
tf.zeros([num_seasons - 1, num_seasons - 2], dtype=drift_scale.dtype)],
axis=-1)
drift_scale_tril = (drift_scale_tril_nonzeros *
drift_scale[..., tf.newaxis, tf.newaxis] / num_seasons)
# Inject transition noise iff it is the last day of the season.
def seasonal_transition_noise(t):
noise_scale_tril = dist_util.pick_scalar_condition(
is_last_day_of_season(t),
drift_scale_tril,
tf.zeros_like(drift_scale_tril))
return tfd.MultivariateNormalTriL(
loc=tf.zeros(num_seasons-1, dtype=drift_scale.dtype),
scale_tril=noise_scale_tril)
return seasonal_transition_noise
|
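The covariance claim in the long comment above can be verified numerically. The sketch below (standalone NumPy, not from the source, reusing the change-of-basis construction from `build_effects_to_residuals_matrix`) checks that the hand-built factor Q implies the same covariance as pushing L through the effects-to-residuals map:

import numpy as np

num_seasons, drift_scale = 4, 0.5
# Change of basis from seasonal effects to zero-sum residuals.
e2r_full = np.eye(num_seasons) - 1. / num_seasons
e2r_full[-1, :] = 1. / num_seasons
effects_to_residuals = e2r_full[:-1, :]

# L: noise scale on effects; only the just-ended (bottom) effect gets drift.
L = np.zeros((num_seasons, num_seasons))
L[-1, -1] = drift_scale
M = effects_to_residuals @ L                  # scale factor in residual space

# Q: the hand-built lower-triangular factor used in the code above.
Q = np.zeros((num_seasons - 1, num_seasons - 1))
Q[:, 0] = drift_scale / num_seasons
assert np.allclose(M @ M.T, Q @ Q.T)          # same implied covariance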
[
"Build",
"transition",
"noise",
"distribution",
"for",
"a",
"ConstrainedSeasonalSSM",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/seasonal.py#L628-L691
|
[
"def",
"build_constrained_seasonal_transition_noise",
"(",
"drift_scale",
",",
"num_seasons",
",",
"is_last_day_of_season",
")",
":",
"# Conceptually, this method takes the noise covariance on effects L @ L'",
"# computed by `build_seasonal_transition_noise`, with scale factor",
"# L = [ 0, 0, ..., 0",
"# ...",
"# 0, 0, ..., drift_scale],",
"# and transforms it to act on the constrained-residual representation.",
"#",
"# The resulting noise covariance M @ M' is equivalent to",
"# M @ M' = effects_to_residuals @ LL' @ residuals_to_effects",
"# where `@` is matrix multiplication. However because this matrix is",
"# rank-deficient, we can't take its Cholesky decomposition directly, so we'll",
"# construct its lower-triangular scale factor `M` by hand instead.",
"#",
"# Concretely, let `M = P @ R @ L` be the scale factor in the",
"# transformed space, with matrices `R`, `P` applying the reparameterization",
"# and zero-mean constraint respectively as defined in the",
"# \"Mathematical Details\" section of `ConstrainedSeasonalStateSpaceModel`. It's",
"# easy to see (*) that the implied covariance",
"# `M @ M' = P @ R @ L @ L' @ R' @ P'` is just the constant matrix",
"# `M @ M' = [ 1, 1, ..., 1, 0",
"# 1, 1, ..., 1, 0",
"# ...",
"# 1, 1, ..., 1, 0",
"# 0, 0, ..., 0, 0] * (drift_scale / num_seasons)**2`",
"# with zeros in the final row and column. So we can directly construct",
"# the lower-triangular factor",
"# `Q = [ 1, 0, ... 0",
"# 1, 0, ..., 0",
"# ...",
"# 1, 0, ..., 0",
"# 0, 0, ..., 0 ] * drift_scale/num_seasons`",
"# such that Q @ Q' = M @ M'. In practice, we don't reify the final row and",
"# column full of zeroes, i.e., we construct",
"# `Q[:num_seasons-1, :num_seasons-1]` as the scale-TriL covariance factor.",
"#",
"# (*) Argument: `L` is zero everywhere but the last column, so `R @ L` will be",
"# too. Since the last column of `R` is the constant `-1/num_seasons`, `R @ L`",
"# is simply the matrix with constant `-drift_scale/num_seasons` in the final",
"# column (except the final row, which is negated) and zero in all other",
"# columns, and `M = P @ R @ L` additionally zeroes out the final row. Then",
"# M @ M' is just the outer product of that final column with itself (since all",
"# other columns are zero), which gives the matrix shown above.",
"drift_scale_tril_nonzeros",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"ones",
"(",
"[",
"num_seasons",
"-",
"1",
",",
"1",
"]",
",",
"dtype",
"=",
"drift_scale",
".",
"dtype",
")",
",",
"tf",
".",
"zeros",
"(",
"[",
"num_seasons",
"-",
"1",
",",
"num_seasons",
"-",
"2",
"]",
",",
"dtype",
"=",
"drift_scale",
".",
"dtype",
")",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"drift_scale_tril",
"=",
"(",
"drift_scale_tril_nonzeros",
"*",
"drift_scale",
"[",
"...",
",",
"tf",
".",
"newaxis",
",",
"tf",
".",
"newaxis",
"]",
"/",
"num_seasons",
")",
"# Inject transition noise iff it is the last day of the season.",
"def",
"seasonal_transition_noise",
"(",
"t",
")",
":",
"noise_scale_tril",
"=",
"dist_util",
".",
"pick_scalar_condition",
"(",
"is_last_day_of_season",
"(",
"t",
")",
",",
"drift_scale_tril",
",",
"tf",
".",
"zeros_like",
"(",
"drift_scale_tril",
")",
")",
"return",
"tfd",
".",
"MultivariateNormalTriL",
"(",
"loc",
"=",
"tf",
".",
"zeros",
"(",
"num_seasons",
"-",
"1",
",",
"dtype",
"=",
"drift_scale",
".",
"dtype",
")",
",",
"scale_tril",
"=",
"noise_scale_tril",
")",
"return",
"seasonal_transition_noise"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_is_empty_observation_data
|
Returns `True` if given observation data is empty.
Emptiness means either
1. Both `observation_index_points` and `observations` are `None`, or
2. the "number of observations" shape is 0. The shape of
`observation_index_points` is `[..., N, f1, ..., fF]`, where `N` is the
number of observations and the `f`s are feature dims. Thus, we look at the
shape element just to the left of the leftmost feature dim. If that shape is
zero, we consider the data empty.
We don't check the shape of observations; validations are checked elsewhere in
the calling code, to ensure these shapes are consistent.
Args:
feature_ndims: the number of feature dims, as reported by the GP kernel.
observation_index_points: the observation data locations in the index set.
observations: the observation data.
Returns:
is_empty: True if the data were deemed to be empty.
|
tensorflow_probability/python/distributions/gaussian_process_regression_model.py
|
def _is_empty_observation_data(
feature_ndims, observation_index_points, observations):
"""Returns `True` if given observation data is empty.
Emptiness means either
1. Both `observation_index_points` and `observations` are `None`, or
2. the "number of observations" shape is 0. The shape of
`observation_index_points` is `[..., N, f1, ..., fF]`, where `N` is the
number of observations and the `f`s are feature dims. Thus, we look at the
shape element just to the left of the leftmost feature dim. If that shape is
zero, we consider the data empty.
We don't check the shape of observations; validations are checked elsewhere in
the calling code, to ensure these shapes are consistent.
Args:
feature_ndims: the number of feature dims, as reported by the GP kernel.
observation_index_points: the observation data locations in the index set.
observations: the observation data.
Returns:
is_empty: True if the data were deemed to be empty.
"""
# If both input locations and observations are `None`, we consider this
# "empty" observation data.
if observation_index_points is None and observations is None:
return True
num_obs = tf.compat.dimension_value(
observation_index_points.shape[-(feature_ndims + 1)])
if num_obs is not None and num_obs == 0:
return True
return False
|
def _is_empty_observation_data(
feature_ndims, observation_index_points, observations):
"""Returns `True` if given observation data is empty.
Emptiness means either
1. Both `observation_index_points` and `observations` are `None`, or
2. the "number of observations" shape is 0. The shape of
`observation_index_points` is `[..., N, f1, ..., fF]`, where `N` is the
number of observations and the `f`s are feature dims. Thus, we look at the
shape element just to the left of the leftmost feature dim. If that shape is
zero, we consider the data empty.
We don't check the shape of observations; validations are checked elsewhere in
the calling code, to ensure these shapes are consistent.
Args:
feature_ndims: the number of feature dims, as reported by the GP kernel.
observation_index_points: the observation data locations in the index set.
observations: the observation data.
Returns:
is_empty: True if the data were deemed to be empty.
"""
# If both input locations and observations are `None`, we consider this
# "empty" observation data.
if observation_index_points is None and observations is None:
return True
num_obs = tf.compat.dimension_value(
observation_index_points.shape[-(feature_ndims + 1)])
if num_obs is not None and num_obs == 0:
return True
return False
|
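A minimal shape-only illustration of the emptiness convention described above, using NumPy arrays with hypothetical shapes in place of Tensors:

import numpy as np

feature_ndims = 1
# Shape [..., N, f1]: the axis just left of the feature dims is the observation count.
empty_index_points = np.zeros([3, 0, 2])     # batch of 3, N=0 observations, feature size 2
nonempty_index_points = np.zeros([3, 5, 2])  # N=5 observations

def num_observations(index_points):
  return index_points.shape[-(feature_ndims + 1)]

print(num_observations(empty_index_points))     # -> 0, treated as "empty" data
print(num_observations(nonempty_index_points))  # -> 5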
[
"Returns",
"True",
"if",
"given",
"observation",
"data",
"is",
"empty",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/gaussian_process_regression_model.py#L39-L70
|
[
"def",
"_is_empty_observation_data",
"(",
"feature_ndims",
",",
"observation_index_points",
",",
"observations",
")",
":",
"# If both input locations and observations are `None`, we consider this",
"# \"empty\" observation data.",
"if",
"observation_index_points",
"is",
"None",
"and",
"observations",
"is",
"None",
":",
"return",
"True",
"num_obs",
"=",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"observation_index_points",
".",
"shape",
"[",
"-",
"(",
"feature_ndims",
"+",
"1",
")",
"]",
")",
"if",
"num_obs",
"is",
"not",
"None",
"and",
"num_obs",
"==",
"0",
":",
"return",
"True",
"return",
"False"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_validate_observation_data
|
Ensure that observation data and locations have consistent shapes.
This basically means that the batch shapes are broadcastable. We can only
ensure this when those shapes are fully statically defined.
Args:
kernel: The GP kernel.
observation_index_points: the observation data locations in the index set.
observations: the observation data.
Raises:
ValueError: if the observations' batch shapes are not broadcastable.
|
tensorflow_probability/python/distributions/gaussian_process_regression_model.py
|
def _validate_observation_data(
kernel, observation_index_points, observations):
"""Ensure that observation data and locations have consistent shapes.
This basically means that the batch shapes are broadcastable. We can only
ensure this when those shapes are fully statically defined.
Args:
kernel: The GP kernel.
observation_index_points: the observation data locations in the index set.
observations: the observation data.
Raises:
ValueError: if the observations' batch shapes are not broadcastable.
"""
# Check that observation index points and observation counts broadcast.
ndims = kernel.feature_ndims
if (tensorshape_util.is_fully_defined(
observation_index_points.shape[:-ndims]) and
tensorshape_util.is_fully_defined(observations.shape)):
index_point_count = observation_index_points.shape[:-ndims]
observation_count = observations.shape
try:
tf.broadcast_static_shape(index_point_count, observation_count)
except ValueError:
# Re-raise with our own more contextual error message.
raise ValueError(
'Observation index point and observation counts are not '
'broadcastable: {} and {}, respectively.'.format(
index_point_count, observation_count))
|
def _validate_observation_data(
kernel, observation_index_points, observations):
"""Ensure that observation data and locations have consistent shapes.
This basically means that the batch shapes are broadcastable. We can only
ensure this when those shapes are fully statically defined.
Args:
kernel: The GP kernel.
observation_index_points: the observation data locations in the index set.
observations: the observation data.
Raises:
ValueError: if the observations' batch shapes are not broadcastable.
"""
# Check that observation index points and observation counts broadcast.
ndims = kernel.feature_ndims
if (tensorshape_util.is_fully_defined(
observation_index_points.shape[:-ndims]) and
tensorshape_util.is_fully_defined(observations.shape)):
index_point_count = observation_index_points.shape[:-ndims]
observation_count = observations.shape
try:
tf.broadcast_static_shape(index_point_count, observation_count)
except ValueError:
# Re-raise with our own more contextual error message.
raise ValueError(
'Observation index point and observation counts are not '
'broadcastable: {} and {}, respectively.'.format(
index_point_count, observation_count))
|
[
"Ensure",
"that",
"observation",
"data",
"and",
"locations",
"have",
"consistent",
"shapes",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/gaussian_process_regression_model.py#L73-L103
|
[
"def",
"_validate_observation_data",
"(",
"kernel",
",",
"observation_index_points",
",",
"observations",
")",
":",
"# Check that observation index points and observation counts broadcast.",
"ndims",
"=",
"kernel",
".",
"feature_ndims",
"if",
"(",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"observation_index_points",
".",
"shape",
"[",
":",
"-",
"ndims",
"]",
")",
"and",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"observations",
".",
"shape",
")",
")",
":",
"index_point_count",
"=",
"observation_index_points",
".",
"shape",
"[",
":",
"-",
"ndims",
"]",
"observation_count",
"=",
"observations",
".",
"shape",
"try",
":",
"tf",
".",
"broadcast_static_shape",
"(",
"index_point_count",
",",
"observation_count",
")",
"except",
"ValueError",
":",
"# Re-raise with our own more contextual error message.",
"raise",
"ValueError",
"(",
"'Observation index point and observation counts are not '",
"'broadcastable: {} and {}, respectively.'",
".",
"format",
"(",
"index_point_count",
",",
"observation_count",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
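A minimal sketch of the broadcast check that the function above delegates to tf.broadcast_static_shape; the shapes are made up for illustration:

import tensorflow as tf

# Compatible batch shapes: [5, 3] and [3] broadcast to [5, 3].
print(tf.broadcast_static_shape(tf.TensorShape([5, 3]), tf.TensorShape([3])))

# Incompatible batch shapes raise ValueError, which the function above re-raises
# with a more contextual error message.
try:
    tf.broadcast_static_shape(tf.TensorShape([5, 3]), tf.TensorShape([4]))
except ValueError:
    print("not broadcastable")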
test
|
_kl_gamma_gamma
|
Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.
Args:
g0: instance of a Gamma distribution object.
g1: instance of a Gamma distribution object.
name: (optional) Name to use for created operations. Default is
"kl_gamma_gamma".
Returns:
kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
|
tensorflow_probability/python/distributions/gamma.py
|
def _kl_gamma_gamma(g0, g1, name=None):
"""Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.
Args:
g0: instance of a Gamma distribution object.
g1: instance of a Gamma distribution object.
name: (optional) Name to use for created operations. Default is
"kl_gamma_gamma".
Returns:
kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
"""
with tf.name_scope(name or "kl_gamma_gamma"):
# Result from:
# http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
# For derivation see:
# http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions pylint: disable=line-too-long
return (((g0.concentration - g1.concentration) *
tf.math.digamma(g0.concentration)) +
tf.math.lgamma(g1.concentration) -
tf.math.lgamma(g0.concentration) +
g1.concentration * tf.math.log(g0.rate) -
g1.concentration * tf.math.log(g1.rate) + g0.concentration *
(g1.rate / g0.rate - 1.))
|
def _kl_gamma_gamma(g0, g1, name=None):
"""Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.
Args:
g0: instance of a Gamma distribution object.
g1: instance of a Gamma distribution object.
name: (optional) Name to use for created operations. Default is
"kl_gamma_gamma".
Returns:
kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
"""
with tf.name_scope(name or "kl_gamma_gamma"):
# Result from:
# http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
# For derivation see:
# http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions pylint: disable=line-too-long
return (((g0.concentration - g1.concentration) *
tf.math.digamma(g0.concentration)) +
tf.math.lgamma(g1.concentration) -
tf.math.lgamma(g0.concentration) +
g1.concentration * tf.math.log(g0.rate) -
g1.concentration * tf.math.log(g1.rate) + g0.concentration *
(g1.rate / g0.rate - 1.))
|
[
"Calculate",
"the",
"batched",
"KL",
"divergence",
"KL",
"(",
"g0",
"||",
"g1",
")",
"with",
"g0",
"and",
"g1",
"Gamma",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/gamma.py#L273-L296
|
[
"def",
"_kl_gamma_gamma",
"(",
"g0",
",",
"g1",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"kl_gamma_gamma\"",
")",
":",
"# Result from:",
"# http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps",
"# For derivation see:",
"# http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions pylint: disable=line-too-long",
"return",
"(",
"(",
"(",
"g0",
".",
"concentration",
"-",
"g1",
".",
"concentration",
")",
"*",
"tf",
".",
"math",
".",
"digamma",
"(",
"g0",
".",
"concentration",
")",
")",
"+",
"tf",
".",
"math",
".",
"lgamma",
"(",
"g1",
".",
"concentration",
")",
"-",
"tf",
".",
"math",
".",
"lgamma",
"(",
"g0",
".",
"concentration",
")",
"+",
"g1",
".",
"concentration",
"*",
"tf",
".",
"math",
".",
"log",
"(",
"g0",
".",
"rate",
")",
"-",
"g1",
".",
"concentration",
"*",
"tf",
".",
"math",
".",
"log",
"(",
"g1",
".",
"rate",
")",
"+",
"g0",
".",
"concentration",
"*",
"(",
"g1",
".",
"rate",
"/",
"g0",
".",
"rate",
"-",
"1.",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
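For reference, the closed form evaluated by the return expression above, writing \alpha for concentration, \beta for rate, \psi for the digamma function and \Gamma for the gamma function (a restatement of the code, not an independent derivation):

\mathrm{KL}(g_0 \parallel g_1) = (\alpha_0 - \alpha_1)\,\psi(\alpha_0) + \log\Gamma(\alpha_1) - \log\Gamma(\alpha_0) + \alpha_1(\log\beta_0 - \log\beta_1) + \alpha_0\left(\frac{\beta_1}{\beta_0} - 1\right)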
test
|
SequentialSchedule.add
|
Add a learning rate scheduler to the contained `schedules`
:param scheduler: learning rate scheduler to be added
:param max_iteration: the number of iterations this scheduler will run
|
pyspark/bigdl/optim/optimizer.py
|
def add(self, scheduler, max_iteration, bigdl_type="float"):
"""
Add a learning rate scheduler to the contained `schedules`
        :param scheduler: learning rate scheduler to be added
        :param max_iteration: the number of iterations this scheduler will run
"""
return callBigDlFunc(bigdl_type, "addScheduler", self.value, scheduler, max_iteration)
|
def add(self, scheduler, max_iteration, bigdl_type="float"):
"""
Add a learning rate scheduler to the contained `schedules`
        :param scheduler: learning rate scheduler to be added
        :param max_iteration: the number of iterations this scheduler will run
"""
return callBigDlFunc(bigdl_type, "addScheduler", self.value, scheduler, max_iteration)
|
[
"Add",
"a",
"learning",
"rate",
"scheduler",
"to",
"the",
"contained",
"schedules"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L425-L432
|
[
"def",
"add",
"(",
"self",
",",
"scheduler",
",",
"max_iteration",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"return",
"callBigDlFunc",
"(",
"bigdl_type",
",",
"\"addScheduler\"",
",",
"self",
".",
"value",
",",
"scheduler",
",",
"max_iteration",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
OptimMethod.save
|
Save the OptimMethod.
:param path: the path to save to
:param overWrite: whether to overwrite
|
pyspark/bigdl/optim/optimizer.py
|
def save(self, path, overWrite):
"""
        Save the OptimMethod.
        :param path: the path to save to
        :param overWrite: whether to overwrite
"""
method=self.value
return callBigDlFunc(self.bigdl_type, "saveOptimMethod", method, path, overWrite)
|
def save(self, path, overWrite):
"""
        Save the OptimMethod.
        :param path: the path to save to
        :param overWrite: whether to overwrite
"""
method=self.value
return callBigDlFunc(self.bigdl_type, "saveOptimMethod", method, path, overWrite)
|
[
"save",
"OptimMethod",
":",
"param",
"path",
"path",
":",
"param",
"overWrite",
"whether",
"to",
"overwrite"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L453-L460
|
[
"def",
"save",
"(",
"self",
",",
"path",
",",
"overWrite",
")",
":",
"method",
"=",
"self",
".",
"value",
"return",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"saveOptimMethod\"",
",",
"method",
",",
"path",
",",
"overWrite",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
BaseOptimizer.set_checkpoint
|
Configure checkpoint settings.
:param checkpoint_trigger: the interval to write snapshots
:param checkpoint_path: the path to write snapshots into
:param isOverWrite: whether to overwrite existing snapshots in path. Default is True.
|
pyspark/bigdl/optim/optimizer.py
|
def set_checkpoint(self, checkpoint_trigger,
checkpoint_path, isOverWrite=True):
"""
Configure checkpoint settings.
:param checkpoint_trigger: the interval to write snapshots
:param checkpoint_path: the path to write snapshots into
        :param isOverWrite: whether to overwrite existing snapshots in path. Default is True.
"""
if not os.path.exists(checkpoint_path):
mkpath(checkpoint_path)
callBigDlFunc(self.bigdl_type, "setCheckPoint", self.value,
checkpoint_trigger, checkpoint_path, isOverWrite)
|
def set_checkpoint(self, checkpoint_trigger,
checkpoint_path, isOverWrite=True):
"""
Configure checkpoint settings.
:param checkpoint_trigger: the interval to write snapshots
:param checkpoint_path: the path to write snapshots into
        :param isOverWrite: whether to overwrite existing snapshots in path. Default is True.
"""
if not os.path.exists(checkpoint_path):
mkpath(checkpoint_path)
callBigDlFunc(self.bigdl_type, "setCheckPoint", self.value,
checkpoint_trigger, checkpoint_path, isOverWrite)
|
[
"Configure",
"checkpoint",
"settings",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L719-L732
|
[
"def",
"set_checkpoint",
"(",
"self",
",",
"checkpoint_trigger",
",",
"checkpoint_path",
",",
"isOverWrite",
"=",
"True",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"checkpoint_path",
")",
":",
"mkpath",
"(",
"checkpoint_path",
")",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"setCheckPoint\"",
",",
"self",
".",
"value",
",",
"checkpoint_trigger",
",",
"checkpoint_path",
",",
"isOverWrite",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
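A hypothetical usage sketch; `optimizer` is assumed to be an already-constructed BigDL optimizer, and EveryEpoch is assumed to be the per-epoch trigger exported by bigdl.optim.optimizer:

from bigdl.optim.optimizer import EveryEpoch   # assumed trigger class

# Write a snapshot after every epoch; existing snapshots are overwritten by default.
optimizer.set_checkpoint(EveryEpoch(), "/tmp/bigdl_checkpoints")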
test
|
BaseOptimizer.set_gradclip_const
|
Configure constant clipping settings.
:param min_value: the minimum value to clip by
:param max_value: the maximum value to clip by
|
pyspark/bigdl/optim/optimizer.py
|
def set_gradclip_const(self, min_value, max_value):
"""
Configure constant clipping settings.
:param min_value: the minimum value to clip by
        :param max_value: the maximum value to clip by
"""
callBigDlFunc(self.bigdl_type, "setConstantClip", self.value, min_value, max_value)
|
def set_gradclip_const(self, min_value, max_value):
"""
Configure constant clipping settings.
:param min_value: the minimum value to clip by
        :param max_value: the maximum value to clip by
"""
callBigDlFunc(self.bigdl_type, "setConstantClip", self.value, min_value, max_value)
|
[
"Configure",
"constant",
"clipping",
"settings",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L734-L742
|
[
"def",
"set_gradclip_const",
"(",
"self",
",",
"min_value",
",",
"max_value",
")",
":",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"setConstantClip\"",
",",
"self",
".",
"value",
",",
"min_value",
",",
"max_value",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
BaseOptimizer.optimize
|
Do an optimization.
|
pyspark/bigdl/optim/optimizer.py
|
def optimize(self):
"""
Do an optimization.
"""
jmodel = callJavaFunc(self.value.optimize)
from bigdl.nn.layer import Layer
return Layer.of(jmodel)
|
def optimize(self):
"""
Do an optimization.
"""
jmodel = callJavaFunc(self.value.optimize)
from bigdl.nn.layer import Layer
return Layer.of(jmodel)
|
[
"Do",
"an",
"optimization",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L760-L766
|
[
"def",
"optimize",
"(",
"self",
")",
":",
"jmodel",
"=",
"callJavaFunc",
"(",
"self",
".",
"value",
".",
"optimize",
")",
"from",
"bigdl",
".",
"nn",
".",
"layer",
"import",
"Layer",
"return",
"Layer",
".",
"of",
"(",
"jmodel",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
BaseOptimizer.set_train_summary
|
Set train summary. A TrainSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of TrainSummary.
:param summary: a TrainSummary object
|
pyspark/bigdl/optim/optimizer.py
|
def set_train_summary(self, summary):
"""
Set train summary. A TrainSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of TrainSummary.
:param summary: a TrainSummary object
"""
callBigDlFunc(self.bigdl_type, "setTrainSummary", self.value,
summary)
return self
|
def set_train_summary(self, summary):
"""
Set train summary. A TrainSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of TrainSummary.
:param summary: a TrainSummary object
"""
callBigDlFunc(self.bigdl_type, "setTrainSummary", self.value,
summary)
return self
|
[
"Set",
"train",
"summary",
".",
"A",
"TrainSummary",
"object",
"contains",
"information",
"necessary",
"for",
"the",
"optimizer",
"to",
"know",
"how",
"often",
"the",
"logs",
"are",
"recorded",
"where",
"to",
"store",
"the",
"logs",
"and",
"how",
"to",
"retrieve",
"them",
"etc",
".",
"For",
"details",
"refer",
"to",
"the",
"docs",
"of",
"TrainSummary",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L768-L780
|
[
"def",
"set_train_summary",
"(",
"self",
",",
"summary",
")",
":",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"setTrainSummary\"",
",",
"self",
".",
"value",
",",
"summary",
")",
"return",
"self"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
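A hypothetical usage sketch; the TrainSummary(log_dir, app_name) constructor signature is an assumption here, and `optimizer` is assumed to exist:

from bigdl.optim.optimizer import TrainSummary   # constructor arguments assumed below

summary = TrainSummary(log_dir="/tmp/bigdl_summaries", app_name="my_job")
optimizer.set_train_summary(summary)   # returns self, so calls can be chained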
test
|
BaseOptimizer.set_val_summary
|
Set validation summary. A ValidationSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of ValidationSummary.
:param summary: a ValidationSummary object
|
pyspark/bigdl/optim/optimizer.py
|
def set_val_summary(self, summary):
"""
Set validation summary. A ValidationSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of ValidationSummary.
:param summary: a ValidationSummary object
"""
callBigDlFunc(self.bigdl_type, "setValSummary", self.value,
summary)
return self
|
def set_val_summary(self, summary):
"""
Set validation summary. A ValidationSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of ValidationSummary.
:param summary: a ValidationSummary object
"""
callBigDlFunc(self.bigdl_type, "setValSummary", self.value,
summary)
return self
|
[
"Set",
"validation",
"summary",
".",
"A",
"ValidationSummary",
"object",
"contains",
"information",
"necessary",
"for",
"the",
"optimizer",
"to",
"know",
"how",
"often",
"the",
"logs",
"are",
"recorded",
"where",
"to",
"store",
"the",
"logs",
"and",
"how",
"to",
"retrieve",
"them",
"etc",
".",
"For",
"details",
"refer",
"to",
"the",
"docs",
"of",
"ValidationSummary",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L782-L796
|
[
"def",
"set_val_summary",
"(",
"self",
",",
"summary",
")",
":",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"setValSummary\"",
",",
"self",
".",
"value",
",",
"summary",
")",
"return",
"self"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
Optimizer.create
|
Create an optimizer.
Depending on the input type, the returned optimizer can be a local optimizer \
or a distributed optimizer.
:param model: the neural net model
:param training_set: (features, label) for local mode. RDD[Sample] for distributed mode.
:param criterion: the loss function
:param optim_method: the algorithm to use for optimization,
e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
:param end_trigger: when to end the optimization. Default value is MaxEpoch(1).
:param batch_size: training batch size
:param cores: This is for the local optimizer only; the total number of physical cores is used as the default value.
|
pyspark/bigdl/optim/optimizer.py
|
def create(model,
training_set,
criterion,
end_trigger=None,
batch_size=32,
optim_method=None,
cores=None,
bigdl_type="float"):
"""
Create an optimizer.
        Depending on the input type, the returned optimizer can be a local optimizer \
or a distributed optimizer.
:param model: the neural net model
:param training_set: (features, label) for local mode. RDD[Sample] for distributed mode.
:param criterion: the loss function
:param optim_method: the algorithm to use for optimization,
e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
        :param end_trigger: when to end the optimization. Default value is MaxEpoch(1).
        :param batch_size: training batch size
        :param cores: This is for the local optimizer only; the total number of physical cores is used as the default value.
"""
if not end_trigger:
end_trigger = MaxEpoch(1)
if not optim_method:
optim_method = SGD()
if isinstance(training_set, RDD) or isinstance(training_set, DataSet):
return DistriOptimizer(model=model,
training_rdd=training_set,
criterion=criterion,
end_trigger=end_trigger,
batch_size=batch_size,
optim_method=optim_method,
bigdl_type=bigdl_type)
elif isinstance(training_set, tuple) and len(training_set) == 2:
x, y = training_set
return LocalOptimizer(X=x,
Y=y,
model=model,
criterion=criterion,
end_trigger=end_trigger,
batch_size=batch_size,
optim_method=optim_method,
cores=cores,
bigdl_type="float")
else:
raise Exception("Not supported training set: %s" % type(training_set))
|
def create(model,
training_set,
criterion,
end_trigger=None,
batch_size=32,
optim_method=None,
cores=None,
bigdl_type="float"):
"""
Create an optimizer.
        Depending on the input type, the returned optimizer can be a local optimizer \
or a distributed optimizer.
:param model: the neural net model
:param training_set: (features, label) for local mode. RDD[Sample] for distributed mode.
:param criterion: the loss function
:param optim_method: the algorithm to use for optimization,
e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
        :param end_trigger: when to end the optimization. Default value is MaxEpoch(1).
        :param batch_size: training batch size
        :param cores: This is for the local optimizer only; the total number of physical cores is used as the default value.
"""
if not end_trigger:
end_trigger = MaxEpoch(1)
if not optim_method:
optim_method = SGD()
if isinstance(training_set, RDD) or isinstance(training_set, DataSet):
return DistriOptimizer(model=model,
training_rdd=training_set,
criterion=criterion,
end_trigger=end_trigger,
batch_size=batch_size,
optim_method=optim_method,
bigdl_type=bigdl_type)
elif isinstance(training_set, tuple) and len(training_set) == 2:
x, y = training_set
return LocalOptimizer(X=x,
Y=y,
model=model,
criterion=criterion,
end_trigger=end_trigger,
batch_size=batch_size,
optim_method=optim_method,
cores=cores,
bigdl_type="float")
else:
raise Exception("Not supported training set: %s" % type(training_set))
|
[
"Create",
"an",
"optimizer",
".",
"Depend",
"on",
"the",
"input",
"type",
"the",
"returning",
"optimizer",
"can",
"be",
"a",
"local",
"optimizer",
"\\",
"or",
"a",
"distributed",
"optimizer",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L848-L894
|
[
"def",
"create",
"(",
"model",
",",
"training_set",
",",
"criterion",
",",
"end_trigger",
"=",
"None",
",",
"batch_size",
"=",
"32",
",",
"optim_method",
"=",
"None",
",",
"cores",
"=",
"None",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"if",
"not",
"end_trigger",
":",
"end_trigger",
"=",
"MaxEpoch",
"(",
"1",
")",
"if",
"not",
"optim_method",
":",
"optim_method",
"=",
"SGD",
"(",
")",
"if",
"isinstance",
"(",
"training_set",
",",
"RDD",
")",
"or",
"isinstance",
"(",
"training_set",
",",
"DataSet",
")",
":",
"return",
"DistriOptimizer",
"(",
"model",
"=",
"model",
",",
"training_rdd",
"=",
"training_set",
",",
"criterion",
"=",
"criterion",
",",
"end_trigger",
"=",
"end_trigger",
",",
"batch_size",
"=",
"batch_size",
",",
"optim_method",
"=",
"optim_method",
",",
"bigdl_type",
"=",
"bigdl_type",
")",
"elif",
"isinstance",
"(",
"training_set",
",",
"tuple",
")",
"and",
"len",
"(",
"training_set",
")",
"==",
"2",
":",
"x",
",",
"y",
"=",
"training_set",
"return",
"LocalOptimizer",
"(",
"X",
"=",
"x",
",",
"Y",
"=",
"y",
",",
"model",
"=",
"model",
",",
"criterion",
"=",
"criterion",
",",
"end_trigger",
"=",
"end_trigger",
",",
"batch_size",
"=",
"batch_size",
",",
"optim_method",
"=",
"optim_method",
",",
"cores",
"=",
"cores",
",",
"bigdl_type",
"=",
"\"float\"",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Not supported training set: %s\"",
"%",
"type",
"(",
"training_set",
")",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
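A hypothetical usage sketch of the two branches above; model, criterion, train_rdd, x_train, and y_train are placeholders assumed to be defined elsewhere:

from bigdl.optim.optimizer import Optimizer, MaxEpoch, SGD

# Distributed mode: training_set is an RDD of Sample (or a DataSet).
dist_optimizer = Optimizer.create(model, train_rdd, criterion,
                                  end_trigger=MaxEpoch(5),
                                  batch_size=128,
                                  optim_method=SGD())

# Local mode: training_set is a (features, labels) tuple of numpy arrays.
local_optimizer = Optimizer.create(model, (x_train, y_train), criterion,
                                   batch_size=128)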
test
|
Optimizer.set_validation
|
Configure validation settings.
:param batch_size: validation batch size
:param val_rdd: validation dataset
:param trigger: validation interval
:param val_method: the ValidationMethod to use, e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
|
pyspark/bigdl/optim/optimizer.py
|
def set_validation(self, batch_size, val_rdd, trigger, val_method=None):
"""
Configure validation settings.
:param batch_size: validation batch size
:param val_rdd: validation dataset
:param trigger: validation interval
        :param val_method: the ValidationMethod to use, e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
"""
if val_method is None:
val_method = [Top1Accuracy()]
func_name = "setValidation"
if isinstance(val_rdd, DataSet):
func_name = "setValidationFromDataSet"
callBigDlFunc(self.bigdl_type, func_name, self.value, batch_size,
trigger, val_rdd, to_list(val_method))
|
def set_validation(self, batch_size, val_rdd, trigger, val_method=None):
"""
Configure validation settings.
:param batch_size: validation batch size
:param val_rdd: validation dataset
:param trigger: validation interval
        :param val_method: the ValidationMethod to use, e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
"""
if val_method is None:
val_method = [Top1Accuracy()]
func_name = "setValidation"
if isinstance(val_rdd, DataSet):
func_name = "setValidationFromDataSet"
callBigDlFunc(self.bigdl_type, func_name, self.value, batch_size,
trigger, val_rdd, to_list(val_method))
|
[
"Configure",
"validation",
"settings",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L896-L912
|
[
"def",
"set_validation",
"(",
"self",
",",
"batch_size",
",",
"val_rdd",
",",
"trigger",
",",
"val_method",
"=",
"None",
")",
":",
"if",
"val_method",
"is",
"None",
":",
"val_method",
"=",
"[",
"Top1Accuracy",
"(",
")",
"]",
"func_name",
"=",
"\"setValidation\"",
"if",
"isinstance",
"(",
"val_rdd",
",",
"DataSet",
")",
":",
"func_name",
"=",
"\"setValidationFromDataSet\"",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"func_name",
",",
"self",
".",
"value",
",",
"batch_size",
",",
"trigger",
",",
"val_rdd",
",",
"to_list",
"(",
"val_method",
")",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
Optimizer.set_traindata
|
Set a new training dataset for optimizer reuse.
:param training_rdd: the training dataset
:param batch_size: training batch size
:return:
|
pyspark/bigdl/optim/optimizer.py
|
def set_traindata(self, training_rdd, batch_size):
"""
        Set a new training dataset for optimizer reuse.
:param training_rdd: the training dataset
:param batch_size: training batch size
:return:
"""
callBigDlFunc(self.bigdl_type, "setTrainData", self.value,
training_rdd, batch_size)
|
def set_traindata(self, training_rdd, batch_size):
"""
        Set a new training dataset for optimizer reuse.
:param training_rdd: the training dataset
:param batch_size: training batch size
:return:
"""
callBigDlFunc(self.bigdl_type, "setTrainData", self.value,
training_rdd, batch_size)
|
[
"Set",
"new",
"training",
"dataset",
"for",
"optimizer",
"reuse"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L914-L923
|
[
"def",
"set_traindata",
"(",
"self",
",",
"training_rdd",
",",
"batch_size",
")",
":",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"setTrainData\"",
",",
"self",
".",
"value",
",",
"training_rdd",
",",
"batch_size",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
LocalOptimizer.set_validation
|
Configure validation settings.
:param batch_size: validation batch size
:param X_val: features of validation dataset
:param Y_val: label of validation dataset
:param trigger: validation interval
:param val_method: the ValidationMethod to use, e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
|
pyspark/bigdl/optim/optimizer.py
|
def set_validation(self, batch_size, X_val, Y_val, trigger, val_method=None):
"""
Configure validation settings.
:param batch_size: validation batch size
:param X_val: features of validation dataset
:param Y_val: label of validation dataset
:param trigger: validation interval
        :param val_method: the ValidationMethod to use, e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
"""
if val_method is None:
val_method = [Top1Accuracy()]
callBigDlFunc(self.bigdl_type, "setValidation", self.value, batch_size,
trigger, [JTensor.from_ndarray(X) for X in to_list(X_val)],
JTensor.from_ndarray(Y_val), to_list(val_method))
|
def set_validation(self, batch_size, X_val, Y_val, trigger, val_method=None):
"""
Configure validation settings.
:param batch_size: validation batch size
:param X_val: features of validation dataset
:param Y_val: label of validation dataset
:param trigger: validation interval
        :param val_method: the ValidationMethod to use, e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
"""
if val_method is None:
val_method = [Top1Accuracy()]
callBigDlFunc(self.bigdl_type, "setValidation", self.value, batch_size,
trigger, [JTensor.from_ndarray(X) for X in to_list(X_val)],
JTensor.from_ndarray(Y_val), to_list(val_method))
|
[
"Configure",
"validation",
"settings",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L1009-L1023
|
[
"def",
"set_validation",
"(",
"self",
",",
"batch_size",
",",
"X_val",
",",
"Y_val",
",",
"trigger",
",",
"val_method",
"=",
"None",
")",
":",
"if",
"val_method",
"is",
"None",
":",
"val_method",
"=",
"[",
"Top1Accuracy",
"(",
")",
"]",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"setValidation\"",
",",
"self",
".",
"value",
",",
"batch_size",
",",
"trigger",
",",
"[",
"JTensor",
".",
"from_ndarray",
"(",
"X",
")",
"for",
"X",
"in",
"to_list",
"(",
"X_val",
")",
"]",
",",
"JTensor",
".",
"from_ndarray",
"(",
"Y_val",
")",
",",
"to_list",
"(",
"val_method",
")",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
TrainSummary.set_summary_trigger
|
Set the interval of recording for each indicator.
:param name: tag name. Supported tag names are "LearningRate", "Loss", "Throughput", "Parameters". "Parameters" is an umbrella tag that includes weight, bias, gradWeight, gradBias, and some running status (e.g. runningMean and runningVar in BatchNormalization). If you didn't set any triggers, we will by default record Loss and Throughput in each iteration, while *NOT* recording LearningRate and Parameters, as recording parameters may introduce substantial overhead when the model is very big; LearningRate is not a public attribute for all OptimMethod.
:param trigger: trigger
|
pyspark/bigdl/optim/optimizer.py
|
def set_summary_trigger(self, name, trigger):
"""
Set the interval of recording for each indicator.
        :param name: tag name. Supported tag names are "LearningRate", "Loss", "Throughput", "Parameters". "Parameters" is an umbrella tag that includes weight, bias, gradWeight, gradBias, and some running status (e.g. runningMean and runningVar in BatchNormalization). If you didn't set any triggers, we will by default record Loss and Throughput in each iteration, while *NOT* recording LearningRate and Parameters, as recording parameters may introduce substantial overhead when the model is very big; LearningRate is not a public attribute for all OptimMethod.
:param trigger: trigger
"""
return callBigDlFunc(self.bigdl_type, "summarySetTrigger", self.value,
name, trigger)
|
def set_summary_trigger(self, name, trigger):
"""
Set the interval of recording for each indicator.
        :param name: tag name. Supported tag names are "LearningRate", "Loss", "Throughput", "Parameters". "Parameters" is an umbrella tag that includes weight, bias, gradWeight, gradBias, and some running status (e.g. runningMean and runningVar in BatchNormalization). If you didn't set any triggers, we will by default record Loss and Throughput in each iteration, while *NOT* recording LearningRate and Parameters, as recording parameters may introduce substantial overhead when the model is very big; LearningRate is not a public attribute for all OptimMethod.
:param trigger: trigger
"""
return callBigDlFunc(self.bigdl_type, "summarySetTrigger", self.value,
name, trigger)
|
[
"Set",
"the",
"interval",
"of",
"recording",
"for",
"each",
"indicator",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L1062-L1071
|
[
"def",
"set_summary_trigger",
"(",
"self",
",",
"name",
",",
"trigger",
")",
":",
"return",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"summarySetTrigger\"",
",",
"self",
".",
"value",
",",
"name",
",",
"trigger",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
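A hypothetical usage sketch; train_summary is assumed to be a TrainSummary instance and SeveralIteration is assumed to be the interval trigger in bigdl.optim.optimizer:

from bigdl.optim.optimizer import SeveralIteration   # assumed trigger class

# Record the potentially large "Parameters" tag only every 50 iterations.
train_summary.set_summary_trigger("Parameters", SeveralIteration(50))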
test
|
read_data_sets
|
Parse or download mnist data if train_dir is empty.
:param train_dir: The directory storing the mnist data
:param data_type: Reading training set or testing set. It can be either "train" or "test".
:return:
```
(ndarray, ndarray) representing (features, labels)
features is a 4D uint8 numpy array [index, y, x, depth] representing each pixel, valued from 0 to 255.
labels is a 1D uint8 numpy array representing the label, valued from 0 to 9.
```
|
pyspark/bigdl/dataset/mnist.py
|
def read_data_sets(train_dir, data_type="train"):
"""
Parse or download mnist data if train_dir is empty.
    :param train_dir: The directory storing the mnist data
    :param data_type: Reading training set or testing set. It can be either "train" or "test".
:return:
```
(ndarray, ndarray) representing (features, labels)
    features is a 4D uint8 numpy array [index, y, x, depth] representing each pixel, valued from 0 to 255.
    labels is a 1D uint8 numpy array representing the label, valued from 0 to 9.
```
"""
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
if data_type == "train":
local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
SOURCE_URL + TRAIN_IMAGES)
with open(local_file, 'rb') as f:
train_images = extract_images(f)
local_file = base.maybe_download(TRAIN_LABELS, train_dir,
SOURCE_URL + TRAIN_LABELS)
with open(local_file, 'rb') as f:
train_labels = extract_labels(f)
return train_images, train_labels
else:
local_file = base.maybe_download(TEST_IMAGES, train_dir,
SOURCE_URL + TEST_IMAGES)
with open(local_file, 'rb') as f:
test_images = extract_images(f)
local_file = base.maybe_download(TEST_LABELS, train_dir,
SOURCE_URL + TEST_LABELS)
with open(local_file, 'rb') as f:
test_labels = extract_labels(f)
return test_images, test_labels
|
def read_data_sets(train_dir, data_type="train"):
"""
Parse or download mnist data if train_dir is empty.
    :param train_dir: The directory storing the mnist data
    :param data_type: Reading training set or testing set. It can be either "train" or "test".
:return:
```
(ndarray, ndarray) representing (features, labels)
    features is a 4D uint8 numpy array [index, y, x, depth] representing each pixel, valued from 0 to 255.
    labels is a 1D uint8 numpy array representing the label, valued from 0 to 9.
```
"""
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
if data_type == "train":
local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
SOURCE_URL + TRAIN_IMAGES)
with open(local_file, 'rb') as f:
train_images = extract_images(f)
local_file = base.maybe_download(TRAIN_LABELS, train_dir,
SOURCE_URL + TRAIN_LABELS)
with open(local_file, 'rb') as f:
train_labels = extract_labels(f)
return train_images, train_labels
else:
local_file = base.maybe_download(TEST_IMAGES, train_dir,
SOURCE_URL + TEST_IMAGES)
with open(local_file, 'rb') as f:
test_images = extract_images(f)
local_file = base.maybe_download(TEST_LABELS, train_dir,
SOURCE_URL + TEST_LABELS)
with open(local_file, 'rb') as f:
test_labels = extract_labels(f)
return test_images, test_labels
|
[
"Parse",
"or",
"download",
"mnist",
"data",
"if",
"train_dir",
"is",
"empty",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/dataset/mnist.py#L77-L121
|
[
"def",
"read_data_sets",
"(",
"train_dir",
",",
"data_type",
"=",
"\"train\"",
")",
":",
"TRAIN_IMAGES",
"=",
"'train-images-idx3-ubyte.gz'",
"TRAIN_LABELS",
"=",
"'train-labels-idx1-ubyte.gz'",
"TEST_IMAGES",
"=",
"'t10k-images-idx3-ubyte.gz'",
"TEST_LABELS",
"=",
"'t10k-labels-idx1-ubyte.gz'",
"if",
"data_type",
"==",
"\"train\"",
":",
"local_file",
"=",
"base",
".",
"maybe_download",
"(",
"TRAIN_IMAGES",
",",
"train_dir",
",",
"SOURCE_URL",
"+",
"TRAIN_IMAGES",
")",
"with",
"open",
"(",
"local_file",
",",
"'rb'",
")",
"as",
"f",
":",
"train_images",
"=",
"extract_images",
"(",
"f",
")",
"local_file",
"=",
"base",
".",
"maybe_download",
"(",
"TRAIN_LABELS",
",",
"train_dir",
",",
"SOURCE_URL",
"+",
"TRAIN_LABELS",
")",
"with",
"open",
"(",
"local_file",
",",
"'rb'",
")",
"as",
"f",
":",
"train_labels",
"=",
"extract_labels",
"(",
"f",
")",
"return",
"train_images",
",",
"train_labels",
"else",
":",
"local_file",
"=",
"base",
".",
"maybe_download",
"(",
"TEST_IMAGES",
",",
"train_dir",
",",
"SOURCE_URL",
"+",
"TEST_IMAGES",
")",
"with",
"open",
"(",
"local_file",
",",
"'rb'",
")",
"as",
"f",
":",
"test_images",
"=",
"extract_images",
"(",
"f",
")",
"local_file",
"=",
"base",
".",
"maybe_download",
"(",
"TEST_LABELS",
",",
"train_dir",
",",
"SOURCE_URL",
"+",
"TEST_LABELS",
")",
"with",
"open",
"(",
"local_file",
",",
"'rb'",
")",
"as",
"f",
":",
"test_labels",
"=",
"extract_labels",
"(",
"f",
")",
"return",
"test_images",
",",
"test_labels"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
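A minimal usage sketch; the data is downloaded into the given directory on first use, and the printed shapes are the usual MNIST sizes rather than something this snippet guarantees:

from bigdl.dataset import mnist

train_images, train_labels = mnist.read_data_sets("/tmp/mnist", "train")
test_images, test_labels = mnist.read_data_sets("/tmp/mnist", "test")
print(train_images.shape, train_labels.shape)   # typically (60000, 28, 28, 1) (60000,)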
test
|
get_news20
|
Parse or download news20 if source_dir is empty.
:param source_dir: The directory storing news data.
:return: A list of (tokens, label)
|
pyspark/bigdl/dataset/news20.py
|
def get_news20(source_dir="./data/news20/"):
"""
Parse or download news20 if source_dir is empty.
:param source_dir: The directory storing news data.
:return: A list of (tokens, label)
"""
news_dir = download_news20(source_dir)
texts = [] # list of text samples
label_id = 0
for name in sorted(os.listdir(news_dir)):
path = os.path.join(news_dir, name)
label_id += 1
if os.path.isdir(path):
for fname in sorted(os.listdir(path)):
if fname.isdigit():
fpath = os.path.join(path, fname)
if sys.version_info < (3,):
f = open(fpath)
else:
f = open(fpath, encoding='latin-1')
content = f.read()
texts.append((content, label_id))
f.close()
print('Found %s texts.' % len(texts))
return texts
|
def get_news20(source_dir="./data/news20/"):
"""
Parse or download news20 if source_dir is empty.
:param source_dir: The directory storing news data.
:return: A list of (tokens, label)
"""
news_dir = download_news20(source_dir)
texts = [] # list of text samples
label_id = 0
for name in sorted(os.listdir(news_dir)):
path = os.path.join(news_dir, name)
label_id += 1
if os.path.isdir(path):
for fname in sorted(os.listdir(path)):
if fname.isdigit():
fpath = os.path.join(path, fname)
if sys.version_info < (3,):
f = open(fpath)
else:
f = open(fpath, encoding='latin-1')
content = f.read()
texts.append((content, label_id))
f.close()
print('Found %s texts.' % len(texts))
return texts
|
[
"Parse",
"or",
"download",
"news20",
"if",
"source_dir",
"is",
"empty",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/dataset/news20.py#L53-L79
|
[
"def",
"get_news20",
"(",
"source_dir",
"=",
"\"./data/news20/\"",
")",
":",
"news_dir",
"=",
"download_news20",
"(",
"source_dir",
")",
"texts",
"=",
"[",
"]",
"# list of text samples",
"label_id",
"=",
"0",
"for",
"name",
"in",
"sorted",
"(",
"os",
".",
"listdir",
"(",
"news_dir",
")",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"news_dir",
",",
"name",
")",
"label_id",
"+=",
"1",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"for",
"fname",
"in",
"sorted",
"(",
"os",
".",
"listdir",
"(",
"path",
")",
")",
":",
"if",
"fname",
".",
"isdigit",
"(",
")",
":",
"fpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"fname",
")",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
")",
":",
"f",
"=",
"open",
"(",
"fpath",
")",
"else",
":",
"f",
"=",
"open",
"(",
"fpath",
",",
"encoding",
"=",
"'latin-1'",
")",
"content",
"=",
"f",
".",
"read",
"(",
")",
"texts",
".",
"append",
"(",
"(",
"content",
",",
"label_id",
")",
")",
"f",
".",
"close",
"(",
")",
"print",
"(",
"'Found %s texts.'",
"%",
"len",
"(",
"texts",
")",
")",
"return",
"texts"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
get_glove_w2v
|
Parse or download the pre-trained glove word2vec if source_dir is empty.
:param source_dir: The directory storing the pre-trained word2vec
:param dim: The dimension of a vector
:return: A dict mapping from word to vector
|
pyspark/bigdl/dataset/news20.py
|
def get_glove_w2v(source_dir="./data/news20/", dim=100):
"""
Parse or download the pre-trained glove word2vec if source_dir is empty.
:param source_dir: The directory storing the pre-trained word2vec
:param dim: The dimension of a vector
:return: A dict mapping from word to vector
"""
w2v_dir = download_glove_w2v(source_dir)
w2v_path = os.path.join(w2v_dir, "glove.6B.%sd.txt" % dim)
if sys.version_info < (3,):
w2v_f = open(w2v_path)
else:
w2v_f = open(w2v_path, encoding='latin-1')
pre_w2v = {}
for line in w2v_f.readlines():
items = line.split(" ")
pre_w2v[items[0]] = [float(i) for i in items[1:]]
w2v_f.close()
return pre_w2v
|
def get_glove_w2v(source_dir="./data/news20/", dim=100):
"""
Parse or download the pre-trained glove word2vec if source_dir is empty.
:param source_dir: The directory storing the pre-trained word2vec
:param dim: The dimension of a vector
:return: A dict mapping from word to vector
"""
w2v_dir = download_glove_w2v(source_dir)
w2v_path = os.path.join(w2v_dir, "glove.6B.%sd.txt" % dim)
if sys.version_info < (3,):
w2v_f = open(w2v_path)
else:
w2v_f = open(w2v_path, encoding='latin-1')
pre_w2v = {}
for line in w2v_f.readlines():
items = line.split(" ")
pre_w2v[items[0]] = [float(i) for i in items[1:]]
w2v_f.close()
return pre_w2v
|
[
"Parse",
"or",
"download",
"the",
"pre",
"-",
"trained",
"glove",
"word2vec",
"if",
"source_dir",
"is",
"empty",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/dataset/news20.py#L82-L101
|
[
"def",
"get_glove_w2v",
"(",
"source_dir",
"=",
"\"./data/news20/\"",
",",
"dim",
"=",
"100",
")",
":",
"w2v_dir",
"=",
"download_glove_w2v",
"(",
"source_dir",
")",
"w2v_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"w2v_dir",
",",
"\"glove.6B.%sd.txt\"",
"%",
"dim",
")",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
")",
":",
"w2v_f",
"=",
"open",
"(",
"w2v_path",
")",
"else",
":",
"w2v_f",
"=",
"open",
"(",
"w2v_path",
",",
"encoding",
"=",
"'latin-1'",
")",
"pre_w2v",
"=",
"{",
"}",
"for",
"line",
"in",
"w2v_f",
".",
"readlines",
"(",
")",
":",
"items",
"=",
"line",
".",
"split",
"(",
"\" \"",
")",
"pre_w2v",
"[",
"items",
"[",
"0",
"]",
"]",
"=",
"[",
"float",
"(",
"i",
")",
"for",
"i",
"in",
"items",
"[",
"1",
":",
"]",
"]",
"w2v_f",
".",
"close",
"(",
")",
"return",
"pre_w2v"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
KerasModel.compile
|
Configures the learning process. Must be called before fit or evaluate.
# Arguments
optimizer: Optimization method to be used. One can alternatively pass in the corresponding
string representation, such as 'sgd'.
loss: Criterion to be used. One can alternatively pass in the corresponding string
representation, such as 'mse'.
metrics: List of validation methods to be used. Default is None. One can alternatively use ['accuracy'].
|
pyspark/bigdl/nn/keras/topology.py
|
def compile(self, optimizer, loss, metrics=None):
"""
Configures the learning process. Must be called before fit or evaluate.
# Arguments
optimizer: Optimization method to be used. One can alternatively pass in the corresponding
string representation, such as 'sgd'.
loss: Criterion to be used. One can alternatively pass in the corresponding string
representation, such as 'mse'.
metrics: List of validation methods to be used. Default is None. One can alternatively use ['accuracy'].
"""
if isinstance(optimizer, six.string_types):
optimizer = self.__convert_optim_method(optimizer)
if isinstance(loss, six.string_types):
loss = self.__convert_criterion(loss)
        if metrics and all(isinstance(metric, six.string_types) for metric in metrics):  # guard against the default metrics=None
metrics = self.__convert_metrics(metrics)
callBigDlFunc(self.bigdl_type, "compile",
self.value,
optimizer,
loss,
metrics)
|
def compile(self, optimizer, loss, metrics=None):
"""
Configures the learning process. Must be called before fit or evaluate.
# Arguments
optimizer: Optimization method to be used. One can alternatively pass in the corresponding
string representation, such as 'sgd'.
loss: Criterion to be used. One can alternatively pass in the corresponding string
representation, such as 'mse'.
metrics: List of validation methods to be used. Default is None. One can alternatively use ['accuracy'].
"""
if isinstance(optimizer, six.string_types):
optimizer = self.__convert_optim_method(optimizer)
if isinstance(loss, six.string_types):
loss = self.__convert_criterion(loss)
        if metrics and all(isinstance(metric, six.string_types) for metric in metrics):  # guard against the default metrics=None
metrics = self.__convert_metrics(metrics)
callBigDlFunc(self.bigdl_type, "compile",
self.value,
optimizer,
loss,
metrics)
|
[
"Configures",
"the",
"learning",
"process",
".",
"Must",
"be",
"called",
"before",
"fit",
"or",
"evaluate",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/keras/topology.py#L82-L103
|
[
"def",
"compile",
"(",
"self",
",",
"optimizer",
",",
"loss",
",",
"metrics",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"optimizer",
",",
"six",
".",
"string_types",
")",
":",
"optimizer",
"=",
"self",
".",
"__convert_optim_method",
"(",
"optimizer",
")",
"if",
"isinstance",
"(",
"loss",
",",
"six",
".",
"string_types",
")",
":",
"loss",
"=",
"self",
".",
"__convert_criterion",
"(",
"loss",
")",
"if",
"all",
"(",
"isinstance",
"(",
"metric",
",",
"six",
".",
"string_types",
")",
"for",
"metric",
"in",
"metrics",
")",
":",
"metrics",
"=",
"self",
".",
"__convert_metrics",
"(",
"metrics",
")",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"compile\"",
",",
"self",
".",
"value",
",",
"optimizer",
",",
"loss",
",",
"metrics",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
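A hypothetical usage sketch using the string shortcuts mentioned in the docstring; `model` is assumed to be a constructed Sequential or Model:

# String shortcuts are converted internally; optimizer/criterion objects can be passed instead.
model.compile(optimizer='sgd', loss='mse', metrics=['accuracy'])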
test
|
KerasModel.fit
|
Train a model for a fixed number of epochs on a dataset.
# Arguments
x: Input data. A Numpy array or RDD of Sample or Image DataSet.
y: Labels. A Numpy array. Default is None if x is already RDD of Sample or Image DataSet.
batch_size: Number of samples per gradient update.
    nb_epoch: Number of epochs to train.
validation_data: Tuple (x_val, y_val) where x_val and y_val are both Numpy arrays.
Or RDD of Sample. Default is None if no validation is involved.
distributed: Boolean. Whether to train the model in distributed mode or local mode.
Default is True. In local mode, x and y must both be Numpy arrays.
|
pyspark/bigdl/nn/keras/topology.py
|
def fit(self, x, y=None, batch_size=32, nb_epoch=10, validation_data=None, distributed=True):
"""
Train a model for a fixed number of epochs on a dataset.
# Arguments
x: Input data. A Numpy array or RDD of Sample or Image DataSet.
y: Labels. A Numpy array. Default is None if x is already RDD of Sample or Image DataSet.
batch_size: Number of samples per gradient update.
            nb_epoch: Number of epochs to train.
validation_data: Tuple (x_val, y_val) where x_val and y_val are both Numpy arrays.
Or RDD of Sample. Default is None if no validation is involved.
distributed: Boolean. Whether to train the model in distributed mode or local mode.
Default is True. In local mode, x and y must both be Numpy arrays.
"""
if distributed:
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
training_data = to_sample_rdd(x, y)
if validation_data:
validation_data = to_sample_rdd(*validation_data)
elif (isinstance(x, RDD) or isinstance(x, DataSet)) and not y:
training_data = x
else:
raise TypeError("Unsupported training data type: %s" % type(x))
callBigDlFunc(self.bigdl_type, "fit",
self.value,
training_data,
batch_size,
nb_epoch,
validation_data)
else:
if validation_data:
val_x = [JTensor.from_ndarray(x) for x in to_list(validation_data[0])]
val_y = JTensor.from_ndarray(validation_data[1])
else:
val_x, val_y = None, None
callBigDlFunc(self.bigdl_type, "fit",
self.value,
[JTensor.from_ndarray(x) for x in to_list(x)],
JTensor.from_ndarray(y),
batch_size,
nb_epoch,
val_x,
val_y,
multiprocessing.cpu_count())
|
def fit(self, x, y=None, batch_size=32, nb_epoch=10, validation_data=None, distributed=True):
"""
Train a model for a fixed number of epochs on a dataset.
# Arguments
x: Input data. A Numpy array or RDD of Sample or Image DataSet.
y: Labels. A Numpy array. Default is None if x is already RDD of Sample or Image DataSet.
batch_size: Number of samples per gradient update.
            nb_epoch: Number of epochs to train.
validation_data: Tuple (x_val, y_val) where x_val and y_val are both Numpy arrays.
Or RDD of Sample. Default is None if no validation is involved.
distributed: Boolean. Whether to train the model in distributed mode or local mode.
Default is True. In local mode, x and y must both be Numpy arrays.
"""
if distributed:
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
training_data = to_sample_rdd(x, y)
if validation_data:
validation_data = to_sample_rdd(*validation_data)
elif (isinstance(x, RDD) or isinstance(x, DataSet)) and not y:
training_data = x
else:
raise TypeError("Unsupported training data type: %s" % type(x))
callBigDlFunc(self.bigdl_type, "fit",
self.value,
training_data,
batch_size,
nb_epoch,
validation_data)
else:
if validation_data:
val_x = [JTensor.from_ndarray(x) for x in to_list(validation_data[0])]
val_y = JTensor.from_ndarray(validation_data[1])
else:
val_x, val_y = None, None
callBigDlFunc(self.bigdl_type, "fit",
self.value,
[JTensor.from_ndarray(x) for x in to_list(x)],
JTensor.from_ndarray(y),
batch_size,
nb_epoch,
val_x,
val_y,
multiprocessing.cpu_count())
|
[
"Train",
"a",
"model",
"for",
"a",
"fixed",
"number",
"of",
"epochs",
"on",
"a",
"dataset",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/keras/topology.py#L105-L148
|
[
"def",
"fit",
"(",
"self",
",",
"x",
",",
"y",
"=",
"None",
",",
"batch_size",
"=",
"32",
",",
"nb_epoch",
"=",
"10",
",",
"validation_data",
"=",
"None",
",",
"distributed",
"=",
"True",
")",
":",
"if",
"distributed",
":",
"if",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
"and",
"isinstance",
"(",
"y",
",",
"np",
".",
"ndarray",
")",
":",
"training_data",
"=",
"to_sample_rdd",
"(",
"x",
",",
"y",
")",
"if",
"validation_data",
":",
"validation_data",
"=",
"to_sample_rdd",
"(",
"*",
"validation_data",
")",
"elif",
"(",
"isinstance",
"(",
"x",
",",
"RDD",
")",
"or",
"isinstance",
"(",
"x",
",",
"DataSet",
")",
")",
"and",
"not",
"y",
":",
"training_data",
"=",
"x",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unsupported training data type: %s\"",
"%",
"type",
"(",
"x",
")",
")",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"fit\"",
",",
"self",
".",
"value",
",",
"training_data",
",",
"batch_size",
",",
"nb_epoch",
",",
"validation_data",
")",
"else",
":",
"if",
"validation_data",
":",
"val_x",
"=",
"[",
"JTensor",
".",
"from_ndarray",
"(",
"x",
")",
"for",
"x",
"in",
"to_list",
"(",
"validation_data",
"[",
"0",
"]",
")",
"]",
"val_y",
"=",
"JTensor",
".",
"from_ndarray",
"(",
"validation_data",
"[",
"1",
"]",
")",
"else",
":",
"val_x",
",",
"val_y",
"=",
"None",
",",
"None",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"fit\"",
",",
"self",
".",
"value",
",",
"[",
"JTensor",
".",
"from_ndarray",
"(",
"x",
")",
"for",
"x",
"in",
"to_list",
"(",
"x",
")",
"]",
",",
"JTensor",
".",
"from_ndarray",
"(",
"y",
")",
",",
"batch_size",
",",
"nb_epoch",
",",
"val_x",
",",
"val_y",
",",
"multiprocessing",
".",
"cpu_count",
"(",
")",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
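A hypothetical usage sketch of both branches; `model` is assumed to be compiled already, x_train/y_train to be numpy arrays, and train_rdd to be an RDD of Sample:

# Local mode: numpy arrays in, training on the local machine.
model.fit(x_train, y_train, batch_size=64, nb_epoch=5, distributed=False)

# Distributed mode: an RDD of Sample already carries the labels, so y is omitted.
model.fit(train_rdd, batch_size=64, nb_epoch=5)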
test
|
KerasModel.evaluate
|
Evaluate a model on a given dataset in distributed mode.
# Arguments
x: Input data. A Numpy array or RDD of Sample.
y: Labels. A Numpy array. Default is None if x is already RDD of Sample.
    batch_size: Number of samples per evaluation batch.
|
pyspark/bigdl/nn/keras/topology.py
|
def evaluate(self, x, y=None, batch_size=32):
"""
Evaluate a model on a given dataset in distributed mode.
# Arguments
x: Input data. A Numpy array or RDD of Sample.
y: Labels. A Numpy array. Default is None if x is already RDD of Sample.
            batch_size: Number of samples per evaluation batch.
"""
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
evaluation_data = to_sample_rdd(x, y)
elif isinstance(x, RDD) and not y:
evaluation_data = x
else:
raise TypeError("Unsupported evaluation data type: %s" % type(x))
return callBigDlFunc(self.bigdl_type, "evaluate",
self.value,
evaluation_data,
batch_size)
|
def evaluate(self, x, y=None, batch_size=32):
"""
Evaluate a model on a given dataset in distributed mode.
# Arguments
x: Input data. A Numpy array or RDD of Sample.
y: Labels. A Numpy array. Default is None if x is already RDD of Sample.
            batch_size: Number of samples per evaluation batch.
"""
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
evaluation_data = to_sample_rdd(x, y)
elif isinstance(x, RDD) and not y:
evaluation_data = x
else:
raise TypeError("Unsupported evaluation data type: %s" % type(x))
return callBigDlFunc(self.bigdl_type, "evaluate",
self.value,
evaluation_data,
batch_size)
|
[
"Evaluate",
"a",
"model",
"on",
"a",
"given",
"dataset",
"in",
"distributed",
"mode",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/keras/topology.py#L150-L168
|
[
"def",
"evaluate",
"(",
"self",
",",
"x",
",",
"y",
"=",
"None",
",",
"batch_size",
"=",
"32",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
"and",
"isinstance",
"(",
"y",
",",
"np",
".",
"ndarray",
")",
":",
"evaluation_data",
"=",
"to_sample_rdd",
"(",
"x",
",",
"y",
")",
"elif",
"isinstance",
"(",
"x",
",",
"RDD",
")",
"and",
"not",
"y",
":",
"evaluation_data",
"=",
"x",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unsupported evaluation data type: %s\"",
"%",
"type",
"(",
"x",
")",
")",
"return",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"evaluate\"",
",",
"self",
".",
"value",
",",
"evaluation_data",
",",
"batch_size",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
KerasModel.predict
|
Use a model to do prediction.
# Arguments
x: Input data. A Numpy array or RDD of Sample.
distributed: Boolean. Whether to do prediction in distributed mode or local mode.
Default is True. In local mode, x must be a Numpy array.
|
pyspark/bigdl/nn/keras/topology.py
|
def predict(self, x, distributed=True):
"""
Use a model to do prediction.
# Arguments
x: Input data. A Numpy array or RDD of Sample.
distributed: Boolean. Whether to do prediction in distributed mode or local mode.
Default is True. In local mode, x must be a Numpy array.
"""
        if distributed:
if isinstance(x, np.ndarray):
features = to_sample_rdd(x, np.zeros([x.shape[0]]))
elif isinstance(x, RDD):
features = x
else:
raise TypeError("Unsupported prediction data type: %s" % type(x))
return self.predict_distributed(features)
else:
if isinstance(x, np.ndarray):
return self.predict_local(x)
else:
raise TypeError("Unsupported prediction data type: %s" % type(x))
|
def predict(self, x, distributed=True):
"""
Use a model to do prediction.
# Arguments
x: Input data. A Numpy array or RDD of Sample.
distributed: Boolean. Whether to do prediction in distributed mode or local mode.
Default is True. In local mode, x must be a Numpy array.
"""
        if distributed:
if isinstance(x, np.ndarray):
features = to_sample_rdd(x, np.zeros([x.shape[0]]))
elif isinstance(x, RDD):
features = x
else:
raise TypeError("Unsupported prediction data type: %s" % type(x))
return self.predict_distributed(features)
else:
if isinstance(x, np.ndarray):
return self.predict_local(x)
else:
raise TypeError("Unsupported prediction data type: %s" % type(x))
|
[
"Use",
"a",
"model",
"to",
"do",
"prediction",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/keras/topology.py#L170-L191
|
[
"def",
"predict",
"(",
"self",
",",
"x",
",",
"distributed",
"=",
"True",
")",
":",
"if",
"is_distributed",
":",
"if",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
":",
"features",
"=",
"to_sample_rdd",
"(",
"x",
",",
"np",
".",
"zeros",
"(",
"[",
"x",
".",
"shape",
"[",
"0",
"]",
"]",
")",
")",
"elif",
"isinstance",
"(",
"x",
",",
"RDD",
")",
":",
"features",
"=",
"x",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unsupported prediction data type: %s\"",
"%",
"type",
"(",
"x",
")",
")",
"return",
"self",
".",
"predict_distributed",
"(",
"features",
")",
"else",
":",
"if",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"self",
".",
"predict_local",
"(",
"x",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unsupported prediction data type: %s\"",
"%",
"type",
"(",
"x",
")",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
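A hypothetical usage sketch; x_test/y_test are assumed to be numpy arrays matching the model's input and label shapes:

results = model.evaluate(x_test, y_test, batch_size=64)   # distributed evaluation
predictions = model.predict(x_test, distributed=False)    # local prediction on numpy input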
test
|
Sequential.from_jvalue
|
Create a Python Model based on the given Java value.
:param jvalue: Java object created by Py4j
:return: A Python Model
|
pyspark/bigdl/nn/keras/topology.py
|
def from_jvalue(jvalue, bigdl_type="float"):
"""
        Create a Python Model based on the given Java value.
        :param jvalue: Java object created by Py4j
:return: A Python Model
"""
model = Sequential(jvalue=jvalue)
model.value = jvalue
return model
|
def from_jvalue(jvalue, bigdl_type="float"):
"""
        Create a Python Model based on the given Java value
        :param jvalue: Java object created by Py4j
:return: A Python Model
"""
model = Sequential(jvalue=jvalue)
model.value = jvalue
return model
|
[
"Create",
"a",
"Python",
"Model",
"base",
"on",
"the",
"given",
"java",
"value",
":",
"param",
"jvalue",
":",
"Java",
"object",
"create",
"by",
"Py4j",
":",
"return",
":",
"A",
"Python",
"Model"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/keras/topology.py#L208-L216
|
[
"def",
"from_jvalue",
"(",
"jvalue",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"model",
"=",
"Sequential",
"(",
"jvalue",
"=",
"jvalue",
")",
"model",
".",
"value",
"=",
"jvalue",
"return",
"model"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
Model.from_jvalue
|
Create a Python Model based on the given Java value
:param jvalue: Java object created by Py4j
:return: A Python Model
|
pyspark/bigdl/nn/keras/topology.py
|
def from_jvalue(jvalue, bigdl_type="float"):
"""
        Create a Python Model based on the given Java value
        :param jvalue: Java object created by Py4j
:return: A Python Model
"""
model = Model([], [], jvalue=jvalue)
model.value = jvalue
return model
|
def from_jvalue(jvalue, bigdl_type="float"):
"""
        Create a Python Model based on the given Java value
        :param jvalue: Java object created by Py4j
:return: A Python Model
"""
model = Model([], [], jvalue=jvalue)
model.value = jvalue
return model
|
[
"Create",
"a",
"Python",
"Model",
"base",
"on",
"the",
"given",
"java",
"value",
":",
"param",
"jvalue",
":",
"Java",
"object",
"create",
"by",
"Py4j",
":",
"return",
":",
"A",
"Python",
"Model"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/keras/topology.py#L238-L246
|
[
"def",
"from_jvalue",
"(",
"jvalue",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"model",
"=",
"Model",
"(",
"[",
"]",
",",
"[",
"]",
",",
"jvalue",
"=",
"jvalue",
")",
"model",
".",
"value",
"=",
"jvalue",
"return",
"model"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
get_mnist
|
Get mnist dataset and parallelize into RDDs.
Data will be downloaded automatically if it is not present at the specified location.
:param sc: SparkContext.
:param data_type: "train" for training data and "test" for testing data.
:param location: Location to store mnist dataset.
:return: RDD of (features: ndarray, label: ndarray).
|
pyspark/bigdl/models/lenet/utils.py
|
def get_mnist(sc, data_type="train", location="/tmp/mnist"):
"""
Get mnist dataset and parallelize into RDDs.
    Data will be downloaded automatically if it is not present at the specified location.
:param sc: SparkContext.
:param data_type: "train" for training data and "test" for testing data.
:param location: Location to store mnist dataset.
:return: RDD of (features: ndarray, label: ndarray).
"""
(images, labels) = mnist.read_data_sets(location, data_type)
images = sc.parallelize(images)
labels = sc.parallelize(labels + 1) # Target start from 1 in BigDL
record = images.zip(labels)
return record
|
def get_mnist(sc, data_type="train", location="/tmp/mnist"):
"""
Get mnist dataset and parallelize into RDDs.
    Data will be downloaded automatically if it is not present at the specified location.
:param sc: SparkContext.
:param data_type: "train" for training data and "test" for testing data.
:param location: Location to store mnist dataset.
:return: RDD of (features: ndarray, label: ndarray).
"""
(images, labels) = mnist.read_data_sets(location, data_type)
images = sc.parallelize(images)
labels = sc.parallelize(labels + 1) # Target start from 1 in BigDL
record = images.zip(labels)
return record
|
[
"Get",
"mnist",
"dataset",
"and",
"parallelize",
"into",
"RDDs",
".",
"Data",
"would",
"be",
"downloaded",
"automatically",
"if",
"it",
"doesn",
"t",
"present",
"at",
"the",
"specific",
"location",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/models/lenet/utils.py#L22-L36
|
[
"def",
"get_mnist",
"(",
"sc",
",",
"data_type",
"=",
"\"train\"",
",",
"location",
"=",
"\"/tmp/mnist\"",
")",
":",
"(",
"images",
",",
"labels",
")",
"=",
"mnist",
".",
"read_data_sets",
"(",
"location",
",",
"data_type",
")",
"images",
"=",
"sc",
".",
"parallelize",
"(",
"images",
")",
"labels",
"=",
"sc",
".",
"parallelize",
"(",
"labels",
"+",
"1",
")",
"# Target start from 1 in BigDL",
"record",
"=",
"images",
".",
"zip",
"(",
"labels",
")",
"return",
"record"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
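A minimal sketch of calling get_mnist as documented above; it assumes an active SparkContext and that the module path follows the record's file path pyspark/bigdl/models/lenet/utils.py.

from bigdl.util.common import get_spark_context
from bigdl.models.lenet.utils import get_mnist   # module path inferred from the record

sc = get_spark_context()
# Returns an RDD of (features ndarray, label ndarray); labels start from 1,
# as noted in the source comment above.
train_rdd = get_mnist(sc, data_type="train", location="/tmp/mnist")
print(train_rdd.count())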
|
test
|
preprocess_mnist
|
Preprocess mnist dataset.
Normalize and transform into Sample of RDDs.
|
pyspark/bigdl/models/lenet/utils.py
|
def preprocess_mnist(sc, options):
"""
Preprocess mnist dataset.
Normalize and transform into Sample of RDDs.
"""
train_data = get_mnist(sc, "train", options.dataPath)\
.map(lambda rec_tuple: (normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),
rec_tuple[1]))\
.map(lambda t: Sample.from_ndarray(t[0], t[1]))
test_data = get_mnist(sc, "test", options.dataPath)\
.map(lambda rec_tuple: (normalizer(rec_tuple[0], mnist.TEST_MEAN, mnist.TEST_STD),
rec_tuple[1]))\
.map(lambda t: Sample.from_ndarray(t[0], t[1]))
return train_data, test_data
|
def preprocess_mnist(sc, options):
"""
Preprocess mnist dataset.
Normalize and transform into Sample of RDDs.
"""
train_data = get_mnist(sc, "train", options.dataPath)\
.map(lambda rec_tuple: (normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),
rec_tuple[1]))\
.map(lambda t: Sample.from_ndarray(t[0], t[1]))
test_data = get_mnist(sc, "test", options.dataPath)\
.map(lambda rec_tuple: (normalizer(rec_tuple[0], mnist.TEST_MEAN, mnist.TEST_STD),
rec_tuple[1]))\
.map(lambda t: Sample.from_ndarray(t[0], t[1]))
return train_data, test_data
|
[
"Preprocess",
"mnist",
"dataset",
".",
"Normalize",
"and",
"transform",
"into",
"Sample",
"of",
"RDDs",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/models/lenet/utils.py#L39-L52
|
[
"def",
"preprocess_mnist",
"(",
"sc",
",",
"options",
")",
":",
"train_data",
"=",
"get_mnist",
"(",
"sc",
",",
"\"train\"",
",",
"options",
".",
"dataPath",
")",
".",
"map",
"(",
"lambda",
"rec_tuple",
":",
"(",
"normalizer",
"(",
"rec_tuple",
"[",
"0",
"]",
",",
"mnist",
".",
"TRAIN_MEAN",
",",
"mnist",
".",
"TRAIN_STD",
")",
",",
"rec_tuple",
"[",
"1",
"]",
")",
")",
".",
"map",
"(",
"lambda",
"t",
":",
"Sample",
".",
"from_ndarray",
"(",
"t",
"[",
"0",
"]",
",",
"t",
"[",
"1",
"]",
")",
")",
"test_data",
"=",
"get_mnist",
"(",
"sc",
",",
"\"test\"",
",",
"options",
".",
"dataPath",
")",
".",
"map",
"(",
"lambda",
"rec_tuple",
":",
"(",
"normalizer",
"(",
"rec_tuple",
"[",
"0",
"]",
",",
"mnist",
".",
"TEST_MEAN",
",",
"mnist",
".",
"TEST_STD",
")",
",",
"rec_tuple",
"[",
"1",
"]",
")",
")",
".",
"map",
"(",
"lambda",
"t",
":",
"Sample",
".",
"from_ndarray",
"(",
"t",
"[",
"0",
"]",
",",
"t",
"[",
"1",
"]",
")",
")",
"return",
"train_data",
",",
"test_data"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
get_end_trigger
|
When to end the optimization based on input option.
|
pyspark/bigdl/models/lenet/utils.py
|
def get_end_trigger(options):
"""
When to end the optimization based on input option.
"""
if options.endTriggerType.lower() == "epoch":
return MaxEpoch(options.endTriggerNum)
else:
return MaxIteration(options.endTriggerNum)
|
def get_end_trigger(options):
"""
When to end the optimization based on input option.
"""
if options.endTriggerType.lower() == "epoch":
return MaxEpoch(options.endTriggerNum)
else:
return MaxIteration(options.endTriggerNum)
|
[
"When",
"to",
"end",
"the",
"optimization",
"based",
"on",
"input",
"option",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/models/lenet/utils.py#L55-L62
|
[
"def",
"get_end_trigger",
"(",
"options",
")",
":",
"if",
"options",
".",
"endTriggerType",
".",
"lower",
"(",
")",
"==",
"\"epoch\"",
":",
"return",
"MaxEpoch",
"(",
"options",
".",
"endTriggerNum",
")",
"else",
":",
"return",
"MaxIteration",
"(",
"options",
".",
"endTriggerNum",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
validate_optimizer
|
Set validation and checkpoint for distributed optimizer.
|
pyspark/bigdl/models/lenet/utils.py
|
def validate_optimizer(optimizer, test_data, options):
"""
Set validation and checkpoint for distributed optimizer.
"""
optimizer.set_validation(
batch_size=options.batchSize,
val_rdd=test_data,
trigger=EveryEpoch(),
val_method=[Top1Accuracy()]
)
optimizer.set_checkpoint(EveryEpoch(), options.checkpointPath)
|
def validate_optimizer(optimizer, test_data, options):
"""
Set validation and checkpoint for distributed optimizer.
"""
optimizer.set_validation(
batch_size=options.batchSize,
val_rdd=test_data,
trigger=EveryEpoch(),
val_method=[Top1Accuracy()]
)
optimizer.set_checkpoint(EveryEpoch(), options.checkpointPath)
|
[
"Set",
"validation",
"and",
"checkpoint",
"for",
"distributed",
"optimizer",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/models/lenet/utils.py#L65-L75
|
[
"def",
"validate_optimizer",
"(",
"optimizer",
",",
"test_data",
",",
"options",
")",
":",
"optimizer",
".",
"set_validation",
"(",
"batch_size",
"=",
"options",
".",
"batchSize",
",",
"val_rdd",
"=",
"test_data",
",",
"trigger",
"=",
"EveryEpoch",
"(",
")",
",",
"val_method",
"=",
"[",
"Top1Accuracy",
"(",
")",
"]",
")",
"optimizer",
".",
"set_checkpoint",
"(",
"EveryEpoch",
"(",
")",
",",
"options",
".",
"checkpointPath",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
HasBatchSize.setBatchSize
|
Sets the value of :py:attr:`batchSize`.
|
pyspark/bigdl/models/ml_pipeline/dl_classifier.py
|
def setBatchSize(self, val):
"""
Sets the value of :py:attr:`batchSize`.
"""
self._paramMap[self.batchSize] = val
pythonBigDL_method_name = "setBatchSize" + self.__class__.__name__
callBigDlFunc(self.bigdl_type, pythonBigDL_method_name, self.value, val)
return self
|
def setBatchSize(self, val):
"""
Sets the value of :py:attr:`batchSize`.
"""
self._paramMap[self.batchSize] = val
pythonBigDL_method_name = "setBatchSize" + self.__class__.__name__
callBigDlFunc(self.bigdl_type, pythonBigDL_method_name, self.value, val)
return self
|
[
"Sets",
"the",
"value",
"of",
":",
"py",
":",
"attr",
":",
"batchSize",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/models/ml_pipeline/dl_classifier.py#L25-L32
|
[
"def",
"setBatchSize",
"(",
"self",
",",
"val",
")",
":",
"self",
".",
"_paramMap",
"[",
"self",
".",
"batchSize",
"]",
"=",
"val",
"pythonBigDL_method_name",
"=",
"\"setBatchSize\"",
"+",
"self",
".",
"__class__",
".",
"__name__",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"pythonBigDL_method_name",
",",
"self",
".",
"value",
",",
"val",
")",
"return",
"self"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
ModelBroadcast.value
|
Return the broadcasted value
|
pyspark/bigdl/models/utils/model_broadcast.py
|
def value(self):
""" Return the broadcasted value
"""
if not hasattr(self, "_value") and self._path is not None:
self._value = self._load(self._path)
return self._value
|
def value(self):
""" Return the broadcasted value
"""
if not hasattr(self, "_value") and self._path is not None:
self._value = self._load(self._path)
return self._value
|
[
"Return",
"the",
"broadcasted",
"value"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/models/utils/model_broadcast.py#L61-L66
|
[
"def",
"value",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_value\"",
")",
"and",
"self",
".",
"_path",
"is",
"not",
"None",
":",
"self",
".",
"_value",
"=",
"self",
".",
"_load",
"(",
"self",
".",
"_path",
")",
"return",
"self",
".",
"_value"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
to_sample_rdd
|
Convert x and y into RDD[Sample]
:param x: ndarray and the first dimension should be batch
:param y: ndarray and the first dimension should be batch
:param numSlices:
:return:
|
pyspark/bigdl/util/common.py
|
def to_sample_rdd(x, y, numSlices=None):
"""
    Convert x and y into RDD[Sample]
:param x: ndarray and the first dimension should be batch
:param y: ndarray and the first dimension should be batch
:param numSlices:
:return:
"""
sc = get_spark_context()
from bigdl.util.common import Sample
x_rdd = sc.parallelize(x, numSlices)
y_rdd = sc.parallelize(y, numSlices)
return x_rdd.zip(y_rdd).map(lambda item: Sample.from_ndarray(item[0], item[1]))
|
def to_sample_rdd(x, y, numSlices=None):
"""
    Convert x and y into RDD[Sample]
:param x: ndarray and the first dimension should be batch
:param y: ndarray and the first dimension should be batch
:param numSlices:
:return:
"""
sc = get_spark_context()
from bigdl.util.common import Sample
x_rdd = sc.parallelize(x, numSlices)
y_rdd = sc.parallelize(y, numSlices)
return x_rdd.zip(y_rdd).map(lambda item: Sample.from_ndarray(item[0], item[1]))
|
[
"Conver",
"x",
"and",
"y",
"into",
"RDD",
"[",
"Sample",
"]",
":",
"param",
"x",
":",
"ndarray",
"and",
"the",
"first",
"dimension",
"should",
"be",
"batch",
":",
"param",
"y",
":",
"ndarray",
"and",
"the",
"first",
"dimension",
"should",
"be",
"batch",
":",
"param",
"numSlices",
":",
":",
"return",
":"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L478-L490
|
[
"def",
"to_sample_rdd",
"(",
"x",
",",
"y",
",",
"numSlices",
"=",
"None",
")",
":",
"sc",
"=",
"get_spark_context",
"(",
")",
"from",
"bigdl",
".",
"util",
".",
"common",
"import",
"Sample",
"x_rdd",
"=",
"sc",
".",
"parallelize",
"(",
"x",
",",
"numSlices",
")",
"y_rdd",
"=",
"sc",
".",
"parallelize",
"(",
"y",
",",
"numSlices",
")",
"return",
"x_rdd",
".",
"zip",
"(",
"y_rdd",
")",
".",
"map",
"(",
"lambda",
"item",
":",
"Sample",
".",
"from_ndarray",
"(",
"item",
"[",
"0",
"]",
",",
"item",
"[",
"1",
"]",
")",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
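A short sketch of to_sample_rdd; it assumes a SparkContext is already active, since the function fetches it via get_spark_context internally.

import numpy as np
from bigdl.util.common import to_sample_rdd

x = np.random.random((8, 3))          # batch of features (first dimension is the batch)
y = np.random.randint(1, 3, (8,))     # batch of labels
sample_rdd = to_sample_rdd(x, y)      # RDD[Sample], features and labels zipped element-wise
print(sample_rdd.count())             # 8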
|
test
|
get_spark_context
|
Get the current active spark context and create one if no active instance
:param conf: combining bigdl configs into spark conf
:return: SparkContext
|
pyspark/bigdl/util/common.py
|
def get_spark_context(conf=None):
"""
Get the current active spark context and create one if no active instance
:param conf: combining bigdl configs into spark conf
:return: SparkContext
"""
if hasattr(SparkContext, "getOrCreate"):
with SparkContext._lock:
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext.getOrCreate(spark_conf)
else:
return SparkContext.getOrCreate()
else:
        # Might have a threading issue, but we can't add _lock here
        # as it's not an RLock in Spark 1.5.
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext(conf=spark_conf)
else:
return SparkContext._active_spark_context
|
def get_spark_context(conf=None):
"""
Get the current active spark context and create one if no active instance
:param conf: combining bigdl configs into spark conf
:return: SparkContext
"""
if hasattr(SparkContext, "getOrCreate"):
with SparkContext._lock:
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext.getOrCreate(spark_conf)
else:
return SparkContext.getOrCreate()
else:
        # Might have a threading issue, but we can't add _lock here
        # as it's not an RLock in Spark 1.5.
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext(conf=spark_conf)
else:
return SparkContext._active_spark_context
|
[
"Get",
"the",
"current",
"active",
"spark",
"context",
"and",
"create",
"one",
"if",
"no",
"active",
"instance",
":",
"param",
"conf",
":",
"combining",
"bigdl",
"configs",
"into",
"spark",
"conf",
":",
"return",
":",
"SparkContext"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L520-L541
|
[
"def",
"get_spark_context",
"(",
"conf",
"=",
"None",
")",
":",
"if",
"hasattr",
"(",
"SparkContext",
",",
"\"getOrCreate\"",
")",
":",
"with",
"SparkContext",
".",
"_lock",
":",
"if",
"SparkContext",
".",
"_active_spark_context",
"is",
"None",
":",
"spark_conf",
"=",
"create_spark_conf",
"(",
")",
"if",
"conf",
"is",
"None",
"else",
"conf",
"return",
"SparkContext",
".",
"getOrCreate",
"(",
"spark_conf",
")",
"else",
":",
"return",
"SparkContext",
".",
"getOrCreate",
"(",
")",
"else",
":",
"# Might have threading issue but we cann't add _lock here",
"# as it's not RLock in spark1.5;",
"if",
"SparkContext",
".",
"_active_spark_context",
"is",
"None",
":",
"spark_conf",
"=",
"create_spark_conf",
"(",
")",
"if",
"conf",
"is",
"None",
"else",
"conf",
"return",
"SparkContext",
"(",
"conf",
"=",
"spark_conf",
")",
"else",
":",
"return",
"SparkContext",
".",
"_active_spark_context"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
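A typical call pattern for get_spark_context, sketched under the assumption that create_spark_conf and init_engine are importable from bigdl.util.common (init_engine is the usual BigDL setup step and is not part of the record above).

from bigdl.util.common import get_spark_context, create_spark_conf, init_engine

# The conf argument only matters when no SparkContext is active yet; it folds
# the BigDL settings into the SparkConf used to create the context.
sc = get_spark_context(conf=create_spark_conf())
init_engine()   # assumed setup step before running BigDL jobs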
|
test
|
callBigDlFunc
|
Call API in PythonBigDL
|
pyspark/bigdl/util/common.py
|
def callBigDlFunc(bigdl_type, name, *args):
""" Call API in PythonBigDL """
gateway = _get_gateway()
error = Exception("Cannot find function: %s" % name)
for jinvoker in JavaCreator.instance(bigdl_type, gateway).value:
        # hasattr(jinvoker, name) always returns true here,
        # so you need to invoke the method to check whether it exists or not
try:
api = getattr(jinvoker, name)
result = callJavaFunc(api, *args)
except Exception as e:
error = e
if "does not exist" not in str(e):
raise e
else:
return result
raise error
|
def callBigDlFunc(bigdl_type, name, *args):
""" Call API in PythonBigDL """
gateway = _get_gateway()
error = Exception("Cannot find function: %s" % name)
for jinvoker in JavaCreator.instance(bigdl_type, gateway).value:
        # hasattr(jinvoker, name) always returns true here,
        # so you need to invoke the method to check whether it exists or not
try:
api = getattr(jinvoker, name)
result = callJavaFunc(api, *args)
except Exception as e:
error = e
if "does not exist" not in str(e):
raise e
else:
return result
raise error
|
[
"Call",
"API",
"in",
"PythonBigDL"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L576-L592
|
[
"def",
"callBigDlFunc",
"(",
"bigdl_type",
",",
"name",
",",
"*",
"args",
")",
":",
"gateway",
"=",
"_get_gateway",
"(",
")",
"error",
"=",
"Exception",
"(",
"\"Cannot find function: %s\"",
"%",
"name",
")",
"for",
"jinvoker",
"in",
"JavaCreator",
".",
"instance",
"(",
"bigdl_type",
",",
"gateway",
")",
".",
"value",
":",
"# hasattr(jinvoker, name) always return true here,",
"# so you need to invoke the method to check if it exist or not",
"try",
":",
"api",
"=",
"getattr",
"(",
"jinvoker",
",",
"name",
")",
"result",
"=",
"callJavaFunc",
"(",
"api",
",",
"*",
"args",
")",
"except",
"Exception",
"as",
"e",
":",
"error",
"=",
"e",
"if",
"\"does not exist\"",
"not",
"in",
"str",
"(",
"e",
")",
":",
"raise",
"e",
"else",
":",
"return",
"result",
"raise",
"error"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
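A usage sketch of callBigDlFunc that mirrors the doctest shown later in this file (the JTensor round-trip through the Java-side "testTensor" method); it assumes an initialized BigDL gateway and SparkContext.

import numpy as np
from bigdl.util.common import callBigDlFunc, JTensor

data = np.random.random((2, 3)).astype("float32")
jtensor = JTensor.from_ndarray(data)
# Dispatches to the first registered Java invoker that defines "testTensor".
result = callBigDlFunc("float", "testTensor", jtensor)
print(result.to_ndarray().shape)   # (2, 3)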
|
test
|
callJavaFunc
|
Call Java Function
|
pyspark/bigdl/util/common.py
|
def callJavaFunc(func, *args):
""" Call Java Function """
gateway = _get_gateway()
args = [_py2java(gateway, a) for a in args]
result = func(*args)
return _java2py(gateway, result)
|
def callJavaFunc(func, *args):
""" Call Java Function """
gateway = _get_gateway()
args = [_py2java(gateway, a) for a in args]
result = func(*args)
return _java2py(gateway, result)
|
[
"Call",
"Java",
"Function"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L630-L635
|
[
"def",
"callJavaFunc",
"(",
"func",
",",
"*",
"args",
")",
":",
"gateway",
"=",
"_get_gateway",
"(",
")",
"args",
"=",
"[",
"_py2java",
"(",
"gateway",
",",
"a",
")",
"for",
"a",
"in",
"args",
"]",
"result",
"=",
"func",
"(",
"*",
"args",
")",
"return",
"_java2py",
"(",
"gateway",
",",
"result",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
_to_java_object_rdd
|
Return a JavaRDD of Object by unpickling
It will convert each Python object into a Java object by Pyrolite, whether
the RDD is serialized in batch or not.
|
pyspark/bigdl/util/common.py
|
def _to_java_object_rdd(rdd):
""" Return a JavaRDD of Object by unpickling
    It will convert each Python object into a Java object by Pyrolite, whether
the RDD is serialized in batch or not.
"""
rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
return \
rdd.ctx._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.pythonToJava(
rdd._jrdd, True)
|
def _to_java_object_rdd(rdd):
""" Return a JavaRDD of Object by unpickling
    It will convert each Python object into a Java object by Pyrolite, whether
the RDD is serialized in batch or not.
"""
rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
return \
rdd.ctx._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.pythonToJava(
rdd._jrdd, True)
|
[
"Return",
"a",
"JavaRDD",
"of",
"Object",
"by",
"unpickling"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L638-L648
|
[
"def",
"_to_java_object_rdd",
"(",
"rdd",
")",
":",
"rdd",
"=",
"rdd",
".",
"_reserialize",
"(",
"AutoBatchedSerializer",
"(",
"PickleSerializer",
"(",
")",
")",
")",
"return",
"rdd",
".",
"ctx",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"bigdl",
".",
"api",
".",
"python",
".",
"BigDLSerDe",
".",
"pythonToJava",
"(",
"rdd",
".",
"_jrdd",
",",
"True",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
_py2java
|
Convert Python object into Java
|
pyspark/bigdl/util/common.py
|
def _py2java(gateway, obj):
""" Convert Python object into Java """
if isinstance(obj, RDD):
obj = _to_java_object_rdd(obj)
elif isinstance(obj, DataFrame):
obj = obj._jdf
elif isinstance(obj, SparkContext):
obj = obj._jsc
elif isinstance(obj, (list, tuple)):
obj = ListConverter().convert([_py2java(gateway, x) for x in obj],
gateway._gateway_client)
elif isinstance(obj, dict):
result = {}
for (key, value) in obj.items():
result[key] = _py2java(gateway, value)
obj = MapConverter().convert(result, gateway._gateway_client)
elif isinstance(obj, JavaValue):
obj = obj.value
elif isinstance(obj, JavaObject):
pass
elif isinstance(obj, (int, long, float, bool, bytes, unicode)):
pass
else:
data = bytearray(PickleSerializer().dumps(obj))
obj = gateway.jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.loads(data)
return obj
|
def _py2java(gateway, obj):
""" Convert Python object into Java """
if isinstance(obj, RDD):
obj = _to_java_object_rdd(obj)
elif isinstance(obj, DataFrame):
obj = obj._jdf
elif isinstance(obj, SparkContext):
obj = obj._jsc
elif isinstance(obj, (list, tuple)):
obj = ListConverter().convert([_py2java(gateway, x) for x in obj],
gateway._gateway_client)
elif isinstance(obj, dict):
result = {}
for (key, value) in obj.items():
result[key] = _py2java(gateway, value)
obj = MapConverter().convert(result, gateway._gateway_client)
elif isinstance(obj, JavaValue):
obj = obj.value
elif isinstance(obj, JavaObject):
pass
elif isinstance(obj, (int, long, float, bool, bytes, unicode)):
pass
else:
data = bytearray(PickleSerializer().dumps(obj))
obj = gateway.jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.loads(data)
return obj
|
[
"Convert",
"Python",
"object",
"into",
"Java"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L651-L676
|
[
"def",
"_py2java",
"(",
"gateway",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"RDD",
")",
":",
"obj",
"=",
"_to_java_object_rdd",
"(",
"obj",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"DataFrame",
")",
":",
"obj",
"=",
"obj",
".",
"_jdf",
"elif",
"isinstance",
"(",
"obj",
",",
"SparkContext",
")",
":",
"obj",
"=",
"obj",
".",
"_jsc",
"elif",
"isinstance",
"(",
"obj",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"obj",
"=",
"ListConverter",
"(",
")",
".",
"convert",
"(",
"[",
"_py2java",
"(",
"gateway",
",",
"x",
")",
"for",
"x",
"in",
"obj",
"]",
",",
"gateway",
".",
"_gateway_client",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"result",
"=",
"{",
"}",
"for",
"(",
"key",
",",
"value",
")",
"in",
"obj",
".",
"items",
"(",
")",
":",
"result",
"[",
"key",
"]",
"=",
"_py2java",
"(",
"gateway",
",",
"value",
")",
"obj",
"=",
"MapConverter",
"(",
")",
".",
"convert",
"(",
"result",
",",
"gateway",
".",
"_gateway_client",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"JavaValue",
")",
":",
"obj",
"=",
"obj",
".",
"value",
"elif",
"isinstance",
"(",
"obj",
",",
"JavaObject",
")",
":",
"pass",
"elif",
"isinstance",
"(",
"obj",
",",
"(",
"int",
",",
"long",
",",
"float",
",",
"bool",
",",
"bytes",
",",
"unicode",
")",
")",
":",
"pass",
"else",
":",
"data",
"=",
"bytearray",
"(",
"PickleSerializer",
"(",
")",
".",
"dumps",
"(",
"obj",
")",
")",
"obj",
"=",
"gateway",
".",
"jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"bigdl",
".",
"api",
".",
"python",
".",
"BigDLSerDe",
".",
"loads",
"(",
"data",
")",
"return",
"obj"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
get_activation_by_name
|
Convert to a bigdl activation layer
given the name of the activation as a string
|
pyspark/bigdl/util/common.py
|
def get_activation_by_name(activation_name, activation_id=None):
""" Convert to a bigdl activation layer
given the name of the activation as a string """
import bigdl.nn.layer as BLayer
activation = None
activation_name = activation_name.lower()
if activation_name == "tanh":
activation = BLayer.Tanh()
elif activation_name == "sigmoid":
activation = BLayer.Sigmoid()
elif activation_name == "hard_sigmoid":
activation = BLayer.HardSigmoid()
elif activation_name == "relu":
activation = BLayer.ReLU()
elif activation_name == "softmax":
activation = BLayer.SoftMax()
elif activation_name == "softplus":
activation = BLayer.SoftPlus(beta=1.0)
elif activation_name == "softsign":
activation = BLayer.SoftSign()
elif activation_name == "linear":
activation = BLayer.Identity()
else:
raise Exception("Unsupported activation type: %s" % activation_name)
    if activation_id:
activation.set_name(activation_id)
return activation
|
def get_activation_by_name(activation_name, activation_id=None):
""" Convert to a bigdl activation layer
given the name of the activation as a string """
import bigdl.nn.layer as BLayer
activation = None
activation_name = activation_name.lower()
if activation_name == "tanh":
activation = BLayer.Tanh()
elif activation_name == "sigmoid":
activation = BLayer.Sigmoid()
elif activation_name == "hard_sigmoid":
activation = BLayer.HardSigmoid()
elif activation_name == "relu":
activation = BLayer.ReLU()
elif activation_name == "softmax":
activation = BLayer.SoftMax()
elif activation_name == "softplus":
activation = BLayer.SoftPlus(beta=1.0)
elif activation_name == "softsign":
activation = BLayer.SoftSign()
elif activation_name == "linear":
activation = BLayer.Identity()
else:
raise Exception("Unsupported activation type: %s" % activation_name)
    if activation_id:
activation.set_name(activation_id)
return activation
|
[
"Convert",
"to",
"a",
"bigdl",
"activation",
"layer",
"given",
"the",
"name",
"of",
"the",
"activation",
"as",
"a",
"string"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L704-L730
|
[
"def",
"get_activation_by_name",
"(",
"activation_name",
",",
"activation_id",
"=",
"None",
")",
":",
"import",
"bigdl",
".",
"nn",
".",
"layer",
"as",
"BLayer",
"activation",
"=",
"None",
"activation_name",
"=",
"activation_name",
".",
"lower",
"(",
")",
"if",
"activation_name",
"==",
"\"tanh\"",
":",
"activation",
"=",
"BLayer",
".",
"Tanh",
"(",
")",
"elif",
"activation_name",
"==",
"\"sigmoid\"",
":",
"activation",
"=",
"BLayer",
".",
"Sigmoid",
"(",
")",
"elif",
"activation_name",
"==",
"\"hard_sigmoid\"",
":",
"activation",
"=",
"BLayer",
".",
"HardSigmoid",
"(",
")",
"elif",
"activation_name",
"==",
"\"relu\"",
":",
"activation",
"=",
"BLayer",
".",
"ReLU",
"(",
")",
"elif",
"activation_name",
"==",
"\"softmax\"",
":",
"activation",
"=",
"BLayer",
".",
"SoftMax",
"(",
")",
"elif",
"activation_name",
"==",
"\"softplus\"",
":",
"activation",
"=",
"BLayer",
".",
"SoftPlus",
"(",
"beta",
"=",
"1.0",
")",
"elif",
"activation_name",
"==",
"\"softsign\"",
":",
"activation",
"=",
"BLayer",
".",
"SoftSign",
"(",
")",
"elif",
"activation_name",
"==",
"\"linear\"",
":",
"activation",
"=",
"BLayer",
".",
"Identity",
"(",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Unsupported activation type: %s\"",
"%",
"activation_name",
")",
"if",
"not",
"activation_id",
":",
"activation",
".",
"set_name",
"(",
"activation_id",
")",
"return",
"activation"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
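A small sketch of get_activation_by_name; the import path follows the record's file path and the supported names come from the branches in the source above.

from bigdl.util.common import get_activation_by_name

relu = get_activation_by_name("relu")                    # returns bigdl.nn.layer.ReLU
named = get_activation_by_name("softmax", "my_softmax")  # optional second argument names the layer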
|
test
|
JTensor.from_ndarray
|
Convert an ndarray to a DenseTensor to be used on the Java side.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> from bigdl.util.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.random.uniform(0, 1, (2, 3)).astype("float32")
>>> result = JTensor.from_ndarray(data)
>>> expected_storage = np.array([[0.69646919, 0.28613934, 0.22685145], [0.55131477, 0.71946895, 0.42310646]])
>>> expected_shape = np.array([2, 3])
>>> np.testing.assert_allclose(result.storage, expected_storage, rtol=1e-6, atol=1e-6)
>>> np.testing.assert_allclose(result.shape, expected_shape)
>>> data_back = result.to_ndarray()
>>> (data == data_back).all()
True
>>> tensor1 = callBigDlFunc("float", "testTensor", JTensor.from_ndarray(data)) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> (array_from_tensor == data).all()
True
|
pyspark/bigdl/util/common.py
|
def from_ndarray(cls, a_ndarray, bigdl_type="float"):
"""
        Convert an ndarray to a DenseTensor to be used on the Java side.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> from bigdl.util.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.random.uniform(0, 1, (2, 3)).astype("float32")
>>> result = JTensor.from_ndarray(data)
>>> expected_storage = np.array([[0.69646919, 0.28613934, 0.22685145], [0.55131477, 0.71946895, 0.42310646]])
>>> expected_shape = np.array([2, 3])
>>> np.testing.assert_allclose(result.storage, expected_storage, rtol=1e-6, atol=1e-6)
>>> np.testing.assert_allclose(result.shape, expected_shape)
>>> data_back = result.to_ndarray()
>>> (data == data_back).all()
True
>>> tensor1 = callBigDlFunc("float", "testTensor", JTensor.from_ndarray(data)) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> (array_from_tensor == data).all()
True
"""
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"input should be a np.ndarray, not %s" % type(a_ndarray)
return cls(a_ndarray,
a_ndarray.shape if a_ndarray.shape else (a_ndarray.size),
bigdl_type)
|
def from_ndarray(cls, a_ndarray, bigdl_type="float"):
"""
        Convert an ndarray to a DenseTensor to be used on the Java side.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> from bigdl.util.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.random.uniform(0, 1, (2, 3)).astype("float32")
>>> result = JTensor.from_ndarray(data)
>>> expected_storage = np.array([[0.69646919, 0.28613934, 0.22685145], [0.55131477, 0.71946895, 0.42310646]])
>>> expected_shape = np.array([2, 3])
>>> np.testing.assert_allclose(result.storage, expected_storage, rtol=1e-6, atol=1e-6)
>>> np.testing.assert_allclose(result.shape, expected_shape)
>>> data_back = result.to_ndarray()
>>> (data == data_back).all()
True
>>> tensor1 = callBigDlFunc("float", "testTensor", JTensor.from_ndarray(data)) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> (array_from_tensor == data).all()
True
"""
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"input should be a np.ndarray, not %s" % type(a_ndarray)
return cls(a_ndarray,
a_ndarray.shape if a_ndarray.shape else (a_ndarray.size),
bigdl_type)
|
[
"Convert",
"a",
"ndarray",
"to",
"a",
"DenseTensor",
"which",
"would",
"be",
"used",
"in",
"Java",
"side",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L184-L212
|
[
"def",
"from_ndarray",
"(",
"cls",
",",
"a_ndarray",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"if",
"a_ndarray",
"is",
"None",
":",
"return",
"None",
"assert",
"isinstance",
"(",
"a_ndarray",
",",
"np",
".",
"ndarray",
")",
",",
"\"input should be a np.ndarray, not %s\"",
"%",
"type",
"(",
"a_ndarray",
")",
"return",
"cls",
"(",
"a_ndarray",
",",
"a_ndarray",
".",
"shape",
"if",
"a_ndarray",
".",
"shape",
"else",
"(",
"a_ndarray",
".",
"size",
")",
",",
"bigdl_type",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
JTensor.sparse
|
Convert three ndarrays to a SparseTensor to be used on the Java side.
For example:
a_ndarray = [1, 3, 2, 4]
i_ndarray = [[0, 0, 1, 2],
[0, 3, 2, 1]]
shape = [3, 4]
Present a dense tensor
[[ 1, 0, 0, 3],
[ 0, 0, 2, 0],
[ 0, 4, 0, 0]]
:param a_ndarray non-zero elements in this SparseTensor
:param i_ndarray zero-based indices for non-zero element
i_ndarray's shape should be (shape.size, a_ndarray.size)
And the i-th non-zero elements indices is i_ndarray[:, 1],
should be zero-based and ascending;
:param shape shape as a DenseTensor.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> from bigdl.util.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.arange(1, 7).astype("float32")
>>> indices = np.arange(1, 7)
>>> shape = np.array([10])
>>> result = JTensor.sparse(data, indices, shape)
>>> expected_storage = np.array([1., 2., 3., 4., 5., 6.])
>>> expected_shape = np.array([10])
>>> expected_indices = np.array([1, 2, 3, 4, 5, 6])
>>> np.testing.assert_allclose(result.storage, expected_storage)
>>> np.testing.assert_allclose(result.shape, expected_shape)
>>> np.testing.assert_allclose(result.indices, expected_indices)
>>> tensor1 = callBigDlFunc("float", "testTensor", result) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> expected_ndarray = np.array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])
>>> (array_from_tensor == expected_ndarray).all()
True
|
pyspark/bigdl/util/common.py
|
def sparse(cls, a_ndarray, i_ndarray, shape, bigdl_type="float"):
"""
        Convert three ndarrays to a SparseTensor to be used on the Java side.
For example:
a_ndarray = [1, 3, 2, 4]
i_ndarray = [[0, 0, 1, 2],
[0, 3, 2, 1]]
shape = [3, 4]
Present a dense tensor
[[ 1, 0, 0, 3],
[ 0, 0, 2, 0],
[ 0, 4, 0, 0]]
:param a_ndarray non-zero elements in this SparseTensor
:param i_ndarray zero-based indices for non-zero element
i_ndarray's shape should be (shape.size, a_ndarray.size)
And the i-th non-zero elements indices is i_ndarray[:, 1],
should be zero-based and ascending;
:param shape shape as a DenseTensor.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> from bigdl.util.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.arange(1, 7).astype("float32")
>>> indices = np.arange(1, 7)
>>> shape = np.array([10])
>>> result = JTensor.sparse(data, indices, shape)
>>> expected_storage = np.array([1., 2., 3., 4., 5., 6.])
>>> expected_shape = np.array([10])
>>> expected_indices = np.array([1, 2, 3, 4, 5, 6])
>>> np.testing.assert_allclose(result.storage, expected_storage)
>>> np.testing.assert_allclose(result.shape, expected_shape)
>>> np.testing.assert_allclose(result.indices, expected_indices)
>>> tensor1 = callBigDlFunc("float", "testTensor", result) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> expected_ndarray = np.array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])
>>> (array_from_tensor == expected_ndarray).all()
True
"""
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"values array should be a np.ndarray, not %s" % type(a_ndarray)
assert isinstance(i_ndarray, np.ndarray), \
"indices array should be a np.ndarray, not %s" % type(a_ndarray)
assert i_ndarray.size == a_ndarray.size * shape.size, \
"size of values and indices should match."
return cls(a_ndarray,
shape,
bigdl_type,
i_ndarray)
|
def sparse(cls, a_ndarray, i_ndarray, shape, bigdl_type="float"):
"""
        Convert three ndarrays to a SparseTensor to be used on the Java side.
For example:
a_ndarray = [1, 3, 2, 4]
i_ndarray = [[0, 0, 1, 2],
[0, 3, 2, 1]]
shape = [3, 4]
Present a dense tensor
[[ 1, 0, 0, 3],
[ 0, 0, 2, 0],
[ 0, 4, 0, 0]]
:param a_ndarray non-zero elements in this SparseTensor
:param i_ndarray zero-based indices for non-zero element
i_ndarray's shape should be (shape.size, a_ndarray.size)
And the i-th non-zero elements indices is i_ndarray[:, 1],
should be zero-based and ascending;
:param shape shape as a DenseTensor.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> from bigdl.util.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.arange(1, 7).astype("float32")
>>> indices = np.arange(1, 7)
>>> shape = np.array([10])
>>> result = JTensor.sparse(data, indices, shape)
>>> expected_storage = np.array([1., 2., 3., 4., 5., 6.])
>>> expected_shape = np.array([10])
>>> expected_indices = np.array([1, 2, 3, 4, 5, 6])
>>> np.testing.assert_allclose(result.storage, expected_storage)
>>> np.testing.assert_allclose(result.shape, expected_shape)
>>> np.testing.assert_allclose(result.indices, expected_indices)
>>> tensor1 = callBigDlFunc("float", "testTensor", result) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> expected_ndarray = np.array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])
>>> (array_from_tensor == expected_ndarray).all()
True
"""
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"values array should be a np.ndarray, not %s" % type(a_ndarray)
assert isinstance(i_ndarray, np.ndarray), \
"indices array should be a np.ndarray, not %s" % type(a_ndarray)
assert i_ndarray.size == a_ndarray.size * shape.size, \
"size of values and indices should match."
return cls(a_ndarray,
shape,
bigdl_type,
i_ndarray)
|
[
"Convert",
"a",
"three",
"ndarray",
"to",
"SparseTensor",
"which",
"would",
"be",
"used",
"in",
"Java",
"side",
".",
"For",
"example",
":",
"a_ndarray",
"=",
"[",
"1",
"3",
"2",
"4",
"]",
"i_ndarray",
"=",
"[[",
"0",
"0",
"1",
"2",
"]",
"[",
"0",
"3",
"2",
"1",
"]]",
"shape",
"=",
"[",
"3",
"4",
"]",
"Present",
"a",
"dense",
"tensor",
"[[",
"1",
"0",
"0",
"3",
"]",
"[",
"0",
"0",
"2",
"0",
"]",
"[",
"0",
"4",
"0",
"0",
"]]"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L215-L266
|
[
"def",
"sparse",
"(",
"cls",
",",
"a_ndarray",
",",
"i_ndarray",
",",
"shape",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"if",
"a_ndarray",
"is",
"None",
":",
"return",
"None",
"assert",
"isinstance",
"(",
"a_ndarray",
",",
"np",
".",
"ndarray",
")",
",",
"\"values array should be a np.ndarray, not %s\"",
"%",
"type",
"(",
"a_ndarray",
")",
"assert",
"isinstance",
"(",
"i_ndarray",
",",
"np",
".",
"ndarray",
")",
",",
"\"indices array should be a np.ndarray, not %s\"",
"%",
"type",
"(",
"a_ndarray",
")",
"assert",
"i_ndarray",
".",
"size",
"==",
"a_ndarray",
".",
"size",
"*",
"shape",
".",
"size",
",",
"\"size of values and indices should match.\"",
"return",
"cls",
"(",
"a_ndarray",
",",
"shape",
",",
"bigdl_type",
",",
"i_ndarray",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
JTensor.to_ndarray
|
Transfer JTensor to ndarray.
As a SparseTensor may generate a very big ndarray, this function is not supported for SparseTensor.
:return: a ndarray
|
pyspark/bigdl/util/common.py
|
def to_ndarray(self):
"""
Transfer JTensor to ndarray.
        As a SparseTensor may generate a very big ndarray, this function is not supported for SparseTensor.
:return: a ndarray
"""
assert self.indices is None, "sparseTensor to ndarray is not supported"
return np.array(self.storage, dtype=get_dtype(self.bigdl_type)).reshape(self.shape)
|
def to_ndarray(self):
"""
Transfer JTensor to ndarray.
        As a SparseTensor may generate a very big ndarray, this function is not supported for SparseTensor.
:return: a ndarray
"""
assert self.indices is None, "sparseTensor to ndarray is not supported"
return np.array(self.storage, dtype=get_dtype(self.bigdl_type)).reshape(self.shape)
|
[
"Transfer",
"JTensor",
"to",
"ndarray",
".",
"As",
"SparseTensor",
"may",
"generate",
"an",
"very",
"big",
"ndarray",
"so",
"we",
"don",
"t",
"support",
"this",
"function",
"for",
"SparseTensor",
".",
":",
"return",
":",
"a",
"ndarray"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L268-L275
|
[
"def",
"to_ndarray",
"(",
"self",
")",
":",
"assert",
"self",
".",
"indices",
"is",
"None",
",",
"\"sparseTensor to ndarray is not supported\"",
"return",
"np",
".",
"array",
"(",
"self",
".",
"storage",
",",
"dtype",
"=",
"get_dtype",
"(",
"self",
".",
"bigdl_type",
")",
")",
".",
"reshape",
"(",
"self",
".",
"shape",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
Sample.from_ndarray
|
Convert ndarrays of features and labels to a Sample, to be used on the Java side.
:param features: an ndarray or a list of ndarrays
:param labels: an ndarray or a list of ndarrays or a scalar
:param bigdl_type: "double" or "float"
>>> import numpy as np
>>> from bigdl.util.common import callBigDlFunc
>>> from numpy.testing import assert_allclose
>>> np.random.seed(123)
>>> sample = Sample.from_ndarray(np.random.random((2,3)), np.random.random((2,3)))
>>> sample_back = callBigDlFunc("float", "testSample", sample)
>>> assert_allclose(sample.features[0].to_ndarray(), sample_back.features[0].to_ndarray())
>>> assert_allclose(sample.label.to_ndarray(), sample_back.label.to_ndarray())
>>> expected_feature_storage = np.array(([[0.69646919, 0.28613934, 0.22685145], [0.55131477, 0.71946895, 0.42310646]]))
>>> expected_feature_shape = np.array([2, 3])
>>> expected_label_storage = np.array(([[0.98076421, 0.68482971, 0.48093191], [0.39211753, 0.343178, 0.72904968]]))
>>> expected_label_shape = np.array([2, 3])
>>> assert_allclose(sample.features[0].storage, expected_feature_storage, rtol=1e-6, atol=1e-6)
>>> assert_allclose(sample.features[0].shape, expected_feature_shape)
>>> assert_allclose(sample.labels[0].storage, expected_label_storage, rtol=1e-6, atol=1e-6)
>>> assert_allclose(sample.labels[0].shape, expected_label_shape)
|
pyspark/bigdl/util/common.py
|
def from_ndarray(cls, features, labels, bigdl_type="float"):
"""
        Convert ndarrays of features and labels to a Sample, to be used on the Java side.
:param features: an ndarray or a list of ndarrays
:param labels: an ndarray or a list of ndarrays or a scalar
:param bigdl_type: "double" or "float"
>>> import numpy as np
>>> from bigdl.util.common import callBigDlFunc
>>> from numpy.testing import assert_allclose
>>> np.random.seed(123)
>>> sample = Sample.from_ndarray(np.random.random((2,3)), np.random.random((2,3)))
>>> sample_back = callBigDlFunc("float", "testSample", sample)
>>> assert_allclose(sample.features[0].to_ndarray(), sample_back.features[0].to_ndarray())
>>> assert_allclose(sample.label.to_ndarray(), sample_back.label.to_ndarray())
>>> expected_feature_storage = np.array(([[0.69646919, 0.28613934, 0.22685145], [0.55131477, 0.71946895, 0.42310646]]))
>>> expected_feature_shape = np.array([2, 3])
>>> expected_label_storage = np.array(([[0.98076421, 0.68482971, 0.48093191], [0.39211753, 0.343178, 0.72904968]]))
>>> expected_label_shape = np.array([2, 3])
>>> assert_allclose(sample.features[0].storage, expected_feature_storage, rtol=1e-6, atol=1e-6)
>>> assert_allclose(sample.features[0].shape, expected_feature_shape)
>>> assert_allclose(sample.labels[0].storage, expected_label_storage, rtol=1e-6, atol=1e-6)
>>> assert_allclose(sample.labels[0].shape, expected_label_shape)
"""
if isinstance(features, np.ndarray):
features = [features]
else:
assert all(isinstance(feature, np.ndarray) for feature in features), \
"features should be a list of np.ndarray, not %s" % type(features)
if np.isscalar(labels): # in case labels is a scalar.
labels = [np.array(labels)]
elif isinstance(labels, np.ndarray):
labels = [labels]
else:
assert all(isinstance(label, np.ndarray) for label in labels), \
"labels should be a list of np.ndarray, not %s" % type(labels)
return cls(
features=[JTensor.from_ndarray(feature) for feature in features],
labels=[JTensor.from_ndarray(label) for label in labels],
bigdl_type=bigdl_type)
|
def from_ndarray(cls, features, labels, bigdl_type="float"):
"""
        Convert ndarrays of features and labels to a Sample, to be used on the Java side.
:param features: an ndarray or a list of ndarrays
:param labels: an ndarray or a list of ndarrays or a scalar
:param bigdl_type: "double" or "float"
>>> import numpy as np
>>> from bigdl.util.common import callBigDlFunc
>>> from numpy.testing import assert_allclose
>>> np.random.seed(123)
>>> sample = Sample.from_ndarray(np.random.random((2,3)), np.random.random((2,3)))
>>> sample_back = callBigDlFunc("float", "testSample", sample)
>>> assert_allclose(sample.features[0].to_ndarray(), sample_back.features[0].to_ndarray())
>>> assert_allclose(sample.label.to_ndarray(), sample_back.label.to_ndarray())
>>> expected_feature_storage = np.array(([[0.69646919, 0.28613934, 0.22685145], [0.55131477, 0.71946895, 0.42310646]]))
>>> expected_feature_shape = np.array([2, 3])
>>> expected_label_storage = np.array(([[0.98076421, 0.68482971, 0.48093191], [0.39211753, 0.343178, 0.72904968]]))
>>> expected_label_shape = np.array([2, 3])
>>> assert_allclose(sample.features[0].storage, expected_feature_storage, rtol=1e-6, atol=1e-6)
>>> assert_allclose(sample.features[0].shape, expected_feature_shape)
>>> assert_allclose(sample.labels[0].storage, expected_label_storage, rtol=1e-6, atol=1e-6)
>>> assert_allclose(sample.labels[0].shape, expected_label_shape)
"""
if isinstance(features, np.ndarray):
features = [features]
else:
assert all(isinstance(feature, np.ndarray) for feature in features), \
"features should be a list of np.ndarray, not %s" % type(features)
if np.isscalar(labels): # in case labels is a scalar.
labels = [np.array(labels)]
elif isinstance(labels, np.ndarray):
labels = [labels]
else:
assert all(isinstance(label, np.ndarray) for label in labels), \
"labels should be a list of np.ndarray, not %s" % type(labels)
return cls(
features=[JTensor.from_ndarray(feature) for feature in features],
labels=[JTensor.from_ndarray(label) for label in labels],
bigdl_type=bigdl_type)
|
[
"Convert",
"a",
"ndarray",
"of",
"features",
"and",
"labels",
"to",
"Sample",
"which",
"would",
"be",
"used",
"in",
"Java",
"side",
".",
":",
"param",
"features",
":",
"an",
"ndarray",
"or",
"a",
"list",
"of",
"ndarrays",
":",
"param",
"labels",
":",
"an",
"ndarray",
"or",
"a",
"list",
"of",
"ndarrays",
"or",
"a",
"scalar",
":",
"param",
"bigdl_type",
":",
"double",
"or",
"float"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L306-L345
|
[
"def",
"from_ndarray",
"(",
"cls",
",",
"features",
",",
"labels",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"if",
"isinstance",
"(",
"features",
",",
"np",
".",
"ndarray",
")",
":",
"features",
"=",
"[",
"features",
"]",
"else",
":",
"assert",
"all",
"(",
"isinstance",
"(",
"feature",
",",
"np",
".",
"ndarray",
")",
"for",
"feature",
"in",
"features",
")",
",",
"\"features should be a list of np.ndarray, not %s\"",
"%",
"type",
"(",
"features",
")",
"if",
"np",
".",
"isscalar",
"(",
"labels",
")",
":",
"# in case labels is a scalar.",
"labels",
"=",
"[",
"np",
".",
"array",
"(",
"labels",
")",
"]",
"elif",
"isinstance",
"(",
"labels",
",",
"np",
".",
"ndarray",
")",
":",
"labels",
"=",
"[",
"labels",
"]",
"else",
":",
"assert",
"all",
"(",
"isinstance",
"(",
"label",
",",
"np",
".",
"ndarray",
")",
"for",
"label",
"in",
"labels",
")",
",",
"\"labels should be a list of np.ndarray, not %s\"",
"%",
"type",
"(",
"labels",
")",
"return",
"cls",
"(",
"features",
"=",
"[",
"JTensor",
".",
"from_ndarray",
"(",
"feature",
")",
"for",
"feature",
"in",
"features",
"]",
",",
"labels",
"=",
"[",
"JTensor",
".",
"from_ndarray",
"(",
"label",
")",
"for",
"label",
"in",
"labels",
"]",
",",
"bigdl_type",
"=",
"bigdl_type",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
FeatureTransformer.transform
|
transform ImageFeature
|
pyspark/bigdl/transform/vision/image.py
|
def transform(self, image_feature, bigdl_type="float"):
"""
transform ImageFeature
"""
callBigDlFunc(bigdl_type, "transformImageFeature", self.value, image_feature)
return image_feature
|
def transform(self, image_feature, bigdl_type="float"):
"""
transform ImageFeature
"""
callBigDlFunc(bigdl_type, "transformImageFeature", self.value, image_feature)
return image_feature
|
[
"transform",
"ImageFeature"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L36-L41
|
[
"def",
"transform",
"(",
"self",
",",
"image_feature",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"callBigDlFunc",
"(",
"bigdl_type",
",",
"\"transformImageFeature\"",
",",
"self",
".",
"value",
",",
"image_feature",
")",
"return",
"image_feature"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
ImageFeature.get_label
|
get label as ndarray from ImageFeature
|
pyspark/bigdl/transform/vision/image.py
|
def get_label(self):
"""
get label as ndarray from ImageFeature
"""
label = callBigDlFunc(self.bigdl_type, "imageFeatureToLabelTensor", self.value)
return label.to_ndarray()
|
def get_label(self):
"""
get label as ndarray from ImageFeature
"""
label = callBigDlFunc(self.bigdl_type, "imageFeatureToLabelTensor", self.value)
return label.to_ndarray()
|
[
"get",
"label",
"as",
"ndarray",
"from",
"ImageFeature"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L87-L92
|
[
"def",
"get_label",
"(",
"self",
")",
":",
"label",
"=",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"imageFeatureToLabelTensor\"",
",",
"self",
".",
"value",
")",
"return",
"label",
".",
"to_ndarray",
"(",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
ImageFrame.read
|
Read images as Image Frame
if sc is defined, Read image as DistributedImageFrame from local file system or HDFS
if sc is null, Read image as LocalImageFrame from local file system
:param path path to read images
if sc is defined, path can be local or HDFS. Wildcard characters are supported.
if sc is null, path is local directory/image file/image file with wildcard character
:param sc SparkContext
:param min_partitions A suggestion value of the minimal splitting number for input data.
:return ImageFrame
|
pyspark/bigdl/transform/vision/image.py
|
def read(cls, path, sc=None, min_partitions=1, bigdl_type="float"):
"""
Read images as Image Frame
if sc is defined, Read image as DistributedImageFrame from local file system or HDFS
if sc is null, Read image as LocalImageFrame from local file system
:param path path to read images
        if sc is defined, path can be local or HDFS. Wildcard characters are supported.
if sc is null, path is local directory/image file/image file with wildcard character
:param sc SparkContext
:param min_partitions A suggestion value of the minimal splitting number for input data.
:return ImageFrame
"""
return ImageFrame(jvalue=callBigDlFunc(bigdl_type, "read", path, sc, min_partitions))
|
def read(cls, path, sc=None, min_partitions=1, bigdl_type="float"):
"""
Read images as Image Frame
if sc is defined, Read image as DistributedImageFrame from local file system or HDFS
if sc is null, Read image as LocalImageFrame from local file system
:param path path to read images
        if sc is defined, path can be local or HDFS. Wildcard characters are supported.
if sc is null, path is local directory/image file/image file with wildcard character
:param sc SparkContext
:param min_partitions A suggestion value of the minimal splitting number for input data.
:return ImageFrame
"""
return ImageFrame(jvalue=callBigDlFunc(bigdl_type, "read", path, sc, min_partitions))
|
[
"Read",
"images",
"as",
"Image",
"Frame",
"if",
"sc",
"is",
"defined",
"Read",
"image",
"as",
"DistributedImageFrame",
"from",
"local",
"file",
"system",
"or",
"HDFS",
"if",
"sc",
"is",
"null",
"Read",
"image",
"as",
"LocalImageFrame",
"from",
"local",
"file",
"system",
":",
"param",
"path",
"path",
"to",
"read",
"images",
"if",
"sc",
"is",
"defined",
"path",
"can",
"be",
"local",
"or",
"HDFS",
".",
"Wildcard",
"character",
"are",
"supported",
".",
"if",
"sc",
"is",
"null",
"path",
"is",
"local",
"directory",
"/",
"image",
"file",
"/",
"image",
"file",
"with",
"wildcard",
"character",
":",
"param",
"sc",
"SparkContext",
":",
"param",
"min_partitions",
"A",
"suggestion",
"value",
"of",
"the",
"minimal",
"splitting",
"number",
"for",
"input",
"data",
".",
":",
"return",
"ImageFrame"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L115-L127
|
[
"def",
"read",
"(",
"cls",
",",
"path",
",",
"sc",
"=",
"None",
",",
"min_partitions",
"=",
"1",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"return",
"ImageFrame",
"(",
"jvalue",
"=",
"callBigDlFunc",
"(",
"bigdl_type",
",",
"\"read\"",
",",
"path",
",",
"sc",
",",
"min_partitions",
")",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
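A minimal sketch of ImageFrame.read; the image path is hypothetical and an active SparkContext is assumed.

from bigdl.util.common import get_spark_context
from bigdl.transform.vision.image import ImageFrame

sc = get_spark_context()
# With sc given, images are read as a DistributedImageFrame from local or HDFS
# paths (wildcards allowed); without sc a LocalImageFrame is returned.
image_frame = ImageFrame.read("/tmp/images/*.jpg", sc, min_partitions=2)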
|
test
|
ImageFrame.read_parquet
|
Read parquet file as DistributedImageFrame
|
pyspark/bigdl/transform/vision/image.py
|
def read_parquet(cls, path, sc, bigdl_type="float"):
"""
Read parquet file as DistributedImageFrame
"""
return DistributedImageFrame(jvalue=callBigDlFunc(bigdl_type, "readParquet", path, sc))
|
def read_parquet(cls, path, sc, bigdl_type="float"):
"""
Read parquet file as DistributedImageFrame
"""
return DistributedImageFrame(jvalue=callBigDlFunc(bigdl_type, "readParquet", path, sc))
|
[
"Read",
"parquet",
"file",
"as",
"DistributedImageFrame"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L130-L134
|
[
"def",
"read_parquet",
"(",
"cls",
",",
"path",
",",
"sc",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"return",
"DistributedImageFrame",
"(",
"jvalue",
"=",
"callBigDlFunc",
"(",
"bigdl_type",
",",
"\"readParquet\"",
",",
"path",
",",
"sc",
")",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
ImageFrame.write_parquet
|
write ImageFrame as parquet file
|
pyspark/bigdl/transform/vision/image.py
|
def write_parquet(cls, path, output, sc, partition_num = 1, bigdl_type="float"):
"""
write ImageFrame as parquet file
"""
return callBigDlFunc(bigdl_type, "writeParquet", path, output, sc, partition_num)
|
def write_parquet(cls, path, output, sc, partition_num = 1, bigdl_type="float"):
"""
write ImageFrame as parquet file
"""
return callBigDlFunc(bigdl_type, "writeParquet", path, output, sc, partition_num)
|
[
"write",
"ImageFrame",
"as",
"parquet",
"file"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L137-L141
|
[
"def",
"write_parquet",
"(",
"cls",
",",
"path",
",",
"output",
",",
"sc",
",",
"partition_num",
"=",
"1",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"return",
"callBigDlFunc",
"(",
"bigdl_type",
",",
"\"writeParquet\"",
",",
"path",
",",
"output",
",",
"sc",
",",
"partition_num",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
ImageFrame.transform
|
transformImageFrame
|
pyspark/bigdl/transform/vision/image.py
|
def transform(self, transformer, bigdl_type="float"):
"""
transformImageFrame
"""
self.value = callBigDlFunc(bigdl_type,
"transformImageFrame", transformer, self.value)
return self
|
def transform(self, transformer, bigdl_type="float"):
"""
transformImageFrame
"""
self.value = callBigDlFunc(bigdl_type,
"transformImageFrame", transformer, self.value)
return self
|
[
"transformImageFrame"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L155-L161
|
[
"def",
"transform",
"(",
"self",
",",
"transformer",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"self",
".",
"value",
"=",
"callBigDlFunc",
"(",
"bigdl_type",
",",
"\"transformImageFrame\"",
",",
"transformer",
",",
"self",
".",
"value",
")",
"return",
"self"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
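A small sketch of the transform entry above. Here my_transformer stands in for any vision transformer object accepted by transform and is not defined in these records; image_frame is an existing ImageFrame.

# transform updates self.value and returns self, so calls can be chained.
image_frame = image_frame.transform(my_transformer)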
|
test
|
ImageFrame.get_image
|
get image from ImageFrame
|
pyspark/bigdl/transform/vision/image.py
|
def get_image(self, float_key="floats", to_chw=True):
"""
get image from ImageFrame
"""
return self.image_frame.get_image(float_key, to_chw)
|
def get_image(self, float_key="floats", to_chw=True):
"""
get image from ImageFrame
"""
return self.image_frame.get_image(float_key, to_chw)
|
[
"get",
"image",
"from",
"ImageFrame"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L163-L167
|
[
"def",
"get_image",
"(",
"self",
",",
"float_key",
"=",
"\"floats\"",
",",
"to_chw",
"=",
"True",
")",
":",
"return",
"self",
".",
"image_frame",
".",
"get_image",
"(",
"float_key",
",",
"to_chw",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
ImageFrame.random_split
|
Randomly split ImageFrames according to the given weights
:param weights: weights for each ImageFrame
:return:
|
pyspark/bigdl/transform/vision/image.py
|
def random_split(self, weights):
"""
Random split imageframes according to weights
:param weights: weights for each ImageFrame
:return:
"""
jvalues = self.image_frame.random_split(weights)
return [ImageFrame(jvalue) for jvalue in jvalues]
|
def random_split(self, weights):
"""
Random split imageframes according to weights
:param weights: weights for each ImageFrame
:return:
"""
jvalues = self.image_frame.random_split(weights)
return [ImageFrame(jvalue) for jvalue in jvalues]
|
[
"Random",
"split",
"imageframes",
"according",
"to",
"weights",
":",
"param",
"weights",
":",
"weights",
"for",
"each",
"ImageFrame",
":",
"return",
":"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L200-L207
|
[
"def",
"random_split",
"(",
"self",
",",
"weights",
")",
":",
"jvalues",
"=",
"self",
".",
"image_frame",
".",
"random_split",
"(",
"weights",
")",
"return",
"[",
"ImageFrame",
"(",
"jvalue",
")",
"for",
"jvalue",
"in",
"jvalues",
"]"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
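A usage sketch for random_split; the 0.8/0.2 weights and the image_frame variable are illustrative assumptions.

# Split one ImageFrame into train/validation parts according to the weights.
train_frame, val_frame = image_frame.random_split([0.8, 0.2])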
|
test
|
LocalImageFrame.get_image
|
get image list from ImageFrame
|
pyspark/bigdl/transform/vision/image.py
|
def get_image(self, float_key="floats", to_chw=True):
"""
get image list from ImageFrame
"""
tensors = callBigDlFunc(self.bigdl_type,
"localImageFrameToImageTensor", self.value, float_key, to_chw)
return map(lambda tensor: tensor.to_ndarray(), tensors)
|
def get_image(self, float_key="floats", to_chw=True):
"""
get image list from ImageFrame
"""
tensors = callBigDlFunc(self.bigdl_type,
"localImageFrameToImageTensor", self.value, float_key, to_chw)
return map(lambda tensor: tensor.to_ndarray(), tensors)
|
[
"get",
"image",
"list",
"from",
"ImageFrame"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L226-L232
|
[
"def",
"get_image",
"(",
"self",
",",
"float_key",
"=",
"\"floats\"",
",",
"to_chw",
"=",
"True",
")",
":",
"tensors",
"=",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"localImageFrameToImageTensor\"",
",",
"self",
".",
"value",
",",
"float_key",
",",
"to_chw",
")",
"return",
"map",
"(",
"lambda",
"tensor",
":",
"tensor",
".",
"to_ndarray",
"(",
")",
",",
"tensors",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
DistributedImageFrame.get_label
|
get label rdd from ImageFrame
|
pyspark/bigdl/transform/vision/image.py
|
def get_label(self):
"""
get label rdd from ImageFrame
"""
tensor_rdd = callBigDlFunc(self.bigdl_type, "distributedImageFrameToLabelTensorRdd", self.value)
return tensor_rdd.map(lambda tensor: tensor.to_ndarray())
|
def get_label(self):
"""
get label rdd from ImageFrame
"""
tensor_rdd = callBigDlFunc(self.bigdl_type, "distributedImageFrameToLabelTensorRdd", self.value)
return tensor_rdd.map(lambda tensor: tensor.to_ndarray())
|
[
"get",
"label",
"rdd",
"from",
"ImageFrame"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L283-L288
|
[
"def",
"get_label",
"(",
"self",
")",
":",
"tensor_rdd",
"=",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"distributedImageFrameToLabelTensorRdd\"",
",",
"self",
".",
"value",
")",
"return",
"tensor_rdd",
".",
"map",
"(",
"lambda",
"tensor",
":",
"tensor",
".",
"to_ndarray",
"(",
")",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
DistributedImageFrame.get_predict
|
get prediction rdd from ImageFrame
|
pyspark/bigdl/transform/vision/image.py
|
def get_predict(self, key="predict"):
"""
get prediction rdd from ImageFrame
"""
predicts = callBigDlFunc(self.bigdl_type, "distributedImageFrameToPredict", self.value, key)
return predicts.map(lambda predict: (predict[0], predict[1].to_ndarray()) if predict[1] else (predict[0], None))
|
def get_predict(self, key="predict"):
"""
get prediction rdd from ImageFrame
"""
predicts = callBigDlFunc(self.bigdl_type, "distributedImageFrameToPredict", self.value, key)
return predicts.map(lambda predict: (predict[0], predict[1].to_ndarray()) if predict[1] else (predict[0], None))
|
[
"get",
"prediction",
"rdd",
"from",
"ImageFrame"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L290-L295
|
[
"def",
"get_predict",
"(",
"self",
",",
"key",
"=",
"\"predict\"",
")",
":",
"predicts",
"=",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"distributedImageFrameToPredict\"",
",",
"self",
".",
"value",
",",
"key",
")",
"return",
"predicts",
".",
"map",
"(",
"lambda",
"predict",
":",
"(",
"predict",
"[",
"0",
"]",
",",
"predict",
"[",
"1",
"]",
".",
"to_ndarray",
"(",
")",
")",
"if",
"predict",
"[",
"1",
"]",
"else",
"(",
"predict",
"[",
"0",
"]",
",",
"None",
")",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
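A sketch of reading back predictions via get_predict. It assumes distributed_frame is a DistributedImageFrame that already carries predictions under the default "predict" key, and that the first element of each returned pair identifies the image (an assumption, not stated in the record).

predict_rdd = distributed_frame.get_predict()
# Each element is (identifier, ndarray), or (identifier, None) when no prediction exists.
for key, prediction in predict_rdd.take(5):
    print(key, None if prediction is None else prediction.shape)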
|
test
|
SeqFileFolder.files_to_image_frame
|
Extract hadoop sequence files from an HDFS path as ImageFrame
:param url: sequence files folder path
:param sc: spark context
:param class_num: class number of data
:param partition_num: partition number, default: Engine.nodeNumber() * Engine.coreNumber()
|
pyspark/bigdl/transform/vision/image.py
|
def files_to_image_frame(cls,
url,
sc,
class_num,
partition_num=-1,
bigdl_type="float"):
"""
Extract hadoop sequence files from an HDFS path as ImageFrame
:param url: sequence files folder path
:param sc: spark context
:param class_num: class number of data
:param partition_num: partition number, default: Engine.nodeNumber() * Engine.coreNumber()
"""
jvalue = callBigDlFunc(bigdl_type,
"seqFilesToImageFrame",
url,
sc,
class_num,
partition_num)
return ImageFrame(jvalue=jvalue)
|
def files_to_image_frame(cls,
url,
sc,
class_num,
partition_num=-1,
bigdl_type="float"):
"""
Extract hadoop sequence files from an HDFS path as ImageFrame
:param url: sequence files folder path
:param sc: spark context
:param class_num: class number of data
:param partition_num: partition number, default: Engine.nodeNumber() * Engine.coreNumber()
"""
jvalue = callBigDlFunc(bigdl_type,
"seqFilesToImageFrame",
url,
sc,
class_num,
partition_num)
return ImageFrame(jvalue=jvalue)
|
[
"Extract",
"hadoop",
"sequence",
"files",
"from",
"an",
"HDFS",
"path",
"as",
"ImageFrame",
":",
"param",
"url",
":",
"sequence",
"files",
"folder",
"path",
":",
"param",
"sc",
":",
"spark",
"context",
":",
"param",
"class_num",
":",
"class",
"number",
"of",
"data",
":",
"param",
"partition_num",
":",
"partition",
"number",
"default",
":",
"Engine",
".",
"nodeNumber",
"()",
"*",
"Engine",
".",
"coreNumber",
"()"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L729-L748
|
[
"def",
"files_to_image_frame",
"(",
"cls",
",",
"url",
",",
"sc",
",",
"class_num",
",",
"partition_num",
"=",
"-",
"1",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"jvalue",
"=",
"callBigDlFunc",
"(",
"bigdl_type",
",",
"\"seqFilesToImageFrame\"",
",",
"url",
",",
"sc",
",",
"class_num",
",",
"partition_num",
")",
"return",
"ImageFrame",
"(",
"jvalue",
"=",
"jvalue",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
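A sketch for files_to_image_frame; the HDFS url and class_num value are illustrative, and sc is an existing SparkContext. The import path follows the record's file path.

from bigdl.transform.vision.image import SeqFileFolder

frame = SeqFileFolder.files_to_image_frame("hdfs://host:9000/imagenet_seq", sc, class_num=1000)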
|
test
|
KerasModelWrapper.evaluate
|
Evaluate a model by the given metrics.
:param x: ndarray or list of ndarrays for local mode,
or RDD[Sample] for distributed mode
:param y: ndarray or list of ndarrays for local mode; should be None for distributed mode.
:param batch_size: batch size
:param is_distributed: run in local mode or distributed mode.
NB: if is_distributed=true, x should be RDD[Sample] and y should be None
:return:
|
pyspark/bigdl/keras/backend.py
|
def evaluate(self, x, y, batch_size=32, sample_weight=None, is_distributed=False):
"""
Evaluate a model by the given metrics.
:param x: ndarray or list of ndarray for local mode.
RDD[Sample] for distributed mode
:param y: ndarray or list of ndarray for local mode and would be None for cluster mode.
:param batch_size
:param is_distributed: run in local mode or distributed mode.
NB: if is_distributed=true, x should be RDD[Sample] and y should be None
:return:
"""
if sample_weight:
unsupport_exp("sample_weight")
if is_distributed:
if isinstance(x, np.ndarray):
input = to_sample_rdd(x, y)
elif isinstance(x, RDD):
input = x
if self.metrics:
sc = get_spark_context()
return [r.result for r in
self.bmodel.evaluate(input, batch_size, self.metrics)]
else:
raise Exception("No Metrics found.")
else:
raise Exception("We only support evaluation in distributed mode")
|
def evaluate(self, x, y, batch_size=32, sample_weight=None, is_distributed=False):
"""
Evaluate a model by the given metrics.
:param x: ndarray or list of ndarray for local mode.
RDD[Sample] for distributed mode
:param y: ndarray or list of ndarray for local mode and would be None for cluster mode.
:param batch_size
:param is_distributed: run in local mode or distributed mode.
NB: if is_distributed=true, x should be RDD[Sample] and y should be None
:return:
"""
if sample_weight:
unsupport_exp("sample_weight")
if is_distributed:
if isinstance(x, np.ndarray):
input = to_sample_rdd(x, y)
elif isinstance(x, RDD):
input = x
if self.metrics:
sc = get_spark_context()
return [r.result for r in
self.bmodel.evaluate(input, batch_size, self.metrics)]
else:
raise Exception("No Metrics found.")
else:
raise Exception("We only support evaluation in distributed mode")
|
[
"Evaluate",
"a",
"model",
"by",
"the",
"given",
"metrics",
".",
":",
"param",
"x",
":",
"ndarray",
"or",
"list",
"of",
"ndarray",
"for",
"local",
"mode",
".",
"RDD",
"[",
"Sample",
"]",
"for",
"distributed",
"mode",
":",
"param",
"y",
":",
"ndarray",
"or",
"list",
"of",
"ndarray",
"for",
"local",
"mode",
"and",
"would",
"be",
"None",
"for",
"cluster",
"mode",
".",
":",
"param",
"batch_size",
":",
"param",
"is_distributed",
":",
"run",
"in",
"local",
"mode",
"or",
"distributed",
"mode",
".",
"NB",
":",
"if",
"is_distributed",
"=",
"true",
"x",
"should",
"be",
"RDD",
"[",
"Sample",
"]",
"and",
"y",
"should",
"be",
"None",
":",
"return",
":"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/keras/backend.py#L33-L58
|
[
"def",
"evaluate",
"(",
"self",
",",
"x",
",",
"y",
",",
"batch_size",
"=",
"32",
",",
"sample_weight",
"=",
"None",
",",
"is_distributed",
"=",
"False",
")",
":",
"if",
"sample_weight",
":",
"unsupport_exp",
"(",
"\"sample_weight\"",
")",
"if",
"is_distributed",
":",
"if",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
":",
"input",
"=",
"to_sample_rdd",
"(",
"x",
",",
"y",
")",
"elif",
"isinstance",
"(",
"x",
",",
"RDD",
")",
":",
"input",
"=",
"x",
"if",
"self",
".",
"metrics",
":",
"sc",
"=",
"get_spark_context",
"(",
")",
"return",
"[",
"r",
".",
"result",
"for",
"r",
"in",
"self",
".",
"bmodel",
".",
"evaluate",
"(",
"input",
",",
"batch_size",
",",
"self",
".",
"metrics",
")",
"]",
"else",
":",
"raise",
"Exception",
"(",
"\"No Metrics found.\"",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"We only support evaluation in distributed mode\"",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
KerasModelWrapper.predict
|
Generates output predictions for the input samples,
processing the samples in a batched way.
# Arguments
x: the input data, as a Numpy array or list of Numpy arrays for local mode,
or as RDD[Sample] for distributed mode
is_distributed: controls whether to run in local or distributed mode; the default value is False
# Returns
A Numpy array or RDD[Sample] of predictions.
|
pyspark/bigdl/keras/backend.py
|
def predict(self, x, batch_size=None, verbose=None, is_distributed=False):
"""Generates output predictions for the input samples,
processing the samples in a batched way.
# Arguments
x: the input data, as a Numpy array or list of Numpy array for local mode.
as RDD[Sample] for distributed mode
is_distributed: used to control run in local or cluster. the default value is False
# Returns
A Numpy array or RDD[Sample] of predictions.
"""
if batch_size or verbose:
raise Exception("we don't support batch_size or verbose for now")
if is_distributed:
if isinstance(x, np.ndarray):
input = to_sample_rdd(x, np.zeros([x.shape[0]]))
# np.asarray(self.bmodel.predict(x_rdd).collect())
elif isinstance(x, RDD):
input = x
return self.bmodel.predict(input)
else:
if isinstance(x, np.ndarray):
return self.bmodel.predict_local(x)
raise Exception("not supported type: %s" % x)
|
def predict(self, x, batch_size=None, verbose=None, is_distributed=False):
"""Generates output predictions for the input samples,
processing the samples in a batched way.
# Arguments
x: the input data, as a Numpy array or list of Numpy array for local mode.
as RDD[Sample] for distributed mode
is_distributed: used to control run in local or cluster. the default value is False
# Returns
A Numpy array or RDD[Sample] of predictions.
"""
if batch_size or verbose:
raise Exception("we don't support batch_size or verbose for now")
if is_distributed:
if isinstance(x, np.ndarray):
input = to_sample_rdd(x, np.zeros([x.shape[0]]))
# np.asarray(self.bmodel.predict(x_rdd).collect())
elif isinstance(x, RDD):
input = x
return self.bmodel.predict(input)
else:
if isinstance(x, np.ndarray):
return self.bmodel.predict_local(x)
raise Exception("not supported type: %s" % x)
|
[
"Generates",
"output",
"predictions",
"for",
"the",
"input",
"samples",
"processing",
"the",
"samples",
"in",
"a",
"batched",
"way",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/keras/backend.py#L60-L83
|
[
"def",
"predict",
"(",
"self",
",",
"x",
",",
"batch_size",
"=",
"None",
",",
"verbose",
"=",
"None",
",",
"is_distributed",
"=",
"False",
")",
":",
"if",
"batch_size",
"or",
"verbose",
":",
"raise",
"Exception",
"(",
"\"we don't support batch_size or verbose for now\"",
")",
"if",
"is_distributed",
":",
"if",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
":",
"input",
"=",
"to_sample_rdd",
"(",
"x",
",",
"np",
".",
"zeros",
"(",
"[",
"x",
".",
"shape",
"[",
"0",
"]",
"]",
")",
")",
"# np.asarray(self.bmodel.predict(x_rdd).collect())",
"elif",
"isinstance",
"(",
"x",
",",
"RDD",
")",
":",
"input",
"=",
"x",
"return",
"self",
".",
"bmodel",
".",
"predict",
"(",
"input",
")",
"else",
":",
"if",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"self",
".",
"bmodel",
".",
"predict_local",
"(",
"x",
")",
"raise",
"Exception",
"(",
"\"not supported type: %s\"",
"%",
"x",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
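A sketch of both predict paths above. Here wrapper is assumed to be an already-constructed KerasModelWrapper; the input shape is illustrative.

import numpy as np

x = np.random.rand(16, 28, 28, 1).astype("float32")
local_preds = wrapper.predict(x)                        # local path, goes through predict_local
dist_preds = wrapper.predict(x, is_distributed=True)    # converted to RDD[Sample] internally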
|
test
|
KerasModelWrapper.fit
|
Optimize the model with the given options
:param x: ndarray or list of ndarrays for local mode,
or RDD[Sample] for distributed mode
:param y: ndarray or list of ndarrays for local mode; should be None for distributed mode.
:param is_distributed: whether to train in local mode or distributed mode; the default value is False.
NB: if is_distributed=true, x should be RDD[Sample] and y should be None
:return:
|
pyspark/bigdl/keras/backend.py
|
def fit(self, x, y=None, batch_size=32, nb_epoch=10, verbose=1, callbacks=None,
validation_split=0., validation_data=None, shuffle=True,
class_weight=None, sample_weight=None, initial_epoch=0, is_distributed=False):
"""Optimize the model by the given options
:param x: ndarray or list of ndarray for local mode.
RDD[Sample] for distributed mode
:param y: ndarray or list of ndarray for local mode and would be None for cluster mode.
is_distributed: used to control run in local or cluster. the default value is False.
NB: if is_distributed=true, x should be RDD[Sample] and y should be None
:param is_distributed: Whether to train in local mode or distributed mode
:return:
A Numpy array or RDD[Sample] of predictions.
"""
if callbacks:
raise Exception("We don't support callbacks in fit for now")
if class_weight:
unsupport_exp("class_weight")
if sample_weight:
unsupport_exp("sample_weight")
if initial_epoch != 0:
unsupport_exp("initial_epoch")
if shuffle != True:
unsupport_exp("shuffle")
if validation_split != 0.:
unsupport_exp("validation_split")
bopt = self.__create_optimizer(x=x,
y=y,
batch_size=batch_size,
nb_epoch=nb_epoch,
validation_data=validation_data,
is_distributed=is_distributed)
bopt.optimize()
|
def fit(self, x, y=None, batch_size=32, nb_epoch=10, verbose=1, callbacks=None,
validation_split=0., validation_data=None, shuffle=True,
class_weight=None, sample_weight=None, initial_epoch=0, is_distributed=False):
"""Optimize the model by the given options
:param x: ndarray or list of ndarray for local mode.
RDD[Sample] for distributed mode
:param y: ndarray or list of ndarray for local mode and would be None for cluster mode.
is_distributed: used to control run in local or cluster. the default value is False.
NB: if is_distributed=true, x should be RDD[Sample] and y should be None
:param is_distributed: Whether to train in local mode or distributed mode
:return:
A Numpy array or RDD[Sample] of predictions.
"""
if callbacks:
raise Exception("We don't support callbacks in fit for now")
if class_weight:
unsupport_exp("class_weight")
if sample_weight:
unsupport_exp("sample_weight")
if initial_epoch != 0:
unsupport_exp("initial_epoch")
if shuffle != True:
unsupport_exp("shuffle")
if validation_split != 0.:
unsupport_exp("validation_split")
bopt = self.__create_optimizer(x=x,
y=y,
batch_size=batch_size,
nb_epoch=nb_epoch,
validation_data=validation_data,
is_distributed=is_distributed)
bopt.optimize()
|
[
"Optimize",
"the",
"model",
"by",
"the",
"given",
"options"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/keras/backend.py#L85-L117
|
[
"def",
"fit",
"(",
"self",
",",
"x",
",",
"y",
"=",
"None",
",",
"batch_size",
"=",
"32",
",",
"nb_epoch",
"=",
"10",
",",
"verbose",
"=",
"1",
",",
"callbacks",
"=",
"None",
",",
"validation_split",
"=",
"0.",
",",
"validation_data",
"=",
"None",
",",
"shuffle",
"=",
"True",
",",
"class_weight",
"=",
"None",
",",
"sample_weight",
"=",
"None",
",",
"initial_epoch",
"=",
"0",
",",
"is_distributed",
"=",
"False",
")",
":",
"if",
"callbacks",
":",
"raise",
"Exception",
"(",
"\"We don't support callbacks in fit for now\"",
")",
"if",
"class_weight",
":",
"unsupport_exp",
"(",
"\"class_weight\"",
")",
"if",
"sample_weight",
":",
"unsupport_exp",
"(",
"\"sample_weight\"",
")",
"if",
"initial_epoch",
"!=",
"0",
":",
"unsupport_exp",
"(",
"\"initial_epoch\"",
")",
"if",
"shuffle",
"!=",
"True",
":",
"unsupport_exp",
"(",
"\"shuffle\"",
")",
"if",
"validation_split",
"!=",
"0.",
":",
"unsupport_exp",
"(",
"\"validation_split\"",
")",
"bopt",
"=",
"self",
".",
"__create_optimizer",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"batch_size",
"=",
"batch_size",
",",
"nb_epoch",
"=",
"nb_epoch",
",",
"validation_data",
"=",
"validation_data",
",",
"is_distributed",
"=",
"is_distributed",
")",
"bopt",
".",
"optimize",
"(",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
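A sketch of a fit call matching the signature above; wrapper, shapes and label values are illustrative assumptions.

import numpy as np

x = np.random.rand(128, 28, 28, 1).astype("float32")
y = np.random.randint(0, 10, size=(128,))
# For is_distributed=True the docstring above asks for x as RDD[Sample] and y=None instead.
wrapper.fit(x, y, batch_size=32, nb_epoch=2)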
|
test
|
DLImageTransformer.transform
|
Apply the transformer to the images in "inputCol" and store the transformed result
into "outputCols"
|
pyspark/bigdl/dlframes/dl_image_transformer.py
|
def transform(self, dataset):
"""
Apply the transformer to the images in "inputCol" and store the transformed result
into "outputCols"
"""
self._transfer_params_to_java()
return callBigDlFunc(self.bigdl_type, "dlImageTransform", self.value, dataset)
|
def transform(self, dataset):
"""
Apply the transformer to the images in "inputCol" and store the transformed result
into "outputCols"
"""
self._transfer_params_to_java()
return callBigDlFunc(self.bigdl_type, "dlImageTransform", self.value, dataset)
|
[
"Apply",
"the",
"transformer",
"to",
"the",
"images",
"in",
"inputCol",
"and",
"store",
"the",
"transformed",
"result",
"into",
"outputCols"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/dlframes/dl_image_transformer.py#L44-L50
|
[
"def",
"transform",
"(",
"self",
",",
"dataset",
")",
":",
"self",
".",
"_transfer_params_to_java",
"(",
")",
"return",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"dlImageTransform\"",
",",
"self",
".",
"value",
",",
"dataset",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
save_keras_definition
|
Save a Keras model definition to JSON at the given path
|
pyspark/bigdl/examples/keras/keras_utils.py
|
def save_keras_definition(keras_model, path):
"""
Save a Keras model definition to JSON with given path
"""
model_json = keras_model.to_json()
with open(path, "w") as json_file:
json_file.write(model_json)
|
def save_keras_definition(keras_model, path):
"""
Save a Keras model definition to JSON with given path
"""
model_json = keras_model.to_json()
with open(path, "w") as json_file:
json_file.write(model_json)
|
[
"Save",
"a",
"Keras",
"model",
"definition",
"to",
"JSON",
"with",
"given",
"path"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/examples/keras/keras_utils.py#L20-L26
|
[
"def",
"save_keras_definition",
"(",
"keras_model",
",",
"path",
")",
":",
"model_json",
"=",
"keras_model",
".",
"to_json",
"(",
")",
"with",
"open",
"(",
"path",
",",
"\"w\"",
")",
"as",
"json_file",
":",
"json_file",
".",
"write",
"(",
"model_json",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
get_mnist
|
Download or load MNIST dataset to/from the specified path.
Normalize and transform input data into an RDD of Sample
|
pyspark/bigdl/examples/keras/mnist_cnn.py
|
def get_mnist(sc, data_type="train", location="/tmp/mnist"):
"""
Download or load MNIST dataset to/from the specified path.
Normalize and transform input data into an RDD of Sample
"""
from bigdl.dataset import mnist
from bigdl.dataset.transformer import normalizer
(images, labels) = mnist.read_data_sets(location, data_type)
images = images.reshape((images.shape[0], ) + input_shape)
images = sc.parallelize(images)
labels = sc.parallelize(labels + 1) # Target start from 1 in BigDL
record = images.zip(labels).map(lambda rec_tuple: (normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),
rec_tuple[1])) \
.map(lambda t: Sample.from_ndarray(t[0], t[1]))
return record
|
def get_mnist(sc, data_type="train", location="/tmp/mnist"):
"""
Download or load MNIST dataset to/from the specified path.
Normalize and transform input data into an RDD of Sample
"""
from bigdl.dataset import mnist
from bigdl.dataset.transformer import normalizer
(images, labels) = mnist.read_data_sets(location, data_type)
images = images.reshape((images.shape[0], ) + input_shape)
images = sc.parallelize(images)
labels = sc.parallelize(labels + 1) # Target start from 1 in BigDL
record = images.zip(labels).map(lambda rec_tuple: (normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),
rec_tuple[1])) \
.map(lambda t: Sample.from_ndarray(t[0], t[1]))
return record
|
[
"Download",
"or",
"load",
"MNIST",
"dataset",
"to",
"/",
"from",
"the",
"specified",
"path",
".",
"Normalize",
"and",
"transform",
"input",
"data",
"into",
"an",
"RDD",
"of",
"Sample"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/examples/keras/mnist_cnn.py#L34-L48
|
[
"def",
"get_mnist",
"(",
"sc",
",",
"data_type",
"=",
"\"train\"",
",",
"location",
"=",
"\"/tmp/mnist\"",
")",
":",
"from",
"bigdl",
".",
"dataset",
"import",
"mnist",
"from",
"bigdl",
".",
"dataset",
".",
"transformer",
"import",
"normalizer",
"(",
"images",
",",
"labels",
")",
"=",
"mnist",
".",
"read_data_sets",
"(",
"location",
",",
"data_type",
")",
"images",
"=",
"images",
".",
"reshape",
"(",
"(",
"images",
".",
"shape",
"[",
"0",
"]",
",",
")",
"+",
"input_shape",
")",
"images",
"=",
"sc",
".",
"parallelize",
"(",
"images",
")",
"labels",
"=",
"sc",
".",
"parallelize",
"(",
"labels",
"+",
"1",
")",
"# Target start from 1 in BigDL",
"record",
"=",
"images",
".",
"zip",
"(",
"labels",
")",
".",
"map",
"(",
"lambda",
"rec_tuple",
":",
"(",
"normalizer",
"(",
"rec_tuple",
"[",
"0",
"]",
",",
"mnist",
".",
"TRAIN_MEAN",
",",
"mnist",
".",
"TRAIN_STD",
")",
",",
"rec_tuple",
"[",
"1",
"]",
")",
")",
".",
"map",
"(",
"lambda",
"t",
":",
"Sample",
".",
"from_ndarray",
"(",
"t",
"[",
"0",
"]",
",",
"t",
"[",
"1",
"]",
")",
")",
"return",
"record"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
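A sketch of calling get_mnist; note that it relies on a module-level input_shape defined elsewhere in mnist_cnn.py, so it is only meaningful inside that script. sc is an existing SparkContext.

train_rdd = get_mnist(sc, "train", "/tmp/mnist")
test_rdd = get_mnist(sc, "test", "/tmp/mnist")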
|
test
|
build_keras_model
|
Define a convnet model in Keras 1.2.2
|
pyspark/bigdl/examples/keras/mnist_cnn.py
|
def build_keras_model():
"""
Define a convnet model in Keras 1.2.2
"""
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
keras_model = Sequential()
keras_model.add(Convolution2D(32, 3, 3, border_mode='valid',
input_shape=input_shape))
keras_model.add(Activation('relu'))
keras_model.add(Convolution2D(32, 3, 3))
keras_model.add(Activation('relu'))
keras_model.add(MaxPooling2D(pool_size=(2, 2)))
keras_model.add(Dropout(0.25))
keras_model.add(Flatten())
keras_model.add(Dense(128))
keras_model.add(Activation('relu'))
keras_model.add(Dropout(0.5))
keras_model.add(Dense(10))
keras_model.add(Activation('softmax'))
return keras_model
|
def build_keras_model():
"""
Define a convnet model in Keras 1.2.2
"""
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
keras_model = Sequential()
keras_model.add(Convolution2D(32, 3, 3, border_mode='valid',
input_shape=input_shape))
keras_model.add(Activation('relu'))
keras_model.add(Convolution2D(32, 3, 3))
keras_model.add(Activation('relu'))
keras_model.add(MaxPooling2D(pool_size=(2, 2)))
keras_model.add(Dropout(0.25))
keras_model.add(Flatten())
keras_model.add(Dense(128))
keras_model.add(Activation('relu'))
keras_model.add(Dropout(0.5))
keras_model.add(Dense(10))
keras_model.add(Activation('softmax'))
return keras_model
|
[
"Define",
"a",
"convnet",
"model",
"in",
"Keras",
"1",
".",
"2",
".",
"2"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/examples/keras/mnist_cnn.py#L51-L73
|
[
"def",
"build_keras_model",
"(",
")",
":",
"from",
"keras",
".",
"models",
"import",
"Sequential",
"from",
"keras",
".",
"layers",
"import",
"Dense",
",",
"Dropout",
",",
"Activation",
",",
"Flatten",
"from",
"keras",
".",
"layers",
"import",
"Convolution2D",
",",
"MaxPooling2D",
"keras_model",
"=",
"Sequential",
"(",
")",
"keras_model",
".",
"add",
"(",
"Convolution2D",
"(",
"32",
",",
"3",
",",
"3",
",",
"border_mode",
"=",
"'valid'",
",",
"input_shape",
"=",
"input_shape",
")",
")",
"keras_model",
".",
"add",
"(",
"Activation",
"(",
"'relu'",
")",
")",
"keras_model",
".",
"add",
"(",
"Convolution2D",
"(",
"32",
",",
"3",
",",
"3",
")",
")",
"keras_model",
".",
"add",
"(",
"Activation",
"(",
"'relu'",
")",
")",
"keras_model",
".",
"add",
"(",
"MaxPooling2D",
"(",
"pool_size",
"=",
"(",
"2",
",",
"2",
")",
")",
")",
"keras_model",
".",
"add",
"(",
"Dropout",
"(",
"0.25",
")",
")",
"keras_model",
".",
"add",
"(",
"Flatten",
"(",
")",
")",
"keras_model",
".",
"add",
"(",
"Dense",
"(",
"128",
")",
")",
"keras_model",
".",
"add",
"(",
"Activation",
"(",
"'relu'",
")",
")",
"keras_model",
".",
"add",
"(",
"Dropout",
"(",
"0.5",
")",
")",
"keras_model",
".",
"add",
"(",
"Dense",
"(",
"10",
")",
")",
"keras_model",
".",
"add",
"(",
"Activation",
"(",
"'softmax'",
")",
")",
"return",
"keras_model"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
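A short sketch tying build_keras_model to the save_keras_definition helper from the same example set; the output path is illustrative.

keras_model = build_keras_model()
save_keras_definition(keras_model, "/tmp/mnist_cnn.json")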
|
test
|
SharedStaticUtils.load
|
Load a pre-trained Bigdl model.
:param path: The path containing the pre-trained model.
:return: A pre-trained model.
|
pyspark/bigdl/nn/layer.py
|
def load(path, bigdl_type="float"):
"""
Load a pre-trained Bigdl model.
:param path: The path containing the pre-trained model.
:return: A pre-trained model.
"""
jmodel = callBigDlFunc(bigdl_type, "loadBigDL", path)
return Layer.of(jmodel)
|
def load(path, bigdl_type="float"):
"""
Load a pre-trained Bigdl model.
:param path: The path containing the pre-trained model.
:return: A pre-trained model.
"""
jmodel = callBigDlFunc(bigdl_type, "loadBigDL", path)
return Layer.of(jmodel)
|
[
"Load",
"a",
"pre",
"-",
"trained",
"Bigdl",
"model",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L68-L76
|
[
"def",
"load",
"(",
"path",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"jmodel",
"=",
"callBigDlFunc",
"(",
"bigdl_type",
",",
"\"loadBigDL\"",
",",
"path",
")",
"return",
"Layer",
".",
"of",
"(",
"jmodel",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
SharedStaticUtils.of
|
Create a Python Layer based on the given Java value and its real type.
:param jvalue: Java object created by Py4j
:return: A Python Layer
|
pyspark/bigdl/nn/layer.py
|
def of(jvalue, bigdl_type="float"):
"""
Create a Python Layer base on the given java value and the real type.
:param jvalue: Java object create by Py4j
:return: A Python Layer
"""
def get_py_name(jclass_name):
if jclass_name == "StaticGraph" or jclass_name == "DynamicGraph":
return "Model"
elif jclass_name == "Input":
return "Layer"
else:
return jclass_name
jname = callBigDlFunc(bigdl_type,
"getRealClassNameOfJValue",
jvalue)
jpackage_name = ".".join(jname.split(".")[:-1])
pclass_name = get_py_name(jname.split(".")[-1])
if "com.intel.analytics.bigdl.nn.keras.Model" == jname or \
"com.intel.analytics.bigdl.nn.keras.Sequential" == jname:
base_module = importlib.import_module('bigdl.nn.keras.topology')
elif "com.intel.analytics.bigdl.nn.keras" == jpackage_name:
base_module = importlib.import_module('bigdl.nn.keras.layer')
else:
base_module = importlib.import_module('bigdl.nn.layer')
realClassName = "Layer" # The top base class
if pclass_name in dir(base_module):
realClassName = pclass_name
module = getattr(base_module, realClassName)
jvalue_creator = getattr(module, "from_jvalue")
model = jvalue_creator(jvalue, bigdl_type)
return model
|
def of(jvalue, bigdl_type="float"):
"""
Create a Python Layer base on the given java value and the real type.
:param jvalue: Java object create by Py4j
:return: A Python Layer
"""
def get_py_name(jclass_name):
if jclass_name == "StaticGraph" or jclass_name == "DynamicGraph":
return "Model"
elif jclass_name == "Input":
return "Layer"
else:
return jclass_name
jname = callBigDlFunc(bigdl_type,
"getRealClassNameOfJValue",
jvalue)
jpackage_name = ".".join(jname.split(".")[:-1])
pclass_name = get_py_name(jname.split(".")[-1])
if "com.intel.analytics.bigdl.nn.keras.Model" == jname or \
"com.intel.analytics.bigdl.nn.keras.Sequential" == jname:
base_module = importlib.import_module('bigdl.nn.keras.topology')
elif "com.intel.analytics.bigdl.nn.keras" == jpackage_name:
base_module = importlib.import_module('bigdl.nn.keras.layer')
else:
base_module = importlib.import_module('bigdl.nn.layer')
realClassName = "Layer" # The top base class
if pclass_name in dir(base_module):
realClassName = pclass_name
module = getattr(base_module, realClassName)
jvalue_creator = getattr(module, "from_jvalue")
model = jvalue_creator(jvalue, bigdl_type)
return model
|
[
"Create",
"a",
"Python",
"Layer",
"base",
"on",
"the",
"given",
"java",
"value",
"and",
"the",
"real",
"type",
".",
":",
"param",
"jvalue",
":",
"Java",
"object",
"create",
"by",
"Py4j",
":",
"return",
":",
"A",
"Python",
"Layer"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L80-L115
|
[
"def",
"of",
"(",
"jvalue",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"def",
"get_py_name",
"(",
"jclass_name",
")",
":",
"if",
"jclass_name",
"==",
"\"StaticGraph\"",
"or",
"jclass_name",
"==",
"\"DynamicGraph\"",
":",
"return",
"\"Model\"",
"elif",
"jclass_name",
"==",
"\"Input\"",
":",
"return",
"\"Layer\"",
"else",
":",
"return",
"jclass_name",
"jname",
"=",
"callBigDlFunc",
"(",
"bigdl_type",
",",
"\"getRealClassNameOfJValue\"",
",",
"jvalue",
")",
"jpackage_name",
"=",
"\".\"",
".",
"join",
"(",
"jname",
".",
"split",
"(",
"\".\"",
")",
"[",
":",
"-",
"1",
"]",
")",
"pclass_name",
"=",
"get_py_name",
"(",
"jname",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"1",
"]",
")",
"if",
"\"com.intel.analytics.bigdl.nn.keras.Model\"",
"==",
"jname",
"or",
"\"com.intel.analytics.bigdl.nn.keras.Sequential\"",
"==",
"jname",
":",
"base_module",
"=",
"importlib",
".",
"import_module",
"(",
"'bigdl.nn.keras.topology'",
")",
"elif",
"\"com.intel.analytics.bigdl.nn.keras\"",
"==",
"jpackage_name",
":",
"base_module",
"=",
"importlib",
".",
"import_module",
"(",
"'bigdl.nn.keras.layer'",
")",
"else",
":",
"base_module",
"=",
"importlib",
".",
"import_module",
"(",
"'bigdl.nn.layer'",
")",
"realClassName",
"=",
"\"Layer\"",
"# The top base class",
"if",
"pclass_name",
"in",
"dir",
"(",
"base_module",
")",
":",
"realClassName",
"=",
"pclass_name",
"module",
"=",
"getattr",
"(",
"base_module",
",",
"realClassName",
")",
"jvalue_creator",
"=",
"getattr",
"(",
"module",
",",
"\"from_jvalue\"",
")",
"model",
"=",
"jvalue_creator",
"(",
"jvalue",
",",
"bigdl_type",
")",
"return",
"model"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
Layer.set_running_mean
|
Set the running mean of the layer.
Only use this method for a BatchNormalization layer.
:param running_mean: a Numpy array.
|
pyspark/bigdl/nn/layer.py
|
def set_running_mean(self, running_mean):
"""
Set the running mean of the layer.
Only use this method for a BatchNormalization layer.
:param running_mean: a Numpy array.
"""
callBigDlFunc(self.bigdl_type, "setRunningMean",
self.value, JTensor.from_ndarray(running_mean))
return self
|
def set_running_mean(self, running_mean):
"""
Set the running mean of the layer.
Only use this method for a BatchNormalization layer.
:param running_mean: a Numpy array.
"""
callBigDlFunc(self.bigdl_type, "setRunningMean",
self.value, JTensor.from_ndarray(running_mean))
return self
|
[
"Set",
"the",
"running",
"mean",
"of",
"the",
"layer",
".",
"Only",
"use",
"this",
"method",
"for",
"a",
"BatchNormalization",
"layer",
".",
":",
"param",
"running_mean",
":",
"a",
"Numpy",
"array",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L134-L142
|
[
"def",
"set_running_mean",
"(",
"self",
",",
"running_mean",
")",
":",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"setRunningMean\"",
",",
"self",
".",
"value",
",",
"JTensor",
".",
"from_ndarray",
"(",
"running_mean",
")",
")",
"return",
"self"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
Layer.set_running_std
|
Set the running variance of the layer.
Only use this method for a BatchNormalization layer.
:param running_std: a Numpy array.
|
pyspark/bigdl/nn/layer.py
|
def set_running_std(self, running_std):
"""
Set the running variance of the layer.
Only use this method for a BatchNormalization layer.
:param running_std: a Numpy array.
"""
callBigDlFunc(self.bigdl_type, "setRunningStd",
self.value, JTensor.from_ndarray(running_std))
return self
|
def set_running_std(self, running_std):
"""
Set the running variance of the layer.
Only use this method for a BatchNormalization layer.
:param running_std: a Numpy array.
"""
callBigDlFunc(self.bigdl_type, "setRunningStd",
self.value, JTensor.from_ndarray(running_std))
return self
|
[
"Set",
"the",
"running",
"variance",
"of",
"the",
"layer",
".",
"Only",
"use",
"this",
"method",
"for",
"a",
"BatchNormalization",
"layer",
".",
":",
"param",
"running_std",
":",
"a",
"Numpy",
"array",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L144-L152
|
[
"def",
"set_running_std",
"(",
"self",
",",
"running_std",
")",
":",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"setRunningStd\"",
",",
"self",
".",
"value",
",",
"JTensor",
".",
"from_ndarray",
"(",
"running_std",
")",
")",
"return",
"self"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
Layer.from_jvalue
|
Create a Python Model based on the given Java value
:param jvalue: Java object created by Py4j
:return: A Python Model
|
pyspark/bigdl/nn/layer.py
|
def from_jvalue(jvalue, bigdl_type="float"):
"""
Create a Python Model base on the given java value
:param jvalue: Java object create by Py4j
:return: A Python Model
"""
model = Layer(jvalue=jvalue, bigdl_type=bigdl_type)
model.value = jvalue
return model
|
def from_jvalue(jvalue, bigdl_type="float"):
"""
Create a Python Model base on the given java value
:param jvalue: Java object create by Py4j
:return: A Python Model
"""
model = Layer(jvalue=jvalue, bigdl_type=bigdl_type)
model.value = jvalue
return model
|
[
"Create",
"a",
"Python",
"Model",
"base",
"on",
"the",
"given",
"java",
"value",
":",
"param",
"jvalue",
":",
"Java",
"object",
"create",
"by",
"Py4j",
":",
"return",
":",
"A",
"Python",
"Model"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L177-L185
|
[
"def",
"from_jvalue",
"(",
"jvalue",
",",
"bigdl_type",
"=",
"\"float\"",
")",
":",
"model",
"=",
"Layer",
"(",
"jvalue",
"=",
"jvalue",
",",
"bigdl_type",
"=",
"bigdl_type",
")",
"model",
".",
"value",
"=",
"jvalue",
"return",
"model"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
Layer.check_input
|
:param input: ndarray or list of ndarray or JTensor or list of JTensor.
:return: (list of JTensor, isTable)
|
pyspark/bigdl/nn/layer.py
|
def check_input(input):
"""
:param input: ndarray or list of ndarray or JTensor or list of JTensor.
:return: (list of JTensor, isTable)
"""
def to_jtensor(i):
if isinstance(i, np.ndarray):
return JTensor.from_ndarray(i)
elif isinstance(i, JTensor):
return i
else:
raise Exception("Error unknown input type %s" % type(i))
if type(input) is list:
if len(input) == 0:
raise Exception('Error when checking: empty input')
return list(map(lambda i: to_jtensor(i), input)), True
else:
return [to_jtensor(input)], False
|
def check_input(input):
"""
:param input: ndarray or list of ndarray or JTensor or list of JTensor.
:return: (list of JTensor, isTable)
"""
def to_jtensor(i):
if isinstance(i, np.ndarray):
return JTensor.from_ndarray(i)
elif isinstance(i, JTensor):
return i
else:
raise Exception("Error unknown input type %s" % type(i))
if type(input) is list:
if len(input) == 0:
raise Exception('Error when checking: empty input')
return list(map(lambda i: to_jtensor(i), input)), True
else:
return [to_jtensor(input)], False
|
[
":",
"param",
"input",
":",
"ndarray",
"or",
"list",
"of",
"ndarray",
"or",
"JTensor",
"or",
"list",
"of",
"JTensor",
".",
":",
"return",
":",
"(",
"list",
"of",
"JTensor",
"isTable",
")"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L218-L235
|
[
"def",
"check_input",
"(",
"input",
")",
":",
"def",
"to_jtensor",
"(",
"i",
")",
":",
"if",
"isinstance",
"(",
"i",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"JTensor",
".",
"from_ndarray",
"(",
"i",
")",
"elif",
"isinstance",
"(",
"i",
",",
"JTensor",
")",
":",
"return",
"i",
"else",
":",
"raise",
"Exception",
"(",
"\"Error unknown input type %s\"",
"%",
"type",
"(",
"i",
")",
")",
"if",
"type",
"(",
"input",
")",
"is",
"list",
":",
"if",
"len",
"(",
"input",
")",
"==",
"0",
":",
"raise",
"Exception",
"(",
"'Error when checking: empty input'",
")",
"return",
"list",
"(",
"map",
"(",
"lambda",
"i",
":",
"to_jtensor",
"(",
"i",
")",
",",
"input",
")",
")",
",",
"True",
"else",
":",
"return",
"[",
"to_jtensor",
"(",
"input",
")",
"]",
",",
"False"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
Layer.forward
|
NB: It's for debug only, please use optimizer.optimize() in production.
Takes an input object, and computes the corresponding output of the module
:param input: ndarray or list of ndarray, or JTensor or list of JTensor.
:return: ndarray or list of ndarray
|
pyspark/bigdl/nn/layer.py
|
def forward(self, input):
"""
NB: It's for debug only, please use optimizer.optimize() in production.
Takes an input object, and computes the corresponding output of the module
:param input: ndarray or list of ndarray
:param input: ndarray or list of ndarray or JTensor or list of JTensor.
:return: ndarray or list of ndarray
"""
jinput, input_is_table = self.check_input(input)
output = callBigDlFunc(self.bigdl_type,
"modelForward",
self.value,
jinput,
input_is_table)
return self.convert_output(output)
|
def forward(self, input):
"""
NB: It's for debug only, please use optimizer.optimize() in production.
Takes an input object, and computes the corresponding output of the module
:param input: ndarray or list of ndarray
:param input: ndarray or list of ndarray or JTensor or list of JTensor.
:return: ndarray or list of ndarray
"""
jinput, input_is_table = self.check_input(input)
output = callBigDlFunc(self.bigdl_type,
"modelForward",
self.value,
jinput,
input_is_table)
return self.convert_output(output)
|
[
"NB",
":",
"It",
"s",
"for",
"debug",
"only",
"please",
"use",
"optimizer",
".",
"optimize",
"()",
"in",
"production",
".",
"Takes",
"an",
"input",
"object",
"and",
"computes",
"the",
"corresponding",
"output",
"of",
"the",
"module"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L246-L261
|
[
"def",
"forward",
"(",
"self",
",",
"input",
")",
":",
"jinput",
",",
"input_is_table",
"=",
"self",
".",
"check_input",
"(",
"input",
")",
"output",
"=",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"modelForward\"",
",",
"self",
".",
"value",
",",
"jinput",
",",
"input_is_table",
")",
"return",
"self",
".",
"convert_output",
"(",
"output",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
Layer.backward
|
NB: It's for debug only, please use optimizer.optimize() in production.
Performs a back-propagation step through the module, with respect to the given input. In
general this method assumes that forward(input) has been called before, with the same
input. This is necessary for optimization reasons. If you do not respect this rule, backward()
will compute incorrect gradients.
:param input: ndarray or list of ndarray or JTensor or list of JTensor.
:param grad_output: ndarray or list of ndarray or JTensor or list of JTensor.
:return: ndarray or list of ndarray
|
pyspark/bigdl/nn/layer.py
|
def backward(self, input, grad_output):
"""
NB: It's for debug only, please use optimizer.optimize() in production.
Performs a back-propagation step through the module, with respect to the given input. In
general this method makes the assumption forward(input) has been called before, with the same
input. This is necessary for optimization reasons. If you do not respect this rule, backward()
will compute incorrect gradients.
:param input: ndarray or list of ndarray or JTensor or list of JTensor.
:param grad_output: ndarray or list of ndarray or JTensor or list of JTensor.
:return: ndarray or list of ndarray
"""
jinput, input_is_table = self.check_input(input)
jgrad_output, grad_output_is_table = self.check_input(grad_output)
output = callBigDlFunc(self.bigdl_type,
"modelBackward",
self.value,
jinput,
input_is_table,
jgrad_output,
grad_output_is_table)
return self.convert_output(output)
|
def backward(self, input, grad_output):
"""
NB: It's for debug only, please use optimizer.optimize() in production.
Performs a back-propagation step through the module, with respect to the given input. In
general this method makes the assumption forward(input) has been called before, with the same
input. This is necessary for optimization reasons. If you do not respect this rule, backward()
will compute incorrect gradients.
:param input: ndarray or list of ndarray or JTensor or list of JTensor.
:param grad_output: ndarray or list of ndarray or JTensor or list of JTensor.
:return: ndarray or list of ndarray
"""
jinput, input_is_table = self.check_input(input)
jgrad_output, grad_output_is_table = self.check_input(grad_output)
output = callBigDlFunc(self.bigdl_type,
"modelBackward",
self.value,
jinput,
input_is_table,
jgrad_output,
grad_output_is_table)
return self.convert_output(output)
|
[
"NB",
":",
"It",
"s",
"for",
"debug",
"only",
"please",
"use",
"optimizer",
".",
"optimize",
"()",
"in",
"production",
".",
"Performs",
"a",
"back",
"-",
"propagation",
"step",
"through",
"the",
"module",
"with",
"respect",
"to",
"the",
"given",
"input",
".",
"In",
"general",
"this",
"method",
"makes",
"the",
"assumption",
"forward",
"(",
"input",
")",
"has",
"been",
"called",
"before",
"with",
"the",
"same",
"input",
".",
"This",
"is",
"necessary",
"for",
"optimization",
"reasons",
".",
"If",
"you",
"do",
"not",
"respect",
"this",
"rule",
"backward",
"()",
"will",
"compute",
"incorrect",
"gradients",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L263-L284
|
[
"def",
"backward",
"(",
"self",
",",
"input",
",",
"grad_output",
")",
":",
"jinput",
",",
"input_is_table",
"=",
"self",
".",
"check_input",
"(",
"input",
")",
"jgrad_output",
",",
"grad_output_is_table",
"=",
"self",
".",
"check_input",
"(",
"grad_output",
")",
"output",
"=",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"modelBackward\"",
",",
"self",
".",
"value",
",",
"jinput",
",",
"input_is_table",
",",
"jgrad_output",
",",
"grad_output_is_table",
")",
"return",
"self",
".",
"convert_output",
"(",
"output",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
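A debug-only round trip through forward and backward, as the docstrings above stress. Linear and the tensor shapes are assumptions, not taken from these records.

import numpy as np
from bigdl.nn.layer import Linear

layer = Linear(3, 2)                               # assumed layer: 3 inputs, 2 outputs
x = np.random.rand(4, 3).astype("float32")         # batch of 4 samples
out = layer.forward(x)                             # ndarray of shape (4, 2)
grad_in = layer.backward(x, np.ones_like(out))     # gradient w.r.t. the input, shape (4, 3)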
|
test
|
Layer.parameters
|
Get the model parameters, which contain: weight, bias, gradBias, gradWeight
:return: dict(layername -> dict(parametername -> ndarray))
|
pyspark/bigdl/nn/layer.py
|
def parameters(self):
"""
Get the model parameters which containing: weight, bias, gradBias, gradWeight
:return: dict(layername -> dict(parametername -> ndarray))
"""
name_to_params = callBigDlFunc(self.bigdl_type,
"modelGetParameters",
self.value)
def to_ndarray(params):
return dict((param_name,
np.array(values[0], dtype=self.get_dtype()).reshape(
values[1])) for param_name, values in
params.items())
return dict((layer_name, to_ndarray(params)) for layer_name, params in
name_to_params.items())
|
def parameters(self):
"""
Get the model parameters which containing: weight, bias, gradBias, gradWeight
:return: dict(layername -> dict(parametername -> ndarray))
"""
name_to_params = callBigDlFunc(self.bigdl_type,
"modelGetParameters",
self.value)
def to_ndarray(params):
return dict((param_name,
np.array(values[0], dtype=self.get_dtype()).reshape(
values[1])) for param_name, values in
params.items())
return dict((layer_name, to_ndarray(params)) for layer_name, params in
name_to_params.items())
|
[
"Get",
"the",
"model",
"parameters",
"which",
"containing",
":",
"weight",
"bias",
"gradBias",
"gradWeight"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L310-L327
|
[
"def",
"parameters",
"(",
"self",
")",
":",
"name_to_params",
"=",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"modelGetParameters\"",
",",
"self",
".",
"value",
")",
"def",
"to_ndarray",
"(",
"params",
")",
":",
"return",
"dict",
"(",
"(",
"param_name",
",",
"np",
".",
"array",
"(",
"values",
"[",
"0",
"]",
",",
"dtype",
"=",
"self",
".",
"get_dtype",
"(",
")",
")",
".",
"reshape",
"(",
"values",
"[",
"1",
"]",
")",
")",
"for",
"param_name",
",",
"values",
"in",
"params",
".",
"items",
"(",
")",
")",
"return",
"dict",
"(",
"(",
"layer_name",
",",
"to_ndarray",
"(",
"params",
")",
")",
"for",
"layer_name",
",",
"params",
"in",
"name_to_params",
".",
"items",
"(",
")",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
Layer.evaluate
|
No argument passed in:
Evaluate the model to set train = false, useful when doing test/forward
:return: layer itself
Three arguments passed in:
A method to benchmark the model quality.
:param dataset: the input data
:param batch_size: batch size
:param val_methods: a list of validation methods, e.g. Top1Accuracy, Top5Accuracy and Loss.
:return: a list of the metrics result
|
pyspark/bigdl/nn/layer.py
|
def evaluate(self, *args):
"""
No argument passed in:
Evaluate the model to set train = false, useful when doing test/forward
:return: layer itself
Three arguments passed in:
A method to benchmark the model quality.
:param dataset: the input data
:param batch_size: batch size
:param val_methods: a list of validation methods. i.e: Top1Accuracy,Top5Accuracy and Loss.
:return: a list of the metrics result
"""
if len(args) == 0:
callBigDlFunc(self.bigdl_type,
"evaluate", self.value)
return self
elif len(args) == 3:
dataset, batch_size, val_methods = args
if (isinstance(dataset, ImageFrame)):
return callBigDlFunc(self.bigdl_type,
"modelEvaluateImageFrame",
self.value,
dataset, batch_size, val_methods)
else:
return callBigDlFunc(self.bigdl_type,
"modelEvaluate",
self.value,
dataset, batch_size, val_methods)
else:
raise Exception("Error when calling evaluate(): it takes no argument or exactly three arguments only")
|
def evaluate(self, *args):
"""
No argument passed in:
Evaluate the model to set train = false, useful when doing test/forward
:return: layer itself
Three arguments passed in:
A method to benchmark the model quality.
:param dataset: the input data
:param batch_size: batch size
:param val_methods: a list of validation methods. i.e: Top1Accuracy,Top5Accuracy and Loss.
:return: a list of the metrics result
"""
if len(args) == 0:
callBigDlFunc(self.bigdl_type,
"evaluate", self.value)
return self
elif len(args) == 3:
dataset, batch_size, val_methods = args
if (isinstance(dataset, ImageFrame)):
return callBigDlFunc(self.bigdl_type,
"modelEvaluateImageFrame",
self.value,
dataset, batch_size, val_methods)
else:
return callBigDlFunc(self.bigdl_type,
"modelEvaluate",
self.value,
dataset, batch_size, val_methods)
else:
raise Exception("Error when calling evaluate(): it takes no argument or exactly three arguments only")
|
[
"No",
"argument",
"passed",
"in",
":",
"Evaluate",
"the",
"model",
"to",
"set",
"train",
"=",
"false",
"useful",
"when",
"doing",
"test",
"/",
"forward",
":",
"return",
":",
"layer",
"itself"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L329-L360
|
[
"def",
"evaluate",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"0",
":",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"evaluate\"",
",",
"self",
".",
"value",
")",
"return",
"self",
"elif",
"len",
"(",
"args",
")",
"==",
"3",
":",
"dataset",
",",
"batch_size",
",",
"val_methods",
"=",
"args",
"if",
"(",
"isinstance",
"(",
"dataset",
",",
"ImageFrame",
")",
")",
":",
"return",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"modelEvaluateImageFrame\"",
",",
"self",
".",
"value",
",",
"dataset",
",",
"batch_size",
",",
"val_methods",
")",
"else",
":",
"return",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"modelEvaluate\"",
",",
"self",
".",
"value",
",",
"dataset",
",",
"batch_size",
",",
"val_methods",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Error when calling evaluate(): it takes no argument or exactly three arguments only\"",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
Layer.predict_local
|
:param X: X can be an ndarray or a list of ndarrays if the model has multiple inputs.
The first dimension of X should be the batch dimension.
:param batch_size: total batch size of prediction.
:return: an ndarray as the prediction result.
|
pyspark/bigdl/nn/layer.py
|
def predict_local(self, X, batch_size = -1):
"""
:param X: X can be a ndarray or list of ndarray if the model has multiple inputs.
The first dimension of X should be batch.
:param batch_size: total batch size of prediction.
:return: a ndarray as the prediction result.
"""
jresults = callBigDlFunc(self.bigdl_type,
"predictLocal",
self.value,
self._to_jtensors(X),
batch_size)
return np.stack([j.to_ndarray()for j in jresults])
|
def predict_local(self, X, batch_size = -1):
"""
:param X: X can be a ndarray or list of ndarray if the model has multiple inputs.
The first dimension of X should be batch.
:param batch_size: total batch size of prediction.
:return: a ndarray as the prediction result.
"""
jresults = callBigDlFunc(self.bigdl_type,
"predictLocal",
self.value,
self._to_jtensors(X),
batch_size)
return np.stack([j.to_ndarray()for j in jresults])
|
[
":",
"param",
"X",
":",
"X",
"can",
"be",
"a",
"ndarray",
"or",
"list",
"of",
"ndarray",
"if",
"the",
"model",
"has",
"multiple",
"inputs",
".",
"The",
"first",
"dimension",
"of",
"X",
"should",
"be",
"batch",
".",
":",
"param",
"batch_size",
":",
"total",
"batch",
"size",
"of",
"prediction",
".",
":",
"return",
":",
"a",
"ndarray",
"as",
"the",
"prediction",
"result",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L372-L386
|
[
"def",
"predict_local",
"(",
"self",
",",
"X",
",",
"batch_size",
"=",
"-",
"1",
")",
":",
"jresults",
"=",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"predictLocal\"",
",",
"self",
".",
"value",
",",
"self",
".",
"_to_jtensors",
"(",
"X",
")",
",",
"batch_size",
")",
"return",
"np",
".",
"stack",
"(",
"[",
"j",
".",
"to_ndarray",
"(",
")",
"for",
"j",
"in",
"jresults",
"]",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
test
|
Layer.predict
|
Model inference based on the given data.
:param features: it can be an ndarray or list of ndarrays for local inference,
or RDD[Sample] for running in distributed fashion
:param batch_size: total batch size of prediction.
:return: ndarray or RDD[Sample], depending on the type of features.
|
pyspark/bigdl/nn/layer.py
|
def predict(self, features, batch_size = -1):
"""
Model inference base on the given data.
:param features: it can be a ndarray or list of ndarray for locally inference
or RDD[Sample] for running in distributed fashion
:param batch_size: total batch size of prediction.
:return: ndarray or RDD[Sample] depend on the the type of features.
"""
if isinstance(features, RDD):
return self.predict_distributed(features, batch_size)
else:
return self.predict_local(features, batch_size)
|
def predict(self, features, batch_size = -1):
"""
Model inference base on the given data.
:param features: it can be a ndarray or list of ndarray for locally inference
or RDD[Sample] for running in distributed fashion
:param batch_size: total batch size of prediction.
:return: ndarray or RDD[Sample] depend on the the type of features.
"""
if isinstance(features, RDD):
return self.predict_distributed(features, batch_size)
else:
return self.predict_local(features, batch_size)
|
[
"Model",
"inference",
"base",
"on",
"the",
"given",
"data",
".",
":",
"param",
"features",
":",
"it",
"can",
"be",
"a",
"ndarray",
"or",
"list",
"of",
"ndarray",
"for",
"locally",
"inference",
"or",
"RDD",
"[",
"Sample",
"]",
"for",
"running",
"in",
"distributed",
"fashion",
":",
"param",
"batch_size",
":",
"total",
"batch",
"size",
"of",
"prediction",
".",
":",
"return",
":",
"ndarray",
"or",
"RDD",
"[",
"Sample",
"]",
"depend",
"on",
"the",
"the",
"type",
"of",
"features",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L401-L412
|
[
"def",
"predict",
"(",
"self",
",",
"features",
",",
"batch_size",
"=",
"-",
"1",
")",
":",
"if",
"isinstance",
"(",
"features",
",",
"RDD",
")",
":",
"return",
"self",
".",
"predict_distributed",
"(",
"features",
",",
"batch_size",
")",
"else",
":",
"return",
"self",
".",
"predict_local",
"(",
"features",
",",
"batch_size",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
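Editorial note on Layer.predict: the method only dispatches on the type of `features` — an RDD goes to predict_distributed, anything else to predict_local. A hedged sketch of both branches follows; the Sample import path, the SparkContext `sc`, and the toy model are assumptions.

import numpy as np
from bigdl.nn.layer import Linear
from bigdl.util.common import Sample  # assumed import path for Sample

model = Linear(3, 2)
X = np.random.rand(4, 3)

# Local branch: a plain ndarray is routed to predict_local.
local_result = model.predict(X)

# Distributed branch: an RDD[Sample] is routed to predict_distributed.
# `sc` is an already-initialised SparkContext (assumed to exist in the session).
sample_rdd = sc.parallelize([Sample.from_ndarray(x, np.array([0.0])) for x in X])
distributed_result = model.predict(sample_rdd)  # lazy RDD of prediction ndarrays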
test
|
Layer.predict_class
|
Model inference base on the given data which returning label
:param features: it can be a ndarray or list of ndarray for locally inference
or RDD[Sample] for running in distributed fashion
:return: ndarray or RDD[Sample] depend on the the type of features.
|
pyspark/bigdl/nn/layer.py
|
def predict_class(self, features):
"""
Model inference base on the given data which returning label
:param features: it can be a ndarray or list of ndarray for locally inference
or RDD[Sample] for running in distributed fashion
:return: ndarray or RDD[Sample] depend on the the type of features.
"""
if isinstance(features, RDD):
return self.predict_class_distributed(features)
else:
return self.predict_class_local(features)
|
def predict_class(self, features):
"""
Model inference base on the given data which returning label
:param features: it can be a ndarray or list of ndarray for locally inference
or RDD[Sample] for running in distributed fashion
:return: ndarray or RDD[Sample] depend on the the type of features.
"""
if isinstance(features, RDD):
return self.predict_class_distributed(features)
else:
return self.predict_class_local(features)
|
[
"Model",
"inference",
"base",
"on",
"the",
"given",
"data",
"which",
"returning",
"label",
":",
"param",
"features",
":",
"it",
"can",
"be",
"a",
"ndarray",
"or",
"list",
"of",
"ndarray",
"for",
"locally",
"inference",
"or",
"RDD",
"[",
"Sample",
"]",
"for",
"running",
"in",
"distributed",
"fashion",
":",
"return",
":",
"ndarray",
"or",
"RDD",
"[",
"Sample",
"]",
"depend",
"on",
"the",
"the",
"type",
"of",
"features",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L414-L424
|
[
"def",
"predict_class",
"(",
"self",
",",
"features",
")",
":",
"if",
"isinstance",
"(",
"features",
",",
"RDD",
")",
":",
"return",
"self",
".",
"predict_class_distributed",
"(",
"features",
")",
"else",
":",
"return",
"self",
".",
"predict_class_local",
"(",
"features",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
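Editorial note on Layer.predict_class: it mirrors predict but returns predicted labels instead of raw outputs, again dispatching on whether `features` is an RDD. Illustrative sketch; the classifier architecture and the one-label-per-row expectation are assumptions about typical BigDL usage.

import numpy as np
from bigdl.nn.layer import Sequential, Linear, SoftMax

# Assumed toy classifier: 3 features -> 2 class scores.
classifier = Sequential().add(Linear(3, 2)).add(SoftMax())

X = np.random.rand(4, 3)
labels = classifier.predict_class(X)  # local path; expected: one predicted label per input row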
test
|
Layer.predict_distributed
|
Model inference base on the given data.
You need to invoke collect() to trigger those action \
as the returning result is an RDD.
:param data_rdd: the data to be predict.
:param batch_size: total batch size of prediction.
:return: An RDD represent the predict result.
|
pyspark/bigdl/nn/layer.py
|
def predict_distributed(self, data_rdd, batch_size = -1):
"""
Model inference base on the given data.
You need to invoke collect() to trigger those action \
as the returning result is an RDD.
:param data_rdd: the data to be predict.
:param batch_size: total batch size of prediction.
:return: An RDD represent the predict result.
"""
result = callBigDlFunc(self.bigdl_type,
"modelPredictRDD", self.value, data_rdd, batch_size)
return result.map(lambda data: data.to_ndarray())
|
def predict_distributed(self, data_rdd, batch_size = -1):
"""
Model inference base on the given data.
You need to invoke collect() to trigger those action \
as the returning result is an RDD.
:param data_rdd: the data to be predict.
:param batch_size: total batch size of prediction.
:return: An RDD represent the predict result.
"""
result = callBigDlFunc(self.bigdl_type,
"modelPredictRDD", self.value, data_rdd, batch_size)
return result.map(lambda data: data.to_ndarray())
|
[
"Model",
"inference",
"base",
"on",
"the",
"given",
"data",
".",
"You",
"need",
"to",
"invoke",
"collect",
"()",
"to",
"trigger",
"those",
"action",
"\\",
"as",
"the",
"returning",
"result",
"is",
"an",
"RDD",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L426-L438
|
[
"def",
"predict_distributed",
"(",
"self",
",",
"data_rdd",
",",
"batch_size",
"=",
"-",
"1",
")",
":",
"result",
"=",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"modelPredictRDD\"",
",",
"self",
".",
"value",
",",
"data_rdd",
",",
"batch_size",
")",
"return",
"result",
".",
"map",
"(",
"lambda",
"data",
":",
"data",
".",
"to_ndarray",
"(",
")",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
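Editorial note on Layer.predict_distributed: the returned RDD is lazy, so nothing is computed until an action such as collect() or take() runs, exactly as the docstring warns. Sketch under assumed setup; `model`, `sc`, and the Sample import path are not taken from the row.

import numpy as np
from bigdl.util.common import Sample  # assumed import path

# Eight dummy samples with 3 features each; the labels are placeholders.
data_rdd = sc.parallelize(
    [Sample.from_ndarray(np.random.rand(3), np.array([0.0])) for _ in range(8)])

pred_rdd = model.predict_distributed(data_rdd, batch_size=8)
first_two = pred_rdd.take(2)  # an action like take() or collect() triggers the actual work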
test
|
Layer.predict_class_distributed
|
module predict, return the predict label
:param data_rdd: the data to be predict.
:return: An RDD represent the predict label.
|
pyspark/bigdl/nn/layer.py
|
def predict_class_distributed(self, data_rdd):
"""
module predict, return the predict label
:param data_rdd: the data to be predict.
:return: An RDD represent the predict label.
"""
result = callBigDlFunc(self.bigdl_type,
"modelPredictClass", self.value, data_rdd)
return result
|
def predict_class_distributed(self, data_rdd):
"""
module predict, return the predict label
:param data_rdd: the data to be predict.
:return: An RDD represent the predict label.
"""
result = callBigDlFunc(self.bigdl_type,
"modelPredictClass", self.value, data_rdd)
return result
|
[
"module",
"predict",
"return",
"the",
"predict",
"label"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L440-L449
|
[
"def",
"predict_class_distributed",
"(",
"self",
",",
"data_rdd",
")",
":",
"result",
"=",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"modelPredictClass\"",
",",
"self",
".",
"value",
",",
"data_rdd",
")",
"return",
"result"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
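Editorial note on Layer.predict_class_distributed: it returns predicted labels as an RDD rather than raw model outputs, and like predict_distributed the result stays lazy until an action runs. Minimal sketch, reusing the assumed `model` and `data_rdd` from the previous note:

label_rdd = model.predict_class_distributed(data_rdd)
labels = label_rdd.collect()  # collect() pulls the predicted labels back to the driver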
test
|
Layer.predict_image
|
model predict images, return imageFrame with predicted tensor
:param image_frame imageFrame that contains images
:param output_layer if output_layer is not null, the output of layer that matches
output_layer will be used as predicted output
:param share_buffer whether to share same memory for each batch predict results
:param batch_per_partition batch size per partition, default is 4
:param predict_key key to store predicted results
|
pyspark/bigdl/nn/layer.py
|
def predict_image(self, image_frame, output_layer=None, share_buffer=False,
batch_per_partition=4, predict_key="predict"):
"""
model predict images, return imageFrame with predicted tensor
:param image_frame imageFrame that contains images
:param output_layer if output_layer is not null, the output of layer that matches
output_layer will be used as predicted output
:param share_buffer whether to share same memory for each batch predict results
:param batch_per_partition batch size per partition, default is 4
:param predict_key key to store predicted results
"""
image_frame = callBigDlFunc(self.bigdl_type, "modelPredictImage", self.value,
image_frame,
output_layer,
share_buffer,
batch_per_partition,
predict_key)
return ImageFrame(image_frame)
|
def predict_image(self, image_frame, output_layer=None, share_buffer=False,
batch_per_partition=4, predict_key="predict"):
"""
model predict images, return imageFrame with predicted tensor
:param image_frame imageFrame that contains images
:param output_layer if output_layer is not null, the output of layer that matches
output_layer will be used as predicted output
:param share_buffer whether to share same memory for each batch predict results
:param batch_per_partition batch size per partition, default is 4
:param predict_key key to store predicted results
"""
image_frame = callBigDlFunc(self.bigdl_type, "modelPredictImage", self.value,
image_frame,
output_layer,
share_buffer,
batch_per_partition,
predict_key)
return ImageFrame(image_frame)
|
[
"model",
"predict",
"images",
"return",
"imageFrame",
"with",
"predicted",
"tensor",
":",
"param",
"image_frame",
"imageFrame",
"that",
"contains",
"images",
":",
"param",
"output_layer",
"if",
"output_layer",
"is",
"not",
"null",
"the",
"output",
"of",
"layer",
"that",
"matches",
"output_layer",
"will",
"be",
"used",
"as",
"predicted",
"output",
":",
"param",
"share_buffer",
"whether",
"to",
"share",
"same",
"memory",
"for",
"each",
"batch",
"predict",
"results",
":",
"param",
"batch_per_partition",
"batch",
"size",
"per",
"partition",
"default",
"is",
"4",
":",
"param",
"predict_key",
"key",
"to",
"store",
"predicted",
"results"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L451-L469
|
[
"def",
"predict_image",
"(",
"self",
",",
"image_frame",
",",
"output_layer",
"=",
"None",
",",
"share_buffer",
"=",
"False",
",",
"batch_per_partition",
"=",
"4",
",",
"predict_key",
"=",
"\"predict\"",
")",
":",
"image_frame",
"=",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"modelPredictImage\"",
",",
"self",
".",
"value",
",",
"image_frame",
",",
"output_layer",
",",
"share_buffer",
",",
"batch_per_partition",
",",
"predict_key",
")",
"return",
"ImageFrame",
"(",
"image_frame",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
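Editorial note on Layer.predict_image: predictions are attached to the returned ImageFrame under `predict_key`, and `output_layer` lets you read an intermediate layer's output instead of the final one. The sketch below is a rough illustration; the ImageFrame import path, the read() call, and the image directory are assumptions that may differ between BigDL versions.

from bigdl.transform.vision.image import ImageFrame  # assumed import path

image_frame = ImageFrame.read("/path/to/images", sc)  # `sc` assumed to be an active SparkContext
result_frame = model.predict_image(image_frame,
                                   output_layer=None,      # None: use the model's final output
                                   share_buffer=False,
                                   batch_per_partition=4,
                                   predict_key="predict")  # key under which results are stored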
test
|
Layer.set_weights
|
Set weights for this layer
:param weights: a list of numpy arrays which represent weight and bias
:return:
>>> linear = Linear(3,2)
creating: createLinear
>>> linear.set_weights([np.array([[1,2,3],[4,5,6]]), np.array([7,8])])
>>> weights = linear.get_weights()
>>> weights[0].shape == (2,3)
True
>>> np.testing.assert_allclose(weights[0][0], np.array([1., 2., 3.]))
>>> np.testing.assert_allclose(weights[1], np.array([7., 8.]))
>>> relu = ReLU()
creating: createReLU
>>> from py4j.protocol import Py4JJavaError
>>> try:
... relu.set_weights([np.array([[1,2,3],[4,5,6]]), np.array([7,8])])
... except Py4JJavaError as err:
... print(err.java_exception)
...
java.lang.IllegalArgumentException: requirement failed: this layer does not have weight/bias
>>> relu.get_weights()
The layer does not have weight/bias
>>> add = Add(2)
creating: createAdd
>>> try:
... add.set_weights([np.array([7,8]), np.array([1,2])])
... except Py4JJavaError as err:
... print(err.java_exception)
...
java.lang.IllegalArgumentException: requirement failed: the number of input weight/bias is not consistant with number of weight/bias of this layer, number of input 1, number of output 2
>>> cAdd = CAdd([4, 1])
creating: createCAdd
>>> cAdd.set_weights(np.ones([4, 1]))
>>> (cAdd.get_weights()[0] == np.ones([4, 1])).all()
True
|
pyspark/bigdl/nn/layer.py
|
def set_weights(self, weights):
"""
Set weights for this layer
:param weights: a list of numpy arrays which represent weight and bias
:return:
>>> linear = Linear(3,2)
creating: createLinear
>>> linear.set_weights([np.array([[1,2,3],[4,5,6]]), np.array([7,8])])
>>> weights = linear.get_weights()
>>> weights[0].shape == (2,3)
True
>>> np.testing.assert_allclose(weights[0][0], np.array([1., 2., 3.]))
>>> np.testing.assert_allclose(weights[1], np.array([7., 8.]))
>>> relu = ReLU()
creating: createReLU
>>> from py4j.protocol import Py4JJavaError
>>> try:
... relu.set_weights([np.array([[1,2,3],[4,5,6]]), np.array([7,8])])
... except Py4JJavaError as err:
... print(err.java_exception)
...
java.lang.IllegalArgumentException: requirement failed: this layer does not have weight/bias
>>> relu.get_weights()
The layer does not have weight/bias
>>> add = Add(2)
creating: createAdd
>>> try:
... add.set_weights([np.array([7,8]), np.array([1,2])])
... except Py4JJavaError as err:
... print(err.java_exception)
...
java.lang.IllegalArgumentException: requirement failed: the number of input weight/bias is not consistant with number of weight/bias of this layer, number of input 1, number of output 2
>>> cAdd = CAdd([4, 1])
creating: createCAdd
>>> cAdd.set_weights(np.ones([4, 1]))
>>> (cAdd.get_weights()[0] == np.ones([4, 1])).all()
True
"""
tensors = [JTensor.from_ndarray(param, self.bigdl_type) for param in to_list(weights)]
callBigDlFunc(self.bigdl_type, "setWeights", self.value, tensors)
|
def set_weights(self, weights):
"""
Set weights for this layer
:param weights: a list of numpy arrays which represent weight and bias
:return:
>>> linear = Linear(3,2)
creating: createLinear
>>> linear.set_weights([np.array([[1,2,3],[4,5,6]]), np.array([7,8])])
>>> weights = linear.get_weights()
>>> weights[0].shape == (2,3)
True
>>> np.testing.assert_allclose(weights[0][0], np.array([1., 2., 3.]))
>>> np.testing.assert_allclose(weights[1], np.array([7., 8.]))
>>> relu = ReLU()
creating: createReLU
>>> from py4j.protocol import Py4JJavaError
>>> try:
... relu.set_weights([np.array([[1,2,3],[4,5,6]]), np.array([7,8])])
... except Py4JJavaError as err:
... print(err.java_exception)
...
java.lang.IllegalArgumentException: requirement failed: this layer does not have weight/bias
>>> relu.get_weights()
The layer does not have weight/bias
>>> add = Add(2)
creating: createAdd
>>> try:
... add.set_weights([np.array([7,8]), np.array([1,2])])
... except Py4JJavaError as err:
... print(err.java_exception)
...
java.lang.IllegalArgumentException: requirement failed: the number of input weight/bias is not consistant with number of weight/bias of this layer, number of input 1, number of output 2
>>> cAdd = CAdd([4, 1])
creating: createCAdd
>>> cAdd.set_weights(np.ones([4, 1]))
>>> (cAdd.get_weights()[0] == np.ones([4, 1])).all()
True
"""
tensors = [JTensor.from_ndarray(param, self.bigdl_type) for param in to_list(weights)]
callBigDlFunc(self.bigdl_type, "setWeights", self.value, tensors)
|
[
"Set",
"weights",
"for",
"this",
"layer"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L471-L512
|
[
"def",
"set_weights",
"(",
"self",
",",
"weights",
")",
":",
"tensors",
"=",
"[",
"JTensor",
".",
"from_ndarray",
"(",
"param",
",",
"self",
".",
"bigdl_type",
")",
"for",
"param",
"in",
"to_list",
"(",
"weights",
")",
"]",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"setWeights\"",
",",
"self",
".",
"value",
",",
"tensors",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
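Editorial note on Layer.set_weights: the doctest in the row already covers the main cases (matching shapes, layers without parameters, a wrong number of tensors). The brief sketch below only highlights the common pattern of deriving the required shapes from get_weights before assigning new values; the layer choice is an assumption.

import numpy as np
from bigdl.nn.layer import Linear

layer = Linear(3, 2)
current = layer.get_weights()                       # [weight of shape (2, 3), bias of shape (2,)]
new_weights = [np.zeros_like(w) for w in current]   # same shapes, new values
layer.set_weights(new_weights)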
test
|
Layer.get_weights
|
Get weights for this layer
:return: list of numpy arrays which represent weight and bias
|
pyspark/bigdl/nn/layer.py
|
def get_weights(self):
"""
Get weights for this layer
:return: list of numpy arrays which represent weight and bias
"""
tensorWeights = callBigDlFunc(self.bigdl_type,
"getWeights", self.value)
if tensorWeights is not None:
return [tensor.to_ndarray() for tensor in tensorWeights]
else:
print("The layer does not have weight/bias")
return None
|
def get_weights(self):
"""
Get weights for this layer
:return: list of numpy arrays which represent weight and bias
"""
tensorWeights = callBigDlFunc(self.bigdl_type,
"getWeights", self.value)
if tensorWeights is not None:
return [tensor.to_ndarray() for tensor in tensorWeights]
else:
print("The layer does not have weight/bias")
return None
|
[
"Get",
"weights",
"for",
"this",
"layer"
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L514-L526
|
[
"def",
"get_weights",
"(",
"self",
")",
":",
"tensorWeights",
"=",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"getWeights\"",
",",
"self",
".",
"value",
")",
"if",
"tensorWeights",
"is",
"not",
"None",
":",
"return",
"[",
"tensor",
".",
"to_ndarray",
"(",
")",
"for",
"tensor",
"in",
"tensorWeights",
"]",
"else",
":",
"print",
"(",
"\"The layer does not have weight/bias\"",
")",
"return",
"None"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
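Editorial note on Layer.get_weights: for layers without trainable parameters it prints a message and returns None, so callers should guard against that. A small illustrative sketch:

from bigdl.nn.layer import Linear, ReLU

for layer in [Linear(3, 2), ReLU()]:
    weights = layer.get_weights()
    if weights is None:
        print("%s has no trainable parameters" % layer.__class__.__name__)
    else:
        print("%s shapes: %s" % (layer.__class__.__name__, [w.shape for w in weights]))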
test
|
Layer.save_tensorflow
|
Save a model to protobuf files so that it can be used in tensorflow inference.
When saving the model, placeholders will be added to the tf model as input nodes. So
you need to pass in the names and shapes of the placeholders. BigDL model doesn't have
such information. The order of the placeholder information should be same as the inputs
of the graph model.
:param inputs: placeholder information, should be an array of tuples (input_name, shape)
where 'input_name' is a string and shape is an array of integer
:param path: the path to be saved to
:param byte_order: model byte order
:param data_format: model data format, should be "nhwc" or "nchw"
|
pyspark/bigdl/nn/layer.py
|
def save_tensorflow(self, inputs, path, byte_order="little_endian", data_format="nhwc"):
"""
Save a model to protobuf files so that it can be used in tensorflow inference.
When saving the model, placeholders will be added to the tf model as input nodes. So
you need to pass in the names and shapes of the placeholders. BigDL model doesn't have
such information. The order of the placeholder information should be same as the inputs
of the graph model.
:param inputs: placeholder information, should be an array of tuples (input_name, shape)
where 'input_name' is a string and shape is an array of integer
:param path: the path to be saved to
:param byte_order: model byte order
:param data_format: model data format, should be "nhwc" or "nchw"
"""
callBigDlFunc(self.bigdl_type, "saveTF", self.value, inputs, path, byte_order, data_format)
|
def save_tensorflow(self, inputs, path, byte_order="little_endian", data_format="nhwc"):
"""
Save a model to protobuf files so that it can be used in tensorflow inference.
When saving the model, placeholders will be added to the tf model as input nodes. So
you need to pass in the names and shapes of the placeholders. BigDL model doesn't have
such information. The order of the placeholder information should be same as the inputs
of the graph model.
:param inputs: placeholder information, should be an array of tuples (input_name, shape)
where 'input_name' is a string and shape is an array of integer
:param path: the path to be saved to
:param byte_order: model byte order
:param data_format: model data format, should be "nhwc" or "nchw"
"""
callBigDlFunc(self.bigdl_type, "saveTF", self.value, inputs, path, byte_order, data_format)
|
[
"Save",
"a",
"model",
"to",
"protobuf",
"files",
"so",
"that",
"it",
"can",
"be",
"used",
"in",
"tensorflow",
"inference",
"."
] |
intel-analytics/BigDL
|
python
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L543-L557
|
[
"def",
"save_tensorflow",
"(",
"self",
",",
"inputs",
",",
"path",
",",
"byte_order",
"=",
"\"little_endian\"",
",",
"data_format",
"=",
"\"nhwc\"",
")",
":",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"saveTF\"",
",",
"self",
".",
"value",
",",
"inputs",
",",
"path",
",",
"byte_order",
",",
"data_format",
")"
] |
e9c19788285986ab789a2e2998f9a85d7524779f
|
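Editorial note on Layer.save_tensorflow: because a BigDL model carries no placeholder metadata, the caller supplies (input_name, shape) pairs in the same order as the graph's inputs. Minimal sketch; the placeholder name, shape, and output path are illustrative assumptions, and `model` is assumed to be a graph-style BigDL Model.

inputs = [("input", [1, 224, 224, 3])]  # (input_name, shape) pairs, NHWC layout in this sketch
model.save_tensorflow(inputs, "/tmp/bigdl_model.pb",
                      byte_order="little_endian",
                      data_format="nhwc")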