| _id | text |
|---|---|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L732-L751
|
def get_config(self):
"""
"""
config = {
'event_shape': self._event_shape,
'convert_to_tensor_fn': _serialize(self._convert_to_tensor_fn),
'sample_dtype': self._sample_dtype,
'validate_args': self._validate_args
}
base_config = super(IndependentBernoulli, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L144-L152
|
def set_running_std(self, running_std):
"""
"""
callBigDlFunc(self.bigdl_type, "setRunningStd",
self.value, JTensor.from_ndarray(running_std))
return self
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/qubole_hook.py#L147-L162
|
def kill(self, ti):
"""
"""
if self.cmd is None:
if not ti and not self.task_instance:
raise Exception("Unable to cancel Qubole Command, context is unavailable!")
elif not ti:
ti = self.task_instance
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=ti.task_id)
self.cmd = self.cls.find(cmd_id)
if self.cls and self.cmd:
self.log.info('Sending KILL signal to Qubole Command Id: %s', self.cmd.id)
self.cmd.cancel()
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L66-L71
|
def make_name(super_name, default_super_name, sub_name):
""""""
name = super_name if super_name is not None else default_super_name
if sub_name is not None:
name += '_' + sub_name
return name
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/webhdfs_hook.py#L106-L132
|
def load_file(self, source, destination, overwrite=True, parallelism=1, **kwargs):
"""
conn = self.get_conn()
conn.upload(hdfs_path=destination,
local_path=source,
overwrite=overwrite,
n_threads=parallelism,
**kwargs)
self.log.debug("Uploaded file %s to %s", source, destination)
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/joint_distribution_named.py#L245-L267
|
def _prob_chain_rule_flatten(named_makers):
""""""
def _make(dist_fn, args):
if args is None:
return lambda *_: dist_fn
if not args:
return lambda *_: dist_fn()
def _fn(*xs):
kwargs = dict(zip(args, reversed(xs[-len(args):])))
kwargs.pop('_', None)
return dist_fn(**kwargs)
return _fn
named_makers = _convert_to_dict(named_makers)
g = {k: (None if distribution_util.is_distribution_instance(v)
else joint_distribution_sequential._get_required_args(v)) # pylint: disable=protected-access
for k, v in named_makers.items()}
g = _best_order(g)
dist_fn_name, dist_fn_args = zip(*g)
dist_fn_args = tuple(None if a is None else tuple(a) for a in dist_fn_args)
dist_fn_wrapped = tuple(_make(named_makers[name], parents)
for (name, parents) in g)
dist_fn = tuple(named_makers.get(n) for n in dist_fn_name)
return dist_fn, dist_fn_wrapped, dist_fn_args, dist_fn_name
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/cassandra_to_gcs.py#L247-L255
|
def convert_user_type(cls, name, value):
"""
"""
names = value._fields
values = [cls.convert_value(name, getattr(value, name)) for name in names]
return cls.generate_data_dict(names, values)
|
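For illustration, the converter above relies on Cassandra user-defined types exposing their field names through `_fields`, the same protocol as `collections.namedtuple`. A minimal, self-contained sketch of that pattern (the `Address` type and identity conversion below are hypothetical stand-ins, not part of the Airflow code):

```python
from collections import namedtuple

# Hypothetical UDT-like value; namedtuples expose their field names via `_fields`.
Address = namedtuple("Address", ["street", "zip_code"])
value = Address(street="1 Main St", zip_code="94105")

names = value._fields                              # ('street', 'zip_code')
values = [getattr(value, name) for name in names]  # ['1 Main St', '94105']
print(dict(zip(names, values)))                    # {'street': '1 Main St', 'zip_code': '94105'}
```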
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/acfun.py#L42-L109
|
def acfun_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False, **kwargs):
"""
"""
# First, call the main parsing API.
info = json.loads(get_content('http://www.acfun.cn/video/getVideo.aspx?id=' + vid))
sourceType = info['sourceType']
# Decide sourceId to know which extractor to use.
if 'sourceId' in info: sourceId = info['sourceId']
# danmakuId = info['danmakuId']
# Call the extractor chosen by sourceId.
if sourceType == 'sina':
sina_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'youku':
youku_download_by_vid(sourceId, title=title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
elif sourceType == 'tudou':
tudou_download_by_iid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'qq':
qq_download_by_vid(sourceId, title, True, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'letv':
letvcloud_download_by_vu(sourceId, '2d8c027396', title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'zhuzhan':
# As of Jul. 28, 2016, AcFun uses embsig as anti-hotlinking, so we need to pass it.
# In Mar. 2017 a dedicated 'acfun_proxy' was added to the Youku cloud player.
# Old code removed.
url = 'http://www.acfun.cn/v/ac' + vid
yk_streams = youku_acfun_proxy(info['sourceId'], info['encode'], url)
seq = ['mp4hd3', 'mp4hd2', 'mp4hd', 'flvhd']
for t in seq:
if yk_streams.get(t):
preferred = yk_streams[t]
break
# total_size in the JSON could be incorrect (e.g. 0).
size = 0
for url in preferred[0]:
_, _, seg_size = url_info(url)
size += seg_size
# Falling back to flvhd is not really feasible.
if re.search(r'fid=[0-9A-Z\-]*.flv', preferred[0][0]):
ext = 'flv'
else:
ext = 'mp4'
print_info(site_info, title, ext, size)
if not info_only:
download_urls(preferred[0], title, ext, size, output_dir=output_dir, merge=merge)
else:
raise NotImplementedError(sourceType)
if not info_only and not dry_run:
if not kwargs['caption']:
print('Skipping danmaku.')
return
try:
title = get_filename(title)
print('Downloading %s ...\n' % (title + '.cmt.json'))
cmt = get_srt_json(vid)
with open(os.path.join(output_dir, title + '.cmt.json'), 'w', encoding='utf-8') as x:
x.write(cmt)
except Exception:
pass  # Danmaku download is best-effort; ignore failures.
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mssql_to_gcs.py#L127-L137
|
def _query_mssql(self):
"""
"""
mssql = MsSqlHook(mssql_conn_id=self.mssql_conn_id)
conn = mssql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
return cursor
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_cosmos_hook.py#L142-L160
|
def create_database(self, database_name):
"""
"""
if database_name is None:
raise AirflowBadRequest("Database name cannot be None.")
# We need to check to see if this database already exists so we don't try
# to create it twice
existing_database = list(self.get_conn().QueryDatabases({
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [
{"name": "@id", "value": database_name}
]
}))
# Only create if we did not find it already existing
if len(existing_database) == 0:
self.get_conn().CreateDatabase({"id": database_name})
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L576-L592
|
def callBigDlFunc(bigdl_type, name, *args):
""" """
gateway = _get_gateway()
error = Exception("Cannot find function: %s" % name)
for jinvoker in JavaCreator.instance(bigdl_type, gateway).value:
# hasattr(jinvoker, name) always returns true here,
# so you need to invoke the method to check whether it exists or not
try:
api = getattr(jinvoker, name)
result = callJavaFunc(api, *args)
except Exception as e:
error = e
if "does not exist" not in str(e):
raise e
else:
return result
raise error
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/mcmc/elliptical_slice_sampler.py#L405-L417
|
def _prepare_args(log_likelihood_fn, state,
log_likelihood=None, description='log_likelihood'):
""""""
state_parts = list(state) if mcmc_util.is_list_like(state) else [state]
state_parts = [tf.convert_to_tensor(s, name='current_state')
for s in state_parts]
log_likelihood = _maybe_call_fn(
log_likelihood_fn,
state_parts,
log_likelihood,
description)
return [state_parts, log_likelihood]
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/keras/converter.py#L362-L368
|
def from_json_path(cls, json_path):
"""
"""
json_str = BCommon.text_from_path(json_path)
return DefinitionLoader.from_json_str(json_str)
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/examples/keras/keras_utils.py#L20-L26
|
def save_keras_definition(keras_model, path):
"""
"""
model_json = keras_model.to_json()
with open(path, "w") as json_file:
json_file.write(model_json)
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/normal.py#L241-L261
|
def _kl_normal_normal(n_a, n_b, name=None):
"""
"""
with tf.name_scope(name or "kl_normal_normal"):
one = tf.constant(1, dtype=n_a.dtype)
two = tf.constant(2, dtype=n_a.dtype)
half = tf.constant(0.5, dtype=n_a.dtype)
s_a_squared = tf.square(n_a.scale)
s_b_squared = tf.square(n_b.scale)
ratio = s_a_squared / s_b_squared
return (tf.square(n_a.loc - n_b.loc) / (two * s_b_squared) + half *
(ratio - one - tf.math.log(ratio)))
|
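The closed form implemented above is KL(a || b) = (mu_a - mu_b)^2 / (2*sigma_b^2) + 0.5*(sigma_a^2/sigma_b^2 - 1 - log(sigma_a^2/sigma_b^2)). A NumPy sketch that checks it against a Monte Carlo estimate (sample size and parameters are illustrative):

```python
import numpy as np

mu_a, sigma_a = 0.0, 1.0
mu_b, sigma_b = 1.0, 2.0

ratio = sigma_a**2 / sigma_b**2
kl_closed_form = (mu_a - mu_b)**2 / (2 * sigma_b**2) + 0.5 * (ratio - 1 - np.log(ratio))

# Monte Carlo estimate of E_a[log p_a(x) - log p_b(x)].
rng = np.random.default_rng(0)
x = rng.normal(mu_a, sigma_a, size=1_000_000)
log_pa = -0.5 * ((x - mu_a) / sigma_a) ** 2 - np.log(sigma_a) - 0.5 * np.log(2 * np.pi)
log_pb = -0.5 * ((x - mu_b) / sigma_b) ** 2 - np.log(sigma_b) - 0.5 * np.log(2 * np.pi)
kl_monte_carlo = np.mean(log_pa - log_pb)

print(kl_closed_form, kl_monte_carlo)  # the two estimates agree to a few decimal places
```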
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L215-L266
|
def sparse(cls, a_ndarray, i_ndarray, shape, bigdl_type="float"):
"""
"""
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"values array should be a np.ndarray, not %s" % type(a_ndarray)
assert isinstance(i_ndarray, np.ndarray), \
"indices array should be a np.ndarray, not %s" % type(a_ndarray)
assert i_ndarray.size == a_ndarray.size * shape.size, \
"size of values and indices should match."
return cls(a_ndarray,
shape,
bigdl_type,
i_ndarray)
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/distribution.py#L173-L175
|
def _remove_dict_keys_with_value(dict_, val):
""""""
return {k: v for k, v in dict_.items() if v is not val}
|
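Note that the helper above filters on identity (`is not`), not equality, so only entries holding the exact sentinel object are dropped. A minimal illustration (the sentinel and dict are made up):

```python
SENTINEL = object()

def remove_dict_keys_with_value(dict_, val):
    # Identity comparison: values equal to `val` but not the same object are kept.
    return {k: v for k, v in dict_.items() if v is not val}

d = {"a": 1, "b": SENTINEL, "c": 1.0}
print(remove_dict_keys_with_value(d, SENTINEL))  # {'a': 1, 'c': 1.0}
```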
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/fun_mcmc/fun_mcmc_lib.py#L140-L157
|
def call_and_grads(fn: TransitionOperator, args: Union[Tuple[Any], Any]
) -> Tuple[tf.Tensor, TensorNest, TensorNest]:
"""
"""
with tf.GradientTape() as tape:
tape.watch(args)
ret, extra = call_fn(fn, args)
grads = tape.gradient(ret, args)
return ret, extra, grads
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/dbapi_hook.py#L168-L177
|
def set_autocommit(self, conn, autocommit):
"""
"""
if not self.supports_autocommit and autocommit:
self.log.warn(
("%s connection doesn't support "
"autocommit but autocommit activated."),
getattr(self, self.conn_name_attr))
conn.autocommit = autocommit
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L74-L111
|
def _choose_base_case(is_accepted,
accepted,
rejected,
name=None):
""""""
def _expand_is_accepted_like(x):
"""Helper to expand `is_accepted` like the shape of some input arg."""
with tf.compat.v1.name_scope('expand_is_accepted_like'):
expand_shape = tf.concat([
tf.shape(input=is_accepted),
tf.ones([tf.rank(x) - tf.rank(is_accepted)], dtype=tf.int32),
],
axis=0)
multiples = tf.concat([
tf.ones([tf.rank(is_accepted)], dtype=tf.int32),
tf.shape(input=x)[tf.rank(is_accepted):],
],
axis=0)
m = tf.tile(tf.reshape(is_accepted, expand_shape),
multiples)
m.set_shape(m.shape.merge_with(x.shape))
return m
def _where(accepted, rejected):
if accepted is rejected:
return accepted
accepted = tf.convert_to_tensor(value=accepted, name='accepted')
rejected = tf.convert_to_tensor(value=rejected, name='rejected')
r = tf.where(_expand_is_accepted_like(accepted), accepted, rejected)
r.set_shape(r.shape.merge_with(accepted.shape.merge_with(rejected.shape)))
return r
with tf.compat.v1.name_scope(
name, 'choose', values=[is_accepted, accepted, rejected]):
if not is_list_like(accepted):
return _where(accepted, rejected)
return [(choose(is_accepted, a, r, name=name) if is_namedtuple_like(a)
else _where(a, r))
for a, r in zip(accepted, rejected)]
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L680-L705
|
def rotate(img, angle, resample=False, expand=False, center=None):
"""
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.rotate(angle, resample, expand, center)
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/random_variable.py#L287-L295
|
def _numpy_text(tensor, is_repr=False):
""""""
if tensor.dtype.is_numpy_compatible:
text = repr(tensor.numpy()) if is_repr else str(tensor.numpy())
else:
text = "<unprintable>"
if "\n" in text:
text = "\n" + text
return text
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/internal/moving_stats.py#L186-L245
|
def moving_mean_variance(value, decay, name=None):
"""
"""
with tf.compat.v1.variable_scope(name, "moving_mean_variance",
[value, decay]):
value = tf.convert_to_tensor(value=value, name="value")
base_dtype = value.dtype.base_dtype
if not base_dtype.is_floating:
raise TypeError(
"value.base_dtype({}) does not have float type `dtype`.".format(
base_dtype.name))
decay = tf.convert_to_tensor(value=decay, dtype=base_dtype, name="decay")
variance_var = tf.compat.v2.Variable(
name="moving_variance",
initial_value=tf.zeros(shape=value.shape, dtype=value.dtype),
trainable=False)
mean_var = tf.compat.v2.Variable(
name="moving_mean",
initial_value=tf.zeros(shape=value.shape, dtype=value.dtype),
trainable=False)
return assign_moving_mean_variance(
mean_var, variance_var, value, decay)
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/transforms.py#L875-L905
|
def get_params(brightness, contrast, saturation, hue):
"""
"""
transforms = []
if brightness is not None:
brightness_factor = random.uniform(brightness[0], brightness[1])
transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
if contrast is not None:
contrast_factor = random.uniform(contrast[0], contrast[1])
transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
if saturation is not None:
saturation_factor = random.uniform(saturation[0], saturation[1])
transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
if hue is not None:
hue_factor = random.uniform(hue[0], hue[1])
transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
random.shuffle(transforms)
transform = Compose(transforms)
return transform
|
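The factory above is what torchvision's `ColorJitter` uses internally: each enabled jitter draws one random factor from its range and the resulting `Lambda` ops are applied in shuffled order. A small usage sketch through the public class (parameter values are just examples):

```python
from PIL import Image
from torchvision import transforms

# A solid-color test image stands in for a real sample.
img = Image.new("RGB", (64, 64), color=(128, 64, 32))

# The constructor turns scalar arguments into ranges (e.g. brightness=0.4 -> (0.6, 1.4))
# and calls get_params (above) on every __call__ to build a fresh random transform.
jitter = transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
augmented = jitter(img)
print(augmented.size)  # (64, 64)
```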
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/task_runner/cgroup_task_runner.py#L90-L109
|
def _delete_cgroup(self, path):
"""
"""
node = trees.Tree().root
path_split = path.split("/")
for path_element in path_split:
name_to_node = {x.name: x for x in node.children}
if path_element not in name_to_node:
self.log.warning("Cgroup does not exist: %s", path)
return
else:
node = name_to_node[path_element]
# node is now the leaf node
parent = node.parent
self.log.debug("Deleting cgroup %s/%s", parent, node.name)
parent.delete_cgroup(node.name)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L303-L312
|
def _has_perm(self, permission_name, view_menu_name):
"""
"""
if hasattr(self, 'perms'):
if (permission_name, view_menu_name) in self.perms:
return True
# rebuild the permissions set
self._get_and_cache_perms()
return (permission_name, view_menu_name) in self.perms
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/dataset/movielens.py#L25-L44
|
def read_data_sets(data_dir):
"""
"""
WHOLE_DATA = 'ml-1m.zip'
local_file = base.maybe_download(WHOLE_DATA, data_dir, SOURCE_URL + WHOLE_DATA)
zip_ref = zipfile.ZipFile(local_file, 'r')
extracted_to = os.path.join(data_dir, "ml-1m")
if not os.path.exists(extracted_to):
print("Extracting %s to %s" % (local_file, data_dir))
zip_ref.extractall(data_dir)
zip_ref.close()
rating_files = os.path.join(extracted_to,"ratings.dat")
rating_list = [i.strip().split("::") for i in open(rating_files,"r").readlines()]
movielens_data = np.array(rating_list).astype(int)
return movielens_data
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L306-L345
|
def from_ndarray(cls, features, labels, bigdl_type="float"):
"""
"""
if isinstance(features, np.ndarray):
features = [features]
else:
assert all(isinstance(feature, np.ndarray) for feature in features), \
"features should be a list of np.ndarray, not %s" % type(features)
if np.isscalar(labels): # in case labels is a scalar.
labels = [np.array(labels)]
elif isinstance(labels, np.ndarray):
labels = [labels]
else:
assert all(isinstance(label, np.ndarray) for label in labels), \
"labels should be a list of np.ndarray, not %s" % type(labels)
return cls(
features=[JTensor.from_ndarray(feature) for feature in features],
labels=[JTensor.from_ndarray(label) for label in labels],
bigdl_type=bigdl_type)
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1323-L1365
|
def prefer_static_broadcast_shape(shape1,
shape2,
name="prefer_static_broadcast_shape"):
"""
"""
with tf.name_scope(name):
def make_shape_tensor(x):
return tf.convert_to_tensor(value=x, name="shape", dtype=tf.int32)
def get_tensor_shape(s):
if isinstance(s, tf.TensorShape):
return s
s_ = tf.get_static_value(make_shape_tensor(s))
if s_ is not None:
return tf.TensorShape(s_)
return None
def get_shape_tensor(s):
if not isinstance(s, tf.TensorShape):
return make_shape_tensor(s)
if tensorshape_util.is_fully_defined(s):
return make_shape_tensor(tensorshape_util.as_list(s))
raise ValueError("Cannot broadcast from partially "
"defined `TensorShape`.")
shape1_ = get_tensor_shape(shape1)
shape2_ = get_tensor_shape(shape2)
if shape1_ is not None and shape2_ is not None:
return tf.broadcast_static_shape(shape1_, shape2_)
shape1_ = get_shape_tensor(shape1)
shape2_ = get_shape_tensor(shape2)
return tf.broadcast_dynamic_shape(shape1_, shape2_)
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/joint_distribution_sequential.py#L210-L218
|
def _build(self, model):
""""""
if not isinstance(model, collections.Sequence):
raise TypeError('`model` must be `list`-like (saw: {}).'.format(
type(model).__name__))
self._dist_fn = model
self._dist_fn_wrapped, self._dist_fn_args = zip(*[
_unify_call_signature(i, dist_fn)
for i, dist_fn in enumerate(model)])
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L898-L911
|
def _maybe_validate_matrix(a, validate_args):
""""""
assertions = []
if not a.dtype.is_floating:
raise TypeError('Input `a` must have `float`-like `dtype` '
'(saw {}).'.format(a.dtype.name))
if a.shape.ndims is not None:
if a.shape.ndims < 2:
raise ValueError('Input `a` must have at least 2 dimensions '
'(saw: {}).'.format(a.shape.ndims))
elif validate_args:
assertions.append(tf.compat.v1.assert_rank_at_least(
a, rank=2, message='Input `a` must have at least 2 dimensions.'))
return assertions
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/lbfgs.py#L263-L274
|
def _get_initial_state(value_and_gradients_function,
initial_position,
num_correction_pairs,
tolerance):
""""""
init_args = bfgs_utils.get_initial_state_args(
value_and_gradients_function,
initial_position,
tolerance)
empty_queue = _make_empty_queue_for(num_correction_pairs, initial_position)
init_args.update(position_deltas=empty_queue, gradient_deltas=empty_queue)
return LBfgsOptimizerResults(**init_args)
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/models/local_lenet/local_lenet.py#L25-L35
|
def get_mnist(data_type="train", location="/tmp/mnist"):
"""
"""
X, Y = mnist.read_data_sets(location, data_type)
return X, Y + 1
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs.py#L289-L319
|
def _inv_hessian_control_inputs(inv_hessian):
"""
"""
# The easiest way to validate if the inverse Hessian is positive definite is
# to compute its Cholesky decomposition.
is_positive_definite = tf.reduce_all(
input_tensor=tf.math.is_finite(tf.linalg.cholesky(inv_hessian)),
axis=[-1, -2])
# Then check that the supplied inverse Hessian is symmetric.
is_symmetric = tf.equal(bfgs_utils.norm(
inv_hessian - _batch_transpose(inv_hessian), dims=2), 0)
# Simply adding a control dependencies on these results is not enough to
# trigger them, we need to add asserts on the results.
return [tf.Assert(is_positive_definite,
['Initial inverse Hessian is not positive definite.',
inv_hessian]),
tf.Assert(is_symmetric,
['Initial inverse Hessian is not symmetric',
inv_hessian])]
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L740-L762
|
def maybe_get_static_value(x, dtype=None):
"""
"""
if x is None:
return x
try:
# This returns an np.ndarray.
x_ = tf.get_static_value(x)
except TypeError:
x_ = x
if x_ is None or dtype is None:
return x_
return np.array(x_, dtype)
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/neutra/neutra_kernel.py#L350-L381
|
def one_step(self, current_state, previous_kernel_results):
"""
"""
@tfp.mcmc.internal.util.make_innermost_setter
def set_num_leapfrog_steps(kernel_results, num_leapfrog_steps):
return kernel_results._replace(
accepted_results=kernel_results.accepted_results._replace(
num_leapfrog_steps=num_leapfrog_steps))
step_size = previous_kernel_results.new_step_size
previous_kernel_results = set_num_leapfrog_steps(
previous_kernel_results, self._num_leapfrog_steps(step_size))
new_state, kernel_results = self._kernel.one_step(
self._flatten_state(current_state), previous_kernel_results)
return self._unflatten_state(new_state), kernel_results
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L360-L378
|
def get_md5hash(self, bucket_name, object_name):
"""
"""
self.log.info('Retrieving the MD5 hash of '
'object: %s in bucket: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_md5hash = blob.md5_hash
self.log.info('The md5Hash of %s is %s', object_name, blob_md5hash)
return blob_md5hash
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/keras/topology.py#L208-L216
|
def from_jvalue(jvalue, bigdl_type="float"):
"""
"""
model = Sequential(jvalue=jvalue)
model.value = jvalue
return model
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/agent/dialog_logger.py#L78-L110
|
def _log(self, utterance: Any, direction: str, dialog_id: Optional[Hashable]=None):
"""
"""
if isinstance(utterance, str):
pass
elif isinstance(utterance, RichMessage):
utterance = utterance.json()
elif isinstance(utterance, (list, dict)):
utterance = jsonify_data(utterance)
else:
utterance = str(utterance)
dialog_id = str(dialog_id) if not isinstance(dialog_id, str) else dialog_id
if self.log_file.tell() >= self.log_max_size * 1024:
self.log_file.close()
self.log_file = self._get_log_file()
else:
try:
log_msg = {}
log_msg['timestamp'] = self._get_timestamp_utc_str()
log_msg['dialog_id'] = dialog_id
log_msg['direction'] = direction
log_msg['message'] = utterance
log_str = json.dumps(log_msg, ensure_ascii=self.config['ensure_ascii'])
self.log_file.write(f'{log_str}\n')
except IOError:
log.error('Failed to write dialog log.')
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/models/vgg.py#L100-L111
|
def vgg11_bn(pretrained=False, **kwargs):
"""
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))
return model
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/utils.py#L54-L90
|
def download_url(url, root, filename=None, md5=None):
"""
"""
from six.moves import urllib
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
# downloads file
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
except OSError:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L226-L232
|
def get_image(self, float_key="floats", to_chw=True):
"""
"""
tensors = callBigDlFunc(self.bigdl_type,
"localImageFrameToImageTensor", self.value, float_key, to_chw)
return map(lambda tensor: tensor.to_ndarray(), tensors)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/compression.py#L26-L43
|
def uncompress_file(input_file_name, file_extension, dest_dir):
"""
"""
if file_extension.lower() not in ('.gz', '.bz2'):
raise NotImplementedError("Received {} format. Only gz and bz2 "
"files can currently be uncompressed."
.format(file_extension))
if file_extension.lower() == '.gz':
fmodule = gzip.GzipFile
elif file_extension.lower() == '.bz2':
fmodule = bz2.BZ2File
with fmodule(input_file_name, mode='rb') as f_compressed,\
NamedTemporaryFile(dir=dest_dir,
mode='wb',
delete=False) as f_uncompressed:
shutil.copyfileobj(f_compressed, f_uncompressed)
return f_uncompressed.name
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L372-L392
|
def resized_crop(img, i, j, h, w, size, interpolation=Image.BILINEAR):
"""
"""
assert _is_pil_image(img), 'img should be PIL Image'
img = crop(img, i, j, h, w)
img = resize(img, size, interpolation)
return img
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L180-L201
|
def check_s3_url(self, s3url):
"""
"""
bucket, key = S3Hook.parse_s3_url(s3url)
if not self.s3_hook.check_for_bucket(bucket_name=bucket):
raise AirflowException(
"The input S3 Bucket {} does not exist ".format(bucket))
if key and not self.s3_hook.check_for_key(key=key, bucket_name=bucket)\
and not self.s3_hook.check_for_prefix(
prefix=key, bucket_name=bucket, delimiter='/'):
# check if s3 key exists in the case user provides a single file
# or if s3 prefix exists in the case user provides multiple files in
# a prefix
raise AirflowException("The input S3 Key "
"or Prefix {} does not exist in the Bucket {}"
.format(s3url, bucket))
return True
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1979-L2007
|
def _manage_executor_state(self, running):
"""
"""
executor = self.executor
for key, state in list(executor.get_event_buffer().items()):
if key not in running:
self.log.warning(
"%s state %s not in running=%s",
key, state, running.values()
)
continue
ti = running[key]
ti.refresh_from_db()
self.log.debug("Executor state: %s task %s", state, ti)
if state == State.FAILED or state == State.SUCCESS:
if ti.state == State.RUNNING or ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
ti.handle_failure(msg)
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1407-L1412
|
def gen_new_seed(seed, salt):
""""""
if seed is None:
return None
string = (str(seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
|
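The helper above derives a child seed by hashing the parent seed together with a salt and keeping the low 31 bits, so the result is deterministic and fits in an int32. A standalone sketch of the same computation:

```python
import hashlib

def gen_new_seed(seed, salt):
    # md5(str(seed) + salt), take the first 8 hex digits, mask to 31 bits.
    if seed is None:
        return None
    string = (str(seed) + salt).encode("utf-8")
    return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF

print(gen_new_seed(42, "my_op"))    # same inputs always yield the same seed
print(gen_new_seed(None, "my_op"))  # None passes through unchanged
```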
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/diagnostic.py#L401-L410
|
def _broadcast_maybelist_arg(states, secondary_arg, name):
""""""
if _is_list_like(secondary_arg):
if len(secondary_arg) != len(states):
raise ValueError('Argument `{}` was a list of different length ({}) than '
'`states` ({})'.format(name, len(secondary_arg), len(states)))
else:
secondary_arg = [secondary_arg] * len(states)
return secondary_arg
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/seasonal.py#L513-L526
|
def build_is_last_day_of_season(num_steps_per_season):
""""""
num_steps_per_cycle = np.sum(num_steps_per_season)
changepoints = np.cumsum(np.ravel(num_steps_per_season)) - 1
def is_last_day_of_season(t):
t_ = dist_util.maybe_get_static_value(t)
if t_ is not None: # static case
step_in_cycle = t_ % num_steps_per_cycle
return any(step_in_cycle == changepoints)
else:
step_in_cycle = tf.math.floormod(t, num_steps_per_cycle)
return tf.reduce_any(
input_tensor=tf.equal(step_in_cycle, changepoints))
return is_last_day_of_season
|
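A worked NumPy example of the static branch above: with `num_steps_per_season = [3, 2]` the cycle length is 5 and the zero-indexed changepoints are `[2, 4]`, so steps 2, 4, 7, 9, ... are season boundaries (this sketch covers only the static code path):

```python
import numpy as np

num_steps_per_season = np.array([3, 2])
num_steps_per_cycle = np.sum(num_steps_per_season)            # 5
changepoints = np.cumsum(np.ravel(num_steps_per_season)) - 1  # array([2, 4])

def is_last_day_of_season(t):
    step_in_cycle = t % num_steps_per_cycle
    return bool(np.any(step_in_cycle == changepoints))

print([t for t in range(10) if is_last_day_of_season(t)])  # [2, 4, 7, 9]
```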
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/blockwise.py#L229-L269
|
def _kl_blockwise_blockwise(b0, b1, name=None):
"""
"""
if len(b0.distributions) != len(b1.distributions):
raise ValueError(
'Can only compute KL divergence between Blockwise distributions with '
'the same number of component distributions.')
# We also need to check that the event shapes match for each one.
b0_event_sizes = [_event_size(d) for d in b0.distributions]
b1_event_sizes = [_event_size(d) for d in b1.distributions]
assertions = []
message = ('Can only compute KL divergence between Blockwise distributions '
'with the same pairwise event shapes.')
if (all(isinstance(event_size, int) for event_size in b0_event_sizes) and
all(isinstance(event_size, int) for event_size in b1_event_sizes)):
if b0_event_sizes != b1_event_sizes:
raise ValueError(message)
else:
if b0.validate_args or b1.validate_args:
assertions.extend(
assert_util.assert_equal( # pylint: disable=g-complex-comprehension
e1, e2, message=message)
for e1, e2 in zip(b0_event_sizes, b1_event_sizes))
with tf.name_scope(name or 'kl_blockwise_blockwise'):
with tf.control_dependencies(assertions):
return sum([
kullback_leibler.kl_divergence(d1, d2) for d1, d2 in zip(
b0.distributions, b1.distributions)])
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dates.py#L214-L224
|
def scale_time_units(time_seconds_arr, unit):
"""
"""
if unit == 'minutes':
return list(map(lambda x: x * 1.0 / 60, time_seconds_arr))
elif unit == 'hours':
return list(map(lambda x: x * 1.0 / (60 * 60), time_seconds_arr))
elif unit == 'days':
return list(map(lambda x: x * 1.0 / (24 * 60 * 60), time_seconds_arr))
return time_seconds_arr
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L642-L683
|
def assert_integer_form(x,
data=None,
summarize=None,
message=None,
int_dtype=None,
name="assert_integer_form"):
"""
"""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name="x")
if dtype_util.is_integer(x.dtype):
return tf.no_op()
message = message or "{} has non-integer components".format(x)
if int_dtype is None:
try:
int_dtype = {
tf.float16: tf.int16,
tf.float32: tf.int32,
tf.float64: tf.int64,
}[dtype_util.base_dtype(x.dtype)]
except KeyError:
raise TypeError("Unrecognized type {}".format(dtype_util.name(x.dtype)))
return assert_util.assert_equal(
x,
tf.cast(tf.cast(x, int_dtype), x.dtype),
data=data,
summarize=summarize,
message=message,
name=name)
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/mcmc/elliptical_slice_sampler.py#L228-L372
|
def one_step(self, current_state, previous_kernel_results):
"""
"""
with tf.compat.v1.name_scope(
name=mcmc_util.make_name(self.name, 'elliptical_slice', 'one_step'),
values=[self._seed_stream,
current_state,
previous_kernel_results.log_likelihood]):
with tf.compat.v1.name_scope('initialize'):
[
init_state_parts,
init_log_likelihood
] = _prepare_args(
self.log_likelihood_fn,
current_state,
previous_kernel_results.log_likelihood)
normal_samples = self.normal_sampler_fn(self._seed_stream()) # pylint: disable=not-callable
normal_samples = list(normal_samples) if mcmc_util.is_list_like(
normal_samples) else [normal_samples]
u = tf.random.uniform(
shape=tf.shape(init_log_likelihood),
seed=self._seed_stream(),
dtype=init_log_likelihood.dtype.base_dtype,
)
threshold = init_log_likelihood + tf.math.log(u)
starting_angle = tf.random.uniform(
shape=tf.shape(init_log_likelihood),
minval=0.,
maxval=2 * np.pi,
name='angle',
seed=self._seed_stream(),
dtype=init_log_likelihood.dtype.base_dtype,
)
starting_angle_min = starting_angle - 2 * np.pi
starting_angle_max = starting_angle
starting_state_parts = _rotate_on_ellipse(
init_state_parts, normal_samples, starting_angle)
starting_log_likelihood = self.log_likelihood_fn(*starting_state_parts) # pylint: disable=not-callable
def chain_not_done(
angle,
angle_min,
angle_max,
current_state_parts,
current_log_likelihood):
del angle, angle_min, angle_max, current_state_parts
return tf.reduce_any(current_log_likelihood < threshold)
def sample_next_angle(
angle,
angle_min,
angle_max,
current_state_parts,
current_log_likelihood):
"""Slice sample a new angle, and rotate init_state by that amount."""
chain_not_done = current_log_likelihood < threshold
# Box in on angle. Only update angles for which we haven't generated a
# point that beats the threshold.
angle_min = tf.where(
tf.math.logical_and(angle < 0, chain_not_done),
angle,
angle_min)
angle_max = tf.where(
tf.math.logical_and(angle >= 0, chain_not_done),
angle,
angle_max)
new_angle = tf.random.uniform(
shape=tf.shape(current_log_likelihood),
minval=angle_min,
maxval=angle_max,
seed=self._seed_stream(),
dtype=angle.dtype.base_dtype
)
angle = tf.where(chain_not_done, new_angle, angle)
next_state_parts = _rotate_on_ellipse(
init_state_parts, normal_samples, angle)
new_state_parts = []
broadcasted_chain_not_done = _right_pad_with_ones(
chain_not_done, tf.rank(next_state_parts[0]))
for n_state, c_state in zip(next_state_parts, current_state_parts):
new_state_part = tf.where(
tf.broadcast_to(
broadcasted_chain_not_done,
tf.shape(n_state)),
n_state,
c_state)
new_state_parts.append(new_state_part)
return (
angle,
angle_min,
angle_max,
new_state_parts,
self.log_likelihood_fn(*new_state_parts) # pylint: disable=not-callable
)
[
next_angle,
_,
_,
next_state_parts,
next_log_likelihood,
] = tf.while_loop(
cond=chain_not_done,
body=sample_next_angle,
loop_vars=[
starting_angle,
starting_angle_min,
starting_angle_max,
starting_state_parts,
starting_log_likelihood
])
return [
next_state_parts if mcmc_util.is_list_like(
current_state) else next_state_parts[0],
EllipticalSliceSamplerKernelResults(
log_likelihood=next_log_likelihood,
angle=next_angle,
normal_samples=normal_samples,
),
]
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L352-L382
|
def load_string(self,
string_data,
key,
bucket_name=None,
replace=False,
encrypt=False,
encoding='utf-8'):
"""
"""
self.load_bytes(string_data.encode(encoding),
key=key,
bucket_name=bucket_name,
replace=replace,
encrypt=encrypt)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/datastore_hook.py#L213-L233
|
def delete_operation(self, name):
"""
"""
conn = self.get_conn()
resp = (conn
.projects()
.operations()
.delete(name=name)
.execute(num_retries=self.num_retries))
return resp
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/decomposition.py#L109-L219
|
def decompose_by_component(model, observed_time_series, parameter_samples):
"""
"""
with tf.compat.v1.name_scope('decompose_by_component',
values=[observed_time_series]):
[
observed_time_series,
is_missing
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
# Run smoothing over the training timesteps to extract the
# posterior on latents.
num_timesteps = dist_util.prefer_static_value(
tf.shape(input=observed_time_series))[-2]
ssm = model.make_state_space_model(num_timesteps=num_timesteps,
param_vals=parameter_samples)
posterior_means, posterior_covs = ssm.posterior_marginals(
observed_time_series, mask=is_missing)
return _decompose_from_posterior_marginals(
model, posterior_means, posterior_covs, parameter_samples)
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/fitting.py#L92-L264
|
def build_factored_variational_loss(model,
observed_time_series,
init_batch_shape=(),
seed=None,
name=None):
"""
"""
with tf.compat.v1.name_scope(
name, 'build_factored_variational_loss',
values=[observed_time_series]) as name:
seed = tfd.SeedStream(
seed, salt='StructuralTimeSeries_build_factored_variational_loss')
variational_distributions = collections.OrderedDict()
variational_samples = []
for param in model.parameters:
def initial_loc_fn(param):
return sample_uniform_initial_state(
param, return_constrained=True,
init_sample_shape=init_batch_shape,
seed=seed())
q = _build_trainable_posterior(param, initial_loc_fn=initial_loc_fn)
variational_distributions[param.name] = q
variational_samples.append(q.sample(seed=seed()))
# Multiple initializations (similar to HMC chains) manifest as an extra
# param batch dimension, so we need to add corresponding batch dimension(s)
# to `observed_time_series`.
observed_time_series = sts_util.pad_batch_dimension_for_multiple_chains(
observed_time_series, model, chain_batch_shape=init_batch_shape)
# Construct the variational bound.
log_prob_fn = model.joint_log_prob(observed_time_series)
expected_log_joint = log_prob_fn(*variational_samples)
entropy = tf.reduce_sum(
input_tensor=[
-q.log_prob(sample) for (q, sample) in zip(
variational_distributions.values(), variational_samples)
],
axis=0)
variational_loss = -(expected_log_joint + entropy) # -ELBO
return variational_loss, variational_distributions
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/sensors/aws_glue_catalog_partition_sensor.py#L70-L81
|
def poke(self, context):
"""
"""
if '.' in self.table_name:
self.database_name, self.table_name = self.table_name.split('.')
self.log.info(
'Poking for table %s. %s, expression %s', self.database_name, self.table_name, self.expression
)
return self.get_hook().check_for_partition(
self.database_name, self.table_name, self.expression)
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/pareto.py#L214-L245
|
def _extend_support(self, x, f, alt):
"""
"""
# We need to do a series of broadcasts for the tf.where.
scale = self.scale + tf.zeros_like(self.concentration)
is_invalid = x < scale
scale = scale + tf.zeros_like(x)
x = x + tf.zeros_like(scale)
# We need to do this to ensure gradients are sound.
y = f(tf.where(is_invalid, scale, x))
if alt == 0.:
alt = tf.zeros_like(y)
elif alt == 1.:
alt = tf.ones_like(y)
else:
alt = tf.fill(
dims=tf.shape(input=y),
value=dtype_util.as_numpy_dtype(self.dtype)(alt))
return tf.where(is_invalid, alt, y)
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/seasonal.py#L573-L604
|
def build_seasonal_transition_matrix(
num_seasons, is_last_day_of_season, dtype,
basis_change_matrix=None, basis_change_matrix_inv=None):
""""""
with tf.compat.v1.name_scope('build_seasonal_transition_matrix'):
# If the season is changing, the transition matrix permutes the latent
# state to shift all seasons up by a dimension, and sends the current
# season's effect to the bottom.
seasonal_permutation = np.concatenate(
[np.arange(1, num_seasons), [0]], axis=0)
seasonal_permutation_matrix = tf.constant(
np.eye(num_seasons)[seasonal_permutation], dtype=dtype)
# Optionally transform the transition matrix into a reparameterized space,
# enforcing the zero-sum constraint for ConstrainedSeasonalStateSpaceModel.
if basis_change_matrix is not None:
seasonal_permutation_matrix = tf.matmul(
basis_change_matrix,
tf.matmul(seasonal_permutation_matrix, basis_change_matrix_inv))
identity_matrix = tf.eye(
tf.shape(input=seasonal_permutation_matrix)[-1], dtype=dtype)
def seasonal_transition_matrix(t):
return tf.linalg.LinearOperatorFullMatrix(
matrix=dist_util.pick_scalar_condition(
is_last_day_of_season(t),
seasonal_permutation_matrix,
identity_matrix))
return seasonal_transition_matrix
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/cholesky_outer_product.py#L190-L217
|
def _make_columnar(self, x):
"""
"""
if tensorshape_util.rank(x.shape) is not None:
if tensorshape_util.rank(x.shape) == 1:
x = x[tf.newaxis, :]
return x
shape = tf.shape(input=x)
maybe_expanded_shape = tf.concat([
shape[:-1],
distribution_util.pick_vector(
tf.equal(tf.rank(x), 1), [1], np.array([], dtype=np.int32)),
shape[-1:],
], 0)
return tf.reshape(x, maybe_expanded_shape)
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/criterion.py#L86-L96
|
def of(cls, jcriterion, bigdl_type="float"):
"""
"""
criterion = Criterion(bigdl_type, jcriterion)
criterion.value = jcriterion
criterion.bigdl_type = bigdl_type
return criterion
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L290-L295
|
def get_predict(self, key="predict"):
"""
"""
predicts = callBigDlFunc(self.bigdl_type, "distributedImageFrameToPredict", self.value, key)
return predicts.map(lambda predict: (predict[0], predict[1].to_ndarray()) if predict[1] else (predict[0], None))
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/masked_autoregressive.py#L903-L954
|
def _create_degrees(input_size,
hidden_units=None,
input_order="left-to-right",
hidden_degrees="equal"):
"""
"""
input_order = _create_input_order(input_size, input_order)
degrees = [input_order]
if hidden_units is None:
hidden_units = []
for units in hidden_units:
if isinstance(hidden_degrees, six.string_types):
if hidden_degrees == "random":
# samples from: [low, high)
degrees.append(
np.random.randint(low=min(np.min(degrees[-1]), input_size - 1),
high=input_size,
size=units))
elif hidden_degrees == "equal":
min_degree = min(np.min(degrees[-1]), input_size - 1)
degrees.append(np.maximum(
min_degree,
# Evenly divide the range `[1, input_size - 1]` in to `units + 1`
# segments, and pick the boundaries between the segments as degrees.
np.ceil(np.arange(1, units + 1)
* (input_size - 1) / float(units + 1)).astype(np.int32)))
else:
raise ValueError('Invalid hidden order: "{}".'.format(hidden_degrees))
return degrees
|
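To make the "equal" branch above concrete, here is a hand-computed NumPy sketch for `input_size=3` with one hidden layer of 4 units, assuming a left-to-right input ordering of `[1, 2, 3]` (the input-order helper is not shown above, so that ordering is an assumption):

```python
import numpy as np

input_size = 3
units = 4
input_order = np.arange(1, input_size + 1)  # assumed left-to-right order: [1, 2, 3]
degrees = [input_order]

# "equal" strategy: spread degrees evenly over [1, input_size - 1].
min_degree = min(np.min(degrees[-1]), input_size - 1)  # 1
hidden = np.maximum(
    min_degree,
    np.ceil(np.arange(1, units + 1) * (input_size - 1) / float(units + 1)).astype(np.int32))
degrees.append(hidden)

print(degrees[1])  # [1 1 2 2]
```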
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L414-L424
|
def predict_class(self, features):
"""
"""
if isinstance(features, RDD):
return self.predict_class_distributed(features)
else:
return self.predict_class_local(features)
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/commands/train.py#L61-L66
|
def get_iterator_from_config(config: dict, data: dict):
""""""
iterator_config = config['dataset_iterator']
iterator: Union[DataLearningIterator, DataFittingIterator] = from_params(iterator_config,
data=data)
return iterator
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/kullback_leibler.py#L34-L47
|
def _registered_kl(type_a, type_b):
""""""
hierarchy_a = tf_inspect.getmro(type_a)
hierarchy_b = tf_inspect.getmro(type_b)
dist_to_children = None
kl_fn = None
for mro_to_a, parent_a in enumerate(hierarchy_a):
for mro_to_b, parent_b in enumerate(hierarchy_b):
candidate_dist = mro_to_a + mro_to_b
candidate_kl_fn = _DIVERGENCES.get((parent_a, parent_b), None)
if not kl_fn or (candidate_kl_fn and candidate_dist < dist_to_children):
dist_to_children = candidate_dist
kl_fn = candidate_kl_fn
return kl_fn
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L911-L923
|
def retrieve_connection(self, session=None):
"""
"""
self.log.info("Retrieving connection %s", self.db_conn_id)
connections = session.query(Connection).filter(
Connection.conn_id == self.db_conn_id)
if connections.count():
return connections[0]
return None
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1415-L1561
|
def fill_triangular(x, upper=False, name=None):
"""
with tf.name_scope(name or "fill_triangular"):
x = tf.convert_to_tensor(value=x, name="x")
m = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(x.shape, 1)[-1])
if m is not None:
# Formula derived by solving for n: m = n(n+1)/2.
m = np.int32(m)
n = np.sqrt(0.25 + 2. * m) - 0.5
if n != np.floor(n):
raise ValueError("Input right-most shape ({}) does not "
"correspond to a triangular matrix.".format(m))
n = np.int32(n)
static_final_shape = x.shape[:-1].concatenate([n, n])
else:
m = tf.shape(input=x)[-1]
# For derivation, see above. Casting automatically lops off the 0.5, so we
# omit it. We don't validate n is an integer because this has
# graph-execution cost; an error will be thrown from the reshape, below.
n = tf.cast(
tf.sqrt(0.25 + tf.cast(2 * m, dtype=tf.float32)), dtype=tf.int32)
static_final_shape = tensorshape_util.with_rank_at_least(
x.shape, 1)[:-1].concatenate([None, None])
# Try it out in numpy:
# n = 3
# x = np.arange(n * (n + 1) / 2)
# m = x.shape[0]
# n = np.int32(np.sqrt(.25 + 2 * m) - .5)
# x_tail = x[(m - (n**2 - m)):]
# np.concatenate([x_tail, x[::-1]], 0).reshape(n, n) # lower
# # ==> array([[3, 4, 5],
# [5, 4, 3],
# [2, 1, 0]])
# np.concatenate([x, x_tail[::-1]], 0).reshape(n, n) # upper
# # ==> array([[0, 1, 2],
# [3, 4, 5],
# [5, 4, 3]])
#
# Note that we can't simply do `x[..., -(n**2 - m):]` because this doesn't
# correctly handle `m == n == 1`. Hence, we do nonnegative indexing.
# Furthermore observe that:
# m - (n**2 - m)
# = n**2 / 2 + n / 2 - (n**2 - n**2 / 2 + n / 2)
# = 2 (n**2 / 2 + n / 2) - n**2
# = n**2 + n - n**2
# = n
ndims = prefer_static_rank(x)
if upper:
x_list = [x, tf.reverse(x[..., n:], axis=[ndims - 1])]
else:
x_list = [x[..., n:], tf.reverse(x, axis=[ndims - 1])]
new_shape = (
tensorshape_util.as_list(static_final_shape)
if tensorshape_util.is_fully_defined(static_final_shape) else tf.concat(
[tf.shape(input=x)[:-1], [n, n]], axis=0))
x = tf.reshape(tf.concat(x_list, axis=-1), new_shape)
x = tf.linalg.band_part(
x, num_lower=(0 if upper else -1), num_upper=(-1 if upper else 0))
tensorshape_util.set_shape(x, static_final_shape)
return x
|
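The NumPy recipe in the comments above can be run directly; for `n = 3` and `x = [0, 1, 2, 3, 4, 5]` the lower- and upper-triangular fills come out as follows (a plain NumPy sketch of the commented derivation, not the TF code path):

```python
import numpy as np

x = np.arange(6)                           # n * (n + 1) / 2 = 6 entries for n = 3
m = x.shape[0]
n = np.int32(np.sqrt(0.25 + 2 * m) - 0.5)  # 3

x_tail = x[(m - (n**2 - m)):]              # the last n entries: [3, 4, 5]
lower = np.tril(np.concatenate([x_tail, x[::-1]], 0).reshape(n, n))
upper = np.triu(np.concatenate([x, x_tail[::-1]], 0).reshape(n, n))
print(lower)   # [[3 0 0], [5 4 0], [2 1 0]]
print(upper)   # [[0 1 2], [0 4 5], [0 0 3]]
```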
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L630-L635
|
def callJavaFunc(func, *args):
""" """
gateway = _get_gateway()
args = [_py2java(gateway, a) for a in args]
result = func(*args)
return _java2py(gateway, result)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L984-L990
|
def reserve_free_tcp_port(self):
"""
"""
self.reserved_tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.reserved_tcp_socket.bind(('127.0.0.1', 0))
self.sql_proxy_tcp_port = self.reserved_tcp_socket.getsockname()[1]
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L580-L602
|
def harvest_simple_dags(self):
"""
"""
# Metadata and results to be harvested can be inconsistent,
# but it should not be a big problem.
self._sync_metadata()
# Heartbeating after syncing metadata so we do not restart manager
# if it processed all files for max_run times and exit normally.
self._heartbeat_manager()
simple_dags = []
# multiprocessing.Queue().qsize will not work on MacOS.
if sys.platform == "darwin":
qsize = self._result_count
else:
qsize = self._result_queue.qsize()
for _ in range(qsize):
simple_dags.append(self._result_queue.get())
self._result_count = 0
return simple_dags
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/masked_autoregressive.py#L854-L863
|
def call(self, x):
""""""
with tf.compat.v2.name_scope(self.name or "AutoregressiveLayer_call"):
x = tf.convert_to_tensor(value=x, dtype=self.dtype, name="x")
input_shape = tf.shape(input=x)
# TODO(b/67594795): Better support for dynamic shapes.
if tensorshape_util.rank(x.shape) == 1:
x = x[tf.newaxis, ...]
return tf.reshape(self._network(x),
tf.concat([input_shape, [self._params]], axis=0))
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs.py#L494-L506
|
def _batch_transpose(mat):
"""
"""
n = distribution_util.prefer_static_rank(mat)
perm = tf.range(n)
perm = tf.concat([perm[:-2], [perm[-1], perm[-2]]], axis=0)
return tf.transpose(a=mat, perm=perm)
|
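The permutation built above simply swaps the last two axes of a batch of matrices; the NumPy equivalent is a one-liner (sketch):

```python
import numpy as np

mat = np.arange(24).reshape(2, 3, 4)        # a batch of two 3x4 matrices
batch_transposed = np.swapaxes(mat, -1, -2)
print(batch_transposed.shape)               # (2, 4, 3)
```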
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L247-L290
|
def smart_for_loop(loop_num_iter, body_fn, initial_loop_vars,
parallel_iterations=10, name=None):
"""
"""
with tf.compat.v1.name_scope(name, 'smart_for_loop',
[loop_num_iter, initial_loop_vars]):
loop_num_iter_ = tf.get_static_value(loop_num_iter)
if (loop_num_iter_ is None or tf.executing_eagerly() or
control_flow_util.GraphOrParentsInXlaContext(
tf.compat.v1.get_default_graph())):
# Cast to int32 to run the comparison against i in host memory,
# where while/LoopCond needs it.
loop_num_iter = tf.cast(loop_num_iter, dtype=tf.int32)
return tf.while_loop(
cond=lambda i, *args: i < loop_num_iter,
body=lambda i, *args: [i + 1] + list(body_fn(*args)),
loop_vars=[np.int32(0)] + initial_loop_vars,
parallel_iterations=parallel_iterations
)[1:]
result = initial_loop_vars
for _ in range(loop_num_iter_):
result = body_fn(*result)
return result
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L329-L360
|
def evaluate(self, *args):
"""
"""
if len(args) == 0:
callBigDlFunc(self.bigdl_type,
"evaluate", self.value)
return self
elif len(args) == 3:
dataset, batch_size, val_methods = args
if (isinstance(dataset, ImageFrame)):
return callBigDlFunc(self.bigdl_type,
"modelEvaluateImageFrame",
self.value,
dataset, batch_size, val_methods)
else:
return callBigDlFunc(self.bigdl_type,
"modelEvaluate",
self.value,
dataset, batch_size, val_methods)
else:
raise Exception("Error when calling evaluate(): it takes no argument or exactly three arguments only")
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_speech_to_text_hook.py#L42-L51
|
def get_conn(self):
"""
"""
if not self._client:
self._client = SpeechClient(credentials=self._get_credentials())
return self._client
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/hmc.py#L60-L162
|
def make_simple_step_size_update_policy(num_adaptation_steps,
target_rate=0.75,
decrement_multiplier=0.01,
increment_multiplier=0.01,
step_counter=None):
"""
"""
if step_counter is None and num_adaptation_steps is not None:
step_counter = tf.compat.v1.get_variable(
name='step_size_adaptation_step_counter',
initializer=np.array(-1, dtype=np.int32),
# Specify the dtype for variable sharing to work correctly
# (b/120599991).
dtype=tf.int32,
trainable=False,
use_resource=True)
def step_size_simple_update_fn(step_size_var, kernel_results):
"""Updates (list of) `step_size` using a standard adaptive MCMC procedure.
Args:
step_size_var: (List of) `tf.Variable`s representing the per `state_part`
HMC `step_size`.
kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from most recent call to `one_step`.
Returns:
step_size_assign: (List of) `Tensor`(s) representing updated
`step_size_var`(s).
"""
if kernel_results is None:
if mcmc_util.is_list_like(step_size_var):
return [tf.identity(ss) for ss in step_size_var]
return tf.identity(step_size_var)
log_n = tf.math.log(
tf.cast(
tf.size(input=kernel_results.log_accept_ratio),
kernel_results.log_accept_ratio.dtype))
log_mean_accept_ratio = tf.reduce_logsumexp(
input_tensor=tf.minimum(kernel_results.log_accept_ratio, 0.)) - log_n
adjustment = tf.where(
log_mean_accept_ratio < tf.cast(
tf.math.log(target_rate), log_mean_accept_ratio.dtype),
-decrement_multiplier / (1. + decrement_multiplier),
increment_multiplier)
def build_assign_op():
if mcmc_util.is_list_like(step_size_var):
return [
ss.assign_add(ss * tf.cast(adjustment, ss.dtype))
for ss in step_size_var
]
return step_size_var.assign_add(
step_size_var * tf.cast(adjustment, step_size_var.dtype))
if num_adaptation_steps is None:
return build_assign_op()
else:
with tf.control_dependencies([step_counter.assign_add(1)]):
return tf.cond(
pred=step_counter < num_adaptation_steps,
true_fn=build_assign_op,
false_fn=lambda: step_size_var)
return step_size_simple_update_fn
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/nelder_mead.py#L459-L470
|
def _accept_reflected_fn(simplex,
objective_values,
worst_index,
reflected,
objective_at_reflected):
""""""
def _replace_worst_with_reflected():
next_simplex = _replace_at_index(simplex, worst_index, reflected)
next_objective_values = _replace_at_index(objective_values, worst_index,
objective_at_reflected)
return False, next_simplex, next_objective_values, 0
return _replace_worst_with_reflected
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/metrics/fmeasure.py#L60-L76
|
def round_f1_macro(y_true, y_predicted):
"""
"""
try:
predictions = [np.round(x) for x in y_predicted]
except TypeError:
predictions = y_predicted
return f1_score(np.array(y_true), np.array(predictions), average="macro")
|
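A tiny usage sketch of the metric above, with hand-made labels and probability-like predictions (values are illustrative):

```python
import numpy as np
from sklearn.metrics import f1_score

y_true = [0, 1, 1, 0, 1]
y_predicted = [0.1, 0.8, 0.4, 0.3, 0.9]  # rounds to [0, 1, 0, 0, 1]

predictions = [np.round(x) for x in y_predicted]
print(f1_score(np.array(y_true), np.array(predictions), average="macro"))  # 0.8
```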
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L283-L288
|
def get_label(self):
"""
"""
tensor_rdd = callBigDlFunc(self.bigdl_type, "distributedImageFrameToLabelTensorRdd", self.value)
return tensor_rdd.map(lambda tensor: tensor.to_ndarray())
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dagrun.py#L191-L206
|
def get_task_instance(self, task_id, session=None):
"""
"""
from airflow.models.taskinstance import TaskInstance # Avoid circular import
TI = TaskInstance
ti = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.execution_date == self.execution_date,
TI.task_id == task_id
).first()
return ti
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/eight_schools_hmc.py#L63-L129
|
def benchmark_eight_schools_hmc(
num_results=int(5e3),
num_burnin_steps=int(3e3),
num_leapfrog_steps=3,
step_size=0.4):
""""""
num_schools = 8
treatment_effects = tf.constant(
[28, 8, -3, 7, -1, 1, 18, 12],
dtype=np.float32,
name='treatment_effects')
treatment_stddevs = tf.constant(
[15, 10, 16, 11, 9, 11, 10, 18],
dtype=np.float32,
name='treatment_stddevs')
def unnormalized_posterior_log_prob(
avg_effect, avg_stddev, school_effects_standard):
"""Eight-schools unnormalized log posterior."""
return eight_schools_joint_log_prob(
treatment_effects, treatment_stddevs,
avg_effect, avg_stddev, school_effects_standard)
if tf.executing_eagerly():
sample_chain = tf.function(tfp.mcmc.sample_chain)
else:
sample_chain = tfp.mcmc.sample_chain
def computation():
"""The benchmark computation."""
_, kernel_results = sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=(
tf.zeros([], name='init_avg_effect'),
tf.zeros([], name='init_avg_stddev'),
tf.ones([num_schools], name='init_school_effects_standard'),
),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_posterior_log_prob,
step_size=step_size,
num_leapfrog_steps=num_leapfrog_steps))
return kernel_results.is_accepted
# Let's force evaluation of graph to ensure build time is not part of our time
# trial.
is_accepted_tensor = computation()
if not tf.executing_eagerly():
session = tf.compat.v1.Session()
session.run(is_accepted_tensor)
start_time = time.time()
if tf.executing_eagerly():
is_accepted = computation()
else:
is_accepted = session.run(is_accepted_tensor)
wall_time = time.time() - start_time
num_accepted = np.sum(is_accepted)
acceptance_rate = np.float32(num_accepted) / np.float32(num_results)
return dict(
iters=(num_results + num_burnin_steps) * num_leapfrog_steps,
extras={'acceptance_rate': acceptance_rate},
wall_time=wall_time)
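A short usage sketch reading the fields the benchmark returns:

results = benchmark_eight_schools_hmc(num_results=500, num_burnin_steps=300)
print('acceptance rate:', results['extras']['acceptance_rate'])
print('leapfrog steps per second:', results['iters'] / results['wall_time'])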
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L1285-L1320
|
def end(self):
"""
"""
pids_to_kill = self.get_all_pids()
if len(pids_to_kill) > 0:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.log.info("Terminating child PID: %s", child.pid)
child.terminate()
# TODO: Remove magic number
timeout = 5
self.log.info("Waiting up to %s seconds for processes to exit...", timeout)
try:
psutil.wait_procs(
child_processes, timeout=timeout,
callback=lambda x: self.log.info('Terminated PID %s', x.pid))
except psutil.TimeoutExpired:
self.log.debug("Ran out of time while waiting for processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if len(child_processes) > 0:
self.log.info("SIGKILL processes that did not terminate gracefully")
for child in child_processes:
self.log.info("Killing child PID: %s", child.pid)
child.kill()
child.wait()
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L827-L837
|
def load_caffe_model(defPath, modelPath, bigdl_type="float"):
"""
"""
jmodel = callBigDlFunc(bigdl_type, "loadCaffeModel", defPath, modelPath)
return Layer.of(jmodel)
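A hedged usage sketch, assuming this is exposed as a static method on `Model` and that the paths point at a real prototxt/caffemodel pair:

from bigdl.nn.layer import Model

# Placeholder paths for a real Caffe definition/weights pair.
model = Model.load_caffe_model("deploy.prototxt", "weights.caffemodel")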
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/dtype_util.py#L90-L95
|
def is_complex(dtype):
""""""
dtype = tf.as_dtype(dtype)
if hasattr(dtype, 'is_complex'):
return dtype.is_complex
return np.issubdtype(np.dtype(dtype), np.complex)
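For example (note that `np.complex` was an alias for the builtin `complex` and is removed in newer NumPy releases; `np.complexfloating` is the modern spelling):

import numpy as np
import tensorflow as tf

print(is_complex(tf.complex64))    # True
print(is_complex(tf.float32))      # False
print(is_complex(np.complex128))   # True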
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L414-L421
|
def error(self, session=None):
"""
"""
self.log.error("Recording the task instance as FAILED")
self.state = State.FAILED
session.merge(self)
session.commit()
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L395-L407
|
def hflip(img):
"""
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.transpose(Image.FLIP_LEFT_RIGHT)
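A minimal usage sketch ('photo.jpg' is a placeholder path):

from PIL import Image

img = Image.open('photo.jpg')
mirrored = hflip(img)
mirrored.save('photo_flipped.jpg')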
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L586-L593
|
def unfreeze(self, names=None):
"""
"""
callBigDlFunc(self.bigdl_type, "unFreeze", self.value, names)
return self
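A hedged usage sketch, assuming the layers were given names with `setName` and that `freeze` is the matching counterpart:

# Placeholder layer names; freeze both, then resume training only "conv2".
model.freeze(["conv1", "conv2"])
model.unfreeze(["conv2"])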
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/layers/tf_layers.py#L447-L472
|
def additive_self_attention(units, n_hidden=None, n_output_features=None, activation=None):
"""
"""
n_input_features = units.get_shape().as_list()[2]
if n_hidden is None:
n_hidden = n_input_features
if n_output_features is None:
n_output_features = n_input_features
units_pairs = tf.concat([expand_tile(units, 1), expand_tile(units, 2)], 3)
query = tf.layers.dense(units_pairs, n_hidden, activation=tf.tanh, kernel_initializer=INITIALIZER())
attention = tf.nn.softmax(tf.layers.dense(query, 1), dim=2)
attended_units = tf.reduce_sum(attention * expand_tile(units, 1), axis=2)
output = tf.layers.dense(attended_units, n_output_features, activation, kernel_initializer=INITIALIZER())
return output
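A hedged TF1-style usage sketch (shapes are illustrative; the output keeps the input's batch and time dimensions):

import tensorflow as tf

units = tf.placeholder(tf.float32, [None, 20, 128])   # [batch, time, features]
attended = additive_self_attention(units, n_hidden=64, activation=tf.nn.relu)
# attended: [batch, 20, 128] here, since n_output_features defaults to the input size.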
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/fitting.py#L285-L537
|
def fit_with_hmc(model,
observed_time_series,
num_results=100,
num_warmup_steps=50,
num_leapfrog_steps=15,
initial_state=None,
initial_step_size=None,
chain_batch_shape=(),
num_variational_steps=150,
variational_optimizer=None,
seed=None,
name=None):
"""
"""
with tf.compat.v1.name_scope(
name, 'fit_with_hmc', values=[observed_time_series]) as name:
seed = tfd.SeedStream(seed, salt='StructuralTimeSeries_fit_with_hmc')
# Initialize state and step sizes from a variational posterior if not
# specified.
if initial_step_size is None or initial_state is None:
# To avoid threading variational distributions through the training
# while loop, we build our own copy here. `make_template` ensures
# that our variational distributions share the optimized parameters.
def make_variational():
return build_factored_variational_loss(
model, observed_time_series,
init_batch_shape=chain_batch_shape, seed=seed())
make_variational = tf.compat.v1.make_template('make_variational',
make_variational)
_, variational_distributions = make_variational()
minimize_op = _minimize_in_graph(
build_loss_fn=lambda: make_variational()[0], # return just the loss.
num_steps=num_variational_steps,
optimizer=variational_optimizer)
with tf.control_dependencies([minimize_op]):
if initial_state is None:
initial_state = [tf.stop_gradient(d.sample())
for d in variational_distributions.values()]
# Set step sizes using the unconstrained variational distribution.
if initial_step_size is None:
initial_step_size = [
transformed_q.distribution.stddev()
for transformed_q in variational_distributions.values()]
# Multiple chains manifest as an extra param batch dimension, so we need to
# add a corresponding batch dimension to `observed_time_series`.
observed_time_series = sts_util.pad_batch_dimension_for_multiple_chains(
observed_time_series, model, chain_batch_shape=chain_batch_shape)
# Run HMC to sample from the posterior on parameters.
samples, kernel_results = mcmc.sample_chain(
num_results=num_results,
current_state=initial_state,
num_burnin_steps=num_warmup_steps,
kernel=mcmc.SimpleStepSizeAdaptation(
inner_kernel=mcmc.TransformedTransitionKernel(
inner_kernel=mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=model.joint_log_prob(
observed_time_series),
step_size=initial_step_size,
num_leapfrog_steps=num_leapfrog_steps,
state_gradients_are_stopped=True,
seed=seed()),
bijector=[param.bijector for param in model.parameters]),
num_adaptation_steps=int(num_warmup_steps * 0.8),
adaptation_rate=tf.convert_to_tensor(
value=0.1, dtype=initial_state[0].dtype)),
parallel_iterations=1 if seed is not None else 10)
return samples, kernel_results
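A hedged usage sketch with a single-component model (`observed_series` is a placeholder 1-D series):

import tensorflow_probability as tfp

model = tfp.sts.LocalLinearTrend(observed_time_series=observed_series)
samples, kernel_results = tfp.sts.fit_with_hmc(
    model, observed_series, num_results=200, num_warmup_steps=100)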
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/macros/__init__.py#L49-L66
|
def ds_format(ds, input_format, output_format):
"""
"""
return datetime.strptime(ds, input_format).strftime(output_format)
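For example:

ds_format('2019-01-05', '%Y-%m-%d', '%d-%m-%Y')   # '05-01-2019'
ds_format('1/5/2019', '%m/%d/%Y', '%Y-%m-%d')     # '2019-01-05'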
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/cassandra_hook.py#L108-L115
|
def get_conn(self):
"""
"""
if self.session and not self.session.is_shutdown:
return self.session
self.session = self.cluster.connect(self.keyspace)
return self.session
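A hedged usage sketch (connection id and query are placeholders):

from airflow.contrib.hooks.cassandra_hook import CassandraHook

hook = CassandraHook(cassandra_conn_id='cassandra_default')
session = hook.get_conn()
rows = session.execute('SELECT release_version FROM system.local')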
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L711-L735
|
def _lu_solve_assertions(lower_upper, perm, rhs, validate_args):
""""""
assertions = _lu_reconstruct_assertions(lower_upper, perm, validate_args)
message = 'Input `rhs` must have at least 2 dimensions.'
if rhs.shape.ndims is not None:
if rhs.shape.ndims < 2:
raise ValueError(message)
elif validate_args:
assertions.append(
tf.compat.v1.assert_rank_at_least(rhs, rank=2, message=message))
message = '`lower_upper.shape[-1]` must equal `rhs.shape[-1]`.'
if (tf.compat.dimension_value(lower_upper.shape[-1]) is not None and
tf.compat.dimension_value(rhs.shape[-2]) is not None):
if lower_upper.shape[-1] != rhs.shape[-2]:
raise ValueError(message)
elif validate_args:
assertions.append(
tf.compat.v1.assert_equal(
tf.shape(input=lower_upper)[-1],
tf.shape(input=rhs)[-2],
message=message))
return assertions
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L213-L233
|
def construct_task_instance(self, session=None, lock_for_update=False):
"""
"""
TI = airflow.models.TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self._dag_id,
TI.task_id == self._task_id,
TI.execution_date == self._execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
return ti
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/vector_diffeomixture.py#L108-L199
|
def quadrature_scheme_softmaxnormal_quantiles(
normal_loc, normal_scale, quadrature_size,
validate_args=False, name=None):
"""
"""
with tf.name_scope(name or "softmax_normal_grid_and_probs"):
normal_loc = tf.convert_to_tensor(value=normal_loc, name="normal_loc")
dt = dtype_util.base_dtype(normal_loc.dtype)
normal_scale = tf.convert_to_tensor(
value=normal_scale, dtype=dt, name="normal_scale")
normal_scale = maybe_check_quadrature_param(
normal_scale, "normal_scale", validate_args)
dist = normal.Normal(loc=normal_loc, scale=normal_scale)
def _get_batch_ndims():
"""Helper to get rank(dist.batch_shape), statically if possible."""
ndims = tensorshape_util.rank(dist.batch_shape)
if ndims is None:
ndims = tf.shape(input=dist.batch_shape_tensor())[0]
return ndims
batch_ndims = _get_batch_ndims()
def _get_final_shape(qs):
"""Helper to build `TensorShape`."""
bs = tensorshape_util.with_rank_at_least(dist.batch_shape, 1)
num_components = tf.compat.dimension_value(bs[-1])
if num_components is not None:
num_components += 1
tail = tf.TensorShape([num_components, qs])
return bs[:-1].concatenate(tail)
def _compute_quantiles():
"""Helper to build quantiles."""
# Omit {0, 1} since they might lead to Inf/NaN.
zero = tf.zeros([], dtype=dist.dtype)
edges = tf.linspace(zero, 1., quadrature_size + 3)[1:-1]
# Expand edges so its broadcast across batch dims.
edges = tf.reshape(
edges,
shape=tf.concat(
[[-1], tf.ones([batch_ndims], dtype=tf.int32)], axis=0))
quantiles = dist.quantile(edges)
quantiles = softmax_centered_bijector.SoftmaxCentered().forward(quantiles)
# Cyclically permute left by one.
perm = tf.concat([tf.range(1, 1 + batch_ndims), [0]], axis=0)
quantiles = tf.transpose(a=quantiles, perm=perm)
tensorshape_util.set_shape(
quantiles, _get_final_shape(quadrature_size + 1))
return quantiles
quantiles = _compute_quantiles()
# Compute grid as quantile midpoints.
grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
# Set shape hints.
tensorshape_util.set_shape(grid, _get_final_shape(quadrature_size))
# By construction probs is constant, i.e., `1 / quadrature_size`. This is
# important, because non-constant probs leads to non-reparameterizable
# samples.
probs = tf.fill(
dims=[quadrature_size], value=1. / tf.cast(quadrature_size, dist.dtype))
return grid, probs
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/disentangled_vae.py#L283-L315
|
def call(self, inputs, state):
"""
"""
# In order to allow the user to pass in a single example without a batch
# dimension, we always expand the input to at least two dimensions, then
# fix the output shape to remove the batch dimension if necessary.
original_shape = inputs.shape
if len(original_shape) < 2:
inputs = tf.reshape(inputs, [1, -1])
out, state = self.lstm_cell(inputs, state)
out = self.output_layer(out)
correct_shape = tf.concat((original_shape[:-1], tf.shape(input=out)[-1:]),
0)
out = tf.reshape(out, correct_shape)
loc = out[..., :self.dimensions]
scale_diag = tf.nn.softplus(out[..., self.dimensions:]) + 1e-5 # keep > 0
return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag), state
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mysql_to_gcs.py#L134-L142
|
def _query_mysql(self):
"""
"""
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
conn = mysql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
return cursor
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/keras/converter.py#L138-L152
|
def get_weights_from_kmodel(kmodel):
"""
"""
layers_with_weights = [layer for layer in kmodel.layers if layer.weights]
bweights = []
for klayer in layers_with_weights:
# bws would be [weights, bias] or [weights]
bws = WeightsConverter.get_bigdl_weights_from_klayer(klayer)
for w in bws:
bweights.append(w)
return bweights
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/presto_hook.py#L80-L88
|
def get_records(self, hql, parameters=None):
"""
"""
try:
return super().get_records(
self._strip_sql(hql), parameters)
except DatabaseError as e:
raise PrestoException(self._get_pretty_exception_message(e))
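A short hedged sketch (connection id and query are placeholders):

from airflow.hooks.presto_hook import PrestoHook

hook = PrestoHook(presto_conn_id='presto_default')
rows = hook.get_records('SELECT count(*) FROM my_schema.my_table')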
|