Dataset preview columns: _id (string, lengths 98 to 184 characters), text (string, lengths 91 to 10.9k characters).
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L887-L891
def _sort_tensor(tensor):
  """"""
  sorted_, _ = tf.nn.top_k(tensor, k=tf.shape(input=tensor)[-1])
  sorted_.set_shape(tensor.shape)
  return sorted_
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/prefer_static.py#L100-L117
def rank_from_shape(shape_tensor_fn, tensorshape=None):
  """"""
  if tensorshape is None:
    shape_tensor = (shape_tensor_fn() if callable(shape_tensor_fn)
                    else shape_tensor_fn)
    if (hasattr(shape_tensor, 'shape') and
        hasattr(shape_tensor.shape, 'num_elements')):
      ndims_ = tensorshape_util.num_elements(shape_tensor.shape)
    else:
      ndims_ = len(shape_tensor)
    ndims_fn = lambda: tf.size(input=shape_tensor)
  else:
    ndims_ = tensorshape_util.rank(tensorshape)
    ndims_fn = lambda: tf.size(input=shape_tensor_fn()  # pylint: disable=g-long-lambda
                               if callable(shape_tensor_fn)
                               else shape_tensor_fn)
  return ndims_fn() if ndims_ is None else ndims_
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/backend/numpy/linalg.py#L97-L109
def _matmul(a, b,
            transpose_a=False, transpose_b=False,
            adjoint_a=False, adjoint_b=False,
            a_is_sparse=False, b_is_sparse=False,
            name=None):  # pylint: disable=unused-argument
  """"""
  if a_is_sparse or b_is_sparse:
    raise NotImplementedError('Numpy backend does not support sparse matmul.')
  if transpose_a or adjoint_a:
    a = _matrix_transpose(a, conjugate=adjoint_a)
  if transpose_b or adjoint_b:
    b = _matrix_transpose(b, conjugate=adjoint_b)
  return np.matmul(a, b)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/latent_dirichlet_allocation_edward2.py#L194-L223
def make_lda_variational(activation, num_topics, layer_sizes):
  """ """
  encoder_net = tf.keras.Sequential()
  for num_hidden_units in layer_sizes:
    encoder_net.add(
        tf.keras.layers.Dense(
            num_hidden_units,
            activation=activation,
            kernel_initializer=tf.compat.v1.glorot_normal_initializer()))
  encoder_net.add(
      tf.keras.layers.Dense(
          num_topics,
          activation=tf.nn.softplus,
          kernel_initializer=tf.compat.v1.glorot_normal_initializer()))

  def lda_variational(bag_of_words):
    concentration = _clip_dirichlet_parameters(encoder_net(bag_of_words))
    return ed.Dirichlet(concentration=concentration, name="topics_posterior")

  return lda_variational
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/diagnostic.py#L388-L393
def _axis_size(x, axis=None):
  """"""
  if axis is None:
    return tf.cast(tf.size(input=x), x.dtype)
  return tf.cast(
      tf.reduce_prod(input_tensor=tf.gather(tf.shape(input=x), axis)), x.dtype)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/vector_diffeomixture.py#L936-L941
def concat_vectors(*args):
  """"""
  args_ = [tf.get_static_value(x) for x in args]
  if any(vec is None for vec in args_):
    return tf.concat(args, axis=0)
  return [val for vec in args_ for val in vec]
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/internal/slicing.py#L145-L155
def _apply_single_step(dist, params_event_ndims, slices, params_overrides):
  """"""
  if len(slices) == 1 and slices[0] == Ellipsis:
    # The path used by Distribution.copy: batch_slice(...args..., Ellipsis)
    override_dict = {}
  else:
    override_dict = _slice_params_to_dict(dist, params_event_ndims, slices)
  override_dict.update(params_overrides)
  parameters = dict(dist.parameters, **override_dict)
  new_dist = type(dist)(**parameters)
  return new_dist
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/dbapi_hook.py#L81-L94
def get_pandas_df(self, sql, parameters=None):
    """ """
    import pandas.io.sql as psql

    with closing(self.get_conn()) as conn:
        return psql.read_sql(sql, con=conn, params=parameters)
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/autocomplete.py#L37-L110
def searx_bang(full_query):
    ''''''
    # check if there is a query which can be parsed
    if len(full_query.getSearchQuery()) == 0:
        return []

    results = []

    # check if current query starts with !bang
    first_char = full_query.getSearchQuery()[0]
    if first_char == '!' or first_char == '?':
        if len(full_query.getSearchQuery()) == 1:
            # show some example queries
            # TODO, check if engine is not available
            results.append(first_char + "images")
            results.append(first_char + "wikipedia")
            results.append(first_char + "osm")
        else:
            engine_query = full_query.getSearchQuery()[1:]

            # check if query starts with categorie name
            for categorie in categories:
                if categorie.startswith(engine_query):
                    results.append(first_char + '{categorie}'.format(categorie=categorie))

            # check if query starts with engine name
            for engine in engines:
                if engine.startswith(engine_query.replace('_', ' ')):
                    results.append(first_char + '{engine}'.format(engine=engine.replace(' ', '_')))

            # check if query starts with engine shortcut
            for engine_shortcut in engine_shortcuts:
                if engine_shortcut.startswith(engine_query):
                    results.append(first_char + '{engine_shortcut}'.format(engine_shortcut=engine_shortcut))

    # check if current query starts with :bang
    elif first_char == ':':
        if len(full_query.getSearchQuery()) == 1:
            # show some example queries
            results.append(":en")
            results.append(":en_us")
            results.append(":english")
            results.append(":united_kingdom")
        else:
            engine_query = full_query.getSearchQuery()[1:]

            for lc in language_codes:
                lang_id, lang_name, country, english_name = map(unicode.lower, lc)

                # check if query starts with language-id
                if lang_id.startswith(engine_query):
                    if len(engine_query) <= 2:
                        results.append(u':{lang_id}'.format(lang_id=lang_id.split('-')[0]))
                    else:
                        results.append(u':{lang_id}'.format(lang_id=lang_id))

                # check if query starts with language name
                if lang_name.startswith(engine_query) or english_name.startswith(engine_query):
                    results.append(u':{lang_name}'.format(lang_name=lang_name))

                # check if query starts with country
                if country.startswith(engine_query.replace('_', ' ')):
                    results.append(u':{country}'.format(country=country.replace(' ', '_')))

    # remove duplicates
    result_set = set(results)

    # remove results which are already contained in the query
    for query_part in full_query.query_parts:
        if query_part in result_set:
            result_set.remove(query_part)

    # convert result_set back to list
    return list(result_set)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1701-L1793
def reduce_weighted_logsumexp(logx,
                              w=None,
                              axis=None,
                              keep_dims=False,
                              return_sign=False,
                              name=None):
  """ """
  with tf.name_scope(name or "reduce_weighted_logsumexp"):
    logx = tf.convert_to_tensor(value=logx, name="logx")
    if w is None:
      lswe = tf.reduce_logsumexp(
          input_tensor=logx, axis=axis, keepdims=keep_dims)
      if return_sign:
        sgn = tf.ones_like(lswe)
        return lswe, sgn
      return lswe
    w = tf.convert_to_tensor(value=w, dtype=logx.dtype, name="w")
    log_absw_x = logx + tf.math.log(tf.abs(w))
    max_log_absw_x = tf.reduce_max(
        input_tensor=log_absw_x, axis=axis, keepdims=True)
    # If the largest element is `-inf` or `inf` then we don't bother subtracting
    # off the max. We do this because otherwise we'd get `inf - inf = NaN`. That
    # this is ok follows from the fact that we're actually free to subtract any
    # value we like, so long as we add it back after taking the `log(sum(...))`.
    max_log_absw_x = tf.where(
        tf.math.is_inf(max_log_absw_x), tf.zeros_like(max_log_absw_x),
        max_log_absw_x)
    wx_over_max_absw_x = (tf.sign(w) * tf.exp(log_absw_x - max_log_absw_x))
    sum_wx_over_max_absw_x = tf.reduce_sum(
        input_tensor=wx_over_max_absw_x, axis=axis, keepdims=keep_dims)
    if not keep_dims:
      max_log_absw_x = tf.squeeze(max_log_absw_x, axis)
    sgn = tf.sign(sum_wx_over_max_absw_x)
    lswe = max_log_absw_x + tf.math.log(sgn * sum_wx_over_max_absw_x)
    if return_sign:
      return lswe, sgn
    return lswe
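The stabilization used by reduce_weighted_logsumexp above (factoring the largest |w * exp(logx)| term out of the sum) can be checked with a small NumPy sketch; the input values below are made-up illustrations, not part of the dataset.

import numpy as np

logx = np.array([0.1, -2.0, 3.5])
w = np.array([1.0, -0.5, 2.0])

# Factor out the largest |w * exp(logx)| term before summing, as the TF code does.
log_absw_x = logx + np.log(np.abs(w))
m = np.max(log_absw_x)
s = np.sum(np.sign(w) * np.exp(log_absw_x - m))
lswe, sgn = m + np.log(np.abs(s)), np.sign(s)

# Agrees with the naive (overflow-prone) computation.
assert np.isclose(sgn * np.exp(lswe), np.sum(w * np.exp(logx)))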
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/proximal_hessian.py#L235-L488
def fit_sparse(model_matrix,
               response,
               model,
               model_coefficients_start,
               tolerance,
               l1_regularizer,
               l2_regularizer=None,
               maximum_iterations=None,
               maximum_full_sweeps_per_iteration=1,
               learning_rate=None,
               name=None):
  """"""
  graph_deps = [
      model_matrix,
      response,
      model_coefficients_start,
      l1_regularizer,
      l2_regularizer,
      maximum_iterations,
      maximum_full_sweeps_per_iteration,
      # TODO(b/111925792): Replace `tolerance` arg with something like
      # `convergence_criteria_fn`.
      tolerance,
      learning_rate,
  ]
  with tf.compat.v1.name_scope(name, 'fit_sparse', graph_deps):
    # TODO(b/111922388): Include dispersion and offset parameters.
    def _grad_neg_log_likelihood_and_fim_fn(x):
      predicted_linear_response = sparse_or_dense_matvecmul(model_matrix, x)
      g, h_middle = _grad_neg_log_likelihood_and_fim(
          model_matrix, predicted_linear_response, response, model)
      return g, model_matrix, h_middle

    return tfp.optimizer.proximal_hessian_sparse_minimize(
        _grad_neg_log_likelihood_and_fim_fn,
        x_start=model_coefficients_start,
        l1_regularizer=l1_regularizer,
        l2_regularizer=l2_regularizer,
        maximum_iterations=maximum_iterations,
        maximum_full_sweeps_per_iteration=maximum_full_sweeps_per_iteration,
        learning_rate=learning_rate,
        tolerance=tolerance,
        name=name)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L2637-L2673
def heartbeat_callback(self, session=None):
    """"""
    if self.terminating:
        # ensure termination if processes are created later
        self.task_runner.terminate()
        return

    self.task_instance.refresh_from_db()
    ti = self.task_instance

    fqdn = get_hostname()
    same_hostname = fqdn == ti.hostname
    same_process = ti.pid == os.getpid()

    if ti.state == State.RUNNING:
        if not same_hostname:
            self.log.warning("The recorded hostname %s "
                             "does not match this instance's hostname "
                             "%s", ti.hostname, fqdn)
            raise AirflowException("Hostname of job runner does not match")
        elif not same_process:
            current_pid = os.getpid()
            self.log.warning("Recorded pid %s does not match "
                             "the current pid %s", ti.pid, current_pid)
            raise AirflowException("PID of job runner does not match")
    elif (
            self.task_runner.return_code() is None
            and hasattr(self.task_runner, 'process')
    ):
        self.log.warning(
            "State of this instance has been externally set to %s. "
            "Taking the poison pill.",
            ti.state
        )
        self.task_runner.terminate()
        self.terminating = True
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/postgres_to_gcs_operator.py#L114-L122
def _query_postgres(self):
    """ """
    postgres = PostgresHook(postgres_conn_id=self.postgres_conn_id)
    conn = postgres.get_conn()
    cursor = conn.cursor()
    cursor.execute(self.sql, self.parameters)
    return cursor
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1275-L1394
def build_kalman_filter_step(get_transition_matrix_for_timestep,
                             get_transition_noise_for_timestep,
                             get_observation_matrix_for_timestep,
                             get_observation_noise_for_timestep):
  """ """

  def kalman_filter_step(state, elems_t):
    """Run a single step of Kalman filtering.

    Args:
      state: A `KalmanFilterState` object representing the previous
        filter state at time `t-1`.
      elems_t: A tuple of Tensors `(x_t, mask_t)`, or a `Tensor` `x_t`.
        `x_t` is a `Tensor` with rightmost shape dimensions
        `[observation_size, 1]` representing the vector observed at time `t`,
        and `mask_t` is a `Tensor` with rightmost dimensions `[1, 1]`
        representing the observation mask at time `t`. Both `x_t` and `mask_t`
        may have batch dimensions, which must be compatible with the batch
        dimensions of `state.predicted_mean` and `state.predicted_cov`
        respectively. If `mask_t` is not provided, it is assumed to be `None`.

    Returns:
      new_state: A `KalmanFilterState` object representing the new
        filter state at time `t`.
    """
    if isinstance(elems_t, tuple):
      x_t, mask_t = elems_t
    else:
      x_t = elems_t
      mask_t = None

    observation_matrix = get_observation_matrix_for_timestep(state.timestep)
    observation_noise = get_observation_noise_for_timestep(state.timestep)
    if mask_t is not None:
      # Before running the update, fill in masked observations using the prior
      # expectation. The precise filled value shouldn't matter since updates
      # from masked elements will not be selected below, but we need to ensure
      # that any results we incidentally compute on masked values are at least
      # finite (not inf or NaN) so that they don't screw up gradient
      # propagation through `tf.where`, as described in
      # https://github.com/tensorflow/tensorflow/issues/2540.
      # We fill with the prior expectation because any fixed value such as zero
      # might be arbitrarily unlikely under the prior, leading to overflow in
      # the updates, but the prior expectation should always be a
      # 'reasonable' observation.
      x_expected = _propagate_mean(state.predicted_mean,
                                   observation_matrix,
                                   observation_noise) * tf.ones_like(x_t)
      x_t = tf.where(
          tf.broadcast_to(mask_t, tf.shape(input=x_expected)),
          x_expected,
          tf.broadcast_to(x_t, tf.shape(input=x_expected)))

    # Given predicted mean u_{t|t-1} and covariance P_{t|t-1} from the
    # previous step, incorporate the observation x_t, producing the
    # filtered mean u_t and covariance P_t.
    (filtered_mean,
     filtered_cov,
     observation_dist) = linear_gaussian_update(
         state.predicted_mean, state.predicted_cov,
         observation_matrix, observation_noise,
         x_t)

    # Compute the marginal likelihood p(x_{t} | x_{:t-1}) for this
    # observation.
    log_marginal_likelihood = observation_dist.log_prob(x_t[..., 0])

    if mask_t is not None:
      filtered_mean = tf.where(
          tf.broadcast_to(mask_t, tf.shape(input=filtered_mean)),
          state.predicted_mean,
          filtered_mean)
      filtered_cov = tf.where(
          tf.broadcast_to(mask_t, tf.shape(input=filtered_cov)),
          state.predicted_cov,
          filtered_cov)
      log_marginal_likelihood = tf.where(
          tf.broadcast_to(mask_t[..., 0, 0],
                          tf.shape(input=log_marginal_likelihood)),
          tf.zeros_like(log_marginal_likelihood),
          log_marginal_likelihood)

    # Run the filtered posterior through the transition
    # model to predict the next time step:
    #   u_{t|t-1} = F_t u_{t-1} + b_t
    #   P_{t|t-1} = F_t P_{t-1} F_t' + Q_t
    predicted_mean, predicted_cov = kalman_transition(
        filtered_mean,
        filtered_cov,
        get_transition_matrix_for_timestep(state.timestep),
        get_transition_noise_for_timestep(state.timestep))

    return KalmanFilterState(
        filtered_mean, filtered_cov,
        predicted_mean, predicted_cov,
        observation_dist.mean()[..., tf.newaxis],
        observation_dist.covariance(),
        log_marginal_likelihood,
        state.timestep+1)

  return kalman_filter_step
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1362-L1399
def _change_state_for_tasks_failed_to_execute(self, session):
    """ """
    if self.executor.queued_tasks:
        TI = models.TaskInstance
        filter_for_ti_state_change = (
            [and_(
                TI.dag_id == dag_id,
                TI.task_id == task_id,
                TI.execution_date == execution_date,
                # The TI.try_number will return raw try_number+1 since the
                # ti is not running. And we need to -1 to match the DB record.
                TI._try_number == try_number - 1,
                TI.state == State.QUEUED)
                for dag_id, task_id, execution_date, try_number
                in self.executor.queued_tasks.keys()])
        ti_query = (session.query(TI)
                    .filter(or_(*filter_for_ti_state_change)))
        tis_to_set_to_scheduled = (ti_query
                                   .with_for_update()
                                   .all())
        if len(tis_to_set_to_scheduled) == 0:
            session.commit()
            return

        # set TIs to queued state
        for task_instance in tis_to_set_to_scheduled:
            task_instance.state = State.SCHEDULED

        task_instance_str = "\n\t".join(
            [repr(x) for x in tis_to_set_to_scheduled])

        session.commit()
        self.log.info("Set the following tasks to scheduled state:\n\t%s",
                      task_instance_str)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/mongo_hook.py#L281-L297
def delete_many(self, mongo_collection, filter_doc, mongo_db=None, **kwargs):
    """ """
    collection = self.get_collection(mongo_collection, mongo_db=mongo_db)

    return collection.delete_many(filter_doc, **kwargs)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/qubole_hook.py#L192-L200
def get_log(self, ti):
    """ """
    if self.cmd is None:
        cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
    Command.get_log_id(self.cls, cmd_id)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mongo_to_s3.py#L71-L103
def execute(self, context):
    """ """
    s3_conn = S3Hook(self.s3_conn_id)

    # Grab collection and execute query according to whether or not it is a pipeline
    if self.is_pipeline:
        results = MongoHook(self.mongo_conn_id).aggregate(
            mongo_collection=self.mongo_collection,
            aggregate_query=self.mongo_query,
            mongo_db=self.mongo_db
        )
    else:
        results = MongoHook(self.mongo_conn_id).find(
            mongo_collection=self.mongo_collection,
            query=self.mongo_query,
            mongo_db=self.mongo_db
        )

    # Performs transform then stringifies the docs results into json format
    docs_str = self._stringify(self.transform(results))

    # Load Into S3
    s3_conn.load_string(
        string_data=docs_str,
        key=self.s3_key,
        bucket_name=self.s3_bucket,
        replace=self.replace
    )

    return True
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/api/common/experimental/get_task.py#L24-L40
def get_task(dag_id, task_id):
    """"""
    dagbag = DagBag()

    # Check DAG exists.
    if dag_id not in dagbag.dags:
        error_message = "Dag id {} not found".format(dag_id)
        raise DagNotFound(error_message)

    # Get DAG object and check Task Exists
    dag = dagbag.get_dag(dag_id)
    if not dag.has_task(task_id):
        error_message = 'Task {} not found in dag {}'.format(task_id, dag_id)
        raise TaskNotFound(error_message)

    # Return the task.
    return dag.get_task(task_id)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L506-L547
def log1p_abs(logu, name=None):
  """ """
  with tf.compat.v1.name_scope(name, "log1p_abs", [logu]):
    logu = tf.convert_to_tensor(value=logu, name="logu")
    return tf.math.expm1(tf.abs(logu))
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/masked_autoregressive.py#L957-L966
def _create_masks(degrees):
  """"""
  return [
      # Create input->hidden and hidden->hidden masks.
      inp[:, np.newaxis] <= out
      for inp, out in zip(degrees[:-1], degrees[1:])
  ] + [
      # Create hidden->output mask.
      degrees[-1][:, np.newaxis] < degrees[0]
  ]
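A small NumPy sketch of how these degree-based masks enforce the autoregressive property; the degrees below are made-up example values, not taken from the library.

import numpy as np

# One hidden layer: 3 input units with degrees 1..3, 4 hidden units.
degrees = [np.array([1, 2, 3]), np.array([1, 1, 2, 2])]

# Same construction as `_create_masks`: `<=` for input->hidden, `<` for hidden->output.
masks = [inp[:, np.newaxis] <= out
         for inp, out in zip(degrees[:-1], degrees[1:])] + \
        [degrees[-1][:, np.newaxis] < degrees[0]]

print(masks[0].astype(int))  # 3x4 input->hidden mask
print(masks[1].astype(int))  # 4x3 hidden->output mask: output k sees only inputs with degree < k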
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1325-L1359
def _execute_task_instances(self,
                            simple_dag_bag,
                            states,
                            session=None):
    """ """
    executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
                                                          session=session)

    def query(result, items):
        simple_tis_with_state_changed = \
            self._change_state_for_executable_task_instances(items,
                                                             states,
                                                             session=session)
        self._enqueue_task_instances_with_queued_state(
            simple_dag_bag,
            simple_tis_with_state_changed)
        session.commit()
        return result + len(simple_tis_with_state_changed)

    return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs.py#L327-L352
def _update_inv_hessian(prev_state, next_state):
  """"""
  # Only update the inverse Hessian if not already failed or converged.
  should_update = ~next_state.converged & ~next_state.failed

  # Compute the normalization term (y^T . s), should not update if is singular.
  gradient_delta = next_state.objective_gradient - prev_state.objective_gradient
  position_delta = next_state.position - prev_state.position
  normalization_factor = tf.reduce_sum(
      input_tensor=gradient_delta * position_delta, axis=-1)
  should_update = should_update & ~tf.equal(normalization_factor, 0)

  def _do_update_inv_hessian():
    next_inv_hessian = _bfgs_inv_hessian_update(
        gradient_delta, position_delta, normalization_factor,
        prev_state.inverse_hessian_estimate)
    return bfgs_utils.update_fields(
        next_state,
        inverse_hessian_estimate=tf.where(
            should_update, next_inv_hessian,
            prev_state.inverse_hessian_estimate))

  return prefer_static.cond(
      tf.reduce_any(input_tensor=should_update),
      _do_update_inv_hessian,
      lambda: next_state)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/latent_dirichlet_allocation_distributions.py#L225-L255
def make_prior(num_topics, initial_value):
  """ """
  def _softplus_inverse(x):
    return np.log(np.expm1(x))

  logit_concentration = tf.compat.v1.get_variable(
      "logit_concentration",
      shape=[1, num_topics],
      initializer=tf.compat.v1.initializers.constant(
          _softplus_inverse(initial_value)))
  concentration = _clip_dirichlet_parameters(
      tf.nn.softplus(logit_concentration))

  def prior():
    return tfd.Dirichlet(concentration=concentration,
                         name="topics_prior")

  prior_variables = [logit_concentration]
  return prior, prior_variables
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/bijector.py#L122-L129
def _deep_tuple(self, x):
  """"""
  if isinstance(x, dict):
    return self._deep_tuple(tuple(sorted(x.items())))
  elif isinstance(x, (list, tuple)):
    return tuple(map(self._deep_tuple, x))
  return x
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/decomposition.py#L222-L325
def decompose_forecast_by_component(model, forecast_dist, parameter_samples):
  """ """
  with tf.compat.v1.name_scope('decompose_forecast_by_component'):
    try:
      forecast_lgssm = forecast_dist.components_distribution
      forecast_latent_mean, _ = forecast_lgssm._joint_mean()  # pylint: disable=protected-access
      forecast_latent_covs, _ = forecast_lgssm._joint_covariances()  # pylint: disable=protected-access
    except AttributeError as e:
      raise ValueError(
          'Forecast distribution must be a MixtureSameFamily of '
          'LinearGaussianStateSpaceModel distributions, such as returned by '
          '`tfp.sts.forecast()`. (saw exception: {})'.format(e))

    # Since `parameter_samples` will have sample shape `[num_posterior_draws]`,
    # we need to move the `num_posterior_draws` dimension of the forecast
    # moments from the trailing batch dimension, where it's currently put by
    # `sts.forecast`, back to the leading (sample shape) dimension.
    forecast_latent_mean = dist_util.move_dimension(
        forecast_latent_mean, source_idx=-3, dest_idx=0)
    forecast_latent_covs = dist_util.move_dimension(
        forecast_latent_covs, source_idx=-4, dest_idx=0)
    return _decompose_from_posterior_marginals(
        model, forecast_latent_mean, forecast_latent_covs, parameter_samples)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/positive_semidefinite_kernels/internal/util.py#L157-L171
def maybe_get_common_dtype(arg_list):
  """ """
  # Note that `all` defaults to `True` if `arg_list` is empty.
  if all(a is None for a in arg_list):
    return None
  return dtype_util.common_dtype(arg_list, tf.float32)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sftp_hook.py#L126-L143
def describe_directory(self, path):
    """ """
    conn = self.get_conn()
    flist = conn.listdir_attr(path)
    files = {}
    for f in flist:
        modify = datetime.datetime.fromtimestamp(
            f.st_mtime).strftime('%Y%m%d%H%M%S')
        files[f.filename] = {
            'size': f.st_size,
            'type': 'dir' if stat.S_ISDIR(f.st_mode) else 'file',
            'modify': modify}
    return files
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/util.py#L119-L193
def default_mean_field_normal_fn(
    is_singular=False,
    loc_initializer=tf.compat.v1.initializers.random_normal(stddev=0.1),
    untransformed_scale_initializer=tf.compat.v1.initializers.random_normal(
        mean=-3., stddev=0.1),
    loc_regularizer=None,
    untransformed_scale_regularizer=None,
    loc_constraint=None,
    untransformed_scale_constraint=None):
  """ """
  loc_scale_fn = default_loc_scale_fn(
      is_singular=is_singular,
      loc_initializer=loc_initializer,
      untransformed_scale_initializer=untransformed_scale_initializer,
      loc_regularizer=loc_regularizer,
      untransformed_scale_regularizer=untransformed_scale_regularizer,
      loc_constraint=loc_constraint,
      untransformed_scale_constraint=untransformed_scale_constraint)

  def _fn(dtype, shape, name, trainable, add_variable_fn):
    """Creates multivariate `Deterministic` or `Normal` distribution.

    Args:
      dtype: Type of parameter's event.
      shape: Python `list`-like representing the parameter's event shape.
      name: Python `str` name prepended to any created (or existing)
        `tf.Variable`s.
      trainable: Python `bool` indicating all created `tf.Variable`s should be
        added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
      add_variable_fn: `tf.get_variable`-like `callable` used to create (or
        access existing) `tf.Variable`s.

    Returns:
      Multivariate `Deterministic` or `Normal` distribution.
    """
    loc, scale = loc_scale_fn(dtype, shape, name, trainable, add_variable_fn)
    if scale is None:
      dist = tfd.Deterministic(loc=loc)
    else:
      dist = tfd.Normal(loc=loc, scale=scale)
    batch_ndims = tf.size(input=dist.batch_shape_tensor())
    return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)

  return _fn
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_api_base_hook.py#L137-L146
def _authorize(self):
    """ """
    credentials = self._get_credentials()
    http = httplib2.Http()
    authed_http = google_auth_httplib2.AuthorizedHttp(
        credentials, http=http)
    return authed_http
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/qubole_hook.py#L164-L190
def get_results(self, ti=None, fp=None, inline=True, delim=None, fetch=True):
    """ """
    if fp is None:
        iso = datetime.datetime.utcnow().isoformat()
        logpath = os.path.expanduser(
            configuration.conf.get('core', 'BASE_LOG_FOLDER')
        )
        resultpath = logpath + '/' + self.dag_id + '/' + self.task_id + '/results'
        configuration.mkdir_p(resultpath)
        fp = open(resultpath + '/' + iso, 'wb')

    if self.cmd is None:
        cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
        self.cmd = self.cls.find(cmd_id)

    self.cmd.get_results(fp, inline, delim, fetch)
    fp.flush()
    fp.close()
    return fp.name
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_video_intelligence_hook.py#L41-L49
def get_conn(self):
    """ """
    if not self._conn:
        self._conn = VideoIntelligenceServiceClient(credentials=self._get_credentials())
    return self._conn
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs.py#L476-L491
def _tensor_product(t1, t2):
  """ """
  return tf.matmul(tf.expand_dims(t1, axis=-1), tf.expand_dims(t2, axis=-2))
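The batched outer product computed by _tensor_product can be sanity-checked in plain NumPy; the vectors below are illustrative values only.

import numpy as np

t1, t2 = np.array([1., 2., 3.]), np.array([4., 5.])
outer = np.expand_dims(t1, -1) @ np.expand_dims(t2, -2)  # shape (3, 2)
assert np.allclose(outer, np.outer(t1, t2))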
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L2103-L2159
def expand_to_vector(x, tensor_name=None, op_name=None, validate_args=False):
  """ """
  with tf.name_scope(op_name or "expand_to_vector"):
    x = tf.convert_to_tensor(value=x, name="x")
    ndims = tensorshape_util.rank(x.shape)
    if ndims is None:
      # Maybe expand ndims from 0 to 1.
      if validate_args:
        x = with_dependencies([
            assert_util.assert_rank_at_most(
                x, 1, message="Input is neither scalar nor vector.")
        ], x)
      ndims = tf.rank(x)
      expanded_shape = pick_vector(
          tf.equal(ndims, 0), np.array([1], dtype=np.int32), tf.shape(input=x))
      return tf.reshape(x, expanded_shape)
    elif ndims == 0:
      # Definitely expand ndims from 0 to 1.
      x_const = tf.get_static_value(x)
      if x_const is not None:
        return tf.convert_to_tensor(
            value=dtype_util.as_numpy_dtype(x.dtype)([x_const]),
            name=tensor_name)
      else:
        return tf.reshape(x, [1])
    elif ndims != 1:
      raise ValueError("Input is neither scalar nor vector.")
    # ndims == 1
    return x
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/no_u_turn_sampler/nuts.py#L478-L500
def _leapfrog(value_and_gradients_fn,
              current_state,
              current_grads_target_log_prob,
              current_momentum,
              step_size):
  """"""
  mid_momentum = [
      m + 0.5 * step * g for m, step, g in
      zip(current_momentum, step_size, current_grads_target_log_prob)]
  next_state = [
      s + step * m for s, step, m in
      zip(current_state, step_size, mid_momentum)]
  next_target_log_prob, next_grads_target_log_prob = value_and_gradients_fn(
      *next_state)
  next_momentum = [
      m + 0.5 * step * g for m, step, g in
      zip(mid_momentum, step_size, next_grads_target_log_prob)]
  return [
      next_state,
      next_target_log_prob,
      next_grads_target_log_prob,
      next_momentum,
  ]
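A minimal NumPy sketch of one leapfrog step for a standard-normal target, mirroring the list arithmetic above; the state, momentum and step size are hypothetical values, not from the repository.

import numpy as np

def value_and_gradients_fn(x):
    # Standard normal: log p(x) = -x**2 / 2, gradient = -x.
    return -0.5 * x**2, [-x]

state, momentum, step = [np.array(1.0)], [np.array(0.5)], [0.1]
_, grads = value_and_gradients_fn(*state)

mid_momentum = [m + 0.5 * s * g for m, s, g in zip(momentum, step, grads)]
next_state = [x + s * m for x, s, m in zip(state, step, mid_momentum)]
next_logp, next_grads = value_and_gradients_fn(*next_state)
next_momentum = [m + 0.5 * s * g for m, s, g in zip(mid_momentum, step, next_grads)]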
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/sqoop_operator.py#L166-L234
def execute(self, context):
    """ """
    self.hook = SqoopHook(
        conn_id=self.conn_id,
        verbose=self.verbose,
        num_mappers=self.num_mappers,
        hcatalog_database=self.hcatalog_database,
        hcatalog_table=self.hcatalog_table,
        properties=self.properties
    )

    if self.cmd_type == 'export':
        self.hook.export_table(
            table=self.table,
            export_dir=self.export_dir,
            input_null_string=self.input_null_string,
            input_null_non_string=self.input_null_non_string,
            staging_table=self.staging_table,
            clear_staging_table=self.clear_staging_table,
            enclosed_by=self.enclosed_by,
            escaped_by=self.escaped_by,
            input_fields_terminated_by=self.input_fields_terminated_by,
            input_lines_terminated_by=self.input_lines_terminated_by,
            input_optionally_enclosed_by=self.input_optionally_enclosed_by,
            batch=self.batch,
            relaxed_isolation=self.relaxed_isolation,
            extra_export_options=self.extra_export_options)
    elif self.cmd_type == 'import':
        # add create hcatalog table to extra import options if option passed
        # if new params are added to constructor can pass them in here
        # so don't modify sqoop_hook for each param
        if self.create_hcatalog_table:
            self.extra_import_options['create-hcatalog-table'] = ''

        if self.table and self.query:
            raise AirflowException(
                'Cannot specify query and table together. Need to specify either or.'
            )

        if self.table:
            self.hook.import_table(
                table=self.table,
                target_dir=self.target_dir,
                append=self.append,
                file_type=self.file_type,
                columns=self.columns,
                split_by=self.split_by,
                where=self.where,
                direct=self.direct,
                driver=self.driver,
                extra_import_options=self.extra_import_options)
        elif self.query:
            self.hook.import_query(
                query=self.query,
                target_dir=self.target_dir,
                append=self.append,
                file_type=self.file_type,
                split_by=self.split_by,
                direct=self.direct,
                driver=self.driver,
                extra_import_options=self.extra_import_options)
        else:
            raise AirflowException(
                "Provide query or table parameter to import using Sqoop"
            )
    else:
        raise AirflowException("cmd_type should be 'import' or 'export'")
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/engines/currency_convert.py#L64-L87
def response(resp):
    """"""
    json_resp = resp.text[resp.text.find('\n') + 1:resp.text.rfind('\n') - 2]
    results = []
    try:
        conversion_rate = float(json.loads(json_resp)['conversion']['converted-amount'])
    except:
        return results
    answer = '{0} {1} = {2} {3}, 1 {1} ({5}) = {4} {3} ({6})'.format(
        resp.search_params['amount'],
        resp.search_params['from'],
        resp.search_params['amount'] * conversion_rate,
        resp.search_params['to'],
        conversion_rate,
        resp.search_params['from_name'],
        resp.search_params['to_name'],
    )

    url = 'https://duckduckgo.com/js/spice/currency/1/{0}/{1}'.format(
        resp.search_params['from'].upper(), resp.search_params['to'])

    results.append({'answer': answer, 'url': url})

    return results
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L712-L768
def _get_mixing_indices(size, seed=None, name=None):
  """ """
  with tf.compat.v1.name_scope(
      name, default_name='get_mixing_indices', values=[size]):
    size = tf.convert_to_tensor(value=size)
    dtype = size.dtype
    seed_stream = distributions.SeedStream(seed, salt='get_mixing_indices')
    first = tf.random.uniform([size],
                              maxval=size-1,
                              dtype=dtype,
                              seed=seed_stream())
    second = tf.random.uniform([size],
                               maxval=size-2,
                               dtype=dtype,
                               seed=seed_stream())
    third = tf.random.uniform([size],
                              maxval=size-3,
                              dtype=dtype,
                              seed=seed_stream())

    # Shift second if it is on top of or to the right of first
    second = tf.where(first < second, x=second, y=second + 1)
    smaller = tf.math.minimum(first, second)
    larger = tf.math.maximum(first, second)
    # Shift the third one so it does not coincide with either the first or the
    # second number. Assuming first < second, shift by 1 if the number is in
    # [first, second) and by 2 if the number is greater than or equal to the
    # second.
    third = tf.where(third < smaller, x=third, y=third + 1)
    third = tf.where(third < larger, x=third, y=third + 1)
    sample = tf.stack([first, second, third], axis=1)
    to_avoid = tf.expand_dims(tf.range(size), axis=-1)
    sample = tf.where(sample < to_avoid, x=sample, y=sample + 1)
    return sample
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/api/common/experimental/mark_tasks.py#L243-L280
def set_dag_run_state_to_failed(dag, execution_date, commit=False,
                                session=None):
    """ """
    res = []

    if not dag or not execution_date:
        return res

    # Mark the dag run to failed.
    if commit:
        _set_dag_run_state(dag.dag_id, execution_date, State.FAILED, session)

    # Mark only RUNNING task instances.
    TI = TaskInstance
    task_ids = [task.task_id for task in dag.tasks]
    tis = session.query(TI).filter(
        TI.dag_id == dag.dag_id,
        TI.execution_date == execution_date,
        TI.task_id.in_(task_ids)).filter(TI.state == State.RUNNING)
    task_ids_of_running_tis = [ti.task_id for ti in tis]
    for task in dag.tasks:
        if task.task_id not in task_ids_of_running_tis:
            continue
        task.dag = dag
        new_state = set_state(task=task, execution_date=execution_date,
                              state=State.FAILED, commit=commit)
        res.extend(new_state)

    return res
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/sample.py#L81-L372
def sample_chain(
    num_results,
    current_state,
    previous_kernel_results=None,
    kernel=None,
    num_burnin_steps=0,
    num_steps_between_results=0,
    trace_fn=lambda current_state, kernel_results: kernel_results,
    return_final_kernel_results=False,
    parallel_iterations=10,
    name=None,
):
  """ """
  if not kernel.is_calibrated:
    warnings.warn("supplied `TransitionKernel` is not calibrated. Markov "
                  "chain may not converge to intended target distribution.")
  with tf.compat.v1.name_scope(
      name, "mcmc_sample_chain",
      [num_results, num_burnin_steps, num_steps_between_results]):
    num_results = tf.convert_to_tensor(
        value=num_results, dtype=tf.int32, name="num_results")
    num_burnin_steps = tf.convert_to_tensor(
        value=num_burnin_steps, dtype=tf.int32, name="num_burnin_steps")
    num_steps_between_results = tf.convert_to_tensor(
        value=num_steps_between_results,
        dtype=tf.int32,
        name="num_steps_between_results")
    current_state = tf.nest.map_structure(
        lambda x: tf.convert_to_tensor(value=x, name="current_state"),
        current_state)
    if previous_kernel_results is None:
      previous_kernel_results = kernel.bootstrap_results(current_state)

    if trace_fn is None:
      # It simplifies the logic to use a dummy function here.
      trace_fn = lambda *args: ()
      no_trace = True
    else:
      no_trace = False
    if trace_fn is sample_chain.__defaults__[4]:
      warnings.warn("Tracing all kernel results by default is deprecated. Set "
                    "the `trace_fn` argument to None (the future default "
                    "value) or an explicit callback that traces the values "
                    "you are interested in.")

    def _trace_scan_fn(state_and_results, num_steps):
      next_state, current_kernel_results = mcmc_util.smart_for_loop(
          loop_num_iter=num_steps,
          body_fn=kernel.one_step,
          initial_loop_vars=list(state_and_results),
          parallel_iterations=parallel_iterations)
      return next_state, current_kernel_results

    (_, final_kernel_results), (all_states, trace) = mcmc_util.trace_scan(
        loop_fn=_trace_scan_fn,
        initial_state=(current_state, previous_kernel_results),
        elems=tf.one_hot(
            indices=0,
            depth=num_results,
            on_value=1 + num_burnin_steps,
            off_value=1 + num_steps_between_results,
            dtype=tf.int32),
        # pylint: disable=g-long-lambda
        trace_fn=lambda state_and_results: (state_and_results[0],
                                            trace_fn(*state_and_results)),
        # pylint: enable=g-long-lambda
        parallel_iterations=parallel_iterations)

    if return_final_kernel_results:
      return CheckpointableStatesAndTrace(
          all_states=all_states,
          trace=trace,
          final_kernel_results=final_kernel_results)
    else:
      if no_trace:
        return all_states
      else:
        return StatesAndTrace(all_states=all_states, trace=trace)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/ti_deps/deps/base_ti_dep.py#L110-L125
def is_met(self, ti, session, dep_context=None):
    """ """
    return all(status.passed for status in
               self.get_dep_statuses(ti, session, dep_context))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/api/experimental/endpoints.py#L47-L92
def trigger_dag(dag_id):
    """ """
    data = request.get_json(force=True)

    run_id = None
    if 'run_id' in data:
        run_id = data['run_id']

    conf = None
    if 'conf' in data:
        conf = data['conf']

    execution_date = None
    if 'execution_date' in data and data['execution_date'] is not None:
        execution_date = data['execution_date']

        # Convert string datetime into actual datetime
        try:
            execution_date = timezone.parse(execution_date)
        except ValueError:
            error_message = (
                'Given execution date, {}, could not be identified '
                'as a date. Example date format: 2015-11-16T14:34:15+00:00'
                .format(execution_date))
            _log.info(error_message)
            response = jsonify({'error': error_message})
            response.status_code = 400

            return response

    try:
        dr = trigger.trigger_dag(dag_id, run_id, conf, execution_date)
    except AirflowException as err:
        _log.error(err)
        response = jsonify(error="{}".format(err))
        response.status_code = err.status_code
        return response

    if getattr(g, 'user', None):
        _log.info("User %s created %s", g.user, dr)

    response = jsonify(message="Created {}".format(dr))
    return response
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/interceptor.py#L199-L245
def tape():
  """ """
  tape_data = collections.OrderedDict({})

  def record(f, *args, **kwargs):
    """Records execution to a tape."""
    name = kwargs.get("name")
    output = interceptable(f)(*args, **kwargs)
    if name:
      tape_data[name] = output
    return output

  with interception(record):
    yield tape_data
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/independent.py#L279-L339
def _kl_independent(a, b, name="kl_independent"):
  """ """
  p = a.distribution
  q = b.distribution

  # The KL between any two (non)-batched distributions is a scalar.
  # Given that the KL between two factored distributions is the sum, i.e.
  # KL(p1(x)p2(y) || q1(x)q2(y)) = KL(p1 || q1) + KL(p2 || q2), we compute
  # KL(p || q) and do a `reduce_sum` on the reinterpreted batch dimensions.
  if (tensorshape_util.is_fully_defined(a.event_shape) and
      tensorshape_util.is_fully_defined(b.event_shape)):
    if a.event_shape == b.event_shape:
      if p.event_shape == q.event_shape:
        num_reduce_dims = (tensorshape_util.rank(a.event_shape) -
                           tensorshape_util.rank(p.event_shape))
        reduce_dims = [-i - 1 for i in range(0, num_reduce_dims)]

        return tf.reduce_sum(
            input_tensor=kullback_leibler.kl_divergence(p, q, name=name),
            axis=reduce_dims)
      else:
        raise NotImplementedError("KL between Independents with different "
                                  "event shapes not supported.")
    else:
      raise ValueError("Event shapes do not match.")
  else:
    with tf.control_dependencies(
        [
            assert_util.assert_equal(a.event_shape_tensor(),
                                     b.event_shape_tensor()),
            assert_util.assert_equal(p.event_shape_tensor(),
                                     q.event_shape_tensor())
        ]):
      num_reduce_dims = (
          prefer_static.rank_from_shape(
              a.event_shape_tensor, a.event_shape) -
          prefer_static.rank_from_shape(
              p.event_shape_tensor, a.event_shape))
      reduce_dims = prefer_static.range(-num_reduce_dims - 1, -1, 1)
      return tf.reduce_sum(
          input_tensor=kullback_leibler.kl_divergence(p, q, name=name),
          axis=reduce_dims)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L2162-L2195
def with_dependencies(dependencies, output_tensor, name=None):
  """ """
  if tf.executing_eagerly():
    return output_tensor
  with tf.name_scope(name or "control_dependency") as name:
    with tf.control_dependencies(d for d in dependencies if d is not None):
      output_tensor = tf.convert_to_tensor(value=output_tensor)
      if isinstance(output_tensor, tf.Tensor):
        return tf.identity(output_tensor, name=name)
      else:
        return tf.IndexedSlices(
            tf.identity(output_tensor.values, name=name),
            output_tensor.indices,
            output_tensor.dense_shape)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L241-L288
def _secant2_inner_update(value_and_gradients_function,
                          initial_args,
                          val_0,
                          val_c,
                          f_lim,
                          sufficient_decrease_param,
                          curvature_param):
  """"""
  # Fail if `val_c` is no longer finite.
  new_failed = initial_args.active & ~is_finite(val_c)
  active = initial_args.active & ~new_failed
  failed = initial_args.failed | new_failed

  # We converge when we find a point satisfying the Wolfe conditions, in those
  # cases we set `val_left = val_right = val_c`.
  found_wolfe = active & _satisfies_wolfe(
      val_0, val_c, f_lim, sufficient_decrease_param, curvature_param)
  val_left = val_where(found_wolfe, val_c, initial_args.left)
  val_right = val_where(found_wolfe, val_c, initial_args.right)
  converged = initial_args.converged | found_wolfe
  active = active & ~found_wolfe

  # If any active batch members remain, we apply the `update` function to
  # squeeze further their corresponding left/right bracketing interval.
  def _apply_update():
    update_result = update(
        value_and_gradients_function, val_left, val_right, val_c, f_lim,
        active=active)
    return _Secant2Result(
        active=tf.zeros_like(active),  # End of secant2, no actives anymore.
        converged=converged,
        failed=failed | update_result.failed,
        num_evals=initial_args.num_evals + update_result.num_evals,
        left=update_result.left,
        right=update_result.right)

  # Otherwise just return the current results.
  def _default():
    return _Secant2Result(
        active=active,
        converged=converged,
        failed=failed,
        num_evals=initial_args.num_evals,
        left=val_left,
        right=val_right)

  return prefer_static.cond(
      tf.reduce_any(input_tensor=active), _apply_update, _default)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/conv_variational.py#L1111-L1126
def get_config(self):
  """ """
  config = {
      'seed': self.seed,
  }
  base_config = super(_ConvFlipout, self).get_config()
  return dict(list(base_config.items()) + list(config.items()))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/redshift_hook.py#L68-L83
def describe_cluster_snapshots(self, cluster_identifier):
    """ """
    response = self.get_conn().describe_cluster_snapshots(
        ClusterIdentifier=cluster_identifier
    )
    if 'Snapshots' not in response:
        return None
    snapshots = response['Snapshots']
    snapshots = filter(lambda x: x['Status'], snapshots)
    snapshots.sort(key=lambda x: x['SnapshotCreateTime'], reverse=True)
    return snapshots
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/dirichlet_multinomial.py#L322-L327
def _variance_scale_term(self):
  """"""
  # Expand back the last dim so the shape of _variance_scale_term matches the
  # shape of self.concentration.
  c0 = self.total_concentration[..., tf.newaxis]
  return tf.sqrt((1. + c0 / self.total_count[..., tf.newaxis]) / (1. + c0))
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/root_search.py#L44-L321
def secant_root(objective_fn,
                initial_position,
                next_position=None,
                value_at_position=None,
                position_tolerance=1e-8,
                value_tolerance=1e-8,
                max_iterations=50,
                stopping_policy_fn=tf.reduce_all,
                validate_args=False,
                name=None):
  """"""
  if not callable(stopping_policy_fn):
    raise ValueError('stopping_policy_fn must be callable')

  position = tf.convert_to_tensor(
      value=initial_position,
      name='position',
  )
  value_at_position = tf.convert_to_tensor(
      value=value_at_position or objective_fn(position),
      name='value_at_position',
      dtype=position.dtype.base_dtype)

  zero = tf.zeros_like(position)
  position_tolerance = tf.convert_to_tensor(
      value=position_tolerance, name='position_tolerance',
      dtype=position.dtype)
  value_tolerance = tf.convert_to_tensor(
      value=value_tolerance, name='value_tolerance', dtype=position.dtype)

  num_iterations = tf.zeros_like(position, dtype=tf.int32)
  max_iterations = tf.convert_to_tensor(value=max_iterations, dtype=tf.int32)
  max_iterations = tf.broadcast_to(
      max_iterations, name='max_iterations', shape=position.shape)

  # Compute the step from `next_position` if present. This covers the case
  # where a user has two starting points, which bound the root or has a
  # specific step size in mind.
  if next_position is None:
    epsilon = tf.constant(1e-4, dtype=position.dtype, shape=position.shape)
    step = position * epsilon + tf.sign(position) * epsilon
  else:
    step = next_position - initial_position

  finished = tf.constant(False, shape=position.shape)

  # Negate `stopping_condition` to determine if the search should continue.
  # This means, in particular, that tf.reduce_*all* will return only when the
  # search is finished for *all* starting points.
  def _should_continue(position, value_at_position, num_iterations, step,
                       finished):
    """Indicates whether the overall search should continue.

    Args:
      position: `Tensor` containing the current root estimates.
      value_at_position: `Tensor` containing the value of `objective_fn` at
        `position`.
      num_iterations: `Tensor` containing the current iteration index for each
        point.
      step: `Tensor` containing the size of the step to take for each point.
      finished: `Tensor` indicating for which points the search is finished.

    Returns:
      A boolean value indicating whether the overall search should continue.
    """
    del position, value_at_position, num_iterations, step  # Unused
    return ~tf.convert_to_tensor(
        value=stopping_policy_fn(finished), name='should_stop', dtype=tf.bool)

  # For each point in `position`, the search is stopped if either:
  # (1) A root has been found
  # (2) f(position) == f(position + step)
  # (3) The maximum number of iterations has been reached
  # In case (2), the search may be stopped both before the desired tolerance
  # is achieved (or even a root is found), and the maximum number of
  # iterations is reached.
  def _body(position, value_at_position, num_iterations, step, finished):
    """Performs one iteration of the secant root-finding algorithm.

    Args:
      position: `Tensor` containing the current root estimates.
      value_at_position: `Tensor` containing the value of `objective_fn` at
        `position`.
      num_iterations: `Tensor` containing the current iteration index for each
        point.
      step: `Tensor` containing the size of the step to take for each point.
      finished: `Tensor` indicating for which points the search is finished.

    Returns:
      The `Tensor`s to use for the next iteration of the algorithm.
    """
    # True if the search was already finished, or (1) or (3) just became true.
    was_finished = finished | (num_iterations >= max_iterations) | (
        tf.abs(step) < position_tolerance) | (
            tf.abs(value_at_position) < value_tolerance)

    # Compute the next position and the value at that point.
    next_position = tf.where(was_finished, position, position + step)
    value_at_next_position = tf.where(was_finished, value_at_position,
                                      objective_fn(next_position))

    # True if the search was already finished, or (2) just became true.
    is_finished = tf.equal(value_at_position, value_at_next_position)

    # Use the mid-point between the last two positions if (2) just became
    # true.
    next_position = tf.where(is_finished & ~was_finished,
                             (position + next_position) * 0.5, next_position)

    # Once finished, stop updating the iteration index and set the step to
    # zero.
    num_iterations = tf.where(is_finished, num_iterations, num_iterations + 1)
    next_step = tf.where(
        is_finished, zero, step * value_at_next_position /
        (value_at_position - value_at_next_position))

    return (next_position, value_at_next_position, num_iterations, next_step,
            is_finished)

  with tf.compat.v1.name_scope(
      name, 'secant_root',
      [position, next_position, value_at_position, max_iterations]):

    assertions = []
    if validate_args:
      assertions += [
          tf.Assert(
              tf.reduce_all(input_tensor=position_tolerance > zero),
              [position_tolerance]),
          tf.Assert(
              tf.reduce_all(input_tensor=value_tolerance > zero),
              [value_tolerance]),
          tf.Assert(
              tf.reduce_all(input_tensor=max_iterations >= num_iterations),
              [max_iterations]),
      ]

    with tf.control_dependencies(assertions):
      root, value_at_root, num_iterations, _, _ = tf.while_loop(
          cond=_should_continue,
          body=_body,
          loop_vars=[
              position, value_at_position, num_iterations, step, finished
          ])

  return RootSearchResults(
      estimated_root=root,
      objective_at_estimated_root=value_at_root,
      num_iterations=num_iterations)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/wishart.py#L425-L430
def _multi_lgamma(self, a, p, name="multi_lgamma"):
  """"""
  with self._name_scope(name):
    seq = self._multi_gamma_sequence(a, p)
    return (0.25 * p * (p - 1.) * math.log(math.pi) +
            tf.reduce_sum(input_tensor=tf.math.lgamma(seq), axis=[-1]))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/sensors/hdfs_sensor.py#L59-L76
def filter_for_filesize(result, size=None):
    """ """
    if size:
        log = LoggingMixin().log
        log.debug(
            'Filtering for file size >= %s in files: %s',
            size, map(lambda x: x['path'], result)
        )
        size *= settings.MEGABYTE
        result = [x for x in result if x['length'] >= size]
        log.debug('HdfsSensor.poke: after size filter result is %s', result)
    return result
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L323-L445
def pinv(a, rcond=None, validate_args=False, name=None):
  """ """
  with tf.compat.v1.name_scope(name, 'pinv', [a, rcond]):
    a = tf.convert_to_tensor(value=a, name='a')

    assertions = _maybe_validate_matrix(a, validate_args)
    if assertions:
      with tf.control_dependencies(assertions):
        a = tf.identity(a)

    dtype = a.dtype.as_numpy_dtype

    if rcond is None:
      def get_dim_size(dim):
        if tf.compat.dimension_value(a.shape[dim]) is not None:
          return tf.compat.dimension_value(a.shape[dim])
        return tf.shape(input=a)[dim]

      num_rows = get_dim_size(-2)
      num_cols = get_dim_size(-1)
      if isinstance(num_rows, int) and isinstance(num_cols, int):
        max_rows_cols = float(max(num_rows, num_cols))
      else:
        max_rows_cols = tf.cast(tf.maximum(num_rows, num_cols), dtype)
      rcond = 10. * max_rows_cols * np.finfo(dtype).eps

    rcond = tf.convert_to_tensor(value=rcond, dtype=dtype, name='rcond')

    # Calculate pseudo inverse via SVD.
    # Note: if a is symmetric then u == v. (We might observe additional
    # performance by explicitly setting `v = u` in such cases.)
    [
        singular_values,         # Sigma
        left_singular_vectors,   # U
        right_singular_vectors,  # V
    ] = tf.linalg.svd(a, full_matrices=False, compute_uv=True)

    # Saturate small singular values to inf. This has the effect of making
    # `1. / s = 0.` while not resulting in `NaN` gradients.
    cutoff = rcond * tf.reduce_max(input_tensor=singular_values, axis=-1)
    singular_values = tf.where(
        singular_values > cutoff[..., tf.newaxis], singular_values,
        tf.fill(tf.shape(input=singular_values), np.array(np.inf, dtype)))

    # Although `a == tf.matmul(u, s * v, transpose_b=True)` we swap
    # `u` and `v` here so that `tf.matmul(pinv(A), A) = tf.eye()`, i.e.,
    # a matrix inverse has "transposed" semantics.
    a_pinv = tf.matmul(
        right_singular_vectors / singular_values[..., tf.newaxis, :],
        left_singular_vectors,
        adjoint_b=True)

    if a.shape.ndims is not None:
      a_pinv.set_shape(a.shape[:-2].concatenate([a.shape[-1], a.shape[-2]]))

    return a_pinv
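The SVD-based construction above can be reproduced in plain NumPy; the matrix below is a made-up example (and small singular values are zeroed directly rather than saturated to inf).

import numpy as np

a = np.array([[1., 2.], [3., 4.], [5., 6.]])
u, s, vt = np.linalg.svd(a, full_matrices=False)

rcond = 10. * max(a.shape) * np.finfo(a.dtype).eps
s_inv = np.where(s > rcond * s.max(), 1. / s, 0.)
a_pinv = (vt.T * s_inv) @ u.T    # V @ diag(1/s) @ U^T

assert np.allclose(a_pinv @ a, np.eye(2))      # left inverse: a has full column rank
assert np.allclose(a_pinv, np.linalg.pinv(a))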
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/internal/missing_values_util.py#L109-L134
def moments_of_masked_time_series(time_series_tensor, broadcast_mask):
  """ """
  num_unmasked_entries = tf.cast(
      tf.reduce_sum(input_tensor=tf.cast(~broadcast_mask, tf.int32), axis=-1),
      time_series_tensor.dtype)

  # Manually compute mean and variance, excluding masked entries.
  mean = (tf.reduce_sum(input_tensor=tf.where(
      broadcast_mask,
      tf.zeros_like(time_series_tensor),
      time_series_tensor), axis=-1) / num_unmasked_entries)
  variance = (tf.reduce_sum(input_tensor=tf.where(
      broadcast_mask,
      tf.zeros_like(time_series_tensor),
      (time_series_tensor - mean[..., tf.newaxis]) ** 2),
                            axis=-1) / num_unmasked_entries)
  return mean, variance
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/cassandra_to_gcs.py#L147-L154
def _query_cassandra(self):
    """ """
    self.hook = CassandraHook(cassandra_conn_id=self.cassandra_conn_id)
    session = self.hook.get_conn()
    cursor = session.execute(self.cql)
    return cursor
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1657-L1703
def get_datasets_list(self, project_id=None):
    """ """
    dataset_project_id = project_id if project_id else self.project_id

    try:
        datasets_list = self.service.datasets().list(
            projectId=dataset_project_id).execute(num_retries=self.num_retries)['datasets']
        self.log.info("Datasets List: %s", datasets_list)

    except HttpError as err:
        raise AirflowException(
            'BigQuery job failed. Error was: {}'.format(err.content))

    return datasets_list
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L398-L426
def create_transform_job(self, config, wait_for_completion=True,
                         check_interval=30, max_ingestion_time=None):
    """ """
    self.check_s3_url(config['TransformInput']['DataSource']['S3DataSource']['S3Uri'])

    response = self.get_conn().create_transform_job(**config)
    if wait_for_completion:
        self.check_status(config['TransformJobName'],
                          'TransformJobStatus',
                          self.describe_transform_job,
                          check_interval,
                          max_ingestion_time
                          )
    return response
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/bijector.py#L112-L120
def _merge(self, old, new, use_equals=False):
  """"""
  if old is None:
    return new
  if new is None:
    return old
  if (old == new) if use_equals else (old is new):
    return old
  raise ValueError("Incompatible values: %s != %s" % (old, new))
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L790-L802
def _get_best_effort_ndims(x,
                           expect_ndims=None,
                           expect_ndims_at_least=None,
                           expect_ndims_no_more_than=None):
  """"""
  ndims_static = _get_static_ndims(
      x,
      expect_ndims=expect_ndims,
      expect_ndims_at_least=expect_ndims_at_least,
      expect_ndims_no_more_than=expect_ndims_no_more_than)
  if ndims_static is not None:
    return ndims_static
  return tf.rank(x)
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L410-L432
def _get_perspective_coeffs(startpoints, endpoints):
    """ """
    matrix = []

    for p1, p2 in zip(endpoints, startpoints):
        matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])

    A = torch.tensor(matrix, dtype=torch.float)
    B = torch.tensor(startpoints, dtype=torch.float).view(8)
    res = torch.gels(B, A)[0]
    return res.squeeze_(1).tolist()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/opsgenie_alert_hook.py#L61-L74
def get_conn(self, headers=None):
    """ """
    conn = self.get_connection(self.http_conn_id)
    self.base_url = conn.host if conn.host else 'https://api.opsgenie.com'
    session = requests.Session()
    if headers:
        session.headers.update(headers)

    return session
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/gamma.py#L273-L296
def _kl_gamma_gamma(g0, g1, name=None):
  """ """
  with tf.name_scope(name or "kl_gamma_gamma"):
    # Result from:
    #   http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
    # For derivation see:
    #   http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions  # pylint: disable=line-too-long
    return (((g0.concentration - g1.concentration) *
             tf.math.digamma(g0.concentration)) +
            tf.math.lgamma(g1.concentration) -
            tf.math.lgamma(g0.concentration) +
            g1.concentration * tf.math.log(g0.rate) -
            g1.concentration * tf.math.log(g1.rate) +
            g0.concentration * (g1.rate / g0.rate - 1.))
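For reference, the same closed form evaluated with SciPy, using a pair of hypothetical (concentration, rate) parameter values chosen purely for illustration.

import numpy as np
from scipy.special import digamma, gammaln

a0, b0 = 2.0, 1.5   # concentration and rate of g0 (example values)
a1, b1 = 3.0, 0.5   # concentration and rate of g1 (example values)

kl = ((a0 - a1) * digamma(a0)
      + gammaln(a1) - gammaln(a0)
      + a1 * np.log(b0) - a1 * np.log(b1)
      + a0 * (b1 / b0 - 1.))
print(kl)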
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/grammar_vae.py#L166-L182
def mask(self, symbol, on_value, off_value):
  """ """
  mask_values = [on_value if lhs == symbol else off_value
                 for lhs, _ in self.production_rules]
  mask_values = tf.reshape(mask_values, [1, len(self.production_rules)])
  return mask_values
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/seasonal.py#L628-L691
def build_constrained_seasonal_transition_noise(
    drift_scale, num_seasons, is_last_day_of_season):
  """"""

  # Conceptually, this method takes the noise covariance on effects L @ L'
  # computed by `build_seasonal_transition_noise`, with scale factor
  #   L = [ 0, 0, ..., 0
  #         ...
  #         0, 0, ..., drift_scale],
  # and transforms it to act on the constrained-residual representation.
  #
  # The resulting noise covariance M @ M' is equivalent to
  #   M @ M' = effects_to_residuals @ LL' @ residuals_to_effects
  # where `@` is matrix multiplication. However because this matrix is
  # rank-deficient, we can't take its Cholesky decomposition directly, so we'll
  # construct its lower-triangular scale factor `M` by hand instead.
  #
  # Concretely, let `M = P @ R @ L` be the scale factor in the
  # transformed space, with matrices `R`, `P` applying the reparameterization
  # and zero-mean constraint respectively as defined in the
  # "Mathematical Details" section of `ConstrainedSeasonalStateSpaceModel`. It's
  # easy to see (*) that the implied covariance
  # `M @ M' = P @ R @ L @ L' @ R' @ P'` is just the constant matrix
  #   `M @ M' = [ 1, 1, ..., 1, 0
  #               1, 1, ..., 1, 0
  #               ...
  #               1, 1, ..., 1, 0
  #               0, 0, ..., 0, 0] * (drift_scale / num_seasons)**2`
  # with zeros in the final row and column. So we can directly construct
  # the lower-triangular factor
  #   `Q = [ 1, 0, ..., 0
  #          1, 0, ..., 0
  #          ...
  #          1, 0, ..., 0
  #          0, 0, ..., 0 ] * drift_scale/num_seasons`
  # such that Q @ Q' = M @ M'. In practice, we don't reify the final row and
  # column full of zeroes, i.e., we construct
  # `Q[:num_seasons-1, :num_seasons-1]` as the scale-TriL covariance factor.
  #
  # (*) Argument: `L` is zero everywhere but the last column, so `R @ L` will be
  # too. Since the last column of `R` is the constant `-1/num_seasons`, `R @ L`
  # is simply the matrix with constant `-drift_scale/num_seasons` in the final
  # column (except the final row, which is negated) and zero in all other
  # columns, and `M = P @ R @ L` additionally zeroes out the final row. Then
  # M @ M' is just the outer product of that final column with itself (since all
  # other columns are zero), which gives the matrix shown above.
  drift_scale_tril_nonzeros = tf.concat([
      tf.ones([num_seasons - 1, 1], dtype=drift_scale.dtype),
      tf.zeros([num_seasons - 1, num_seasons - 2], dtype=drift_scale.dtype)],
                                        axis=-1)
  drift_scale_tril = (drift_scale_tril_nonzeros *
                      drift_scale[..., tf.newaxis, tf.newaxis] / num_seasons)

  # Inject transition noise iff it is the last day of the season.
  def seasonal_transition_noise(t):
    noise_scale_tril = dist_util.pick_scalar_condition(
        is_last_day_of_season(t),
        drift_scale_tril,
        tf.zeros_like(drift_scale_tril))
    return tfd.MultivariateNormalTriL(
        loc=tf.zeros(num_seasons - 1, dtype=drift_scale.dtype),
        scale_tril=noise_scale_tril)
  return seasonal_transition_noise
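A small NumPy check of the derivation in the comments above (not part of the snippet); num_seasons and drift_scale are made-up example values. The factor Q with constant drift_scale/num_seasons in its first column reproduces the constant covariance matrix described there.

import numpy as np

num_seasons, drift_scale = 4, 2.0
q = np.zeros([num_seasons - 1, num_seasons - 1])
q[:, 0] = drift_scale / num_seasons
print(q @ q.T)  # every entry equals (drift_scale / num_seasons)**2 == 0.25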
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/keras/topology.py#L150-L168
def evaluate(self, x, y=None, batch_size=32):
    """ """
    if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
        evaluation_data = to_sample_rdd(x, y)
    elif isinstance(x, RDD) and not y:
        evaluation_data = x
    else:
        raise TypeError("Unsupported evaluation data type: %s" % type(x))
    return callBigDlFunc(self.bigdl_type, "evaluate",
                         self.value,
                         evaluation_data,
                         batch_size)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/latent_dirichlet_allocation_distributions.py#L164-L194
def make_encoder(activation, num_topics, layer_sizes):
  """ """
  encoder_net = tf.keras.Sequential()
  for num_hidden_units in layer_sizes:
    encoder_net.add(
        tf.keras.layers.Dense(
            num_hidden_units,
            activation=activation,
            kernel_initializer=tf.compat.v1.glorot_normal_initializer()))
  encoder_net.add(
      tf.keras.layers.Dense(
          num_topics,
          activation=tf.nn.softplus,
          kernel_initializer=tf.compat.v1.glorot_normal_initializer()))

  def encoder(bag_of_words):
    net = _clip_dirichlet_parameters(encoder_net(bag_of_words))
    return tfd.Dirichlet(concentration=net,
                         name="topics_posterior")

  return encoder
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/vae.py#L462-L476
def build_fake_input_fns(batch_size):
  """"""
  random_sample = np.random.rand(batch_size, *IMAGE_SHAPE).astype("float32")

  def train_input_fn():
    dataset = tf.data.Dataset.from_tensor_slices(
        random_sample).map(lambda row: (row, 0)).batch(batch_size).repeat()
    return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()

  def eval_input_fn():
    dataset = tf.data.Dataset.from_tensor_slices(
        random_sample).map(lambda row: (row, 0)).batch(batch_size)
    return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()

  return train_input_fn, eval_input_fn
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L450-L477
def create_endpoint(self, config, wait_for_completion=True,
                    check_interval=30, max_ingestion_time=None):
    """ """
    response = self.get_conn().create_endpoint(**config)
    if wait_for_completion:
        self.check_status(config['EndpointName'],
                          'EndpointStatus',
                          self.describe_endpoint,
                          check_interval, max_ingestion_time,
                          non_terminal_states=self.endpoint_non_terminal_states
                          )
    return response
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L696-L720
def new(params, event_shape=(), dtype=None, validate_args=False, name=None):
  """"""
  with tf.compat.v1.name_scope(name, 'IndependentBernoulli',
                               [params, event_shape]):
    params = tf.convert_to_tensor(value=params, name='params')
    event_shape = dist_util.expand_to_vector(
        tf.convert_to_tensor(
            value=event_shape, name='event_shape', dtype_hint=tf.int32),
        tensor_name='event_shape')
    new_shape = tf.concat([
        tf.shape(input=params)[:-1],
        event_shape,
    ], axis=0)
    dist = tfd.Independent(
        tfd.Bernoulli(
            logits=tf.reshape(params, new_shape),
            dtype=dtype or params.dtype.base_dtype,
            validate_args=validate_args),
        reinterpreted_batch_ndims=tf.size(input=event_shape),
        validate_args=validate_args)
    dist._logits = dist.distribution._logits  # pylint: disable=protected-access
    dist._probs = dist.distribution._probs  # pylint: disable=protected-access
    dist.logits = tfd.Bernoulli.logits
    dist.probs = tfd.Bernoulli.probs
    return dist
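An illustrative call sketch (not from the linked source), assuming the surrounding module's imports (tf, tfd, dist_util) are in scope; the shapes are arbitrary example values.

import tensorflow as tf

params = tf.zeros([5, 28 * 28])           # one logit per pixel for a batch of 5
dist = new(params, event_shape=(28, 28))
# `dist` is an Independent(Bernoulli) with batch shape [5] and event shape [28, 28].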
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/langevin.py#L691-L745
def _get_drift(step_size_parts, volatility_parts, grads_volatility,
               grads_target_log_prob, name=None):
  """ """
  with tf.compat.v1.name_scope(name, 'mala_get_drift', [
      step_size_parts, volatility_parts, grads_volatility,
      grads_target_log_prob
  ]):

    drift_parts = []

    for step_size, volatility, grad_volatility, grad_target_log_prob in (
        zip(step_size_parts, volatility_parts, grads_volatility,
            grads_target_log_prob)):
      volatility_squared = tf.square(volatility)
      drift = 0.5 * step_size * (volatility_squared * grad_target_log_prob +
                                 grad_volatility)
      drift_parts.append(drift)

    return drift_parts
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1042-L1214
def _find_executable_task_instances(self, simple_dag_bag, states,
                                    session=None):
    """ """
    executable_tis = []

    # Get all task instances associated with scheduled
    # DagRuns which are not backfilled, in the given states,
    # and the dag is not paused
    TI = models.TaskInstance
    DR = models.DagRun
    DM = models.DagModel
    ti_query = (
        session
        .query(TI)
        .filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
        .outerjoin(
            DR,
            and_(DR.dag_id == TI.dag_id,
                 DR.execution_date == TI.execution_date)
        )
        .filter(or_(DR.run_id == None,  # noqa: E711
                not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
        .outerjoin(DM, DM.dag_id == TI.dag_id)
        .filter(or_(DM.dag_id == None,  # noqa: E711
                not_(DM.is_paused)))
    )

    # Additional filters on task instance state
    if None in states:
        ti_query = ti_query.filter(
            or_(TI.state == None, TI.state.in_(states))  # noqa: E711
        )
    else:
        ti_query = ti_query.filter(TI.state.in_(states))

    task_instances_to_examine = ti_query.all()

    if len(task_instances_to_examine) == 0:
        self.log.debug("No tasks to consider for execution.")
        return executable_tis

    # Put one task instance on each line
    task_instance_str = "\n\t".join(
        [repr(x) for x in task_instances_to_examine])
    self.log.info(
        "%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
        task_instance_str
    )

    # Get the pool settings
    pools = {p.pool: p for p in session.query(models.Pool).all()}

    pool_to_task_instances = defaultdict(list)
    for task_instance in task_instances_to_examine:
        pool_to_task_instances[task_instance.pool].append(task_instance)

    states_to_count_as_running = [State.RUNNING, State.QUEUED]
    # dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
    dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
        states=states_to_count_as_running, session=session)

    # Go through each pool, and queue up a task for execution if there are
    # any open slots in the pool.
    for pool, task_instances in pool_to_task_instances.items():
        pool_name = pool
        if not pool:
            # Arbitrary:
            # If queued outside of a pool, trigger no more than
            # non_pooled_task_slot_count
            open_slots = models.Pool.default_pool_open_slots()
            pool_name = models.Pool.default_pool_name
        else:
            if pool not in pools:
                self.log.warning(
                    "Tasks using non-existent pool '%s' will not be scheduled",
                    pool
                )
                open_slots = 0
            else:
                open_slots = pools[pool].open_slots(session=session)

        num_ready = len(task_instances)
        self.log.info(
            "Figuring out tasks to run in Pool(name=%s) with %s open slots "
            "and %s task instances ready to be queued",
            pool, open_slots, num_ready
        )

        priority_sorted_task_instances = sorted(
            task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))

        # Number of tasks that cannot be scheduled because of no open slot in pool
        num_starving_tasks = 0
        for current_index, task_instance in enumerate(priority_sorted_task_instances):
            if open_slots <= 0:
                self.log.info(
                    "Not scheduling since there are %s open slots in pool %s",
                    open_slots, pool
                )
                # Can't schedule any more since there are no more open slots.
                num_starving_tasks = len(priority_sorted_task_instances) - current_index
                break

            # Check to make sure that the task concurrency of the DAG hasn't been
            # reached.
            dag_id = task_instance.dag_id
            simple_dag = simple_dag_bag.get_dag(dag_id)

            current_dag_concurrency = dag_concurrency_map[dag_id]
            dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
            self.log.info(
                "DAG %s has %s/%s running and queued tasks",
                dag_id, current_dag_concurrency, dag_concurrency_limit
            )
            if current_dag_concurrency >= dag_concurrency_limit:
                self.log.info(
                    "Not executing %s since the number of tasks running or queued "
                    "from DAG %s is >= to the DAG's task concurrency limit of %s",
                    task_instance, dag_id, dag_concurrency_limit
                )
                continue

            task_concurrency_limit = simple_dag.get_task_special_arg(
                task_instance.task_id,
                'task_concurrency')
            if task_concurrency_limit is not None:
                current_task_concurrency = task_concurrency_map[
                    (task_instance.dag_id, task_instance.task_id)
                ]

                if current_task_concurrency >= task_concurrency_limit:
                    self.log.info("Not executing %s since the task concurrency for"
                                  " this task has been reached.", task_instance)
                    continue

            if self.executor.has_task(task_instance):
                self.log.debug(
                    "Not handling task %s as the executor reports it is running",
                    task_instance.key
                )
                continue
            executable_tis.append(task_instance)
            open_slots -= 1
            dag_concurrency_map[dag_id] += 1
            task_concurrency_map[(task_instance.dag_id,
                                  task_instance.task_id)] += 1

        Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name),
                    num_starving_tasks)

    task_instance_str = "\n\t".join(
        [repr(x) for x in executable_tis])
    self.log.info(
        "Setting the following tasks to queued state:\n\t%s", task_instance_str)

    # so these don't expire on commit
    for ti in executable_tis:
        copy_dag_id = ti.dag_id
        copy_execution_date = ti.execution_date
        copy_task_id = ti.task_id
        make_transient(ti)
        ti.dag_id = copy_dag_id
        ti.execution_date = copy_execution_date
        ti.task_id = copy_task_id
    return executable_tis
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1864-L1929
def process_quadrature_grid_and_probs(quadrature_grid_and_probs,
                                      dtype,
                                      validate_args,
                                      name=None):
  """ """
  with tf.name_scope(name or "process_quadrature_grid_and_probs"):
    if quadrature_grid_and_probs is None:
      grid, probs = np.polynomial.hermite.hermgauss(deg=8)
      grid = grid.astype(dtype_util.as_numpy_dtype(dtype))
      probs = probs.astype(dtype_util.as_numpy_dtype(dtype))
      probs /= np.linalg.norm(probs, ord=1, keepdims=True)
      grid = tf.convert_to_tensor(value=grid, name="grid", dtype=dtype)
      probs = tf.convert_to_tensor(value=probs, name="probs", dtype=dtype)
      return grid, probs

    grid, probs = tuple(quadrature_grid_and_probs)
    grid = tf.convert_to_tensor(value=grid, name="grid", dtype=dtype)
    probs = tf.convert_to_tensor(
        value=probs, name="unnormalized_probs", dtype=dtype)
    probs /= tf.norm(tensor=probs, ord=1, axis=-1, keepdims=True, name="probs")

    def _static_event_size(x):
      """Returns the static size of a specific dimension or `None`."""
      return tf.compat.dimension_value(
          tensorshape_util.with_rank_at_least(x.shape, 1)[-1])

    m, n = _static_event_size(probs), _static_event_size(grid)
    if m is not None and n is not None:
      if m != n:
        raise ValueError("`quadrature_grid_and_probs` must be a `tuple` of "
                         "same-length zero-th-dimension `Tensor`s "
                         "(saw lengths {}, {})".format(m, n))
    elif validate_args:
      assertions = [
          assert_util.assert_equal(
              dimension_size(probs, axis=-1),
              dimension_size(grid, axis=-1),
              message=("`quadrature_grid_and_probs` must be a `tuple` of "
                       "same-length zero-th-dimension `Tensor`s")),
      ]
      with tf.control_dependencies(assertions):
        grid = tf.identity(grid)
        probs = tf.identity(probs)
    return grid, probs
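A sketch of the default branch only, assuming NumPy; it reproduces the degree-8 Gauss-Hermite abscissae and the L1-normalized weights that the function returns when quadrature_grid_and_probs is None.

import numpy as np

grid, probs = np.polynomial.hermite.hermgauss(deg=8)
probs = probs / np.linalg.norm(probs, ord=1, keepdims=True)
print(grid.shape, probs.sum())  # (8,) and 1.0 (up to float rounding)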
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/timezone.py#L67-L79
def utc_epoch():
    """ """
    # pendulum utcnow() is not used as that sets a TimezoneInfo object
    # instead of a Timezone. This is not picklable and also creates issues
    # when using replace()
    d = dt.datetime(1970, 1, 1)
    d = d.replace(tzinfo=utc)
    return d
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1688-L1721
def build_pushforward_latents_step(get_observation_matrix_for_timestep,
                                   get_observation_noise_for_timestep):
  """ """

  def pushforward_latents_step(_, latent_t_mean_cov):
    """Loop body fn to pushforward latents to observations at a time step."""
    t, latent_mean, latent_cov = latent_t_mean_cov
    observation_matrix = get_observation_matrix_for_timestep(t)
    observation_noise = get_observation_noise_for_timestep(t)
    observation_mean = _propagate_mean(latent_mean,
                                       observation_matrix,
                                       observation_noise)
    observation_cov = _propagate_cov(latent_cov,
                                     observation_matrix,
                                     observation_noise)

    return (observation_mean, observation_cov)

  return pushforward_latents_step
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L848-L894
def create(model,
           training_set,
           criterion,
           end_trigger=None,
           batch_size=32,
           optim_method=None,
           cores=None,
           bigdl_type="float"):
    """ """
    if not end_trigger:
        end_trigger = MaxEpoch(1)
    if not optim_method:
        optim_method = SGD()
    if isinstance(training_set, RDD) or isinstance(training_set, DataSet):
        return DistriOptimizer(model=model,
                               training_rdd=training_set,
                               criterion=criterion,
                               end_trigger=end_trigger,
                               batch_size=batch_size,
                               optim_method=optim_method,
                               bigdl_type=bigdl_type)
    elif isinstance(training_set, tuple) and len(training_set) == 2:
        x, y = training_set
        return LocalOptimizer(X=x,
                              Y=y,
                              model=model,
                              criterion=criterion,
                              end_trigger=end_trigger,
                              batch_size=batch_size,
                              optim_method=optim_method,
                              cores=cores,
                              bigdl_type="float")
    else:
        raise Exception("Not supported training set: %s" % type(training_set))
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L281-L351
def shapes_from_loc_and_scale(loc, scale, name="shapes_from_loc_and_scale"):
  """ """
  if loc is not None and tensorshape_util.rank(loc.shape) == 0:
    loc = None  # scalar loc is irrelevant to determining batch/event shape.
  with tf.name_scope(name):
    # Get event shape.
    event_size = scale.range_dimension_tensor()
    event_size_ = tf.get_static_value(event_size)
    loc_event_size_ = (None if loc is None
                       else tf.compat.dimension_value(loc.shape[-1]))

    if event_size_ is not None and loc_event_size_ is not None:
      # Static check that event shapes match.
      if loc_event_size_ != 1 and loc_event_size_ != event_size_:
        raise ValueError(
            "Event size of 'scale' ({}) could not be broadcast up to that "
            "of 'loc' ({}).".format(event_size_, loc_event_size_))
    elif loc_event_size_ is not None and loc_event_size_ != 1:
      event_size_ = loc_event_size_

    if event_size_ is None:
      event_shape = event_size[tf.newaxis]
    else:
      event_shape = tf.convert_to_tensor(
          value=np.reshape(event_size_, [1]), dtype=tf.int32,
          name="event_shape")

    # Get batch shape.
    batch_shape = scale.batch_shape_tensor()
    if loc is not None:
      loc_batch_shape = tensorshape_util.with_rank_at_least(loc.shape, 1)[:-1]
      if tensorshape_util.rank(
          loc.shape) is None or not tensorshape_util.is_fully_defined(
              loc_batch_shape):
        loc_batch_shape = tf.shape(input=loc)[:-1]
      else:
        loc_batch_shape = tf.convert_to_tensor(
            value=loc_batch_shape, dtype=tf.int32, name="loc_batch_shape")
      # This is defined in the core util module.
      batch_shape = prefer_static_broadcast_shape(batch_shape, loc_batch_shape)  # pylint: disable=undefined-variable
      batch_shape = tf.convert_to_tensor(
          value=batch_shape, dtype=tf.int32, name="batch_shape")

    return batch_shape, event_shape
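An illustrative call (not from the linked source), assuming the surrounding module's imports; the diagonal scale operator and shapes are made-up example values.

import tensorflow as tf

loc = tf.zeros([4, 3])
scale = tf.linalg.LinearOperatorDiag(tf.ones([4, 3]))
batch_shape, event_shape = shapes_from_loc_and_scale(loc, scale)
# batch_shape evaluates to [4], event_shape to [3]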
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L218-L247
def command(
        self,
        mark_success=False,
        ignore_all_deps=False,
        ignore_depends_on_past=False,
        ignore_task_deps=False,
        ignore_ti_state=False,
        local=False,
        pickle_id=None,
        raw=False,
        job_id=None,
        pool=None,
        cfg_path=None):
    """ """
    return " ".join(self.command_as_list(
        mark_success=mark_success,
        ignore_all_deps=ignore_all_deps,
        ignore_depends_on_past=ignore_depends_on_past,
        ignore_task_deps=ignore_task_deps,
        ignore_ti_state=ignore_ti_state,
        local=local,
        pickle_id=pickle_id,
        raw=raw,
        job_id=job_id,
        pool=pool,
        cfg_path=cfg_path))
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_lib.py#L172-L216
def correlation_matrix_volume_rejection_samples(
    det_bounds, dim, sample_shape, dtype, seed):
  """ """
  with tf.compat.v1.name_scope("rejection_sampler"):
    rej_proposals = _uniform_correlation_like_matrix(
        dim, sample_shape, dtype, seed=seed)
    rej_proposal_volume = 2. ** (dim * (dim - 1) / 2.)
    # The density of proposing any given point is 1 / rej_proposal_volume;
    # The weight of that point should be scaled by
    # 1 / density = rej_proposal_volume.
    rej_weights = rej_proposal_volume * _psd_mask(
        rej_proposals) * _det_large_enough_mask(rej_proposals, det_bounds)
    return rej_weights, rej_proposal_volume
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/real_nvp.py#L228-L305
def real_nvp_default_template(hidden_layers,
                              shift_only=False,
                              activation=tf.nn.relu,
                              name=None,
                              *args,  # pylint: disable=keyword-arg-before-vararg
                              **kwargs):
  """ """
  with tf.compat.v2.name_scope(name or "real_nvp_default_template"):

    def _fn(x, output_units, **condition_kwargs):
      """Fully connected MLP parameterized via `real_nvp_template`."""
      if condition_kwargs:
        raise NotImplementedError(
            "Conditioning not implemented in the default template.")

      if tensorshape_util.rank(x.shape) == 1:
        x = x[tf.newaxis, ...]
        reshape_output = lambda x: x[0]
      else:
        reshape_output = lambda x: x
      for units in hidden_layers:
        x = tf.compat.v1.layers.dense(
            inputs=x,
            units=units,
            activation=activation,
            *args,  # pylint: disable=keyword-arg-before-vararg
            **kwargs)
      x = tf.compat.v1.layers.dense(
          inputs=x,
          units=(1 if shift_only else 2) * output_units,
          activation=None,
          *args,  # pylint: disable=keyword-arg-before-vararg
          **kwargs)
      if shift_only:
        return reshape_output(x), None
      shift, log_scale = tf.split(x, 2, axis=-1)
      return reshape_output(shift), reshape_output(log_scale)

    return tf.compat.v1.make_template("real_nvp_default_template", _fn)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/dtype_util.py#L201-L228
def assert_same_float_dtype(tensors=None, dtype=None):
  """ """
  if tensors:
    dtype = _assert_same_base_type(tensors, dtype)
  if not dtype:
    dtype = tf.float32
  elif not is_floating(dtype):
    raise ValueError('Expected floating point type, got {}.'.format(dtype))
  return dtype
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/lbfgs.py#L80-L260
def minimize(value_and_gradients_function,
             initial_position,
             num_correction_pairs=10,
             tolerance=1e-8,
             x_tolerance=0,
             f_relative_tolerance=0,
             initial_inverse_hessian_estimate=None,
             max_iterations=50,
             parallel_iterations=1,
             stopping_condition=None,
             name=None):
  """ """
  if initial_inverse_hessian_estimate is not None:
    raise NotImplementedError(
        'Support of initial_inverse_hessian_estimate arg not yet implemented')

  if stopping_condition is None:
    stopping_condition = bfgs_utils.converged_all

  with tf.compat.v1.name_scope(name, 'minimize',
                               [initial_position, tolerance]):
    initial_position = tf.convert_to_tensor(
        value=initial_position, name='initial_position')
    dtype = initial_position.dtype.base_dtype
    tolerance = tf.convert_to_tensor(
        value=tolerance, dtype=dtype, name='grad_tolerance')
    f_relative_tolerance = tf.convert_to_tensor(
        value=f_relative_tolerance, dtype=dtype, name='f_relative_tolerance')
    x_tolerance = tf.convert_to_tensor(
        value=x_tolerance, dtype=dtype, name='x_tolerance')
    max_iterations = tf.convert_to_tensor(
        value=max_iterations, name='max_iterations')

    # The `state` here is a `LBfgsOptimizerResults` tuple with values for the
    # current state of the algorithm computation.
    def _cond(state):
      """Continue if iterations remain and stopping condition is not met."""
      return ((state.num_iterations < max_iterations) &
              tf.logical_not(stopping_condition(state.converged, state.failed)))

    def _body(current_state):
      """Main optimization loop."""
      search_direction = _get_search_direction(current_state)

      # TODO(b/120134934): Check if the derivative at the start point is not
      # negative, if so then reset position/gradient deltas and recompute
      # search direction.

      next_state = bfgs_utils.line_search_step(
          current_state, value_and_gradients_function, search_direction,
          tolerance, f_relative_tolerance, x_tolerance, stopping_condition)

      # If not failed or converged, update the Hessian estimate.
      should_update = ~(next_state.converged | next_state.failed)
      state_after_inv_hessian_update = bfgs_utils.update_fields(
          next_state,
          position_deltas=_queue_push(
              current_state.position_deltas, should_update,
              next_state.position - current_state.position),
          gradient_deltas=_queue_push(
              current_state.gradient_deltas, should_update,
              next_state.objective_gradient - current_state.objective_gradient))
      return [state_after_inv_hessian_update]

    initial_state = _get_initial_state(value_and_gradients_function,
                                       initial_position,
                                       num_correction_pairs,
                                       tolerance)
    return tf.while_loop(
        cond=_cond,
        body=_body,
        loop_vars=[initial_state],
        parallel_iterations=parallel_iterations)[0]
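A minimal driving sketch, assuming TensorFlow Probability exposes this routine as tfp.optimizer.lbfgs_minimize; the quadratic objective, its hand-written gradient, and the target vector are made-up example values.

import tensorflow as tf
import tensorflow_probability as tfp

target = tf.constant([1., 3., -2.])

def quadratic(x):
  value = tf.reduce_sum(input_tensor=(x - target) ** 2)
  return value, 2. * (x - target)  # (objective value, gradient)

results = tfp.optimizer.lbfgs_minimize(
    quadratic, initial_position=tf.zeros([3]), tolerance=1e-8)
# On convergence, results.position should be close to `target`.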
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/models/alexnet.py#L51-L61
def alexnet(pretrained=False, **kwargs):
    """ """
    model = AlexNet(**kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['alexnet']))
    return model
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1622-L1685
def build_kalman_sample_step(get_transition_matrix_for_timestep,
                             get_transition_noise_for_timestep,
                             get_observation_matrix_for_timestep,
                             get_observation_noise_for_timestep,
                             full_sample_and_batch_shape,
                             stream,
                             validate_args=False):
  """ """

  def sample_step(sampled_prev, t):
    """Sample values for a single timestep."""
    latent_prev, _ = sampled_prev

    transition_matrix = get_transition_matrix_for_timestep(t - 1)
    transition_noise = get_transition_noise_for_timestep(t - 1)

    latent_pred = transition_matrix.matmul(latent_prev)
    latent_sampled = latent_pred + transition_noise.sample(
        sample_shape=_augment_sample_shape(
            transition_noise,
            full_sample_and_batch_shape,
            validate_args),
        seed=stream())[..., tf.newaxis]

    observation_matrix = get_observation_matrix_for_timestep(t)
    observation_noise = get_observation_noise_for_timestep(t)

    observation_pred = observation_matrix.matmul(latent_sampled)
    observation_sampled = observation_pred + observation_noise.sample(
        sample_shape=_augment_sample_shape(
            observation_noise,
            full_sample_and_batch_shape,
            validate_args),
        seed=stream())[..., tf.newaxis]

    return (latent_sampled, observation_sampled)

  return sample_step
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1274-L1320
def pick_vector(cond, true_vector, false_vector, name="pick_vector"):
  """ """
  with tf.name_scope(name):
    cond = tf.convert_to_tensor(
        value=cond, dtype_hint=tf.bool, name="cond")
    if cond.dtype != tf.bool:
      raise TypeError(
          "{}.dtype={} which is not {}".format(cond, cond.dtype, tf.bool))

    true_vector = tf.convert_to_tensor(value=true_vector, name="true_vector")
    false_vector = tf.convert_to_tensor(value=false_vector,
                                        name="false_vector")
    if true_vector.dtype != false_vector.dtype:
      raise TypeError(
          "{}.dtype={} does not match {}.dtype={}".format(
              true_vector, true_vector.dtype, false_vector,
              false_vector.dtype))

    cond_value_static = tf.get_static_value(cond)
    if cond_value_static is not None:
      return true_vector if cond_value_static else false_vector
    n = tf.shape(input=true_vector)[0]
    return tf.slice(
        tf.concat([true_vector, false_vector], 0),
        [tf.where(cond, 0, n)], [tf.where(cond, n, -1)])
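A short usage sketch with arbitrary example vectors. When cond is statically known the Python branch returns immediately; otherwise the slice-of-concat graph at the end of the function is built.

import tensorflow as tf

picked = pick_vector(tf.constant(True), [1, 2, 3], [4, 5])   # values [1, 2, 3]
other = pick_vector(tf.constant(False), [1, 2, 3], [4, 5])   # values [4, 5]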
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/ti_deps/deps/base_ti_dep.py#L128-L142
def get_failure_reasons(self, ti, session, dep_context=None):
    """ """
    for dep_status in self.get_dep_statuses(ti, session, dep_context):
        if not dep_status.passed:
            yield dep_status.reason
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/pinot_hook.py#L36-L49
def get_conn(self):
    """ """
    conn = self.get_connection(self.pinot_broker_conn_id)
    pinot_broker_conn = connect(
        host=conn.host,
        port=conn.port,
        path=conn.extra_dejson.get('endpoint', '/pql'),
        scheme=conn.extra_dejson.get('schema', 'http')
    )
    self.log.info('Get the connection to pinot '
                  'broker on {host}'.format(host=conn.host))
    return pinot_broker_conn
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/task/task_runner/base_task_runner.py#L101-L135
def run_command(self, run_with=None, join_args=False):
    """ """
    run_with = run_with or []
    cmd = [" ".join(self._command)] if join_args else self._command
    full_cmd = run_with + cmd

    self.log.info('Running: %s', full_cmd)
    proc = subprocess.Popen(
        full_cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
        close_fds=True,
        env=os.environ.copy(),
        preexec_fn=os.setsid
    )

    # Start daemon thread to read subprocess logging output
    log_reader = threading.Thread(
        target=self._read_task_logs,
        args=(proc.stdout,),
    )
    log_reader.daemon = True
    log_reader.start()
    return proc
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/joint_distribution.py#L324-L341
def maybe_check_wont_broadcast(flat_xs, validate_args):
  """"""
  flat_xs = tuple(flat_xs)  # So we can receive generators.
  if not validate_args:
    # Note: we don't try static validation because it is theoretically
    # possible that a user wants to take advantage of broadcasting.
    # Only when `validate_args` is `True` do we enforce the validation.
    return flat_xs
  msg = 'Broadcasting probably indicates an error in model specification.'
  s = tuple(x.shape for x in flat_xs)
  if all(tensorshape_util.is_fully_defined(s_) for s_ in s):
    if not all(a == b for a, b in zip(s[1:], s[:-1])):
      raise ValueError(msg)
    return flat_xs
  assertions = [assert_util.assert_equal(a, b, message=msg)
                for a, b in zip(s[1:], s[:-1])]
  with tf.control_dependencies(assertions):
    return tuple(tf.identity(x) for x in flat_xs)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/redshift_hook.py#L46-L66
def delete_cluster(
        self,
        cluster_identifier,
        skip_final_cluster_snapshot=True,
        final_cluster_snapshot_identifier=''):
    """ """
    response = self.get_conn().delete_cluster(
        ClusterIdentifier=cluster_identifier,
        SkipFinalClusterSnapshot=skip_final_cluster_snapshot,
        FinalClusterSnapshotIdentifier=final_cluster_snapshot_identifier
    )
    return response['Cluster'] if response['Cluster'] else None
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/ti_deps/deps/base_ti_dep.py#L78-L107
def get_dep_statuses(self, ti, session, dep_context=None):
    """ """
    # this avoids a circular dependency
    from airflow.ti_deps.dep_context import DepContext

    if dep_context is None:
        dep_context = DepContext()

    if self.IGNOREABLE and dep_context.ignore_all_deps:
        yield self._passing_status(
            reason="Context specified all dependencies should be ignored.")
        return

    if self.IS_TASK_DEP and dep_context.ignore_task_deps:
        yield self._passing_status(
            reason="Context specified all task dependencies should be ignored.")
        return

    for dep_status in self._get_dep_statuses(ti, session, dep_context):
        yield dep_status
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/transformed_distribution.py#L41-L49
def _pick_scalar_condition(pred, cond_true, cond_false):
  """"""
  # Note: This function is only valid if all of pred, cond_true, and cond_false
  # are scalars. This means its semantics are arguably more like tf.cond than
  # tf.where even though we use tf.where to implement it.
  pred_ = tf.get_static_value(tf.convert_to_tensor(value=pred))
  if pred_ is None:
    return tf.where(pred, cond_true, cond_false)
  return cond_true if pred_ else cond_false
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/bijector.py#L104-L110
def remove(self, field):
  """"""
  return _Mapping(
      x=None if field == "x" else self.x,
      y=None if field == "y" else self.y,
      ildj=self.ildj,
      kwargs=self.kwargs)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L599-L643
def _bisect(value_and_gradients_function, initial_args, f_lim):
  """"""
  def _loop_cond(curr):
    # TODO(b/112524024): Also take into account max_iterations.
    return ~tf.reduce_all(input_tensor=curr.stopped)

  def _loop_body(curr):
    """Narrow down interval to satisfy opposite slope conditions."""
    mid = value_and_gradients_function((curr.left.x + curr.right.x) / 2)

    # Fail if function values at mid point are no longer finite; or left/right
    # points are so close to it that we can't distinguish them any more.
    failed = (curr.failed | ~is_finite(mid) |
              tf.equal(mid.x, curr.left.x) | tf.equal(mid.x, curr.right.x))

    # If mid point has a negative slope and the function value at that point is
    # small enough, we can use it as a new left end point to narrow down the
    # interval. If mid point has a positive slope, then we have found a suitable
    # right end point to bracket a minima within opposite slopes. Otherwise, the
    # mid point has a negative slope but the function value at that point is too
    # high to work as left end point, we are in the same situation in which we
    # started the loop so we just update the right end point and continue.
    to_update = ~(curr.stopped | failed)
    update_left = (mid.df < 0) & (mid.f <= f_lim)
    left = val_where(to_update & update_left, mid, curr.left)
    right = val_where(to_update & ~update_left, mid, curr.right)

    # We're done when the right end point has a positive slope.
    stopped = curr.stopped | failed | (right.df >= 0)

    return [_IntermediateResult(
        iteration=curr.iteration,
        stopped=stopped,
        failed=failed,
        num_evals=curr.num_evals + 1,
        left=left,
        right=right)]

  # The interval needs updating if the right end point has a negative slope and
  # the value of the function at that point is too high. It is not a valid left
  # end point but along with the current left end point, it encloses another
  # minima. The loop above tries to narrow the interval so that it satisfies the
  # opposite slope conditions.
  return tf.while_loop(
      cond=_loop_cond,
      body=_loop_body,
      loop_vars=[initial_args])[0]
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/structural_time_series.py#L211-L272
def joint_log_prob(self, observed_time_series):
  """ """
  with tf.compat.v1.name_scope(
      'joint_log_prob', values=[observed_time_series]):
    [
        observed_time_series,
        mask
    ] = sts_util.canonicalize_observed_time_series_with_mask(
        observed_time_series)

    num_timesteps = distribution_util.prefer_static_value(
        tf.shape(input=observed_time_series))[-2]

    def log_joint_fn(*param_vals):
      """Generated log-density function."""

      # Sum the log_prob values from parameter priors.
      param_lp = sum([
          param.prior.log_prob(param_val)
          for (param, param_val) in zip(self.parameters, param_vals)
      ])

      # Build a linear Gaussian state space model and evaluate the marginal
      # log_prob on observations.
      lgssm = self.make_state_space_model(
          param_vals=param_vals, num_timesteps=num_timesteps)
      observation_lp = lgssm.log_prob(observed_time_series, mask=mask)

      # Sum over likelihoods from iid observations. Without this sum,
      # adding `param_lp + observation_lp` would broadcast the param priors
      # over the sample shape, which incorrectly multi-counts the param
      # priors.
      sample_ndims = tf.maximum(0, tf.rank(observation_lp) -
                                tf.rank(param_lp))
      observation_lp = tf.reduce_sum(
          input_tensor=observation_lp, axis=tf.range(sample_ndims))

      return param_lp + observation_lp

    return log_joint_fn
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_lib.py#L107-L130
def _det_large_enough_mask(x, det_bounds):
  """ """
  # For the curious: I wonder whether it is possible and desirable to
  # use a Cholesky decomposition-based algorithm for this, since the
  # only matrices whose determinant this code cares about will be PSD.
  # Didn't figure out how to code that in TensorFlow.
  #
  # Expert opinion is that it would be about twice as fast since
  # Cholesky is roughly half the cost of Gaussian Elimination with
  # Partial Pivoting. But this is less of an impact than the switch in
  # _psd_mask.
  return tf.cast(tf.linalg.det(x) > det_bounds, dtype=x.dtype)
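A tiny check with made-up inputs (not from the linked source) showing the mask semantics: entries are 1.0 exactly where det(x) exceeds the corresponding bound.

import numpy as np
import tensorflow as tf

x = tf.constant(np.stack([np.eye(3), 0.1 * np.eye(3)]), dtype=tf.float32)  # dets 1.0 and 0.001
mask = _det_large_enough_mask(x, det_bounds=tf.constant([0.5, 0.5]))
# mask has values [1., 0.]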
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L874-L895
def _sparse_block_diag(sp_a):
  """ """
  # Construct the matrix [[M, N], [1, 0], [0, 1]] which would map the index
  # (b, i, j) to (Mb + i, Nb + j). This effectively creates a block-diagonal
  # matrix of dense shape [B * M, B * N].
  # Note that this transformation doesn't increase the number of non-zero
  # entries in the SparseTensor.
  sp_a_shape = tf.convert_to_tensor(value=_get_shape(sp_a, tf.int64))
  ind_mat = tf.concat([[sp_a_shape[-2:]], tf.eye(2, dtype=tf.int64)], axis=0)
  indices = tf.matmul(sp_a.indices, ind_mat)
  dense_shape = sp_a_shape[0] * sp_a_shape[1:]
  return tf.SparseTensor(
      indices=indices, values=sp_a.values, dense_shape=dense_shape)
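A worked example (with made-up values, assuming the module's _get_shape helper is in scope) of the index map described in the comment above: a batch of two 2x3 sparse matrices becomes one 4x6 block-diagonal sparse matrix, and the nonzero at (b=1, i=0, j=2) lands at row 1*2+0=2, column 1*3+2=5.

import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 0, 1], [1, 0, 2]],
                     values=[7., 9.],
                     dense_shape=[2, 2, 3])
block_diag = _sparse_block_diag(sp)
# block_diag.dense_shape is [4, 6]; block_diag.indices are [[0, 1], [2, 5]]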
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/sprites_dataset.py#L188-L191
def create_random_seq(character, action_metadata, direction, length=8):
  """"""
  start = tf.random.uniform([], maxval=action_metadata[1], dtype=tf.int32)
  return create_seq(character, action_metadata, direction, length, start)
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L68-L76
def load(path, bigdl_type="float"):
    """ """
    jmodel = callBigDlFunc(bigdl_type, "loadBigDL", path)
    return Layer.of(jmodel)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/sprites_dataset.py#L152-L185
def create_seq(character, action_metadata, direction, length=8, start=0):
  """ """
  sprite_start = (action_metadata[0] + direction) * FRAME_SIZE
  sprite_end = (action_metadata[0] + direction + 1) * FRAME_SIZE
  sprite_line = character[sprite_start:sprite_end, ...]

  # Extract 64x64 patches that are side-by-side in the sprite, and limit
  # to the actual number of frames for the given action.
  frames = tf.stack(tf.split(sprite_line, 13, axis=1))  # 13 is a hack
  frames = frames[0:action_metadata[1]]

  # Extract a slice of the desired length.
  # NOTE: Length could be longer than the number of frames, so tile as needed.
  frames = tf.roll(frames, shift=-start, axis=0)
  frames = tf.tile(frames, [2, 1, 1, 1])  # 2 is a hack
  frames = frames[:length]
  frames = tf.cast(frames, dtype=tf.float32)
  frames.set_shape([length, FRAME_SIZE, FRAME_SIZE, CHANNELS])

  return frames
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/hager_zhang.py#L666-L680
def _print(pass_through_tensor, values):
  """"""
  flat_values = []
  for value in values:
    # Checks if it is a namedtuple.
    if hasattr(value, '_fields'):
      for field in value._fields:
        flat_values.extend([field, _to_str(getattr(value, field))])
      continue
    if isinstance(value, (list, tuple)):
      for v in value:
        flat_values.append(_to_str(v))
      continue
    flat_values.append(_to_str(value))
  return tf.compat.v1.Print(pass_through_tensor, flat_values)