https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/state.py#L107-L120
def unfinished(cls):
    """Return the list of states for task instances that are not yet finished."""
    return [
        cls.NONE,
        cls.SCHEDULED,
        cls.QUEUED,
        cls.RUNNING,
        cls.SHUTDOWN,
        cls.UP_FOR_RETRY,
        cls.UP_FOR_RESCHEDULE
    ]
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/internal/distribution_tensor_coercible.py#L71-L128
def _value(self, dtype=None, name=None, as_ref=False):  # pylint: disable=g-doc-args
  """Converts this wrapped `Distribution` to a concrete `Tensor`, caching the result."""
  # pylint: disable=protected-access
  if as_ref:
    raise NotImplementedError(
        'Cannot convert a `Distribution` to a reference '
        '(e.g., `tf.Variable`).')
  if self._concrete_value is None:
    if self._convert_to_tensor_fn is None:
      raise NotImplementedError(
          'Failed to convert object of type {} to Tensor. Contents: {}. '
          'Call `distribution.set_tensor_conversion(lambda self: ...)` to '
          'enable `tf.convert_to_tensor` capability. For example: '
          '`x = tfd.Normal(0,1).set_tensor_conversion(tfd.Distribution.mean)`'
          ' results in `tf.convert_to_tensor(x)` being identical to '
          '`x.mean()`.'.format(type(self), self))
    with self._name_scope('value'):
      self._concrete_value = (self._convert_to_tensor_fn(self)
                              if callable(self._convert_to_tensor_fn)
                              else self._convert_to_tensor_fn)
      if not tf.is_tensor(self._concrete_value):
        self._concrete_value = tfd._convert_to_tensor(
            value=self._concrete_value,
            name=name or 'concrete_value',
            dtype=dtype,
            dtype_hint=self.dtype)
  return self._concrete_value
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hdfs_hook.py#L57-L98
def get_conn(self):
    """Return a snakebite HDFS client (Client, HAClient, or AutoConfigClient)."""
    # When using HAClient, proxy_user must be the same, so is ok to always
    # take the first.
    effective_user = self.proxy_user
    autoconfig = self.autoconfig
    use_sasl = configuration.conf.get('core', 'security') == 'kerberos'

    try:
        connections = self.get_connections(self.hdfs_conn_id)

        if not effective_user:
            effective_user = connections[0].login
        if not autoconfig:
            autoconfig = connections[0].extra_dejson.get('autoconfig', False)
        hdfs_namenode_principal = connections[0].extra_dejson.get(
            'hdfs_namenode_principal')
    except AirflowException:
        if not autoconfig:
            raise

    if autoconfig:
        # will read config info from $HADOOP_HOME conf files
        client = AutoConfigClient(effective_user=effective_user,
                                  use_sasl=use_sasl)
    elif len(connections) == 1:
        client = Client(connections[0].host, connections[0].port,
                        effective_user=effective_user, use_sasl=use_sasl,
                        hdfs_namenode_principal=hdfs_namenode_principal)
    elif len(connections) > 1:
        nn = [Namenode(conn.host, conn.port) for conn in connections]
        client = HAClient(nn, effective_user=effective_user,
                          use_sasl=use_sasl,
                          hdfs_namenode_principal=hdfs_namenode_principal)
    else:
        raise HDFSHookException("conn_id doesn't exist in the repository "
                                "and autoconfig is not specified")

    return client
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/aws_lambda_hook.py#L53-L68
def invoke_lambda(self, payload):
    """Invoke the configured AWS Lambda function with the given payload."""
    awslambda_conn = self.get_conn()

    response = awslambda_conn.invoke(
        FunctionName=self.function_name,
        InvocationType=self.invocation_type,
        LogType=self.log_type,
        Payload=payload,
        Qualifier=self.qualifier
    )

    return response
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/nelder_mead.py#L602-L606
def _replace_at_index(x, index, replacement):
  """Replaces the element at `index` along the leading axis of `x` with `replacement`."""
  x_new = tf.concat([x[:index],
                     tf.expand_dims(replacement, axis=0),
                     x[(index + 1):]], axis=0)
  return x_new
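For reference, a minimal usage sketch of `_replace_at_index`; the tensor values are illustrative, not from the source.

import tensorflow as tf

x = tf.constant([[1., 1.], [2., 2.], [3., 3.]])
row = tf.constant([9., 9.])
# Replaces row 1 of `x`, yielding [[1., 1.], [9., 9.], [3., 3.]].
x_new = _replace_at_index(x, index=1, replacement=row)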
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/sbu.py#L87-L110
def download(self):
    """Download and extract the SBU tarball, then fetch the individual photos."""
    import tarfile

    if self._check_integrity():
        print('Files already downloaded and verified')
        return

    download_url(self.url, self.root, self.filename, self.md5_checksum)

    # Extract file
    with tarfile.open(os.path.join(self.root, self.filename), 'r:gz') as tar:
        tar.extractall(path=self.root)

    # Download individual photos
    with open(os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_urls.txt')) as fh:
        for line in fh:
            url = line.rstrip()
            try:
                download_url(url, os.path.join(self.root, 'dataset'))
            except OSError:
                # The images point to public images on Flickr.
                # Note: Images might be removed by users at anytime.
                pass
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/s3_task_handler.py#L146-L170
def s3_write(self, log, remote_log_location, append=True):
    """Write the given log to S3, optionally appending to an existing remote log."""
    if append and self.s3_log_exists(remote_log_location):
        old_log = self.s3_read(remote_log_location)
        log = '\n'.join([old_log, log]) if old_log else log

    try:
        self.hook.load_string(
            log,
            key=remote_log_location,
            replace=True,
            encrypt=configuration.conf.getboolean('core', 'ENCRYPT_S3_LOGS'),
        )
    except Exception:
        self.log.exception('Could not write logs to %s', remote_log_location)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/transformed_kernel.py#L230-L273
def one_step(self, current_state, previous_kernel_results):
  """Runs one step of the inner kernel and maps the new state back through the bijector."""
  with tf.compat.v1.name_scope(
      name=mcmc_util.make_name(self.name, 'transformed_kernel', 'one_step'),
      values=[previous_kernel_results]):
    transformed_next_state, kernel_results = self._inner_kernel.one_step(
        previous_kernel_results.transformed_state,
        previous_kernel_results.inner_results)
    transformed_next_state_parts = (
        transformed_next_state
        if mcmc_util.is_list_like(transformed_next_state) else
        [transformed_next_state])
    next_state_parts = self._forward_transform(transformed_next_state_parts)
    next_state = (
        next_state_parts
        if mcmc_util.is_list_like(transformed_next_state) else
        next_state_parts[0])
    kernel_results = TransformedTransitionKernelResults(
        transformed_state=transformed_next_state,
        inner_results=kernel_results)
    return next_state, kernel_results
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/opsgenie_alert_operator.py#L126-L131
def execute(self, context):
    """Call the OpsgenieAlertHook to post the alert payload to Opsgenie."""
    self.hook = OpsgenieAlertHook(self.opsgenie_conn_id)
    self.hook.execute(self._build_opsgenie_payload())
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/hmc.py#L1052-L1145
def _compute_log_acceptance_correction(current_momentums,
                                       proposed_momentums,
                                       independent_chain_ndims,
                                       name=None):
  """Computes the log acceptance correction from the change in kinetic energy."""
  with tf.compat.v1.name_scope(
      name, 'compute_log_acceptance_correction',
      [independent_chain_ndims, current_momentums, proposed_momentums]):
    log_current_kinetic, log_proposed_kinetic = [], []
    for current_momentum, proposed_momentum in zip(current_momentums,
                                                   proposed_momentums):
      axis = tf.range(independent_chain_ndims, tf.rank(current_momentum))
      log_current_kinetic.append(_log_sum_sq(current_momentum, axis))
      log_proposed_kinetic.append(_log_sum_sq(proposed_momentum, axis))
    current_kinetic = 0.5 * tf.exp(
        tf.reduce_logsumexp(
            input_tensor=tf.stack(log_current_kinetic, axis=-1), axis=-1))
    proposed_kinetic = 0.5 * tf.exp(
        tf.reduce_logsumexp(
            input_tensor=tf.stack(log_proposed_kinetic, axis=-1), axis=-1))
    return mcmc_util.safe_sum([current_kinetic, -proposed_kinetic])
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L298-L307
def delete_product(self, location, product_id, project_id=None, retry=None,
                   timeout=None, metadata=None):
    """Delete a Product resource from Cloud Vision Product Search."""
    client = self.get_conn()
    name = ProductSearchClient.product_path(project_id, location, product_id)
    self.log.info('Deleting Product: %s', name)
    client.delete_product(name=name, retry=retry, timeout=timeout,
                          metadata=metadata)
    self.log.info('Product with the name [%s] deleted.', name)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L426-L545
def bracket(value_and_gradients_function, search_interval, f_lim, max_iterations, expansion_param=5.0): """ """ already_stopped = search_interval.failed | search_interval.converged # If the slope at right end point is positive, step B1 in [2], then the given # initial points already bracket a minimum. bracketed = search_interval.right.df >= 0 # Bisection is needed, step B2, if right end point almost works as a new left # end point but the objective value is too high. needs_bisect = ( search_interval.right.df < 0) & (search_interval.right.f > f_lim) # In these three cases bracketing is already `stopped` and there is no need # to perform further evaluations. Otherwise the bracketing loop is needed to # expand the interval, step B3, until the conditions are met. initial_args = _IntermediateResult( iteration=search_interval.iterations, stopped=already_stopped | bracketed | needs_bisect, failed=search_interval.failed, num_evals=search_interval.func_evals, left=search_interval.left, right=search_interval.right) def _loop_cond(curr): return (curr.iteration < max_iterations) & ~tf.reduce_all(input_tensor=curr.stopped) def _loop_body(curr): """Main body of bracketing loop.""" # The loop maintains the invariant that curr.stopped is true if we have # either: failed, successfully bracketed, or not yet bracketed but needs # bisect. On the only remaining case, step B3 in [2]. case we need to # expand and update the left/right values appropriately. new_right = value_and_gradients_function(expansion_param * curr.right.x) left = val_where(curr.stopped, curr.left, curr.right) right = val_where(curr.stopped, curr.right, new_right) # Updated the failed, bracketed, and needs_bisect conditions. failed = curr.failed | ~is_finite(right) bracketed = right.df >= 0 needs_bisect = (right.df < 0) & (right.f > f_lim) return [_IntermediateResult( iteration=curr.iteration + 1, stopped=curr.stopped | failed | bracketed | needs_bisect, failed=failed, num_evals=curr.num_evals + 1, left=left, right=right)] bracket_result = tf.while_loop( cond=_loop_cond, body=_loop_body, loop_vars=[initial_args])[0] # For entries where bisect is still needed, mark them as not yet stopped, # reset the left end point, and run `_bisect` on them. needs_bisect = ( (bracket_result.right.df < 0) & (bracket_result.right.f > f_lim)) stopped = already_stopped | bracket_result.failed | ~needs_bisect left = val_where(stopped, bracket_result.left, search_interval.left) bisect_args = bracket_result._replace(stopped=stopped, left=left) return _bisect(value_and_gradients_function, bisect_args, f_lim)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mysql_to_gcs.py#L290-L310
def _get_col_type_dict(self):
    """Return a mapping of column name to column type from the configured schema."""
    schema = []
    if isinstance(self.schema, string_types):
        schema = json.loads(self.schema)
    elif isinstance(self.schema, list):
        schema = self.schema
    elif self.schema is not None:
        self.log.warn('Using default schema due to unexpected type. '
                      'Should be a string or list.')

    col_type_dict = {}
    try:
        col_type_dict = {col['name']: col['type'] for col in schema}
    except KeyError:
        self.log.warn('Using default schema due to missing name or type. Please '
                      'refer to: https://cloud.google.com/bigquery/docs/schemas'
                      '#specifying_a_json_schema_file')
    return col_type_dict
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L754-L768
def _eval_all_one_hot(fn, dist, name=None):
  """Evaluates `fn(dist, x)` for every one-hot vector `x` of the event size."""
  with tf.compat.v1.name_scope(name, 'eval_all_one_hot'):
    event_size = dist.event_shape_tensor()[-1]
    batch_ndims = tf.size(input=dist.batch_shape_tensor())
    # Reshape `eye(d)` to: `[d] + [1]*batch_ndims + [d]`.
    x = tf.reshape(
        tf.eye(event_size, dtype=dist.dtype),
        shape=tf.pad(
            tensor=tf.ones(batch_ndims, tf.int32),
            paddings=[[1, 1]],
            constant_values=event_size))
    # Compute `fn(x)` then cyclically left-transpose one dim.
    perm = tf.pad(tensor=tf.range(1, batch_ndims + 1), paddings=[[0, 1]])
    return tf.transpose(a=fn(dist, x), perm=perm)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/interceptor.py#L96-L172
def get_next_interceptor():
  """Yields the next interceptor on the stack, restoring it afterwards."""
  try:
    interceptor = _interceptor_stack.stack.pop()
    yield interceptor
  finally:
    _interceptor_stack.stack.append(interceptor)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_fileshare_hook.py#L64-L81
def check_for_file(self, share_name, directory_name, file_name, **kwargs):
    """Check if a file exists on the given Azure File Share."""
    return self.connection.exists(share_name, directory_name,
                                  file_name, **kwargs)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs_utils.py#L309-L322
def _check_convergence(current_position,
                       next_position,
                       current_objective,
                       next_objective,
                       next_gradient,
                       grad_tolerance,
                       f_relative_tolerance,
                       x_tolerance):
  """Checks whether the gradient, position, or objective tolerance is met."""
  grad_converged = norm(next_gradient, dims=1) <= grad_tolerance
  x_converged = norm(next_position - current_position, dims=1) <= x_tolerance
  f_converged = (norm(next_objective - current_objective, dims=0) <=
                 f_relative_tolerance * current_objective)
  return grad_converged | x_converged | f_converged
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/auth/backends/password_auth.py#L107-L132
def authenticate(session, username, password):
    """Authenticate a PasswordUser, raising AuthenticationError on failure."""
    if not username or not password:
        raise AuthenticationError()

    user = session.query(PasswordUser).filter(
        PasswordUser.username == username).first()

    if not user:
        raise AuthenticationError()

    if not user.authenticate(password):
        raise AuthenticationError()

    log.info("User %s successfully authenticated", username)
    return user
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1729-L1732
def _propagate_cov(cov, linop, dist):
  """Propagates a covariance matrix through a linear operator, adding noise covariance."""
  # For linop A and input cov P, returns `A P A' + dist.cov()`
  return linop.matmul(linop.matmul(cov), adjoint_arg=True) + dist.covariance()
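A minimal sketch of how `_propagate_cov` might be called; the transition matrix, noise distribution, and covariance below are illustrative values, not taken from the source.

import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

# Illustrative two-dimensional latent state.
transition_matrix = tf.linalg.LinearOperatorFullMatrix([[1., 0.1], [0., 1.]])
transition_noise = tfd.MultivariateNormalDiag(scale_diag=[0.5, 0.5])
prev_cov = tf.eye(2)

# Computes A P A' + Q, matching the expression in `_propagate_cov` above.
new_cov = _propagate_cov(prev_cov, transition_matrix, transition_noise)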
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/hmc.py#L556-L564
def bootstrap_results(self, init_state):
  """Creates initial kernel results, attaching the step-size update if configured."""
  kernel_results = self._impl.bootstrap_results(init_state)
  if self.step_size_update_fn is not None:
    step_size_assign = self.step_size_update_fn(self.step_size, None)  # pylint: disable=not-callable
    kernel_results = kernel_results._replace(
        extra=HamiltonianMonteCarloExtraKernelResults(
            step_size_assign=step_size_assign))
  return kernel_results
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/bayesian_neural_network.py#L190-L210
def build_fake_data(num_examples=10):
  """Builds a fake MNIST-style dataset with `num_examples` random images and labels."""
  class Dummy(object):
    pass

  mnist_data = Dummy()
  mnist_data.train = Dummy()
  mnist_data.train.images = np.float32(np.random.randn(
      num_examples, *IMAGE_SHAPE))
  mnist_data.train.labels = np.int32(np.random.permutation(
      np.arange(num_examples)))
  mnist_data.train.num_examples = num_examples
  mnist_data.validation = Dummy()
  mnist_data.validation.images = np.float32(np.random.randn(
      num_examples, *IMAGE_SHAPE))
  mnist_data.validation.labels = np.int32(np.random.permutation(
      np.arange(num_examples)))
  mnist_data.validation.num_examples = num_examples
  return mnist_data
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L48-L60
def check_for_bucket(self, bucket_name):
    """Check if the given S3 bucket exists."""
    try:
        self.get_conn().head_bucket(Bucket=bucket_name)
        return True
    except ClientError as e:
        self.log.info(e.response["Error"]["Message"])
        return False
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/disentangled_vae.py#L846-L866
def sample_static_prior(self, samples, batch_size, fixed=False):
  """Samples the static latent prior, optionally fixing the sample across the batch."""
  dist = self.static_prior()

  if fixed:
    # in either case, shape is (samples, batch, latent)
    sample = dist.sample((samples, 1)) + tf.zeros([batch_size, 1])
  else:
    sample = dist.sample((samples, batch_size))

  return sample, dist
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/deep_exponential_family.py#L178-L231
def load_nips2011_papers(path): """ """ path = os.path.expanduser(path) filename = "NIPS_1987-2015.csv" filepath = os.path.join(path, filename) if not os.path.exists(filepath): url = ("https://archive.ics.uci.edu/ml/machine-learning-databases/" "00371/NIPS_1987-2015.csv") if not tf.io.gfile.exists(path): tf.io.gfile.makedirs(path) print("Downloading %s to %s" % (url, filepath)) urllib.request.urlretrieve(url, filepath) with open(filepath) as f: iterator = csv.reader(f) documents = next(iterator)[1:] words = [] x_train = [] for row in iterator: words.append(row[0]) x_train.append(row[1:]) x_train = np.array(x_train, dtype=np.int) # Subset to documents in 2011 and words appearing in at least two documents # and have a total word count of at least 10. doc_idx = [i for i, document in enumerate(documents) if document.startswith("2011")] documents = [documents[doc] for doc in doc_idx] x_train = x_train[:, doc_idx] word_idx = np.logical_and(np.sum(x_train != 0, 1) >= 2, np.sum(x_train, 1) >= 10) words = [word for word, idx in zip(words, word_idx) if idx] bag_of_words = x_train[word_idx, :].T return bag_of_words, words
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1705-L1779
def insert_all(self, project_id, dataset_id, table_id, rows, ignore_unknown_values=False, skip_invalid_rows=False, fail_on_error=False): """ """ dataset_project_id = project_id if project_id else self.project_id body = { "rows": rows, "ignoreUnknownValues": ignore_unknown_values, "kind": "bigquery#tableDataInsertAllRequest", "skipInvalidRows": skip_invalid_rows, } try: self.log.info( 'Inserting %s row(s) into Table %s:%s.%s', len(rows), dataset_project_id, dataset_id, table_id ) resp = self.service.tabledata().insertAll( projectId=dataset_project_id, datasetId=dataset_id, tableId=table_id, body=body ).execute(num_retries=self.num_retries) if 'insertErrors' not in resp: self.log.info( 'All row(s) inserted successfully: %s:%s.%s', dataset_project_id, dataset_id, table_id ) else: error_msg = '{} insert error(s) occurred: {}:{}.{}. Details: {}'.format( len(resp['insertErrors']), dataset_project_id, dataset_id, table_id, resp['insertErrors']) if fail_on_error: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(error_msg) ) self.log.info(error_msg) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content) )
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/text_messages_hmc.py#L64-L153
def benchmark_text_messages_hmc( num_results=int(3e3), num_burnin_steps=int(3e3), num_leapfrog_steps=3): """""" if not tf.executing_eagerly(): tf.compat.v1.reset_default_graph() # Build a static, pretend dataset. count_data = tf.cast( tf.concat( [tfd.Poisson(rate=15.).sample(43), tfd.Poisson(rate=25.).sample(31)], axis=0), dtype=tf.float32) if tf.executing_eagerly(): count_data = count_data.numpy() else: with tf.compat.v1.Session(): count_data = count_data.eval() # Define a closure over our joint_log_prob. def unnormalized_log_posterior(lambda1, lambda2, tau): return text_messages_joint_log_prob(count_data, lambda1, lambda2, tau) if tf.executing_eagerly(): sample_chain = tf.function(tfp.mcmc.sample_chain) else: sample_chain = tfp.mcmc.sample_chain # Initialize the step_size. (It will be automatically adapted.) step_size = tf.compat.v2.Variable( name='step_size', initial_value=tf.constant(0.05, dtype=tf.float32), trainable=False) def computation(): """The benchmark computation.""" initial_chain_state = [ tf.constant(count_data.mean(), name='init_lambda1'), tf.constant(count_data.mean(), name='init_lambda2'), tf.constant(0.5, name='init_tau'), ] unconstraining_bijectors = [ tfp.bijectors.Exp(), # Maps a positive real to R. tfp.bijectors.Exp(), # Maps a positive real to R. tfp.bijectors.Sigmoid(), # Maps [0,1] to R. ] _, kernel_results = sample_chain( num_results=num_results, num_burnin_steps=num_burnin_steps, current_state=initial_chain_state, kernel=tfp.mcmc.TransformedTransitionKernel( inner_kernel=tfp.mcmc.HamiltonianMonteCarlo( target_log_prob_fn=unnormalized_log_posterior, num_leapfrog_steps=num_leapfrog_steps, step_size=step_size, step_size_update_fn= tfp.mcmc.make_simple_step_size_update_policy(num_burnin_steps), state_gradients_are_stopped=True), bijector=unconstraining_bijectors)) return kernel_results.inner_results.is_accepted # Let's force evaluation of graph to ensure build time is not part of our time # trial. is_accepted_tensor = computation() if not tf.executing_eagerly(): session = tf.compat.v1.Session() session.run(tf.compat.v1.global_variables_initializer()) session.run(is_accepted_tensor) start_time = time.time() if tf.executing_eagerly(): is_accepted = computation() else: is_accepted = session.run(is_accepted_tensor) wall_time = time.time() - start_time num_accepted = np.sum(is_accepted) acceptance_rate = np.float32(num_accepted) / np.float32(num_results) return dict( iters=(num_results + num_burnin_steps) * num_leapfrog_steps, extras={'acceptance_rate': acceptance_rate}, wall_time=wall_time)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/transformed_distribution.py#L415-L430
def _finish_log_prob_for_one_fiber(self, y, x, ildj, event_ndims,
                                   **distribution_kwargs):
  """Finishes the log-prob computation for one fiber of the transformation."""
  x = self._maybe_rotate_dims(x, rotate_right=True)
  log_prob = self.distribution.log_prob(x, **distribution_kwargs)
  if self._is_maybe_event_override:
    log_prob = tf.reduce_sum(
        input_tensor=log_prob, axis=self._reduce_event_indices)
  log_prob += tf.cast(ildj, log_prob.dtype)
  if self._is_maybe_event_override and isinstance(event_ndims, int):
    tensorshape_util.set_shape(
        log_prob,
        tf.broadcast_static_shape(
            tensorshape_util.with_rank_at_least(y.shape, 1)[:-event_ndims],
            self.batch_shape))
  return log_prob
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L95-L122
def get_pandas_df(self, sql, parameters=None, dialect=None):
    """Run a BigQuery SQL query and return the result as a pandas DataFrame."""
    private_key = self._get_field('key_path', None) or self._get_field('keyfile_dict', None)

    if dialect is None:
        dialect = 'legacy' if self.use_legacy_sql else 'standard'

    return read_gbq(sql,
                    project_id=self._get_field('project'),
                    dialect=dialect,
                    verbose=False,
                    private_key=private_key)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_mlengine_hook.py#L165-L183
def create_version(self, project_id, model_name, version_spec):
    """Create a new ML Engine model version and wait for the operation to complete."""
    parent_name = 'projects/{}/models/{}'.format(project_id, model_name)
    create_request = self._mlengine.projects().models().versions().create(
        parent=parent_name, body=version_spec)
    response = create_request.execute()
    get_request = self._mlengine.projects().operations().get(
        name=response['name'])

    return _poll_with_exponential_delay(
        request=get_request,
        max_n=9,
        is_done_func=lambda resp: resp.get('done', False),
        is_error_func=lambda resp: resp.get('error', None) is not None)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/_vendor/nvd3/NVD3Chart.py#L474-L485
def buildcontent(self):
    """Build the HTML content of the chart: container, JS, and rendered template."""
    self.buildcontainer()
    # if the subclass has a method buildjs this method will be
    # called instead of the method defined here
    # when this subclass method is entered it does call
    # the method buildjschart defined here
    self.buildjschart()
    self.htmlcontent = self.template_chart_nvd3.render(chart=self)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/internal/util.py#L255-L293
def _maybe_expand_trailing_dim(observed_time_series_tensor):
  """Appends a trailing size-one dimension unless one is already present."""
  with tf.compat.v1.name_scope(
      'maybe_expand_trailing_dim', values=[observed_time_series_tensor]):
    if (observed_time_series_tensor.shape.ndims is not None and
        tf.compat.dimension_value(
            observed_time_series_tensor.shape[-1]) is not None):
      expanded_time_series = (
          observed_time_series_tensor
          if observed_time_series_tensor.shape[-1] == 1 else
          observed_time_series_tensor[..., tf.newaxis])
    else:
      expanded_time_series = tf.cond(
          pred=tf.equal(tf.shape(input=observed_time_series_tensor)[-1], 1),
          true_fn=lambda: observed_time_series_tensor,
          false_fn=lambda: observed_time_series_tensor[..., tf.newaxis])
    return expanded_time_series
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_container_hook.py#L111-L129
def _append_label(cluster_proto, key, val):
    """Append a sanitized resource label to the cluster proto."""
    val = val.replace('.', '-').replace('+', '-')
    cluster_proto.resource_labels.update({key: val})
    return cluster_proto
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/gcs_task_handler.py#L166-L177
def parse_gcs_url(gsurl):
    """Split a Google Cloud Storage URL into a (bucket, blob) tuple."""
    parsed_url = urlparse(gsurl)
    if not parsed_url.netloc:
        raise AirflowException('Please provide a bucket name')
    else:
        bucket = parsed_url.netloc
        blob = parsed_url.path.strip('/')
        return bucket, blob
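A small usage sketch of `parse_gcs_url`; the bucket and path below are illustrative, not from the source.

bucket, blob = parse_gcs_url('gs://my-logs-bucket/dag_id/task_id/1.log')
# bucket == 'my-logs-bucket'
# blob == 'dag_id/task_id/1.log'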
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/no_u_turn_sampler/nuts.py#L457-L466
def _embed_no_none_gradient_check(value_and_gradients_fn):
  """Wraps a value-and-gradients function so that `None` gradients raise an error."""
  @functools.wraps(value_and_gradients_fn)
  def func_wrapped(*args, **kwargs):
    """Wrapped function which checks for None gradients."""
    value, grads = value_and_gradients_fn(*args, **kwargs)
    if any(grad is None for grad in grads):
      raise ValueError("Gradient is None for a state.")
    return value, grads

  return func_wrapped
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L255-L340
def pad(img, padding, fill=0, padding_mode='constant'):
    """Pad the given PIL Image on all sides with the specified padding value and mode."""
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    if not isinstance(padding, (numbers.Number, tuple)):
        raise TypeError('Got inappropriate padding arg')
    if not isinstance(fill, (numbers.Number, str, tuple)):
        raise TypeError('Got inappropriate fill arg')
    if not isinstance(padding_mode, str):
        raise TypeError('Got inappropriate padding_mode arg')

    if isinstance(padding, Sequence) and len(padding) not in [2, 4]:
        raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
                         "{} element tuple".format(len(padding)))

    assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \
        'Padding mode should be either constant, edge, reflect or symmetric'

    if padding_mode == 'constant':
        if img.mode == 'P':
            palette = img.getpalette()
            image = ImageOps.expand(img, border=padding, fill=fill)
            image.putpalette(palette)
            return image

        return ImageOps.expand(img, border=padding, fill=fill)
    else:
        if isinstance(padding, int):
            pad_left = pad_right = pad_top = pad_bottom = padding
        if isinstance(padding, Sequence) and len(padding) == 2:
            pad_left = pad_right = padding[0]
            pad_top = pad_bottom = padding[1]
        if isinstance(padding, Sequence) and len(padding) == 4:
            pad_left = padding[0]
            pad_top = padding[1]
            pad_right = padding[2]
            pad_bottom = padding[3]

        if img.mode == 'P':
            palette = img.getpalette()
            img = np.asarray(img)
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
            img = Image.fromarray(img)
            img.putpalette(palette)
            return img

        img = np.asarray(img)
        # RGB image
        if len(img.shape) == 3:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
        # Grayscale image
        if len(img.shape) == 2:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)

        return Image.fromarray(img)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/dense_variational.py#L193-L213
def compute_output_shape(self, input_shape):
  """Computes the output shape of the layer given `input_shape`."""
  input_shape = tf.TensorShape(input_shape)
  input_shape = input_shape.with_rank_at_least(2)
  if tf.compat.dimension_value(input_shape[-1]) is None:
    raise ValueError(
        'The innermost dimension of `input_shape` must be defined, '
        'but saw: {}'.format(input_shape))
  return input_shape[:-1].concatenate(self.units)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/family.py#L174-L179
def _name_scope(self, name=None, default_name=None, values=None):
  """Yields a name scope nested under this object's name."""
  with tf.compat.v1.name_scope(self.name):
    with tf.compat.v1.name_scope(
        name, default_name, values=values or []) as scope:
      yield scope
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/diagnostic.py#L35-L143
def effective_sample_size(states,
                          filter_threshold=0.,
                          filter_beyond_lag=None,
                          name=None):
  """Estimates the effective sample size of one or more chains of MCMC states."""
  states_was_list = _is_list_like(states)

  # Convert all args to lists.
  if not states_was_list:
    states = [states]

  filter_beyond_lag = _broadcast_maybelist_arg(states, filter_beyond_lag,
                                               'filter_beyond_lag')
  filter_threshold = _broadcast_maybelist_arg(states, filter_threshold,
                                              'filter_threshold')

  # Process items, one at a time.
  with tf.compat.v1.name_scope(name, 'effective_sample_size'):
    ess_list = [
        _effective_sample_size_single_state(s, ml, mlt)
        for (s, ml, mlt) in zip(states, filter_beyond_lag, filter_threshold)
    ]

  if states_was_list:
    return ess_list
  return ess_list[0]
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/disentangled_vae.py#L787-L844
def reconstruct(self, inputs, samples=1, sample_static=False,
                sample_dynamic=False, swap_static=False, swap_dynamic=False,
                fix_static=False, fix_dynamic=False):
  """Reconstructs the input sequence, optionally sampling or swapping the latent variables."""
  batch_size = tf.shape(input=inputs)[-5]
  length = len(tf.unstack(inputs, axis=-4))  # hack for graph mode

  features = self.compressor(inputs)  # (..., batch, timesteps, hidden)

  if sample_static:
    static_sample, _ = self.sample_static_prior(
        samples, batch_size, fix_static)
  else:
    static_sample, _ = self.sample_static_posterior(features, samples)

  if swap_static:
    static_sample = tf.reverse(static_sample, axis=[1])

  if sample_dynamic:
    dynamic_sample, _ = self.sample_dynamic_prior(
        samples, batch_size, length, fix_dynamic)
  else:
    dynamic_sample, _ = self.sample_dynamic_posterior(
        features, samples, static_sample)

  if swap_dynamic:
    dynamic_sample = tf.reverse(dynamic_sample, axis=[1])

  likelihood = self.decoder((dynamic_sample, static_sample))
  return likelihood
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/poolrequests.py#L90-L128
def request(method, url, **kwargs):
    """Perform an HTTP request using a pooled session, outgoing proxies, and per-engine timeouts."""
    time_before_request = time()

    # session start
    session = SessionSinglePool()

    # proxies
    kwargs['proxies'] = settings['outgoing'].get('proxies') or None

    # timeout
    if 'timeout' in kwargs:
        timeout = kwargs['timeout']
    else:
        timeout = getattr(threadLocal, 'timeout', None)
        if timeout is not None:
            kwargs['timeout'] = timeout

    # do request
    response = session.request(method=method, url=url, **kwargs)

    time_after_request = time()

    # is there a timeout for this engine ?
    if timeout is not None:
        timeout_overhead = 0.2  # seconds
        # start_time = when the user request started
        start_time = getattr(threadLocal, 'start_time', time_before_request)
        search_duration = time_after_request - start_time
        if search_duration > timeout + timeout_overhead:
            raise requests.exceptions.Timeout(response=response)

    # session end
    session.close()

    if hasattr(threadLocal, 'total_time'):
        threadLocal.total_time += time_after_request - time_before_request

    return response
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/models/bayesian_resnet.py#L25-L92
def bayesian_resnet(input_shape, num_classes=10, kernel_posterior_scale_mean=-9.0, kernel_posterior_scale_stddev=0.1, kernel_posterior_scale_constraint=0.2): """ """ filters = [64, 128, 256, 512] kernels = [3, 3, 3, 3] strides = [1, 2, 2, 2] def _untransformed_scale_constraint(t): return tf.clip_by_value(t, -1000, tf.math.log(kernel_posterior_scale_constraint)) kernel_posterior_fn = tfp.layers.default_mean_field_normal_fn( untransformed_scale_initializer=tf.compat.v1.initializers.random_normal( mean=kernel_posterior_scale_mean, stddev=kernel_posterior_scale_stddev), untransformed_scale_constraint=_untransformed_scale_constraint) image = tf.keras.layers.Input(shape=input_shape, dtype='float32') x = tfp.layers.Convolution2DFlipout( 64, 3, strides=1, padding='same', kernel_posterior_fn=kernel_posterior_fn)(image) for i in range(len(kernels)): x = _resnet_block( x, filters[i], kernels[i], strides[i], kernel_posterior_fn) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.AveragePooling2D(4, 1)(x) x = tf.keras.layers.Flatten()(x) x = tfp.layers.DenseFlipout( num_classes, kernel_posterior_fn=kernel_posterior_fn)(x) model = tf.keras.Model(inputs=image, outputs=x, name='resnet18') return model
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/nelder_mead.py#L832-L844
def _resolve_parameters(dim, reflection, expansion, contraction, shrinkage,
                        dtype):
  """Fills in default Nelder-Mead coefficients for any parameters left as `None`."""
  dim = tf.cast(dim, dtype=dtype)
  reflection = 1. if reflection is None else reflection
  expansion = (1. + 2. / dim) if expansion is None else expansion
  contraction = (0.75 - 1. / (2 * dim)) if contraction is None else contraction
  shrinkage = (1. - 1. / dim) if shrinkage is None else shrinkage
  return reflection, expansion, contraction, shrinkage
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/fisher_scoring.py#L517-L620
def prepare_args(model_matrix, response, model_coefficients, predicted_linear_response, offset, name=None): """ """ graph_deps = [model_matrix, response, model_coefficients, predicted_linear_response, offset] with tf.compat.v1.name_scope(name, 'prepare_args', graph_deps): dtype = dtype_util.common_dtype(graph_deps, np.float32) model_matrix = tf.convert_to_tensor( value=model_matrix, dtype=dtype, name='model_matrix') if offset is not None: offset = tf.convert_to_tensor(value=offset, dtype=dtype, name='offset') response = tf.convert_to_tensor( value=response, dtype=dtype, name='response') use_default_model_coefficients = model_coefficients is None if use_default_model_coefficients: # User did not supply model coefficients; assume they're all zero. batch_shape = tf.shape(input=model_matrix)[:-2] num_columns = tf.shape(input=model_matrix)[-1] model_coefficients = tf.zeros( shape=tf.concat([batch_shape, [num_columns]], axis=0), dtype=dtype, name='model_coefficients') else: # User did supply model coefficients; convert to Tensor in case it's # numpy or literal. model_coefficients = tf.convert_to_tensor( value=model_coefficients, dtype=dtype, name='model_coefficients') if predicted_linear_response is None: if use_default_model_coefficients: # Since we're using zeros for model_coefficients, we know the predicted # linear response will also be all zeros. if offset is None: predicted_linear_response = tf.zeros_like( response, dtype, name='predicted_linear_response') else: predicted_linear_response = tf.broadcast_to( offset, tf.shape(input=response), name='predicted_linear_response') else: # We were given model_coefficients but not the predicted linear # response. predicted_linear_response = calculate_linear_predictor( model_matrix, model_coefficients, offset) else: predicted_linear_response = tf.convert_to_tensor( value=predicted_linear_response, dtype=dtype, name='predicted_linear_response') return [ model_matrix, response, model_coefficients, predicted_linear_response, offset, ]
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/lkj.py#L47-L56
def _uniform_unit_norm(dimension, shape, dtype, seed):
  """Returns a batch of points drawn uniformly from the unit hypersphere."""
  # This works because the Gaussian distribution is spherically symmetric.
  # raw shape: shape + [dimension]
  raw = normal.Normal(
      loc=dtype_util.as_numpy_dtype(dtype)(0),
      scale=dtype_util.as_numpy_dtype(dtype)(1)).sample(
          tf.concat([shape, [dimension]], axis=0), seed=seed())
  unit_norm = raw / tf.norm(tensor=raw, ord=2, axis=-1)[..., tf.newaxis]
  return unit_norm
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/forecast.py#L172-L362
def forecast(model, observed_time_series, parameter_samples, num_steps_forecast): """ """ with tf.compat.v1.name_scope( 'forecast', values=[observed_time_series, parameter_samples, num_steps_forecast]): [ observed_time_series, mask ] = sts_util.canonicalize_observed_time_series_with_mask( observed_time_series) # Run filtering over the observed timesteps to extract the # latent state posterior at timestep T+1 (i.e., the final # filtering distribution, pushed through the transition model). # This is the prior for the forecast model ("today's prior # is yesterday's posterior"). num_observed_steps = dist_util.prefer_static_value( tf.shape(input=observed_time_series))[-2] observed_data_ssm = model.make_state_space_model( num_timesteps=num_observed_steps, param_vals=parameter_samples) (_, _, _, predictive_means, predictive_covs, _, _ ) = observed_data_ssm.forward_filter(observed_time_series, mask=mask) # Build a batch of state-space models over the forecast period. Because # we'll use MixtureSameFamily to mix over the posterior draws, we need to # do some shenanigans to move the `[num_posterior_draws]` batch dimension # from the leftmost to the rightmost side of the model's batch shape. # TODO(b/120245392): enhance `MixtureSameFamily` to reduce along an # arbitrary axis, and eliminate `move_dimension` calls here. parameter_samples = model._canonicalize_param_vals_as_map(parameter_samples) # pylint: disable=protected-access parameter_samples_with_reordered_batch_dimension = { param.name: dist_util.move_dimension( parameter_samples[param.name], 0, -(1 + _prefer_static_event_ndims(param.prior))) for param in model.parameters} forecast_prior = tfd.MultivariateNormalFullCovariance( loc=dist_util.move_dimension(predictive_means[..., -1, :], 0, -2), covariance_matrix=dist_util.move_dimension( predictive_covs[..., -1, :, :], 0, -3)) # Ugly hack: because we moved `num_posterior_draws` to the trailing (rather # than leading) dimension of parameters, the parameter batch shapes no # longer broadcast against the `constant_offset` attribute used in `sts.Sum` # models. We fix this by manually adding an extra broadcasting dim to # `constant_offset` if present. # The root cause of this hack is that we mucked with param dimensions above # and are now passing params that are 'invalid' in the sense that they don't # match the shapes of the model's param priors. The fix (as above) will be # to update MixtureSameFamily so we can avoid changing param dimensions # altogether. # TODO(b/120245392): enhance `MixtureSameFamily` to reduce along an # arbitrary axis, and eliminate this hack. kwargs = {} if hasattr(model, 'constant_offset'): kwargs['constant_offset'] = tf.convert_to_tensor( value=model.constant_offset, dtype=forecast_prior.dtype)[..., tf.newaxis] # We assume that any STS model that has a `constant_offset` attribute # will allow it to be overridden as a kwarg. This is currently just # `sts.Sum`. # TODO(b/120245392): when kwargs hack is removed, switch back to calling # the public version of `_make_state_space_model`. 
forecast_ssm = model._make_state_space_model( # pylint: disable=protected-access num_timesteps=num_steps_forecast, param_map=parameter_samples_with_reordered_batch_dimension, initial_state_prior=forecast_prior, initial_step=num_observed_steps, **kwargs) num_posterior_draws = dist_util.prefer_static_value( forecast_ssm.batch_shape_tensor())[-1] return tfd.MixtureSameFamily( mixture_distribution=tfd.Categorical( logits=tf.zeros([num_posterior_draws], dtype=forecast_ssm.dtype)), components_distribution=forecast_ssm)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L447-L475
def insert_bucket_acl(self, bucket_name, entity, role, user_project=None):
    """Create a new ACL entry on the specified GCS bucket."""
    self.log.info('Creating a new ACL entry in bucket: %s', bucket_name)
    client = self.get_conn()
    bucket = client.bucket(bucket_name=bucket_name)
    bucket.acl.reload()
    bucket.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
    if user_project:
        bucket.acl.user_project = user_project
    bucket.acl.save()

    self.log.info('A new ACL entry created in bucket: %s', bucket_name)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dagrun.py#L242-L329
def update_state(self, session=None): """ """ dag = self.get_dag() tis = self.get_task_instances(session=session) self.log.debug("Updating state for %s considering %s task(s)", self, len(tis)) for ti in list(tis): # skip in db? if ti.state == State.REMOVED: tis.remove(ti) else: ti.task = dag.get_task(ti.task_id) # pre-calculate # db is faster start_dttm = timezone.utcnow() unfinished_tasks = self.get_task_instances( state=State.unfinished(), session=session ) none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks) none_task_concurrency = all(t.task.task_concurrency is None for t in unfinished_tasks) # small speed up if unfinished_tasks and none_depends_on_past and none_task_concurrency: # todo: this can actually get pretty slow: one task costs between 0.01-015s no_dependencies_met = True for ut in unfinished_tasks: # We need to flag upstream and check for changes because upstream # failures/re-schedules can result in deadlock false positives old_state = ut.state deps_met = ut.are_dependencies_met( dep_context=DepContext( flag_upstream_failed=True, ignore_in_retry_period=True, ignore_in_reschedule_period=True), session=session) if deps_met or old_state != ut.current_state(session=session): no_dependencies_met = False break duration = (timezone.utcnow() - start_dttm).total_seconds() * 1000 Stats.timing("dagrun.dependency-check.{}".format(self.dag_id), duration) root_ids = [t.task_id for t in dag.roots] roots = [t for t in tis if t.task_id in root_ids] # if all roots finished and at least one failed, the run failed if (not unfinished_tasks and any(r.state in (State.FAILED, State.UPSTREAM_FAILED) for r in roots)): self.log.info('Marking run %s failed', self) self.set_state(State.FAILED) dag.handle_callback(self, success=False, reason='task_failure', session=session) # if all roots succeeded and no unfinished tasks, the run succeeded elif not unfinished_tasks and all(r.state in (State.SUCCESS, State.SKIPPED) for r in roots): self.log.info('Marking run %s successful', self) self.set_state(State.SUCCESS) dag.handle_callback(self, success=True, reason='success', session=session) # if *all tasks* are deadlocked, the run failed elif (unfinished_tasks and none_depends_on_past and none_task_concurrency and no_dependencies_met): self.log.info('Deadlock; marking run %s failed', self) self.set_state(State.FAILED) dag.handle_callback(self, success=False, reason='all_tasks_deadlocked', session=session) # finally, if the roots aren't done, the dag is still running else: self.set_state(State.RUNNING) self._emit_duration_stats_for_finished_state() # todo: determine we want to use with_for_update to make sure to lock the run session.merge(self) session.commit() return self.state
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/macros/__init__.py#L28-L46
def ds_add(ds, days):
    """Add or subtract days from a YYYY-MM-DD date string and return the new date string."""
    ds = datetime.strptime(ds, '%Y-%m-%d')
    if days:
        ds = ds + timedelta(days)
    return ds.isoformat()[:10]
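A brief usage sketch of `ds_add`; the dates are illustrative.

ds_add('2015-01-01', 5)    # '2015-01-06'
ds_add('2015-01-06', -5)   # '2015-01-01'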
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/utils.py#L79-L185
def generate_pages(current_page, num_of_pages, search=None, showPaused=None, window=7): """ """ void_link = 'javascript:void(0)' first_node = Markup("""<li class="paginate_button {disabled}" id="dags_first"> <a href="{href_link}" aria-controls="dags" data-dt-idx="0" tabindex="0">&laquo;</a> </li>""") previous_node = Markup("""<li class="paginate_button previous {disabled}" id="dags_previous"> <a href="{href_link}" aria-controls="dags" data-dt-idx="0" tabindex="0">&lt;</a> </li>""") next_node = Markup("""<li class="paginate_button next {disabled}" id="dags_next"> <a href="{href_link}" aria-controls="dags" data-dt-idx="3" tabindex="0">&gt;</a> </li>""") last_node = Markup("""<li class="paginate_button {disabled}" id="dags_last"> <a href="{href_link}" aria-controls="dags" data-dt-idx="3" tabindex="0">&raquo;</a> </li>""") page_node = Markup("""<li class="paginate_button {is_active}"> <a href="{href_link}" aria-controls="dags" data-dt-idx="2" tabindex="0">{page_num}</a> </li>""") output = [Markup('<ul class="pagination" style="margin-top:0px;">')] is_disabled = 'disabled' if current_page <= 0 else '' output.append(first_node.format(href_link="?{}" .format(get_params(page=0, search=search, showPaused=showPaused)), disabled=is_disabled)) page_link = void_link if current_page > 0: page_link = '?{}'.format(get_params(page=(current_page - 1), search=search, showPaused=showPaused)) output.append(previous_node.format(href_link=page_link, disabled=is_disabled)) mid = int(window / 2) last_page = num_of_pages - 1 if current_page <= mid or num_of_pages < window: pages = [i for i in range(0, min(num_of_pages, window))] elif mid < current_page < last_page - mid: pages = [i for i in range(current_page - mid, current_page + mid + 1)] else: pages = [i for i in range(num_of_pages - window, last_page + 1)] def is_current(current, page): return page == current for page in pages: vals = { 'is_active': 'active' if is_current(current_page, page) else '', 'href_link': void_link if is_current(current_page, page) else '?{}'.format(get_params(page=page, search=search, showPaused=showPaused)), 'page_num': page + 1 } output.append(page_node.format(**vals)) is_disabled = 'disabled' if current_page >= num_of_pages - 1 else '' page_link = (void_link if current_page >= num_of_pages - 1 else '?{}'.format(get_params(page=current_page + 1, search=search, showPaused=showPaused))) output.append(next_node.format(href_link=page_link, disabled=is_disabled)) output.append(last_node.format(href_link="?{}" .format(get_params(page=last_page, search=search, showPaused=showPaused)), disabled=is_disabled)) output.append(Markup('</ul>')) return Markup('\n'.join(output))
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1097-L1145
def latents_to_observations(self, latent_means, latent_covs): """ """ with tf.name_scope("latents_to_observations"): pushforward_latents_step = build_pushforward_latents_step( self.get_observation_matrix_for_timestep, self.get_observation_noise_for_timestep) latent_means = distribution_util.move_dimension( latent_means, source_idx=-2, dest_idx=0) latent_means = latent_means[..., tf.newaxis] # Make matmul happy. latent_covs = distribution_util.move_dimension( latent_covs, source_idx=-3, dest_idx=0) (initial_observation_mean, initial_observation_cov) = pushforward_latents_step( _=None, # Loop body ignores previous observations. latent_t_mean_cov=(self.initial_step, latent_means[self.initial_step], latent_covs[self.initial_step])) # TODO(davmre) this loop is embarassingly parallel; replace with `pfor`. timesteps = tf.range(self.initial_step, self.initial_step + self.num_timesteps) observation_means, observation_covs = tf.scan( pushforward_latents_step, elems=(timesteps, latent_means, latent_covs), initializer=(initial_observation_mean, initial_observation_cov), parallel_iterations=10000) observation_means = distribution_util.move_dimension( observation_means[..., 0], source_idx=0, dest_idx=-2) observation_covs = distribution_util.move_dimension( observation_covs, source_idx=0, dest_idx=-3) return observation_means, observation_covs
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/nest_util.py#L98-L113
def _nested_convert_to_tensor(struct, dtype=None, name=None):
  """Converts a potentially nested structure to tensors, recursing into children if needed."""
  if dtype is not None or not tf.nest.is_nested(struct):
    return tf.convert_to_tensor(struct, dtype=dtype)

  if _maybe_convertible_to_tensor(struct):
    try:
      # Try converting the structure wholesale.
      return tf.convert_to_tensor(value=struct, name=name)
    except (ValueError, TypeError):
      # Unfortunately Eager/Graph mode don't agree on the error type.
      pass
  # Try converting all of its children.
  shallow_struct = _get_shallow_structure(struct)
  return nest.map_structure_up_to(
      shallow_struct, lambda s: _nested_convert_to_tensor(s, name=name), struct)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1577-L1619
def build_kalman_cov_step(get_transition_matrix_for_timestep,
                          get_transition_noise_for_timestep,
                          get_observation_matrix_for_timestep,
                          get_observation_noise_for_timestep):
  """Builds the covariance update step for the prior covariance recursion."""

  def cov_step(previous_covs, t):
    """Single step of prior covariance recursion."""
    previous_latent_cov, _ = previous_covs
    latent_cov = _propagate_cov(
        previous_latent_cov,
        get_transition_matrix_for_timestep(t - 1),
        get_transition_noise_for_timestep(t - 1))
    observation_cov = _propagate_cov(
        latent_cov,
        get_observation_matrix_for_timestep(t),
        get_observation_noise_for_timestep(t))
    return (latent_cov, observation_cov)

  return cov_step
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L468-L499
def five_crop(img, size):
    """Crop the four corners and the center of the given PIL Image."""
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    else:
        assert len(size) == 2, "Please provide only two dimensions (h, w) for size."

    w, h = img.size
    crop_h, crop_w = size
    if crop_w > w or crop_h > h:
        raise ValueError("Requested crop size {} is bigger than input size {}".format(
            size, (h, w)))
    tl = img.crop((0, 0, crop_w, crop_h))
    tr = img.crop((w - crop_w, 0, w, crop_h))
    bl = img.crop((0, h - crop_h, crop_w, h))
    br = img.crop((w - crop_w, h - crop_h, w, h))
    center = center_crop(img, (crop_h, crop_w))
    return (tl, tr, bl, br, center)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/replica_exchange_mc.py#L569-L575
def _get_field(kernel_results, field_name):
  """Extracts `field_name` from kernel results, looking inside `accepted_results` if needed."""
  if hasattr(kernel_results, field_name):
    return getattr(kernel_results, field_name)
  if hasattr(kernel_results, 'accepted_results'):
    return getattr(kernel_results.accepted_results, field_name)
  raise TypeError('Cannot extract %s from %s' % (field_name, kernel_results))
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L550-L585
def jeffreys(logu, name=None):
  """Computes the Jeffreys Csiszar-function of `u = exp(logu)`."""
  with tf.compat.v1.name_scope(name, "jeffreys", [logu]):
    logu = tf.convert_to_tensor(value=logu, name="logu")
    return 0.5 * tf.math.expm1(logu) * logu
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dagrun.py#L115-L160
def find(dag_id=None, run_id=None, execution_date=None,
         state=None, external_trigger=None, no_backfills=False,
         session=None):
    """Return all DagRuns matching the given filters, ordered by execution date."""
    DR = DagRun

    qry = session.query(DR)
    if dag_id:
        qry = qry.filter(DR.dag_id == dag_id)
    if run_id:
        qry = qry.filter(DR.run_id == run_id)
    if execution_date:
        if isinstance(execution_date, list):
            qry = qry.filter(DR.execution_date.in_(execution_date))
        else:
            qry = qry.filter(DR.execution_date == execution_date)
    if state:
        qry = qry.filter(DR.state == state)
    if external_trigger is not None:
        qry = qry.filter(DR.external_trigger == external_trigger)
    if no_backfills:
        # in order to prevent a circular dependency
        from airflow.jobs import BackfillJob
        qry = qry.filter(DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'))

    dr = qry.order_by(DR.execution_date).all()

    return dr
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/structural_time_series.py#L96-L109
def batch_shape_tensor(self):
  """Returns the broadcast batch shape of all parameter priors as a 1-D `Tensor`."""
  batch_shape = tf.constant([], dtype=tf.int32)
  for param in self.parameters:
    batch_shape = tf.broadcast_dynamic_shape(
        batch_shape, param.prior.batch_shape_tensor())
  return batch_shape
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/nelder_mead.py#L847-L880
def _evaluate_objective_multiple(objective_function, arg_batch,
                                 batch_evaluate_objective):
  """Evaluates the objective on a batch of points, either vectorized or via `tf.map_fn`."""
  n_points = tf.shape(input=arg_batch)[0]
  if batch_evaluate_objective:
    return objective_function(arg_batch), n_points
  return tf.map_fn(objective_function, arg_batch), n_points
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L586-L594
def params_size(event_size, num_components, name=None):
  """Number of `params` needed to create a `CategoricalMixtureOfOneHotCategorical`."""
  with tf.compat.v1.name_scope(
      name, 'CategoricalMixtureOfOneHotCategorical_params_size',
      [event_size, num_components]):
    return MixtureSameFamily.params_size(
        num_components,
        OneHotCategorical.params_size(event_size, name=name),
        name=name)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/models/bayesian_resnet.py#L95-L121
def _resnet_block(x, filters, kernel, stride, kernel_posterior_fn):
  """Builds one pre-activation ResNet block with Flipout convolutions."""
  x = tf.keras.layers.BatchNormalization()(x)
  x = tf.keras.layers.Activation('relu')(x)

  if stride != 1 or filters != x.shape[1]:
    shortcut = _projection_shortcut(x, filters, stride, kernel_posterior_fn)
  else:
    shortcut = x

  x = tfp.layers.Convolution2DFlipout(
      filters,
      kernel,
      strides=stride,
      padding='same',
      kernel_posterior_fn=kernel_posterior_fn)(x)
  x = tf.keras.layers.BatchNormalization()(x)
  x = tf.keras.layers.Activation('relu')(x)
  x = tfp.layers.Convolution2DFlipout(
      filters,
      kernel,
      strides=1,
      padding='same',
      kernel_posterior_fn=kernel_posterior_fn)(x)
  x = tf.keras.layers.add([x, shortcut])
  return x
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/half_cauchy.py#L36-L63
def check_arg_in_support(f):
  """Decorator that asserts the argument lies in the distribution's support before calling `f`."""
  @functools.wraps(f)
  def _check_arg_and_apply_f(*args, **kwargs):
    dist = args[0]
    x = args[1]
    with tf.control_dependencies([
        assert_util.assert_greater_equal(
            x, dist.loc,
            message="x is not in the support of the distribution")
    ] if dist.validate_args else []):
      return f(*args, **kwargs)
  return _check_arg_and_apply_f
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L620-L661
def modified_gan(logu, self_normalized=False, name=None):
  """Computes the modified-GAN Csiszar-function of `u = exp(logu)`."""
  with tf.compat.v1.name_scope(name, "chi_square", [logu]):
    logu = tf.convert_to_tensor(value=logu, name="logu")
    y = tf.nn.softplus(logu) - logu
    if self_normalized:
      y += 0.5 * tf.math.expm1(logu)
    return y
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L805-L817
def _insert_back_keep_dims(x, axis):
  """Re-inserts size-one dimensions at the given axes, as if `keep_dims=True` had been used."""
  for i in sorted(axis):
    x = tf.expand_dims(x, axis=i)
  return x
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/family.py#L137-L161
def log_prob(self, response, predicted_linear_response, name=None):
  """Computes the log-probability of `response` given the predicted linear response."""
  with self._name_scope(
      name, 'log_prob', [response, predicted_linear_response]):
    dtype = dtype_util.common_dtype([response, predicted_linear_response])
    response = tf.convert_to_tensor(
        value=response, dtype=dtype, name='response')
    predicted_linear_response = tf.convert_to_tensor(
        value=predicted_linear_response, name='predicted_linear_response')
    return self._log_prob(response, predicted_linear_response)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/_vendor/nvd3/NVD3Chart.py#L363-L371
def buildhtml(self):
    """Build the full HTML page by wrapping the chart content in the page template."""
    self.buildcontent()
    self.content = self.htmlcontent
    self.htmlcontent = self.template_page_nvd3.render(chart=self)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L626-L723
def quantiles(x,
              num_quantiles,
              axis=None,
              interpolation=None,
              keep_dims=False,
              validate_args=False,
              name=None):
  """Computes the cut points dividing `x` into `num_quantiles` equal-probability intervals."""
  with tf.compat.v1.name_scope(
      name, 'quantiles', values=[x, num_quantiles, axis]):
    x = tf.convert_to_tensor(value=x, name='x')
    return percentile(
        x,
        q=tf.linspace(
            # percentile casts q to float64 before using it...so may as well use
            # float64 here. Note that using x.dtype won't work with linspace
            # if x is integral type (which is another motivation for hard-coding
            # float64).
            tf.convert_to_tensor(value=0, dtype=tf.float64),
            tf.convert_to_tensor(value=100, dtype=tf.float64),
            num=num_quantiles + 1),
        axis=axis,
        interpolation=interpolation,
        keep_dims=keep_dims,
        validate_args=validate_args,
        preserve_gradients=False)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L39-L47
def val_where(cond, tval, fval):
  """"""
  if isinstance(tval, tf.Tensor):
    return tf.where(cond, tval, fval)
  elif isinstance(tval, tuple):
    cls = type(tval)
    return cls(*(val_where(cond, t, f) for t, f in zip(tval, fval)))
  else:
    raise TypeError('Argument `tval` must be a Tensor or a tuple of Tensors; '
                    'found {}.'.format(type(tval)))
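A hedged example of the tuple branch, using a hypothetical namedtuple container:

import collections
import tensorflow as tf

Pair = collections.namedtuple('Pair', ['a', 'b'])  # hypothetical container
cond = tf.constant([True, False, True])
tval = Pair(a=tf.constant([1., 2., 3.]), b=tf.constant([10., 20., 30.]))
fval = Pair(a=tf.zeros([3]), b=tf.ones([3]))
picked = val_where(cond, tval, fval)
# picked.a == [1., 0., 3.]; picked.b == [10., 1., 30.]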
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L602-L638
def variance(x, sample_axis=0, keepdims=False, name=None): """ """ with tf.compat.v1.name_scope(name, 'variance', values=[x, sample_axis]): return covariance( x, y=None, sample_axis=sample_axis, event_axis=None, keepdims=keepdims)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/bayesian_neural_network.py#L161-L187
def build_input_pipeline(mnist_data, batch_size, heldout_size):
  """"""
  # Build an iterator over training batches.
  training_dataset = tf.data.Dataset.from_tensor_slices(
      (mnist_data.train.images, np.int32(mnist_data.train.labels)))
  training_batches = training_dataset.shuffle(
      50000, reshuffle_each_iteration=True).repeat().batch(batch_size)
  training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)

  # Build an iterator over the heldout set with batch_size=heldout_size,
  # i.e., return the entire heldout set as a constant.
  heldout_dataset = tf.data.Dataset.from_tensor_slices(
      (mnist_data.validation.images,
       np.int32(mnist_data.validation.labels)))
  heldout_frozen = (heldout_dataset.take(heldout_size).
                    repeat().batch(heldout_size))
  heldout_iterator = tf.compat.v1.data.make_one_shot_iterator(heldout_frozen)

  # Combine these into a feedable iterator that can switch between training
  # and validation inputs.
  handle = tf.compat.v1.placeholder(tf.string, shape=[])
  feedable_iterator = tf.compat.v1.data.Iterator.from_string_handle(
      handle, training_batches.output_types, training_batches.output_shapes)
  images, labels = feedable_iterator.get_next()

  return images, labels, handle, training_iterator, heldout_iterator
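A hypothetical driver for the feedable iterator (the `mnist_data` object, batch sizes, and session setup are assumed), run under a TF1-style session:

images, labels, handle, training_iterator, heldout_iterator = (
    build_input_pipeline(mnist_data, batch_size=128, heldout_size=5000))
with tf.compat.v1.Session() as sess:
  train_handle = sess.run(training_iterator.string_handle())
  batch_images, batch_labels = sess.run(
      [images, labels], feed_dict={handle: train_handle})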
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/program_transformations.py#L226-L246
def _get_function_inputs(f, src_kwargs): """ """ if hasattr(f, "_func"): # functions returned by tf.make_template f = f._func # pylint: disable=protected-access try: # getargspec was deprecated in Python 3.6 argspec = inspect.getfullargspec(f) except AttributeError: argspec = inspect.getargspec(f) fkwargs = {k: v for k, v in six.iteritems(src_kwargs) if k in argspec.args} return fkwargs
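A small illustration with a hypothetical callable: only keyword arguments named in the function's signature survive the filter.

def toy_model(features, labels):  # hypothetical target function
  return features, labels

src_kwargs = {'features': 1.0, 'labels': 0.0, 'learning_rate': 1e-3}
kwargs = _get_function_inputs(toy_model, src_kwargs)
# kwargs == {'features': 1.0, 'labels': 0.0}; 'learning_rate' is dropped.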
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_bigtable_hook.py#L199-L214
def delete_table(self, instance_id, table_id, project_id=None): """ """ table = self.get_instance(instance_id=instance_id, project_id=project_id).table(table_id=table_id) table.delete()
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L552-L599
def stddev(x, sample_axis=0, keepdims=False, name=None): """ """ with tf.compat.v1.name_scope(name, 'stddev', values=[x, sample_axis]): return tf.sqrt(variance(x, sample_axis=sample_axis, keepdims=keepdims))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/aws_firehose_hook.py#L44-L56
def put_records(self, records): """ """ firehose_conn = self.get_conn() response = firehose_conn.put_record_batch( DeliveryStreamName=self.delivery_stream, Records=records ) return response
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/generated_random_variables.py#L148-L175
def _make_random_variable(distribution_cls): """""" @interceptable @functools.wraps(distribution_cls, assigned=('__module__', '__name__')) @docstring_util.expand_docstring( cls=distribution_cls.__name__, doc=inspect.cleandoc(distribution_cls.__init__.__doc__ or '')) def func(*args, **kwargs): # pylint: disable=g-doc-args """Create a random variable for ${cls}. See ${cls} for more details. Returns: RandomVariable. #### Original Docstring for Distribution ${doc} """ # pylint: enable=g-doc-args sample_shape = kwargs.pop('sample_shape', ()) value = kwargs.pop('value', None) return RandomVariable(distribution=distribution_cls(*args, **kwargs), sample_shape=sample_shape, value=value) return func
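An illustrative use of the factory; this is how the generated `ed.Normal`-style constructors are produced, but the explicit call below is a sketch that assumes the module's helpers (`interceptable`, `RandomVariable`) are in scope, not the public API.

import tensorflow_probability as tfp

Normal = _make_random_variable(tfp.distributions.Normal)
x = Normal(loc=0., scale=1., sample_shape=[3])
# `x` is a RandomVariable wrapping tfd.Normal(0., 1.); its value is a sample of shape [3].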
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/dtype_util.py#L132-L139
def name(dtype): """""" dtype = tf.as_dtype(dtype) if hasattr(dtype, 'name'): return dtype.name if hasattr(dtype, '__name__'): return dtype.__name__ return str(dtype)
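For example:

import numpy as np
import tensorflow as tf

name(tf.float32)   # -> 'float32'
name(np.float64)   # -> 'float64'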
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L726-L787
def _get_static_ndims(x,
                      expect_static=False,
                      expect_ndims=None,
                      expect_ndims_no_more_than=None,
                      expect_ndims_at_least=None):
  """
  """
  ndims = x.shape.ndims
  if ndims is None:
    shape_const = tf.get_static_value(tf.shape(input=x))
    if shape_const is not None:
      ndims = shape_const.ndim

  if ndims is None:
    if expect_static:
      raise ValueError(
          'Expected argument `x` to have statically defined `ndims`. '
          'Found: %s' % x)
    return

  if expect_ndims is not None:
    ndims_message = ('Expected argument `x` to have ndims %s. Found tensor %s' %
                     (expect_ndims, x))
    if ndims != expect_ndims:
      raise ValueError(ndims_message)

  if expect_ndims_at_least is not None:
    ndims_at_least_message = (
        'Expected argument `x` to have ndims >= %d. Found tensor %s' %
        (expect_ndims_at_least, x))
    if ndims < expect_ndims_at_least:
      raise ValueError(ndims_at_least_message)

  if expect_ndims_no_more_than is not None:
    ndims_no_more_than_message = (
        'Expected argument `x` to have ndims <= %d. Found tensor %s' %
        (expect_ndims_no_more_than, x))
    if ndims > expect_ndims_no_more_than:
      raise ValueError(ndims_no_more_than_message)

  return ndims
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sqoop_hook.py#L202-L233
def import_table(self, table, target_dir=None, append=False, file_type="text", columns=None, split_by=None, where=None, direct=False, driver=None, extra_import_options=None): """ """ cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options) cmd += ["--table", table] if columns: cmd += ["--columns", columns] if where: cmd += ["--where", where] self.Popen(cmd)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/sprites_dataset.py#L140-L149
def create_character(skin, hair, top, pants): """""" dtype = skin.dtype hair_mask = tf.cast(hair[..., -1:] <= 0, dtype) top_mask = tf.cast(top[..., -1:] <= 0, dtype) pants_mask = tf.cast(pants[..., -1:] <= 0, dtype) char = (skin * hair_mask) + hair char = (char * top_mask) + top char = (char * pants_mask) + pants return char
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L440-L474
def enable_store_parameters_in_results(kernel): """ """ kernel_stack = [] while hasattr(kernel, 'parameters') and 'inner_kernel' in kernel.parameters: kernel_stack.append(kernel) kernel = kernel.parameters['inner_kernel'] def _recreate_kernel(kernel, parameters): new_parameters = kernel.parameters.copy() new_parameters.update(parameters) if 'store_parameters_in_results' in new_parameters: new_parameters['store_parameters_in_results'] = True with deprecation.silence(): return type(kernel)(**new_parameters) if hasattr(kernel, 'parameters'): kernel = _recreate_kernel(kernel, {}) for outer_kernel in reversed(kernel_stack): outer_kernel = _recreate_kernel(outer_kernel, {'inner_kernel': kernel}) kernel = outer_kernel return kernel
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1526-L1666
def _execute_helper(self): """ """ self.executor.start() self.log.info("Resetting orphaned tasks for active dag runs") self.reset_state_for_orphaned_tasks() # Start after resetting orphaned tasks to avoid stressing out DB. self.processor_agent.start() execute_start_time = timezone.utcnow() # Last time that self.heartbeat() was called. last_self_heartbeat_time = timezone.utcnow() # For the execute duration, parse and schedule DAGs while True: self.log.debug("Starting Loop...") loop_start_time = time.time() if self.using_sqlite: self.processor_agent.heartbeat() # For the sqlite case w/ 1 thread, wait until the processor # is finished to avoid concurrent access to the DB. self.log.debug( "Waiting for processors to finish since we're using sqlite") self.processor_agent.wait_until_finished() self.log.debug("Harvesting DAG parsing results") simple_dags = self.processor_agent.harvest_simple_dags() self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags))) # Send tasks for execution if available simple_dag_bag = SimpleDagBag(simple_dags) if len(simple_dags) > 0: try: simple_dag_bag = SimpleDagBag(simple_dags) # Handle cases where a DAG run state is set (perhaps manually) to # a non-running state. Handle task instances that belong to # DAG runs in those states # If a task instance is up for retry but the corresponding DAG run # isn't running, mark the task instance as FAILED so we don't try # to re-run it. self._change_state_for_tis_without_dagrun(simple_dag_bag, [State.UP_FOR_RETRY], State.FAILED) # If a task instance is scheduled or queued or up for reschedule, # but the corresponding DAG run isn't running, set the state to # NONE so we don't try to re-run it. self._change_state_for_tis_without_dagrun(simple_dag_bag, [State.QUEUED, State.SCHEDULED, State.UP_FOR_RESCHEDULE], State.NONE) self._execute_task_instances(simple_dag_bag, (State.SCHEDULED,)) except Exception as e: self.log.error("Error queuing tasks") self.log.exception(e) continue # Call heartbeats self.log.debug("Heartbeating the executor") self.executor.heartbeat() self._change_state_for_tasks_failed_to_execute() # Process events from the executor self._process_executor_events(simple_dag_bag) # Heartbeat the scheduler periodically time_since_last_heartbeat = (timezone.utcnow() - last_self_heartbeat_time).total_seconds() if time_since_last_heartbeat > self.heartrate: self.log.debug("Heartbeating the scheduler") self.heartbeat() last_self_heartbeat_time = timezone.utcnow() is_unit_test = conf.getboolean('core', 'unit_test_mode') loop_end_time = time.time() loop_duration = loop_end_time - loop_start_time self.log.debug( "Ran scheduling loop in %.2f seconds", loop_duration) if not is_unit_test: self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval) time.sleep(self._processor_poll_interval) # Exit early for a test mode, run one additional scheduler loop # to reduce the possibility that parsed DAG was put into the queue # by the DAG manager but not yet received by DAG agent. 
if self.processor_agent.done: self._last_loop = True if self._last_loop: self.log.info("Exiting scheduler loop as all files" " have been processed {} times".format(self.num_runs)) break if loop_duration < 1 and not is_unit_test: sleep_length = 1 - loop_duration self.log.debug( "Sleeping for {0:.2f} seconds to prevent excessive logging" .format(sleep_length)) sleep(sleep_length) # Stop any processors self.processor_agent.terminate() # Verify that all files were processed, and if so, deactivate DAGs that # haven't been touched by the scheduler as they likely have been # deleted. if self.processor_agent.all_files_processed: self.log.info( "Deactivating DAGs that haven't been touched since %s", execute_start_time.isoformat() ) models.DAG.deactivate_stale_dags(execute_start_time) self.executor.end() settings.Session.remove()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/views.py#L2492-L2500
def get_count_query(self): """ """ return ( super().get_count_query() .filter(models.DagModel.is_active) .filter(~models.DagModel.is_subdag) )
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/lbfgs.py#L277-L366
def _get_search_direction(state): """ """ # The number of correction pairs that have been collected so far. num_elements = tf.minimum( state.num_iterations, distribution_util.prefer_static_shape(state.position_deltas)[0]) def _two_loop_algorithm(): """L-BFGS two-loop algorithm.""" # Correction pairs are always appended to the end, so only the latest # `num_elements` vectors have valid position/gradient deltas. position_deltas = state.position_deltas[-num_elements:] gradient_deltas = state.gradient_deltas[-num_elements:] # Pre-compute all `inv_rho[i]`s. inv_rhos = tf.reduce_sum( input_tensor=gradient_deltas * position_deltas, axis=-1) def first_loop(acc, args): _, q_direction = acc position_delta, gradient_delta, inv_rho = args alpha = tf.reduce_sum( input_tensor=position_delta * q_direction, axis=-1) / inv_rho direction_delta = tf.expand_dims(alpha, axis=-1) * gradient_delta return (alpha, q_direction - direction_delta) # Run first loop body computing and collecting `alpha[i]`s, while also # computing the updated `q_direction` at each step. zero = tf.zeros_like(inv_rhos[0]) alphas, q_directions = tf.scan( first_loop, [position_deltas, gradient_deltas, inv_rhos], initializer=(zero, state.objective_gradient), reverse=True) # We use `H^0_k = gamma_k * I` as an estimate for the initial inverse # hessian for the k-th iteration; then `r_direction = H^0_k * q_direction`. gamma_k = inv_rhos[-1] / tf.reduce_sum( input_tensor=gradient_deltas[-1] * gradient_deltas[-1], axis=-1) r_direction = tf.expand_dims(gamma_k, axis=-1) * q_directions[0] def second_loop(r_direction, args): alpha, position_delta, gradient_delta, inv_rho = args beta = tf.reduce_sum( input_tensor=gradient_delta * r_direction, axis=-1) / inv_rho direction_delta = tf.expand_dims(alpha - beta, axis=-1) * position_delta return r_direction + direction_delta # Finally, run second loop body computing the updated `r_direction` at each # step. r_directions = tf.scan( second_loop, [alphas, position_deltas, gradient_deltas, inv_rhos], initializer=r_direction) return -r_directions[-1] return prefer_static.cond(tf.equal(num_elements, 0), (lambda: -state.objective_gradient), _two_loop_algorithm)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/logistic_regression.py#L89-L131
def visualize_decision(features, labels, true_w_b, candidate_w_bs, fname): """ """ fig = figure.Figure(figsize=(6, 6)) canvas = backend_agg.FigureCanvasAgg(fig) ax = fig.add_subplot(1, 1, 1) ax.scatter(features[:, 0], features[:, 1], c=np.float32(labels[:, 0]), cmap=cm.get_cmap("binary"), edgecolors="k") def plot_weights(w, b, **kwargs): w1, w2 = w x1s = np.linspace(-1, 1, 100) x2s = -(w1 * x1s + b) / w2 ax.plot(x1s, x2s, **kwargs) for w, b in candidate_w_bs: plot_weights(w, b, alpha=1./np.sqrt(len(candidate_w_bs)), lw=1, color="blue") if true_w_b is not None: plot_weights(*true_w_b, lw=4, color="green", label="true separator") ax.set_xlim([-1.5, 1.5]) ax.set_ylim([-1.5, 1.5]) ax.legend() canvas.print_figure(fname, format="png") print("saved {}".format(fname))
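A hypothetical call with synthetic data (array shapes follow the plotting code above; the weights and filename are illustrative):

import numpy as np

features = np.random.uniform(-1., 1., size=[50, 2]).astype(np.float32)
labels = (features[:, :1] + features[:, 1:] > 0.).astype(np.int32)
visualize_decision(features, labels,
                   true_w_b=([1., 1.], 0.),
                   candidate_w_bs=[([0.9, 1.1], 0.05), ([1.1, 0.9], -0.05)],
                   fname='/tmp/decision.png')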
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/wasb_hook.py#L137-L151
def read_file(self, container_name, blob_name, **kwargs): """ """ return self.connection.get_blob_to_text(container_name, blob_name, **kwargs).content
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L572-L639
def move_dimension(x, source_idx, dest_idx): """ """ ndims = prefer_static_rank(x) dtype = dtype_util.common_dtype([source_idx, dest_idx], preferred_dtype=tf.int32) source_idx = tf.convert_to_tensor(value=source_idx, dtype=dtype) dest_idx = tf.convert_to_tensor(value=dest_idx, dtype=dtype) # Handle negative indexing. source_idx = pick_scalar_condition(source_idx < 0, ndims + source_idx, source_idx) dest_idx = pick_scalar_condition(dest_idx < 0, ndims + dest_idx, dest_idx) # Construct the appropriate permutation of dimensions, depending # whether the source is before or after the destination. def move_left_permutation(): return prefer_static_value( tf.concat([ tf.range(0, dest_idx, dtype=dtype), [source_idx], tf.range(dest_idx, source_idx, dtype=dtype), tf.range(source_idx + 1, ndims, dtype=dtype) ], axis=0)) def move_right_permutation(): return prefer_static_value( tf.concat([ tf.range(0, source_idx, dtype=dtype), tf.range(source_idx + 1, dest_idx + 1, dtype=dtype), [source_idx], tf.range(dest_idx + 1, ndims, dtype=dtype) ], axis=0)) def x_permuted(): return tf.transpose( a=x, perm=prefer_static.cond(source_idx < dest_idx, move_right_permutation, move_left_permutation)) # One final conditional to handle the special case where source # and destination indices are equal. return prefer_static.cond(tf.equal(source_idx, dest_idx), lambda: x, x_permuted)
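A concrete shape example (values assumed):

import tensorflow as tf

x = tf.zeros([2, 3, 4, 5])
y = move_dimension(x, source_idx=0, dest_idx=2)
# y.shape == [3, 4, 2, 5]: dimension 0 now sits at position 2.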
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/utils.py#L93-L112
def list_dir(root, prefix=False): """ """ root = os.path.expanduser(root) directories = list( filter( lambda p: os.path.isdir(os.path.join(root, p)), os.listdir(root) ) ) if prefix is True: directories = [os.path.join(root, d) for d in directories] return directories
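For instance, given a hypothetical layout where /data/raw contains the subdirectories 'train' and 'val':

list_dir('/data/raw')               # -> ['train', 'val'] (order not guaranteed)
list_dir('/data/raw', prefix=True)  # -> ['/data/raw/train', '/data/raw/val']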
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/tensorshape_util.py#L119-L141
def constant_value_as_shape(tensor): # pylint: disable=invalid-name """ """ shape = tf.get_static_value(tensor) if shape is not None: return [None if dim == -1 else dim for dim in shape] return tensor_util.constant_value_as_shape(tensor)
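For example, a statically known shape tensor with a -1 entry maps to a partially defined shape list:

import tensorflow as tf

constant_value_as_shape(tf.constant([2, -1, 4]))  # -> [2, None, 4]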
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/aws_glue_catalog_hook.py#L139-L152
def get_table_location(self, database_name, table_name): """ """ table = self.get_table(database_name, table_name) return table['StorageDescriptor']['Location']
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/interpolation.py#L893-L927
def _make_expand_x_fn_for_batch_interpolation(y_ref, axis): """""" # This expansion is to help x broadcast with `y`, the output. # In the batch case, the output shape is going to be # Broadcast(y_ref.shape[:axis], x.shape[:-1]) + # x.shape[-1:] + y_ref.shape[axis+1:] # Recall we made axis non-negative y_ref_shape = tf.shape(input=y_ref) y_ref_shape_left = y_ref_shape[:axis] y_ref_shape_right = y_ref_shape[axis + 1:] def expand_right_dims(x, broadcast=False): """Expand x so it can bcast w/ tensors of output shape.""" expanded_shape_left = tf.broadcast_dynamic_shape( tf.shape(input=x)[:-1], tf.ones([tf.size(input=y_ref_shape_left)], dtype=tf.int32)) expanded_shape = tf.concat( (expanded_shape_left, tf.shape(input=x)[-1:], tf.ones([tf.size(input=y_ref_shape_right)], dtype=tf.int32)), axis=0) x_expanded = tf.reshape(x, expanded_shape) if broadcast: broadcast_shape_left = tf.broadcast_dynamic_shape( tf.shape(input=x)[:-1], y_ref_shape_left) broadcast_shape = tf.concat( (broadcast_shape_left, tf.shape(input=x)[-1:], y_ref_shape_right), axis=0) if x.dtype.is_bool: x_expanded = x_expanded | tf.cast(tf.zeros(broadcast_shape), tf.bool) else: x_expanded += tf.zeros(broadcast_shape, dtype=x.dtype) return x_expanded return expand_right_dims
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L674-L709
def _get_mutants(population, population_size, mixing_indices, differential_weight): """ """ mixing_indices = tf.reshape(mixing_indices, [-1]) weights = tf.stack([1.0, differential_weight, -differential_weight]) def _mutant_part(population_part): donors = tf.gather(population_part, mixing_indices) donors = tf.transpose( a=tf.reshape(donors, [population_size, 3, -1]), perm=[0, 2, 1]) return tf.math.reduce_sum(input_tensor=donors * weights, axis=-1) return [_mutant_part(population_part) for population_part in population]
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/task_runner/cgroup_task_runner.py#L197-L210
def _get_cgroup_names(): """ """ with open("/proc/self/cgroup") as f: lines = f.readlines() d = {} for line in lines: line_split = line.rstrip().split(":") subsystem = line_split[1] group_name = line_split[2] d[subsystem] = group_name return d
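As an illustration (values hypothetical), a /proc/self/cgroup line such as "4:memory:/user.slice" contributes the entry {'memory': '/user.slice'} to the result; a caller might then look up a subsystem's group like this:

groups = _get_cgroup_names()          # e.g. {'memory': '/user.slice', 'cpu': '/user.slice', ...}
memory_group = groups.get('memory')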
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/prefer_static.py#L120-L151
def cond(pred, true_fn=None, false_fn=None, name=None): """ """ if not callable(true_fn): raise TypeError('`true_fn` must be callable.') if not callable(false_fn): raise TypeError('`false_fn` must be callable.') pred_value = _get_static_predicate(pred) if pred_value is not None: if pred_value: return true_fn() else: return false_fn() else: return tf.cond(pred=pred, true_fn=true_fn, false_fn=false_fn, name=name)
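Usage sketch: with a statically known predicate the chosen branch runs directly in Python, while a predicate whose value is unknown at graph-build time falls back to tf.cond.

import tensorflow as tf

# Static predicate: the true branch is returned without building a tf.cond op.
y = cond(True, true_fn=lambda: tf.constant(1.), false_fn=lambda: tf.constant(0.))

# Dynamic predicate: defers to tf.cond.
p = tf.reduce_all(tf.random.uniform([2]) > 0.5)
z = cond(p, true_fn=lambda: tf.constant(1.), false_fn=lambda: tf.constant(0.))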
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/sprites_dataset.py#L113-L118
def read_image(filepath): """""" im_bytes = tf.io.read_file(filepath) im = tf.image.decode_image(im_bytes, channels=CHANNELS) im = tf.image.convert_image_dtype(im, tf.float32) return im
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/positive_semidefinite_kernels/internal/util.py#L68-L86
def sum_rightmost_ndims_preserving_shape(x, ndims): """ """ x = tf.convert_to_tensor(value=x) if x.shape.ndims is not None: axes = tf.range(x.shape.ndims - ndims, x.shape.ndims) else: axes = tf.range(tf.rank(x) - ndims, tf.rank(x)) return tf.reduce_sum(input_tensor=x, axis=axes)
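A quick example with assumed values:

import tensorflow as tf

x = tf.ones([2, 3, 4])
y = sum_rightmost_ndims_preserving_shape(x, ndims=2)
# y.shape == [2]; each entry is 3 * 4 = 12.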
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/bfgs_utils.py#L47-L91
def get_initial_state_args(value_and_gradients_function, initial_position, grad_tolerance, control_inputs=None): """ """ if control_inputs: with tf.control_dependencies(control_inputs): f0, df0 = value_and_gradients_function(initial_position) else: f0, df0 = value_and_gradients_function(initial_position) converged = norm(df0, dims=1) < grad_tolerance return dict( converged=converged, failed=tf.zeros_like(converged), # i.e. False. num_iterations=tf.convert_to_tensor(value=0), num_objective_evaluations=tf.convert_to_tensor(value=1), position=initial_position, objective_value=f0, objective_gradient=df0)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L920-L928
def _print_stat(self): """ """ if ((timezone.utcnow() - self.last_stat_print_time).total_seconds() > self.print_stats_interval): if len(self._file_paths) > 0: self._log_file_processing_stats(self._file_paths) self.last_stat_print_time = timezone.utcnow()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L246-L266
def get_accessible_dag_ids(self, username=None): """ """ if not username: username = g.user if username.is_anonymous or 'Public' in username.roles: # return an empty set if the role is public return set() roles = {role.name for role in username.roles} if {'Admin', 'Viewer', 'User', 'Op'} & roles: return self.DAG_VMS user_perms_views = self.get_all_permissions_views() # return a set of all dags that the user could access return set([view for perm, view in user_perms_views if perm in self.DAG_PERMS])
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/slice_sampler_utils.py#L26-L87
def _left_doubling_increments(batch_shape, max_doublings, step_size, seed=None,
                              name=None):
  """
  """
  with tf.compat.v1.name_scope(name, 'left_doubling_increments',
                               [batch_shape, max_doublings, step_size]):
    step_size = tf.convert_to_tensor(value=step_size)
    dtype = step_size.dtype.base_dtype
    # Output shape of the left increments tensor.
    output_shape = tf.concat(([max_doublings + 1], batch_shape), axis=0)
    # A sample realization of X_k.
    expand_left = distributions.Bernoulli(0.5, dtype=dtype).sample(
        sample_shape=output_shape, seed=seed)

    # The widths of the successive intervals. Starts with 1.0 and ends with
    # 2^max_doublings.
    width_multipliers = tf.cast(2 ** tf.range(0, max_doublings+1), dtype=dtype)
    # Output shape of the `widths` tensor.
    widths_shape = tf.concat(([max_doublings + 1],
                              tf.ones_like(batch_shape)), axis=0)
    width_multipliers = tf.reshape(width_multipliers, shape=widths_shape)
    # Widths shape is [max_doublings + 1, 1, 1, 1...].
    widths = width_multipliers * step_size

    # Take the cumulative sum of the left side increments in slice width to
    # give the resulting distance from the initial lower bound.
    left_increments = tf.cumsum(widths * expand_left, exclusive=True, axis=0)
    return left_increments, widths
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L342-L417
def _launch_process(result_queue, file_path, pickle_dags, dag_id_white_list, thread_name, zombies): """ """ def helper(): # This helper runs in the newly created process log = logging.getLogger("airflow.processor") stdout = StreamLogWriter(log, logging.INFO) stderr = StreamLogWriter(log, logging.WARN) set_context(log, file_path) try: # redirect stdout/stderr to log sys.stdout = stdout sys.stderr = stderr # Re-configure the ORM engine as there are issues with multiple processes settings.configure_orm() # Change the thread name to differentiate log lines. This is # really a separate process, but changing the name of the # process doesn't work, so changing the thread name instead. threading.current_thread().name = thread_name start_time = time.time() log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path) scheduler_job = SchedulerJob(dag_ids=dag_id_white_list, log=log) result = scheduler_job.process_file(file_path, zombies, pickle_dags) result_queue.put(result) end_time = time.time() log.info( "Processing %s took %.3f seconds", file_path, end_time - start_time ) except Exception: # Log exceptions through the logging framework. log.exception("Got an exception! Propagating...") raise finally: sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ # We re-initialized the ORM within this Process above so we need to # tear it down manually here settings.dispose_orm() p = multiprocessing.Process(target=helper, args=(), name="{}-Process".format(thread_name)) p.start() return p
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/prefer_static.py#L154-L179
def case(pred_fn_pairs, default=None, exclusive=False, name='smart_case'): """ """ return control_flow_ops._case_helper( # pylint: disable=protected-access cond, pred_fn_pairs, default, exclusive, name, allow_python_preds=True)