INSTRUCTION | RESPONSE
|---|---|
Fit all transformers using Z, transform the data and concatenate results.
|
def fit_transform(self, Z, **fit_params):
"""TODO: rewrite docstring
Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
return self.fit(Z, **fit_params).transform(Z)
|
Transform Z separately by each transformer, concatenate results.
|
def transform(self, Z):
"""TODO: rewrite docstring
Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
if isinstance(Z, DictRDD):
X = Z[:, 'X']
else:
X = Z
Zs = [_transform_one(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list]
X_rdd = reduce(lambda x, y: x.zip(y._rdd), Zs)
X_rdd = X_rdd.map(flatten)
mapper = np.hstack
for item in X_rdd.first():
if sp.issparse(item):
mapper = sp.hstack
X_rdd = X_rdd.map(lambda x: mapper(x))
if isinstance(Z, DictRDD):
return DictRDD([X_rdd, Z[:, 'y']],
columns=Z.columns,
dtype=Z.dtype,
bsize=Z.bsize)
else:
return X_rdd
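To see what the per-block concatenation produces, here is a minimal local sketch of the zip/hstack step using plain NumPy (no Spark; the transformer outputs are made up for illustration):
import numpy as np

# Hypothetical per-block outputs of two transformers for 4 samples.
block_a = np.arange(8).reshape(4, 2)    # transformer 1 -> 2 components
block_b = np.arange(12).reshape(4, 3)   # transformer 2 -> 3 components

# The distributed version zips the per-transformer RDDs block-wise and
# hstacks each pair of blocks, exactly like this local call.
stacked = np.hstack((block_a, block_b))
print(stacked.shape)  # (4, 5) == (n_samples, sum_n_components)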
|
Fit the model according to the given training data.
|
def fit(self, Z, classes=None):
"""Fit the model according to the given training data.
Parameters
----------
Z : DictRDD containing (X, y) pairs
X - Training vector
y - Target labels
classes : iterable
The set of available classes
Returns
-------
self : object
Returns self.
"""
check_rdd(Z, {'X': (sp.spmatrix, np.ndarray)})
mapper = lambda X_y: super(SparkRandomForestClassifier, self).fit(
X_y[0], X_y[1]
)
models = Z.map(mapper).collect()
self.__dict__ = models[0].__dict__
self.estimators_ = []
for m in models:
self.estimators_ += m.estimators_
self.n_estimators = len(self.estimators_)
return self
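The merge above simply pools the trees trained on each block into one forest. A minimal local sketch of the same idea with two scikit-learn forests (assuming scikit-learn is installed; the data is synthetic):
import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.RandomState(0)
X = rng.rand(100, 4)
y = (X[:, 0] > 0.5).astype(int)

# Train one forest per "block" of the data.
rf_a = RandomForestClassifier(n_estimators=5, random_state=0).fit(X[:50], y[:50])
rf_b = RandomForestClassifier(n_estimators=5, random_state=1).fit(X[50:], y[50:])

# Pool the trees, as SparkRandomForestClassifier.fit does with collected models.
merged = rf_a
merged.estimators_ = rf_a.estimators_ + rf_b.estimators_
merged.n_estimators = len(merged.estimators_)
print(merged.n_estimators)    # 10
print(merged.predict(X[:3]))  # predictions from the pooled ensemble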
|
Actual fitting performing the search over parameters.
|
def _fit(self, Z, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
cv = self.cv
cv = _check_cv(cv, Z)
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch, backend="threading"
)(
delayed(_fit_and_score)(clone(base_estimator), Z, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
# Out is a list of triplet: score, estimator, n_test_samples
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
best_estimator.fit(Z, **self.fit_params)
self.best_estimator_ = best_estimator
return self
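The iid flag changes how per-fold scores are combined: weighted by the number of test samples versus a plain mean over folds. A small numeric illustration of the two branches above:
fold_scores = [0.80, 0.90, 0.60]
fold_sizes = [100, 100, 50]

# iid=True: weight each fold score by its number of test samples.
iid_score = sum(s * n for s, n in zip(fold_scores, fold_sizes)) / float(sum(fold_sizes))

# iid=False: plain mean over folds.
mean_score = sum(fold_scores) / float(len(fold_scores))

print(iid_score)   # 0.8
print(mean_score)  # ~0.7667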
|
Fit label encoder.
|
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : ArrayRDD (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
def mapper(y):
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
return np.unique(y)
def reducer(a, b):
return np.unique(np.concatenate((a, b)))
self.classes_ = y.map(mapper).reduce(reducer)
return self
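The mapper/reducer pair just computes the distinct labels per block and merges them; a local NumPy sketch of the same reduction:
import numpy as np

block1 = np.array(['cat', 'dog', 'cat'])
block2 = np.array(['dog', 'mouse'])

# map: distinct labels per block
u1, u2 = np.unique(block1), np.unique(block2)
# reduce: distinct labels of the merged result
classes = np.unique(np.concatenate((u1, u2)))
print(classes)  # ['cat' 'dog' 'mouse']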
|
Transform labels to normalized encoding.
|
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : ArrayRDD [n_samples]
Target values.
Returns
-------
y : ArrayRDD [n_samples]
"""
mapper = super(SparkLabelEncoder, self).transform
mapper = self.broadcast(mapper, y.context)
return y.transform(mapper)
|
Compute the score of an estimator on a given test set.
|
def _score(estimator, Z_test, scorer):
"""Compute the score of an estimator on a given test set."""
score = scorer(estimator, Z_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
|
Compute k-means clustering.
|
def fit(self, Z):
"""Compute k-means clustering.
Parameters
----------
Z : ArrayRDD or DictRDD containing array-like or sparse matrix
Train data.
Returns
-------
self
"""
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
check_rdd(X, (np.ndarray, sp.spmatrix))
if self.init == 'k-means||':
self._mllib_model = MLlibKMeans.train(
X.unblock(),
self.n_clusters,
maxIterations=self.max_iter,
initializationMode="k-means||")
self.cluster_centers_ = self._mllib_model.centers
else:
models = X.map(lambda X: super(SparkKMeans, self).fit(X))
models = models.map(lambda model: model.cluster_centers_).collect()
return super(SparkKMeans, self).fit(np.concatenate(models))
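The non-MLlib branch fits a local KMeans per block and then re-clusters the collected centers. A minimal scikit-learn sketch of that reduction (assuming scikit-learn is installed; the blobs are synthetic):
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.RandomState(0)
blocks = [rng.rand(50, 2) + shift for shift in (0, 5)]

# "map": fit a KMeans per block and keep only its cluster centers
centers = [KMeans(n_clusters=2, n_init=10, random_state=0).fit(b).cluster_centers_
           for b in blocks]

# final fit: cluster the stacked per-block centers
final = KMeans(n_clusters=2, n_init=10, random_state=0).fit(np.concatenate(centers))
print(final.cluster_centers_)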
|
Predict the closest cluster each sample in X belongs to.
|
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : ArrayRDD containing array-like, sparse matrix
New data to predict.
Returns
-------
labels : ArrayRDD with predictions
Index of the cluster each sample belongs to.
"""
check_rdd(X, (np.ndarray, sp.spmatrix))
if hasattr(self, '_mllib_model'):
if isinstance(X, ArrayRDD):
X = X.unblock()
return X.map(lambda x: self._mllib_model.predict(x))
else:
rdd = X.map(lambda X: super(SparkKMeans, self).predict(X))
return ArrayRDD(rdd)
|
Fit the model according to the given training data.
|
def fit(self, Z, classes=None):
"""Fit the model according to the given training data.
Parameters
----------
Z : DictRDD containing (X, y) pairs
X - Training vector
y - Target labels
classes : iterable
The set of available classes
Returns
-------
self : object
Returns self.
"""
check_rdd(Z, {'X': (sp.spmatrix, np.ndarray)})
self._classes_ = np.unique(classes)
return self._spark_fit(SparkSGDClassifier, Z)
|
Distributed method to predict class labels for samples in X.
|
def predict(self, X):
"""Distributed method to predict class labels for samples in X.
Parameters
----------
X : ArrayRDD containing {array-like, sparse matrix}
Samples.
Returns
-------
C : ArrayRDD
Predicted class label per sample.
"""
check_rdd(X, (sp.spmatrix, np.ndarray))
return self._spark_predict(SparkSGDClassifier, X)
|
Check whether the blocks in the RDD match the expected types.
|
def check_rdd_dtype(rdd, expected_dtype):
"""Checks if the blocks in the RDD matches the expected types.
Parameters:
-----------
rdd: splearn.BlockRDD
The RDD to check
expected_dtype: {type, list of types, tuple of types, dict of types}
Expected type(s). If the RDD is a DictRDD the parameter type is
restricted to dict.
Returns:
--------
accept: bool
Returns if the types are matched.
"""
if not isinstance(rdd, BlockRDD):
raise TypeError("Expected {0} for parameter rdd, got {1}."
.format(BlockRDD, type(rdd)))
if isinstance(rdd, DictRDD):
if not isinstance(expected_dtype, dict):
raise TypeError('Expected {0} for parameter '
'expected_dtype, got {1}.'
.format(dict, type(expected_dtype)))
accept = True
types = dict(list(zip(rdd.columns, rdd.dtype)))
for key, values in expected_dtype.items():
if not isinstance(values, (tuple, list)):
values = [values]
accept = accept and types[key] in values
return accept
if not isinstance(expected_dtype, (tuple, list)):
expected_dtype = [expected_dtype]
return rdd.dtype in expected_dtype
|
Learn a list of feature name -> indices mappings.
|
def fit(self, Z):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
Z : DictRDD with column 'X'
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
Returns
-------
self
"""
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
"""Create vocabulary
"""
class SetAccum(AccumulatorParam):
def zero(self, initialValue):
return set(initialValue)
def addInPlace(self, v1, v2):
v1 |= v2
return v1
accum = X.context.accumulator(set(), SetAccum())
def mapper(X, separator=self.separator):
feature_names = []
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
feature_names.append(f)
accum.add(set(feature_names))
X.foreach(mapper) # init vocabulary
feature_names = list(accum.value)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
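The accumulator collects the same 'feature=value' names a local scikit-learn DictVectorizer would learn; for comparison, a minimal non-distributed sketch (assuming scikit-learn is installed):
from sklearn.feature_extraction import DictVectorizer

X = [{'city': 'London', 'temp': 12.0},
     {'city': 'Paris', 'temp': 18.0}]

dv = DictVectorizer(sparse=True).fit(X)
print(dv.feature_names_)  # ['city=London', 'city=Paris', 'temp']
print(dv.vocabulary_)     # feature name -> column index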
|
Transform ArrayRDD's (or DictRDD's 'X' column's) feature->value dicts to array or sparse matrix. Named features not encountered during fit or fit_transform will be silently ignored.
|
def transform(self, Z):
"""Transform ArrayRDD's (or DictRDD's 'X' column's) feature->value dicts
to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
Z : ArrayRDD or DictRDD with column 'X' containing Mapping or
iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
Returns
-------
Z : transformed, containing {array, sparse matrix}
Feature vectors; always 2-d.
"""
mapper = self.broadcast(super(SparkDictVectorizer, self).transform,
Z.context)
dtype = sp.spmatrix if self.sparse else np.ndarray
return Z.transform(mapper, column='X', dtype=dtype)
|
Learn empirical variances from X.
|
def fit(self, Z):
"""Learn empirical variances from X.
Parameters
----------
Z : ArrayRDD or DictRDD with blocks of {array-like, sparse matrix},
shape (n_samples, n_features)
Sample vectors from which to compute variances.
Returns
-------
self
"""
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
check_rdd(X, (np.ndarray, sp.spmatrix))
def mapper(X):
"""Calculate statistics for every numpy or scipy blocks."""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
mean, var = mean_variance_axis(X, axis=0)
else:
mean, var = np.mean(X, axis=0), np.var(X, axis=0)
return X.shape[0], mean, var
def reducer(a, b):
"""Calculate the combined statistics."""
n_a, mean_a, var_a = a
n_b, mean_b, var_b = b
n_ab = n_a + n_b
mean_ab = ((mean_a * n_a) + (mean_b * n_b)) / n_ab
var_ab = (((n_a * var_a) + (n_b * var_b)) / n_ab) + \
((n_a * n_b) * ((mean_b - mean_a) / n_ab) ** 2)
return (n_ab, mean_ab, var_ab)
_, _, self.variances_ = X.map(mapper).treeReduce(reducer)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
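The reducer implements the standard formula for pooling per-block means and (population) variances; a quick NumPy check that it matches np.var over the concatenated data:
import numpy as np

rng = np.random.RandomState(0)
a, b = rng.rand(40, 3), rng.rand(60, 3)

def stats(X):
    return X.shape[0], X.mean(axis=0), X.var(axis=0)

(n_a, mean_a, var_a), (n_b, mean_b, var_b) = stats(a), stats(b)
n_ab = n_a + n_b
mean_ab = (mean_a * n_a + mean_b * n_b) / n_ab
var_ab = ((n_a * var_a + n_b * var_b) / n_ab
          + (n_a * n_b) * ((mean_b - mean_a) / n_ab) ** 2)

full = np.concatenate((a, b))
print(np.allclose(mean_ab, full.mean(axis=0)))  # True
print(np.allclose(var_ab, full.var(axis=0)))    # True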
|
Calculate the SVD of a blocked RDD directly, returning only the leading k singular vectors. Assumes n rows and d columns; efficient when n >> d. Must be able to fit d^2 within the memory of a single machine.
|
def svd(blocked_rdd, k):
"""
Calculate the SVD of a blocked RDD directly, returning only the leading k
singular vectors. Assumes n rows and d columns, efficient when n >> d
Must be able to fit d^2 within the memory of a single machine.
Parameters
----------
blocked_rdd : RDD
RDD with data points in numpy array blocks
k : Int
Number of singular vectors to return
Returns
----------
u : RDD of blocks
Left eigenvectors
s : numpy array
Singular values
v : numpy array
Right eigenvectors
"""
# compute the covariance matrix (without mean subtraction)
# TODO use one func for this (with mean subtraction as an option?)
c = blocked_rdd.map(lambda x: (x.T.dot(x), x.shape[0]))
prod, n = c.reduce(lambda x, y: (x[0] + y[0], x[1] + y[1]))
# do local eigendecomposition
w, v = ln.eig(prod / n)
w = np.real(w)
v = np.real(v)
inds = np.argsort(w)[::-1]
s = np.sqrt(w[inds[0:k]]) * np.sqrt(n)
v = v[:, inds[0:k]].T
# project back into data, normalize by singular values
u = blocked_rdd.map(lambda x: np.inner(x, v) / s)
return u, s, v
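The covariance-based route can be checked locally against np.linalg.svd; a minimal NumPy sketch of the same computation (no mean subtraction, n >> d assumed):
import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(500, 6)
k = 3

prod, n = X.T.dot(X), X.shape[0]   # what the map/reduce accumulates
w, v = np.linalg.eig(prod / n)
w, v = np.real(w), np.real(v)
inds = np.argsort(w)[::-1]
s = np.sqrt(w[inds[:k]]) * np.sqrt(n)
v_top = v[:, inds[:k]].T
u = X.dot(v_top.T) / s             # per-block projection: np.inner(x, v) / s

_, s_ref, _ = np.linalg.svd(X, full_matrices=False)
print(np.allclose(s, s_ref[:k]))   # True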
|
Calculate the SVD of a blocked RDD using an expectation maximization algorithm (from Roweis, NIPS 1997) that avoids explicitly computing the covariance matrix, returning only the leading k singular vectors. Assumes n rows and d columns; does not require d^2 to fit into memory on a single machine.
|
def svd_em(blocked_rdd, k, maxiter=20, tol=1e-6, compute_u=True, seed=None):
"""
Calculate the SVD of a blocked RDD using an expectation maximization
algorithm (from Roweis, NIPS, 1997) that avoids explicitly
computing the covariance matrix, returning only the leading k
singular vectors. Assumes n rows and d columns, does not require
d^2 to fit into memory on a single machine.
Parameters
----------
blocked_rdd : ArrayRDD
ArrayRDD with data points in numpy array blocks
k : Int
Number of singular vectors to return
maxiter : Int, optional, default = 20
Number of iterations to perform
tol : Double, optional, default = 1e-6
Tolerance for stopping iterative updates
seed : Int, optional, default = None
Seed for random number generator for initializing subspace
Returns
----------
u : RDD of blocks
Left eigenvectors
s : numpy array
Singular values
v : numpy array
Right eigenvectors
"""
n, m = blocked_rdd.shape[:2]
sc = blocked_rdd._rdd.context
def outerprod(x):
return x.T.dot(x)
# global run_sum
# def accumsum(x):
# global run_sum
# run_sum += x
# class MatrixAccum(AccumulatorParam):
# def zero(self, value):
# return np.zeros(np.shape(value))
# def addInPlace(self, val1, val2):
# val1 += val2
# return val1
if seed is not None:
rng = np.random.RandomState(seed)
c = rng.randn(k, m)
else:
c = np.random.randn(k, m)
iter = 0
error = 100
# iteratively update subspace using expectation maximization
# e-step: x = (cc')^-1 c y
# m-step: c = y x' (xx')^-1
while (iter < maxiter) & (error > tol):
c_old = c
# pre compute (cc')^-1 c
c_inv = np.dot(c.T, ln.inv(np.dot(c, c.T)))
premult1 = sc.broadcast(c_inv)
# compute (xx')^-1 through a map reduce
xx = blocked_rdd.map(lambda x: outerprod(safe_sparse_dot(x, premult1.value))) \
.treeReduce(add)
# compute (xx')^-1 using an accumulator
# run_sum = sc.accumulator(np.zeros((k, k)), MatrixAccum())
# blocked_rdd.map(lambda x: outerprod(safe_sparse_dot(x, premult1.value))) \
# .foreachPartition(lambda l: accumsum(sum(l)))
# xx = run_sum.value
xx_inv = ln.inv(xx)
# pre compute (cc')^-1 c (xx')^-1
premult2 = blocked_rdd.context.broadcast(np.dot(c_inv, xx_inv))
# compute the new c through a map reduce
c = blocked_rdd.map(lambda x: safe_sparse_dot(x.T, safe_sparse_dot(x, premult2.value))) \
.treeReduce(add)
# compute the new c using an accumulator
# run_sum = sc.accumulator(np.zeros((m, k)), MatrixAccum())
# blocked_rdd.map(lambda x: safe_sparse_dot(x.T, safe_sparse_dot(x, premult2.value))) \
# .foreachPartition(lambda l: accumsum(sum(l)))
# c = run_sum.value
c = c.T
error = np.sum((c - c_old) ** 2)
iter += 1
# project data into subspace spanned by columns of c
# use standard eigendecomposition to recover an orthonormal basis
c = ln.orth(c.T).T
cov = blocked_rdd.map(lambda x: safe_sparse_dot(x, c.T)) \
.map(lambda x: outerprod(x)) \
.treeReduce(add)
w, v = ln.eig(cov / n)
w = np.real(w)
v = np.real(v)
inds = np.argsort(w)[::-1]
s = np.sqrt(w[inds[0:k]]) * np.sqrt(n)
v = np.dot(v[:, inds[0:k]].T, c)
if compute_u:
v_broadcasted = blocked_rdd.context.broadcast(v)
u = blocked_rdd.map(
lambda x: safe_sparse_dot(x, v_broadcasted.value.T) / s)
return u, s, v
else:
return s, v
|
Fit LSI model to X and perform dimensionality reduction on X.
|
def fit_transform(self, Z):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
check_rdd(X, (sp.spmatrix, np.ndarray))
if self.algorithm == "em":
X = X.persist()  # cache X to speed up the iterative SVD
Sigma, V = svd_em(X, k=self.n_components, maxiter=self.n_iter,
tol=self.tol, compute_u=False,
seed=self.random_state)
self.components_ = V
X.unpersist()
return self.transform(Z)
else:
# TODO: raise warning non distributed
return super(SparkTruncatedSVD, self).fit_transform(X.tosparse())
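The non-'em' branch falls back to scikit-learn's local TruncatedSVD on the collected data; a minimal local equivalent (assuming scikit-learn is installed):
import numpy as np
from sklearn.decomposition import TruncatedSVD

X = np.random.rand(100, 20)
X_new = TruncatedSVD(n_components=5).fit_transform(X)
print(X_new.shape)  # (100, 5)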
|
Perform dimensionality reduction on X.
|
def transform(self, Z):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
check_rdd(X, (sp.spmatrix, np.ndarray))
mapper = self.broadcast(
super(SparkTruncatedSVD, self).transform, Z.context)
return Z.transform(mapper, column='X', dtype=np.ndarray)
|
Pack rdd with a specific collection constructor.
|
def _block_collection(iterator, dtype, bsize=-1):
"""Pack rdd with a specific collection constructor."""
i = 0
accumulated = []
for a in iterator:
if (bsize > 0) and (i >= bsize):
yield _pack_accumulated(accumulated, dtype)
accumulated = []
i = 0
accumulated.append(a)
i += 1
if i > 0:
yield _pack_accumulated(accumulated, dtype)
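A minimal local sketch of the blocking generator, with np.array standing in for _pack_accumulated (a hypothetical stand-in, not the library's helper):
import numpy as np

def block_iter(iterator, bsize=-1):
    accumulated, i = [], 0
    for a in iterator:
        if bsize > 0 and i >= bsize:
            yield np.array(accumulated)
            accumulated, i = [], 0
        accumulated.append(a)
        i += 1
    if i > 0:
        yield np.array(accumulated)

print([b.shape for b in block_iter(range(10), bsize=4)])  # [(4,), (4,), (2,)]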
|
Pack rdd of tuples as tuples of arrays or scipy.sparse matrices.
|
def _block_tuple(iterator, dtypes, bsize=-1):
"""Pack rdd of tuples as tuples of arrays or scipy.sparse matrices."""
i = 0
blocked_tuple = None
for tuple_i in iterator:
if blocked_tuple is None:
blocked_tuple = tuple([] for _ in range(len(tuple_i)))
if (bsize > 0) and (i >= bsize):
yield tuple(_pack_accumulated(x, dtype)
for x, dtype in zip(blocked_tuple, dtypes))
blocked_tuple = tuple([] for _ in range(len(tuple_i)))
i = 0
for x_j, x in zip(tuple_i, blocked_tuple):
x.append(x_j)
i += 1
if i > 0:
yield tuple(_pack_accumulated(x, dtype)
for x, dtype in zip(blocked_tuple, dtypes))
|
Block an RDD
|
def block(rdd, bsize=-1, dtype=None):
"""Block an RDD
Parameters
----------
rdd : RDD
RDD of data points to block into either numpy arrays,
scipy sparse matrices, or pandas data frames.
Type of data point will be automatically inferred
and blocked accordingly.
bsize : int, optional, default -1
Size of each block (number of elements); if -1, all data points
from each partition will be combined into one block.
Returns
-------
rdd : ArrayRDD or TupleRDD or DictRDD
The transformed rdd with added functionality
"""
try:
entry = rdd.first()
except IndexError:
# empty RDD: do not block
return rdd
# do different kinds of block depending on the type
if isinstance(entry, dict):
rdd = rdd.map(lambda x: list(x.values()))
return DictRDD(rdd, list(entry.keys()), bsize, dtype)
elif isinstance(entry, tuple):
return DictRDD(rdd, bsize=bsize, dtype=dtype)
elif sp.issparse(entry):
return SparseRDD(rdd, bsize)
elif isinstance(entry, np.ndarray):
return ArrayRDD(rdd, bsize)
else:
return BlockRDD(rdd, bsize, dtype)
|
Execute the blocking process on the given rdd.
|
def _block(self, rdd, bsize, dtype):
"""Execute the blocking process on the given rdd.
Parameters
----------
rdd : pyspark.rdd.RDD
Distributed data to block
bsize : int or None
The desired size of the blocks
Returns
-------
rdd : pyspark.rdd.RDD
Blocked rdd.
"""
return rdd.mapPartitions(
lambda x: _block_collection(x, dtype, bsize))
|
Equivalent to map; kept for compatibility purposes only. The column parameter is ignored.
|
def transform(self, fn, dtype=None, *args, **kwargs):
"""Equivalent to map, compatibility purpose only.
Column parameter ignored.
"""
rdd = self._rdd.map(fn)
if dtype is None:
return self.__class__(rdd, noblock=True, **self.get_params())
if dtype is np.ndarray:
return ArrayRDD(rdd, bsize=self.bsize, noblock=True)
elif dtype is sp.spmatrix:
return SparseRDD(rdd, bsize=self.bsize, noblock=True)
else:
return BlockRDD(rdd, bsize=self.bsize, dtype=dtype, noblock=True)
|
Returns the shape of the data.
|
def shape(self):
"""Returns the shape of the data."""
# TODO cache
first = self.first().shape
shape = self._rdd.map(lambda x: x.shape[0]).sum()
return (shape,) + first[1:]
|
Returns the data as numpy.array from each partition.
|
def toarray(self):
"""Returns the data as numpy.array from each partition."""
rdd = self._rdd.map(lambda x: x.toarray())
return np.concatenate(rdd.collect())
|
Execute the blocking process on the given rdd.
|
def _block(self, rdd, bsize, dtype):
"""Execute the blocking process on the given rdd.
Parameters
----------
rdd : pyspark.rdd.RDD
Distributed data to block
bsize : int or None
The desired size of the blocks
Returns
-------
rdd : pyspark.rdd.RDD
Blocked rdd.
"""
return rdd.mapPartitions(lambda x: _block_tuple(x, dtype, bsize))
|
Execute a transformation on a column or columns. Returns the modified DictRDD.
|
def transform(self, fn, column=None, dtype=None):
"""Execute a transformation on a column or columns. Returns the modified
DictRDD.
Parameters
----------
fn : function
The function to execute on the columns.
column : {str, list or None}
The column(s) to transform. If None is specified the method is
equivalent to map.
dtype : {type, list or None}
The dtype of the transformed column(s).
Returns
-------
result : DictRDD
DictRDD with transformed column(s).
TODO: optimize
"""
dtypes = self.dtype
if column is None:
indices = list(range(len(self.columns)))
else:
if not type(column) in (list, tuple):
column = [column]
indices = [self.columns.index(c) for c in column]
if dtype is not None:
if not type(dtype) in (list, tuple):
dtype = [dtype]
dtypes = [dtype[indices.index(i)] if i in indices else t
for i, t in enumerate(self.dtype)]
def mapper(values):
result = fn(*[values[i] for i in indices])
if len(indices) == 1:
result = (result,)
elif not isinstance(result, (tuple, list)):
raise ValueError("Transformer function must return an"
" iterable!")
elif len(result) != len(indices):
raise ValueError("Transformer result's length must be"
" equal to the given columns length!")
return tuple(result[indices.index(i)] if i in indices else v
for i, v in enumerate(values))
return DictRDD(self._rdd.map(mapper),
columns=self.columns, dtype=dtypes,
bsize=self.bsize, noblock=True)
|
Return zero if the given permission bit of the file is not set; otherwise return a positive value.
|
def bitperm(s, perm, pos):
"""Returns zero if there are no permissions for a bit of the perm. of a file. Otherwise it returns a positive value
:param os.stat_result s: os.stat(file) object
:param str perm: R (Read) or W (Write) or X (eXecute)
:param str pos: USR (USeR) or GRP (GRouP) or OTH (OTHer)
:return: mask value
:rtype: int
"""
perm = perm.upper()
pos = pos.upper()
assert perm in ['R', 'W', 'X']
assert pos in ['USR', 'GRP', 'OTH']
return s.st_mode & getattr(stat, 'S_I{}{}'.format(perm, pos))
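A quick usage sketch of bitperm as defined above, run against this script's own file (assumes __file__ is available, i.e. the code runs from a file):
import os

st = os.stat(__file__)
# Check a couple of permission bits with the helper above.
print(bool(bitperm(st, 'r', 'usr')))  # True for a readable file
print(bool(bitperm(st, 'w', 'oth')))  # usually False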
|
File is only writable by root
|
def only_root_write(path):
"""File is only writable by root
:param str path: Path to file
:return: True if only root can write
:rtype: bool
"""
s = os.stat(path)
for ug, bp in [(s.st_uid, bitperm(s, 'w', 'usr')), (s.st_gid, bitperm(s, 'w', 'grp'))]:
# User id (is not root) and bit permission
if ug and bp:
return False
if bitperm(s, 'w', 'oth'):
return False
return True
|
Command to check configuration file. Raises InvalidConfig on error
|
def check_config(file, printfn=print):
"""Command to check configuration file. Raises InvalidConfig on error
:param str file: path to config file
:param printfn: print function for success message
:return: None
"""
Config(file).read()
printfn('The configuration file "{}" is correct'.format(file))
|
Parse and validate the config file. The read data is accessible as a dictionary in this instance
|
def read(self):
"""Parse and validate the config file. The read data is accessible as a dictionary in this instance
:return: None
"""
try:
data = load(open(self.file), Loader)
except (UnicodeDecodeError, YAMLError) as e:
raise InvalidConfig(self.file, '{}'.format(e))
try:
validate(data, SCHEMA)
except ValidationError as e:
raise InvalidConfig(self.file, e)
self.update(data)
|
Get the arguments to execute a command as a user
|
def run_as_cmd(cmd, user, shell='bash'):
"""Get the arguments to execute a command as a user
:param str cmd: command to execute
:param user: user to run the command as
:param shell: Bash, zsh, etc.
:return: arguments
:rtype: list
"""
to_execute = get_shell(shell) + [EXECUTE_SHELL_PARAM, cmd]
if user == 'root':
return to_execute
return ['sudo', '-s', '--set-home', '-u', user] + to_execute
|
Execute a command in a subprocess
|
def execute_cmd(cmd, cwd=None, timeout=5):
"""Excecute command on thread
:param cmd: Command to execute
:param cwd: current working directory
:return: None
"""
p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
p.wait(timeout=timeout)
except subprocess.TimeoutExpired:
return None
else:
stdout, stderr = p.stdout.read(), p.stderr.read()
if sys.version_info >= (3,):
stdout, stderr = stdout.decode('utf-8', errors='ignore'), stderr.decode('utf-8', errors='ignore')
if p.returncode:
raise ExecuteError('Error running command {}: The error code {} has returned. Stderr: {}'.format(
' '.join(cmd), p.returncode, stderr
))
else:
return stdout, stderr
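A minimal usage sketch of execute_cmd as defined above (assumes a POSIX 'echo' binary is on the PATH):
out = execute_cmd(['echo', 'hello'])
if out is not None:        # None means the timeout expired
    stdout, stderr = out
    print(stdout.strip())  # hello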
|
Execute command on remote machine using SSH
|
def execute_over_ssh(cmd, ssh, cwd=None, shell='bash'):
"""Excecute command on remote machine using SSH
:param cmd: Command to execute
:param ssh: Server to connect. Port is optional
:param cwd: current working directory
:return: None
"""
port = None
parts = ssh.split(':', 1)
if len(parts) > 1 and not parts[1].isdigit():
raise InvalidConfig(extra_body='Invalid port number on ssh config: {}'.format(parts[1]))
elif len(parts) > 1:
port = parts[1]
quoted_cmd = ' '.join([x.replace("'", """'"'"'""") for x in cmd.split(' ')])
remote_cmd = ' '.join([
' '.join(get_shell(shell)), # /usr/bin/env bash
' '.join([EXECUTE_SHELL_PARAM, "'", ' '.join((['cd', cwd, ';'] if cwd else []) + [quoted_cmd]), "'"])],
)
return ['ssh', parts[0]] + (['-p', port] if port else []) + ['-C'] + [remote_cmd]
|
Execute using self.data
|
def execute(self, root_allowed=False):
"""Execute using self.data
:param bool root_allowed: Allow execute as root commands
:return:
"""
if self.user == ROOT_USER and not root_allowed and not self.data.get('ssh'):
raise SecurityException('For security, execute commands as root is not allowed. '
'Use --root-allowed to allow executing commands as root. '
' It is however recommended to add a user to the configuration '
'of the device (device: {})'.format(self.name))
if self.data.get('user') and self.data.get('ssh'):
raise InvalidConfig('User option is unsupported in ssh mode. The ssh user must be defined in '
'the ssh option. For example: user@machine')
if self.data.get('ssh'):
cmd = execute_over_ssh(self.data['cmd'], self.data['ssh'], self.data.get('cwd'))
output = execute_cmd(cmd)
else:
cmd = run_as_cmd(self.data['cmd'], self.user)
output = execute_cmd(cmd, self.data.get('cwd'))
if output:
return output[0]
|
Check self.data. Raise InvalidConfig on error
|
def validate(self):
"""Check self.data. Raise InvalidConfig on error
:return: None
"""
if (self.data.get('content-type') or self.data.get('body')) and \
self.data.get('method', '').lower() not in CONTENT_TYPE_METHODS:
raise InvalidConfig(
extra_body='The body/content-type option only can be used with the {} methods. The device is {}. '
'Check the configuration file.'.format(', '.join(CONTENT_TYPE_METHODS), self.name)
)
self.data['content-type'] = CONTENT_TYPE_ALIASES.get(self.data.get('content-type'),
self.data.get('content-type'))
form_type = CONTENT_TYPE_ALIASES['form']
if self.data.get('body') and (self.data.get('content-type') or form_type) == form_type:
try:
self.data['body'] = json.loads(self.data['body'])
except JSONDecodeError:
raise InvalidConfig(
extra_body='Invalid JSON body on {} device.'.format(self.name)
)
|
Execute using self.data
|
def execute(self, root_allowed=False):
"""Execute using self.data
:param bool root_allowed: Only used for ExecuteCmd
:return:
"""
kwargs = {'stream': True, 'timeout': 15,
'headers': self.data.get('headers', {})}
if self.data.get('content-type'):
kwargs['headers']['content-type'] = self.data['content-type']
if self.data.get('body'):
kwargs['data'] = self.data['body']
if self.data.get('auth'):
kwargs['auth'] = tuple(self.data['auth'].split(':', 1))
try:
resp = request(self.data.get('method', 'get').lower(), self.data['url'],
verify=self.data.get('verify', True),
**kwargs)
except RequestException as e:
raise ExecuteError('Exception on request to {}: {}'.format(self.data['url'], e))
if resp.status_code >= 400:
raise ExecuteError('"{}" return code {}.'.format(self.data['url'], resp.status_code))
data = resp.raw.read(1000, decode_content=True)
if sys.version_info >= (3,):
data = data.decode('utf-8', errors='ignore')
return data
|
Get HTTP Headers to send. By default default_headers
|
def get_headers(self):
"""Get HTTP Headers to send. By default default_headers
:return: HTTP Headers
:rtype: dict
"""
headers = copy.copy(self.default_headers or {})
headers.update(self.data.get('headers') or {})
return headers
|
API url
|
def get_url(self):
"""API url
:return: url
:rtype: str
"""
url = self.data[self.execute_name]
parsed = urlparse(url)
if not parsed.scheme:
url = '{}://{}'.format(self.default_protocol, url)
if not url.split(':')[-1].isalnum():
url += ':{}'.format(self.default_port)
return url
|
Return "data" value on self.data
|
def get_body(self):
"""Return "data" value on self.data
:return: data to send
:rtype: str
"""
if self.default_body:
return self.default_body
data = self.data.get('data')
if isinstance(data, dict):
return json.dumps(data)
return data
|
Home assistant url
|
def get_url(self):
"""Home assistant url
:return: url
:rtype: str
"""
url = super(ExecuteHomeAssistant, self).get_url()
if not self.data.get('event'):
raise InvalidConfig(extra_body='Event option is required for Home Assistant on {} device.'.format(self.name))
url += '/api/events/{}'.format(self.data['event'])
return url
|
IFTTT Webhook url
|
def get_url(self):
"""IFTTT Webhook url
:return: url
:rtype: str
"""
if not self.data[self.execute_name]:
raise InvalidConfig(extra_body='Value for IFTTT is required on {} device. Get your key here: '
'https://ifttt.com/services/maker_webhooks/settings'.format(self.name))
if not self.data.get('event'):
raise InvalidConfig(extra_body='Event option is required for IFTTT on {} device. '
'You define the event name when creating a Webhook '
'applet'.format(self.name))
url = self.url_pattern.format(event=self.data['event'], key=self.data[self.execute_name])
return url
|
Return source mac address for this Scapy Packet
|
def pkt_text(pkt):
"""Return source mac address for this Scapy Packet
:param scapy.packet.Packet pkt: Scapy Packet
:return: Mac address. Include (Amazon Device) for these devices
:rtype: str
"""
if pkt.src.upper() in BANNED_DEVICES:
body = ''
elif pkt.src.upper()[:8] in AMAZON_DEVICES:
body = '{} (Amazon Device)'.format(pkt.src)
else:
body = pkt.src
return body
|
scan_devices callback. Register src mac to avoid src repetition. Print device on screen.
|
def discovery_print(pkt):
"""Scandevice callback. Register src mac to avoid src repetition.
Print device on screen.
:param scapy.packet.Packet pkt: Scapy Packet
:return: None
"""
if pkt.src in mac_id_list:
return
mac_id_list.append(pkt.src)
text = pkt_text(pkt)
click.secho(text, fg='magenta') if 'Amazon' in text else click.echo(text)
|
Print help and scan devices on screen.
|
def discover(interface=None):
"""Print help and scan devices on screen.
:return: None
"""
click.secho(HELP, fg='yellow')
scan_devices(discovery_print, lfilter=lambda d: d.src not in mac_id_list, iface=interface)
|
Execute this device
|
def execute(self, root_allowed=False):
"""Execute this device
:param bool root_allowed: Only used for ExecuteCmd
:return: None
"""
logger.debug('%s device executed (mac %s)', self.name, self.src)
if not self.execute_instance:
msg = '%s: There is no execution method in the device conf.'
logger.warning(msg, self.name)
self.send_confirmation(msg % self.name, False)
return
try:
result = self.execute_instance.execute(root_allowed)
except Exception as e:
self.send_confirmation('Error executing the device {}: {}'.format(self.name, e), False)
raise
else:
result = 'The {} device has been started and is running right now'.format(self.name) \
if result is None else result
result = result or 'The {} device has been executed successfully'.format(self.name)
self.send_confirmation(result)
return result
|
Send success or error message to configured confirmation
|
def send_confirmation(self, message, success=True):
"""Send success or error message to configured confirmation
:param str message: Body message to send
:param bool success: Device executed successfully to personalize message
:return: None
"""
message = message.strip()
if not self.confirmation:
return
try:
self.confirmation.send(message, success)
except Exception as e:
logger.warning('Error sending confirmation on device {}: {}'.format(self.name, e))
|
Press button. Check DEFAULT_DELAY.
|
def on_push(self, device):
"""Press button. Check DEFAULT_DELAY.
:param scapy.packet.Packet device: Scapy packet
:return: None
"""
src = device.src.lower()
if last_execution[src] + self.settings.get('delay', DEFAULT_DELAY) > time.time():
return
last_execution[src] = time.time()
self.execute(device)
|
Execute a device. Used if the time between executions is greater than DEFAULT_DELAY
|
def execute(self, device):
"""Execute a device. Used if the time between executions is greater than DEFAULT_DELAY
:param scapy.packet.Packet device: Scapy packet
:return: None
"""
src = device.src.lower()
device = self.devices[src]
threading.Thread(target=device.execute, kwargs={
'root_allowed': self.root_allowed
}).start()
|
Start daemon mode
|
def run(self, root_allowed=False):
"""Start daemon mode
:param bool root_allowed: Only used for ExecuteCmd
:return: loop
"""
self.root_allowed = root_allowed
scan_devices(self.on_push, lambda d: d.src.lower() in self.devices, self.settings.get('interface'))
|
Sniff packets
|
def scan_devices(fn, lfilter, iface=None):
"""Sniff packages
:param fn: callback on packet
:param lfilter: filter packages
:return: loop
"""
try:
sniff(prn=fn, store=0,
# filter="udp",
filter="arp or (udp and src port 68 and dst port 67 and src host 0.0.0.0)",
lfilter=lfilter, iface=iface)
except PermissionError:
raise SocketPermissionError
|
Loads a web page in the current browser session.
|
def open_url(absolute_or_relative_url):
"""
Loads a web page in the current browser session.
:param absolute_or_relative_url:
an absolute url to a web page if config.base_url is not specified,
otherwise a relative url
:Usage:
open_url('http://mydomain.com/subpage1')
open_url('http://mydomain.com/subpage2')
# OR
config.base_url = 'http://mydomain.com'
open_url('/subpage1')
open_url('/subpage2')
"""
# todo: refactor next line when app_host is removed
base_url = selene.config.app_host if selene.config.app_host else selene.config.base_url
driver().get(base_url + absolute_or_relative_url)
|
Convert an OFX Transaction to a posting
|
def convert(self, txn):
"""
Convert an OFX Transaction to a posting
"""
ofxid = self.mk_ofxid(txn.id)
metadata = {}
posting_metadata = {"ofxid": ofxid}
if isinstance(txn, OfxTransaction):
posting = Posting(self.name,
Amount(txn.amount, self.currency),
metadata=posting_metadata)
return Transaction(
date=txn.date,
payee=self.format_payee(txn),
postings=[
posting,
posting.clone_inverted(
self.mk_dynamic_account(self.format_payee(txn),
exclude=self.name))])
elif isinstance(txn, InvestmentTransaction):
acct1 = self.name
acct2 = self.name
posting1 = None
posting2 = None
security = self.maybe_get_ticker(txn.security)
if isinstance(txn.type, str):
# recent versions of ofxparse
if re.match('^(buy|sell)', txn.type):
acct2 = self.unknownaccount or 'Assets:Unknown'
elif txn.type == 'transfer':
acct2 = 'Transfer'
elif txn.type == 'reinvest':
# reinvestment of income
# TODO: make this configurable
acct2 = 'Income:Interest'
elif txn.type == 'income' and txn.income_type == 'DIV':
# Fidelity lists non-reinvested dividend income as
# type: income, income_type: DIV
# TODO: determine how dividend income is listed from other institutions
# income/DIV transactions do not involve buying or selling a security
# so their postings need special handling compared to
# others
metadata['dividend_from'] = security
acct2 = 'Income:Dividends'
posting1 = Posting(acct1,
Amount(txn.total, self.currency),
metadata=posting_metadata)
posting2 = posting1.clone_inverted(acct2)
else:
# ???
pass
else:
# Old version of ofxparse
if (txn.type in [0, 1, 3, 4]):
# buymf, sellmf, buystock, sellstock
acct2 = self.unknownaccount or 'Assets:Unknown'
elif (txn.type == 2):
# reinvest
acct2 = 'Income:Interest'
else:
# ???
pass
aux_date = None
if txn.settleDate is not None and \
txn.settleDate != txn.tradeDate:
aux_date = txn.settleDate
# income/DIV already defined above;
# this block defines all other posting types
if posting1 is None and posting2 is None:
posting1 = Posting(
acct1,
Amount(
txn.units,
security,
unlimited=True),
unit_price=Amount(
txn.unit_price,
self.currency,
unlimited=True),
metadata=posting_metadata)
posting2 = Posting(
acct2,
Amount(
txn.units *
txn.unit_price,
self.currency,
reverse=True))
else:
# Previously defined if type:income income_type/DIV
pass
return Transaction(
date=txn.tradeDate,
aux_date=aux_date,
payee=self.format_payee(txn),
metadata=metadata,
postings=[posting1, posting2]
)
|
Returns main ledger file path, or None if it cannot be found.
|
def find_ledger_file(ledgerrcpath=None):
"""Returns main ledger file path or raise exception if it cannot be \
found."""
if ledgerrcpath is None:
ledgerrcpath = os.path.abspath(os.path.expanduser("~/.ledgerrc"))
if "LEDGER_FILE" in os.environ:
return os.path.abspath(os.path.expanduser(os.environ["LEDGER_FILE"]))
elif os.path.exists(ledgerrcpath):
# hacky
ledgerrc = open(ledgerrcpath)
for line in ledgerrc.readlines():
md = re.match(r"--file\s+([^\s]+).*", line)
if md is not None:
return os.path.abspath(os.path.expanduser(md.group(1)))
else:
return None
|
This function is the final common pathway of the program:
|
def print_results(converter, ofx, ledger, txns, args):
"""
This function is the final common pathway of the program:
Print initial balance if requested;
Print transactions surviving de-duplication filter;
Print balance assertions if requested;
Print commodity prices obtained from position statements
"""
if args.initial:
if (not(ledger.check_transaction_by_id
("ofxid", converter.mk_ofxid(AUTOSYNC_INITIAL))) and
not(ledger.check_transaction_by_id("ofxid", ALL_AUTOSYNC_INITIAL))):
print(converter.format_initial_balance(ofx.account.statement))
for txn in txns:
print(converter.convert(txn).format(args.indent))
if args.assertions:
print(converter.format_balance(ofx.account.statement))
# if OFX has positions use these to obtain commodity prices
# and print "P" records to provide dated/timed valuations
# Note that this outputs only the commodity price,
# not your position (e.g. # shares), even though this is in the OFX record
if hasattr(ofx.account.statement, 'positions'):
for pos in ofx.account.statement.positions:
print(converter.format_position(pos))
|
Run the unit test suite with each support library and Python version.
|
def compatibility(session, install):
"""Run the unit test suite with each support library and Python version."""
session.install('-e', '.[dev]')
session.install(install)
_run_tests(session)
|
Returns the width in pixels of a string in DejaVu Sans 110pt.
|
def text_width(self, text: str) -> float:
"""Returns the width, in pixels, of a string in DejaVu Sans 110pt."""
width, _ = self._font.getsize(text)
return width
|
Transform README. md into a usable long description.
|
def get_long_description():
"""Transform README.md into a usable long description.
Replaces relative references to svg images to absolute https references.
"""
with open('README.md') as f:
read_me = f.read()
def replace_relative_with_absolute(match):
svg_path = match.group(0)[1:-1]
return ('(https://github.com/google/pybadges/raw/master/'
'%s?sanitize=true)' % svg_path)
return re.sub(r'\(tests/golden-images/.*?\.svg\)',
replace_relative_with_absolute,
read_me)
|
Returns the width in pixels of a string in DejaVu Sans 110pt.
|
def text_width(self, text: str) -> float:
"""Returns the width, in pixels, of a string in DejaVu Sans 110pt."""
width = 0
for index, c in enumerate(text):
width += self._char_to_width.get(c, self._default_character_width)
width -= self._pair_to_kern.get(text[index:index + 2], 0)
return width
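A small worked example of the per-character sum with the kerning correction, using made-up widths (hypothetical numbers, not the real DejaVu metrics):
char_to_width = {'m': 5.2, 'l': 1.2}
pair_to_kern = {'ml': 0.3}
default_width = 4.0

text = 'ml'
width = 0.0
for i, c in enumerate(text):
    width += char_to_width.get(c, default_width)
    width -= pair_to_kern.get(text[i:i + 2], 0)
print(width)  # 5.2 + 1.2 - 0.3 = 6.1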
|
Return a PrecalculatedTextMeasurer given a JSON stream.
|
def from_json(f: TextIO) -> 'PrecalculatedTextMeasurer':
"""Return a PrecalculatedTextMeasurer given a JSON stream.
See precalculate_text.py for details on the required format.
"""
o = json.load(f)
return PrecalculatedTextMeasurer(o['mean-character-length'],
o['character-lengths'],
o['kerning-pairs'])
|
Returns a reasonable default PrecalculatedTextMeasurer.
|
def default(cls) -> 'PrecalculatedTextMeasurer':
"""Returns a reasonable default PrecalculatedTextMeasurer."""
if cls._default_cache is not None:
return cls._default_cache
if pkg_resources.resource_exists(__name__, 'default-widths.json.xz'):
import lzma
with pkg_resources.resource_stream(__name__,
'default-widths.json.xz') as f:
with lzma.open(f, "rt") as g:
cls._default_cache = PrecalculatedTextMeasurer.from_json(
cast(TextIO, g))
return cls._default_cache
elif pkg_resources.resource_exists(__name__, 'default-widths.json'):
with pkg_resources.resource_stream(__name__,
'default-widths.json') as f:
cls._default_cache = PrecalculatedTextMeasurer.from_json(
io.TextIOWrapper(f, encoding='utf-8'))
return cls._default_cache
else:
raise ValueError('could not load default-widths.json')
|
Creates a github-style badge as an SVG image.
|
def badge(left_text: str, right_text: str, left_link: Optional[str] = None,
right_link: Optional[str] = None,
whole_link: Optional[str] = None, logo: Optional[str] = None,
left_color: str = '#555', right_color: str = '#007ec6',
measurer: Optional[text_measurer.TextMeasurer] = None,
embed_logo: bool = False) -> str:
"""Creates a github-style badge as an SVG image.
>>> badge(left_text='coverage', right_text='23%', right_color='red')
'<svg...</svg>'
>>> badge(left_text='build', right_text='green', right_color='green',
... whole_link="http://www.example.com/")
'<svg...</svg>'
Args:
left_text: The text that should appear on the left-hand-side of the
badge e.g. "coverage".
right_text: The text that should appear on the right-hand-side of the
badge e.g. "23%".
left_link: The URL that should be redirected to when the left-hand text
is selected.
right_link: The URL that should be redirected to when the right-hand
text is selected.
whole_link: The link that should be redirected to when the badge is
selected. If set then left_link and right_link may not be set.
logo: A url representing a logo that will be displayed inside the
badge. Can be a data URL e.g. "data:image/svg+xml;utf8,<svg..."
left_color: The color of the part of the badge containing the left-hand
text. Can be a valid CSS color
(see https://developer.mozilla.org/en-US/docs/Web/CSS/color) or a
color name defined here:
https://github.com/badges/shields/blob/master/lib/colorscheme.json
right_color: The color of the part of the badge containing the
right-hand text. Can be a valid CSS color
(see https://developer.mozilla.org/en-US/docs/Web/CSS/color) or a
color name defined here:
https://github.com/badges/shields/blob/master/lib/colorscheme.json
measurer: A text_measurer.TextMeasurer that can be used to measure the
width of left_text and right_text.
embed_logo: If True then embed the logo image directly in the badge.
This can prevent an extra HTTP request and helps with browsers that
will not render externally referenced images. When True, `logo` must be an HTTP/HTTPS
URI or a filesystem path. Also, the `badge` call may raise an
exception if the logo cannot be loaded, is not an image, etc.
"""
if measurer is None:
measurer = (
precalculated_text_measurer.PrecalculatedTextMeasurer
.default())
if (left_link or right_link) and whole_link:
raise ValueError(
'whole_link may not be set with left_link or right_link')
template = _JINJA2_ENVIRONMENT.get_template('badge-template-full.svg')
if logo and embed_logo:
logo = _embed_image(logo)
svg = template.render(
left_text=left_text,
right_text=right_text,
left_text_width=measurer.text_width(left_text) / 10.0,
right_text_width=measurer.text_width(right_text) / 10.0,
left_link=left_link,
right_link=right_link,
whole_link=whole_link,
logo=logo,
left_color=_NAME_TO_COLOR.get(left_color, left_color),
right_color=_NAME_TO_COLOR.get(right_color, right_color),
)
xml = minidom.parseString(svg)
_remove_blanks(xml)
xml.normalize()
return xml.documentElement.toxml()
|
Generate the characters supported by the font at the given path.
|
def generate_supported_characters(deja_vu_sans_path: str) -> Iterable[str]:
"""Generate the characters support by the font at the given path."""
font = ttLib.TTFont(deja_vu_sans_path)
for cmap in font['cmap'].tables:
if cmap.isUnicode():
for code in cmap.cmap:
yield chr(code)
|
Generates the subset of characters that can be encoded by encodings.
|
def generate_encodeable_characters(characters: Iterable[str],
encodings: Iterable[str]) -> Iterable[str]:
"""Generates the subset of 'characters' that can be encoded by 'encodings'.
Args:
characters: The characters to check for encodeability e.g. 'abcd'.
encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5'].
Returns:
The subset of 'characters' that can be encoded using one of the provided
encodings.
"""
for c in characters:
for encoding in encodings:
try:
c.encode(encoding)
yield c
except UnicodeEncodeError:
pass
|
Return a mapping between each given character and its length.
|
def calculate_character_to_length_mapping(
measurer: text_measurer.TextMeasurer,
characters: Iterable[str]) -> Mapping[str, float]:
"""Return a mapping between each given character and its length.
Args:
measurer: The TextMeasurer used to measure the width of the text in
pixels.
characters: The characters to measure e.g. "ml".
Returns:
A mapping from the given characters to their length in pixels, as
determined by 'measurer' e.g. {'m': 5.2, 'l': 1.2}.
"""
char_to_length = {}
for c in characters:
char_to_length[c] = measurer.text_width(c)
return char_to_length
|
Returns a mapping between each *pair* of characters and their kerning.
|
def calculate_pair_to_kern_mapping(
measurer: text_measurer.TextMeasurer,
char_to_length: Mapping[str, float],
characters: Iterable[str]) -> Mapping[str, float]:
"""Returns a mapping between each *pair* of characters and their kerning.
Args:
measurer: The TextMeasurer used to measure the width of each pair of
characters.
char_to_length: A mapping between characters and their length in pixels.
Must contain every character in 'characters' e.g.
{'h': 5.2, 'e': 4.0, 'l': 1.2, 'o': 5.0}.
characters: The characters to generate the kerning mapping for e.g.
'hel'.
Returns:
A mapping between each pair of given characters
(e.g. 'hh', 'he', 'hl', 'eh', 'ee', 'el', 'lh', 'le', 'll') and the kerning
adjustment for that pair of characters i.e. the difference between the
length of the two characters calculated using 'char_to_length' vs.
the length calculated by `measurer`. Positive values indicate that the
length is less than using the sum of 'char_to_length'. Zero values are
excluded from the map e.g. {'hl': 3.1, 'ee': -0.5}.
"""
pair_to_kerning = {}
for a, b in itertools.permutations(characters, 2):
kerned_width = measurer.text_width(a + b)
unkerned_width = char_to_length[a] + char_to_length[b]
kerning = unkerned_width - kerned_width
if abs(kerning) > 0.05:
pair_to_kerning[a + b] = round(kerning, 3)
return pair_to_kerning
|
Write the data required by PrecalculatedTextMeasurer to a stream.
|
def write_json(f: TextIO, deja_vu_sans_path: str,
measurer: text_measurer.TextMeasurer,
encodings: Iterable[str]) -> None:
"""Write the data required by PrecalculatedTextMeasurer to a stream."""
supported_characters = list(
generate_supported_characters(deja_vu_sans_path))
kerning_characters = ''.join(
generate_encodeable_characters(supported_characters, encodings))
char_to_length = calculate_character_to_length_mapping(measurer,
supported_characters)
pair_to_kerning = calculate_pair_to_kern_mapping(measurer, char_to_length,
kerning_characters)
json.dump(
{'mean-character-length': statistics.mean(char_to_length.values()),
'character-lengths': char_to_length,
'kerning-characters': kerning_characters,
'kerning-pairs': pair_to_kerning},
f, sort_keys=True, indent=1)
|
Convolve 2d gaussian.
|
def convolve_gaussian_2d(image, gaussian_kernel_1d):
"""Convolve 2d gaussian."""
result = scipy.ndimage.filters.correlate1d(
image, gaussian_kernel_1d, axis=0)
result = scipy.ndimage.filters.correlate1d(
result, gaussian_kernel_1d, axis=1)
return result
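Correlating twice with the 1D kernel (once per axis) is equivalent to a single correlation with its outer product; a quick SciPy check of that separability (assuming SciPy is installed):
import numpy as np
import scipy.ndimage

rng = np.random.RandomState(0)
image = rng.rand(32, 32)
k = np.array([0.25, 0.5, 0.25])

separable = scipy.ndimage.correlate1d(image, k, axis=0)
separable = scipy.ndimage.correlate1d(separable, k, axis=1)
full = scipy.ndimage.correlate(image, np.outer(k, k))

print(np.allclose(separable, full))  # True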
|
Generate a gaussian kernel.
|
def get_gaussian_kernel(gaussian_kernel_width=11, gaussian_kernel_sigma=1.5):
"""Generate a gaussian kernel."""
# 1D Gaussian kernel definition
gaussian_kernel_1d = numpy.ndarray((gaussian_kernel_width))
norm_mu = int(gaussian_kernel_width / 2)
# Fill Gaussian kernel
for i in range(gaussian_kernel_width):
gaussian_kernel_1d[i] = (exp(-(((i - norm_mu) ** 2)) /
(2 * (gaussian_kernel_sigma ** 2))))
return gaussian_kernel_1d / numpy.sum(gaussian_kernel_1d)
|
Convert PIL image to numpy grayscale array and numpy alpha array.
|
def to_grayscale(img):
"""Convert PIL image to numpy grayscale array and numpy alpha array.
Args:
img (PIL.Image): PIL Image object.
Returns:
(gray, alpha): both numpy arrays.
"""
gray = numpy.asarray(ImageOps.grayscale(img)).astype(numpy.float64)  # numpy.float alias removed in NumPy 1.24
imbands = img.getbands()
alpha = None
if 'A' in imbands:
alpha = numpy.asarray(img.split()[-1]).astype(numpy.float64)
return gray, alpha
|
Main function for pyssim.
|
def main():
"""Main function for pyssim."""
description = '\n'.join([
'Compares an image with a list of images using the SSIM metric.',
' Example:',
' pyssim test-images/test1-1.png "test-images/*"'
])
parser = argparse.ArgumentParser(
prog='pyssim', formatter_class=argparse.RawTextHelpFormatter,
description=description)
parser.add_argument('--cw', help='compute the complex wavelet SSIM',
action='store_true')
parser.add_argument(
'base_image', metavar='image1.png', type=argparse.FileType('r'))
parser.add_argument(
'comparison_images', metavar='image path with* or image2.png')
parser.add_argument('--width', type=int, default=None,
help='scales the image before computing SSIM')
parser.add_argument('--height', type=int, default=None,
help='scales the image before computing SSIM')
args = parser.parse_args()
if args.width and args.height:
size = (args.width, args.height)
else:
size = None
if not args.cw:
gaussian_kernel_sigma = 1.5
gaussian_kernel_width = 11
gaussian_kernel_1d = get_gaussian_kernel(
gaussian_kernel_width, gaussian_kernel_sigma)
comparison_images = glob.glob(args.comparison_images)
is_a_single_image = len(comparison_images) == 1
for comparison_image in comparison_images:
if args.cw:
ssim = SSIM(args.base_image.name, size=size)
ssim_value = ssim.cw_ssim_value(comparison_image)
else:
ssim = SSIM(args.base_image.name, gaussian_kernel_1d, size=size)
ssim_value = ssim.ssim_value(comparison_image)
if is_a_single_image:
sys.stdout.write('%.7g' % ssim_value)
else:
sys.stdout.write('%s - %s: %.7g' % (
args.base_image.name, comparison_image, ssim_value))
sys.stdout.write('\n')
|
Compute the SSIM value from the reference image to the target image.
|
def ssim_value(self, target):
"""Compute the SSIM value from the reference image to the target image.
Args:
target (str or PIL.Image): Input image to compare the reference image
to. This may be a PIL Image object or, to save time, an SSIMImage
object (e.g. the img member of another SSIM object).
Returns:
Computed SSIM float value.
"""
# Performance boost if handed a compatible SSIMImage object.
if not isinstance(target, SSIMImage) \
or not np.array_equal(self.gaussian_kernel_1d,
target.gaussian_kernel_1d):
target = SSIMImage(target, self.gaussian_kernel_1d, self.img.size)
img_mat_12 = self.img.img_gray * target.img_gray
img_mat_sigma_12 = convolve_gaussian_2d(
img_mat_12, self.gaussian_kernel_1d)
img_mat_mu_12 = self.img.img_gray_mu * target.img_gray_mu
img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12
# Numerator of SSIM
num_ssim = ((2 * img_mat_mu_12 + self.c_1) *
(2 * img_mat_sigma_12 + self.c_2))
# Denominator of SSIM
den_ssim = (
(self.img.img_gray_mu_squared + target.img_gray_mu_squared +
self.c_1) *
(self.img.img_gray_sigma_squared +
target.img_gray_sigma_squared + self.c_2))
ssim_map = num_ssim / den_ssim
index = np.average(ssim_map)
return index
|
Compute the complex wavelet SSIM (CW-SSIM) value from the reference image to the target image.
|
def cw_ssim_value(self, target, width=30):
"""Compute the complex wavelet SSIM (CW-SSIM) value from the reference
image to the target image.
Args:
target (str or PIL.Image): Input image to compare the reference image
to. This may be a PIL Image object or, to save time, an SSIMImage
object (e.g. the img member of another SSIM object).
width: width for the wavelet convolution (default: 30)
Returns:
Computed CW-SSIM float value.
"""
if not isinstance(target, SSIMImage):
target = SSIMImage(target, size=self.img.size)
# Define a width for the wavelet convolution
widths = np.arange(1, width+1)
# Use the image data as arrays
sig1 = np.asarray(self.img.img_gray.getdata())
sig2 = np.asarray(target.img_gray.getdata())
# Convolution
cwtmatr1 = signal.cwt(sig1, signal.ricker, widths)
cwtmatr2 = signal.cwt(sig2, signal.ricker, widths)
# Compute the first term
c1c2 = np.multiply(abs(cwtmatr1), abs(cwtmatr2))
c1_2 = np.square(abs(cwtmatr1))
c2_2 = np.square(abs(cwtmatr2))
num_ssim_1 = 2 * np.sum(c1c2, axis=0) + self.k
den_ssim_1 = np.sum(c1_2, axis=0) + np.sum(c2_2, axis=0) + self.k
# Compute the second term
c1c2_conj = np.multiply(cwtmatr1, np.conjugate(cwtmatr2))
num_ssim_2 = 2 * np.abs(np.sum(c1c2_conj, axis=0)) + self.k
den_ssim_2 = 2 * np.sum(np.abs(c1c2_conj), axis=0) + self.k
# Construct the result
ssim_map = (num_ssim_1 / den_ssim_1) * (num_ssim_2 / den_ssim_2)
# Average the per pixel results
index = np.average(ssim_map)
return index
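# A minimal CW-SSIM sketch (hedged; hypothetical file names). The wavelet variant
# needs no Gaussian kernel, matching the --cw branch of the CLI above.
reference = SSIM('reference.png', size=(128, 128))
print('%.7g' % reference.cw_ssim_value('candidate.png', width=30))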
|
Computes SSIM.
|
def compute_ssim(image1, image2, gaussian_kernel_sigma=1.5,
gaussian_kernel_width=11):
"""Computes SSIM.
Args:
      image1: First PIL Image object to compare.
      image2: Second PIL Image object to compare.
      gaussian_kernel_sigma: Sigma of the Gaussian smoothing kernel (default: 1.5).
      gaussian_kernel_width: Width of the Gaussian smoothing kernel (default: 11).
Returns:
SSIM float value.
"""
gaussian_kernel_1d = get_gaussian_kernel(
gaussian_kernel_width, gaussian_kernel_sigma)
return SSIM(image1, gaussian_kernel_1d).ssim_value(image2)
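# A hedged convenience-wrapper sketch: per the docstring the inputs are PIL Image
# objects; the file names are hypothetical.
from PIL import Image
value = compute_ssim(Image.open('reference.png'), Image.open('candidate.png'))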
|
Replicated decorator. Use it to mark your class members that modify a class state. Function will be called asynchronously. Function accepts following additional parameters ( optional ): callback: callback ( result failReason ) failReason - FAIL_REASON <#pysyncobj. FAIL_REASON > _. sync: True - to block execution and wait for result False - async call. If callback is passed sync option is ignored. timeout: if sync is enabled and no result is available for timeout seconds - SyncObjException will be raised. These parameters are reserved and should not be used in kwargs of your replicated method.
|
def replicated(*decArgs, **decKwargs):
"""Replicated decorator. Use it to mark your class members that modifies
a class state. Function will be called asynchronously. Function accepts
flowing additional parameters (optional):
'callback': callback(result, failReason), failReason - `FAIL_REASON <#pysyncobj.FAIL_REASON>`_.
'sync': True - to block execution and wait for result, False - async call. If callback is passed,
'sync' option is ignored.
'timeout': if 'sync' is enabled, and no result is available for 'timeout' seconds -
SyncObjException will be raised.
These parameters are reserved and should not be used in kwargs of your replicated method.
:param func: arbitrary class member
:type func: function
:param ver: (optional) - code version (for zero deployment)
:type ver: int
"""
def replicatedImpl(func):
def newFunc(self, *args, **kwargs):
if kwargs.pop('_doApply', False):
return func(self, *args, **kwargs)
else:
if isinstance(self, SyncObj):
applier = self._applyCommand
funcName = self._getFuncName(func.__name__)
funcID = self._methodToID[funcName]
elif isinstance(self, SyncObjConsumer):
consumerId = id(self)
funcName = self._syncObj._getFuncName((consumerId, func.__name__))
funcID = self._syncObj._methodToID[(consumerId, funcName)]
applier = self._syncObj._applyCommand
else:
raise SyncObjException("Class should be inherited from SyncObj or SyncObjConsumer")
                # Pop the reserved keywords before building the command so they are
                # never serialized as arguments of the replicated call itself.
                callback = kwargs.pop('callback', None)
                sync = kwargs.pop('sync', False)
                timeout = kwargs.pop('timeout', None)
                # 'callback' takes precedence: passing it disables synchronous mode.
                if callback is not None:
                    sync = False
                if sync:
                    asyncResult = AsyncResult()
                    callback = asyncResult.onResult
                if kwargs:
                    cmd = (funcID, args, kwargs)
                elif args:
                    cmd = (funcID, args)
                else:
                    cmd = funcID
applier(pickle.dumps(cmd), callback, _COMMAND_TYPE.REGULAR)
if sync:
res = asyncResult.event.wait(timeout)
if not res:
raise SyncObjException('Timeout')
if not asyncResult.error == 0:
raise SyncObjException(asyncResult.error)
return asyncResult.result
func_dict = newFunc.__dict__ if is_py3 else newFunc.func_dict
func_dict['replicated'] = True
func_dict['ver'] = int(decKwargs.get('ver', 0))
func_dict['origName'] = func.__name__
callframe = sys._getframe(1 if decKwargs else 2)
namespace = callframe.f_locals
newFuncName = func.__name__ + '_v' + str(func_dict['ver'])
namespace[newFuncName] = __copy_func(newFunc, newFuncName)
functools.update_wrapper(newFunc, func)
return newFunc
if len(decArgs) == 1 and len(decKwargs) == 0 and callable(decArgs[0]):
return replicatedImpl(decArgs[0])
return replicatedImpl
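# A hedged usage sketch: a replicated counter built on SyncObj. The addresses are
# hypothetical; the decorated method mutates state, so it is marked @replicated.
class ReplCounter(SyncObj):
    def __init__(self, selfAddr, partnerAddrs):
        super(ReplCounter, self).__init__(selfAddr, partnerAddrs)
        self.__value = 0

    @replicated
    def inc(self):
        self.__value += 1
        return self.__value

    def getValue(self):
        return self.__value

# counter = ReplCounter('127.0.0.1:4321', ['127.0.0.1:4322', '127.0.0.1:4323'])
# counter.inc(sync=True)    # blocks until the command is replicated and applied
# counter.inc(callback=lambda result, failReason: None)   # asynchronous variant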
|
Correctly destroy SyncObj. Stop autoTickThread close connections etc.
|
def destroy(self):
"""
Correctly destroy SyncObj. Stop autoTickThread, close connections, etc.
"""
if self.__conf.autoTick:
self.__destroying = True
else:
self._doDestroy()
|
Waits until initialized ( port bound ). If successful - just returns. If binding failed after conf. maxBindRetries - raise SyncObjException.
|
def waitBinded(self):
"""
        Waits until initialized (port bound).
        On success - just returns.
        If binding failed after conf.maxBindRetries attempts - raises SyncObjException.
"""
try:
self.__transport.waitReady()
except TransportNotReadyError:
raise SyncObjException('BindError')
if not self.__transport.ready:
raise SyncObjException('BindError')
|
Switch to a new code version on all cluster nodes. You should ensure that cluster nodes are updated otherwise they won t be able to apply commands.
|
def setCodeVersion(self, newVersion, callback = None):
"""Switch to a new code version on all cluster nodes. You
should ensure that cluster nodes are updated, otherwise they
won't be able to apply commands.
:param newVersion: new code version
        :type newVersion: int
        :param callback: will be called on success or failure
:type callback: function(`FAIL_REASON <#pysyncobj.FAIL_REASON>`_, None)
"""
assert isinstance(newVersion, int)
if newVersion > self.__selfCodeVersion:
raise Exception('wrong version, current version is %d, requested version is %d' % (self.__selfCodeVersion, newVersion))
if newVersion < self.__enabledCodeVersion:
raise Exception('wrong version, enabled version is %d, requested version is %d' % (self.__enabledCodeVersion, newVersion))
self._applyCommand(pickle.dumps(newVersion), callback, _COMMAND_TYPE.VERSION)
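# A hedged rollout sketch: once every node runs a binary whose methods carry
# @replicated(ver=1), switching the enabled version makes those variants take
# effect cluster-wide. The callback signature follows the decorator docstring.
def onVersionSet(result, failReason):
    logging.info('set code version -> result=%s failReason=%s', result, failReason)

# syncObj.setCodeVersion(1, callback=onVersionSet)   # syncObj is a hypothetical SyncObj instance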
|
Remove single node from cluster ( dynamic membership changes ). Async. You should wait until node successfully removed before removing next node.
|
def removeNodeFromCluster(self, node, callback = None):
"""Remove single node from cluster (dynamic membership changes). Async.
        You should wait until the node is successfully removed before removing
        the next node.
:param node: node object or 'nodeHost:nodePort'
:type node: Node | str
:param callback: will be called on success or fail
:type callback: function(`FAIL_REASON <#pysyncobj.FAIL_REASON>`_, None)
"""
if not self.__conf.dynamicMembershipChange:
raise Exception('dynamicMembershipChange is disabled')
if not isinstance(node, Node):
node = self.__nodeClass(node)
self._applyCommand(pickle.dumps(['rem', node.id, node]), callback, _COMMAND_TYPE.MEMBERSHIP)
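# A hedged membership-change sketch (requires dynamicMembershipChange=True in the
# SyncObjConf; the address below is hypothetical).
def onNodeRemoved(result, failReason):
    if failReason == FAIL_REASON.SUCCESS:
        logging.info('node removed: %s', result)

# syncObj.removeNodeFromCluster('127.0.0.1:4323', callback=onNodeRemoved)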
|
Dumps different debug info about cluster to dict and return it
|
def getStatus(self):
"""Dumps different debug info about cluster to dict and return it"""
status = {}
status['version'] = VERSION
status['revision'] = REVISION
status['self'] = self.__selfNode
status['state'] = self.__raftState
status['leader'] = self.__raftLeader
status['partner_nodes_count'] = len(self.__otherNodes)
for node in self.__otherNodes:
status['partner_node_status_server_' + node.id] = 2 if node in self.__connectedNodes else 0
status['readonly_nodes_count'] = len(self.__readonlyNodes)
for node in self.__readonlyNodes:
status['readonly_node_status_server_' + node.id] = 2 if node in self.__connectedNodes else 0
status['log_len'] = len(self.__raftLog)
status['last_applied'] = self.__raftLastApplied
status['commit_idx'] = self.__raftCommitIndex
status['raft_term'] = self.__raftCurrentTerm
status['next_node_idx_count'] = len(self.__raftNextIndex)
for node, idx in iteritems(self.__raftNextIndex):
status['next_node_idx_server_' + node.id] = idx
status['match_idx_count'] = len(self.__raftMatchIndex)
for node, idx in iteritems(self.__raftMatchIndex):
status['match_idx_server_' + node.id] = idx
status['leader_commit_idx'] = self.__leaderCommitIndex
status['uptime'] = int(time.time() - self.__startTime)
status['self_code_version'] = self.__selfCodeVersion
status['enabled_code_version'] = self.__enabledCodeVersion
return status
|
Dumps different debug info about cluster to default logger
|
def printStatus(self):
"""Dumps different debug info about cluster to default logger"""
status = self.getStatus()
for k, v in iteritems(status):
logging.info('%s: %s' % (str(k), str(v)))
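# A hedged monitoring sketch: getStatus() returns a plain dict, so individual fields
# produced above (leader, Raft term, partner count, ...) can be read directly, while
# printStatus() logs every entry. The helper name is hypothetical.
def logClusterHealth(syncObj):
    status = syncObj.getStatus()
    logging.info('leader=%s term=%s partners=%d', status['leader'],
                 status['raft_term'], status['partner_nodes_count'])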
|
Find the node to which a connection belongs.
|
def _connToNode(self, conn):
"""
Find the node to which a connection belongs.
:param conn: connection object
:type conn: TcpConnection
        :returns: the corresponding node, or None if the node cannot be found
        :rtype: Node or None
"""
for node in self._connections:
if self._connections[node] is conn:
return node
return None
|
Create the TCP server ( but don t bind yet )
|
def _createServer(self):
"""
Create the TCP server (but don't bind yet)
"""
conf = self._syncObj.conf
bindAddr = conf.bindAddress or getattr(self._selfNode, 'address')
if not bindAddr:
raise RuntimeError('Unable to determine bind address')
host, port = bindAddr.rsplit(':', 1)
host = globalDnsResolver().resolve(host)
self._server = TcpServer(self._syncObj._poller, host, port, onNewConnection = self._onNewIncomingConnection,
sendBufferSize = conf.sendBufferSize,
recvBufferSize = conf.recvBufferSize,
connectionTimeout = conf.connectionTimeout)
|
Bind the server unless it is already bound this is a read - only node or the last bind attempt was made too recently.
|
def _maybeBind(self):
"""
        Bind the server unless it is already bound, this is a read-only node, or the last bind attempt was made too recently.
        :raises TransportNotReadyError: if the bind attempt fails
"""
if self._ready or self._selfIsReadonlyNode or time.time() < self._lastBindAttemptTime + self._syncObj.conf.bindRetryTime:
return
self._lastBindAttemptTime = time.time()
try:
self._server.bind()
except Exception as e:
self._bindAttempts += 1
if self._syncObj.conf.maxBindRetries and self._bindAttempts >= self._syncObj.conf.maxBindRetries:
self._bindOverEvent.set()
raise TransportNotReadyError
else:
self._ready = True
self._bindOverEvent.set()
|
Callback for connections initiated by the other side
|
def _onNewIncomingConnection(self, conn):
"""
Callback for connections initiated by the other side
:param conn: connection object
:type conn: TcpConnection
"""
self._unknownConnections.add(conn)
encryptor = self._syncObj.encryptor
if encryptor:
conn.encryptor = encryptor
conn.setOnMessageReceivedCallback(functools.partial(self._onIncomingMessageReceived, conn))
conn.setOnDisconnectedCallback(functools.partial(self._onDisconnected, conn))
|
Callback for initial messages on incoming connections. Handles encryption utility messages and association of the connection with a Node. Once this initial setup is done the relevant connected callback is executed and further messages are deferred to the onMessageReceived callback.
|
def _onIncomingMessageReceived(self, conn, message):
"""
Callback for initial messages on incoming connections. Handles encryption, utility messages, and association of the connection with a Node.
Once this initial setup is done, the relevant connected callback is executed, and further messages are deferred to the onMessageReceived callback.
:param conn: connection object
:type conn: TcpConnection
:param message: received message
:type message: any
"""
if self._syncObj.encryptor and not conn.sendRandKey:
conn.sendRandKey = message
conn.recvRandKey = os.urandom(32)
conn.send(conn.recvRandKey)
return
# Utility messages
if isinstance(message, list):
done = False
try:
if message[0] == 'status':
conn.send(self._syncObj.getStatus())
done = True
elif message[0] == 'add':
self._syncObj.addNodeToCluster(message[1], callback = functools.partial(self._utilityCallback, conn = conn, cmd = 'ADD', arg = message[1]))
done = True
elif message[0] == 'remove':
if message[1] == self._selfNode.address:
conn.send('FAIL REMOVE ' + message[1])
else:
self._syncObj.removeNodeFromCluster(message[1], callback = functools.partial(self._utilityCallback, conn = conn, cmd = 'REMOVE', arg = message[1]))
done = True
elif message[0] == 'set_version':
self._syncObj.setCodeVersion(message[1], callback = functools.partial(self._utilityCallback, conn = conn, cmd = 'SET_VERSION', arg = str(message[1])))
done = True
except Exception as e:
conn.send(str(e))
done = True
if done:
return
# At this point, message should be either a node ID (i.e. address) or 'readonly'
node = self._nodeAddrToNode[message] if message in self._nodeAddrToNode else None
if node is None and message != 'readonly':
conn.disconnect()
self._unknownConnections.discard(conn)
return
readonly = node is None
if readonly:
nodeId = str(self._readonlyNodesCounter)
node = Node(nodeId)
self._readonlyNodes.add(node)
self._readonlyNodesCounter += 1
self._unknownConnections.discard(conn)
self._connections[node] = conn
conn.setOnMessageReceivedCallback(functools.partial(self._onMessageReceived, node))
if not readonly:
self._onNodeConnected(node)
else:
self._onReadonlyNodeConnected(node)
|
Callback for the utility messages
|
def _utilityCallback(self, res, err, conn, cmd, arg):
"""
Callback for the utility messages
:param res: result of the command
:param err: error code (one of pysyncobj.config.FAIL_REASON)
:param conn: utility connection
:param cmd: command
:param arg: command arguments
"""
cmdResult = 'FAIL'
if err == FAIL_REASON.SUCCESS:
cmdResult = 'SUCCESS'
conn.send(cmdResult + ' ' + cmd + ' ' + arg)
|
Check whether this node should initiate a connection to another node
|
def _shouldConnect(self, node):
"""
Check whether this node should initiate a connection to another node
:param node: the other node
:type node: Node
"""
return isinstance(node, TCPNode) and node not in self._preventConnectNodes and (self._selfIsReadonlyNode or self._selfNode.address > node.address)
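# A worked sketch of the connection-direction rule above: between two full (non-readonly)
# nodes, only the one with the lexicographically larger address dials out, so each pair
# maintains exactly one connection. The addresses are hypothetical.
self_addr, peer_addr = 'serverB:4321', 'serverA:4321'
initiates = self_addr > peer_addr   # True here; the peer evaluates the mirror test and just listens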
|
Connect to a node if necessary.
|
def _connectIfNecessarySingle(self, node):
"""
Connect to a node if necessary.
:param node: node to connect to
:type node: Node
"""
if node in self._connections and self._connections[node].state != CONNECTION_STATE.DISCONNECTED:
return True
if not self._shouldConnect(node):
return False
assert node in self._connections # Since we "should connect" to this node, there should always be a connection object already in place.
if node in self._lastConnectAttempt and time.time() - self._lastConnectAttempt[node] < self._syncObj.conf.connectionRetryTime:
return False
self._lastConnectAttempt[node] = time.time()
return self._connections[node].connect(node.ip, node.port)
|
Callback for when a new connection from this to another node is established. Handles encryption and informs the other node which node this is. If encryption is disabled this triggers the onNodeConnected callback and messages are deferred to the onMessageReceived callback. If encryption is enabled the first message is handled by _onOutgoingMessageReceived.
|
def _onOutgoingConnected(self, conn):
"""
Callback for when a new connection from this to another node is established. Handles encryption and informs the other node which node this is.
If encryption is disabled, this triggers the onNodeConnected callback and messages are deferred to the onMessageReceived callback.
If encryption is enabled, the first message is handled by _onOutgoingMessageReceived.
:param conn: connection object
:type conn: TcpConnection
"""
if self._syncObj.encryptor:
conn.setOnMessageReceivedCallback(functools.partial(self._onOutgoingMessageReceived, conn)) # So we can process the sendRandKey
conn.recvRandKey = os.urandom(32)
conn.send(conn.recvRandKey)
else:
# The onMessageReceived callback is configured in addNode already.
if not self._selfIsReadonlyNode:
conn.send(self._selfNode.address)
else:
conn.send('readonly')
self._onNodeConnected(self._connToNode(conn))
|
Callback for receiving a message on a new outgoing connection. Used only if encryption is enabled to exchange the random keys. Once the key exchange is done this triggers the onNodeConnected callback and further messages are deferred to the onMessageReceived callback.
|
def _onOutgoingMessageReceived(self, conn, message):
"""
Callback for receiving a message on a new outgoing connection. Used only if encryption is enabled to exchange the random keys.
Once the key exchange is done, this triggers the onNodeConnected callback, and further messages are deferred to the onMessageReceived callback.
:param conn: connection object
:type conn: TcpConnection
:param message: received message
:type message: any
"""
if not conn.sendRandKey:
conn.sendRandKey = message
conn.send(self._selfNode.address)
node = self._connToNode(conn)
conn.setOnMessageReceivedCallback(functools.partial(self._onMessageReceived, node))
self._onNodeConnected(node)
|
Callback for when a connection is terminated or considered dead. Initiates a reconnect if necessary.
|
def _onDisconnected(self, conn):
"""
Callback for when a connection is terminated or considered dead. Initiates a reconnect if necessary.
:param conn: connection object
:type conn: TcpConnection
"""
self._unknownConnections.discard(conn)
node = self._connToNode(conn)
if node is not None:
if node in self._nodes:
self._onNodeDisconnected(node)
self._connectIfNecessarySingle(node)
else:
self._readonlyNodes.discard(node)
self._onReadonlyNodeDisconnected(node)
|
Add a node to the network
|
def addNode(self, node):
"""
Add a node to the network
:param node: node to add
:type node: TCPNode
"""
self._nodes.add(node)
self._nodeAddrToNode[node.address] = node
if self._shouldConnect(node):
conn = TcpConnection(poller = self._syncObj._poller,
timeout = self._syncObj.conf.connectionTimeout,
sendBufferSize = self._syncObj.conf.sendBufferSize,
recvBufferSize = self._syncObj.conf.recvBufferSize)
conn.encryptor = self._syncObj.encryptor
conn.setOnConnectedCallback(functools.partial(self._onOutgoingConnected, conn))
conn.setOnMessageReceivedCallback(functools.partial(self._onMessageReceived, node))
conn.setOnDisconnectedCallback(functools.partial(self._onDisconnected, conn))
self._connections[node] = conn
|
Drop a node from the network
|
def dropNode(self, node):
"""
Drop a node from the network
:param node: node to drop
:type node: Node
"""
conn = self._connections.pop(node, None)
if conn is not None:
# Calling conn.disconnect() immediately triggers the onDisconnected callback if the connection isn't already disconnected, so this is necessary to prevent the automatic reconnect.
self._preventConnectNodes.add(node)
conn.disconnect()
self._preventConnectNodes.remove(node)
if isinstance(node, TCPNode):
self._nodes.discard(node)
self._nodeAddrToNode.pop(node.address, None)
else:
self._readonlyNodes.discard(node)
self._lastConnectAttempt.pop(node, None)
|
Send a message to a node. Returns False if the connection appears to be dead either before or after actually trying to send the message.
|
def send(self, node, message):
"""
Send a message to a node. Returns False if the connection appears to be dead either before or after actually trying to send the message.
:param node: target node
:type node: Node
:param message: message
        :type message: any
        :returns: success
        :rtype: bool
"""
if node not in self._connections or self._connections[node].state != CONNECTION_STATE.CONNECTED:
return False
self._connections[node].send(message)
if self._connections[node].state != CONNECTION_STATE.CONNECTED:
return False
return True
|
Destroy this transport
|
def destroy(self):
"""
Destroy this transport
"""
self.setOnMessageReceivedCallback(None)
self.setOnNodeConnectedCallback(None)
self.setOnNodeDisconnectedCallback(None)
self.setOnReadonlyNodeConnectedCallback(None)
self.setOnReadonlyNodeDisconnectedCallback(None)
for node in self._nodes | self._readonlyNodes:
self.dropNode(node)
if self._server is not None:
self._server.unbind()
for conn in self._unknownConnections:
conn.disconnect()
self._unknownConnections = set()
|
Put an item into the queue. True - if item placed in queue. False - if queue is full and item can not be placed.
|
def put(self, item):
"""Put an item into the queue.
True - if item placed in queue.
False - if queue is full and item can not be placed."""
if self.__maxsize and len(self.__data) >= self.__maxsize:
return False
self.__data.append(item)
return True
|
Put an item into the queue. Items should be comparable eg. tuples. True - if item placed in queue. False - if queue is full and item can not be placed.
|
def put(self, item):
"""Put an item into the queue. Items should be comparable, eg. tuples.
True - if item placed in queue.
False - if queue is full and item can not be placed."""
if self.__maxsize and len(self.__data) >= self.__maxsize:
return False
heapq.heappush(self.__data, item)
return True
|
Extract the smallest item from queue. Return default if queue is empty.
|
def get(self, default=None):
"""Extract the smallest item from queue.
Return default if queue is empty."""
if not self.__data:
return default
return heapq.heappop(self.__data)
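# A hedged illustration of the ordering contract behind the heap-backed put()/get()
# pair above: comparable items (e.g. (priority, payload) tuples) come back
# smallest-first, and get() falls back to the supplied default once drained.
import heapq
heap = []
for entry in [(3, 'c'), (1, 'a'), (2, 'b')]:
    heapq.heappush(heap, entry)
smallest = heapq.heappop(heap)   # (1, 'a')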
|