Dataset columns (type and observed size range):
  partition          stringclasses   3 values
  func_name          stringlengths   1 to 134
  docstring          stringlengths   1 to 46.9k
  path               stringlengths   4 to 223
  original_string    stringlengths   75 to 104k
  code               stringlengths   75 to 104k
  docstring_tokens   listlengths     1 to 1.97k
  repo               stringlengths   7 to 55
  language           stringclasses   1 value
  url                stringlengths   87 to 315
  code_tokens        listlengths     19 to 28.4k
  sha                stringlengths   40 to 40
test
SparkFeatureUnion.fit_transform
splearn/pipeline.py
def fit_transform(self, Z, **fit_params):
    """TODO: rewrite docstring
    Fit all transformers using X, transform the data and concatenate
    results.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data to be transformed.

    Returns
    -------
    X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
        hstack of results of transformers. sum_n_components is the
        sum of n_components (output dimension) over transformers.
    """
    return self.fit(Z, **fit_params).transform(Z)
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/pipeline.py#L248-L262
[ "def", "fit_transform", "(", "self", ",", "Z", ",", "*", "*", "fit_params", ")", ":", "return", "self", ".", "fit", "(", "Z", ",", "*", "*", "fit_params", ")", ".", "transform", "(", "Z", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
test
SparkFeatureUnion.transform
splearn/pipeline.py
def transform(self, Z):
    """TODO: rewrite docstring
    Transform X separately by each transformer, concatenate results.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data to be transformed.

    Returns
    -------
    X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
        hstack of results of transformers. sum_n_components is the
        sum of n_components (output dimension) over transformers.
    """
    if isinstance(Z, DictRDD):
        X = Z[:, 'X']
    else:
        X = Z
    Zs = [_transform_one(trans, name, X, self.transformer_weights)
          for name, trans in self.transformer_list]
    X_rdd = reduce(lambda x, y: x.zip(y._rdd), Zs)
    X_rdd = X_rdd.map(flatten)
    mapper = np.hstack
    for item in X_rdd.first():
        if sp.issparse(item):
            mapper = sp.hstack
    X_rdd = X_rdd.map(lambda x: mapper(x))
    if isinstance(Z, DictRDD):
        return DictRDD([X_rdd, Z[:, 'y']],
                       columns=Z.columns,
                       dtype=Z.dtype,
                       bsize=Z.bsize)
    else:
        return X_rdd
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/pipeline.py#L264-L298
[ "def", "transform", "(", "self", ",", "Z", ")", ":", "if", "isinstance", "(", "Z", ",", "DictRDD", ")", ":", "X", "=", "Z", "[", ":", ",", "'X'", "]", "else", ":", "X", "=", "Z", "Zs", "=", "[", "_transform_one", "(", "trans", ",", "name", ",", "X", ",", "self", ".", "transformer_weights", ")", "for", "name", ",", "trans", "in", "self", ".", "transformer_list", "]", "X_rdd", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", ".", "zip", "(", "y", ".", "_rdd", ")", ",", "Zs", ")", "X_rdd", "=", "X_rdd", ".", "map", "(", "flatten", ")", "mapper", "=", "np", ".", "hstack", "for", "item", "in", "X_rdd", ".", "first", "(", ")", ":", "if", "sp", ".", "issparse", "(", "item", ")", ":", "mapper", "=", "sp", ".", "hstack", "X_rdd", "=", "X_rdd", ".", "map", "(", "lambda", "x", ":", "mapper", "(", "x", ")", ")", "if", "isinstance", "(", "Z", ",", "DictRDD", ")", ":", "return", "DictRDD", "(", "[", "X_rdd", ",", "Z", "[", ":", ",", "'y'", "]", "]", ",", "columns", "=", "Z", ".", "columns", ",", "dtype", "=", "Z", ".", "dtype", ",", "bsize", "=", "Z", ".", "bsize", ")", "else", ":", "return", "X_rdd" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
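The per-block concatenation step in the transform above picks SciPy's sparse hstack whenever any transformer output is sparse. A minimal local sketch of that step, using made-up blocks rather than the splearn API:

import numpy as np
import scipy.sparse as sp

# one block of output per transformer (illustrative data)
block_a = np.arange(6).reshape(3, 2)
block_b = sp.csr_matrix(np.eye(3))

blocks = (block_a, block_b)
# fall back to sparse hstack if any block is sparse, as in transform() above
mapper = sp.hstack if any(sp.issparse(b) for b in blocks) else np.hstack
combined = mapper(blocks)
print(combined.shape)  # (3, 5), and the result stays sparse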
test
SparkRandomForestClassifier.fit
splearn/ensemble/__init__.py
def fit(self, Z, classes=None):
    """Fit the model according to the given training data.

    Parameters
    ----------
    Z : DictRDD containing (X, y) pairs
        X - Training vector
        y - Target labels
    classes : iterable
        The set of available classes

    Returns
    -------
    self : object
        Returns self.
    """
    check_rdd(Z, {'X': (sp.spmatrix, np.ndarray)})
    mapper = lambda X_y: super(SparkRandomForestClassifier, self).fit(
        X_y[0], X_y[1]
    )
    models = Z.map(mapper).collect()
    self.__dict__ = models[0].__dict__
    self.estimators_ = []
    for m in models:
        self.estimators_ += m.estimators_
    self.n_estimators = len(self.estimators_)
    return self
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/ensemble/__init__.py#L122-L150
[ "def", "fit", "(", "self", ",", "Z", ",", "classes", "=", "None", ")", ":", "check_rdd", "(", "Z", ",", "{", "'X'", ":", "(", "sp", ".", "spmatrix", ",", "np", ".", "ndarray", ")", "}", ")", "mapper", "=", "lambda", "X_y", ":", "super", "(", "SparkRandomForestClassifier", ",", "self", ")", ".", "fit", "(", "X_y", "[", "0", "]", ",", "X_y", "[", "1", "]", ")", "models", "=", "Z", ".", "map", "(", "mapper", ")", ".", "collect", "(", ")", "self", ".", "__dict__", "=", "models", "[", "0", "]", ".", "__dict__", "self", ".", "estimators_", "=", "[", "]", "for", "m", "in", "models", ":", "self", ".", "estimators_", "+=", "m", ".", "estimators_", "self", ".", "n_estimators", "=", "len", "(", "self", ".", "estimators_", ")", "return", "self" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
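The fit above trains one scikit-learn forest per RDD block and pools the trees. A minimal local sketch of that pooling on synthetic data, assuming every block contains all classes; this is plain scikit-learn, not the splearn API:

import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.RandomState(0)
X, y = rng.randn(100, 4), rng.randint(0, 2, 100)

# one forest per "block" of the data
blocks = [(X[:50], y[:50]), (X[50:], y[50:])]
models = [RandomForestClassifier(n_estimators=5, random_state=i).fit(Xb, yb)
          for i, (Xb, yb) in enumerate(blocks)]

# pool the trees, as the fit above does with the collected models
pooled = models[0]
pooled.estimators_ = [tree for m in models for tree in m.estimators_]
pooled.n_estimators = len(pooled.estimators_)
print(pooled.n_estimators, pooled.predict(X[:3]))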
test
SparkGridSearchCV._fit
splearn/grid_search.py
def _fit(self, Z, parameter_iterable):
    """Actual fitting, performing the search over parameters."""

    self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)

    cv = self.cv
    cv = _check_cv(cv, Z)

    if self.verbose > 0:
        if isinstance(parameter_iterable, Sized):
            n_candidates = len(parameter_iterable)
            print("Fitting {0} folds for each of {1} candidates, totalling"
                  " {2} fits".format(len(cv), n_candidates,
                                     n_candidates * len(cv)))

    base_estimator = clone(self.estimator)

    pre_dispatch = self.pre_dispatch

    out = Parallel(
        n_jobs=self.n_jobs, verbose=self.verbose,
        pre_dispatch=pre_dispatch, backend="threading"
    )(
        delayed(_fit_and_score)(clone(base_estimator), Z, self.scorer_,
                                train, test, self.verbose, parameters,
                                self.fit_params, return_parameters=True,
                                error_score=self.error_score)
        for parameters in parameter_iterable
        for train, test in cv)

    # Out is a list of triplet: score, estimator, n_test_samples
    n_fits = len(out)
    n_folds = len(cv)

    scores = list()
    grid_scores = list()
    for grid_start in range(0, n_fits, n_folds):
        n_test_samples = 0
        score = 0
        all_scores = []
        for this_score, this_n_test_samples, _, parameters in \
                out[grid_start:grid_start + n_folds]:
            all_scores.append(this_score)
            if self.iid:
                this_score *= this_n_test_samples
                n_test_samples += this_n_test_samples
            score += this_score
        if self.iid:
            score /= float(n_test_samples)
        else:
            score /= float(n_folds)
        scores.append((score, parameters))
        # TODO: shall we also store the test_fold_sizes?
        grid_scores.append(_CVScoreTuple(
            parameters,
            score,
            np.array(all_scores)))
    # Store the computed scores
    self.grid_scores_ = grid_scores

    # Find the best parameters by comparing on the mean validation score:
    # note that `sorted` is deterministic in the way it breaks ties
    best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
                  reverse=True)[0]
    self.best_params_ = best.parameters
    self.best_score_ = best.mean_validation_score

    if self.refit:
        # fit the best estimator using the entire dataset
        # clone first to work around broken estimators
        best_estimator = clone(base_estimator).set_params(
            **best.parameters)
        best_estimator.fit(Z, **self.fit_params)
        self.best_estimator_ = best_estimator
    return self
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/grid_search.py#L17-L90
[ "def", "_fit", "(", "self", ",", "Z", ",", "parameter_iterable", ")", ":", "self", ".", "scorer_", "=", "check_scoring", "(", "self", ".", "estimator", ",", "scoring", "=", "self", ".", "scoring", ")", "cv", "=", "self", ".", "cv", "cv", "=", "_check_cv", "(", "cv", ",", "Z", ")", "if", "self", ".", "verbose", ">", "0", ":", "if", "isinstance", "(", "parameter_iterable", ",", "Sized", ")", ":", "n_candidates", "=", "len", "(", "parameter_iterable", ")", "print", "(", "\"Fitting {0} folds for each of {1} candidates, totalling\"", "\" {2} fits\"", ".", "format", "(", "len", "(", "cv", ")", ",", "n_candidates", ",", "n_candidates", "*", "len", "(", "cv", ")", ")", ")", "base_estimator", "=", "clone", "(", "self", ".", "estimator", ")", "pre_dispatch", "=", "self", ".", "pre_dispatch", "out", "=", "Parallel", "(", "n_jobs", "=", "self", ".", "n_jobs", ",", "verbose", "=", "self", ".", "verbose", ",", "pre_dispatch", "=", "pre_dispatch", ",", "backend", "=", "\"threading\"", ")", "(", "delayed", "(", "_fit_and_score", ")", "(", "clone", "(", "base_estimator", ")", ",", "Z", ",", "self", ".", "scorer_", ",", "train", ",", "test", ",", "self", ".", "verbose", ",", "parameters", ",", "self", ".", "fit_params", ",", "return_parameters", "=", "True", ",", "error_score", "=", "self", ".", "error_score", ")", "for", "parameters", "in", "parameter_iterable", "for", "train", ",", "test", "in", "cv", ")", "# Out is a list of triplet: score, estimator, n_test_samples", "n_fits", "=", "len", "(", "out", ")", "n_folds", "=", "len", "(", "cv", ")", "scores", "=", "list", "(", ")", "grid_scores", "=", "list", "(", ")", "for", "grid_start", "in", "range", "(", "0", ",", "n_fits", ",", "n_folds", ")", ":", "n_test_samples", "=", "0", "score", "=", "0", "all_scores", "=", "[", "]", "for", "this_score", ",", "this_n_test_samples", ",", "_", ",", "parameters", "in", "out", "[", "grid_start", ":", "grid_start", "+", "n_folds", "]", ":", "all_scores", ".", "append", "(", "this_score", ")", "if", "self", ".", "iid", ":", "this_score", "*=", "this_n_test_samples", "n_test_samples", "+=", "this_n_test_samples", "score", "+=", "this_score", "if", "self", ".", "iid", ":", "score", "/=", "float", "(", "n_test_samples", ")", "else", ":", "score", "/=", "float", "(", "n_folds", ")", "scores", ".", "append", "(", "(", "score", ",", "parameters", ")", ")", "# TODO: shall we also store the test_fold_sizes?", "grid_scores", ".", "append", "(", "_CVScoreTuple", "(", "parameters", ",", "score", ",", "np", ".", "array", "(", "all_scores", ")", ")", ")", "# Store the computed scores", "self", ".", "grid_scores_", "=", "grid_scores", "# Find the best parameters by comparing on the mean validation score:", "# note that `sorted` is deterministic in the way it breaks ties", "best", "=", "sorted", "(", "grid_scores", ",", "key", "=", "lambda", "x", ":", "x", ".", "mean_validation_score", ",", "reverse", "=", "True", ")", "[", "0", "]", "self", ".", "best_params_", "=", "best", ".", "parameters", "self", ".", "best_score_", "=", "best", ".", "mean_validation_score", "if", "self", ".", "refit", ":", "# fit the best estimator using the entire dataset", "# clone first to work around broken estimators", "best_estimator", "=", "clone", "(", "base_estimator", ")", ".", "set_params", "(", "*", "*", "best", ".", "parameters", ")", "best_estimator", ".", "fit", "(", "Z", ",", "*", "*", "self", ".", "fit_params", ")", "self", ".", "best_estimator_", "=", "best_estimator", "return", "self" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
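A small numeric illustration of the iid branch in the fold loop above: with iid=True each fold score is weighted by its number of test samples. The numbers below are made up:

fold_scores = [0.80, 0.90, 0.60]
fold_sizes = [100, 100, 50]

iid_mean = sum(s * n for s, n in zip(fold_scores, fold_sizes)) / float(sum(fold_sizes))
plain_mean = sum(fold_scores) / float(len(fold_scores))
print(iid_mean, plain_mean)  # 0.8 vs. ~0.767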
test
SparkLabelEncoder.fit
splearn/preprocessing/label.py
def fit(self, y):
    """Fit label encoder

    Parameters
    ----------
    y : ArrayRDD (n_samples,)
        Target values.

    Returns
    -------
    self : returns an instance of self.
    """
    def mapper(y):
        y = column_or_1d(y, warn=True)
        _check_numpy_unicode_bug(y)
        return np.unique(y)

    def reducer(a, b):
        return np.unique(np.concatenate((a, b)))

    self.classes_ = y.map(mapper).reduce(reducer)
    return self
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/preprocessing/label.py#L49-L70
[ "def", "fit", "(", "self", ",", "y", ")", ":", "def", "mapper", "(", "y", ")", ":", "y", "=", "column_or_1d", "(", "y", ",", "warn", "=", "True", ")", "_check_numpy_unicode_bug", "(", "y", ")", "return", "np", ".", "unique", "(", "y", ")", "def", "reducer", "(", "a", ",", "b", ")", ":", "return", "np", ".", "unique", "(", "np", ".", "concatenate", "(", "(", "a", ",", "b", ")", ")", ")", "self", ".", "classes_", "=", "y", ".", "map", "(", "mapper", ")", ".", "reduce", "(", "reducer", ")", "return", "self" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
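The fit above is a map/reduce over blocks: each block contributes its unique labels and the reducer merges them. A local sketch with plain NumPy arrays standing in for RDD blocks (illustrative data, no Spark):

import numpy as np

blocks = [np.array(['cat', 'dog', 'cat']), np.array(['dog', 'fish'])]
uniques = [np.unique(b) for b in blocks]          # mapper
classes_ = uniques[0]
for u in uniques[1:]:                             # reducer, applied pairwise
    classes_ = np.unique(np.concatenate((classes_, u)))
print(classes_)  # ['cat' 'dog' 'fish']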
test
SparkLabelEncoder.transform
splearn/preprocessing/label.py
def transform(self, y):
    """Transform labels to normalized encoding.

    Parameters
    ----------
    y : ArrayRDD [n_samples]
        Target values.

    Returns
    -------
    y : ArrayRDD [n_samples]
    """
    mapper = super(SparkLabelEncoder, self).transform
    mapper = self.broadcast(mapper, y.context)
    return y.transform(mapper)
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/preprocessing/label.py#L84-L96
[ "def", "transform", "(", "self", ",", "y", ")", ":", "mapper", "=", "super", "(", "SparkLabelEncoder", ",", "self", ")", ".", "transform", "mapper", "=", "self", ".", "broadcast", "(", "mapper", ",", "y", ".", "context", ")", "return", "y", ".", "transform", "(", "mapper", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
test
_score
splearn/cross_validation.py
def _score(estimator, Z_test, scorer):
    """Compute the score of an estimator on a given test set."""
    score = scorer(estimator, Z_test)
    if not isinstance(score, numbers.Number):
        raise ValueError("scoring must return a number, got %s (%s) instead."
                         % (str(score), type(score)))
    return score
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/cross_validation.py#L29-L35
[ "def", "_score", "(", "estimator", ",", "Z_test", ",", "scorer", ")", ":", "score", "=", "scorer", "(", "estimator", ",", "Z_test", ")", "if", "not", "isinstance", "(", "score", ",", "numbers", ".", "Number", ")", ":", "raise", "ValueError", "(", "\"scoring must return a number, got %s (%s) instead.\"", "%", "(", "str", "(", "score", ")", ",", "type", "(", "score", ")", ")", ")", "return", "score" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
test
SparkKMeans.fit
splearn/cluster/k_means_.py
def fit(self, Z):
    """Compute k-means clustering.

    Parameters
    ----------
    Z : ArrayRDD or DictRDD containing array-like or sparse matrix
        Train data.

    Returns
    -------
    self
    """
    X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
    check_rdd(X, (np.ndarray, sp.spmatrix))
    if self.init == 'k-means||':
        self._mllib_model = MLlibKMeans.train(
            X.unblock(),
            self.n_clusters,
            maxIterations=self.max_iter,
            initializationMode="k-means||")
        self.cluster_centers_ = self._mllib_model.centers
    else:
        models = X.map(lambda X: super(SparkKMeans, self).fit(X))
        models = models.map(lambda model: model.cluster_centers_).collect()
        return super(SparkKMeans, self).fit(np.concatenate(models))
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/cluster/k_means_.py#L74-L98
[ "def", "fit", "(", "self", ",", "Z", ")", ":", "X", "=", "Z", "[", ":", ",", "'X'", "]", "if", "isinstance", "(", "Z", ",", "DictRDD", ")", "else", "Z", "check_rdd", "(", "X", ",", "(", "np", ".", "ndarray", ",", "sp", ".", "spmatrix", ")", ")", "if", "self", ".", "init", "==", "'k-means||'", ":", "self", ".", "_mllib_model", "=", "MLlibKMeans", ".", "train", "(", "X", ".", "unblock", "(", ")", ",", "self", ".", "n_clusters", ",", "maxIterations", "=", "self", ".", "max_iter", ",", "initializationMode", "=", "\"k-means||\"", ")", "self", ".", "cluster_centers_", "=", "self", ".", "_mllib_model", ".", "centers", "else", ":", "models", "=", "X", ".", "map", "(", "lambda", "X", ":", "super", "(", "SparkKMeans", ",", "self", ")", ".", "fit", "(", "X", ")", ")", "models", "=", "models", ".", "map", "(", "lambda", "model", ":", "model", ".", "cluster_centers_", ")", ".", "collect", "(", ")", "return", "super", "(", "SparkKMeans", ",", "self", ")", ".", "fit", "(", "np", ".", "concatenate", "(", "models", ")", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
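The non-MLlib branch above fits k-means independently on every block, collects the per-block centers, and re-fits on those centers. A local sketch of that two-stage idea with plain scikit-learn on synthetic blocks, not the splearn API:

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.RandomState(0)
true_centers = np.array([[0, 0], [5, 5], [-5, 5]])
blocks = [np.concatenate([rng.randn(30, 2) + c for c in true_centers])
          for _ in range(3)]

# stage 1: k-means per block, keep only the centers
block_centers = np.concatenate(
    [KMeans(n_clusters=3, n_init=10, random_state=0).fit(b).cluster_centers_
     for b in blocks])
# stage 2: k-means on the pooled block centers
final = KMeans(n_clusters=3, n_init=10, random_state=0).fit(block_centers)
print(np.round(final.cluster_centers_))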
test
SparkKMeans.predict
splearn/cluster/k_means_.py
def predict(self, X):
    """Predict the closest cluster each sample in X belongs to.

    In the vector quantization literature, `cluster_centers_` is called
    the code book and each value returned by `predict` is the index of
    the closest code in the code book.

    Parameters
    ----------
    X : ArrayRDD containing array-like, sparse matrix
        New data to predict.

    Returns
    -------
    labels : ArrayRDD with predictions
        Index of the cluster each sample belongs to.
    """
    check_rdd(X, (np.ndarray, sp.spmatrix))
    if hasattr(self, '_mllib_model'):
        if isinstance(X, ArrayRDD):
            X = X.unblock()
        return X.map(lambda x: self._mllib_model.predict(x))
    else:
        rdd = X.map(lambda X: super(SparkKMeans, self).predict(X))
        return ArrayRDD(rdd)
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/cluster/k_means_.py#L100-L125
[ "def", "predict", "(", "self", ",", "X", ")", ":", "check_rdd", "(", "X", ",", "(", "np", ".", "ndarray", ",", "sp", ".", "spmatrix", ")", ")", "if", "hasattr", "(", "self", ",", "'_mllib_model'", ")", ":", "if", "isinstance", "(", "X", ",", "ArrayRDD", ")", ":", "X", "=", "X", ".", "unblock", "(", ")", "return", "X", ".", "map", "(", "lambda", "x", ":", "self", ".", "_mllib_model", ".", "predict", "(", "x", ")", ")", "else", ":", "rdd", "=", "X", ".", "map", "(", "lambda", "X", ":", "super", "(", "SparkKMeans", ",", "self", ")", ".", "predict", "(", "X", ")", ")", "return", "ArrayRDD", "(", "rdd", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
test
SparkSGDClassifier.fit
splearn/linear_model/stochastic_gradient.py
def fit(self, Z, classes=None):
    """Fit the model according to the given training data.

    Parameters
    ----------
    Z : DictRDD containing (X, y) pairs
        X - Training vector
        y - Target labels
    classes : iterable
        The set of available classes

    Returns
    -------
    self : object
        Returns self.
    """
    check_rdd(Z, {'X': (sp.spmatrix, np.ndarray)})
    self._classes_ = np.unique(classes)
    return self._spark_fit(SparkSGDClassifier, Z)
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/linear_model/stochastic_gradient.py#L154-L172
[ "def", "fit", "(", "self", ",", "Z", ",", "classes", "=", "None", ")", ":", "check_rdd", "(", "Z", ",", "{", "'X'", ":", "(", "sp", ".", "spmatrix", ",", "np", ".", "ndarray", ")", "}", ")", "self", ".", "_classes_", "=", "np", ".", "unique", "(", "classes", ")", "return", "self", ".", "_spark_fit", "(", "SparkSGDClassifier", ",", "Z", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
test
SparkSGDClassifier.predict
splearn/linear_model/stochastic_gradient.py
def predict(self, X):
    """Distributed method to predict class labels for samples in X.

    Parameters
    ----------
    X : ArrayRDD containing {array-like, sparse matrix}
        Samples.

    Returns
    -------
    C : ArrayRDD
        Predicted class label per sample.
    """
    check_rdd(X, (sp.spmatrix, np.ndarray))
    return self._spark_predict(SparkSGDClassifier, X)
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/linear_model/stochastic_gradient.py#L174-L188
[ "def", "predict", "(", "self", ",", "X", ")", ":", "check_rdd", "(", "X", ",", "(", "sp", ".", "spmatrix", ",", "np", ".", "ndarray", ")", ")", "return", "self", ".", "_spark_predict", "(", "SparkSGDClassifier", ",", "X", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
test
check_rdd_dtype
splearn/utils/validation.py
def check_rdd_dtype(rdd, expected_dtype):
    """Checks if the blocks in the RDD matches the expected types.

    Parameters:
    -----------
    rdd: splearn.BlockRDD
        The RDD to check
    expected_dtype: {type, list of types, tuple of types, dict of types}
        Expected type(s). If the RDD is a DictRDD the parameter type is
        restricted to dict.

    Returns:
    --------
    accept: bool
        Returns if the types are matched.
    """
    if not isinstance(rdd, BlockRDD):
        raise TypeError("Expected {0} for parameter rdd, got {1}."
                        .format(BlockRDD, type(rdd)))
    if isinstance(rdd, DictRDD):
        if not isinstance(expected_dtype, dict):
            raise TypeError('Expected {0} for parameter '
                            'expected_dtype, got {1}.'
                            .format(dict, type(expected_dtype)))
        accept = True
        types = dict(list(zip(rdd.columns, rdd.dtype)))
        for key, values in expected_dtype.items():
            if not isinstance(values, (tuple, list)):
                values = [values]
            accept = accept and types[key] in values
        return accept

    if not isinstance(expected_dtype, (tuple, list)):
        expected_dtype = [expected_dtype]

    return rdd.dtype in expected_dtype
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/utils/validation.py#L4-L39
[ "def", "check_rdd_dtype", "(", "rdd", ",", "expected_dtype", ")", ":", "if", "not", "isinstance", "(", "rdd", ",", "BlockRDD", ")", ":", "raise", "TypeError", "(", "\"Expected {0} for parameter rdd, got {1}.\"", ".", "format", "(", "BlockRDD", ",", "type", "(", "rdd", ")", ")", ")", "if", "isinstance", "(", "rdd", ",", "DictRDD", ")", ":", "if", "not", "isinstance", "(", "expected_dtype", ",", "dict", ")", ":", "raise", "TypeError", "(", "'Expected {0} for parameter '", "'expected_dtype, got {1}.'", ".", "format", "(", "dict", ",", "type", "(", "expected_dtype", ")", ")", ")", "accept", "=", "True", "types", "=", "dict", "(", "list", "(", "zip", "(", "rdd", ".", "columns", ",", "rdd", ".", "dtype", ")", ")", ")", "for", "key", ",", "values", "in", "expected_dtype", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "values", ",", "(", "tuple", ",", "list", ")", ")", ":", "values", "=", "[", "values", "]", "accept", "=", "accept", "and", "types", "[", "key", "]", "in", "values", "return", "accept", "if", "not", "isinstance", "(", "expected_dtype", ",", "(", "tuple", ",", "list", ")", ")", ":", "expected_dtype", "=", "[", "expected_dtype", "]", "return", "rdd", ".", "dtype", "in", "expected_dtype" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
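The DictRDD branch above accepts a column when its block dtype is listed among the expected types. A local illustration of that matching rule without Spark; the column and dtype values below are made up:

import numpy as np
import scipy.sparse as sp

columns, dtypes = ('X', 'y'), (np.ndarray, np.ndarray)      # what the blocks hold
expected = {'X': (sp.spmatrix, np.ndarray), 'y': np.ndarray}

types = dict(zip(columns, dtypes))
accept = True
for key, values in expected.items():
    if not isinstance(values, (tuple, list)):
        values = [values]
    accept = accept and types[key] in values
print(accept)  # True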
test
SparkDictVectorizer.fit
splearn/feature_extraction/dict_vectorizer.py
def fit(self, Z):
    """Learn a list of feature name -> indices mappings.

    Parameters
    ----------
    Z : DictRDD with column 'X'
        Dict(s) or Mapping(s) from feature names (arbitrary Python
        objects) to feature values (strings or convertible to dtype).

    Returns
    -------
    self
    """
    X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z

    """Create vocabulary
    """
    class SetAccum(AccumulatorParam):

        def zero(self, initialValue):
            return set(initialValue)

        def addInPlace(self, v1, v2):
            v1 |= v2
            return v1

    accum = X.context.accumulator(set(), SetAccum())

    def mapper(X, separator=self.separator):
        feature_names = []
        for x in X:
            for f, v in six.iteritems(x):
                if isinstance(v, six.string_types):
                    f = "%s%s%s" % (f, self.separator, v)
                feature_names.append(f)
        accum.add(set(feature_names))

    X.foreach(mapper)

    # init vocabulary
    feature_names = list(accum.value)
    if self.sort:
        feature_names.sort()

    vocab = dict((f, i) for i, f in enumerate(feature_names))

    self.feature_names_ = feature_names
    self.vocabulary_ = vocab

    return self
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/feature_extraction/dict_vectorizer.py#L76-L124
[ "def", "fit", "(", "self", ",", "Z", ")", ":", "X", "=", "Z", "[", ":", ",", "'X'", "]", "if", "isinstance", "(", "Z", ",", "DictRDD", ")", "else", "Z", "\"\"\"Create vocabulary\n \"\"\"", "class", "SetAccum", "(", "AccumulatorParam", ")", ":", "def", "zero", "(", "self", ",", "initialValue", ")", ":", "return", "set", "(", "initialValue", ")", "def", "addInPlace", "(", "self", ",", "v1", ",", "v2", ")", ":", "v1", "|=", "v2", "return", "v1", "accum", "=", "X", ".", "context", ".", "accumulator", "(", "set", "(", ")", ",", "SetAccum", "(", ")", ")", "def", "mapper", "(", "X", ",", "separator", "=", "self", ".", "separator", ")", ":", "feature_names", "=", "[", "]", "for", "x", "in", "X", ":", "for", "f", ",", "v", "in", "six", ".", "iteritems", "(", "x", ")", ":", "if", "isinstance", "(", "v", ",", "six", ".", "string_types", ")", ":", "f", "=", "\"%s%s%s\"", "%", "(", "f", ",", "self", ".", "separator", ",", "v", ")", "feature_names", ".", "append", "(", "f", ")", "accum", ".", "add", "(", "set", "(", "feature_names", ")", ")", "X", ".", "foreach", "(", "mapper", ")", "# init vocabulary", "feature_names", "=", "list", "(", "accum", ".", "value", ")", "if", "self", ".", "sort", ":", "feature_names", ".", "sort", "(", ")", "vocab", "=", "dict", "(", "(", "f", ",", "i", ")", "for", "i", ",", "f", "in", "enumerate", "(", "feature_names", ")", ")", "self", ".", "feature_names_", "=", "feature_names", "self", ".", "vocabulary_", "=", "vocab", "return", "self" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
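The mapper above expands string-valued features into "name<separator>value" feature names before they are pooled into the vocabulary. A local sketch of that expansion without the Spark accumulator; the records and separator below are illustrative:

separator = '='
blocks = [[{'city': 'Dubai', 'temp': 33.0}, {'city': 'London', 'temp': 12.0}],
          [{'city': 'San Francisco', 'temp': 18.0}]]

names = set()
for block in blocks:                    # the mapper runs once per block
    for x in block:
        for f, v in x.items():
            if isinstance(v, str):      # string values become "name=value" features
                f = "%s%s%s" % (f, separator, v)
            names.add(f)

feature_names = sorted(names)           # sorted when sorting is enabled
vocabulary = {f: i for i, f in enumerate(feature_names)}
print(feature_names)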
test
SparkDictVectorizer.transform
splearn/feature_extraction/dict_vectorizer.py
def transform(self, Z):
    """Transform ArrayRDD's (or DictRDD's 'X' column's) feature->value
    dicts to array or sparse matrix.

    Named features not encountered during fit or fit_transform will be
    silently ignored.

    Parameters
    ----------
    Z : ArrayRDD or DictRDD with column 'X' containing Mapping or
        iterable over Mappings, length = n_samples
        Dict(s) or Mapping(s) from feature names (arbitrary Python
        objects) to feature values (strings or convertible to dtype).

    Returns
    -------
    Z : transformed, containing {array, sparse matrix}
        Feature vectors; always 2-d.
    """
    mapper = self.broadcast(super(SparkDictVectorizer, self).transform,
                            Z.context)
    dtype = sp.spmatrix if self.sparse else np.ndarray
    return Z.transform(mapper, column='X', dtype=dtype)
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/feature_extraction/dict_vectorizer.py#L126-L147
[ "def", "transform", "(", "self", ",", "Z", ")", ":", "mapper", "=", "self", ".", "broadcast", "(", "super", "(", "SparkDictVectorizer", ",", "self", ")", ".", "transform", ",", "Z", ".", "context", ")", "dtype", "=", "sp", ".", "spmatrix", "if", "self", ".", "sparse", "else", "np", ".", "ndarray", "return", "Z", ".", "transform", "(", "mapper", ",", "column", "=", "'X'", ",", "dtype", "=", "dtype", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
test
SparkVarianceThreshold.fit
splearn/feature_selection/variance_threshold.py
def fit(self, Z):
    """Learn empirical variances from X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Sample vectors from which to compute variances.
    y : any
        Ignored. This parameter exists only for compatibility with
        sklearn.pipeline.Pipeline.

    Returns
    -------
    self
    """
    X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
    check_rdd(X, (np.ndarray, sp.spmatrix))

    def mapper(X):
        """Calculate statistics for every numpy or scipy blocks."""
        X = check_array(X, ('csr', 'csc'), dtype=np.float64)
        if hasattr(X, "toarray"):   # sparse matrix
            mean, var = mean_variance_axis(X, axis=0)
        else:
            mean, var = np.mean(X, axis=0), np.var(X, axis=0)
        return X.shape[0], mean, var

    def reducer(a, b):
        """Calculate the combined statistics."""
        n_a, mean_a, var_a = a
        n_b, mean_b, var_b = b
        n_ab = n_a + n_b
        mean_ab = ((mean_a * n_a) + (mean_b * n_b)) / n_ab
        var_ab = (((n_a * var_a) + (n_b * var_b)) / n_ab) + \
                 ((n_a * n_b) * ((mean_b - mean_a) / n_ab) ** 2)
        return (n_ab, mean_ab, var_ab)

    _, _, self.variances_ = X.map(mapper).treeReduce(reducer)

    if np.all(self.variances_ <= self.threshold):
        msg = "No feature in X meets the variance threshold {0:.5f}"
        if X.shape[0] == 1:
            msg += " (X contains only one sample)"
        raise ValueError(msg.format(self.threshold))

    return self
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/feature_selection/variance_threshold.py#L46-L93
[ "def", "fit", "(", "self", ",", "Z", ")", ":", "X", "=", "Z", "[", ":", ",", "'X'", "]", "if", "isinstance", "(", "Z", ",", "DictRDD", ")", "else", "Z", "check_rdd", "(", "X", ",", "(", "np", ".", "ndarray", ",", "sp", ".", "spmatrix", ")", ")", "def", "mapper", "(", "X", ")", ":", "\"\"\"Calculate statistics for every numpy or scipy blocks.\"\"\"", "X", "=", "check_array", "(", "X", ",", "(", "'csr'", ",", "'csc'", ")", ",", "dtype", "=", "np", ".", "float64", ")", "if", "hasattr", "(", "X", ",", "\"toarray\"", ")", ":", "# sparse matrix", "mean", ",", "var", "=", "mean_variance_axis", "(", "X", ",", "axis", "=", "0", ")", "else", ":", "mean", ",", "var", "=", "np", ".", "mean", "(", "X", ",", "axis", "=", "0", ")", ",", "np", ".", "var", "(", "X", ",", "axis", "=", "0", ")", "return", "X", ".", "shape", "[", "0", "]", ",", "mean", ",", "var", "def", "reducer", "(", "a", ",", "b", ")", ":", "\"\"\"Calculate the combined statistics.\"\"\"", "n_a", ",", "mean_a", ",", "var_a", "=", "a", "n_b", ",", "mean_b", ",", "var_b", "=", "b", "n_ab", "=", "n_a", "+", "n_b", "mean_ab", "=", "(", "(", "mean_a", "*", "n_a", ")", "+", "(", "mean_b", "*", "n_b", ")", ")", "/", "n_ab", "var_ab", "=", "(", "(", "(", "n_a", "*", "var_a", ")", "+", "(", "n_b", "*", "var_b", ")", ")", "/", "n_ab", ")", "+", "(", "(", "n_a", "*", "n_b", ")", "*", "(", "(", "mean_b", "-", "mean_a", ")", "/", "n_ab", ")", "**", "2", ")", "return", "(", "n_ab", ",", "mean_ab", ",", "var_ab", ")", "_", ",", "_", ",", "self", ".", "variances_", "=", "X", ".", "map", "(", "mapper", ")", ".", "treeReduce", "(", "reducer", ")", "if", "np", ".", "all", "(", "self", ".", "variances_", "<=", "self", ".", "threshold", ")", ":", "msg", "=", "\"No feature in X meets the variance threshold {0:.5f}\"", "if", "X", ".", "shape", "[", "0", "]", "==", "1", ":", "msg", "+=", "\" (X contains only one sample)\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "self", ".", "threshold", ")", ")", "return", "self" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
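The reducer above combines per-block means and variances with the usual pooled-moments formula. A quick numeric check with plain NumPy (synthetic blocks) that the combination matches np.var on the concatenated data:

import numpy as np

rng = np.random.RandomState(0)
a, b = rng.randn(40, 3), rng.randn(60, 3) + 2.0

def stats(X):
    return X.shape[0], X.mean(axis=0), X.var(axis=0)

def combine(s1, s2):
    # same algebra as the reducer above
    n_a, mean_a, var_a = s1
    n_b, mean_b, var_b = s2
    n_ab = n_a + n_b
    mean_ab = (mean_a * n_a + mean_b * n_b) / n_ab
    var_ab = ((n_a * var_a + n_b * var_b) / n_ab
              + (n_a * n_b) * ((mean_b - mean_a) / n_ab) ** 2)
    return n_ab, mean_ab, var_ab

_, _, pooled_var = combine(stats(a), stats(b))
print(np.allclose(pooled_var, np.concatenate((a, b)).var(axis=0)))  # True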
test
svd
splearn/decomposition/truncated_svd.py
def svd(blocked_rdd, k):
    """
    Calculate the SVD of a blocked RDD directly, returning only the
    leading k singular vectors. Assumes n rows and d columns, efficient
    when n >> d. Must be able to fit d^2 within the memory of a single
    machine.

    Parameters
    ----------
    blocked_rdd : RDD
        RDD with data points in numpy array blocks

    k : Int
        Number of singular vectors to return

    Returns
    ----------
    u : RDD of blocks
        Left eigenvectors
    s : numpy array
        Singular values
    v : numpy array
        Right eigenvectors
    """
    # compute the covariance matrix (without mean subtraction)
    # TODO use one func for this (with mean subtraction as an option?)
    c = blocked_rdd.map(lambda x: (x.T.dot(x), x.shape[0]))
    prod, n = c.reduce(lambda x, y: (x[0] + y[0], x[1] + y[1]))

    # do local eigendecomposition
    w, v = ln.eig(prod / n)
    w = np.real(w)
    v = np.real(v)
    inds = np.argsort(w)[::-1]
    s = np.sqrt(w[inds[0:k]]) * np.sqrt(n)
    v = v[:, inds[0:k]].T

    # project back into data, normalize by singular values
    u = blocked_rdd.map(lambda x: np.inner(x, v) / s)
    return u, s, v
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/decomposition/truncated_svd.py#L15-L52
[ "def", "svd", "(", "blocked_rdd", ",", "k", ")", ":", "# compute the covariance matrix (without mean subtraction)", "# TODO use one func for this (with mean subtraction as an option?)", "c", "=", "blocked_rdd", ".", "map", "(", "lambda", "x", ":", "(", "x", ".", "T", ".", "dot", "(", "x", ")", ",", "x", ".", "shape", "[", "0", "]", ")", ")", "prod", ",", "n", "=", "c", ".", "reduce", "(", "lambda", "x", ",", "y", ":", "(", "x", "[", "0", "]", "+", "y", "[", "0", "]", ",", "x", "[", "1", "]", "+", "y", "[", "1", "]", ")", ")", "# do local eigendecomposition", "w", ",", "v", "=", "ln", ".", "eig", "(", "prod", "/", "n", ")", "w", "=", "np", ".", "real", "(", "w", ")", "v", "=", "np", ".", "real", "(", "v", ")", "inds", "=", "np", ".", "argsort", "(", "w", ")", "[", ":", ":", "-", "1", "]", "s", "=", "np", ".", "sqrt", "(", "w", "[", "inds", "[", "0", ":", "k", "]", "]", ")", "*", "np", ".", "sqrt", "(", "n", ")", "v", "=", "v", "[", ":", ",", "inds", "[", "0", ":", "k", "]", "]", ".", "T", "# project back into data, normalize by singular values", "u", "=", "blocked_rdd", ".", "map", "(", "lambda", "x", ":", "np", ".", "inner", "(", "x", ",", "v", ")", "/", "s", ")", "return", "u", ",", "s", ",", "v" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
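The function above recovers singular values from the eigendecomposition of X.T.dot(X) / n, rescaling by sqrt(n). A local NumPy check of that identity against np.linalg.svd (synthetic data, no Spark):

import numpy as np

rng = np.random.RandomState(0)
X, k = rng.randn(200, 5), 2
n = X.shape[0]

w, v = np.linalg.eig(X.T.dot(X) / n)           # Gram matrix, built block-wise above
w, v = np.real(w), np.real(v)
inds = np.argsort(w)[::-1]
s = np.sqrt(w[inds[:k]]) * np.sqrt(n)          # singular values
vt = v[:, inds[:k]].T                          # right singular vectors
u = X.dot(vt.T) / s                            # left singular vectors, computed per block above

print(np.allclose(s, np.linalg.svd(X, full_matrices=False)[1][:k]))  # True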
test
svd_em
splearn/decomposition/truncated_svd.py
def svd_em(blocked_rdd, k, maxiter=20, tol=1e-6, compute_u=True, seed=None):
    """
    Calculate the SVD of a blocked RDD using an expectation maximization
    algorithm (from Roweis, NIPS, 1997) that avoids explicitly computing
    the covariance matrix, returning only the leading k singular vectors.
    Assumes n rows and d columns, does not require d^2 to fit into memory
    on a single machine.

    Parameters
    ----------
    blocked_rdd : ArrayRDD
        ArrayRDD with data points in numpy array blocks

    k : Int
        Number of singular vectors to return

    maxiter : Int, optional, default = 20
        Number of iterations to perform

    tol : Double, optional, default = 1e-5
        Tolerance for stopping iterative updates

    seed : Int, optional, default = None
        Seed for random number generator for initializing subspace

    Returns
    ----------
    u : RDD of blocks
        Left eigenvectors
    s : numpy array
        Singular values
    v : numpy array
        Right eigenvectors
    """
    n, m = blocked_rdd.shape[:2]
    sc = blocked_rdd._rdd.context

    def outerprod(x):
        return x.T.dot(x)

    # global run_sum
    # def accumsum(x):
    #     global run_sum
    #     run_sum += x

    # class MatrixAccum(AccumulatorParam):
    #     def zero(self, value):
    #         return np.zeros(np.shape(value))
    #     def addInPlace(self, val1, val2):
    #         val1 += val2
    #         return val1

    if seed is not None:
        rng = np.random.RandomState(seed)
        c = rng.randn(k, m)
    else:
        c = np.random.randn(k, m)
    iter = 0
    error = 100

    # iteratively update subspace using expectation maximization
    # e-step: x = (cc')^-1 c y
    # m-step: c = y x' (xx')^-1
    while (iter < maxiter) & (error > tol):
        c_old = c
        # pre compute (cc')^-1 c
        c_inv = np.dot(c.T, ln.inv(np.dot(c, c.T)))
        premult1 = sc.broadcast(c_inv)

        # compute (xx')^-1 through a map reduce
        xx = blocked_rdd.map(lambda x: outerprod(safe_sparse_dot(x, premult1.value))) \
                        .treeReduce(add)

        # compute (xx')^-1 using an accumulator
        # run_sum = sc.accumulator(np.zeros((k, k)), MatrixAccum())
        # blocked_rdd.map(lambda x: outerprod(safe_sparse_dot(x, premult1.value))) \
        #     .foreachPartition(lambda l: accumsum(sum(l)))
        # xx = run_sum.value

        xx_inv = ln.inv(xx)

        # pre compute (cc')^-1 c (xx')^-1
        premult2 = blocked_rdd.context.broadcast(np.dot(c_inv, xx_inv))

        # compute the new c through a map reduce
        c = blocked_rdd.map(lambda x: safe_sparse_dot(x.T, safe_sparse_dot(x, premult2.value))) \
                       .treeReduce(add)

        # compute the new c using an accumulator
        # run_sum = sc.accumulator(np.zeros((m, k)), MatrixAccum())
        # blocked_rdd.map(lambda x: safe_sparse_dot(x.T, safe_sparse_dot(x, premult2.value))) \
        #     .foreachPartition(lambda l: accumsum(sum(l)))
        # c = run_sum.value

        c = c.T
        error = np.sum((c - c_old) ** 2)
        iter += 1

    # project data into subspace spanned by columns of c
    # use standard eigendecomposition to recover an orthonormal basis
    c = ln.orth(c.T).T
    cov = blocked_rdd.map(lambda x: safe_sparse_dot(x, c.T)) \
                     .map(lambda x: outerprod(x)) \
                     .treeReduce(add)
    w, v = ln.eig(cov / n)
    w = np.real(w)
    v = np.real(v)
    inds = np.argsort(w)[::-1]
    s = np.sqrt(w[inds[0:k]]) * np.sqrt(n)
    v = np.dot(v[:, inds[0:k]].T, c)
    if compute_u:
        v_broadcasted = blocked_rdd.context.broadcast(v)
        u = blocked_rdd.map(
            lambda x: safe_sparse_dot(x, v_broadcasted.value.T) / s)
        return u, s, v
    else:
        return s, v
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/decomposition/truncated_svd.py#L55-L170
[ "def", "svd_em", "(", "blocked_rdd", ",", "k", ",", "maxiter", "=", "20", ",", "tol", "=", "1e-6", ",", "compute_u", "=", "True", ",", "seed", "=", "None", ")", ":", "n", ",", "m", "=", "blocked_rdd", ".", "shape", "[", ":", "2", "]", "sc", "=", "blocked_rdd", ".", "_rdd", ".", "context", "def", "outerprod", "(", "x", ")", ":", "return", "x", ".", "T", ".", "dot", "(", "x", ")", "# global run_sum", "# def accumsum(x):", "# global run_sum", "# run_sum += x", "# class MatrixAccum(AccumulatorParam):", "# def zero(self, value):", "# return np.zeros(np.shape(value))", "# def addInPlace(self, val1, val2):", "# val1 += val2", "# return val1", "if", "seed", "is", "not", "None", ":", "rng", "=", "np", ".", "random", ".", "RandomState", "(", "seed", ")", "c", "=", "rng", ".", "randn", "(", "k", ",", "m", ")", "else", ":", "c", "=", "np", ".", "random", ".", "randn", "(", "k", ",", "m", ")", "iter", "=", "0", "error", "=", "100", "# iteratively update subspace using expectation maximization", "# e-step: x = (cc')^-1 c y", "# m-step: c = y x' (xx')^-1", "while", "(", "iter", "<", "maxiter", ")", "&", "(", "error", ">", "tol", ")", ":", "c_old", "=", "c", "# pre compute (cc')^-1 c", "c_inv", "=", "np", ".", "dot", "(", "c", ".", "T", ",", "ln", ".", "inv", "(", "np", ".", "dot", "(", "c", ",", "c", ".", "T", ")", ")", ")", "premult1", "=", "sc", ".", "broadcast", "(", "c_inv", ")", "# compute (xx')^-1 through a map reduce", "xx", "=", "blocked_rdd", ".", "map", "(", "lambda", "x", ":", "outerprod", "(", "safe_sparse_dot", "(", "x", ",", "premult1", ".", "value", ")", ")", ")", ".", "treeReduce", "(", "add", ")", "# compute (xx')^-1 using an accumulator", "# run_sum = sc.accumulator(np.zeros((k, k)), MatrixAccum())", "# blocked_rdd.map(lambda x: outerprod(safe_sparse_dot(x, premult1.value))) \\", "# .foreachPartition(lambda l: accumsum(sum(l)))", "# xx = run_sum.value", "xx_inv", "=", "ln", ".", "inv", "(", "xx", ")", "# pre compute (cc')^-1 c (xx')^-1", "premult2", "=", "blocked_rdd", ".", "context", ".", "broadcast", "(", "np", ".", "dot", "(", "c_inv", ",", "xx_inv", ")", ")", "# compute the new c through a map reduce", "c", "=", "blocked_rdd", ".", "map", "(", "lambda", "x", ":", "safe_sparse_dot", "(", "x", ".", "T", ",", "safe_sparse_dot", "(", "x", ",", "premult2", ".", "value", ")", ")", ")", ".", "treeReduce", "(", "add", ")", "# compute the new c using an accumulator", "# run_sum = sc.accumulator(np.zeros((m, k)), MatrixAccum())", "# blocked_rdd.map(lambda x: safe_sparse_dot(x.T, safe_sparse_dot(x, premult2.value))) \\", "# .foreachPartition(lambda l: accumsum(sum(l)))", "# c = run_sum.value", "c", "=", "c", ".", "T", "error", "=", "np", ".", "sum", "(", "(", "c", "-", "c_old", ")", "**", "2", ")", "iter", "+=", "1", "# project data into subspace spanned by columns of c", "# use standard eigendecomposition to recover an orthonormal basis", "c", "=", "ln", ".", "orth", "(", "c", ".", "T", ")", ".", "T", "cov", "=", "blocked_rdd", ".", "map", "(", "lambda", "x", ":", "safe_sparse_dot", "(", "x", ",", "c", ".", "T", ")", ")", ".", "map", "(", "lambda", "x", ":", "outerprod", "(", "x", ")", ")", ".", "treeReduce", "(", "add", ")", "w", ",", "v", "=", "ln", ".", "eig", "(", "cov", "/", "n", ")", "w", "=", "np", ".", "real", "(", "w", ")", "v", "=", "np", ".", "real", "(", "v", ")", "inds", "=", "np", ".", "argsort", "(", "w", ")", "[", ":", ":", "-", "1", "]", "s", "=", "np", ".", "sqrt", "(", "w", "[", "inds", "[", "0", ":", "k", "]", "]", ")", "*", "np", ".", "sqrt", "(", "n", ")", "v", "=", 
"np", ".", "dot", "(", "v", "[", ":", ",", "inds", "[", "0", ":", "k", "]", "]", ".", "T", ",", "c", ")", "if", "compute_u", ":", "v_broadcasted", "=", "blocked_rdd", ".", "context", ".", "broadcast", "(", "v", ")", "u", "=", "blocked_rdd", ".", "map", "(", "lambda", "x", ":", "safe_sparse_dot", "(", "x", ",", "v_broadcasted", ".", "value", ".", "T", ")", "/", "s", ")", "return", "u", ",", "s", ",", "v", "else", ":", "return", "s", ",", "v" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
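A minimal single-machine sketch of the EM subspace iteration described above, written with plain NumPy/SciPy instead of blocked RDDs; the helper name svd_em_local and the random test matrix are illustrative only and are not part of sparkit-learn.

import numpy as np
from scipy.linalg import inv, orth

def svd_em_local(X, k, maxiter=20, tol=1e-6, seed=0):
    # local analogue of the distributed EM iteration above (illustration only)
    n, m = X.shape
    c = np.random.RandomState(seed).randn(k, m)
    for _ in range(maxiter):
        c_old = c
        c_inv = c.T.dot(inv(c.dot(c.T)))         # (cc')^-1 c, shape (m, k)
        x = X.dot(c_inv)                         # e-step: project rows, shape (n, k)
        c = X.T.dot(x).dot(inv(x.T.dot(x))).T    # m-step: update subspace, shape (k, m)
        if np.sum((c - c_old) ** 2) < tol:
            break
    c = orth(c.T).T                              # orthonormal basis of the subspace
    proj = X.dot(c.T)                            # data projected into the subspace
    w, v = np.linalg.eigh(proj.T.dot(proj) / n)  # eigendecomposition of the small covariance
    inds = np.argsort(w)[::-1][:k]
    s = np.sqrt(w[inds]) * np.sqrt(n)            # singular values
    v = v[:, inds].T.dot(c)                      # right singular vectors in the original space
    return s, v

s, v = svd_em_local(np.random.rand(200, 30), k=5)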
test
SparkTruncatedSVD.fit_transform
Fit LSI model to X and perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array.
splearn/decomposition/truncated_svd.py
def fit_transform(self, Z): """Fit LSI model to X and perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z check_rdd(X, (sp.spmatrix, np.ndarray)) if self.algorithm == "em": X = X.persist() # boosting iterative svm Sigma, V = svd_em(X, k=self.n_components, maxiter=self.n_iter, tol=self.tol, compute_u=False, seed=self.random_state) self.components_ = V X.unpersist() return self.transform(Z) else: # TODO: raise warning non distributed return super(SparkTruncatedSVD, self).fit_transform(X.tosparse())
def fit_transform(self, Z): """Fit LSI model to X and perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z check_rdd(X, (sp.spmatrix, np.ndarray)) if self.algorithm == "em": X = X.persist() # boosting iterative svm Sigma, V = svd_em(X, k=self.n_components, maxiter=self.n_iter, tol=self.tol, compute_u=False, seed=self.random_state) self.components_ = V X.unpersist() return self.transform(Z) else: # TODO: raise warning non distributed return super(SparkTruncatedSVD, self).fit_transform(X.tosparse())
[ "Fit", "LSI", "model", "to", "X", "and", "perform", "dimensionality", "reduction", "on", "X", "." ]
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/decomposition/truncated_svd.py#L283-L308
[ "def", "fit_transform", "(", "self", ",", "Z", ")", ":", "X", "=", "Z", "[", ":", ",", "'X'", "]", "if", "isinstance", "(", "Z", ",", "DictRDD", ")", "else", "Z", "check_rdd", "(", "X", ",", "(", "sp", ".", "spmatrix", ",", "np", ".", "ndarray", ")", ")", "if", "self", ".", "algorithm", "==", "\"em\"", ":", "X", "=", "X", ".", "persist", "(", ")", "# boosting iterative svm", "Sigma", ",", "V", "=", "svd_em", "(", "X", ",", "k", "=", "self", ".", "n_components", ",", "maxiter", "=", "self", ".", "n_iter", ",", "tol", "=", "self", ".", "tol", ",", "compute_u", "=", "False", ",", "seed", "=", "self", ".", "random_state", ")", "self", ".", "components_", "=", "V", "X", ".", "unpersist", "(", ")", "return", "self", ".", "transform", "(", "Z", ")", "else", ":", "# TODO: raise warning non distributed", "return", "super", "(", "SparkTruncatedSVD", ",", "self", ")", ".", "fit_transform", "(", "X", ".", "tosparse", "(", ")", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
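A hypothetical end-to-end usage sketch, assuming a live SparkContext named sc; the constructor arguments mirror scikit-learn's TruncatedSVD and are assumptions here, as is the random input matrix.

import numpy as np
from splearn.rdd import ArrayRDD
from splearn.decomposition.truncated_svd import SparkTruncatedSVD

X = np.random.rand(1000, 40)
X_rdd = ArrayRDD(sc.parallelize(X, 4))      # rows blocked into 4 numpy blocks
svd = SparkTruncatedSVD(n_components=5)     # parameter names assumed, see note above
X_reduced = svd.fit_transform(X_rdd)        # distributed EM-SVD, then projection
X_more = svd.transform(ArrayRDD(sc.parallelize(X, 4)))   # reuse the fitted components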
test
SparkTruncatedSVD.transform
Perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array.
splearn/decomposition/truncated_svd.py
def transform(self, Z): """Perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z check_rdd(X, (sp.spmatrix, np.ndarray)) mapper = self.broadcast( super(SparkTruncatedSVD, self).transform, Z.context) return Z.transform(mapper, column='X', dtype=np.ndarray)
def transform(self, Z): """Perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z check_rdd(X, (sp.spmatrix, np.ndarray)) mapper = self.broadcast( super(SparkTruncatedSVD, self).transform, Z.context) return Z.transform(mapper, column='X', dtype=np.ndarray)
[ "Perform", "dimensionality", "reduction", "on", "X", "." ]
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/decomposition/truncated_svd.py#L310-L328
[ "def", "transform", "(", "self", ",", "Z", ")", ":", "X", "=", "Z", "[", ":", ",", "'X'", "]", "if", "isinstance", "(", "Z", ",", "DictRDD", ")", "else", "Z", "check_rdd", "(", "X", ",", "(", "sp", ".", "spmatrix", ",", "np", ".", "ndarray", ")", ")", "mapper", "=", "self", ".", "broadcast", "(", "super", "(", "SparkTruncatedSVD", ",", "self", ")", ".", "transform", ",", "Z", ".", "context", ")", "return", "Z", ".", "transform", "(", "mapper", ",", "column", "=", "'X'", ",", "dtype", "=", "np", ".", "ndarray", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
test
_block_collection
Pack rdd with a specific collection constructor.
splearn/rdd.py
def _block_collection(iterator, dtype, bsize=-1): """Pack rdd with a specific collection constructor.""" i = 0 accumulated = [] for a in iterator: if (bsize > 0) and (i >= bsize): yield _pack_accumulated(accumulated, dtype) accumulated = [] i = 0 accumulated.append(a) i += 1 if i > 0: yield _pack_accumulated(accumulated, dtype)
def _block_collection(iterator, dtype, bsize=-1): """Pack rdd with a specific collection constructor.""" i = 0 accumulated = [] for a in iterator: if (bsize > 0) and (i >= bsize): yield _pack_accumulated(accumulated, dtype) accumulated = [] i = 0 accumulated.append(a) i += 1 if i > 0: yield _pack_accumulated(accumulated, dtype)
[ "Pack", "rdd", "with", "a", "specific", "collection", "constructor", "." ]
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/rdd.py#L35-L47
[ "def", "_block_collection", "(", "iterator", ",", "dtype", ",", "bsize", "=", "-", "1", ")", ":", "i", "=", "0", "accumulated", "=", "[", "]", "for", "a", "in", "iterator", ":", "if", "(", "bsize", ">", "0", ")", "and", "(", "i", ">=", "bsize", ")", ":", "yield", "_pack_accumulated", "(", "accumulated", ",", "dtype", ")", "accumulated", "=", "[", "]", "i", "=", "0", "accumulated", ".", "append", "(", "a", ")", "i", "+=", "1", "if", "i", ">", "0", ":", "yield", "_pack_accumulated", "(", "accumulated", ",", "dtype", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
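Because _block_collection works on a plain iterator, it can be exercised without Spark; this sketch imports the private helper directly and assumes _pack_accumulated converts the accumulated list into the requested container type (np.ndarray here).

import numpy as np
from splearn.rdd import _block_collection

rows = iter(np.arange(10).reshape(5, 2))                           # five 1-d rows
blocks = list(_block_collection(rows, dtype=np.ndarray, bsize=2))
# three blocks with shapes (2, 2), (2, 2) and (1, 2)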
test
_block_tuple
Pack rdd of tuples as tuples of arrays or scipy.sparse matrices.
splearn/rdd.py
def _block_tuple(iterator, dtypes, bsize=-1): """Pack rdd of tuples as tuples of arrays or scipy.sparse matrices.""" i = 0 blocked_tuple = None for tuple_i in iterator: if blocked_tuple is None: blocked_tuple = tuple([] for _ in range(len(tuple_i))) if (bsize > 0) and (i >= bsize): yield tuple(_pack_accumulated(x, dtype) for x, dtype in zip(blocked_tuple, dtypes)) blocked_tuple = tuple([] for _ in range(len(tuple_i))) i = 0 for x_j, x in zip(tuple_i, blocked_tuple): x.append(x_j) i += 1 if i > 0: yield tuple(_pack_accumulated(x, dtype) for x, dtype in zip(blocked_tuple, dtypes))
def _block_tuple(iterator, dtypes, bsize=-1): """Pack rdd of tuples as tuples of arrays or scipy.sparse matrices.""" i = 0 blocked_tuple = None for tuple_i in iterator: if blocked_tuple is None: blocked_tuple = tuple([] for _ in range(len(tuple_i))) if (bsize > 0) and (i >= bsize): yield tuple(_pack_accumulated(x, dtype) for x, dtype in zip(blocked_tuple, dtypes)) blocked_tuple = tuple([] for _ in range(len(tuple_i))) i = 0 for x_j, x in zip(tuple_i, blocked_tuple): x.append(x_j) i += 1 if i > 0: yield tuple(_pack_accumulated(x, dtype) for x, dtype in zip(blocked_tuple, dtypes))
[ "Pack", "rdd", "of", "tuples", "as", "tuples", "of", "arrays", "or", "scipy", ".", "sparse", "matrices", "." ]
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/rdd.py#L50-L69
[ "def", "_block_tuple", "(", "iterator", ",", "dtypes", ",", "bsize", "=", "-", "1", ")", ":", "i", "=", "0", "blocked_tuple", "=", "None", "for", "tuple_i", "in", "iterator", ":", "if", "blocked_tuple", "is", "None", ":", "blocked_tuple", "=", "tuple", "(", "[", "]", "for", "_", "in", "range", "(", "len", "(", "tuple_i", ")", ")", ")", "if", "(", "bsize", ">", "0", ")", "and", "(", "i", ">=", "bsize", ")", ":", "yield", "tuple", "(", "_pack_accumulated", "(", "x", ",", "dtype", ")", "for", "x", ",", "dtype", "in", "zip", "(", "blocked_tuple", ",", "dtypes", ")", ")", "blocked_tuple", "=", "tuple", "(", "[", "]", "for", "_", "in", "range", "(", "len", "(", "tuple_i", ")", ")", ")", "i", "=", "0", "for", "x_j", ",", "x", "in", "zip", "(", "tuple_i", ",", "blocked_tuple", ")", ":", "x", ".", "append", "(", "x_j", ")", "i", "+=", "1", "if", "i", ">", "0", ":", "yield", "tuple", "(", "_pack_accumulated", "(", "x", ",", "dtype", ")", "for", "x", ",", "dtype", "in", "zip", "(", "blocked_tuple", ",", "dtypes", ")", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
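The tuple variant can be tried the same way; the sample (features, label) pairs and dtypes are illustrative, with the same assumption about _pack_accumulated as above.

import numpy as np
from splearn.rdd import _block_tuple

pairs = iter([(np.array([i, i + 1]), i % 2) for i in range(5)])
blocks = list(_block_tuple(pairs, dtypes=(np.ndarray, np.ndarray), bsize=2))
# blocks of 2, 2 and 1 rows; each element is a (features, labels) tuple of arrays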
test
block
Block an RDD Parameters ---------- rdd : RDD RDD of data points to block into either numpy arrays, scipy sparse matrices, or pandas data frames. Type of data point will be automatically inferred and blocked accordingly. bsize : int, optional, default None Size of each block (number of elements), if None all data points from each partition will be combined in a block. Returns ------- rdd : ArrayRDD or TupleRDD or DictRDD The transformed rdd with added functionality
splearn/rdd.py
def block(rdd, bsize=-1, dtype=None): """Block an RDD Parameters ---------- rdd : RDD RDD of data points to block into either numpy arrays, scipy sparse matrices, or pandas data frames. Type of data point will be automatically inferred and blocked accordingly. bsize : int, optional, default None Size of each block (number of elements), if None all data points from each partition will be combined in a block. Returns ------- rdd : ArrayRDD or TupleRDD or DictRDD The transformed rdd with added functionality """ try: entry = rdd.first() except IndexError: # empty RDD: do not block return rdd # do different kinds of block depending on the type if isinstance(entry, dict): rdd = rdd.map(lambda x: list(x.values())) return DictRDD(rdd, list(entry.keys()), bsize, dtype) elif isinstance(entry, tuple): return DictRDD(rdd, bsize=bsize, dtype=dtype) elif sp.issparse(entry): return SparseRDD(rdd, bsize) elif isinstance(entry, np.ndarray): return ArrayRDD(rdd, bsize) else: return BlockRDD(rdd, bsize, dtype)
def block(rdd, bsize=-1, dtype=None): """Block an RDD Parameters ---------- rdd : RDD RDD of data points to block into either numpy arrays, scipy sparse matrices, or pandas data frames. Type of data point will be automatically inferred and blocked accordingly. bsize : int, optional, default None Size of each block (number of elements), if None all data points from each partition will be combined in a block. Returns ------- rdd : ArrayRDD or TupleRDD or DictRDD The transformed rdd with added functionality """ try: entry = rdd.first() except IndexError: # empty RDD: do not block return rdd # do different kinds of block depending on the type if isinstance(entry, dict): rdd = rdd.map(lambda x: list(x.values())) return DictRDD(rdd, list(entry.keys()), bsize, dtype) elif isinstance(entry, tuple): return DictRDD(rdd, bsize=bsize, dtype=dtype) elif sp.issparse(entry): return SparseRDD(rdd, bsize) elif isinstance(entry, np.ndarray): return ArrayRDD(rdd, bsize) else: return BlockRDD(rdd, bsize, dtype)
[ "Block", "an", "RDD" ]
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/rdd.py#L72-L111
[ "def", "block", "(", "rdd", ",", "bsize", "=", "-", "1", ",", "dtype", "=", "None", ")", ":", "try", ":", "entry", "=", "rdd", ".", "first", "(", ")", "except", "IndexError", ":", "# empty RDD: do not block", "return", "rdd", "# do different kinds of block depending on the type", "if", "isinstance", "(", "entry", ",", "dict", ")", ":", "rdd", "=", "rdd", ".", "map", "(", "lambda", "x", ":", "list", "(", "x", ".", "values", "(", ")", ")", ")", "return", "DictRDD", "(", "rdd", ",", "list", "(", "entry", ".", "keys", "(", ")", ")", ",", "bsize", ",", "dtype", ")", "elif", "isinstance", "(", "entry", ",", "tuple", ")", ":", "return", "DictRDD", "(", "rdd", ",", "bsize", "=", "bsize", ",", "dtype", "=", "dtype", ")", "elif", "sp", ".", "issparse", "(", "entry", ")", ":", "return", "SparseRDD", "(", "rdd", ",", "bsize", ")", "elif", "isinstance", "(", "entry", ",", "np", ".", "ndarray", ")", ":", "return", "ArrayRDD", "(", "rdd", ",", "bsize", ")", "else", ":", "return", "BlockRDD", "(", "rdd", ",", "bsize", ",", "dtype", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
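A hypothetical usage sketch of block(), assuming a live SparkContext named sc; the sample records are made up.

import numpy as np
from splearn.rdd import block

vectors = [np.arange(3) for _ in range(100)]
X_rdd = block(sc.parallelize(vectors), bsize=10)    # -> ArrayRDD of (10, 3) blocks

records = [{'X': np.arange(3), 'y': 0} for _ in range(100)]
Z_rdd = block(sc.parallelize(records), bsize=10)    # -> DictRDD with columns taken from the dict keys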
test
BlockRDD._block
Execute the blocking process on the given rdd. Parameters ---------- rdd : pyspark.rdd.RDD Distributed data to block bsize : int or None The desired size of the blocks Returns ------- rdd : pyspark.rdd.RDD Blocked rdd.
splearn/rdd.py
def _block(self, rdd, bsize, dtype): """Execute the blocking process on the given rdd. Parameters ---------- rdd : pyspark.rdd.RDD Distributed data to block bsize : int or None The desired size of the blocks Returns ------- rdd : pyspark.rdd.RDD Blocked rdd. """ return rdd.mapPartitions( lambda x: _block_collection(x, dtype, bsize))
def _block(self, rdd, bsize, dtype): """Execute the blocking process on the given rdd. Parameters ---------- rdd : pyspark.rdd.RDD Distributed data to block bsize : int or None The desired size of the blocks Returns ------- rdd : pyspark.rdd.RDD Blocked rdd. """ return rdd.mapPartitions( lambda x: _block_collection(x, dtype, bsize))
[ "Execute", "the", "blocking", "process", "on", "the", "given", "rdd", "." ]
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/rdd.py#L132-L148
[ "def", "_block", "(", "self", ",", "rdd", ",", "bsize", ",", "dtype", ")", ":", "return", "rdd", ".", "mapPartitions", "(", "lambda", "x", ":", "_block_collection", "(", "x", ",", "dtype", ",", "bsize", ")", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
test
BlockRDD.transform
Equivalent to map, compatibility purpose only. Column parameter ignored.
splearn/rdd.py
def transform(self, fn, dtype=None, *args, **kwargs): """Equivalent to map, compatibility purpose only. Column parameter ignored. """ rdd = self._rdd.map(fn) if dtype is None: return self.__class__(rdd, noblock=True, **self.get_params()) if dtype is np.ndarray: return ArrayRDD(rdd, bsize=self.bsize, noblock=True) elif dtype is sp.spmatrix: return SparseRDD(rdd, bsize=self.bsize, noblock=True) else: return BlockRDD(rdd, bsize=self.bsize, dtype=dtype, noblock=True)
def transform(self, fn, dtype=None, *args, **kwargs): """Equivalent to map, compatibility purpose only. Column parameter ignored. """ rdd = self._rdd.map(fn) if dtype is None: return self.__class__(rdd, noblock=True, **self.get_params()) if dtype is np.ndarray: return ArrayRDD(rdd, bsize=self.bsize, noblock=True) elif dtype is sp.spmatrix: return SparseRDD(rdd, bsize=self.bsize, noblock=True) else: return BlockRDD(rdd, bsize=self.bsize, dtype=dtype, noblock=True)
[ "Equivalent", "to", "map", "compatibility", "purpose", "only", ".", "Column", "parameter", "ignored", "." ]
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/rdd.py#L257-L270
[ "def", "transform", "(", "self", ",", "fn", ",", "dtype", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "rdd", "=", "self", ".", "_rdd", ".", "map", "(", "fn", ")", "if", "dtype", "is", "None", ":", "return", "self", ".", "__class__", "(", "rdd", ",", "noblock", "=", "True", ",", "*", "*", "self", ".", "get_params", "(", ")", ")", "if", "dtype", "is", "np", ".", "ndarray", ":", "return", "ArrayRDD", "(", "rdd", ",", "bsize", "=", "self", ".", "bsize", ",", "noblock", "=", "True", ")", "elif", "dtype", "is", "sp", ".", "spmatrix", ":", "return", "SparseRDD", "(", "rdd", ",", "bsize", "=", "self", ".", "bsize", ",", "noblock", "=", "True", ")", "else", ":", "return", "BlockRDD", "(", "rdd", ",", "bsize", "=", "self", ".", "bsize", ",", "dtype", "=", "dtype", ",", "noblock", "=", "True", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
test
ArrayLikeRDDMixin.shape
Returns the shape of the data.
splearn/rdd.py
def shape(self): """Returns the shape of the data.""" # TODO cache first = self.first().shape shape = self._rdd.map(lambda x: x.shape[0]).sum() return (shape,) + first[1:]
def shape(self): """Returns the shape of the data.""" # TODO cache first = self.first().shape shape = self._rdd.map(lambda x: x.shape[0]).sum() return (shape,) + first[1:]
[ "Returns", "the", "shape", "of", "the", "data", "." ]
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/rdd.py#L303-L308
[ "def", "shape", "(", "self", ")", ":", "# TODO cache", "first", "=", "self", ".", "first", "(", ")", ".", "shape", "shape", "=", "self", ".", "_rdd", ".", "map", "(", "lambda", "x", ":", "x", ".", "shape", "[", "0", "]", ")", ".", "sum", "(", ")", "return", "(", "shape", ",", ")", "+", "first", "[", "1", ":", "]" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
test
SparseRDD.toarray
Returns the data as numpy.array from each partition.
splearn/rdd.py
def toarray(self): """Returns the data as numpy.array from each partition.""" rdd = self._rdd.map(lambda x: x.toarray()) return np.concatenate(rdd.collect())
def toarray(self): """Returns the data as numpy.array from each partition.""" rdd = self._rdd.map(lambda x: x.toarray()) return np.concatenate(rdd.collect())
[ "Returns", "the", "data", "as", "numpy", ".", "array", "from", "each", "partition", "." ]
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/rdd.py#L491-L494
[ "def", "toarray", "(", "self", ")", ":", "rdd", "=", "self", ".", "_rdd", ".", "map", "(", "lambda", "x", ":", "x", ".", "toarray", "(", ")", ")", "return", "np", ".", "concatenate", "(", "rdd", ".", "collect", "(", ")", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
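The per-block conversion above is the same pattern as this purely local scipy/NumPy snippet:

import numpy as np
import scipy.sparse as sp

blocks = [sp.csr_matrix(np.eye(2)), sp.csr_matrix(np.ones((3, 2)))]
dense = np.concatenate([b.toarray() for b in blocks])   # shape (5, 2)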
test
DictRDD._block
Execute the blocking process on the given rdd. Parameters ---------- rdd : pyspark.rdd.RDD Distributed data to block bsize : int or None The desired size of the blocks Returns ------- rdd : pyspark.rdd.RDD Blocked rdd.
splearn/rdd.py
def _block(self, rdd, bsize, dtype): """Execute the blocking process on the given rdd. Parameters ---------- rdd : pyspark.rdd.RDD Distributed data to block bsize : int or None The desired size of the blocks Returns ------- rdd : pyspark.rdd.RDD Blocked rdd. """ return rdd.mapPartitions(lambda x: _block_tuple(x, dtype, bsize))
def _block(self, rdd, bsize, dtype): """Execute the blocking process on the given rdd. Parameters ---------- rdd : pyspark.rdd.RDD Distributed data to block bsize : int or None The desired size of the blocks Returns ------- rdd : pyspark.rdd.RDD Blocked rdd. """ return rdd.mapPartitions(lambda x: _block_tuple(x, dtype, bsize))
[ "Execute", "the", "blocking", "process", "on", "the", "given", "rdd", "." ]
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/rdd.py#L625-L640
[ "def", "_block", "(", "self", ",", "rdd", ",", "bsize", ",", "dtype", ")", ":", "return", "rdd", ".", "mapPartitions", "(", "lambda", "x", ":", "_block_tuple", "(", "x", ",", "dtype", ",", "bsize", ")", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
test
DictRDD.transform
Execute a transformation on a column or columns. Returns the modified DictRDD. Parameters ---------- fn : function The function to execute on the columns. column : {str, list or None} The column(s) to transform. If None is specified the method is equivalent to map. dtype : {type, list or None} The dtype of the transformed column(s). Returns ------- result : DictRDD DictRDD with transformed column(s). TODO: optimize
splearn/rdd.py
def transform(self, fn, column=None, dtype=None): """Execute a transformation on a column or columns. Returns the modified DictRDD. Parameters ---------- f : function The function to execute on the columns. column : {str, list or None} The column(s) to transform. If None is specified the method is equivalent to map. column : {str, list or None} The dtype of the column(s) to transform. Returns ------- result : DictRDD DictRDD with transformed column(s). TODO: optimize """ dtypes = self.dtype if column is None: indices = list(range(len(self.columns))) else: if not type(column) in (list, tuple): column = [column] indices = [self.columns.index(c) for c in column] if dtype is not None: if not type(dtype) in (list, tuple): dtype = [dtype] dtypes = [dtype[indices.index(i)] if i in indices else t for i, t in enumerate(self.dtype)] def mapper(values): result = fn(*[values[i] for i in indices]) if len(indices) == 1: result = (result,) elif not isinstance(result, (tuple, list)): raise ValueError("Transformer function must return an" " iterable!") elif len(result) != len(indices): raise ValueError("Transformer result's length must be" " equal to the given columns length!") return tuple(result[indices.index(i)] if i in indices else v for i, v in enumerate(values)) return DictRDD(self._rdd.map(mapper), columns=self.columns, dtype=dtypes, bsize=self.bsize, noblock=True)
def transform(self, fn, column=None, dtype=None): """Execute a transformation on a column or columns. Returns the modified DictRDD. Parameters ---------- f : function The function to execute on the columns. column : {str, list or None} The column(s) to transform. If None is specified the method is equivalent to map. column : {str, list or None} The dtype of the column(s) to transform. Returns ------- result : DictRDD DictRDD with transformed column(s). TODO: optimize """ dtypes = self.dtype if column is None: indices = list(range(len(self.columns))) else: if not type(column) in (list, tuple): column = [column] indices = [self.columns.index(c) for c in column] if dtype is not None: if not type(dtype) in (list, tuple): dtype = [dtype] dtypes = [dtype[indices.index(i)] if i in indices else t for i, t in enumerate(self.dtype)] def mapper(values): result = fn(*[values[i] for i in indices]) if len(indices) == 1: result = (result,) elif not isinstance(result, (tuple, list)): raise ValueError("Transformer function must return an" " iterable!") elif len(result) != len(indices): raise ValueError("Transformer result's length must be" " equal to the given columns length!") return tuple(result[indices.index(i)] if i in indices else v for i, v in enumerate(values)) return DictRDD(self._rdd.map(mapper), columns=self.columns, dtype=dtypes, bsize=self.bsize, noblock=True)
[ "Execute", "a", "transformation", "on", "a", "column", "or", "columns", ".", "Returns", "the", "modified", "DictRDD", "." ]
lensacom/sparkit-learn
python
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/rdd.py#L716-L768
[ "def", "transform", "(", "self", ",", "fn", ",", "column", "=", "None", ",", "dtype", "=", "None", ")", ":", "dtypes", "=", "self", ".", "dtype", "if", "column", "is", "None", ":", "indices", "=", "list", "(", "range", "(", "len", "(", "self", ".", "columns", ")", ")", ")", "else", ":", "if", "not", "type", "(", "column", ")", "in", "(", "list", ",", "tuple", ")", ":", "column", "=", "[", "column", "]", "indices", "=", "[", "self", ".", "columns", ".", "index", "(", "c", ")", "for", "c", "in", "column", "]", "if", "dtype", "is", "not", "None", ":", "if", "not", "type", "(", "dtype", ")", "in", "(", "list", ",", "tuple", ")", ":", "dtype", "=", "[", "dtype", "]", "dtypes", "=", "[", "dtype", "[", "indices", ".", "index", "(", "i", ")", "]", "if", "i", "in", "indices", "else", "t", "for", "i", ",", "t", "in", "enumerate", "(", "self", ".", "dtype", ")", "]", "def", "mapper", "(", "values", ")", ":", "result", "=", "fn", "(", "*", "[", "values", "[", "i", "]", "for", "i", "in", "indices", "]", ")", "if", "len", "(", "indices", ")", "==", "1", ":", "result", "=", "(", "result", ",", ")", "elif", "not", "isinstance", "(", "result", ",", "(", "tuple", ",", "list", ")", ")", ":", "raise", "ValueError", "(", "\"Transformer function must return an\"", "\" iterable!\"", ")", "elif", "len", "(", "result", ")", "!=", "len", "(", "indices", ")", ":", "raise", "ValueError", "(", "\"Transformer result's length must be\"", "\" equal to the given columns length!\"", ")", "return", "tuple", "(", "result", "[", "indices", ".", "index", "(", "i", ")", "]", "if", "i", "in", "indices", "else", "v", "for", "i", ",", "v", "in", "enumerate", "(", "values", ")", ")", "return", "DictRDD", "(", "self", ".", "_rdd", ".", "map", "(", "mapper", ")", ",", "columns", "=", "self", ".", "columns", ",", "dtype", "=", "dtypes", ",", "bsize", "=", "self", ".", "bsize", ",", "noblock", "=", "True", ")" ]
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
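A hypothetical sketch of a column-wise transform, assuming Z_rdd is a DictRDD with columns ('X', 'y') like the one built in the block() example above; the single-column form mirrors how SparkTruncatedSVD.transform calls it.

import numpy as np

Z_scaled = Z_rdd.transform(lambda X: X * 2.0, column='X', dtype=np.ndarray)   # touch only 'X'
Z_both = Z_rdd.transform(lambda X, y: (X * 2.0, y), column=['X', 'y'])        # multi-column form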
test
bitperm
Returns zero if there are no permissions for a bit of the perm. of a file. Otherwise it returns a positive value :param os.stat_result s: os.stat(file) object :param str perm: R (Read) or W (Write) or X (eXecute) :param str pos: USR (USeR) or GRP (GRouP) or OTH (OTHer) :return: mask value :rtype: int
amazon_dash/config.py
def bitperm(s, perm, pos): """Returns zero if there are no permissions for a bit of the perm. of a file. Otherwise it returns a positive value :param os.stat_result s: os.stat(file) object :param str perm: R (Read) or W (Write) or X (eXecute) :param str pos: USR (USeR) or GRP (GRouP) or OTH (OTHer) :return: mask value :rtype: int """ perm = perm.upper() pos = pos.upper() assert perm in ['R', 'W', 'X'] assert pos in ['USR', 'GRP', 'OTH'] return s.st_mode & getattr(stat, 'S_I{}{}'.format(perm, pos))
def bitperm(s, perm, pos): """Returns zero if there are no permissions for a bit of the perm. of a file. Otherwise it returns a positive value :param os.stat_result s: os.stat(file) object :param str perm: R (Read) or W (Write) or X (eXecute) :param str pos: USR (USeR) or GRP (GRouP) or OTH (OTHer) :return: mask value :rtype: int """ perm = perm.upper() pos = pos.upper() assert perm in ['R', 'W', 'X'] assert pos in ['USR', 'GRP', 'OTH'] return s.st_mode & getattr(stat, 'S_I{}{}'.format(perm, pos))
[ "Returns", "zero", "if", "there", "are", "no", "permissions", "for", "a", "bit", "of", "the", "perm", ".", "of", "a", "file", ".", "Otherwise", "it", "returns", "a", "positive", "value" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/config.py#L159-L172
[ "def", "bitperm", "(", "s", ",", "perm", ",", "pos", ")", ":", "perm", "=", "perm", ".", "upper", "(", ")", "pos", "=", "pos", ".", "upper", "(", ")", "assert", "perm", "in", "[", "'R'", ",", "'W'", ",", "'X'", "]", "assert", "pos", "in", "[", "'USR'", ",", "'GRP'", ",", "'OTH'", "]", "return", "s", ".", "st_mode", "&", "getattr", "(", "stat", ",", "'S_I{}{}'", ".", "format", "(", "perm", ",", "pos", ")", ")" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
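bitperm only needs an os.stat result, so it can be tried on any local file; the path below is just an example.

import os
from amazon_dash.config import bitperm

s = os.stat('/etc/hosts')                        # any existing file will do
world_writable = bool(bitperm(s, 'w', 'oth'))    # True if the write bit is set for others
group_readable = bool(bitperm(s, 'r', 'grp'))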
test
only_root_write
File is only writable by root :param str path: Path to file :return: True if only root can write :rtype: bool
amazon_dash/config.py
def only_root_write(path): """File is only writable by root :param str path: Path to file :return: True if only root can write :rtype: bool """ s = os.stat(path) for ug, bp in [(s.st_uid, bitperm(s, 'w', 'usr')), (s.st_gid, bitperm(s, 'w', 'grp'))]: # User id (is not root) and bit permission if ug and bp: return False if bitperm(s, 'w', 'oth'): return False return True
def only_root_write(path): """File is only writable by root :param str path: Path to file :return: True if only root can write :rtype: bool """ s = os.stat(path) for ug, bp in [(s.st_uid, bitperm(s, 'w', 'usr')), (s.st_gid, bitperm(s, 'w', 'grp'))]: # User id (is not root) and bit permission if ug and bp: return False if bitperm(s, 'w', 'oth'): return False return True
[ "File", "is", "only", "writable", "by", "root" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/config.py#L185-L199
[ "def", "only_root_write", "(", "path", ")", ":", "s", "=", "os", ".", "stat", "(", "path", ")", "for", "ug", ",", "bp", "in", "[", "(", "s", ".", "st_uid", ",", "bitperm", "(", "s", ",", "'w'", ",", "'usr'", ")", ")", ",", "(", "s", ".", "st_gid", ",", "bitperm", "(", "s", ",", "'w'", ",", "'grp'", ")", ")", "]", ":", "# User id (is not root) and bit permission", "if", "ug", "and", "bp", ":", "return", "False", "if", "bitperm", "(", "s", ",", "'w'", ",", "'oth'", ")", ":", "return", "False", "return", "True" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
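Typical outcomes of the check above; the exact results depend on the machine, so the comments only describe what one would usually expect.

import os
from amazon_dash.config import only_root_write

only_root_write('/etc/passwd')            # usually True: owned by root and not world-writable
only_root_write(os.path.expanduser('~'))  # usually False: the home directory is writable by its owner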
test
check_config
Command to check configuration file. Raises InvalidConfig on error :param str file: path to config file :param printfn: print function for success message :return: None
amazon_dash/config.py
def check_config(file, printfn=print): """Command to check configuration file. Raises InvalidConfig on error :param str file: path to config file :param printfn: print function for success message :return: None """ Config(file).read() printfn('The configuration file "{}" is correct'.format(file))
def check_config(file, printfn=print): """Command to check configuration file. Raises InvalidConfig on error :param str file: path to config file :param printfn: print function for success message :return: None """ Config(file).read() printfn('The configuration file "{}" is correct'.format(file))
[ "Command", "to", "check", "configuration", "file", ".", "Raises", "InvalidConfig", "on", "error" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/config.py#L243-L251
[ "def", "check_config", "(", "file", ",", "printfn", "=", "print", ")", ":", "Config", "(", "file", ")", ".", "read", "(", ")", "printfn", "(", "'The configuration file \"{}\" is correct'", ".", "format", "(", "file", ")", ")" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
test
Config.read
Parse and validate the config file. The read data is accessible as a dictionary in this instance :return: None
amazon_dash/config.py
def read(self): """Parse and validate the config file. The read data is accessible as a dictionary in this instance :return: None """ try: data = load(open(self.file), Loader) except (UnicodeDecodeError, YAMLError) as e: raise InvalidConfig(self.file, '{}'.format(e)) try: validate(data, SCHEMA) except ValidationError as e: raise InvalidConfig(self.file, e) self.update(data)
def read(self): """Parse and validate the config file. The read data is accessible as a dictionary in this instance :return: None """ try: data = load(open(self.file), Loader) except (UnicodeDecodeError, YAMLError) as e: raise InvalidConfig(self.file, '{}'.format(e)) try: validate(data, SCHEMA) except ValidationError as e: raise InvalidConfig(self.file, e) self.update(data)
[ "Parse", "and", "validate", "the", "config", "file", ".", "The", "read", "data", "is", "accessible", "as", "a", "dictionary", "in", "this", "instance" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/config.py#L227-L240
[ "def", "read", "(", "self", ")", ":", "try", ":", "data", "=", "load", "(", "open", "(", "self", ".", "file", ")", ",", "Loader", ")", "except", "(", "UnicodeDecodeError", ",", "YAMLError", ")", "as", "e", ":", "raise", "InvalidConfig", "(", "self", ".", "file", ",", "'{}'", ".", "format", "(", "e", ")", ")", "try", ":", "validate", "(", "data", ",", "SCHEMA", ")", "except", "ValidationError", "as", "e", ":", "raise", "InvalidConfig", "(", "self", ".", "file", ",", "e", ")", "self", ".", "update", "(", "data", ")" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
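The same load-then-validate pattern written out standalone, assuming the validate/ValidationError used above come from jsonschema; the file name and the tiny schema are made up for illustration (the real SCHEMA lives in amazon_dash/config.py).

from yaml import safe_load, YAMLError
from jsonschema import validate, ValidationError

try:
    with open('amazon-dash.yml') as f:     # hypothetical config path
        data = safe_load(f)
    validate(data, {'type': 'object', 'required': ['devices']})   # toy schema
except (YAMLError, ValidationError) as exc:
    raise SystemExit('invalid config: {}'.format(exc))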
test
run_as_cmd
Get the arguments to execute a command as a user :param str cmd: command to execute :param user: User for use :param shell: Bash, zsh, etc. :return: arguments :rtype: list
amazon_dash/execute.py
def run_as_cmd(cmd, user, shell='bash'): """Get the arguments to execute a command as a user :param str cmd: command to execute :param user: User for use :param shell: Bash, zsh, etc. :return: arguments :rtype: list """ to_execute = get_shell(shell) + [EXECUTE_SHELL_PARAM, cmd] if user == 'root': return to_execute return ['sudo', '-s', '--set-home', '-u', user] + to_execute
def run_as_cmd(cmd, user, shell='bash'): """Get the arguments to execute a command as a user :param str cmd: command to execute :param user: User for use :param shell: Bash, zsh, etc. :return: arguments :rtype: list """ to_execute = get_shell(shell) + [EXECUTE_SHELL_PARAM, cmd] if user == 'root': return to_execute return ['sudo', '-s', '--set-home', '-u', user] + to_execute
[ "Get", "the", "arguments", "to", "execute", "a", "command", "as", "a", "user" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/execute.py#L39-L51
[ "def", "run_as_cmd", "(", "cmd", ",", "user", ",", "shell", "=", "'bash'", ")", ":", "to_execute", "=", "get_shell", "(", "shell", ")", "+", "[", "EXECUTE_SHELL_PARAM", ",", "cmd", "]", "if", "user", "==", "'root'", ":", "return", "to_execute", "return", "[", "'sudo'", ",", "'-s'", ",", "'--set-home'", ",", "'-u'", ",", "user", "]", "+", "to_execute" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
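Illustrative results, assuming get_shell('bash') resolves to ['/usr/bin/env', 'bash'] and EXECUTE_SHELL_PARAM is '-c', as the comment in execute_over_ssh below suggests; the commands and user are made up.

from amazon_dash.execute import run_as_cmd

run_as_cmd('touch /tmp/example', user='www-data')
# -> ['sudo', '-s', '--set-home', '-u', 'www-data', '/usr/bin/env', 'bash', '-c', 'touch /tmp/example']
run_as_cmd('touch /tmp/example', user='root')
# -> ['/usr/bin/env', 'bash', '-c', 'touch /tmp/example']   (no sudo wrapper for root)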
test
execute_cmd
Execute command on thread :param cmd: Command to execute :param cwd: current working directory :return: None
amazon_dash/execute.py
def execute_cmd(cmd, cwd=None, timeout=5): """Excecute command on thread :param cmd: Command to execute :param cwd: current working directory :return: None """ p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) try: p.wait(timeout=timeout) except subprocess.TimeoutExpired: return None else: stdout, stderr = p.stdout.read(), p.stderr.read() if sys.version_info >= (3,): stdout, stderr = stdout.decode('utf-8', errors='ignore'), stderr.decode('utf-8', errors='ignore') if p.returncode: raise ExecuteError('Error running command {}: The error code {} has returned. Stderr: {}'.format( ' '.join(cmd), p.returncode, stderr )) else: return stdout, stderr
def execute_cmd(cmd, cwd=None, timeout=5): """Excecute command on thread :param cmd: Command to execute :param cwd: current working directory :return: None """ p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) try: p.wait(timeout=timeout) except subprocess.TimeoutExpired: return None else: stdout, stderr = p.stdout.read(), p.stderr.read() if sys.version_info >= (3,): stdout, stderr = stdout.decode('utf-8', errors='ignore'), stderr.decode('utf-8', errors='ignore') if p.returncode: raise ExecuteError('Error running command {}: The error code {} has returned. Stderr: {}'.format( ' '.join(cmd), p.returncode, stderr )) else: return stdout, stderr
[ "Excecute", "command", "on", "thread" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/execute.py#L54-L75
[ "def", "execute_cmd", "(", "cmd", ",", "cwd", "=", "None", ",", "timeout", "=", "5", ")", ":", "p", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "cwd", "=", "cwd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "try", ":", "p", ".", "wait", "(", "timeout", "=", "timeout", ")", "except", "subprocess", ".", "TimeoutExpired", ":", "return", "None", "else", ":", "stdout", ",", "stderr", "=", "p", ".", "stdout", ".", "read", "(", ")", ",", "p", ".", "stderr", ".", "read", "(", ")", "if", "sys", ".", "version_info", ">=", "(", "3", ",", ")", ":", "stdout", ",", "stderr", "=", "stdout", ".", "decode", "(", "'utf-8'", ",", "errors", "=", "'ignore'", ")", ",", "stderr", ".", "decode", "(", "'utf-8'", ",", "errors", "=", "'ignore'", ")", "if", "p", ".", "returncode", ":", "raise", "ExecuteError", "(", "'Error running command {}: The error code {} has returned. Stderr: {}'", ".", "format", "(", "' '", ".", "join", "(", "cmd", ")", ",", "p", ".", "returncode", ",", "stderr", ")", ")", "else", ":", "return", "stdout", ",", "stderr" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
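A quick local check of the behaviour above; the echoed text is arbitrary.

from amazon_dash.execute import execute_cmd

out, err = execute_cmd(['/usr/bin/env', 'bash', '-c', 'echo hello'])
# out == 'hello\n'; a non-zero exit raises ExecuteError, and None is returned
# if the process is still running after `timeout` seconds.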
test
execute_over_ssh
Execute command on remote machine using SSH :param cmd: Command to execute :param ssh: Server to connect. Port is optional :param cwd: current working directory :return: None
amazon_dash/execute.py
def execute_over_ssh(cmd, ssh, cwd=None, shell='bash'): """Excecute command on remote machine using SSH :param cmd: Command to execute :param ssh: Server to connect. Port is optional :param cwd: current working directory :return: None """ port = None parts = ssh.split(':', 1) if len(parts) > 1 and not parts[1].isdigit(): raise InvalidConfig(extra_body='Invalid port number on ssh config: {}'.format(parts[1])) elif len(parts) > 1: port = parts[1] quoted_cmd = ' '.join([x.replace("'", """'"'"'""") for x in cmd.split(' ')]) remote_cmd = ' '.join([ ' '.join(get_shell(shell)), # /usr/bin/env bash ' '.join([EXECUTE_SHELL_PARAM, "'", ' '.join((['cd', cwd, ';'] if cwd else []) + [quoted_cmd]), "'"])], ) return ['ssh', parts[0]] + (['-p', port] if port else []) + ['-C'] + [remote_cmd]
def execute_over_ssh(cmd, ssh, cwd=None, shell='bash'): """Excecute command on remote machine using SSH :param cmd: Command to execute :param ssh: Server to connect. Port is optional :param cwd: current working directory :return: None """ port = None parts = ssh.split(':', 1) if len(parts) > 1 and not parts[1].isdigit(): raise InvalidConfig(extra_body='Invalid port number on ssh config: {}'.format(parts[1])) elif len(parts) > 1: port = parts[1] quoted_cmd = ' '.join([x.replace("'", """'"'"'""") for x in cmd.split(' ')]) remote_cmd = ' '.join([ ' '.join(get_shell(shell)), # /usr/bin/env bash ' '.join([EXECUTE_SHELL_PARAM, "'", ' '.join((['cd', cwd, ';'] if cwd else []) + [quoted_cmd]), "'"])], ) return ['ssh', parts[0]] + (['-p', port] if port else []) + ['-C'] + [remote_cmd]
[ "Excecute", "command", "on", "remote", "machine", "using", "SSH" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/execute.py#L78-L97
[ "def", "execute_over_ssh", "(", "cmd", ",", "ssh", ",", "cwd", "=", "None", ",", "shell", "=", "'bash'", ")", ":", "port", "=", "None", "parts", "=", "ssh", ".", "split", "(", "':'", ",", "1", ")", "if", "len", "(", "parts", ")", ">", "1", "and", "not", "parts", "[", "1", "]", ".", "isdigit", "(", ")", ":", "raise", "InvalidConfig", "(", "extra_body", "=", "'Invalid port number on ssh config: {}'", ".", "format", "(", "parts", "[", "1", "]", ")", ")", "elif", "len", "(", "parts", ")", ">", "1", ":", "port", "=", "parts", "[", "1", "]", "quoted_cmd", "=", "' '", ".", "join", "(", "[", "x", ".", "replace", "(", "\"'\"", ",", "\"\"\"'\"'\"'\"\"\"", ")", "for", "x", "in", "cmd", ".", "split", "(", "' '", ")", "]", ")", "remote_cmd", "=", "' '", ".", "join", "(", "[", "' '", ".", "join", "(", "get_shell", "(", "shell", ")", ")", ",", "# /usr/bin/env bash", "' '", ".", "join", "(", "[", "EXECUTE_SHELL_PARAM", ",", "\"'\"", ",", "' '", ".", "join", "(", "(", "[", "'cd'", ",", "cwd", ",", "';'", "]", "if", "cwd", "else", "[", "]", ")", "+", "[", "quoted_cmd", "]", ")", ",", "\"'\"", "]", ")", "]", ",", ")", "return", "[", "'ssh'", ",", "parts", "[", "0", "]", "]", "+", "(", "[", "'-p'", ",", "port", "]", "if", "port", "else", "[", "]", ")", "+", "[", "'-C'", "]", "+", "[", "remote_cmd", "]" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
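An illustrative trace, with the same assumptions about get_shell and EXECUTE_SHELL_PARAM as above; host and port are made up, and the quoted remote string is only approximate.

from amazon_dash.execute import execute_over_ssh

execute_over_ssh('echo hello', 'pi@raspberry:2222', cwd='/tmp')
# -> roughly ['ssh', 'pi@raspberry', '-p', '2222', '-C',
#             "/usr/bin/env bash -c ' cd /tmp ; echo hello '"]
execute_over_ssh('echo hello', 'pi@raspberry:abc')   # raises InvalidConfig: bad port number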
test
ExecuteCmd.execute
Execute using self.data :param bool root_allowed: Allow execute as root commands :return:
amazon_dash/execute.py
def execute(self, root_allowed=False): """Execute using self.data :param bool root_allowed: Allow execute as root commands :return: """ if self.user == ROOT_USER and not root_allowed and not self.data.get('ssh'): raise SecurityException('For security, execute commands as root is not allowed. ' 'Use --root-allowed to allow executing commands as root. ' ' It is however recommended to add a user to the configuration ' 'of the device (device: {})'.format(self.name)) if self.data.get('user') and self.data.get('ssh'): raise InvalidConfig('User option is unsupported in ssh mode. The ssh user must be defined in ' 'the ssh option. For example: user@machine') if self.data.get('ssh'): cmd = execute_over_ssh(self.data['cmd'], self.data['ssh'], self.data.get('cwd')) output = execute_cmd(cmd) else: cmd = run_as_cmd(self.data['cmd'], self.user) output = execute_cmd(cmd, self.data.get('cwd')) if output: return output[0]
def execute(self, root_allowed=False): """Execute using self.data :param bool root_allowed: Allow execute as root commands :return: """ if self.user == ROOT_USER and not root_allowed and not self.data.get('ssh'): raise SecurityException('For security, execute commands as root is not allowed. ' 'Use --root-allowed to allow executing commands as root. ' ' It is however recommended to add a user to the configuration ' 'of the device (device: {})'.format(self.name)) if self.data.get('user') and self.data.get('ssh'): raise InvalidConfig('User option is unsupported in ssh mode. The ssh user must be defined in ' 'the ssh option. For example: user@machine') if self.data.get('ssh'): cmd = execute_over_ssh(self.data['cmd'], self.data['ssh'], self.data.get('cwd')) output = execute_cmd(cmd) else: cmd = run_as_cmd(self.data['cmd'], self.user) output = execute_cmd(cmd, self.data.get('cwd')) if output: return output[0]
[ "Execute", "using", "self", ".", "data" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/execute.py#L151-L172
[ "def", "execute", "(", "self", ",", "root_allowed", "=", "False", ")", ":", "if", "self", ".", "user", "==", "ROOT_USER", "and", "not", "root_allowed", "and", "not", "self", ".", "data", ".", "get", "(", "'ssh'", ")", ":", "raise", "SecurityException", "(", "'For security, execute commands as root is not allowed. '", "'Use --root-allowed to allow executing commands as root. '", "' It is however recommended to add a user to the configuration '", "'of the device (device: {})'", ".", "format", "(", "self", ".", "name", ")", ")", "if", "self", ".", "data", ".", "get", "(", "'user'", ")", "and", "self", ".", "data", ".", "get", "(", "'ssh'", ")", ":", "raise", "InvalidConfig", "(", "'User option is unsupported in ssh mode. The ssh user must be defined in '", "'the ssh option. For example: user@machine'", ")", "if", "self", ".", "data", ".", "get", "(", "'ssh'", ")", ":", "cmd", "=", "execute_over_ssh", "(", "self", ".", "data", "[", "'cmd'", "]", ",", "self", ".", "data", "[", "'ssh'", "]", ",", "self", ".", "data", ".", "get", "(", "'cwd'", ")", ")", "output", "=", "execute_cmd", "(", "cmd", ")", "else", ":", "cmd", "=", "run_as_cmd", "(", "self", ".", "data", "[", "'cmd'", "]", ",", "self", ".", "user", ")", "output", "=", "execute_cmd", "(", "cmd", ",", "self", ".", "data", ".", "get", "(", "'cwd'", ")", ")", "if", "output", ":", "return", "output", "[", "0", "]" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
test
ExecuteUrl.validate
Check self.data. Raise InvalidConfig on error :return: None
amazon_dash/execute.py
def validate(self): """Check self.data. Raise InvalidConfig on error :return: None """ if (self.data.get('content-type') or self.data.get('body')) and \ self.data.get('method', '').lower() not in CONTENT_TYPE_METHODS: raise InvalidConfig( extra_body='The body/content-type option only can be used with the {} methods. The device is {}. ' 'Check the configuration file.'.format(', '.join(CONTENT_TYPE_METHODS), self.name) ) self.data['content-type'] = CONTENT_TYPE_ALIASES.get(self.data.get('content-type'), self.data.get('content-type')) form_type = CONTENT_TYPE_ALIASES['form'] if self.data.get('body') and (self.data.get('content-type') or form_type) == form_type: try: self.data['body'] = json.loads(self.data['body']) except JSONDecodeError: raise InvalidConfig( extra_body='Invalid JSON body on {} device.'.format(self.name) )
def validate(self): """Check self.data. Raise InvalidConfig on error :return: None """ if (self.data.get('content-type') or self.data.get('body')) and \ self.data.get('method', '').lower() not in CONTENT_TYPE_METHODS: raise InvalidConfig( extra_body='The body/content-type option only can be used with the {} methods. The device is {}. ' 'Check the configuration file.'.format(', '.join(CONTENT_TYPE_METHODS), self.name) ) self.data['content-type'] = CONTENT_TYPE_ALIASES.get(self.data.get('content-type'), self.data.get('content-type')) form_type = CONTENT_TYPE_ALIASES['form'] if self.data.get('body') and (self.data.get('content-type') or form_type) == form_type: try: self.data['body'] = json.loads(self.data['body']) except JSONDecodeError: raise InvalidConfig( extra_body='Invalid JSON body on {} device.'.format(self.name) )
[ "Check", "self", ".", "data", ".", "Raise", "InvalidConfig", "on", "error" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/execute.py#L179-L199
[ "def", "validate", "(", "self", ")", ":", "if", "(", "self", ".", "data", ".", "get", "(", "'content-type'", ")", "or", "self", ".", "data", ".", "get", "(", "'body'", ")", ")", "and", "self", ".", "data", ".", "get", "(", "'method'", ",", "''", ")", ".", "lower", "(", ")", "not", "in", "CONTENT_TYPE_METHODS", ":", "raise", "InvalidConfig", "(", "extra_body", "=", "'The body/content-type option only can be used with the {} methods. The device is {}. '", "'Check the configuration file.'", ".", "format", "(", "', '", ".", "join", "(", "CONTENT_TYPE_METHODS", ")", ",", "self", ".", "name", ")", ")", "self", ".", "data", "[", "'content-type'", "]", "=", "CONTENT_TYPE_ALIASES", ".", "get", "(", "self", ".", "data", ".", "get", "(", "'content-type'", ")", ",", "self", ".", "data", ".", "get", "(", "'content-type'", ")", ")", "form_type", "=", "CONTENT_TYPE_ALIASES", "[", "'form'", "]", "if", "self", ".", "data", ".", "get", "(", "'body'", ")", "and", "(", "self", ".", "data", ".", "get", "(", "'content-type'", ")", "or", "form_type", ")", "==", "form_type", ":", "try", ":", "self", ".", "data", "[", "'body'", "]", "=", "json", ".", "loads", "(", "self", ".", "data", "[", "'body'", "]", ")", "except", "JSONDecodeError", ":", "raise", "InvalidConfig", "(", "extra_body", "=", "'Invalid JSON body on {} device.'", ".", "format", "(", "self", ".", "name", ")", ")" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
test
ExecuteUrl.execute
Execute using self.data :param bool root_allowed: Only used for ExecuteCmd :return:
amazon_dash/execute.py
def execute(self, root_allowed=False): """Execute using self.data :param bool root_allowed: Only used for ExecuteCmd :return: """ kwargs = {'stream': True, 'timeout': 15, 'headers': self.data.get('headers', {})} if self.data.get('content-type'): kwargs['headers']['content-type'] = self.data['content-type'] if self.data.get('body'): kwargs['data'] = self.data['body'] if self.data.get('auth'): kwargs['auth'] = tuple(self.data['auth'].split(':', 1)) try: resp = request(self.data.get('method', 'get').lower(), self.data['url'], verify=self.data.get('verify', True), **kwargs) except RequestException as e: raise ExecuteError('Exception on request to {}: {}'.format(self.data['url'], e)) if resp.status_code >= 400: raise ExecuteError('"{}" return code {}.'.format(self.data['url'], resp.status_code)) data = resp.raw.read(1000, decode_content=True) if sys.version_info >= (3,): data = data.decode('utf-8', errors='ignore') return data
def execute(self, root_allowed=False): """Execute using self.data :param bool root_allowed: Only used for ExecuteCmd :return: """ kwargs = {'stream': True, 'timeout': 15, 'headers': self.data.get('headers', {})} if self.data.get('content-type'): kwargs['headers']['content-type'] = self.data['content-type'] if self.data.get('body'): kwargs['data'] = self.data['body'] if self.data.get('auth'): kwargs['auth'] = tuple(self.data['auth'].split(':', 1)) try: resp = request(self.data.get('method', 'get').lower(), self.data['url'], verify=self.data.get('verify', True), **kwargs) except RequestException as e: raise ExecuteError('Exception on request to {}: {}'.format(self.data['url'], e)) if resp.status_code >= 400: raise ExecuteError('"{}" return code {}.'.format(self.data['url'], resp.status_code)) data = resp.raw.read(1000, decode_content=True) if sys.version_info >= (3,): data = data.decode('utf-8', errors='ignore') return data
[ "Execute", "using", "self", ".", "data" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/execute.py#L201-L226
[ "def", "execute", "(", "self", ",", "root_allowed", "=", "False", ")", ":", "kwargs", "=", "{", "'stream'", ":", "True", ",", "'timeout'", ":", "15", ",", "'headers'", ":", "self", ".", "data", ".", "get", "(", "'headers'", ",", "{", "}", ")", "}", "if", "self", ".", "data", ".", "get", "(", "'content-type'", ")", ":", "kwargs", "[", "'headers'", "]", "[", "'content-type'", "]", "=", "self", ".", "data", "[", "'content-type'", "]", "if", "self", ".", "data", ".", "get", "(", "'body'", ")", ":", "kwargs", "[", "'data'", "]", "=", "self", ".", "data", "[", "'body'", "]", "if", "self", ".", "data", ".", "get", "(", "'auth'", ")", ":", "kwargs", "[", "'auth'", "]", "=", "tuple", "(", "self", ".", "data", "[", "'auth'", "]", ".", "split", "(", "':'", ",", "1", ")", ")", "try", ":", "resp", "=", "request", "(", "self", ".", "data", ".", "get", "(", "'method'", ",", "'get'", ")", ".", "lower", "(", ")", ",", "self", ".", "data", "[", "'url'", "]", ",", "verify", "=", "self", ".", "data", ".", "get", "(", "'verify'", ",", "True", ")", ",", "*", "*", "kwargs", ")", "except", "RequestException", "as", "e", ":", "raise", "ExecuteError", "(", "'Exception on request to {}: {}'", ".", "format", "(", "self", ".", "data", "[", "'url'", "]", ",", "e", ")", ")", "if", "resp", ".", "status_code", ">=", "400", ":", "raise", "ExecuteError", "(", "'\"{}\" return code {}.'", ".", "format", "(", "self", ".", "data", "[", "'url'", "]", ",", "resp", ".", "status_code", ")", ")", "data", "=", "resp", ".", "raw", ".", "read", "(", "1000", ",", "decode_content", "=", "True", ")", "if", "sys", ".", "version_info", ">=", "(", "3", ",", ")", ":", "data", "=", "data", ".", "decode", "(", "'utf-8'", ",", "errors", "=", "'ignore'", ")", "return", "data" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
test
ExecuteUrlServiceBase.get_headers
Get HTTP Headers to send. By default default_headers :return: HTTP Headers :rtype: dict
amazon_dash/execute.py
def get_headers(self): """Get HTTP Headers to send. By default default_headers :return: HTTP Headers :rtype: dict """ headers = copy.copy(self.default_headers or {}) headers.update(self.data.get('headers') or {}) return headers
def get_headers(self): """Get HTTP Headers to send. By default default_headers :return: HTTP Headers :rtype: dict """ headers = copy.copy(self.default_headers or {}) headers.update(self.data.get('headers') or {}) return headers
[ "Get", "HTTP", "Headers", "to", "send", ".", "By", "default", "default_headers" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/execute.py#L275-L283
[ "def", "get_headers", "(", "self", ")", ":", "headers", "=", "copy", ".", "copy", "(", "self", ".", "default_headers", "or", "{", "}", ")", "headers", ".", "update", "(", "self", ".", "data", ".", "get", "(", "'headers'", ")", "or", "{", "}", ")", "return", "headers" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
test
ExecuteOwnApiBase.get_url
API url :return: url :rtype: str
amazon_dash/execute.py
def get_url(self): """API url :return: url :rtype: str """ url = self.data[self.execute_name] parsed = urlparse(url) if not parsed.scheme: url = '{}://{}'.format(self.default_protocol, url) if not url.split(':')[-1].isalnum(): url += ':{}'.format(self.default_port) return url
def get_url(self): """API url :return: url :rtype: str """ url = self.data[self.execute_name] parsed = urlparse(url) if not parsed.scheme: url = '{}://{}'.format(self.default_protocol, url) if not url.split(':')[-1].isalnum(): url += ':{}'.format(self.default_port) return url
[ "API", "url" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/execute.py#L300-L312
[ "def", "get_url", "(", "self", ")", ":", "url", "=", "self", ".", "data", "[", "self", ".", "execute_name", "]", "parsed", "=", "urlparse", "(", "url", ")", "if", "not", "parsed", ".", "scheme", ":", "url", "=", "'{}://{}'", ".", "format", "(", "self", ".", "default_protocol", ",", "url", ")", "if", "not", "url", ".", "split", "(", "':'", ")", "[", "-", "1", "]", ".", "isalnum", "(", ")", ":", "url", "+=", "':{}'", ".", "format", "(", "self", ".", "default_port", ")", "return", "url" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
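A minimal standalone sketch of the URL normalisation described above; the 'http' scheme and port 8123 defaults are illustrative assumptions, not values taken from the project:
from urllib.parse import urlparse

def normalize_url(url, default_protocol='http', default_port=8123):
    # Prepend a scheme when the configured address omits one.
    if not urlparse(url).scheme:
        url = '{}://{}'.format(default_protocol, url)
    # Append a port when the last ':'-separated chunk is not already alphanumeric (i.e. no port present).
    if not url.split(':')[-1].isalnum():
        url += ':{}'.format(default_port)
    return url

# normalize_url('hassio.local') -> 'http://hassio.local:8123'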
test
ExecuteOwnApiBase.get_body
Return "data" value on self.data :return: data to send :rtype: str
amazon_dash/execute.py
def get_body(self): """Return "data" value on self.data :return: data to send :rtype: str """ if self.default_body: return self.default_body data = self.data.get('data') if isinstance(data, dict): return json.dumps(data) return data
def get_body(self): """Return "data" value on self.data :return: data to send :rtype: str """ if self.default_body: return self.default_body data = self.data.get('data') if isinstance(data, dict): return json.dumps(data) return data
[ "Return", "data", "value", "on", "self", ".", "data" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/execute.py#L314-L325
[ "def", "get_body", "(", "self", ")", ":", "if", "self", ".", "default_body", ":", "return", "self", ".", "default_body", "data", "=", "self", ".", "data", ".", "get", "(", "'data'", ")", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "return", "json", ".", "dumps", "(", "data", ")", "return", "data" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
test
ExecuteHomeAssistant.get_url
Home assistant url :return: url :rtype: str
amazon_dash/execute.py
def get_url(self): """Home assistant url :return: url :rtype: str """ url = super(ExecuteHomeAssistant, self).get_url() if not self.data.get('event'): raise InvalidConfig(extra_body='Event option is required for HomeAsistant on {} device.'.format(self.name)) url += '/api/events/{}'.format(self.data['event']) return url
def get_url(self): """Home assistant url :return: url :rtype: str """ url = super(ExecuteHomeAssistant, self).get_url() if not self.data.get('event'): raise InvalidConfig(extra_body='Event option is required for HomeAsistant on {} device.'.format(self.name)) url += '/api/events/{}'.format(self.data['event']) return url
[ "Home", "assistant", "url" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/execute.py#L336-L346
[ "def", "get_url", "(", "self", ")", ":", "url", "=", "super", "(", "ExecuteHomeAssistant", ",", "self", ")", ".", "get_url", "(", ")", "if", "not", "self", ".", "data", ".", "get", "(", "'event'", ")", ":", "raise", "InvalidConfig", "(", "extra_body", "=", "'Event option is required for HomeAsistant on {} device.'", ".", "format", "(", "self", ".", "name", ")", ")", "url", "+=", "'/api/events/{}'", ".", "format", "(", "self", ".", "data", "[", "'event'", "]", ")", "return", "url" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
test
ExecuteIFTTT.get_url
IFTTT Webhook url :return: url :rtype: str
amazon_dash/execute.py
def get_url(self): """IFTTT Webhook url :return: url :rtype: str """ if not self.data[self.execute_name]: raise InvalidConfig(extra_body='Value for IFTTT is required on {} device. Get your key here: ' 'https://ifttt.com/services/maker_webhooks/settings'.format(self.name)) if not self.data.get('event'): raise InvalidConfig(extra_body='Event option is required for IFTTT on {} device. ' 'You define the event name when creating a Webhook ' 'applet'.format(self.name)) url = self.url_pattern.format(event=self.data['event'], key=self.data[self.execute_name]) return url
def get_url(self): """IFTTT Webhook url :return: url :rtype: str """ if not self.data[self.execute_name]: raise InvalidConfig(extra_body='Value for IFTTT is required on {} device. Get your key here: ' 'https://ifttt.com/services/maker_webhooks/settings'.format(self.name)) if not self.data.get('event'): raise InvalidConfig(extra_body='Event option is required for IFTTT on {} device. ' 'You define the event name when creating a Webhook ' 'applet'.format(self.name)) url = self.url_pattern.format(event=self.data['event'], key=self.data[self.execute_name]) return url
[ "IFTTT", "Webhook", "url" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/execute.py#L392-L406
[ "def", "get_url", "(", "self", ")", ":", "if", "not", "self", ".", "data", "[", "self", ".", "execute_name", "]", ":", "raise", "InvalidConfig", "(", "extra_body", "=", "'Value for IFTTT is required on {} device. Get your key here: '", "'https://ifttt.com/services/maker_webhooks/settings'", ".", "format", "(", "self", ".", "name", ")", ")", "if", "not", "self", ".", "data", ".", "get", "(", "'event'", ")", ":", "raise", "InvalidConfig", "(", "extra_body", "=", "'Event option is required for IFTTT on {} device. '", "'You define the event name when creating a Webhook '", "'applet'", ".", "format", "(", "self", ".", "name", ")", ")", "url", "=", "self", ".", "url_pattern", ".", "format", "(", "event", "=", "self", ".", "data", "[", "'event'", "]", ",", "key", "=", "self", ".", "data", "[", "self", ".", "execute_name", "]", ")", "return", "url" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
test
pkt_text
Return source mac address for this Scapy Packet :param scapy.packet.Packet pkt: Scapy Packet :return: Mac address. Include (Amazon Device) for these devices :rtype: str
amazon_dash/discovery.py
def pkt_text(pkt): """Return source mac address for this Scapy Packet :param scapy.packet.Packet pkt: Scapy Packet :return: Mac address. Include (Amazon Device) for these devices :rtype: str """ if pkt.src.upper() in BANNED_DEVICES: body = '' elif pkt.src.upper()[:8] in AMAZON_DEVICES: body = '{} (Amazon Device)'.format(pkt.src) else: body = pkt.src return body
def pkt_text(pkt): """Return source mac address for this Scapy Packet :param scapy.packet.Packet pkt: Scapy Packet :return: Mac address. Include (Amazon Device) for these devices :rtype: str """ if pkt.src.upper() in BANNED_DEVICES: body = '' elif pkt.src.upper()[:8] in AMAZON_DEVICES: body = '{} (Amazon Device)'.format(pkt.src) else: body = pkt.src return body
[ "Return", "source", "mac", "address", "for", "this", "Scapy", "Packet" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/discovery.py#L62-L75
[ "def", "pkt_text", "(", "pkt", ")", ":", "if", "pkt", ".", "src", ".", "upper", "(", ")", "in", "BANNED_DEVICES", ":", "body", "=", "''", "elif", "pkt", ".", "src", ".", "upper", "(", ")", "[", ":", "8", "]", "in", "AMAZON_DEVICES", ":", "body", "=", "'{} (Amazon Device)'", ".", "format", "(", "pkt", ".", "src", ")", "else", ":", "body", "=", "pkt", ".", "src", "return", "body" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
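A self-contained sketch of the same MAC-prefix classification; the prefix and banned-address sets below are made-up placeholders rather than the tables shipped with amazon-dash:
AMAZON_OUI_PREFIXES = {'44:65:0D', '74:C2:46'}  # hypothetical example OUIs
BANNED_MACS = {'FF:FF:FF:FF:FF:FF'}             # hypothetical example entry

def describe_mac(mac):
    mac = mac.upper()
    if mac in BANNED_MACS:
        return ''
    if mac[:8] in AMAZON_OUI_PREFIXES:
        return '{} (Amazon Device)'.format(mac)
    return mac

# describe_mac('44:65:0d:12:34:56') -> '44:65:0D:12:34:56 (Amazon Device)'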
test
discovery_print
Scandevice callback. Register src mac to avoid src repetition. Print device on screen. :param scapy.packet.Packet pkt: Scapy Packet :return: None
amazon_dash/discovery.py
def discovery_print(pkt): """Scandevice callback. Register src mac to avoid src repetition. Print device on screen. :param scapy.packet.Packet pkt: Scapy Packet :return: None """ if pkt.src in mac_id_list: return mac_id_list.append(pkt.src) text = pkt_text(pkt) click.secho(text, fg='magenta') if 'Amazon' in text else click.echo(text)
def discovery_print(pkt): """Scandevice callback. Register src mac to avoid src repetition. Print device on screen. :param scapy.packet.Packet pkt: Scapy Packet :return: None """ if pkt.src in mac_id_list: return mac_id_list.append(pkt.src) text = pkt_text(pkt) click.secho(text, fg='magenta') if 'Amazon' in text else click.echo(text)
[ "Scandevice", "callback", ".", "Register", "src", "mac", "to", "avoid", "src", "repetition", ".", "Print", "device", "on", "screen", "." ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/discovery.py#L78-L89
[ "def", "discovery_print", "(", "pkt", ")", ":", "if", "pkt", ".", "src", "in", "mac_id_list", ":", "return", "mac_id_list", ".", "append", "(", "pkt", ".", "src", ")", "text", "=", "pkt_text", "(", "pkt", ")", "click", ".", "secho", "(", "text", ",", "fg", "=", "'magenta'", ")", "if", "'Amazon'", "in", "text", "else", "click", ".", "echo", "(", "text", ")" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
test
discover
Print help and scan devices on screen. :return: None
amazon_dash/discovery.py
def discover(interface=None): """Print help and scan devices on screen. :return: None """ click.secho(HELP, fg='yellow') scan_devices(discovery_print, lfilter=lambda d: d.src not in mac_id_list, iface=interface)
def discover(interface=None): """Print help and scan devices on screen. :return: None """ click.secho(HELP, fg='yellow') scan_devices(discovery_print, lfilter=lambda d: d.src not in mac_id_list, iface=interface)
[ "Print", "help", "and", "scan", "devices", "on", "screen", "." ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/discovery.py#L92-L98
[ "def", "discover", "(", "interface", "=", "None", ")", ":", "click", ".", "secho", "(", "HELP", ",", "fg", "=", "'yellow'", ")", "scan_devices", "(", "discovery_print", ",", "lfilter", "=", "lambda", "d", ":", "d", ".", "src", "not", "in", "mac_id_list", ",", "iface", "=", "interface", ")" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
test
Device.execute
Execute this device :param bool root_allowed: Only used for ExecuteCmd :return: None
amazon_dash/listener.py
def execute(self, root_allowed=False): """Execute this device :param bool root_allowed: Only used for ExecuteCmd :return: None """ logger.debug('%s device executed (mac %s)', self.name, self.src) if not self.execute_instance: msg = '%s: There is not execution method in device conf.' logger.warning(msg, self.name) self.send_confirmation(msg % self.name, False) return try: result = self.execute_instance.execute(root_allowed) except Exception as e: self.send_confirmation('Error executing the device {}: {}'.format(self.name, e), False) raise else: result = 'The {} device has been started and is running right now'.format(self.name) \ if result is None else result result = result or 'The {} device has been executed successfully'.format(self.name) self.send_confirmation(result) return result
def execute(self, root_allowed=False): """Execute this device :param bool root_allowed: Only used for ExecuteCmd :return: None """ logger.debug('%s device executed (mac %s)', self.name, self.src) if not self.execute_instance: msg = '%s: There is not execution method in device conf.' logger.warning(msg, self.name) self.send_confirmation(msg % self.name, False) return try: result = self.execute_instance.execute(root_allowed) except Exception as e: self.send_confirmation('Error executing the device {}: {}'.format(self.name, e), False) raise else: result = 'The {} device has been started and is running right now'.format(self.name) \ if result is None else result result = result or 'The {} device has been executed successfully'.format(self.name) self.send_confirmation(result) return result
[ "Execute", "this", "device" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/listener.py#L71-L93
[ "def", "execute", "(", "self", ",", "root_allowed", "=", "False", ")", ":", "logger", ".", "debug", "(", "'%s device executed (mac %s)'", ",", "self", ".", "name", ",", "self", ".", "src", ")", "if", "not", "self", ".", "execute_instance", ":", "msg", "=", "'%s: There is not execution method in device conf.'", "logger", ".", "warning", "(", "msg", ",", "self", ".", "name", ")", "self", ".", "send_confirmation", "(", "msg", "%", "self", ".", "name", ",", "False", ")", "return", "try", ":", "result", "=", "self", ".", "execute_instance", ".", "execute", "(", "root_allowed", ")", "except", "Exception", "as", "e", ":", "self", ".", "send_confirmation", "(", "'Error executing the device {}: {}'", ".", "format", "(", "self", ".", "name", ",", "e", ")", ",", "False", ")", "raise", "else", ":", "result", "=", "'The {} device has been started and is running right now'", ".", "format", "(", "self", ".", "name", ")", "if", "result", "is", "None", "else", "result", "result", "=", "result", "or", "'The {} device has been executed successfully'", ".", "format", "(", "self", ".", "name", ")", "self", ".", "send_confirmation", "(", "result", ")", "return", "result" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
test
Device.send_confirmation
Send success or error message to configured confirmation :param str message: Body message to send :param bool success: Device executed successfully to personalize message :return: None
amazon_dash/listener.py
def send_confirmation(self, message, success=True): """Send success or error message to configured confirmation :param str message: Body message to send :param bool success: Device executed successfully to personalize message :return: None """ message = message.strip() if not self.confirmation: return try: self.confirmation.send(message, success) except Exception as e: logger.warning('Error sending confirmation on device {}: {}'.format(self.name, e))
def send_confirmation(self, message, success=True): """Send success or error message to configured confirmation :param str message: Body message to send :param bool success: Device executed successfully to personalize message :return: None """ message = message.strip() if not self.confirmation: return try: self.confirmation.send(message, success) except Exception as e: logger.warning('Error sending confirmation on device {}: {}'.format(self.name, e))
[ "Send", "success", "or", "error", "message", "to", "configured", "confirmation" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/listener.py#L95-L108
[ "def", "send_confirmation", "(", "self", ",", "message", ",", "success", "=", "True", ")", ":", "message", "=", "message", ".", "strip", "(", ")", "if", "not", "self", ".", "confirmation", ":", "return", "try", ":", "self", ".", "confirmation", ".", "send", "(", "message", ",", "success", ")", "except", "Exception", "as", "e", ":", "logger", ".", "warning", "(", "'Error sending confirmation on device {}: {}'", ".", "format", "(", "self", ".", "name", ",", "e", ")", ")" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
test
Listener.on_push
Press button. Check DEFAULT_DELAY. :param scapy.packet.Packet device: Scapy packet :return: None
amazon_dash/listener.py
def on_push(self, device): """Press button. Check DEFAULT_DELAY. :param scapy.packet.Packet device: Scapy packet :return: None """ src = device.src.lower() if last_execution[src] + self.settings.get('delay', DEFAULT_DELAY) > time.time(): return last_execution[src] = time.time() self.execute(device)
def on_push(self, device): """Press button. Check DEFAULT_DELAY. :param scapy.packet.Packet device: Scapy packet :return: None """ src = device.src.lower() if last_execution[src] + self.settings.get('delay', DEFAULT_DELAY) > time.time(): return last_execution[src] = time.time() self.execute(device)
[ "Press", "button", ".", "Check", "DEFAULT_DELAY", "." ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/listener.py#L128-L138
[ "def", "on_push", "(", "self", ",", "device", ")", ":", "src", "=", "device", ".", "src", ".", "lower", "(", ")", "if", "last_execution", "[", "src", "]", "+", "self", ".", "settings", ".", "get", "(", "'delay'", ",", "DEFAULT_DELAY", ")", ">", "time", ".", "time", "(", ")", ":", "return", "last_execution", "[", "src", "]", "=", "time", ".", "time", "(", ")", "self", ".", "execute", "(", "device", ")" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
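The delay check above is a per-device debounce; a minimal standalone sketch of the same idea, where the 10-second default is an assumption chosen for illustration:
import time
from collections import defaultdict

last_execution = defaultdict(float)  # mac address -> timestamp of the last accepted press

def should_fire(mac, delay=10):
    # Drop presses arriving within `delay` seconds of the previous accepted one.
    now = time.time()
    if last_execution[mac] + delay > now:
        return False
    last_execution[mac] = now
    return True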
test
Listener.execute
Execute a device. Used if the time between executions is greater than DEFAULT_DELAY :param scapy.packet.Packet device: Scapy packet :return: None
amazon_dash/listener.py
def execute(self, device): """Execute a device. Used if the time between executions is greater than DEFAULT_DELAY :param scapy.packet.Packet device: Scapy packet :return: None """ src = device.src.lower() device = self.devices[src] threading.Thread(target=device.execute, kwargs={ 'root_allowed': self.root_allowed }).start()
def execute(self, device): """Execute a device. Used if the time between executions is greater than DEFAULT_DELAY :param scapy.packet.Packet device: Scapy packet :return: None """ src = device.src.lower() device = self.devices[src] threading.Thread(target=device.execute, kwargs={ 'root_allowed': self.root_allowed }).start()
[ "Execute", "a", "device", ".", "Used", "if", "the", "time", "between", "executions", "is", "greater", "than", "DEFAULT_DELAY" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/listener.py#L140-L150
[ "def", "execute", "(", "self", ",", "device", ")", ":", "src", "=", "device", ".", "src", ".", "lower", "(", ")", "device", "=", "self", ".", "devices", "[", "src", "]", "threading", ".", "Thread", "(", "target", "=", "device", ".", "execute", ",", "kwargs", "=", "{", "'root_allowed'", ":", "self", ".", "root_allowed", "}", ")", ".", "start", "(", ")" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
test
Listener.run
Start daemon mode :param bool root_allowed: Only used for ExecuteCmd :return: loop
amazon_dash/listener.py
def run(self, root_allowed=False): """Start daemon mode :param bool root_allowed: Only used for ExecuteCmd :return: loop """ self.root_allowed = root_allowed scan_devices(self.on_push, lambda d: d.src.lower() in self.devices, self.settings.get('interface'))
def run(self, root_allowed=False): """Start daemon mode :param bool root_allowed: Only used for ExecuteCmd :return: loop """ self.root_allowed = root_allowed scan_devices(self.on_push, lambda d: d.src.lower() in self.devices, self.settings.get('interface'))
[ "Start", "daemon", "mode" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/listener.py#L152-L159
[ "def", "run", "(", "self", ",", "root_allowed", "=", "False", ")", ":", "self", ".", "root_allowed", "=", "root_allowed", "scan_devices", "(", "self", ".", "on_push", ",", "lambda", "d", ":", "d", ".", "src", ".", "lower", "(", ")", "in", "self", ".", "devices", ",", "self", ".", "settings", ".", "get", "(", "'interface'", ")", ")" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
test
scan_devices
Sniff packets :param fn: callback on packet :param lfilter: filter packets :return: loop
amazon_dash/scan.py
def scan_devices(fn, lfilter, iface=None): """Sniff packages :param fn: callback on packet :param lfilter: filter packages :return: loop """ try: sniff(prn=fn, store=0, # filter="udp", filter="arp or (udp and src port 68 and dst port 67 and src host 0.0.0.0)", lfilter=lfilter, iface=iface) except PermissionError: raise SocketPermissionError
def scan_devices(fn, lfilter, iface=None): """Sniff packages :param fn: callback on packet :param lfilter: filter packages :return: loop """ try: sniff(prn=fn, store=0, # filter="udp", filter="arp or (udp and src port 68 and dst port 67 and src host 0.0.0.0)", lfilter=lfilter, iface=iface) except PermissionError: raise SocketPermissionError
[ "Sniff", "packages" ]
Nekmo/amazon-dash
python
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/scan.py#L16-L29
[ "def", "scan_devices", "(", "fn", ",", "lfilter", ",", "iface", "=", "None", ")", ":", "try", ":", "sniff", "(", "prn", "=", "fn", ",", "store", "=", "0", ",", "# filter=\"udp\",", "filter", "=", "\"arp or (udp and src port 68 and dst port 67 and src host 0.0.0.0)\"", ",", "lfilter", "=", "lfilter", ",", "iface", "=", "iface", ")", "except", "PermissionError", ":", "raise", "SocketPermissionError" ]
0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b
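As a usage sketch for the helper above (the callback body, button MAC and 'eth0' interface are illustrative assumptions):
def on_packet(pkt):
    # Runs for every sniffed ARP/DHCP packet that passes the lfilter below.
    print(pkt.src)

# React only to one known button; everything else is filtered out before the callback runs.
scan_devices(on_packet,
             lfilter=lambda d: d.src.lower() == 'ac:63:be:00:00:00',
             iface='eth0')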
test
open_url
Loads a web page in the current browser session. :param absolute_or_relative_url: an absolute url to the web page if config.base_url is not specified, otherwise a relative url :Usage: open_url('http://mydomain.com/subpage1') open_url('http://mydomain.com/subpage2') # OR config.base_url = 'http://mydomain.com' open_url('/subpage1') open_url('/subpage2')
selene/browser.py
def open_url(absolute_or_relative_url): """ Loads a web page in the current browser session. :param absolgenerateute_or_relative_url: an absolute url to web page in case of config.base_url is not specified, otherwise - relative url correspondingly :Usage: open_url('http://mydomain.com/subpage1') open_url('http://mydomain.com/subpage2') # OR config.base_url = 'http://mydomain.com' open_url('/subpage1') open_url('/subpage2') """ # todo: refactor next line when app_host is removed base_url = selene.config.app_host if selene.config.app_host else selene.config.base_url driver().get(base_url + absolute_or_relative_url)
def open_url(absolute_or_relative_url): """ Loads a web page in the current browser session. :param absolgenerateute_or_relative_url: an absolute url to web page in case of config.base_url is not specified, otherwise - relative url correspondingly :Usage: open_url('http://mydomain.com/subpage1') open_url('http://mydomain.com/subpage2') # OR config.base_url = 'http://mydomain.com' open_url('/subpage1') open_url('/subpage2') """ # todo: refactor next line when app_host is removed base_url = selene.config.app_host if selene.config.app_host else selene.config.base_url driver().get(base_url + absolute_or_relative_url)
[ "Loads", "a", "web", "page", "in", "the", "current", "browser", "session", ".", ":", "param", "absolgenerateute_or_relative_url", ":", "an", "absolute", "url", "to", "web", "page", "in", "case", "of", "config", ".", "base_url", "is", "not", "specified", "otherwise", "-", "relative", "url", "correspondingly" ]
yashaka/selene
python
https://github.com/yashaka/selene/blob/123d6142047c3f0a9f0a67da6be0b916e630fd6a/selene/browser.py#L38-L55
[ "def", "open_url", "(", "absolute_or_relative_url", ")", ":", "# todo: refactor next line when app_host is removed", "base_url", "=", "selene", ".", "config", ".", "app_host", "if", "selene", ".", "config", ".", "app_host", "else", "selene", ".", "config", ".", "base_url", "driver", "(", ")", ".", "get", "(", "base_url", "+", "absolute_or_relative_url", ")" ]
123d6142047c3f0a9f0a67da6be0b916e630fd6a
test
OfxConverter.convert
Convert an OFX Transaction to a posting
ledgerautosync/converter.py
def convert(self, txn): """ Convert an OFX Transaction to a posting """ ofxid = self.mk_ofxid(txn.id) metadata = {} posting_metadata = {"ofxid": ofxid} if isinstance(txn, OfxTransaction): posting = Posting(self.name, Amount(txn.amount, self.currency), metadata=posting_metadata) return Transaction( date=txn.date, payee=self.format_payee(txn), postings=[ posting, posting.clone_inverted( self.mk_dynamic_account(self.format_payee(txn), exclude=self.name))]) elif isinstance(txn, InvestmentTransaction): acct1 = self.name acct2 = self.name posting1 = None posting2 = None security = self.maybe_get_ticker(txn.security) if isinstance(txn.type, str): # recent versions of ofxparse if re.match('^(buy|sell)', txn.type): acct2 = self.unknownaccount or 'Assets:Unknown' elif txn.type == 'transfer': acct2 = 'Transfer' elif txn.type == 'reinvest': # reinvestment of income # TODO: make this configurable acct2 = 'Income:Interest' elif txn.type == 'income' and txn.income_type == 'DIV': # Fidelity lists non-reinvested dividend income as # type: income, income_type: DIV # TODO: determine how dividend income is listed from other institutions # income/DIV transactions do not involve buying or selling a security # so their postings need special handling compared to # others metadata['dividend_from'] = security acct2 = 'Income:Dividends' posting1 = Posting(acct1, Amount(txn.total, self.currency), metadata=posting_metadata) posting2 = posting1.clone_inverted(acct2) else: # ??? pass else: # Old version of ofxparse if (txn.type in [0, 1, 3, 4]): # buymf, sellmf, buystock, sellstock acct2 = self.unknownaccount or 'Assets:Unknown' elif (txn.type == 2): # reinvest acct2 = 'Income:Interest' else: # ??? pass aux_date = None if txn.settleDate is not None and \ txn.settleDate != txn.tradeDate: aux_date = txn.settleDate # income/DIV already defined above; # this block defines all other posting types if posting1 is None and posting2 is None: posting1 = Posting( acct1, Amount( txn.units, security, unlimited=True), unit_price=Amount( txn.unit_price, self.currency, unlimited=True), metadata=posting_metadata) posting2 = Posting( acct2, Amount( txn.units * txn.unit_price, self.currency, reverse=True)) else: # Previously defined if type:income income_type/DIV pass return Transaction( date=txn.tradeDate, aux_date=aux_date, payee=self.format_payee(txn), metadata=metadata, postings=[posting1, posting2] )
def convert(self, txn): """ Convert an OFX Transaction to a posting """ ofxid = self.mk_ofxid(txn.id) metadata = {} posting_metadata = {"ofxid": ofxid} if isinstance(txn, OfxTransaction): posting = Posting(self.name, Amount(txn.amount, self.currency), metadata=posting_metadata) return Transaction( date=txn.date, payee=self.format_payee(txn), postings=[ posting, posting.clone_inverted( self.mk_dynamic_account(self.format_payee(txn), exclude=self.name))]) elif isinstance(txn, InvestmentTransaction): acct1 = self.name acct2 = self.name posting1 = None posting2 = None security = self.maybe_get_ticker(txn.security) if isinstance(txn.type, str): # recent versions of ofxparse if re.match('^(buy|sell)', txn.type): acct2 = self.unknownaccount or 'Assets:Unknown' elif txn.type == 'transfer': acct2 = 'Transfer' elif txn.type == 'reinvest': # reinvestment of income # TODO: make this configurable acct2 = 'Income:Interest' elif txn.type == 'income' and txn.income_type == 'DIV': # Fidelity lists non-reinvested dividend income as # type: income, income_type: DIV # TODO: determine how dividend income is listed from other institutions # income/DIV transactions do not involve buying or selling a security # so their postings need special handling compared to # others metadata['dividend_from'] = security acct2 = 'Income:Dividends' posting1 = Posting(acct1, Amount(txn.total, self.currency), metadata=posting_metadata) posting2 = posting1.clone_inverted(acct2) else: # ??? pass else: # Old version of ofxparse if (txn.type in [0, 1, 3, 4]): # buymf, sellmf, buystock, sellstock acct2 = self.unknownaccount or 'Assets:Unknown' elif (txn.type == 2): # reinvest acct2 = 'Income:Interest' else: # ??? pass aux_date = None if txn.settleDate is not None and \ txn.settleDate != txn.tradeDate: aux_date = txn.settleDate # income/DIV already defined above; # this block defines all other posting types if posting1 is None and posting2 is None: posting1 = Posting( acct1, Amount( txn.units, security, unlimited=True), unit_price=Amount( txn.unit_price, self.currency, unlimited=True), metadata=posting_metadata) posting2 = Posting( acct2, Amount( txn.units * txn.unit_price, self.currency, reverse=True)) else: # Previously defined if type:income income_type/DIV pass return Transaction( date=txn.tradeDate, aux_date=aux_date, payee=self.format_payee(txn), metadata=metadata, postings=[posting1, posting2] )
[ "Convert", "an", "OFX", "Transaction", "to", "a", "posting" ]
egh/ledger-autosync
python
https://github.com/egh/ledger-autosync/blob/7a303f3a693261d10f677c01fb08f35c105a1e1b/ledgerautosync/converter.py#L403-L507
[ "def", "convert", "(", "self", ",", "txn", ")", ":", "ofxid", "=", "self", ".", "mk_ofxid", "(", "txn", ".", "id", ")", "metadata", "=", "{", "}", "posting_metadata", "=", "{", "\"ofxid\"", ":", "ofxid", "}", "if", "isinstance", "(", "txn", ",", "OfxTransaction", ")", ":", "posting", "=", "Posting", "(", "self", ".", "name", ",", "Amount", "(", "txn", ".", "amount", ",", "self", ".", "currency", ")", ",", "metadata", "=", "posting_metadata", ")", "return", "Transaction", "(", "date", "=", "txn", ".", "date", ",", "payee", "=", "self", ".", "format_payee", "(", "txn", ")", ",", "postings", "=", "[", "posting", ",", "posting", ".", "clone_inverted", "(", "self", ".", "mk_dynamic_account", "(", "self", ".", "format_payee", "(", "txn", ")", ",", "exclude", "=", "self", ".", "name", ")", ")", "]", ")", "elif", "isinstance", "(", "txn", ",", "InvestmentTransaction", ")", ":", "acct1", "=", "self", ".", "name", "acct2", "=", "self", ".", "name", "posting1", "=", "None", "posting2", "=", "None", "security", "=", "self", ".", "maybe_get_ticker", "(", "txn", ".", "security", ")", "if", "isinstance", "(", "txn", ".", "type", ",", "str", ")", ":", "# recent versions of ofxparse", "if", "re", ".", "match", "(", "'^(buy|sell)'", ",", "txn", ".", "type", ")", ":", "acct2", "=", "self", ".", "unknownaccount", "or", "'Assets:Unknown'", "elif", "txn", ".", "type", "==", "'transfer'", ":", "acct2", "=", "'Transfer'", "elif", "txn", ".", "type", "==", "'reinvest'", ":", "# reinvestment of income", "# TODO: make this configurable", "acct2", "=", "'Income:Interest'", "elif", "txn", ".", "type", "==", "'income'", "and", "txn", ".", "income_type", "==", "'DIV'", ":", "# Fidelity lists non-reinvested dividend income as", "# type: income, income_type: DIV", "# TODO: determine how dividend income is listed from other institutions", "# income/DIV transactions do not involve buying or selling a security", "# so their postings need special handling compared to", "# others", "metadata", "[", "'dividend_from'", "]", "=", "security", "acct2", "=", "'Income:Dividends'", "posting1", "=", "Posting", "(", "acct1", ",", "Amount", "(", "txn", ".", "total", ",", "self", ".", "currency", ")", ",", "metadata", "=", "posting_metadata", ")", "posting2", "=", "posting1", ".", "clone_inverted", "(", "acct2", ")", "else", ":", "# ???", "pass", "else", ":", "# Old version of ofxparse", "if", "(", "txn", ".", "type", "in", "[", "0", ",", "1", ",", "3", ",", "4", "]", ")", ":", "# buymf, sellmf, buystock, sellstock", "acct2", "=", "self", ".", "unknownaccount", "or", "'Assets:Unknown'", "elif", "(", "txn", ".", "type", "==", "2", ")", ":", "# reinvest", "acct2", "=", "'Income:Interest'", "else", ":", "# ???", "pass", "aux_date", "=", "None", "if", "txn", ".", "settleDate", "is", "not", "None", "and", "txn", ".", "settleDate", "!=", "txn", ".", "tradeDate", ":", "aux_date", "=", "txn", ".", "settleDate", "# income/DIV already defined above;", "# this block defines all other posting types", "if", "posting1", "is", "None", "and", "posting2", "is", "None", ":", "posting1", "=", "Posting", "(", "acct1", ",", "Amount", "(", "txn", ".", "units", ",", "security", ",", "unlimited", "=", "True", ")", ",", "unit_price", "=", "Amount", "(", "txn", ".", "unit_price", ",", "self", ".", "currency", ",", "unlimited", "=", "True", ")", ",", "metadata", "=", "posting_metadata", ")", "posting2", "=", "Posting", "(", "acct2", ",", "Amount", "(", "txn", ".", "units", "*", "txn", ".", "unit_price", ",", "self", ".", "currency", ",", "reverse", "=", "True", 
")", ")", "else", ":", "# Previously defined if type:income income_type/DIV", "pass", "return", "Transaction", "(", "date", "=", "txn", ".", "tradeDate", ",", "aux_date", "=", "aux_date", ",", "payee", "=", "self", ".", "format_payee", "(", "txn", ")", ",", "metadata", "=", "metadata", ",", "postings", "=", "[", "posting1", ",", "posting2", "]", ")" ]
7a303f3a693261d10f677c01fb08f35c105a1e1b
test
find_ledger_file
Returns main ledger file path or raises an exception if it cannot be found.
ledgerautosync/cli.py
def find_ledger_file(ledgerrcpath=None): """Returns main ledger file path or raise exception if it cannot be \ found.""" if ledgerrcpath is None: ledgerrcpath = os.path.abspath(os.path.expanduser("~/.ledgerrc")) if "LEDGER_FILE" in os.environ: return os.path.abspath(os.path.expanduser(os.environ["LEDGER_FILE"])) elif os.path.exists(ledgerrcpath): # hacky ledgerrc = open(ledgerrcpath) for line in ledgerrc.readlines(): md = re.match(r"--file\s+([^\s]+).*", line) if md is not None: return os.path.abspath(os.path.expanduser(md.group(1))) else: return None
def find_ledger_file(ledgerrcpath=None): """Returns main ledger file path or raise exception if it cannot be \ found.""" if ledgerrcpath is None: ledgerrcpath = os.path.abspath(os.path.expanduser("~/.ledgerrc")) if "LEDGER_FILE" in os.environ: return os.path.abspath(os.path.expanduser(os.environ["LEDGER_FILE"])) elif os.path.exists(ledgerrcpath): # hacky ledgerrc = open(ledgerrcpath) for line in ledgerrc.readlines(): md = re.match(r"--file\s+([^\s]+).*", line) if md is not None: return os.path.abspath(os.path.expanduser(md.group(1))) else: return None
[ "Returns", "main", "ledger", "file", "path", "or", "raise", "exception", "if", "it", "cannot", "be", "\\", "found", "." ]
egh/ledger-autosync
python
https://github.com/egh/ledger-autosync/blob/7a303f3a693261d10f677c01fb08f35c105a1e1b/ledgerautosync/cli.py#L40-L55
[ "def", "find_ledger_file", "(", "ledgerrcpath", "=", "None", ")", ":", "if", "ledgerrcpath", "is", "None", ":", "ledgerrcpath", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "\"~/.ledgerrc\"", ")", ")", "if", "\"LEDGER_FILE\"", "in", "os", ".", "environ", ":", "return", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "os", ".", "environ", "[", "\"LEDGER_FILE\"", "]", ")", ")", "elif", "os", ".", "path", ".", "exists", "(", "ledgerrcpath", ")", ":", "# hacky", "ledgerrc", "=", "open", "(", "ledgerrcpath", ")", "for", "line", "in", "ledgerrc", ".", "readlines", "(", ")", ":", "md", "=", "re", ".", "match", "(", "r\"--file\\s+([^\\s]+).*\"", ",", "line", ")", "if", "md", "is", "not", "None", ":", "return", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "md", ".", "group", "(", "1", ")", ")", ")", "else", ":", "return", "None" ]
7a303f3a693261d10f677c01fb08f35c105a1e1b
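For concreteness, the .ledgerrc pattern above matches lines of the following shape (the path is made up):
import re

line = '--file ~/finance/main.ledger --sort date'  # hypothetical .ledgerrc line
md = re.match(r"--file\s+([^\s]+).*", line)
print(md.group(1))  # -> ~/finance/main.ledger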
test
print_results
This function is the final common pathway of program: Print initial balance if requested; Print transactions surviving de-duplication filter; Print balance assertions if requested; Print commodity prices obtained from position statements
ledgerautosync/cli.py
def print_results(converter, ofx, ledger, txns, args): """ This function is the final common pathway of program: Print initial balance if requested; Print transactions surviving de-duplication filter; Print balance assertions if requested; Print commodity prices obtained from position statements """ if args.initial: if (not(ledger.check_transaction_by_id ("ofxid", converter.mk_ofxid(AUTOSYNC_INITIAL))) and not(ledger.check_transaction_by_id("ofxid", ALL_AUTOSYNC_INITIAL))): print(converter.format_initial_balance(ofx.account.statement)) for txn in txns: print(converter.convert(txn).format(args.indent)) if args.assertions: print(converter.format_balance(ofx.account.statement)) # if OFX has positions use these to obtain commodity prices # and print "P" records to provide dated/timed valuations # Note that this outputs only the commodity price, # not your position (e.g. # shares), even though this is in the OFX record if hasattr(ofx.account.statement, 'positions'): for pos in ofx.account.statement.positions: print(converter.format_position(pos))
def print_results(converter, ofx, ledger, txns, args): """ This function is the final common pathway of program: Print initial balance if requested; Print transactions surviving de-duplication filter; Print balance assertions if requested; Print commodity prices obtained from position statements """ if args.initial: if (not(ledger.check_transaction_by_id ("ofxid", converter.mk_ofxid(AUTOSYNC_INITIAL))) and not(ledger.check_transaction_by_id("ofxid", ALL_AUTOSYNC_INITIAL))): print(converter.format_initial_balance(ofx.account.statement)) for txn in txns: print(converter.convert(txn).format(args.indent)) if args.assertions: print(converter.format_balance(ofx.account.statement)) # if OFX has positions use these to obtain commodity prices # and print "P" records to provide dated/timed valuations # Note that this outputs only the commodity price, # not your position (e.g. # shares), even though this is in the OFX record if hasattr(ofx.account.statement, 'positions'): for pos in ofx.account.statement.positions: print(converter.format_position(pos))
[ "This", "function", "is", "the", "final", "common", "pathway", "of", "program", ":" ]
egh/ledger-autosync
python
https://github.com/egh/ledger-autosync/blob/7a303f3a693261d10f677c01fb08f35c105a1e1b/ledgerautosync/cli.py#L58-L84
[ "def", "print_results", "(", "converter", ",", "ofx", ",", "ledger", ",", "txns", ",", "args", ")", ":", "if", "args", ".", "initial", ":", "if", "(", "not", "(", "ledger", ".", "check_transaction_by_id", "(", "\"ofxid\"", ",", "converter", ".", "mk_ofxid", "(", "AUTOSYNC_INITIAL", ")", ")", ")", "and", "not", "(", "ledger", ".", "check_transaction_by_id", "(", "\"ofxid\"", ",", "ALL_AUTOSYNC_INITIAL", ")", ")", ")", ":", "print", "(", "converter", ".", "format_initial_balance", "(", "ofx", ".", "account", ".", "statement", ")", ")", "for", "txn", "in", "txns", ":", "print", "(", "converter", ".", "convert", "(", "txn", ")", ".", "format", "(", "args", ".", "indent", ")", ")", "if", "args", ".", "assertions", ":", "print", "(", "converter", ".", "format_balance", "(", "ofx", ".", "account", ".", "statement", ")", ")", "# if OFX has positions use these to obtain commodity prices", "# and print \"P\" records to provide dated/timed valuations", "# Note that this outputs only the commodity price,", "# not your position (e.g. # shares), even though this is in the OFX record", "if", "hasattr", "(", "ofx", ".", "account", ".", "statement", ",", "'positions'", ")", ":", "for", "pos", "in", "ofx", ".", "account", ".", "statement", ".", "positions", ":", "print", "(", "converter", ".", "format_position", "(", "pos", ")", ")" ]
7a303f3a693261d10f677c01fb08f35c105a1e1b
test
compatibility
Run the unit test suite with each support library and Python version.
noxfile.py
def compatibility(session, install): """Run the unit test suite with each support library and Python version.""" session.install('-e', '.[dev]') session.install(install) _run_tests(session)
def compatibility(session, install): """Run the unit test suite with each support library and Python version.""" session.install('-e', '.[dev]') session.install(install) _run_tests(session)
[ "Run", "the", "unit", "test", "suite", "with", "each", "support", "library", "and", "Python", "version", "." ]
google/pybadges
python
https://github.com/google/pybadges/blob/d42c8080adb21b81123ac9540c53127ed2fa1edc/noxfile.py#L50-L55
[ "def", "compatibility", "(", "session", ",", "install", ")", ":", "session", ".", "install", "(", "'-e'", ",", "'.[dev]'", ")", "session", ".", "install", "(", "install", ")", "_run_tests", "(", "session", ")" ]
d42c8080adb21b81123ac9540c53127ed2fa1edc
test
PilMeasurer.text_width
Returns the width, in pixels, of a string in DejaVu Sans 110pt.
pybadges/pil_text_measurer.py
def text_width(self, text: str) -> float: """Returns the width, in pixels, of a string in DejaVu Sans 110pt.""" width, _ = self._font.getsize(text) return width
def text_width(self, text: str) -> float: """Returns the width, in pixels, of a string in DejaVu Sans 110pt.""" width, _ = self._font.getsize(text) return width
[ "Returns", "the", "width", "in", "pixels", "of", "a", "string", "in", "DejaVu", "Sans", "110pt", "." ]
google/pybadges
python
https://github.com/google/pybadges/blob/d42c8080adb21b81123ac9540c53127ed2fa1edc/pybadges/pil_text_measurer.py#L37-L40
[ "def", "text_width", "(", "self", ",", "text", ":", "str", ")", "->", "float", ":", "width", ",", "_", "=", "self", ".", "_font", ".", "getsize", "(", "text", ")", "return", "width" ]
d42c8080adb21b81123ac9540c53127ed2fa1edc
test
get_long_description
Transform README.md into a usable long description. Replaces relative references to svg images to absolute https references.
setup.py
def get_long_description(): """Transform README.md into a usable long description. Replaces relative references to svg images to absolute https references. """ with open('README.md') as f: read_me = f.read() def replace_relative_with_absolute(match): svg_path = match.group(0)[1:-1] return ('(https://github.com/google/pybadges/raw/master/' '%s?sanitize=true)' % svg_path) return re.sub(r'\(tests/golden-images/.*?\.svg\)', replace_relative_with_absolute, read_me)
def get_long_description(): """Transform README.md into a usable long description. Replaces relative references to svg images to absolute https references. """ with open('README.md') as f: read_me = f.read() def replace_relative_with_absolute(match): svg_path = match.group(0)[1:-1] return ('(https://github.com/google/pybadges/raw/master/' '%s?sanitize=true)' % svg_path) return re.sub(r'\(tests/golden-images/.*?\.svg\)', replace_relative_with_absolute, read_me)
[ "Transform", "README", ".", "md", "into", "a", "usable", "long", "description", "." ]
google/pybadges
python
https://github.com/google/pybadges/blob/d42c8080adb21b81123ac9540c53127ed2fa1edc/setup.py#L23-L39
[ "def", "get_long_description", "(", ")", ":", "with", "open", "(", "'README.md'", ")", "as", "f", ":", "read_me", "=", "f", ".", "read", "(", ")", "def", "replace_relative_with_absolute", "(", "match", ")", ":", "svg_path", "=", "match", ".", "group", "(", "0", ")", "[", "1", ":", "-", "1", "]", "return", "(", "'(https://github.com/google/pybadges/raw/master/'", "'%s?sanitize=true)'", "%", "svg_path", ")", "return", "re", ".", "sub", "(", "r'\\(tests/golden-images/.*?\\.svg\\)'", ",", "replace_relative_with_absolute", ",", "read_me", ")" ]
d42c8080adb21b81123ac9540c53127ed2fa1edc
test
PrecalculatedTextMeasurer.text_width
Returns the width, in pixels, of a string in DejaVu Sans 110pt.
pybadges/precalculated_text_measurer.py
def text_width(self, text: str) -> float: """Returns the width, in pixels, of a string in DejaVu Sans 110pt.""" width = 0 for index, c in enumerate(text): width += self._char_to_width.get(c, self._default_character_width) width -= self._pair_to_kern.get(text[index:index + 2], 0) return width
def text_width(self, text: str) -> float: """Returns the width, in pixels, of a string in DejaVu Sans 110pt.""" width = 0 for index, c in enumerate(text): width += self._char_to_width.get(c, self._default_character_width) width -= self._pair_to_kern.get(text[index:index + 2], 0) return width
[ "Returns", "the", "width", "in", "pixels", "of", "a", "string", "in", "DejaVu", "Sans", "110pt", "." ]
google/pybadges
python
https://github.com/google/pybadges/blob/d42c8080adb21b81123ac9540c53127ed2fa1edc/pybadges/precalculated_text_measurer.py#L52-L59
[ "def", "text_width", "(", "self", ",", "text", ":", "str", ")", "->", "float", ":", "width", "=", "0", "for", "index", ",", "c", "in", "enumerate", "(", "text", ")", ":", "width", "+=", "self", ".", "_char_to_width", ".", "get", "(", "c", ",", "self", ".", "_default_character_width", ")", "width", "-=", "self", ".", "_pair_to_kern", ".", "get", "(", "text", "[", "index", ":", "index", "+", "2", "]", ",", "0", ")", "return", "width" ]
d42c8080adb21b81123ac9540c53127ed2fa1edc
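A worked sketch of the width and kerning arithmetic above with made-up metrics (the numbers are illustrative, not the shipped DejaVu Sans values):
char_to_width = {'A': 68.4, 'V': 68.4}  # hypothetical per-character widths
pair_to_kern = {'AV': 7.3}              # hypothetical kerning adjustment
default_width = 60.0

def width(text):
    total = 0.0
    for i, c in enumerate(text):
        total += char_to_width.get(c, default_width)
        total -= pair_to_kern.get(text[i:i + 2], 0)
    return total

# width('AV') -> 129.5, i.e. 68.4 + 68.4 - 7.3: the kern pulls the pair closer together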
test
PrecalculatedTextMeasurer.from_json
Return a PrecalculatedTextMeasurer given a JSON stream. See precalculate_text.py for details on the required format.
pybadges/precalculated_text_measurer.py
def from_json(f: TextIO) -> 'PrecalculatedTextMeasurer': """Return a PrecalculatedTextMeasurer given a JSON stream. See precalculate_text.py for details on the required format. """ o = json.load(f) return PrecalculatedTextMeasurer(o['mean-character-length'], o['character-lengths'], o['kerning-pairs'])
def from_json(f: TextIO) -> 'PrecalculatedTextMeasurer': """Return a PrecalculatedTextMeasurer given a JSON stream. See precalculate_text.py for details on the required format. """ o = json.load(f) return PrecalculatedTextMeasurer(o['mean-character-length'], o['character-lengths'], o['kerning-pairs'])
[ "Return", "a", "PrecalculatedTextMeasurer", "given", "a", "JSON", "stream", "." ]
google/pybadges
python
https://github.com/google/pybadges/blob/d42c8080adb21b81123ac9540c53127ed2fa1edc/pybadges/precalculated_text_measurer.py#L62-L70
[ "def", "from_json", "(", "f", ":", "TextIO", ")", "->", "'PrecalculatedTextMeasurer'", ":", "o", "=", "json", ".", "load", "(", "f", ")", "return", "PrecalculatedTextMeasurer", "(", "o", "[", "'mean-character-length'", "]", ",", "o", "[", "'character-lengths'", "]", ",", "o", "[", "'kerning-pairs'", "]", ")" ]
d42c8080adb21b81123ac9540c53127ed2fa1edc
test
PrecalculatedTextMeasurer.default
Returns a reasonable default PrecalculatedTextMeasurer.
pybadges/precalculated_text_measurer.py
def default(cls) -> 'PrecalculatedTextMeasurer': """Returns a reasonable default PrecalculatedTextMeasurer.""" if cls._default_cache is not None: return cls._default_cache if pkg_resources.resource_exists(__name__, 'default-widths.json.xz'): import lzma with pkg_resources.resource_stream(__name__, 'default-widths.json.xz') as f: with lzma.open(f, "rt") as g: cls._default_cache = PrecalculatedTextMeasurer.from_json( cast(TextIO, g)) return cls._default_cache elif pkg_resources.resource_exists(__name__, 'default-widths.json'): with pkg_resources.resource_stream(__name__, 'default-widths.json') as f: cls._default_cache = PrecalculatedTextMeasurer.from_json( io.TextIOWrapper(f, encoding='utf-8')) return cls._default_cache else: raise ValueError('could not load default-widths.json')
def default(cls) -> 'PrecalculatedTextMeasurer': """Returns a reasonable default PrecalculatedTextMeasurer.""" if cls._default_cache is not None: return cls._default_cache if pkg_resources.resource_exists(__name__, 'default-widths.json.xz'): import lzma with pkg_resources.resource_stream(__name__, 'default-widths.json.xz') as f: with lzma.open(f, "rt") as g: cls._default_cache = PrecalculatedTextMeasurer.from_json( cast(TextIO, g)) return cls._default_cache elif pkg_resources.resource_exists(__name__, 'default-widths.json'): with pkg_resources.resource_stream(__name__, 'default-widths.json') as f: cls._default_cache = PrecalculatedTextMeasurer.from_json( io.TextIOWrapper(f, encoding='utf-8')) return cls._default_cache else: raise ValueError('could not load default-widths.json')
[ "Returns", "a", "reasonable", "default", "PrecalculatedTextMeasurer", "." ]
google/pybadges
python
https://github.com/google/pybadges/blob/d42c8080adb21b81123ac9540c53127ed2fa1edc/pybadges/precalculated_text_measurer.py#L73-L93
[ "def", "default", "(", "cls", ")", "->", "'PrecalculatedTextMeasurer'", ":", "if", "cls", ".", "_default_cache", "is", "not", "None", ":", "return", "cls", ".", "_default_cache", "if", "pkg_resources", ".", "resource_exists", "(", "__name__", ",", "'default-widths.json.xz'", ")", ":", "import", "lzma", "with", "pkg_resources", ".", "resource_stream", "(", "__name__", ",", "'default-widths.json.xz'", ")", "as", "f", ":", "with", "lzma", ".", "open", "(", "f", ",", "\"rt\"", ")", "as", "g", ":", "cls", ".", "_default_cache", "=", "PrecalculatedTextMeasurer", ".", "from_json", "(", "cast", "(", "TextIO", ",", "g", ")", ")", "return", "cls", ".", "_default_cache", "elif", "pkg_resources", ".", "resource_exists", "(", "__name__", ",", "'default-widths.json'", ")", ":", "with", "pkg_resources", ".", "resource_stream", "(", "__name__", ",", "'default-widths.json'", ")", "as", "f", ":", "cls", ".", "_default_cache", "=", "PrecalculatedTextMeasurer", ".", "from_json", "(", "io", ".", "TextIOWrapper", "(", "f", ",", "encoding", "=", "'utf-8'", ")", ")", "return", "cls", ".", "_default_cache", "else", ":", "raise", "ValueError", "(", "'could not load default-widths.json'", ")" ]
d42c8080adb21b81123ac9540c53127ed2fa1edc
test
badge
Creates a github-style badge as an SVG image. >>> badge(left_text='coverage', right_text='23%', right_color='red') '<svg...</svg>' >>> badge(left_text='build', right_text='green', right_color='green', ... whole_link="http://www.example.com/") '<svg...</svg>' Args: left_text: The text that should appear on the left-hand-side of the badge e.g. "coverage". right_text: The text that should appear on the right-hand-side of the badge e.g. "23%". left_link: The URL that should be redirected to when the left-hand text is selected. right_link: The URL that should be redirected to when the right-hand text is selected. whole_link: The link that should be redirected to when the badge is selected. If set then left_link and right_right may not be set. logo: A url representing a logo that will be displayed inside the badge. Can be a data URL e.g. "data:image/svg+xml;utf8,<svg..." left_color: The color of the part of the badge containing the left-hand text. Can be an valid CSS color (see https://developer.mozilla.org/en-US/docs/Web/CSS/color) or a color name defined here: https://github.com/badges/shields/blob/master/lib/colorscheme.json right_color: The color of the part of the badge containing the right-hand text. Can be an valid CSS color (see https://developer.mozilla.org/en-US/docs/Web/CSS/color) or a color name defined here: https://github.com/badges/shields/blob/master/lib/colorscheme.json measurer: A text_measurer.TextMeasurer that can be used to measure the width of left_text and right_text. embed_logo: If True then embed the logo image directly in the badge. This can prevent an HTTP request and some browsers will not render external image referenced. When True, `logo` must be a HTTP/HTTPS URI or a filesystem path. Also, the `badge` call may raise an exception if the logo cannot be loaded, is not an image, etc.
pybadges/__init__.py
def badge(left_text: str, right_text: str, left_link: Optional[str] = None, right_link: Optional[str] = None, whole_link: Optional[str] = None, logo: Optional[str] = None, left_color: str = '#555', right_color: str = '#007ec6', measurer: Optional[text_measurer.TextMeasurer] = None, embed_logo: bool = False) -> str: """Creates a github-style badge as an SVG image. >>> badge(left_text='coverage', right_text='23%', right_color='red') '<svg...</svg>' >>> badge(left_text='build', right_text='green', right_color='green', ... whole_link="http://www.example.com/") '<svg...</svg>' Args: left_text: The text that should appear on the left-hand-side of the badge e.g. "coverage". right_text: The text that should appear on the right-hand-side of the badge e.g. "23%". left_link: The URL that should be redirected to when the left-hand text is selected. right_link: The URL that should be redirected to when the right-hand text is selected. whole_link: The link that should be redirected to when the badge is selected. If set then left_link and right_right may not be set. logo: A url representing a logo that will be displayed inside the badge. Can be a data URL e.g. "data:image/svg+xml;utf8,<svg..." left_color: The color of the part of the badge containing the left-hand text. Can be an valid CSS color (see https://developer.mozilla.org/en-US/docs/Web/CSS/color) or a color name defined here: https://github.com/badges/shields/blob/master/lib/colorscheme.json right_color: The color of the part of the badge containing the right-hand text. Can be an valid CSS color (see https://developer.mozilla.org/en-US/docs/Web/CSS/color) or a color name defined here: https://github.com/badges/shields/blob/master/lib/colorscheme.json measurer: A text_measurer.TextMeasurer that can be used to measure the width of left_text and right_text. embed_logo: If True then embed the logo image directly in the badge. This can prevent an HTTP request and some browsers will not render external image referenced. When True, `logo` must be a HTTP/HTTPS URI or a filesystem path. Also, the `badge` call may raise an exception if the logo cannot be loaded, is not an image, etc. """ if measurer is None: measurer = ( precalculated_text_measurer.PrecalculatedTextMeasurer .default()) if (left_link or right_link) and whole_link: raise ValueError( 'whole_link may not bet set with left_link or right_link') template = _JINJA2_ENVIRONMENT.get_template('badge-template-full.svg') if logo and embed_logo: logo = _embed_image(logo) svg = template.render( left_text=left_text, right_text=right_text, left_text_width=measurer.text_width(left_text) / 10.0, right_text_width=measurer.text_width(right_text) / 10.0, left_link=left_link, right_link=right_link, whole_link=whole_link, logo=logo, left_color=_NAME_TO_COLOR.get(left_color, left_color), right_color=_NAME_TO_COLOR.get(right_color, right_color), ) xml = minidom.parseString(svg) _remove_blanks(xml) xml.normalize() return xml.documentElement.toxml()
def badge(left_text: str, right_text: str, left_link: Optional[str] = None, right_link: Optional[str] = None, whole_link: Optional[str] = None, logo: Optional[str] = None, left_color: str = '#555', right_color: str = '#007ec6', measurer: Optional[text_measurer.TextMeasurer] = None, embed_logo: bool = False) -> str: """Creates a github-style badge as an SVG image. >>> badge(left_text='coverage', right_text='23%', right_color='red') '<svg...</svg>' >>> badge(left_text='build', right_text='green', right_color='green', ... whole_link="http://www.example.com/") '<svg...</svg>' Args: left_text: The text that should appear on the left-hand-side of the badge e.g. "coverage". right_text: The text that should appear on the right-hand-side of the badge e.g. "23%". left_link: The URL that should be redirected to when the left-hand text is selected. right_link: The URL that should be redirected to when the right-hand text is selected. whole_link: The link that should be redirected to when the badge is selected. If set then left_link and right_right may not be set. logo: A url representing a logo that will be displayed inside the badge. Can be a data URL e.g. "data:image/svg+xml;utf8,<svg..." left_color: The color of the part of the badge containing the left-hand text. Can be an valid CSS color (see https://developer.mozilla.org/en-US/docs/Web/CSS/color) or a color name defined here: https://github.com/badges/shields/blob/master/lib/colorscheme.json right_color: The color of the part of the badge containing the right-hand text. Can be an valid CSS color (see https://developer.mozilla.org/en-US/docs/Web/CSS/color) or a color name defined here: https://github.com/badges/shields/blob/master/lib/colorscheme.json measurer: A text_measurer.TextMeasurer that can be used to measure the width of left_text and right_text. embed_logo: If True then embed the logo image directly in the badge. This can prevent an HTTP request and some browsers will not render external image referenced. When True, `logo` must be a HTTP/HTTPS URI or a filesystem path. Also, the `badge` call may raise an exception if the logo cannot be loaded, is not an image, etc. """ if measurer is None: measurer = ( precalculated_text_measurer.PrecalculatedTextMeasurer .default()) if (left_link or right_link) and whole_link: raise ValueError( 'whole_link may not bet set with left_link or right_link') template = _JINJA2_ENVIRONMENT.get_template('badge-template-full.svg') if logo and embed_logo: logo = _embed_image(logo) svg = template.render( left_text=left_text, right_text=right_text, left_text_width=measurer.text_width(left_text) / 10.0, right_text_width=measurer.text_width(right_text) / 10.0, left_link=left_link, right_link=right_link, whole_link=whole_link, logo=logo, left_color=_NAME_TO_COLOR.get(left_color, left_color), right_color=_NAME_TO_COLOR.get(right_color, right_color), ) xml = minidom.parseString(svg) _remove_blanks(xml) xml.normalize() return xml.documentElement.toxml()
[ "Creates", "a", "github", "-", "style", "badge", "as", "an", "SVG", "image", "." ]
google/pybadges
python
https://github.com/google/pybadges/blob/d42c8080adb21b81123ac9540c53127ed2fa1edc/pybadges/__init__.py#L115-L188
[ "def", "badge", "(", "left_text", ":", "str", ",", "right_text", ":", "str", ",", "left_link", ":", "Optional", "[", "str", "]", "=", "None", ",", "right_link", ":", "Optional", "[", "str", "]", "=", "None", ",", "whole_link", ":", "Optional", "[", "str", "]", "=", "None", ",", "logo", ":", "Optional", "[", "str", "]", "=", "None", ",", "left_color", ":", "str", "=", "'#555'", ",", "right_color", ":", "str", "=", "'#007ec6'", ",", "measurer", ":", "Optional", "[", "text_measurer", ".", "TextMeasurer", "]", "=", "None", ",", "embed_logo", ":", "bool", "=", "False", ")", "->", "str", ":", "if", "measurer", "is", "None", ":", "measurer", "=", "(", "precalculated_text_measurer", ".", "PrecalculatedTextMeasurer", ".", "default", "(", ")", ")", "if", "(", "left_link", "or", "right_link", ")", "and", "whole_link", ":", "raise", "ValueError", "(", "'whole_link may not bet set with left_link or right_link'", ")", "template", "=", "_JINJA2_ENVIRONMENT", ".", "get_template", "(", "'badge-template-full.svg'", ")", "if", "logo", "and", "embed_logo", ":", "logo", "=", "_embed_image", "(", "logo", ")", "svg", "=", "template", ".", "render", "(", "left_text", "=", "left_text", ",", "right_text", "=", "right_text", ",", "left_text_width", "=", "measurer", ".", "text_width", "(", "left_text", ")", "/", "10.0", ",", "right_text_width", "=", "measurer", ".", "text_width", "(", "right_text", ")", "/", "10.0", ",", "left_link", "=", "left_link", ",", "right_link", "=", "right_link", ",", "whole_link", "=", "whole_link", ",", "logo", "=", "logo", ",", "left_color", "=", "_NAME_TO_COLOR", ".", "get", "(", "left_color", ",", "left_color", ")", ",", "right_color", "=", "_NAME_TO_COLOR", ".", "get", "(", "right_color", ",", "right_color", ")", ",", ")", "xml", "=", "minidom", ".", "parseString", "(", "svg", ")", "_remove_blanks", "(", "xml", ")", "xml", ".", "normalize", "(", ")", "return", "xml", ".", "documentElement", ".", "toxml", "(", ")" ]
d42c8080adb21b81123ac9540c53127ed2fa1edc
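For context, a minimal usage sketch of the badge helper documented in the record above (the call matches the doctest in its docstring; the output file name is a placeholder):

from pybadges import badge

svg = badge(left_text='coverage', right_text='23%', right_color='red')
# svg is a complete '<svg>...</svg>' string that can be written to a file or served directly
with open('coverage.svg', 'w') as f:
    f.write(svg)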
test
generate_supported_characters
Generate the characters supported by the font at the given path.
pybadges/precalculate_text.py
def generate_supported_characters(deja_vu_sans_path: str) -> Iterable[str]: """Generate the characters support by the font at the given path.""" font = ttLib.TTFont(deja_vu_sans_path) for cmap in font['cmap'].tables: if cmap.isUnicode(): for code in cmap.cmap: yield chr(code)
def generate_supported_characters(deja_vu_sans_path: str) -> Iterable[str]: """Generate the characters support by the font at the given path.""" font = ttLib.TTFont(deja_vu_sans_path) for cmap in font['cmap'].tables: if cmap.isUnicode(): for code in cmap.cmap: yield chr(code)
[ "Generate", "the", "characters", "support", "by", "the", "font", "at", "the", "given", "path", "." ]
google/pybadges
python
https://github.com/google/pybadges/blob/d42c8080adb21b81123ac9540c53127ed2fa1edc/pybadges/precalculate_text.py#L57-L63
[ "def", "generate_supported_characters", "(", "deja_vu_sans_path", ":", "str", ")", "->", "Iterable", "[", "str", "]", ":", "font", "=", "ttLib", ".", "TTFont", "(", "deja_vu_sans_path", ")", "for", "cmap", "in", "font", "[", "'cmap'", "]", ".", "tables", ":", "if", "cmap", ".", "isUnicode", "(", ")", ":", "for", "code", "in", "cmap", ".", "cmap", ":", "yield", "chr", "(", "code", ")" ]
d42c8080adb21b81123ac9540c53127ed2fa1edc
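A small sketch of driving the helper above; the import location is assumed from the file path pybadges/precalculate_text.py and the font path is hypothetical:

from pybadges import precalculate_text  # assumed import location of the helper shown above

supported = set(precalculate_text.generate_supported_characters('/path/to/DejaVuSans.ttf'))  # hypothetical font path
print(len(supported), 'a' in supported)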
test
generate_encodeable_characters
Generates the subset of 'characters' that can be encoded by 'encodings'. Args: characters: The characters to check for encodeability e.g. 'abcd'. encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5']. Returns: The subset of 'characters' that can be encoded using one of the provided encodings.
pybadges/precalculate_text.py
def generate_encodeable_characters(characters: Iterable[str], encodings: Iterable[str]) -> Iterable[str]: """Generates the subset of 'characters' that can be encoded by 'encodings'. Args: characters: The characters to check for encodeability e.g. 'abcd'. encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5']. Returns: The subset of 'characters' that can be encoded using one of the provided encodings. """ for c in characters: for encoding in encodings: try: c.encode(encoding) yield c except UnicodeEncodeError: pass
def generate_encodeable_characters(characters: Iterable[str], encodings: Iterable[str]) -> Iterable[str]: """Generates the subset of 'characters' that can be encoded by 'encodings'. Args: characters: The characters to check for encodeability e.g. 'abcd'. encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5']. Returns: The subset of 'characters' that can be encoded using one of the provided encodings. """ for c in characters: for encoding in encodings: try: c.encode(encoding) yield c except UnicodeEncodeError: pass
[ "Generates", "the", "subset", "of", "characters", "that", "can", "be", "encoded", "by", "encodings", "." ]
google/pybadges
python
https://github.com/google/pybadges/blob/d42c8080adb21b81123ac9540c53127ed2fa1edc/pybadges/precalculate_text.py#L66-L84
[ "def", "generate_encodeable_characters", "(", "characters", ":", "Iterable", "[", "str", "]", ",", "encodings", ":", "Iterable", "[", "str", "]", ")", "->", "Iterable", "[", "str", "]", ":", "for", "c", "in", "characters", ":", "for", "encoding", "in", "encodings", ":", "try", ":", "c", ".", "encode", "(", "encoding", ")", "yield", "c", "except", "UnicodeEncodeError", ":", "pass" ]
d42c8080adb21b81123ac9540c53127ed2fa1edc
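A quick illustration of the filtering behaviour described above (import path assumed from the file location): Cyrillic characters survive only when an encoding that covers them is supplied.

from pybadges.precalculate_text import generate_encodeable_characters  # assumed import path

# Note: a character encodable by several of the supplied encodings is yielded once per encoding,
# so a single encoding is used per call here.
print(''.join(generate_encodeable_characters('abcабв', ['cp1252'])))      # -> 'abc'
print(''.join(generate_encodeable_characters('abcабв', ['iso-8859-5'])))  # -> 'abcабв'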
test
calculate_character_to_length_mapping
Return a mapping between each given character and its length. Args: measurer: The TextMeasurer used to measure the width of the text in pixels. characters: The characters to measure e.g. "ml". Returns: A mapping from the given characters to their length in pixels, as determined by 'measurer' e.g. {'m': 5.2, 'l': 1.2}.
pybadges/precalculate_text.py
def calculate_character_to_length_mapping( measurer: text_measurer.TextMeasurer, characters: Iterable[str]) -> Mapping[str, float]: """Return a mapping between each given character and its length. Args: measurer: The TextMeasurer used to measure the width of the text in pixels. characters: The characters to measure e.g. "ml". Returns: A mapping from the given characters to their length in pixels, as determined by 'measurer' e.g. {'m': 5.2, 'l', 1.2}. """ char_to_length = {} for c in characters: char_to_length[c] = measurer.text_width(c) return char_to_length
def calculate_character_to_length_mapping( measurer: text_measurer.TextMeasurer, characters: Iterable[str]) -> Mapping[str, float]: """Return a mapping between each given character and its length. Args: measurer: The TextMeasurer used to measure the width of the text in pixels. characters: The characters to measure e.g. "ml". Returns: A mapping from the given characters to their length in pixels, as determined by 'measurer' e.g. {'m': 5.2, 'l', 1.2}. """ char_to_length = {} for c in characters: char_to_length[c] = measurer.text_width(c) return char_to_length
[ "Return", "a", "mapping", "between", "each", "given", "character", "and", "its", "length", "." ]
google/pybadges
python
https://github.com/google/pybadges/blob/d42c8080adb21b81123ac9540c53127ed2fa1edc/pybadges/precalculate_text.py#L87-L105
[ "def", "calculate_character_to_length_mapping", "(", "measurer", ":", "text_measurer", ".", "TextMeasurer", ",", "characters", ":", "Iterable", "[", "str", "]", ")", "->", "Mapping", "[", "str", ",", "float", "]", ":", "char_to_length", "=", "{", "}", "for", "c", "in", "characters", ":", "char_to_length", "[", "c", "]", "=", "measurer", ".", "text_width", "(", "c", ")", "return", "char_to_length" ]
d42c8080adb21b81123ac9540c53127ed2fa1edc
test
calculate_pair_to_kern_mapping
Returns a mapping between each *pair* of characters and their kerning. Args: measurer: The TextMeasurer used to measure the width of each pair of characters. char_to_length: A mapping between characters and their length in pixels. Must contain every character in 'characters' e.g. {'h': 5.2, 'e': 4.0, 'l': 1.2, 'o': 5.0}. characters: The characters to generate the kerning mapping for e.g. 'hel'. Returns: A mapping between each pair of given characters (e.g. 'hh', 'he', 'hl', 'eh', 'ee', 'el', 'lh', 'le', 'll') and the kerning adjustment for that pair of characters i.e. the difference between the length of the two characters calculated using 'char_to_length' vs. the length calculated by `measurer`. Positive values indicate that the measured pair is shorter than the sum from 'char_to_length'. Near-zero values (|kerning| <= 0.05) are excluded from the map e.g. {'hl': 3.1, 'ee': -0.5}.
pybadges/precalculate_text.py
def calculate_pair_to_kern_mapping( measurer: text_measurer.TextMeasurer, char_to_length: Mapping[str, float], characters: Iterable[str]) -> Mapping[str, float]: """Returns a mapping between each *pair* of characters and their kerning. Args: measurer: The TextMeasurer used to measure the width of each pair of characters. char_to_length: A mapping between characters and their length in pixels. Must contain every character in 'characters' e.g. {'h': 5.2, 'e': 4.0, 'l', 1.2, 'o': 5.0}. characters: The characters to generate the kerning mapping for e.g. 'hel'. Returns: A mapping between each pair of given characters (e.g. 'hh', he', hl', 'eh', 'ee', 'el', 'lh, 'le', 'll') and the kerning adjustment for that pair of characters i.e. the difference between the length of the two characters calculated using 'char_to_length' vs. the length calculated by `measurer`. Positive values indicate that the length is less than using the sum of 'char_to_length'. Zero values are excluded from the map e.g. {'hl': 3.1, 'ee': -0.5}. """ pair_to_kerning = {} for a, b in itertools.permutations(characters, 2): kerned_width = measurer.text_width(a + b) unkerned_width = char_to_length[a] + char_to_length[b] kerning = unkerned_width - kerned_width if abs(kerning) > 0.05: pair_to_kerning[a + b] = round(kerning, 3) return pair_to_kerning
def calculate_pair_to_kern_mapping( measurer: text_measurer.TextMeasurer, char_to_length: Mapping[str, float], characters: Iterable[str]) -> Mapping[str, float]: """Returns a mapping between each *pair* of characters and their kerning. Args: measurer: The TextMeasurer used to measure the width of each pair of characters. char_to_length: A mapping between characters and their length in pixels. Must contain every character in 'characters' e.g. {'h': 5.2, 'e': 4.0, 'l', 1.2, 'o': 5.0}. characters: The characters to generate the kerning mapping for e.g. 'hel'. Returns: A mapping between each pair of given characters (e.g. 'hh', he', hl', 'eh', 'ee', 'el', 'lh, 'le', 'll') and the kerning adjustment for that pair of characters i.e. the difference between the length of the two characters calculated using 'char_to_length' vs. the length calculated by `measurer`. Positive values indicate that the length is less than using the sum of 'char_to_length'. Zero values are excluded from the map e.g. {'hl': 3.1, 'ee': -0.5}. """ pair_to_kerning = {} for a, b in itertools.permutations(characters, 2): kerned_width = measurer.text_width(a + b) unkerned_width = char_to_length[a] + char_to_length[b] kerning = unkerned_width - kerned_width if abs(kerning) > 0.05: pair_to_kerning[a + b] = round(kerning, 3) return pair_to_kerning
[ "Returns", "a", "mapping", "between", "each", "*", "pair", "*", "of", "characters", "and", "their", "kerning", "." ]
google/pybadges
python
https://github.com/google/pybadges/blob/d42c8080adb21b81123ac9540c53127ed2fa1edc/pybadges/precalculate_text.py#L108-L139
[ "def", "calculate_pair_to_kern_mapping", "(", "measurer", ":", "text_measurer", ".", "TextMeasurer", ",", "char_to_length", ":", "Mapping", "[", "str", ",", "float", "]", ",", "characters", ":", "Iterable", "[", "str", "]", ")", "->", "Mapping", "[", "str", ",", "float", "]", ":", "pair_to_kerning", "=", "{", "}", "for", "a", ",", "b", "in", "itertools", ".", "permutations", "(", "characters", ",", "2", ")", ":", "kerned_width", "=", "measurer", ".", "text_width", "(", "a", "+", "b", ")", "unkerned_width", "=", "char_to_length", "[", "a", "]", "+", "char_to_length", "[", "b", "]", "kerning", "=", "unkerned_width", "-", "kerned_width", "if", "abs", "(", "kerning", ")", ">", "0.05", ":", "pair_to_kerning", "[", "a", "+", "b", "]", "=", "round", "(", "kerning", ",", "3", ")", "return", "pair_to_kerning" ]
d42c8080adb21b81123ac9540c53127ed2fa1edc
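A worked example of the kerning arithmetic above, using hypothetical widths rather than real DejaVu Sans measurements:

# Hypothetical per-character widths (what calculate_character_to_length_mapping would return)
char_to_length = {'A': 6.8, 'V': 6.8}
kerned_width = 12.9                                           # hypothetical measurer.text_width('AV')
unkerned_width = char_to_length['A'] + char_to_length['V']    # 13.6
kerning = unkerned_width - kerned_width                       # 0.7
# |0.7| > 0.05, so the pair is kept; a positive value means 'AV' renders narrower than A + V
pair_to_kerning = {'AV': round(kerning, 3)}                   # {'AV': 0.7}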
test
write_json
Write the data required by PrecalculatedTextMeasurer to a stream.
pybadges/precalculate_text.py
def write_json(f: TextIO, deja_vu_sans_path: str, measurer: text_measurer.TextMeasurer, encodings: Iterable[str]) -> None: """Write the data required by PrecalculatedTextMeasurer to a stream.""" supported_characters = list( generate_supported_characters(deja_vu_sans_path)) kerning_characters = ''.join( generate_encodeable_characters(supported_characters, encodings)) char_to_length = calculate_character_to_length_mapping(measurer, supported_characters) pair_to_kerning = calculate_pair_to_kern_mapping(measurer, char_to_length, kerning_characters) json.dump( {'mean-character-length': statistics.mean(char_to_length.values()), 'character-lengths': char_to_length, 'kerning-characters': kerning_characters, 'kerning-pairs': pair_to_kerning}, f, sort_keys=True, indent=1)
def write_json(f: TextIO, deja_vu_sans_path: str, measurer: text_measurer.TextMeasurer, encodings: Iterable[str]) -> None: """Write the data required by PrecalculatedTextMeasurer to a stream.""" supported_characters = list( generate_supported_characters(deja_vu_sans_path)) kerning_characters = ''.join( generate_encodeable_characters(supported_characters, encodings)) char_to_length = calculate_character_to_length_mapping(measurer, supported_characters) pair_to_kerning = calculate_pair_to_kern_mapping(measurer, char_to_length, kerning_characters) json.dump( {'mean-character-length': statistics.mean(char_to_length.values()), 'character-lengths': char_to_length, 'kerning-characters': kerning_characters, 'kerning-pairs': pair_to_kerning}, f, sort_keys=True, indent=1)
[ "Write", "the", "data", "required", "by", "PrecalculatedTextMeasurer", "to", "a", "stream", "." ]
google/pybadges
python
https://github.com/google/pybadges/blob/d42c8080adb21b81123ac9540c53127ed2fa1edc/pybadges/precalculate_text.py#L142-L159
[ "def", "write_json", "(", "f", ":", "TextIO", ",", "deja_vu_sans_path", ":", "str", ",", "measurer", ":", "text_measurer", ".", "TextMeasurer", ",", "encodings", ":", "Iterable", "[", "str", "]", ")", "->", "None", ":", "supported_characters", "=", "list", "(", "generate_supported_characters", "(", "deja_vu_sans_path", ")", ")", "kerning_characters", "=", "''", ".", "join", "(", "generate_encodeable_characters", "(", "supported_characters", ",", "encodings", ")", ")", "char_to_length", "=", "calculate_character_to_length_mapping", "(", "measurer", ",", "supported_characters", ")", "pair_to_kerning", "=", "calculate_pair_to_kern_mapping", "(", "measurer", ",", "char_to_length", ",", "kerning_characters", ")", "json", ".", "dump", "(", "{", "'mean-character-length'", ":", "statistics", ".", "mean", "(", "char_to_length", ".", "values", "(", ")", ")", ",", "'character-lengths'", ":", "char_to_length", ",", "'kerning-characters'", ":", "kerning_characters", ",", "'kerning-pairs'", ":", "pair_to_kerning", "}", ",", "f", ",", "sort_keys", "=", "True", ",", "indent", "=", "1", ")" ]
d42c8080adb21b81123ac9540c53127ed2fa1edc
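A rough sketch of calling write_json; both paths and the import locations are assumptions, and the precalculated measurer is used here only because it appears elsewhere in this file - in practice a freshly measuring TextMeasurer implementation would be supplied:

import sys
from pybadges import precalculate_text, precalculated_text_measurer  # assumed import paths

measurer = precalculated_text_measurer.PrecalculatedTextMeasurer.default()
precalculate_text.write_json(
    sys.stdout,
    deja_vu_sans_path='/path/to/DejaVuSans.ttf',   # hypothetical font path
    measurer=measurer,
    encodings=['cp1252', 'iso-8859-5'])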
test
convolve_gaussian_2d
Convolve an image with a 2-D Gaussian kernel (applied as two separable 1-D passes).
ssim/utils.py
def convolve_gaussian_2d(image, gaussian_kernel_1d): """Convolve 2d gaussian.""" result = scipy.ndimage.filters.correlate1d( image, gaussian_kernel_1d, axis=0) result = scipy.ndimage.filters.correlate1d( result, gaussian_kernel_1d, axis=1) return result
def convolve_gaussian_2d(image, gaussian_kernel_1d): """Convolve 2d gaussian.""" result = scipy.ndimage.filters.correlate1d( image, gaussian_kernel_1d, axis=0) result = scipy.ndimage.filters.correlate1d( result, gaussian_kernel_1d, axis=1) return result
[ "Convolve", "2d", "gaussian", "." ]
jterrace/pyssim
python
https://github.com/jterrace/pyssim/blob/ff9bd90c3eb7525013ad46babf66b7cc78391e89/ssim/utils.py#L11-L17
[ "def", "convolve_gaussian_2d", "(", "image", ",", "gaussian_kernel_1d", ")", ":", "result", "=", "scipy", ".", "ndimage", ".", "filters", ".", "correlate1d", "(", "image", ",", "gaussian_kernel_1d", ",", "axis", "=", "0", ")", "result", "=", "scipy", ".", "ndimage", ".", "filters", ".", "correlate1d", "(", "result", ",", "gaussian_kernel_1d", ",", "axis", "=", "1", ")", "return", "result" ]
ff9bd90c3eb7525013ad46babf66b7cc78391e89
test
get_gaussian_kernel
Generate a gaussian kernel.
ssim/utils.py
def get_gaussian_kernel(gaussian_kernel_width=11, gaussian_kernel_sigma=1.5): """Generate a gaussian kernel.""" # 1D Gaussian kernel definition gaussian_kernel_1d = numpy.ndarray((gaussian_kernel_width)) norm_mu = int(gaussian_kernel_width / 2) # Fill Gaussian kernel for i in range(gaussian_kernel_width): gaussian_kernel_1d[i] = (exp(-(((i - norm_mu) ** 2)) / (2 * (gaussian_kernel_sigma ** 2)))) return gaussian_kernel_1d / numpy.sum(gaussian_kernel_1d)
def get_gaussian_kernel(gaussian_kernel_width=11, gaussian_kernel_sigma=1.5): """Generate a gaussian kernel.""" # 1D Gaussian kernel definition gaussian_kernel_1d = numpy.ndarray((gaussian_kernel_width)) norm_mu = int(gaussian_kernel_width / 2) # Fill Gaussian kernel for i in range(gaussian_kernel_width): gaussian_kernel_1d[i] = (exp(-(((i - norm_mu) ** 2)) / (2 * (gaussian_kernel_sigma ** 2)))) return gaussian_kernel_1d / numpy.sum(gaussian_kernel_1d)
[ "Generate", "a", "gaussian", "kernel", "." ]
jterrace/pyssim
python
https://github.com/jterrace/pyssim/blob/ff9bd90c3eb7525013ad46babf66b7cc78391e89/ssim/utils.py#L19-L29
[ "def", "get_gaussian_kernel", "(", "gaussian_kernel_width", "=", "11", ",", "gaussian_kernel_sigma", "=", "1.5", ")", ":", "# 1D Gaussian kernel definition", "gaussian_kernel_1d", "=", "numpy", ".", "ndarray", "(", "(", "gaussian_kernel_width", ")", ")", "norm_mu", "=", "int", "(", "gaussian_kernel_width", "/", "2", ")", "# Fill Gaussian kernel", "for", "i", "in", "range", "(", "gaussian_kernel_width", ")", ":", "gaussian_kernel_1d", "[", "i", "]", "=", "(", "exp", "(", "-", "(", "(", "(", "i", "-", "norm_mu", ")", "**", "2", ")", ")", "/", "(", "2", "*", "(", "gaussian_kernel_sigma", "**", "2", ")", ")", ")", ")", "return", "gaussian_kernel_1d", "/", "numpy", ".", "sum", "(", "gaussian_kernel_1d", ")" ]
ff9bd90c3eb7525013ad46babf66b7cc78391e89
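A small sketch combining the two pyssim helpers above; the import path is assumed from the file location ssim/utils.py and the input image is random data:

import numpy
from ssim.utils import get_gaussian_kernel, convolve_gaussian_2d

kernel_1d = get_gaussian_kernel(gaussian_kernel_width=11, gaussian_kernel_sigma=1.5)
assert abs(kernel_1d.sum() - 1.0) < 1e-9          # kernel is normalised to sum to 1

image = numpy.random.rand(64, 64)
blurred = convolve_gaussian_2d(image, kernel_1d)  # separable 2-D blur: two 1-D correlations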
test
to_grayscale
Convert PIL image to numpy grayscale array and numpy alpha array. Args: img (PIL.Image): PIL Image object. Returns: (gray, alpha): both numpy arrays.
ssim/utils.py
def to_grayscale(img): """Convert PIL image to numpy grayscale array and numpy alpha array. Args: img (PIL.Image): PIL Image object. Returns: (gray, alpha): both numpy arrays. """ gray = numpy.asarray(ImageOps.grayscale(img)).astype(numpy.float) imbands = img.getbands() alpha = None if 'A' in imbands: alpha = numpy.asarray(img.split()[-1]).astype(numpy.float) return gray, alpha
def to_grayscale(img): """Convert PIL image to numpy grayscale array and numpy alpha array. Args: img (PIL.Image): PIL Image object. Returns: (gray, alpha): both numpy arrays. """ gray = numpy.asarray(ImageOps.grayscale(img)).astype(numpy.float) imbands = img.getbands() alpha = None if 'A' in imbands: alpha = numpy.asarray(img.split()[-1]).astype(numpy.float) return gray, alpha
[ "Convert", "PIL", "image", "to", "numpy", "grayscale", "array", "and", "numpy", "alpha", "array", "." ]
jterrace/pyssim
python
https://github.com/jterrace/pyssim/blob/ff9bd90c3eb7525013ad46babf66b7cc78391e89/ssim/utils.py#L31-L47
[ "def", "to_grayscale", "(", "img", ")", ":", "gray", "=", "numpy", ".", "asarray", "(", "ImageOps", ".", "grayscale", "(", "img", ")", ")", ".", "astype", "(", "numpy", ".", "float", ")", "imbands", "=", "img", ".", "getbands", "(", ")", "alpha", "=", "None", "if", "'A'", "in", "imbands", ":", "alpha", "=", "numpy", ".", "asarray", "(", "img", ".", "split", "(", ")", "[", "-", "1", "]", ")", ".", "astype", "(", "numpy", ".", "float", ")", "return", "gray", ",", "alpha" ]
ff9bd90c3eb7525013ad46babf66b7cc78391e89
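A minimal usage sketch (import path assumed from the file location, file name is a placeholder):

from PIL import Image
from ssim.utils import to_grayscale  # assumed import path

gray, alpha = to_grayscale(Image.open('photo.png'))  # 'photo.png' is hypothetical
print(gray.shape, alpha is None)                     # alpha is None for images without an 'A' band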
test
main
Main function for pyssim.
ssim/ssimlib.py
def main(): """Main function for pyssim.""" description = '\n'.join([ 'Compares an image with a list of images using the SSIM metric.', ' Example:', ' pyssim test-images/test1-1.png "test-images/*"' ]) parser = argparse.ArgumentParser( prog='pyssim', formatter_class=argparse.RawTextHelpFormatter, description=description) parser.add_argument('--cw', help='compute the complex wavelet SSIM', action='store_true') parser.add_argument( 'base_image', metavar='image1.png', type=argparse.FileType('r')) parser.add_argument( 'comparison_images', metavar='image path with* or image2.png') parser.add_argument('--width', type=int, default=None, help='scales the image before computing SSIM') parser.add_argument('--height', type=int, default=None, help='scales the image before computing SSIM') args = parser.parse_args() if args.width and args.height: size = (args.width, args.height) else: size = None if not args.cw: gaussian_kernel_sigma = 1.5 gaussian_kernel_width = 11 gaussian_kernel_1d = get_gaussian_kernel( gaussian_kernel_width, gaussian_kernel_sigma) comparison_images = glob.glob(args.comparison_images) is_a_single_image = len(comparison_images) == 1 for comparison_image in comparison_images: if args.cw: ssim = SSIM(args.base_image.name, size=size) ssim_value = ssim.cw_ssim_value(comparison_image) else: ssim = SSIM(args.base_image.name, gaussian_kernel_1d, size=size) ssim_value = ssim.ssim_value(comparison_image) if is_a_single_image: sys.stdout.write('%.7g' % ssim_value) else: sys.stdout.write('%s - %s: %.7g' % ( args.base_image.name, comparison_image, ssim_value)) sys.stdout.write('\n')
def main(): """Main function for pyssim.""" description = '\n'.join([ 'Compares an image with a list of images using the SSIM metric.', ' Example:', ' pyssim test-images/test1-1.png "test-images/*"' ]) parser = argparse.ArgumentParser( prog='pyssim', formatter_class=argparse.RawTextHelpFormatter, description=description) parser.add_argument('--cw', help='compute the complex wavelet SSIM', action='store_true') parser.add_argument( 'base_image', metavar='image1.png', type=argparse.FileType('r')) parser.add_argument( 'comparison_images', metavar='image path with* or image2.png') parser.add_argument('--width', type=int, default=None, help='scales the image before computing SSIM') parser.add_argument('--height', type=int, default=None, help='scales the image before computing SSIM') args = parser.parse_args() if args.width and args.height: size = (args.width, args.height) else: size = None if not args.cw: gaussian_kernel_sigma = 1.5 gaussian_kernel_width = 11 gaussian_kernel_1d = get_gaussian_kernel( gaussian_kernel_width, gaussian_kernel_sigma) comparison_images = glob.glob(args.comparison_images) is_a_single_image = len(comparison_images) == 1 for comparison_image in comparison_images: if args.cw: ssim = SSIM(args.base_image.name, size=size) ssim_value = ssim.cw_ssim_value(comparison_image) else: ssim = SSIM(args.base_image.name, gaussian_kernel_1d, size=size) ssim_value = ssim.ssim_value(comparison_image) if is_a_single_image: sys.stdout.write('%.7g' % ssim_value) else: sys.stdout.write('%s - %s: %.7g' % ( args.base_image.name, comparison_image, ssim_value)) sys.stdout.write('\n')
[ "Main", "function", "for", "pyssim", "." ]
jterrace/pyssim
python
https://github.com/jterrace/pyssim/blob/ff9bd90c3eb7525013ad46babf66b7cc78391e89/ssim/ssimlib.py#L193-L246
[ "def", "main", "(", ")", ":", "description", "=", "'\\n'", ".", "join", "(", "[", "'Compares an image with a list of images using the SSIM metric.'", ",", "' Example:'", ",", "' pyssim test-images/test1-1.png \"test-images/*\"'", "]", ")", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "'pyssim'", ",", "formatter_class", "=", "argparse", ".", "RawTextHelpFormatter", ",", "description", "=", "description", ")", "parser", ".", "add_argument", "(", "'--cw'", ",", "help", "=", "'compute the complex wavelet SSIM'", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'base_image'", ",", "metavar", "=", "'image1.png'", ",", "type", "=", "argparse", ".", "FileType", "(", "'r'", ")", ")", "parser", ".", "add_argument", "(", "'comparison_images'", ",", "metavar", "=", "'image path with* or image2.png'", ")", "parser", ".", "add_argument", "(", "'--width'", ",", "type", "=", "int", ",", "default", "=", "None", ",", "help", "=", "'scales the image before computing SSIM'", ")", "parser", ".", "add_argument", "(", "'--height'", ",", "type", "=", "int", ",", "default", "=", "None", ",", "help", "=", "'scales the image before computing SSIM'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "args", ".", "width", "and", "args", ".", "height", ":", "size", "=", "(", "args", ".", "width", ",", "args", ".", "height", ")", "else", ":", "size", "=", "None", "if", "not", "args", ".", "cw", ":", "gaussian_kernel_sigma", "=", "1.5", "gaussian_kernel_width", "=", "11", "gaussian_kernel_1d", "=", "get_gaussian_kernel", "(", "gaussian_kernel_width", ",", "gaussian_kernel_sigma", ")", "comparison_images", "=", "glob", ".", "glob", "(", "args", ".", "comparison_images", ")", "is_a_single_image", "=", "len", "(", "comparison_images", ")", "==", "1", "for", "comparison_image", "in", "comparison_images", ":", "if", "args", ".", "cw", ":", "ssim", "=", "SSIM", "(", "args", ".", "base_image", ".", "name", ",", "size", "=", "size", ")", "ssim_value", "=", "ssim", ".", "cw_ssim_value", "(", "comparison_image", ")", "else", ":", "ssim", "=", "SSIM", "(", "args", ".", "base_image", ".", "name", ",", "gaussian_kernel_1d", ",", "size", "=", "size", ")", "ssim_value", "=", "ssim", ".", "ssim_value", "(", "comparison_image", ")", "if", "is_a_single_image", ":", "sys", ".", "stdout", ".", "write", "(", "'%.7g'", "%", "ssim_value", ")", "else", ":", "sys", ".", "stdout", ".", "write", "(", "'%s - %s: %.7g'", "%", "(", "args", ".", "base_image", ".", "name", ",", "comparison_image", ",", "ssim_value", ")", ")", "sys", ".", "stdout", ".", "write", "(", "'\\n'", ")" ]
ff9bd90c3eb7525013ad46babf66b7cc78391e89
test
SSIM.ssim_value
Compute the SSIM value from the reference image to the target image. Args: target (str or PIL.Image): Input image to compare the reference image to. This may be a PIL Image object or, to save time, an SSIMImage object (e.g. the img member of another SSIM object). Returns: Computed SSIM float value.
ssim/ssimlib.py
def ssim_value(self, target): """Compute the SSIM value from the reference image to the target image. Args: target (str or PIL.Image): Input image to compare the reference image to. This may be a PIL Image object or, to save time, an SSIMImage object (e.g. the img member of another SSIM object). Returns: Computed SSIM float value. """ # Performance boost if handed a compatible SSIMImage object. if not isinstance(target, SSIMImage) \ or not np.array_equal(self.gaussian_kernel_1d, target.gaussian_kernel_1d): target = SSIMImage(target, self.gaussian_kernel_1d, self.img.size) img_mat_12 = self.img.img_gray * target.img_gray img_mat_sigma_12 = convolve_gaussian_2d( img_mat_12, self.gaussian_kernel_1d) img_mat_mu_12 = self.img.img_gray_mu * target.img_gray_mu img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12 # Numerator of SSIM num_ssim = ((2 * img_mat_mu_12 + self.c_1) * (2 * img_mat_sigma_12 + self.c_2)) # Denominator of SSIM den_ssim = ( (self.img.img_gray_mu_squared + target.img_gray_mu_squared + self.c_1) * (self.img.img_gray_sigma_squared + target.img_gray_sigma_squared + self.c_2)) ssim_map = num_ssim / den_ssim index = np.average(ssim_map) return index
def ssim_value(self, target): """Compute the SSIM value from the reference image to the target image. Args: target (str or PIL.Image): Input image to compare the reference image to. This may be a PIL Image object or, to save time, an SSIMImage object (e.g. the img member of another SSIM object). Returns: Computed SSIM float value. """ # Performance boost if handed a compatible SSIMImage object. if not isinstance(target, SSIMImage) \ or not np.array_equal(self.gaussian_kernel_1d, target.gaussian_kernel_1d): target = SSIMImage(target, self.gaussian_kernel_1d, self.img.size) img_mat_12 = self.img.img_gray * target.img_gray img_mat_sigma_12 = convolve_gaussian_2d( img_mat_12, self.gaussian_kernel_1d) img_mat_mu_12 = self.img.img_gray_mu * target.img_gray_mu img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12 # Numerator of SSIM num_ssim = ((2 * img_mat_mu_12 + self.c_1) * (2 * img_mat_sigma_12 + self.c_2)) # Denominator of SSIM den_ssim = ( (self.img.img_gray_mu_squared + target.img_gray_mu_squared + self.c_1) * (self.img.img_gray_sigma_squared + target.img_gray_sigma_squared + self.c_2)) ssim_map = num_ssim / den_ssim index = np.average(ssim_map) return index
[ "Compute", "the", "SSIM", "value", "from", "the", "reference", "image", "to", "the", "target", "image", "." ]
jterrace/pyssim
python
https://github.com/jterrace/pyssim/blob/ff9bd90c3eb7525013ad46babf66b7cc78391e89/ssim/ssimlib.py#L109-L145
[ "def", "ssim_value", "(", "self", ",", "target", ")", ":", "# Performance boost if handed a compatible SSIMImage object.", "if", "not", "isinstance", "(", "target", ",", "SSIMImage", ")", "or", "not", "np", ".", "array_equal", "(", "self", ".", "gaussian_kernel_1d", ",", "target", ".", "gaussian_kernel_1d", ")", ":", "target", "=", "SSIMImage", "(", "target", ",", "self", ".", "gaussian_kernel_1d", ",", "self", ".", "img", ".", "size", ")", "img_mat_12", "=", "self", ".", "img", ".", "img_gray", "*", "target", ".", "img_gray", "img_mat_sigma_12", "=", "convolve_gaussian_2d", "(", "img_mat_12", ",", "self", ".", "gaussian_kernel_1d", ")", "img_mat_mu_12", "=", "self", ".", "img", ".", "img_gray_mu", "*", "target", ".", "img_gray_mu", "img_mat_sigma_12", "=", "img_mat_sigma_12", "-", "img_mat_mu_12", "# Numerator of SSIM", "num_ssim", "=", "(", "(", "2", "*", "img_mat_mu_12", "+", "self", ".", "c_1", ")", "*", "(", "2", "*", "img_mat_sigma_12", "+", "self", ".", "c_2", ")", ")", "# Denominator of SSIM", "den_ssim", "=", "(", "(", "self", ".", "img", ".", "img_gray_mu_squared", "+", "target", ".", "img_gray_mu_squared", "+", "self", ".", "c_1", ")", "*", "(", "self", ".", "img", ".", "img_gray_sigma_squared", "+", "target", ".", "img_gray_sigma_squared", "+", "self", ".", "c_2", ")", ")", "ssim_map", "=", "num_ssim", "/", "den_ssim", "index", "=", "np", ".", "average", "(", "ssim_map", ")", "return", "index" ]
ff9bd90c3eb7525013ad46babf66b7cc78391e89
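As the docstring above hints, constructing SSIM once and reusing it across comparisons avoids recomputing the reference-image statistics; a sketch with placeholder file names and import paths taken from the file locations in these records:

from ssim.utils import get_gaussian_kernel  # assumed import paths
from ssim.ssimlib import SSIM

kernel_1d = get_gaussian_kernel(11, 1.5)
ref = SSIM('reference.png', kernel_1d)      # hypothetical file names
for candidate in ('a.png', 'b.png'):
    print(candidate, ref.ssim_value(candidate))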
test
SSIM.cw_ssim_value
Compute the complex wavelet SSIM (CW-SSIM) value from the reference image to the target image. Args: target (str or PIL.Image): Input image to compare the reference image to. This may be a PIL Image object or, to save time, an SSIMImage object (e.g. the img member of another SSIM object). width: width for the wavelet convolution (default: 30) Returns: Computed CW-SSIM float value.
ssim/ssimlib.py
def cw_ssim_value(self, target, width=30): """Compute the complex wavelet SSIM (CW-SSIM) value from the reference image to the target image. Args: target (str or PIL.Image): Input image to compare the reference image to. This may be a PIL Image object or, to save time, an SSIMImage object (e.g. the img member of another SSIM object). width: width for the wavelet convolution (default: 30) Returns: Computed CW-SSIM float value. """ if not isinstance(target, SSIMImage): target = SSIMImage(target, size=self.img.size) # Define a width for the wavelet convolution widths = np.arange(1, width+1) # Use the image data as arrays sig1 = np.asarray(self.img.img_gray.getdata()) sig2 = np.asarray(target.img_gray.getdata()) # Convolution cwtmatr1 = signal.cwt(sig1, signal.ricker, widths) cwtmatr2 = signal.cwt(sig2, signal.ricker, widths) # Compute the first term c1c2 = np.multiply(abs(cwtmatr1), abs(cwtmatr2)) c1_2 = np.square(abs(cwtmatr1)) c2_2 = np.square(abs(cwtmatr2)) num_ssim_1 = 2 * np.sum(c1c2, axis=0) + self.k den_ssim_1 = np.sum(c1_2, axis=0) + np.sum(c2_2, axis=0) + self.k # Compute the second term c1c2_conj = np.multiply(cwtmatr1, np.conjugate(cwtmatr2)) num_ssim_2 = 2 * np.abs(np.sum(c1c2_conj, axis=0)) + self.k den_ssim_2 = 2 * np.sum(np.abs(c1c2_conj), axis=0) + self.k # Construct the result ssim_map = (num_ssim_1 / den_ssim_1) * (num_ssim_2 / den_ssim_2) # Average the per pixel results index = np.average(ssim_map) return index
def cw_ssim_value(self, target, width=30): """Compute the complex wavelet SSIM (CW-SSIM) value from the reference image to the target image. Args: target (str or PIL.Image): Input image to compare the reference image to. This may be a PIL Image object or, to save time, an SSIMImage object (e.g. the img member of another SSIM object). width: width for the wavelet convolution (default: 30) Returns: Computed CW-SSIM float value. """ if not isinstance(target, SSIMImage): target = SSIMImage(target, size=self.img.size) # Define a width for the wavelet convolution widths = np.arange(1, width+1) # Use the image data as arrays sig1 = np.asarray(self.img.img_gray.getdata()) sig2 = np.asarray(target.img_gray.getdata()) # Convolution cwtmatr1 = signal.cwt(sig1, signal.ricker, widths) cwtmatr2 = signal.cwt(sig2, signal.ricker, widths) # Compute the first term c1c2 = np.multiply(abs(cwtmatr1), abs(cwtmatr2)) c1_2 = np.square(abs(cwtmatr1)) c2_2 = np.square(abs(cwtmatr2)) num_ssim_1 = 2 * np.sum(c1c2, axis=0) + self.k den_ssim_1 = np.sum(c1_2, axis=0) + np.sum(c2_2, axis=0) + self.k # Compute the second term c1c2_conj = np.multiply(cwtmatr1, np.conjugate(cwtmatr2)) num_ssim_2 = 2 * np.abs(np.sum(c1c2_conj, axis=0)) + self.k den_ssim_2 = 2 * np.sum(np.abs(c1c2_conj), axis=0) + self.k # Construct the result ssim_map = (num_ssim_1 / den_ssim_1) * (num_ssim_2 / den_ssim_2) # Average the per pixel results index = np.average(ssim_map) return index
[ "Compute", "the", "complex", "wavelet", "SSIM", "(", "CW", "-", "SSIM", ")", "value", "from", "the", "reference", "image", "to", "the", "target", "image", "." ]
jterrace/pyssim
python
https://github.com/jterrace/pyssim/blob/ff9bd90c3eb7525013ad46babf66b7cc78391e89/ssim/ssimlib.py#L147-L191
[ "def", "cw_ssim_value", "(", "self", ",", "target", ",", "width", "=", "30", ")", ":", "if", "not", "isinstance", "(", "target", ",", "SSIMImage", ")", ":", "target", "=", "SSIMImage", "(", "target", ",", "size", "=", "self", ".", "img", ".", "size", ")", "# Define a width for the wavelet convolution", "widths", "=", "np", ".", "arange", "(", "1", ",", "width", "+", "1", ")", "# Use the image data as arrays", "sig1", "=", "np", ".", "asarray", "(", "self", ".", "img", ".", "img_gray", ".", "getdata", "(", ")", ")", "sig2", "=", "np", ".", "asarray", "(", "target", ".", "img_gray", ".", "getdata", "(", ")", ")", "# Convolution", "cwtmatr1", "=", "signal", ".", "cwt", "(", "sig1", ",", "signal", ".", "ricker", ",", "widths", ")", "cwtmatr2", "=", "signal", ".", "cwt", "(", "sig2", ",", "signal", ".", "ricker", ",", "widths", ")", "# Compute the first term", "c1c2", "=", "np", ".", "multiply", "(", "abs", "(", "cwtmatr1", ")", ",", "abs", "(", "cwtmatr2", ")", ")", "c1_2", "=", "np", ".", "square", "(", "abs", "(", "cwtmatr1", ")", ")", "c2_2", "=", "np", ".", "square", "(", "abs", "(", "cwtmatr2", ")", ")", "num_ssim_1", "=", "2", "*", "np", ".", "sum", "(", "c1c2", ",", "axis", "=", "0", ")", "+", "self", ".", "k", "den_ssim_1", "=", "np", ".", "sum", "(", "c1_2", ",", "axis", "=", "0", ")", "+", "np", ".", "sum", "(", "c2_2", ",", "axis", "=", "0", ")", "+", "self", ".", "k", "# Compute the second term", "c1c2_conj", "=", "np", ".", "multiply", "(", "cwtmatr1", ",", "np", ".", "conjugate", "(", "cwtmatr2", ")", ")", "num_ssim_2", "=", "2", "*", "np", ".", "abs", "(", "np", ".", "sum", "(", "c1c2_conj", ",", "axis", "=", "0", ")", ")", "+", "self", ".", "k", "den_ssim_2", "=", "2", "*", "np", ".", "sum", "(", "np", ".", "abs", "(", "c1c2_conj", ")", ",", "axis", "=", "0", ")", "+", "self", ".", "k", "# Construct the result", "ssim_map", "=", "(", "num_ssim_1", "/", "den_ssim_1", ")", "*", "(", "num_ssim_2", "/", "den_ssim_2", ")", "# Average the per pixel results", "index", "=", "np", ".", "average", "(", "ssim_map", ")", "return", "index" ]
ff9bd90c3eb7525013ad46babf66b7cc78391e89
test
compute_ssim
Computes SSIM. Args: image1: First PIL Image object to compare. image2: Second PIL Image object to compare. Returns: SSIM float value.
ssim/__init__.py
def compute_ssim(image1, image2, gaussian_kernel_sigma=1.5, gaussian_kernel_width=11): """Computes SSIM. Args: im1: First PIL Image object to compare. im2: Second PIL Image object to compare. Returns: SSIM float value. """ gaussian_kernel_1d = get_gaussian_kernel( gaussian_kernel_width, gaussian_kernel_sigma) return SSIM(image1, gaussian_kernel_1d).ssim_value(image2)
def compute_ssim(image1, image2, gaussian_kernel_sigma=1.5, gaussian_kernel_width=11): """Computes SSIM. Args: im1: First PIL Image object to compare. im2: Second PIL Image object to compare. Returns: SSIM float value. """ gaussian_kernel_1d = get_gaussian_kernel( gaussian_kernel_width, gaussian_kernel_sigma) return SSIM(image1, gaussian_kernel_1d).ssim_value(image2)
[ "Computes", "SSIM", "." ]
jterrace/pyssim
python
https://github.com/jterrace/pyssim/blob/ff9bd90c3eb7525013ad46babf66b7cc78391e89/ssim/__init__.py#L10-L23
[ "def", "compute_ssim", "(", "image1", ",", "image2", ",", "gaussian_kernel_sigma", "=", "1.5", ",", "gaussian_kernel_width", "=", "11", ")", ":", "gaussian_kernel_1d", "=", "get_gaussian_kernel", "(", "gaussian_kernel_width", ",", "gaussian_kernel_sigma", ")", "return", "SSIM", "(", "image1", ",", "gaussian_kernel_1d", ")", ".", "ssim_value", "(", "image2", ")" ]
ff9bd90c3eb7525013ad46babf66b7cc78391e89
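The convenience wrapper above can be called directly with two PIL images, per its docstring; a sketch with placeholder file names:

from PIL import Image
from ssim import compute_ssim  # assumed public import path (the function lives in ssim/__init__.py)

value = compute_ssim(Image.open('a.png'), Image.open('b.png'))  # hypothetical files
print('SSIM: %.4f' % value)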
test
replicated
Replicated decorator. Use it to mark class members that modify class state. The function will be called asynchronously. The function accepts the following additional parameters (optional): 'callback': callback(result, failReason), failReason - `FAIL_REASON <#pysyncobj.FAIL_REASON>`_. 'sync': True - block execution and wait for the result, False - async call. If a callback is passed, the 'sync' option is ignored. 'timeout': if 'sync' is enabled and no result is available for 'timeout' seconds, SyncObjException will be raised. These parameters are reserved and should not be used in the kwargs of your replicated method. :param func: arbitrary class member :type func: function :param ver: (optional) - code version (for zero-downtime deployment) :type ver: int
pysyncobj/syncobj.py
def replicated(*decArgs, **decKwargs): """Replicated decorator. Use it to mark your class members that modifies a class state. Function will be called asynchronously. Function accepts flowing additional parameters (optional): 'callback': callback(result, failReason), failReason - `FAIL_REASON <#pysyncobj.FAIL_REASON>`_. 'sync': True - to block execution and wait for result, False - async call. If callback is passed, 'sync' option is ignored. 'timeout': if 'sync' is enabled, and no result is available for 'timeout' seconds - SyncObjException will be raised. These parameters are reserved and should not be used in kwargs of your replicated method. :param func: arbitrary class member :type func: function :param ver: (optional) - code version (for zero deployment) :type ver: int """ def replicatedImpl(func): def newFunc(self, *args, **kwargs): if kwargs.pop('_doApply', False): return func(self, *args, **kwargs) else: if isinstance(self, SyncObj): applier = self._applyCommand funcName = self._getFuncName(func.__name__) funcID = self._methodToID[funcName] elif isinstance(self, SyncObjConsumer): consumerId = id(self) funcName = self._syncObj._getFuncName((consumerId, func.__name__)) funcID = self._syncObj._methodToID[(consumerId, funcName)] applier = self._syncObj._applyCommand else: raise SyncObjException("Class should be inherited from SyncObj or SyncObjConsumer") callback = kwargs.pop('callback', None) if kwargs: cmd = (funcID, args, kwargs) elif args and not kwargs: cmd = (funcID, args) else: cmd = funcID sync = kwargs.pop('sync', False) if callback is not None: sync = False if sync: asyncResult = AsyncResult() callback = asyncResult.onResult timeout = kwargs.pop('timeout', None) applier(pickle.dumps(cmd), callback, _COMMAND_TYPE.REGULAR) if sync: res = asyncResult.event.wait(timeout) if not res: raise SyncObjException('Timeout') if not asyncResult.error == 0: raise SyncObjException(asyncResult.error) return asyncResult.result func_dict = newFunc.__dict__ if is_py3 else newFunc.func_dict func_dict['replicated'] = True func_dict['ver'] = int(decKwargs.get('ver', 0)) func_dict['origName'] = func.__name__ callframe = sys._getframe(1 if decKwargs else 2) namespace = callframe.f_locals newFuncName = func.__name__ + '_v' + str(func_dict['ver']) namespace[newFuncName] = __copy_func(newFunc, newFuncName) functools.update_wrapper(newFunc, func) return newFunc if len(decArgs) == 1 and len(decKwargs) == 0 and callable(decArgs[0]): return replicatedImpl(decArgs[0]) return replicatedImpl
def replicated(*decArgs, **decKwargs): """Replicated decorator. Use it to mark your class members that modifies a class state. Function will be called asynchronously. Function accepts flowing additional parameters (optional): 'callback': callback(result, failReason), failReason - `FAIL_REASON <#pysyncobj.FAIL_REASON>`_. 'sync': True - to block execution and wait for result, False - async call. If callback is passed, 'sync' option is ignored. 'timeout': if 'sync' is enabled, and no result is available for 'timeout' seconds - SyncObjException will be raised. These parameters are reserved and should not be used in kwargs of your replicated method. :param func: arbitrary class member :type func: function :param ver: (optional) - code version (for zero deployment) :type ver: int """ def replicatedImpl(func): def newFunc(self, *args, **kwargs): if kwargs.pop('_doApply', False): return func(self, *args, **kwargs) else: if isinstance(self, SyncObj): applier = self._applyCommand funcName = self._getFuncName(func.__name__) funcID = self._methodToID[funcName] elif isinstance(self, SyncObjConsumer): consumerId = id(self) funcName = self._syncObj._getFuncName((consumerId, func.__name__)) funcID = self._syncObj._methodToID[(consumerId, funcName)] applier = self._syncObj._applyCommand else: raise SyncObjException("Class should be inherited from SyncObj or SyncObjConsumer") callback = kwargs.pop('callback', None) if kwargs: cmd = (funcID, args, kwargs) elif args and not kwargs: cmd = (funcID, args) else: cmd = funcID sync = kwargs.pop('sync', False) if callback is not None: sync = False if sync: asyncResult = AsyncResult() callback = asyncResult.onResult timeout = kwargs.pop('timeout', None) applier(pickle.dumps(cmd), callback, _COMMAND_TYPE.REGULAR) if sync: res = asyncResult.event.wait(timeout) if not res: raise SyncObjException('Timeout') if not asyncResult.error == 0: raise SyncObjException(asyncResult.error) return asyncResult.result func_dict = newFunc.__dict__ if is_py3 else newFunc.func_dict func_dict['replicated'] = True func_dict['ver'] = int(decKwargs.get('ver', 0)) func_dict['origName'] = func.__name__ callframe = sys._getframe(1 if decKwargs else 2) namespace = callframe.f_locals newFuncName = func.__name__ + '_v' + str(func_dict['ver']) namespace[newFuncName] = __copy_func(newFunc, newFuncName) functools.update_wrapper(newFunc, func) return newFunc if len(decArgs) == 1 and len(decKwargs) == 0 and callable(decArgs[0]): return replicatedImpl(decArgs[0]) return replicatedImpl
[ "Replicated", "decorator", ".", "Use", "it", "to", "mark", "your", "class", "members", "that", "modifies", "a", "class", "state", ".", "Function", "will", "be", "called", "asynchronously", ".", "Function", "accepts", "flowing", "additional", "parameters", "(", "optional", ")", ":", "callback", ":", "callback", "(", "result", "failReason", ")", "failReason", "-", "FAIL_REASON", "<#pysyncobj", ".", "FAIL_REASON", ">", "_", ".", "sync", ":", "True", "-", "to", "block", "execution", "and", "wait", "for", "result", "False", "-", "async", "call", ".", "If", "callback", "is", "passed", "sync", "option", "is", "ignored", ".", "timeout", ":", "if", "sync", "is", "enabled", "and", "no", "result", "is", "available", "for", "timeout", "seconds", "-", "SyncObjException", "will", "be", "raised", ".", "These", "parameters", "are", "reserved", "and", "should", "not", "be", "used", "in", "kwargs", "of", "your", "replicated", "method", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/syncobj.py#L1298-L1373
[ "def", "replicated", "(", "*", "decArgs", ",", "*", "*", "decKwargs", ")", ":", "def", "replicatedImpl", "(", "func", ")", ":", "def", "newFunc", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "pop", "(", "'_doApply'", ",", "False", ")", ":", "return", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "if", "isinstance", "(", "self", ",", "SyncObj", ")", ":", "applier", "=", "self", ".", "_applyCommand", "funcName", "=", "self", ".", "_getFuncName", "(", "func", ".", "__name__", ")", "funcID", "=", "self", ".", "_methodToID", "[", "funcName", "]", "elif", "isinstance", "(", "self", ",", "SyncObjConsumer", ")", ":", "consumerId", "=", "id", "(", "self", ")", "funcName", "=", "self", ".", "_syncObj", ".", "_getFuncName", "(", "(", "consumerId", ",", "func", ".", "__name__", ")", ")", "funcID", "=", "self", ".", "_syncObj", ".", "_methodToID", "[", "(", "consumerId", ",", "funcName", ")", "]", "applier", "=", "self", ".", "_syncObj", ".", "_applyCommand", "else", ":", "raise", "SyncObjException", "(", "\"Class should be inherited from SyncObj or SyncObjConsumer\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "None", ")", "if", "kwargs", ":", "cmd", "=", "(", "funcID", ",", "args", ",", "kwargs", ")", "elif", "args", "and", "not", "kwargs", ":", "cmd", "=", "(", "funcID", ",", "args", ")", "else", ":", "cmd", "=", "funcID", "sync", "=", "kwargs", ".", "pop", "(", "'sync'", ",", "False", ")", "if", "callback", "is", "not", "None", ":", "sync", "=", "False", "if", "sync", ":", "asyncResult", "=", "AsyncResult", "(", ")", "callback", "=", "asyncResult", ".", "onResult", "timeout", "=", "kwargs", ".", "pop", "(", "'timeout'", ",", "None", ")", "applier", "(", "pickle", ".", "dumps", "(", "cmd", ")", ",", "callback", ",", "_COMMAND_TYPE", ".", "REGULAR", ")", "if", "sync", ":", "res", "=", "asyncResult", ".", "event", ".", "wait", "(", "timeout", ")", "if", "not", "res", ":", "raise", "SyncObjException", "(", "'Timeout'", ")", "if", "not", "asyncResult", ".", "error", "==", "0", ":", "raise", "SyncObjException", "(", "asyncResult", ".", "error", ")", "return", "asyncResult", ".", "result", "func_dict", "=", "newFunc", ".", "__dict__", "if", "is_py3", "else", "newFunc", ".", "func_dict", "func_dict", "[", "'replicated'", "]", "=", "True", "func_dict", "[", "'ver'", "]", "=", "int", "(", "decKwargs", ".", "get", "(", "'ver'", ",", "0", ")", ")", "func_dict", "[", "'origName'", "]", "=", "func", ".", "__name__", "callframe", "=", "sys", ".", "_getframe", "(", "1", "if", "decKwargs", "else", "2", ")", "namespace", "=", "callframe", ".", "f_locals", "newFuncName", "=", "func", ".", "__name__", "+", "'_v'", "+", "str", "(", "func_dict", "[", "'ver'", "]", ")", "namespace", "[", "newFuncName", "]", "=", "__copy_func", "(", "newFunc", ",", "newFuncName", ")", "functools", ".", "update_wrapper", "(", "newFunc", ",", "func", ")", "return", "newFunc", "if", "len", "(", "decArgs", ")", "==", "1", "and", "len", "(", "decKwargs", ")", "==", "0", "and", "callable", "(", "decArgs", "[", "0", "]", ")", ":", "return", "replicatedImpl", "(", "decArgs", "[", "0", "]", ")", "return", "replicatedImpl" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
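A compact sketch of the decorator above in use (node addresses are placeholders): the state-mutating method is marked @replicated and then invoked with the extra sync/timeout options described in the docstring.

from pysyncobj import SyncObj, replicated

class Counter(SyncObj):
    def __init__(self, self_addr, partner_addrs):
        super(Counter, self).__init__(self_addr, partner_addrs)
        self.__value = 0

    @replicated
    def inc(self):
        self.__value += 1
        return self.__value

    def get(self):
        return self.__value

counter = Counter('127.0.0.1:4321', ['127.0.0.1:4322', '127.0.0.1:4323'])
counter.inc(sync=True, timeout=5.0)  # blocks until replicated, or raises SyncObjException on timeout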
test
SyncObj.destroy
Correctly destroy SyncObj. Stop autoTickThread, close connections, etc.
pysyncobj/syncobj.py
def destroy(self): """ Correctly destroy SyncObj. Stop autoTickThread, close connections, etc. """ if self.__conf.autoTick: self.__destroying = True else: self._doDestroy()
def destroy(self): """ Correctly destroy SyncObj. Stop autoTickThread, close connections, etc. """ if self.__conf.autoTick: self.__destroying = True else: self._doDestroy()
[ "Correctly", "destroy", "SyncObj", ".", "Stop", "autoTickThread", "close", "connections", "etc", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/syncobj.py#L274-L281
[ "def", "destroy", "(", "self", ")", ":", "if", "self", ".", "__conf", ".", "autoTick", ":", "self", ".", "__destroying", "=", "True", "else", ":", "self", ".", "_doDestroy", "(", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
test
SyncObj.waitBinded
Waits until initialized (port bound). On success it simply returns; if initialization fails after conf.maxBindRetries, SyncObjException is raised.
pysyncobj/syncobj.py
def waitBinded(self): """ Waits until initialized (binded port). If success - just returns. If failed to initialized after conf.maxBindRetries - raise SyncObjException. """ try: self.__transport.waitReady() except TransportNotReadyError: raise SyncObjException('BindError') if not self.__transport.ready: raise SyncObjException('BindError')
def waitBinded(self): """ Waits until initialized (binded port). If success - just returns. If failed to initialized after conf.maxBindRetries - raise SyncObjException. """ try: self.__transport.waitReady() except TransportNotReadyError: raise SyncObjException('BindError') if not self.__transport.ready: raise SyncObjException('BindError')
[ "Waits", "until", "initialized", "(", "binded", "port", ")", ".", "If", "success", "-", "just", "returns", ".", "If", "failed", "to", "initialized", "after", "conf", ".", "maxBindRetries", "-", "raise", "SyncObjException", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/syncobj.py#L291-L302
[ "def", "waitBinded", "(", "self", ")", ":", "try", ":", "self", ".", "__transport", ".", "waitReady", "(", ")", "except", "TransportNotReadyError", ":", "raise", "SyncObjException", "(", "'BindError'", ")", "if", "not", "self", ".", "__transport", ".", "ready", ":", "raise", "SyncObjException", "(", "'BindError'", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
test
SyncObj.setCodeVersion
Switch to a new code version on all cluster nodes. You should ensure that cluster nodes are updated, otherwise they won't be able to apply commands. :param newVersion: new code version :type newVersion: int :param callback: will be called on success or failure :type callback: function(`FAIL_REASON <#pysyncobj.FAIL_REASON>`_, None)
pysyncobj/syncobj.py
def setCodeVersion(self, newVersion, callback = None): """Switch to a new code version on all cluster nodes. You should ensure that cluster nodes are updated, otherwise they won't be able to apply commands. :param newVersion: new code version :type int :param callback: will be called on cussess or fail :type callback: function(`FAIL_REASON <#pysyncobj.FAIL_REASON>`_, None) """ assert isinstance(newVersion, int) if newVersion > self.__selfCodeVersion: raise Exception('wrong version, current version is %d, requested version is %d' % (self.__selfCodeVersion, newVersion)) if newVersion < self.__enabledCodeVersion: raise Exception('wrong version, enabled version is %d, requested version is %d' % (self.__enabledCodeVersion, newVersion)) self._applyCommand(pickle.dumps(newVersion), callback, _COMMAND_TYPE.VERSION)
def setCodeVersion(self, newVersion, callback = None): """Switch to a new code version on all cluster nodes. You should ensure that cluster nodes are updated, otherwise they won't be able to apply commands. :param newVersion: new code version :type int :param callback: will be called on cussess or fail :type callback: function(`FAIL_REASON <#pysyncobj.FAIL_REASON>`_, None) """ assert isinstance(newVersion, int) if newVersion > self.__selfCodeVersion: raise Exception('wrong version, current version is %d, requested version is %d' % (self.__selfCodeVersion, newVersion)) if newVersion < self.__enabledCodeVersion: raise Exception('wrong version, enabled version is %d, requested version is %d' % (self.__enabledCodeVersion, newVersion)) self._applyCommand(pickle.dumps(newVersion), callback, _COMMAND_TYPE.VERSION)
[ "Switch", "to", "a", "new", "code", "version", "on", "all", "cluster", "nodes", ".", "You", "should", "ensure", "that", "cluster", "nodes", "are", "updated", "otherwise", "they", "won", "t", "be", "able", "to", "apply", "commands", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/syncobj.py#L316-L331
[ "def", "setCodeVersion", "(", "self", ",", "newVersion", ",", "callback", "=", "None", ")", ":", "assert", "isinstance", "(", "newVersion", ",", "int", ")", "if", "newVersion", ">", "self", ".", "__selfCodeVersion", ":", "raise", "Exception", "(", "'wrong version, current version is %d, requested version is %d'", "%", "(", "self", ".", "__selfCodeVersion", ",", "newVersion", ")", ")", "if", "newVersion", "<", "self", ".", "__enabledCodeVersion", ":", "raise", "Exception", "(", "'wrong version, enabled version is %d, requested version is %d'", "%", "(", "self", ".", "__enabledCodeVersion", ",", "newVersion", ")", ")", "self", ".", "_applyCommand", "(", "pickle", ".", "dumps", "(", "newVersion", ")", ",", "callback", ",", "_COMMAND_TYPE", ".", "VERSION", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
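A hedged sketch of the zero-downtime flow implied above: code containing methods decorated with @replicated(ver=1) is deployed to every node first, then the active version is bumped cluster-wide (the 'counter' object is the hypothetical Counter instance from the earlier sketch):

# all nodes are assumed to already run code defining, e.g.:
#   @replicated(ver=1)
#   def inc(self, delta=1): ...
counter.setCodeVersion(1)  # asynchronously switches the whole cluster to code version 1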
test
SyncObj.removeNodeFromCluster
Remove single node from cluster (dynamic membership changes). Async. You should wait until the node is successfully removed before removing the next node. :param node: node object or 'nodeHost:nodePort' :type node: Node | str :param callback: will be called on success or failure :type callback: function(`FAIL_REASON <#pysyncobj.FAIL_REASON>`_, None)
pysyncobj/syncobj.py
def removeNodeFromCluster(self, node, callback = None): """Remove single node from cluster (dynamic membership changes). Async. You should wait until node successfully added before adding next node. :param node: node object or 'nodeHost:nodePort' :type node: Node | str :param callback: will be called on success or fail :type callback: function(`FAIL_REASON <#pysyncobj.FAIL_REASON>`_, None) """ if not self.__conf.dynamicMembershipChange: raise Exception('dynamicMembershipChange is disabled') if not isinstance(node, Node): node = self.__nodeClass(node) self._applyCommand(pickle.dumps(['rem', node.id, node]), callback, _COMMAND_TYPE.MEMBERSHIP)
def removeNodeFromCluster(self, node, callback = None): """Remove single node from cluster (dynamic membership changes). Async. You should wait until node successfully added before adding next node. :param node: node object or 'nodeHost:nodePort' :type node: Node | str :param callback: will be called on success or fail :type callback: function(`FAIL_REASON <#pysyncobj.FAIL_REASON>`_, None) """ if not self.__conf.dynamicMembershipChange: raise Exception('dynamicMembershipChange is disabled') if not isinstance(node, Node): node = self.__nodeClass(node) self._applyCommand(pickle.dumps(['rem', node.id, node]), callback, _COMMAND_TYPE.MEMBERSHIP)
[ "Remove", "single", "node", "from", "cluster", "(", "dynamic", "membership", "changes", ")", ".", "Async", ".", "You", "should", "wait", "until", "node", "successfully", "added", "before", "adding", "next", "node", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/syncobj.py#L349-L363
[ "def", "removeNodeFromCluster", "(", "self", ",", "node", ",", "callback", "=", "None", ")", ":", "if", "not", "self", ".", "__conf", ".", "dynamicMembershipChange", ":", "raise", "Exception", "(", "'dynamicMembershipChange is disabled'", ")", "if", "not", "isinstance", "(", "node", ",", "Node", ")", ":", "node", "=", "self", ".", "__nodeClass", "(", "node", ")", "self", ".", "_applyCommand", "(", "pickle", ".", "dumps", "(", "[", "'rem'", ",", "node", ".", "id", ",", "node", "]", ")", ",", "callback", ",", "_COMMAND_TYPE", ".", "MEMBERSHIP", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
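Dynamic membership has to be enabled in the configuration before the call above is accepted; a sketch in which the SyncObjConf flag name is inferred from the attribute checked in the code, and all addresses are placeholders:

from pysyncobj import SyncObj, SyncObjConf

conf = SyncObjConf(dynamicMembershipChange=True)  # assumed constructor kwarg matching conf.dynamicMembershipChange
obj = SyncObj('127.0.0.1:4321', ['127.0.0.1:4322'], conf=conf)
obj.removeNodeFromCluster('127.0.0.1:4322',
                          callback=lambda *a: print('remove result:', a))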
test
SyncObj.getStatus
Dumps various debug info about the cluster into a dict and returns it.
pysyncobj/syncobj.py
def getStatus(self): """Dumps different debug info about cluster to dict and return it""" status = {} status['version'] = VERSION status['revision'] = REVISION status['self'] = self.__selfNode status['state'] = self.__raftState status['leader'] = self.__raftLeader status['partner_nodes_count'] = len(self.__otherNodes) for node in self.__otherNodes: status['partner_node_status_server_' + node.id] = 2 if node in self.__connectedNodes else 0 status['readonly_nodes_count'] = len(self.__readonlyNodes) for node in self.__readonlyNodes: status['readonly_node_status_server_' + node.id] = 2 if node in self.__connectedNodes else 0 status['log_len'] = len(self.__raftLog) status['last_applied'] = self.__raftLastApplied status['commit_idx'] = self.__raftCommitIndex status['raft_term'] = self.__raftCurrentTerm status['next_node_idx_count'] = len(self.__raftNextIndex) for node, idx in iteritems(self.__raftNextIndex): status['next_node_idx_server_' + node.id] = idx status['match_idx_count'] = len(self.__raftMatchIndex) for node, idx in iteritems(self.__raftMatchIndex): status['match_idx_server_' + node.id] = idx status['leader_commit_idx'] = self.__leaderCommitIndex status['uptime'] = int(time.time() - self.__startTime) status['self_code_version'] = self.__selfCodeVersion status['enabled_code_version'] = self.__enabledCodeVersion return status
def getStatus(self): """Dumps different debug info about cluster to dict and return it""" status = {} status['version'] = VERSION status['revision'] = REVISION status['self'] = self.__selfNode status['state'] = self.__raftState status['leader'] = self.__raftLeader status['partner_nodes_count'] = len(self.__otherNodes) for node in self.__otherNodes: status['partner_node_status_server_' + node.id] = 2 if node in self.__connectedNodes else 0 status['readonly_nodes_count'] = len(self.__readonlyNodes) for node in self.__readonlyNodes: status['readonly_node_status_server_' + node.id] = 2 if node in self.__connectedNodes else 0 status['log_len'] = len(self.__raftLog) status['last_applied'] = self.__raftLastApplied status['commit_idx'] = self.__raftCommitIndex status['raft_term'] = self.__raftCurrentTerm status['next_node_idx_count'] = len(self.__raftNextIndex) for node, idx in iteritems(self.__raftNextIndex): status['next_node_idx_server_' + node.id] = idx status['match_idx_count'] = len(self.__raftMatchIndex) for node, idx in iteritems(self.__raftMatchIndex): status['match_idx_server_' + node.id] = idx status['leader_commit_idx'] = self.__leaderCommitIndex status['uptime'] = int(time.time() - self.__startTime) status['self_code_version'] = self.__selfCodeVersion status['enabled_code_version'] = self.__enabledCodeVersion return status
[ "Dumps", "different", "debug", "info", "about", "cluster", "to", "dict", "and", "return", "it" ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/syncobj.py#L630-L659
[ "def", "getStatus", "(", "self", ")", ":", "status", "=", "{", "}", "status", "[", "'version'", "]", "=", "VERSION", "status", "[", "'revision'", "]", "=", "REVISION", "status", "[", "'self'", "]", "=", "self", ".", "__selfNode", "status", "[", "'state'", "]", "=", "self", ".", "__raftState", "status", "[", "'leader'", "]", "=", "self", ".", "__raftLeader", "status", "[", "'partner_nodes_count'", "]", "=", "len", "(", "self", ".", "__otherNodes", ")", "for", "node", "in", "self", ".", "__otherNodes", ":", "status", "[", "'partner_node_status_server_'", "+", "node", ".", "id", "]", "=", "2", "if", "node", "in", "self", ".", "__connectedNodes", "else", "0", "status", "[", "'readonly_nodes_count'", "]", "=", "len", "(", "self", ".", "__readonlyNodes", ")", "for", "node", "in", "self", ".", "__readonlyNodes", ":", "status", "[", "'readonly_node_status_server_'", "+", "node", ".", "id", "]", "=", "2", "if", "node", "in", "self", ".", "__connectedNodes", "else", "0", "status", "[", "'log_len'", "]", "=", "len", "(", "self", ".", "__raftLog", ")", "status", "[", "'last_applied'", "]", "=", "self", ".", "__raftLastApplied", "status", "[", "'commit_idx'", "]", "=", "self", ".", "__raftCommitIndex", "status", "[", "'raft_term'", "]", "=", "self", ".", "__raftCurrentTerm", "status", "[", "'next_node_idx_count'", "]", "=", "len", "(", "self", ".", "__raftNextIndex", ")", "for", "node", ",", "idx", "in", "iteritems", "(", "self", ".", "__raftNextIndex", ")", ":", "status", "[", "'next_node_idx_server_'", "+", "node", ".", "id", "]", "=", "idx", "status", "[", "'match_idx_count'", "]", "=", "len", "(", "self", ".", "__raftMatchIndex", ")", "for", "node", ",", "idx", "in", "iteritems", "(", "self", ".", "__raftMatchIndex", ")", ":", "status", "[", "'match_idx_server_'", "+", "node", ".", "id", "]", "=", "idx", "status", "[", "'leader_commit_idx'", "]", "=", "self", ".", "__leaderCommitIndex", "status", "[", "'uptime'", "]", "=", "int", "(", "time", ".", "time", "(", ")", "-", "self", ".", "__startTime", ")", "status", "[", "'self_code_version'", "]", "=", "self", ".", "__selfCodeVersion", "status", "[", "'enabled_code_version'", "]", "=", "self", ".", "__enabledCodeVersion", "return", "status" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
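A short sketch of inspecting the returned dict, assuming an existing SyncObj instance named obj; the keys and the "2 means connected" convention come straight from getStatus above.

status = obj.getStatus()
print('leader:', status['leader'])
print('term / commit idx / last applied:',
      status['raft_term'], status['commit_idx'], status['last_applied'])
connected = sum(1 for k, v in status.items()
                if k.startswith('partner_node_status_server_') and v == 2)
print('connected partners: %d of %d' % (connected, status['partner_nodes_count']))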
test
SyncObj.printStatus
Dumps different debug info about cluster to default logger
pysyncobj/syncobj.py
def printStatus(self): """Dumps different debug info about cluster to default logger""" status = self.getStatus() for k, v in iteritems(status): logging.info('%s: %s' % (str(k), str(v)))
def printStatus(self): """Dumps different debug info about cluster to default logger""" status = self.getStatus() for k, v in iteritems(status): logging.info('%s: %s' % (str(k), str(v)))
[ "Dumps", "different", "debug", "info", "about", "cluster", "to", "default", "logger" ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/syncobj.py#L664-L668
[ "def", "printStatus", "(", "self", ")", ":", "status", "=", "self", ".", "getStatus", "(", ")", "for", "k", ",", "v", "in", "iteritems", "(", "status", ")", ":", "logging", ".", "info", "(", "'%s: %s'", "%", "(", "str", "(", "k", ")", ",", "str", "(", "v", ")", ")", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
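Because the dump goes through logging.info, nothing is visible unless the root logger is configured; a tiny sketch, again assuming an existing SyncObj instance obj.

import logging

logging.basicConfig(level=logging.INFO)  # printStatus logs one 'key: value' line per status entry
obj.printStatus()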
test
TCPTransport._connToNode
Find the node to which a connection belongs. :param conn: connection object :type conn: TcpConnection :returns corresponding node or None if the node cannot be found :rtype Node or None
pysyncobj/transport.py
def _connToNode(self, conn): """ Find the node to which a connection belongs. :param conn: connection object :type conn: TcpConnection :returns corresponding node or None if the node cannot be found :rtype Node or None """ for node in self._connections: if self._connections[node] is conn: return node return None
def _connToNode(self, conn): """ Find the node to which a connection belongs. :param conn: connection object :type conn: TcpConnection :returns corresponding node or None if the node cannot be found :rtype Node or None """ for node in self._connections: if self._connections[node] is conn: return node return None
[ "Find", "the", "node", "to", "which", "a", "connection", "belongs", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/transport.py#L211-L224
[ "def", "_connToNode", "(", "self", ",", "conn", ")", ":", "for", "node", "in", "self", ".", "_connections", ":", "if", "self", ".", "_connections", "[", "node", "]", "is", "conn", ":", "return", "node", "return", "None" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
test
TCPTransport._createServer
Create the TCP server (but don't bind yet)
pysyncobj/transport.py
def _createServer(self): """ Create the TCP server (but don't bind yet) """ conf = self._syncObj.conf bindAddr = conf.bindAddress or getattr(self._selfNode, 'address') if not bindAddr: raise RuntimeError('Unable to determine bind address') host, port = bindAddr.rsplit(':', 1) host = globalDnsResolver().resolve(host) self._server = TcpServer(self._syncObj._poller, host, port, onNewConnection = self._onNewIncomingConnection, sendBufferSize = conf.sendBufferSize, recvBufferSize = conf.recvBufferSize, connectionTimeout = conf.connectionTimeout)
def _createServer(self): """ Create the TCP server (but don't bind yet) """ conf = self._syncObj.conf bindAddr = conf.bindAddress or getattr(self._selfNode, 'address') if not bindAddr: raise RuntimeError('Unable to determine bind address') host, port = bindAddr.rsplit(':', 1) host = globalDnsResolver().resolve(host) self._server = TcpServer(self._syncObj._poller, host, port, onNewConnection = self._onNewIncomingConnection, sendBufferSize = conf.sendBufferSize, recvBufferSize = conf.recvBufferSize, connectionTimeout = conf.connectionTimeout)
[ "Create", "the", "TCP", "server", "(", "but", "don", "t", "bind", "yet", ")" ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/transport.py#L239-L253
[ "def", "_createServer", "(", "self", ")", ":", "conf", "=", "self", ".", "_syncObj", ".", "conf", "bindAddr", "=", "conf", ".", "bindAddress", "or", "getattr", "(", "self", ".", "_selfNode", ",", "'address'", ")", "if", "not", "bindAddr", ":", "raise", "RuntimeError", "(", "'Unable to determine bind address'", ")", "host", ",", "port", "=", "bindAddr", ".", "rsplit", "(", "':'", ",", "1", ")", "host", "=", "globalDnsResolver", "(", ")", ".", "resolve", "(", "host", ")", "self", ".", "_server", "=", "TcpServer", "(", "self", ".", "_syncObj", ".", "_poller", ",", "host", ",", "port", ",", "onNewConnection", "=", "self", ".", "_onNewIncomingConnection", ",", "sendBufferSize", "=", "conf", ".", "sendBufferSize", ",", "recvBufferSize", "=", "conf", ".", "recvBufferSize", ",", "connectionTimeout", "=", "conf", ".", "connectionTimeout", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
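The listening socket can be bound to a different address than the one advertised to partners; a hedged sketch, assuming bindAddress is the SyncObjConf field that _createServer reads as conf.bindAddress (addresses are illustrative).

from pysyncobj import SyncObj, SyncObjConf

# Advertise the public name to partners, but bind locally; the value is split on the
# last ':' and DNS-resolved exactly as in _createServer above.
conf = SyncObjConf(bindAddress='0.0.0.0:4321')
obj = SyncObj('public.example.org:4321', ['serverB:4321'], conf=conf)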
test
TCPTransport._maybeBind
Bind the server unless it is already bound, this is a read-only node, or the last attempt was too recently. :raises TransportNotReadyError if the bind attempt fails
pysyncobj/transport.py
def _maybeBind(self): """ Bind the server unless it is already bound, this is a read-only node, or the last attempt was too recently. :raises TransportNotReadyError if the bind attempt fails """ if self._ready or self._selfIsReadonlyNode or time.time() < self._lastBindAttemptTime + self._syncObj.conf.bindRetryTime: return self._lastBindAttemptTime = time.time() try: self._server.bind() except Exception as e: self._bindAttempts += 1 if self._syncObj.conf.maxBindRetries and self._bindAttempts >= self._syncObj.conf.maxBindRetries: self._bindOverEvent.set() raise TransportNotReadyError else: self._ready = True self._bindOverEvent.set()
def _maybeBind(self): """ Bind the server unless it is already bound, this is a read-only node, or the last attempt was too recently. :raises TransportNotReadyError if the bind attempt fails """ if self._ready or self._selfIsReadonlyNode or time.time() < self._lastBindAttemptTime + self._syncObj.conf.bindRetryTime: return self._lastBindAttemptTime = time.time() try: self._server.bind() except Exception as e: self._bindAttempts += 1 if self._syncObj.conf.maxBindRetries and self._bindAttempts >= self._syncObj.conf.maxBindRetries: self._bindOverEvent.set() raise TransportNotReadyError else: self._ready = True self._bindOverEvent.set()
[ "Bind", "the", "server", "unless", "it", "is", "already", "bound", "this", "is", "a", "read", "-", "only", "node", "or", "the", "last", "attempt", "was", "too", "recently", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/transport.py#L255-L274
[ "def", "_maybeBind", "(", "self", ")", ":", "if", "self", ".", "_ready", "or", "self", ".", "_selfIsReadonlyNode", "or", "time", ".", "time", "(", ")", "<", "self", ".", "_lastBindAttemptTime", "+", "self", ".", "_syncObj", ".", "conf", ".", "bindRetryTime", ":", "return", "self", ".", "_lastBindAttemptTime", "=", "time", ".", "time", "(", ")", "try", ":", "self", ".", "_server", ".", "bind", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "_bindAttempts", "+=", "1", "if", "self", ".", "_syncObj", ".", "conf", ".", "maxBindRetries", "and", "self", ".", "_bindAttempts", ">=", "self", ".", "_syncObj", ".", "conf", ".", "maxBindRetries", ":", "self", ".", "_bindOverEvent", ".", "set", "(", ")", "raise", "TransportNotReadyError", "else", ":", "self", ".", "_ready", "=", "True", "self", ".", "_bindOverEvent", ".", "set", "(", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
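The retry loop above is driven by two configuration values, bindRetryTime (minimum delay between attempts) and maxBindRetries (failures tolerated before TransportNotReadyError is raised); a sketch under the assumption that both are plain SyncObjConf constructor arguments, values illustrative.

from pysyncobj import SyncObjConf

conf = SyncObjConf(
    bindRetryTime=10,   # wait at least 10 seconds between bind attempts
    maxBindRetries=5,   # raise TransportNotReadyError once this many attempts have failed
)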
test
TCPTransport._onNewIncomingConnection
Callback for connections initiated by the other side :param conn: connection object :type conn: TcpConnection
pysyncobj/transport.py
def _onNewIncomingConnection(self, conn): """ Callback for connections initiated by the other side :param conn: connection object :type conn: TcpConnection """ self._unknownConnections.add(conn) encryptor = self._syncObj.encryptor if encryptor: conn.encryptor = encryptor conn.setOnMessageReceivedCallback(functools.partial(self._onIncomingMessageReceived, conn)) conn.setOnDisconnectedCallback(functools.partial(self._onDisconnected, conn))
def _onNewIncomingConnection(self, conn): """ Callback for connections initiated by the other side :param conn: connection object :type conn: TcpConnection """ self._unknownConnections.add(conn) encryptor = self._syncObj.encryptor if encryptor: conn.encryptor = encryptor conn.setOnMessageReceivedCallback(functools.partial(self._onIncomingMessageReceived, conn)) conn.setOnDisconnectedCallback(functools.partial(self._onDisconnected, conn))
[ "Callback", "for", "connections", "initiated", "by", "the", "other", "side" ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/transport.py#L287-L300
[ "def", "_onNewIncomingConnection", "(", "self", ",", "conn", ")", ":", "self", ".", "_unknownConnections", ".", "add", "(", "conn", ")", "encryptor", "=", "self", ".", "_syncObj", ".", "encryptor", "if", "encryptor", ":", "conn", ".", "encryptor", "=", "encryptor", "conn", ".", "setOnMessageReceivedCallback", "(", "functools", ".", "partial", "(", "self", ".", "_onIncomingMessageReceived", ",", "conn", ")", ")", "conn", ".", "setOnDisconnectedCallback", "(", "functools", ".", "partial", "(", "self", ".", "_onDisconnected", ",", "conn", ")", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
test
TCPTransport._onIncomingMessageReceived
Callback for initial messages on incoming connections. Handles encryption, utility messages, and association of the connection with a Node. Once this initial setup is done, the relevant connected callback is executed, and further messages are deferred to the onMessageReceived callback. :param conn: connection object :type conn: TcpConnection :param message: received message :type message: any
pysyncobj/transport.py
def _onIncomingMessageReceived(self, conn, message): """ Callback for initial messages on incoming connections. Handles encryption, utility messages, and association of the connection with a Node. Once this initial setup is done, the relevant connected callback is executed, and further messages are deferred to the onMessageReceived callback. :param conn: connection object :type conn: TcpConnection :param message: received message :type message: any """ if self._syncObj.encryptor and not conn.sendRandKey: conn.sendRandKey = message conn.recvRandKey = os.urandom(32) conn.send(conn.recvRandKey) return # Utility messages if isinstance(message, list): done = False try: if message[0] == 'status': conn.send(self._syncObj.getStatus()) done = True elif message[0] == 'add': self._syncObj.addNodeToCluster(message[1], callback = functools.partial(self._utilityCallback, conn = conn, cmd = 'ADD', arg = message[1])) done = True elif message[0] == 'remove': if message[1] == self._selfNode.address: conn.send('FAIL REMOVE ' + message[1]) else: self._syncObj.removeNodeFromCluster(message[1], callback = functools.partial(self._utilityCallback, conn = conn, cmd = 'REMOVE', arg = message[1])) done = True elif message[0] == 'set_version': self._syncObj.setCodeVersion(message[1], callback = functools.partial(self._utilityCallback, conn = conn, cmd = 'SET_VERSION', arg = str(message[1]))) done = True except Exception as e: conn.send(str(e)) done = True if done: return # At this point, message should be either a node ID (i.e. address) or 'readonly' node = self._nodeAddrToNode[message] if message in self._nodeAddrToNode else None if node is None and message != 'readonly': conn.disconnect() self._unknownConnections.discard(conn) return readonly = node is None if readonly: nodeId = str(self._readonlyNodesCounter) node = Node(nodeId) self._readonlyNodes.add(node) self._readonlyNodesCounter += 1 self._unknownConnections.discard(conn) self._connections[node] = conn conn.setOnMessageReceivedCallback(functools.partial(self._onMessageReceived, node)) if not readonly: self._onNodeConnected(node) else: self._onReadonlyNodeConnected(node)
def _onIncomingMessageReceived(self, conn, message): """ Callback for initial messages on incoming connections. Handles encryption, utility messages, and association of the connection with a Node. Once this initial setup is done, the relevant connected callback is executed, and further messages are deferred to the onMessageReceived callback. :param conn: connection object :type conn: TcpConnection :param message: received message :type message: any """ if self._syncObj.encryptor and not conn.sendRandKey: conn.sendRandKey = message conn.recvRandKey = os.urandom(32) conn.send(conn.recvRandKey) return # Utility messages if isinstance(message, list): done = False try: if message[0] == 'status': conn.send(self._syncObj.getStatus()) done = True elif message[0] == 'add': self._syncObj.addNodeToCluster(message[1], callback = functools.partial(self._utilityCallback, conn = conn, cmd = 'ADD', arg = message[1])) done = True elif message[0] == 'remove': if message[1] == self._selfNode.address: conn.send('FAIL REMOVE ' + message[1]) else: self._syncObj.removeNodeFromCluster(message[1], callback = functools.partial(self._utilityCallback, conn = conn, cmd = 'REMOVE', arg = message[1])) done = True elif message[0] == 'set_version': self._syncObj.setCodeVersion(message[1], callback = functools.partial(self._utilityCallback, conn = conn, cmd = 'SET_VERSION', arg = str(message[1]))) done = True except Exception as e: conn.send(str(e)) done = True if done: return # At this point, message should be either a node ID (i.e. address) or 'readonly' node = self._nodeAddrToNode[message] if message in self._nodeAddrToNode else None if node is None and message != 'readonly': conn.disconnect() self._unknownConnections.discard(conn) return readonly = node is None if readonly: nodeId = str(self._readonlyNodesCounter) node = Node(nodeId) self._readonlyNodes.add(node) self._readonlyNodesCounter += 1 self._unknownConnections.discard(conn) self._connections[node] = conn conn.setOnMessageReceivedCallback(functools.partial(self._onMessageReceived, node)) if not readonly: self._onNodeConnected(node) else: self._onReadonlyNodeConnected(node)
[ "Callback", "for", "initial", "messages", "on", "incoming", "connections", ".", "Handles", "encryption", "utility", "messages", "and", "association", "of", "the", "connection", "with", "a", "Node", ".", "Once", "this", "initial", "setup", "is", "done", "the", "relevant", "connected", "callback", "is", "executed", "and", "further", "messages", "are", "deferred", "to", "the", "onMessageReceived", "callback", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/transport.py#L302-L365
[ "def", "_onIncomingMessageReceived", "(", "self", ",", "conn", ",", "message", ")", ":", "if", "self", ".", "_syncObj", ".", "encryptor", "and", "not", "conn", ".", "sendRandKey", ":", "conn", ".", "sendRandKey", "=", "message", "conn", ".", "recvRandKey", "=", "os", ".", "urandom", "(", "32", ")", "conn", ".", "send", "(", "conn", ".", "recvRandKey", ")", "return", "# Utility messages", "if", "isinstance", "(", "message", ",", "list", ")", ":", "done", "=", "False", "try", ":", "if", "message", "[", "0", "]", "==", "'status'", ":", "conn", ".", "send", "(", "self", ".", "_syncObj", ".", "getStatus", "(", ")", ")", "done", "=", "True", "elif", "message", "[", "0", "]", "==", "'add'", ":", "self", ".", "_syncObj", ".", "addNodeToCluster", "(", "message", "[", "1", "]", ",", "callback", "=", "functools", ".", "partial", "(", "self", ".", "_utilityCallback", ",", "conn", "=", "conn", ",", "cmd", "=", "'ADD'", ",", "arg", "=", "message", "[", "1", "]", ")", ")", "done", "=", "True", "elif", "message", "[", "0", "]", "==", "'remove'", ":", "if", "message", "[", "1", "]", "==", "self", ".", "_selfNode", ".", "address", ":", "conn", ".", "send", "(", "'FAIL REMOVE '", "+", "message", "[", "1", "]", ")", "else", ":", "self", ".", "_syncObj", ".", "removeNodeFromCluster", "(", "message", "[", "1", "]", ",", "callback", "=", "functools", ".", "partial", "(", "self", ".", "_utilityCallback", ",", "conn", "=", "conn", ",", "cmd", "=", "'REMOVE'", ",", "arg", "=", "message", "[", "1", "]", ")", ")", "done", "=", "True", "elif", "message", "[", "0", "]", "==", "'set_version'", ":", "self", ".", "_syncObj", ".", "setCodeVersion", "(", "message", "[", "1", "]", ",", "callback", "=", "functools", ".", "partial", "(", "self", ".", "_utilityCallback", ",", "conn", "=", "conn", ",", "cmd", "=", "'SET_VERSION'", ",", "arg", "=", "str", "(", "message", "[", "1", "]", ")", ")", ")", "done", "=", "True", "except", "Exception", "as", "e", ":", "conn", ".", "send", "(", "str", "(", "e", ")", ")", "done", "=", "True", "if", "done", ":", "return", "# At this point, message should be either a node ID (i.e. address) or 'readonly'", "node", "=", "self", ".", "_nodeAddrToNode", "[", "message", "]", "if", "message", "in", "self", ".", "_nodeAddrToNode", "else", "None", "if", "node", "is", "None", "and", "message", "!=", "'readonly'", ":", "conn", ".", "disconnect", "(", ")", "self", ".", "_unknownConnections", ".", "discard", "(", "conn", ")", "return", "readonly", "=", "node", "is", "None", "if", "readonly", ":", "nodeId", "=", "str", "(", "self", ".", "_readonlyNodesCounter", ")", "node", "=", "Node", "(", "nodeId", ")", "self", ".", "_readonlyNodes", ".", "add", "(", "node", ")", "self", ".", "_readonlyNodesCounter", "+=", "1", "self", ".", "_unknownConnections", ".", "discard", "(", "conn", ")", "self", ".", "_connections", "[", "node", "]", "=", "conn", "conn", ".", "setOnMessageReceivedCallback", "(", "functools", ".", "partial", "(", "self", ".", "_onMessageReceived", ",", "node", ")", ")", "if", "not", "readonly", ":", "self", ".", "_onNodeConnected", "(", "node", ")", "else", ":", "self", ".", "_onReadonlyNodeConnected", "(", "node", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
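For reference, the utility payloads this handler dispatches on are plain lists; the shapes below are taken directly from the branches above, and the reply strings follow _utilityCallback ('SUCCESS'/'FAIL' plus command and argument). They are normally produced by admin tooling rather than application code.

example_payloads = [
    ['status'],                  # reply: the getStatus() dict
    ['add', 'serverD:4321'],     # reply: 'SUCCESS ADD serverD:4321' or 'FAIL ADD serverD:4321'
    ['remove', 'serverC:4321'],  # removing the receiving node itself replies 'FAIL REMOVE serverC:4321'
    ['set_version', 3],          # reply: 'SUCCESS SET_VERSION 3' or 'FAIL SET_VERSION 3'
]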
test
TCPTransport._utilityCallback
Callback for the utility messages :param res: result of the command :param err: error code (one of pysyncobj.config.FAIL_REASON) :param conn: utility connection :param cmd: command :param arg: command arguments
pysyncobj/transport.py
def _utilityCallback(self, res, err, conn, cmd, arg): """ Callback for the utility messages :param res: result of the command :param err: error code (one of pysyncobj.config.FAIL_REASON) :param conn: utility connection :param cmd: command :param arg: command arguments """ cmdResult = 'FAIL' if err == FAIL_REASON.SUCCESS: cmdResult = 'SUCCESS' conn.send(cmdResult + ' ' + cmd + ' ' + arg)
def _utilityCallback(self, res, err, conn, cmd, arg): """ Callback for the utility messages :param res: result of the command :param err: error code (one of pysyncobj.config.FAIL_REASON) :param conn: utility connection :param cmd: command :param arg: command arguments """ cmdResult = 'FAIL' if err == FAIL_REASON.SUCCESS: cmdResult = 'SUCCESS' conn.send(cmdResult + ' ' + cmd + ' ' + arg)
[ "Callback", "for", "the", "utility", "messages" ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/transport.py#L367-L381
[ "def", "_utilityCallback", "(", "self", ",", "res", ",", "err", ",", "conn", ",", "cmd", ",", "arg", ")", ":", "cmdResult", "=", "'FAIL'", "if", "err", "==", "FAIL_REASON", ".", "SUCCESS", ":", "cmdResult", "=", "'SUCCESS'", "conn", ".", "send", "(", "cmdResult", "+", "' '", "+", "cmd", "+", "' '", "+", "arg", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
test
TCPTransport._shouldConnect
Check whether this node should initiate a connection to another node :param node: the other node :type node: Node
pysyncobj/transport.py
def _shouldConnect(self, node): """ Check whether this node should initiate a connection to another node :param node: the other node :type node: Node """ return isinstance(node, TCPNode) and node not in self._preventConnectNodes and (self._selfIsReadonlyNode or self._selfNode.address > node.address)
def _shouldConnect(self, node): """ Check whether this node should initiate a connection to another node :param node: the other node :type node: Node """ return isinstance(node, TCPNode) and node not in self._preventConnectNodes and (self._selfIsReadonlyNode or self._selfNode.address > node.address)
[ "Check", "whether", "this", "node", "should", "initiate", "a", "connection", "to", "another", "node" ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/transport.py#L383-L391
[ "def", "_shouldConnect", "(", "self", ",", "node", ")", ":", "return", "isinstance", "(", "node", ",", "TCPNode", ")", "and", "node", "not", "in", "self", ".", "_preventConnectNodes", "and", "(", "self", ".", "_selfIsReadonlyNode", "or", "self", ".", "_selfNode", ".", "address", ">", "node", ".", "address", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
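The string comparison means exactly one side of each pair dials out; a tiny worked example of the rule, with illustrative addresses.

# 'serverB:4321' > 'serverA:4321', so B initiates the connection and A only accepts it.
# Read-only nodes always dial out regardless of addresses.
self_addr, peer_addr = 'serverB:4321', 'serverA:4321'
i_should_dial = self_addr > peer_addr   # True on B, False on A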
test
TCPTransport._connectIfNecessarySingle
Connect to a node if necessary. :param node: node to connect to :type node: Node
pysyncobj/transport.py
def _connectIfNecessarySingle(self, node): """ Connect to a node if necessary. :param node: node to connect to :type node: Node """ if node in self._connections and self._connections[node].state != CONNECTION_STATE.DISCONNECTED: return True if not self._shouldConnect(node): return False assert node in self._connections # Since we "should connect" to this node, there should always be a connection object already in place. if node in self._lastConnectAttempt and time.time() - self._lastConnectAttempt[node] < self._syncObj.conf.connectionRetryTime: return False self._lastConnectAttempt[node] = time.time() return self._connections[node].connect(node.ip, node.port)
def _connectIfNecessarySingle(self, node): """ Connect to a node if necessary. :param node: node to connect to :type node: Node """ if node in self._connections and self._connections[node].state != CONNECTION_STATE.DISCONNECTED: return True if not self._shouldConnect(node): return False assert node in self._connections # Since we "should connect" to this node, there should always be a connection object already in place. if node in self._lastConnectAttempt and time.time() - self._lastConnectAttempt[node] < self._syncObj.conf.connectionRetryTime: return False self._lastConnectAttempt[node] = time.time() return self._connections[node].connect(node.ip, node.port)
[ "Connect", "to", "a", "node", "if", "necessary", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/transport.py#L393-L409
[ "def", "_connectIfNecessarySingle", "(", "self", ",", "node", ")", ":", "if", "node", "in", "self", ".", "_connections", "and", "self", ".", "_connections", "[", "node", "]", ".", "state", "!=", "CONNECTION_STATE", ".", "DISCONNECTED", ":", "return", "True", "if", "not", "self", ".", "_shouldConnect", "(", "node", ")", ":", "return", "False", "assert", "node", "in", "self", ".", "_connections", "# Since we \"should connect\" to this node, there should always be a connection object already in place.", "if", "node", "in", "self", ".", "_lastConnectAttempt", "and", "time", ".", "time", "(", ")", "-", "self", ".", "_lastConnectAttempt", "[", "node", "]", "<", "self", ".", "_syncObj", ".", "conf", ".", "connectionRetryTime", ":", "return", "False", "self", ".", "_lastConnectAttempt", "[", "node", "]", "=", "time", ".", "time", "(", ")", "return", "self", ".", "_connections", "[", "node", "]", ".", "connect", "(", "node", ".", "ip", ",", "node", ".", "port", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
test
TCPTransport._onOutgoingConnected
Callback for when a new connection from this to another node is established. Handles encryption and informs the other node which node this is. If encryption is disabled, this triggers the onNodeConnected callback and messages are deferred to the onMessageReceived callback. If encryption is enabled, the first message is handled by _onOutgoingMessageReceived. :param conn: connection object :type conn: TcpConnection
pysyncobj/transport.py
def _onOutgoingConnected(self, conn): """ Callback for when a new connection from this to another node is established. Handles encryption and informs the other node which node this is. If encryption is disabled, this triggers the onNodeConnected callback and messages are deferred to the onMessageReceived callback. If encryption is enabled, the first message is handled by _onOutgoingMessageReceived. :param conn: connection object :type conn: TcpConnection """ if self._syncObj.encryptor: conn.setOnMessageReceivedCallback(functools.partial(self._onOutgoingMessageReceived, conn)) # So we can process the sendRandKey conn.recvRandKey = os.urandom(32) conn.send(conn.recvRandKey) else: # The onMessageReceived callback is configured in addNode already. if not self._selfIsReadonlyNode: conn.send(self._selfNode.address) else: conn.send('readonly') self._onNodeConnected(self._connToNode(conn))
def _onOutgoingConnected(self, conn): """ Callback for when a new connection from this to another node is established. Handles encryption and informs the other node which node this is. If encryption is disabled, this triggers the onNodeConnected callback and messages are deferred to the onMessageReceived callback. If encryption is enabled, the first message is handled by _onOutgoingMessageReceived. :param conn: connection object :type conn: TcpConnection """ if self._syncObj.encryptor: conn.setOnMessageReceivedCallback(functools.partial(self._onOutgoingMessageReceived, conn)) # So we can process the sendRandKey conn.recvRandKey = os.urandom(32) conn.send(conn.recvRandKey) else: # The onMessageReceived callback is configured in addNode already. if not self._selfIsReadonlyNode: conn.send(self._selfNode.address) else: conn.send('readonly') self._onNodeConnected(self._connToNode(conn))
[ "Callback", "for", "when", "a", "new", "connection", "from", "this", "to", "another", "node", "is", "established", ".", "Handles", "encryption", "and", "informs", "the", "other", "node", "which", "node", "this", "is", ".", "If", "encryption", "is", "disabled", "this", "triggers", "the", "onNodeConnected", "callback", "and", "messages", "are", "deferred", "to", "the", "onMessageReceived", "callback", ".", "If", "encryption", "is", "enabled", "the", "first", "message", "is", "handled", "by", "_onOutgoingMessageReceived", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/transport.py#L419-L439
[ "def", "_onOutgoingConnected", "(", "self", ",", "conn", ")", ":", "if", "self", ".", "_syncObj", ".", "encryptor", ":", "conn", ".", "setOnMessageReceivedCallback", "(", "functools", ".", "partial", "(", "self", ".", "_onOutgoingMessageReceived", ",", "conn", ")", ")", "# So we can process the sendRandKey", "conn", ".", "recvRandKey", "=", "os", ".", "urandom", "(", "32", ")", "conn", ".", "send", "(", "conn", ".", "recvRandKey", ")", "else", ":", "# The onMessageReceived callback is configured in addNode already.", "if", "not", "self", ".", "_selfIsReadonlyNode", ":", "conn", ".", "send", "(", "self", ".", "_selfNode", ".", "address", ")", "else", ":", "conn", ".", "send", "(", "'readonly'", ")", "self", ".", "_onNodeConnected", "(", "self", ".", "_connToNode", "(", "conn", ")", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
test
TCPTransport._onOutgoingMessageReceived
Callback for receiving a message on a new outgoing connection. Used only if encryption is enabled to exchange the random keys. Once the key exchange is done, this triggers the onNodeConnected callback, and further messages are deferred to the onMessageReceived callback. :param conn: connection object :type conn: TcpConnection :param message: received message :type message: any
pysyncobj/transport.py
def _onOutgoingMessageReceived(self, conn, message): """ Callback for receiving a message on a new outgoing connection. Used only if encryption is enabled to exchange the random keys. Once the key exchange is done, this triggers the onNodeConnected callback, and further messages are deferred to the onMessageReceived callback. :param conn: connection object :type conn: TcpConnection :param message: received message :type message: any """ if not conn.sendRandKey: conn.sendRandKey = message conn.send(self._selfNode.address) node = self._connToNode(conn) conn.setOnMessageReceivedCallback(functools.partial(self._onMessageReceived, node)) self._onNodeConnected(node)
def _onOutgoingMessageReceived(self, conn, message): """ Callback for receiving a message on a new outgoing connection. Used only if encryption is enabled to exchange the random keys. Once the key exchange is done, this triggers the onNodeConnected callback, and further messages are deferred to the onMessageReceived callback. :param conn: connection object :type conn: TcpConnection :param message: received message :type message: any """ if not conn.sendRandKey: conn.sendRandKey = message conn.send(self._selfNode.address) node = self._connToNode(conn) conn.setOnMessageReceivedCallback(functools.partial(self._onMessageReceived, node)) self._onNodeConnected(node)
[ "Callback", "for", "receiving", "a", "message", "on", "a", "new", "outgoing", "connection", ".", "Used", "only", "if", "encryption", "is", "enabled", "to", "exchange", "the", "random", "keys", ".", "Once", "the", "key", "exchange", "is", "done", "this", "triggers", "the", "onNodeConnected", "callback", "and", "further", "messages", "are", "deferred", "to", "the", "onMessageReceived", "callback", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/transport.py#L441-L458
[ "def", "_onOutgoingMessageReceived", "(", "self", ",", "conn", ",", "message", ")", ":", "if", "not", "conn", ".", "sendRandKey", ":", "conn", ".", "sendRandKey", "=", "message", "conn", ".", "send", "(", "self", ".", "_selfNode", ".", "address", ")", "node", "=", "self", ".", "_connToNode", "(", "conn", ")", "conn", ".", "setOnMessageReceivedCallback", "(", "functools", ".", "partial", "(", "self", ".", "_onMessageReceived", ",", "node", ")", ")", "self", ".", "_onNodeConnected", "(", "node", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
test
TCPTransport._onDisconnected
Callback for when a connection is terminated or considered dead. Initiates a reconnect if necessary. :param conn: connection object :type conn: TcpConnection
pysyncobj/transport.py
def _onDisconnected(self, conn): """ Callback for when a connection is terminated or considered dead. Initiates a reconnect if necessary. :param conn: connection object :type conn: TcpConnection """ self._unknownConnections.discard(conn) node = self._connToNode(conn) if node is not None: if node in self._nodes: self._onNodeDisconnected(node) self._connectIfNecessarySingle(node) else: self._readonlyNodes.discard(node) self._onReadonlyNodeDisconnected(node)
def _onDisconnected(self, conn): """ Callback for when a connection is terminated or considered dead. Initiates a reconnect if necessary. :param conn: connection object :type conn: TcpConnection """ self._unknownConnections.discard(conn) node = self._connToNode(conn) if node is not None: if node in self._nodes: self._onNodeDisconnected(node) self._connectIfNecessarySingle(node) else: self._readonlyNodes.discard(node) self._onReadonlyNodeDisconnected(node)
[ "Callback", "for", "when", "a", "connection", "is", "terminated", "or", "considered", "dead", ".", "Initiates", "a", "reconnect", "if", "necessary", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/transport.py#L460-L476
[ "def", "_onDisconnected", "(", "self", ",", "conn", ")", ":", "self", ".", "_unknownConnections", ".", "discard", "(", "conn", ")", "node", "=", "self", ".", "_connToNode", "(", "conn", ")", "if", "node", "is", "not", "None", ":", "if", "node", "in", "self", ".", "_nodes", ":", "self", ".", "_onNodeDisconnected", "(", "node", ")", "self", ".", "_connectIfNecessarySingle", "(", "node", ")", "else", ":", "self", ".", "_readonlyNodes", ".", "discard", "(", "node", ")", "self", ".", "_onReadonlyNodeDisconnected", "(", "node", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
test
TCPTransport.addNode
Add a node to the network :param node: node to add :type node: TCPNode
pysyncobj/transport.py
def addNode(self, node): """ Add a node to the network :param node: node to add :type node: TCPNode """ self._nodes.add(node) self._nodeAddrToNode[node.address] = node if self._shouldConnect(node): conn = TcpConnection(poller = self._syncObj._poller, timeout = self._syncObj.conf.connectionTimeout, sendBufferSize = self._syncObj.conf.sendBufferSize, recvBufferSize = self._syncObj.conf.recvBufferSize) conn.encryptor = self._syncObj.encryptor conn.setOnConnectedCallback(functools.partial(self._onOutgoingConnected, conn)) conn.setOnMessageReceivedCallback(functools.partial(self._onMessageReceived, node)) conn.setOnDisconnectedCallback(functools.partial(self._onDisconnected, conn)) self._connections[node] = conn
def addNode(self, node): """ Add a node to the network :param node: node to add :type node: TCPNode """ self._nodes.add(node) self._nodeAddrToNode[node.address] = node if self._shouldConnect(node): conn = TcpConnection(poller = self._syncObj._poller, timeout = self._syncObj.conf.connectionTimeout, sendBufferSize = self._syncObj.conf.sendBufferSize, recvBufferSize = self._syncObj.conf.recvBufferSize) conn.encryptor = self._syncObj.encryptor conn.setOnConnectedCallback(functools.partial(self._onOutgoingConnected, conn)) conn.setOnMessageReceivedCallback(functools.partial(self._onMessageReceived, node)) conn.setOnDisconnectedCallback(functools.partial(self._onDisconnected, conn)) self._connections[node] = conn
[ "Add", "a", "node", "to", "the", "network" ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/transport.py#L490-L509
[ "def", "addNode", "(", "self", ",", "node", ")", ":", "self", ".", "_nodes", ".", "add", "(", "node", ")", "self", ".", "_nodeAddrToNode", "[", "node", ".", "address", "]", "=", "node", "if", "self", ".", "_shouldConnect", "(", "node", ")", ":", "conn", "=", "TcpConnection", "(", "poller", "=", "self", ".", "_syncObj", ".", "_poller", ",", "timeout", "=", "self", ".", "_syncObj", ".", "conf", ".", "connectionTimeout", ",", "sendBufferSize", "=", "self", ".", "_syncObj", ".", "conf", ".", "sendBufferSize", ",", "recvBufferSize", "=", "self", ".", "_syncObj", ".", "conf", ".", "recvBufferSize", ")", "conn", ".", "encryptor", "=", "self", ".", "_syncObj", ".", "encryptor", "conn", ".", "setOnConnectedCallback", "(", "functools", ".", "partial", "(", "self", ".", "_onOutgoingConnected", ",", "conn", ")", ")", "conn", ".", "setOnMessageReceivedCallback", "(", "functools", ".", "partial", "(", "self", ".", "_onMessageReceived", ",", "node", ")", ")", "conn", ".", "setOnDisconnectedCallback", "(", "functools", ".", "partial", "(", "self", ".", "_onDisconnected", ",", "conn", ")", ")", "self", ".", "_connections", "[", "node", "]", "=", "conn" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
test
TCPTransport.dropNode
Drop a node from the network :param node: node to drop :type node: Node
pysyncobj/transport.py
def dropNode(self, node): """ Drop a node from the network :param node: node to drop :type node: Node """ conn = self._connections.pop(node, None) if conn is not None: # Calling conn.disconnect() immediately triggers the onDisconnected callback if the connection isn't already disconnected, so this is necessary to prevent the automatic reconnect. self._preventConnectNodes.add(node) conn.disconnect() self._preventConnectNodes.remove(node) if isinstance(node, TCPNode): self._nodes.discard(node) self._nodeAddrToNode.pop(node.address, None) else: self._readonlyNodes.discard(node) self._lastConnectAttempt.pop(node, None)
def dropNode(self, node): """ Drop a node from the network :param node: node to drop :type node: Node """ conn = self._connections.pop(node, None) if conn is not None: # Calling conn.disconnect() immediately triggers the onDisconnected callback if the connection isn't already disconnected, so this is necessary to prevent the automatic reconnect. self._preventConnectNodes.add(node) conn.disconnect() self._preventConnectNodes.remove(node) if isinstance(node, TCPNode): self._nodes.discard(node) self._nodeAddrToNode.pop(node.address, None) else: self._readonlyNodes.discard(node) self._lastConnectAttempt.pop(node, None)
[ "Drop", "a", "node", "from", "the", "network" ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/transport.py#L511-L530
[ "def", "dropNode", "(", "self", ",", "node", ")", ":", "conn", "=", "self", ".", "_connections", ".", "pop", "(", "node", ",", "None", ")", "if", "conn", "is", "not", "None", ":", "# Calling conn.disconnect() immediately triggers the onDisconnected callback if the connection isn't already disconnected, so this is necessary to prevent the automatic reconnect.", "self", ".", "_preventConnectNodes", ".", "add", "(", "node", ")", "conn", ".", "disconnect", "(", ")", "self", ".", "_preventConnectNodes", ".", "remove", "(", "node", ")", "if", "isinstance", "(", "node", ",", "TCPNode", ")", ":", "self", ".", "_nodes", ".", "discard", "(", "node", ")", "self", ".", "_nodeAddrToNode", ".", "pop", "(", "node", ".", "address", ",", "None", ")", "else", ":", "self", ".", "_readonlyNodes", ".", "discard", "(", "node", ")", "self", ".", "_lastConnectAttempt", ".", "pop", "(", "node", ",", "None", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
test
TCPTransport.send
Send a message to a node. Returns False if the connection appears to be dead either before or after actually trying to send the message. :param node: target node :type node: Node :param message: message :type message: any :returns success :rtype bool
pysyncobj/transport.py
def send(self, node, message): """ Send a message to a node. Returns False if the connection appears to be dead either before or after actually trying to send the message. :param node: target node :type node: Node :param message: message :type message: any :returns success :rtype bool """ if node not in self._connections or self._connections[node].state != CONNECTION_STATE.CONNECTED: return False self._connections[node].send(message) if self._connections[node].state != CONNECTION_STATE.CONNECTED: return False return True
def send(self, node, message): """ Send a message to a node. Returns False if the connection appears to be dead either before or after actually trying to send the message. :param node: target node :type node: Node :param message: message :type message: any :returns success :rtype bool """ if node not in self._connections or self._connections[node].state != CONNECTION_STATE.CONNECTED: return False self._connections[node].send(message) if self._connections[node].state != CONNECTION_STATE.CONNECTED: return False return True
[ "Send", "a", "message", "to", "a", "node", ".", "Returns", "False", "if", "the", "connection", "appears", "to", "be", "dead", "either", "before", "or", "after", "actually", "trying", "to", "send", "the", "message", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/transport.py#L532-L549
[ "def", "send", "(", "self", ",", "node", ",", "message", ")", ":", "if", "node", "not", "in", "self", ".", "_connections", "or", "self", ".", "_connections", "[", "node", "]", ".", "state", "!=", "CONNECTION_STATE", ".", "CONNECTED", ":", "return", "False", "self", ".", "_connections", "[", "node", "]", ".", "send", "(", "message", ")", "if", "self", ".", "_connections", "[", "node", "]", ".", "state", "!=", "CONNECTION_STATE", ".", "CONNECTED", ":", "return", "False", "return", "True" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
test
TCPTransport.destroy
Destroy this transport
pysyncobj/transport.py
def destroy(self): """ Destroy this transport """ self.setOnMessageReceivedCallback(None) self.setOnNodeConnectedCallback(None) self.setOnNodeDisconnectedCallback(None) self.setOnReadonlyNodeConnectedCallback(None) self.setOnReadonlyNodeDisconnectedCallback(None) for node in self._nodes | self._readonlyNodes: self.dropNode(node) if self._server is not None: self._server.unbind() for conn in self._unknownConnections: conn.disconnect() self._unknownConnections = set()
def destroy(self): """ Destroy this transport """ self.setOnMessageReceivedCallback(None) self.setOnNodeConnectedCallback(None) self.setOnNodeDisconnectedCallback(None) self.setOnReadonlyNodeConnectedCallback(None) self.setOnReadonlyNodeDisconnectedCallback(None) for node in self._nodes | self._readonlyNodes: self.dropNode(node) if self._server is not None: self._server.unbind() for conn in self._unknownConnections: conn.disconnect() self._unknownConnections = set()
[ "Destroy", "this", "transport" ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/transport.py#L551-L567
[ "def", "destroy", "(", "self", ")", ":", "self", ".", "setOnMessageReceivedCallback", "(", "None", ")", "self", ".", "setOnNodeConnectedCallback", "(", "None", ")", "self", ".", "setOnNodeDisconnectedCallback", "(", "None", ")", "self", ".", "setOnReadonlyNodeConnectedCallback", "(", "None", ")", "self", ".", "setOnReadonlyNodeDisconnectedCallback", "(", "None", ")", "for", "node", "in", "self", ".", "_nodes", "|", "self", ".", "_readonlyNodes", ":", "self", ".", "dropNode", "(", "node", ")", "if", "self", ".", "_server", "is", "not", "None", ":", "self", ".", "_server", ".", "unbind", "(", ")", "for", "conn", "in", "self", ".", "_unknownConnections", ":", "conn", ".", "disconnect", "(", ")", "self", ".", "_unknownConnections", "=", "set", "(", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
test
ReplQueue.put
Put an item into the queue. True - if item placed in queue. False - if queue is full and item can not be placed.
pysyncobj/batteries.py
def put(self, item): """Put an item into the queue. True - if item placed in queue. False - if queue is full and item can not be placed.""" if self.__maxsize and len(self.__data) >= self.__maxsize: return False self.__data.append(item) return True
def put(self, item): """Put an item into the queue. True - if item placed in queue. False - if queue is full and item can not be placed.""" if self.__maxsize and len(self.__data) >= self.__maxsize: return False self.__data.append(item) return True
[ "Put", "an", "item", "into", "the", "queue", ".", "True", "-", "if", "item", "placed", "in", "queue", ".", "False", "-", "if", "queue", "is", "full", "and", "item", "can", "not", "be", "placed", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/batteries.py#L330-L337
[ "def", "put", "(", "self", ",", "item", ")", ":", "if", "self", ".", "__maxsize", "and", "len", "(", "self", ".", "__data", ")", ">=", "self", ".", "__maxsize", ":", "return", "False", "self", ".", "__data", ".", "append", "(", "item", ")", "return", "True" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
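A minimal sketch of using the replicated queue, assuming the consumers-based setup from pysyncobj.batteries, that maxsize is the constructor argument behind __maxsize, and that the sync keyword comes from the @replicated call machinery; addresses are illustrative.

from pysyncobj import SyncObj
from pysyncobj.batteries import ReplQueue

q = ReplQueue(maxsize=100)   # a falsy maxsize would mean an unbounded queue
obj = SyncObj('serverA:4321', ['serverB:4321'], consumers=[q])

# put() is replicated; sync=True returns the boolean documented above instead of
# completing asynchronously.
if not q.put('job-1', sync=True):
    print('queue full, item dropped')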
test
ReplPriorityQueue.put
Put an item into the queue. Items should be comparable, eg. tuples. True - if item placed in queue. False - if queue is full and item can not be placed.
pysyncobj/batteries.py
def put(self, item): """Put an item into the queue. Items should be comparable, eg. tuples. True - if item placed in queue. False - if queue is full and item can not be placed.""" if self.__maxsize and len(self.__data) >= self.__maxsize: return False heapq.heappush(self.__data, item) return True
def put(self, item): """Put an item into the queue. Items should be comparable, eg. tuples. True - if item placed in queue. False - if queue is full and item can not be placed.""" if self.__maxsize and len(self.__data) >= self.__maxsize: return False heapq.heappush(self.__data, item) return True
[ "Put", "an", "item", "into", "the", "queue", ".", "Items", "should", "be", "comparable", "eg", ".", "tuples", ".", "True", "-", "if", "item", "placed", "in", "queue", ".", "False", "-", "if", "queue", "is", "full", "and", "item", "can", "not", "be", "placed", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/batteries.py#L379-L386
[ "def", "put", "(", "self", ",", "item", ")", ":", "if", "self", ".", "__maxsize", "and", "len", "(", "self", ".", "__data", ")", ">=", "self", ".", "__maxsize", ":", "return", "False", "heapq", ".", "heappush", "(", "self", ".", "__data", ",", "item", ")", "return", "True" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
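Since items must be comparable, (priority, payload) tuples are the natural shape; a short sketch, assuming the queue is attached to a SyncObj via consumers=[pq] as in the ReplQueue sketch above.

from pysyncobj.batteries import ReplPriorityQueue

pq = ReplPriorityQueue()
# Smaller tuples sort first, so priority 1 is served before priority 5.
pq.put((1, 'urgent-task'), sync=True)
pq.put((5, 'background-task'), sync=True)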
test
ReplPriorityQueue.get
Extract the smallest item from queue. Return default if queue is empty.
pysyncobj/batteries.py
def get(self, default=None): """Extract the smallest item from queue. Return default if queue is empty.""" if not self.__data: return default return heapq.heappop(self.__data)
def get(self, default=None): """Extract the smallest item from queue. Return default if queue is empty.""" if not self.__data: return default return heapq.heappop(self.__data)
[ "Extract", "the", "smallest", "item", "from", "queue", ".", "Return", "default", "if", "queue", "is", "empty", "." ]
bakwc/PySyncObj
python
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/batteries.py#L389-L394
[ "def", "get", "(", "self", ",", "default", "=", "None", ")", ":", "if", "not", "self", ".", "__data", ":", "return", "default", "return", "heapq", ".", "heappop", "(", "self", ".", "__data", ")" ]
be3b0aaa932d5156f5df140c23c962430f51b7b8
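get() never raises on an empty queue; it returns the supplied default instead, which suits polling loops. A tiny continuation of the sketch above, with the same sync=True assumption.

item = pq.get(default=None, sync=True)   # smallest (priority, payload) tuple, or None when empty
if item is not None:
    priority, payload = item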