| index (int64) | repo_id (string) | file_path (string) | content (string) |
|---|---|---|---|
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/mrtasks/CountBinsSamplesCountsMRTask.java |
package hex.tree.dt.mrtasks;
import org.apache.commons.math3.util.Precision;
import water.MRTask;
import water.fvec.Chunk;
import java.util.Arrays;
import static hex.tree.dt.NumericFeatureLimits.LIMIT_MAX;
import static hex.tree.dt.NumericFeatureLimits.LIMIT_MIN;
import static hex.tree.dt.binning.NumericBin.*;
/**
* MR task for counting samples in bins.
*/
public class CountBinsSamplesCountsMRTask extends MRTask<CountBinsSamplesCountsMRTask> {
public final int _featureSplit;
// numCol x 2 - min and max for each feature
final double[][] _featuresLimits;
// binsCount x bin_encoding_len (5 or 3), depending on feature type:
// for numeric feature bin_encoding_len = 5: {numeric flag (-1.0), count, count0, min, max}
// for categorical feature bin_encoding_len = 3: {category, count, count0}
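// e.g. one numeric bin:     {-1.0, /*count*/ 0, /*count0*/ 0, /*min*/ 1.5, /*max*/ 2.5}
//      one categorical bin: {/*category*/ 3.0, /*count*/ 0, /*count0*/ 0}  (illustrative values only)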
public double[][] _bins;
// indices for the serialized array
public static final int NUMERICAL_FLAG = 0;
// for both numeric and categorical features indices of count and count0 are the same
public static final int COUNT = 1;
public static final int COUNT_0 = 2;
public CountBinsSamplesCountsMRTask(int featureSplit, double[][] featuresLimits, double[][] bins) {
_featureSplit = featureSplit;
_featuresLimits = featuresLimits;
_bins = bins;
}
@Override
public void map(Chunk[] cs) {
// deep copy of bins array so the reduce phase performs correctly
{
double[][] tmpBins = new double[_bins.length][];
for (int b = 0; b < _bins.length; b++) {
tmpBins[b] = Arrays.copyOf(_bins[b], _bins[b].length);
}
_bins = tmpBins;
}
int classFeature = cs.length - 1;
int numRows = cs[0]._len;
boolean conditionsFailed;
// select only rows that fulfill all conditions
for (int row = 0; row < numRows; row++) {
conditionsFailed = false;
for (int column = 0; column < cs.length - 1 /*exclude prediction column*/; column++) {
if (!verifyLimits(cs[column].atd(row), column)) {
conditionsFailed = true;
break;
}
}
if (!conditionsFailed) {
if (!isNumerical(_featureSplit)) {
for (int i = 0; i < _bins.length; i++) {
// find bin by category
if (_bins[i][0] == cs[_featureSplit].atd(row)) {
_bins[i][COUNT]++;
if (Precision.equals(cs[classFeature].atd(row), 0, Precision.EPSILON)) {
_bins[i][COUNT_0]++;
}
}
}
} else {
for (int i = 0; i < _bins.length; i++) {
// count feature values in the current bin
if (checkBinBelonging(cs[_featureSplit].atd(row), i)) {
_bins[i][COUNT]++;
if (Precision.equals(cs[classFeature].atd(row), 0, Precision.EPSILON)) {
_bins[i][COUNT_0]++;
}
}
}
}
}
}
}
private boolean isNumerical(int feature) {
return _featuresLimits[feature][NUMERICAL_FLAG] == -1.0;
}
private boolean verifyLimits(double featureValue, int column) {
// verifying limits is different for numerical and categorical columns
if (isNumerical(column)) {
return featureValue > _featuresLimits[column][LIMIT_MIN]
&& featureValue <= _featuresLimits[column][LIMIT_MAX];
} else {
// actual categorical value is true(1.0) in feature limits
return _featuresLimits[column][(int) featureValue] == 1.0;
}
}
private boolean checkBinBelonging(double featureValue, int bin) {
return featureValue > _bins[bin][MIN_INDEX] && featureValue <= _bins[bin][MAX_INDEX];
}
@Override
public void reduce(CountBinsSamplesCountsMRTask mrt) {
for (int i = 0; i < _bins.length; i++) {
_bins[i][COUNT] += mrt._bins[i][COUNT];
_bins[i][COUNT_0] += mrt._bins[i][COUNT_0];
}
}
}
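A minimal usage sketch (not part of the original file): `fr`, `featureIdx`, `featuresLimits`, and `emptyBins` are hypothetical placeholders, and `fr` is assumed to hold the feature columns with the class column last, as map() above expects.
CountBinsSamplesCountsMRTask task =
        new CountBinsSamplesCountsMRTask(featureIdx, featuresLimits, emptyBins).doAll(fr);
double[][] binsWithCounts = task._bins;  // per-bin count and count0, filled in by map/reduce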
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/mrtasks/FeaturesLimitsMRTask.java |
package hex.tree.dt.mrtasks;
import hex.tree.dt.DT;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.NewChunk;
import java.util.Arrays;
import static hex.tree.dt.NumericFeatureLimits.*;
/**
* MR task for calculating the actual (observed) feature limits within the current limits. Useful optimization for equal-width binning.
*/
public class FeaturesLimitsMRTask extends MRTask<FeaturesLimitsMRTask> {
// numCol x 2 - min and max for each feature
double[][] _featuresLimits;
// numCol x 2 - min and max for each feature - size is ok as it is linearly dependent on numCols
public double[][] _realFeatureLimits;
public FeaturesLimitsMRTask(double[][] featuresLimits) {
_featuresLimits = featuresLimits;
_realFeatureLimits = null;
}
/**
* Update current minimum if the candidate value is less than the actual minimum.
*
* @param feature feature index
* @param candidateValue new potential min
*/
private void tryUpdatingMin(int feature, double candidateValue) {
if (_realFeatureLimits[feature][LIMIT_MIN] > candidateValue) {
_realFeatureLimits[feature][LIMIT_MIN] = candidateValue - DT.EPSILON;
}
}
/**
* Update current maximum if the candidate value is greater than the actual maximum.
*
* @param feature feature index
* @param candidateValue new potential max
*/
private void tryUpdatingMax(int feature, double candidateValue) {
if (_realFeatureLimits[feature][LIMIT_MAX] < candidateValue) {
_realFeatureLimits[feature][LIMIT_MAX] = candidateValue;
}
}
/**
* Mark new category - set the flag of the given category to 1.0 (true).
*
* @param feature feature index
* @param category new category
*/
private void tryAddingCategory(int feature, double category) {
_realFeatureLimits[feature][(int) category] = 1.0;
}
/**
* Update current categories mask with given categories mask.
*
* @param feature feature index
* @param categoriesMask categories to add to existing mask
*/
private void updateCategories(int feature, double[] categoriesMask) {
for (int i = 0; i < categoriesMask.length; i++) {
// set 1.0 (true) even if it is already 1.0
if (categoriesMask[i] == 1.0) {
_realFeatureLimits[feature][i] = 1.0;
}
}
}
@Override
public void map(Chunk[] cs, NewChunk[] nc) {
// init real features limits - check if the feature is numerical or categorical
_realFeatureLimits = Arrays.stream(_featuresLimits)
.map(f -> f[NUMERICAL_FLAG] == -1.0
// Init min with Double.MAX_VALUE and max with -Double.MAX_VALUE so any real value improves on the initial one
? new double[]{-1.0, Double.MAX_VALUE, (-1) * Double.MAX_VALUE}
// Init with zeros to fill with present categories
: new double[f.length])
.toArray(double[][]::new);
int numCols = cs.length - 1; // exclude prediction column
int numRows = cs[0]._len;
boolean conditionsFailed;
// select only rows that fulfill all conditions
for (int row = 0; row < numRows; row++) {
conditionsFailed = false;
for (int column = 0; column < cs.length - 1 /*exclude prediction column*/; column++) {
if (!verifyLimits(cs[column].atd(row), column)) {
conditionsFailed = true;
break;
}
}
// update limits for each feature for rows that satisfy previous condition
if (!conditionsFailed) {
for (int column = 0; column < numCols; column++) {
if (_featuresLimits[column][NUMERICAL_FLAG] == -1.0) {
// numerical feature
tryUpdatingMin(column, cs[column].atd(row));
tryUpdatingMax(column, cs[column].atd(row));
} else {
// categorical feature
tryAddingCategory(column, cs[column].atd(row));
}
}
}
}
}
private boolean isNumerical(int feature) {
return _featuresLimits[feature][NUMERICAL_FLAG] == -1.0;
}
private boolean verifyLimits(double featureValue, int column) {
// verifying limits is different for numerical and categorical columns
if (isNumerical(column)) {
return featureValue > _featuresLimits[column][LIMIT_MIN]
&& featureValue <= _featuresLimits[column][LIMIT_MAX];
} else {
// actual categorical value is true(1.0) in feature limits
return _featuresLimits[column][(int) featureValue] == 1.0;
}
}
@Override
public void reduce(FeaturesLimitsMRTask mrt) {
for (int column = 0; column < _featuresLimits.length; column++) {
if (_realFeatureLimits[column][NUMERICAL_FLAG] == -1.0) {
tryUpdatingMin(column, mrt._realFeatureLimits[column][LIMIT_MIN]);
tryUpdatingMax(column, mrt._realFeatureLimits[column][LIMIT_MAX]);
} else {
updateCategories(column, mrt._realFeatureLimits[column]);
}
}
}
}
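A minimal sketch (hypothetical names: `realLimits`, `featureIdx`, `nBins`) of how the observed limits computed by this task could drive equal-width binning for one numeric feature, using the bin encoding described in CountBinsSamplesCountsMRTask; `realLimits` stands for the `_realFeatureLimits` array of a finished task:
double min = realLimits[featureIdx][LIMIT_MIN];
double max = realLimits[featureIdx][LIMIT_MAX];
double step = (max - min) / nBins;                 // equal-width bins over the observed range
double[][] bins = new double[nBins][];
for (int i = 0; i < nBins; i++) {
    // {numeric flag, count, count0, bin min, bin max}
    bins[i] = new double[]{-1.0, 0, 0, min + i * step, min + (i + 1) * step};
}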
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/mrtasks/GetClassCountsMRTask.java |
package hex.tree.dt.mrtasks;
import water.MRTask;
import water.fvec.Chunk;
import static hex.tree.dt.NumericFeatureLimits.*;
/**
* MR task for counting classes.
*/
public class GetClassCountsMRTask extends MRTask<GetClassCountsMRTask> {
public int _numClasses;
// counts of samples for each class, class corresponds to the index in array: [count0, count1, ...]
public int[] _countsByClass;
private final double[][] _featuresLimits;
public GetClassCountsMRTask(double[][] featuresLimits, int numClasses) {
_numClasses = numClasses;
_featuresLimits = featuresLimits;
}
@Override
public void map(Chunk[] cs) {
_countsByClass = new int[_numClasses];
int classColumn = cs.length - 1; // the last column
int numRows = cs[0]._len;
boolean conditionsFailed;
// select only rows that fulfill all conditions
for (int row = 0; row < numRows; row++) {
conditionsFailed = false;
// - 1 because of the class column - don't check limits on it
for (int column = 0; column < cs.length - 1 /*exclude prediction column*/; column++) {
if (!verifyLimits(cs[column].atd(row), column)) {
conditionsFailed = true;
break;
}
}
if (!conditionsFailed) {
_countsByClass[(int) cs[classColumn].atd(row)]++;
}
}
}
private boolean isNumerical(int feature) {
return _featuresLimits[feature][NUMERICAL_FLAG] == -1.0;
}
private boolean verifyLimits(double featureValue, int column) {
// verifying limits is different for numerical and categorical columns
if (isNumerical(column)) {
return featureValue > _featuresLimits[column][LIMIT_MIN]
&& featureValue <= _featuresLimits[column][LIMIT_MAX];
} else {
// actual categorical value is true(1.0) in feature limits
return _featuresLimits[column][(int) featureValue] == 1.0;
}
}
@Override
public void reduce(GetClassCountsMRTask mrt) {
for (int c = 0; c < _numClasses; c++) {
_countsByClass[c] += mrt._countsByClass[c];
}
}
}
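A minimal usage sketch (placeholders: `fr`, `featuresLimits`, `nclasses`); the class counts for the rows that satisfy the limits are read back from `_countsByClass`, with the class column assumed to be the last column of `fr`:
GetClassCountsMRTask counts = new GetClassCountsMRTask(featuresLimits, nclasses).doAll(fr);
int samplesOfClass0 = counts._countsByClass[0];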
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/mrtasks/ScoreDTTask.java |
package hex.tree.dt.mrtasks;
import hex.ModelMetrics;
import hex.tree.dt.DTModel;
import water.MRTask;
import water.fvec.Chunk;
public class ScoreDTTask extends MRTask<ScoreDTTask> {
private DTModel _model;
private int _responseIdx;
private ModelMetrics.MetricBuilder _metricsBuilder;
public ScoreDTTask(DTModel _model) {
this._model = _model;
this._responseIdx = _model._output.responseIdx();
}
@Override
public void map(Chunk[] cs) {
_metricsBuilder = _model.makeMetricBuilder(_model._output._domains[_model._output.responseIdx()]);
double [] preds = new double[3];
double [] tmp = new double[_model._output.nfeatures()];
for (int row = 0; row < cs[0]._len; row++) {
preds = _model.score0(cs, 0, row, tmp, preds);
_metricsBuilder.perRow(preds, new float[]{(float) cs[_responseIdx].atd(row)}, _model);
}
}
@Override
public void reduce(ScoreDTTask other) {
_metricsBuilder.reduce(other._metricsBuilder);
}
public ModelMetrics.MetricBuilder getMetricsBuilder() {
return _metricsBuilder;
}
}
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/gbm/GBM.java |
package hex.tree.gbm;
import hex.*;
import hex.genmodel.MojoModel;
import hex.genmodel.algos.gbm.GbmMojoModel;
import hex.genmodel.utils.DistributionFamily;
import hex.quantile.Quantile;
import hex.quantile.QuantileModel;
import hex.tree.*;
import hex.tree.DTree.DecidedNode;
import hex.tree.DTree.LeafNode;
import hex.tree.DTree.UndecidedNode;
import org.apache.log4j.Logger;
import water.*;
import water.exceptions.H2OModelBuilderIllegalArgumentException;
import water.fvec.*;
import water.util.*;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import static hex.tree.ScoreBuildHistogram.DECIDED_ROW;
import static hex.util.LinearAlgebraUtils.toEigenArray;
/** Gradient Boosted Trees
*
* Based on "Elements of Statistical Learning, Second Edition, page 387"
*/
public class GBM extends SharedTree<GBMModel,GBMModel.GBMParameters,GBMModel.GBMOutput> {
private static final Logger LOG = Logger.getLogger(GBM.class);
@Override public ModelCategory[] can_build() {
return new ModelCategory[]{
ModelCategory.Regression,
ModelCategory.Binomial,
ModelCategory.Multinomial,
};
}
// Called from an http request
public GBM( GBMModel.GBMParameters parms ) { super(parms ); init(false); }
public GBM( GBMModel.GBMParameters parms, Key<GBMModel> key) { super(parms, key); init(false); }
public GBM(boolean startup_once) { super(new GBMModel.GBMParameters(),startup_once); }
@Override protected int nModelsInParallel(int folds) {
int defaultParallelization = nclasses() <= 2 ?
2 // for binomial and regression we can squeeze bit more performance by running 2 models in parallel
:
1; // for multinomial we need to build a tree per class - this is already massively parallel;
// adding another level of parallelism on top of that increases memory requirements and doesn't improve performance
return nModelsInParallel(folds, defaultParallelization);
}
/** Start the GBM training Job on an F/J thread. */
@Override protected GBMDriver trainModelImpl() {
return new GBMDriver();
}
/** Initialize the ModelBuilder, validating all arguments and preparing the
* training frame. This call is expected to be overridden in the subclasses
* and each subclass will start with "super.init();". This call is made
* by the front-end whenever the GUI is clicked, and needs to be fast;
* heavy-weight prep needs to wait for the trainModel() call.
*
* Validate the learning rate and distribution family. */
@Override public void init(boolean expensive) {
super.init(expensive);
// Initialize response based on given distribution family.
// Regression: initially predict the response mean
// Binomial: just class 0 (class 1 in the exact inverse prediction)
// Multinomial: Class distribution which is not a single value.
// However there is this weird tension on the initial value for
// classification: If you guess 0's (no class is favored over another),
// then with your first GBM tree you'll typically move towards the correct
// answer a little bit (assuming you have decent predictors) - and
// immediately the Confusion Matrix shows good results which gradually
// improve... BUT the Mean Squared Error will suck for unbalanced sets,
// even as the CM is good. That's because we want the predictions for the
// common class to be large and positive, and the rare class to be negative
// and instead they start around 0. Guessing initial zero's means the MSE
// is so bad, that the R^2 metric is typically negative (usually it's
// between 0 and 1).
// If instead you guess the mean (reversed through the loss function), then
// the zero-tree GBM model reports an MSE equal to the response variance -
// and an initial R^2 of zero. More trees gradually improves the R^2 as
// expected. However, all the minority classes have large guesses in the
// wrong direction, and it takes a long time (lotsa trees) to correct that
// - so your CM sucks for a long time.
if (expensive) {
if (error_count() > 0)
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(GBM.this);
if (_parms._distribution == DistributionFamily.AUTO) {
if (_nclass == 1) _parms._distribution = DistributionFamily.gaussian;
if (_nclass == 2) _parms._distribution = DistributionFamily.bernoulli;
if (_nclass >= 3) _parms._distribution = DistributionFamily.multinomial;
}
checkDistributions();
if (hasOffsetCol() && isClassifier() && (_parms._distribution == DistributionFamily.multinomial || _parms._distribution == DistributionFamily.custom)) {
error("_offset_column", "Offset is not supported for "+_parms._distribution+" distribution.");
}
if (_parms._monotone_constraints != null && _parms._monotone_constraints.length > 0 && !supportMonotoneConstraints(_parms._distribution)) {
error("_monotone_constraints", "Monotone constraints are only supported for Gaussian, Bernoulli, Tweedie and Quantile distributions, your distribution: " + _parms._distribution + ".");
}
if (_origTrain != null && _origTrain != _train) {
List<Double> projections = new ArrayList<>();
for (int i = 0; i < _origTrain.numCols(); i++) {
Vec currentCol = _origTrain.vec(i);
if (currentCol.isCategorical()) {
double[] actProjection = toEigenArray(currentCol);
for (double v : actProjection) {
projections.add(v);
}
}
}
double[] primitive_projections = new double[projections.size()];
for (int i = 0; i < projections.size(); i++) {
primitive_projections[i] = projections.get(i);
}
_orig_projection_array = primitive_projections;
}
}
switch( _parms._distribution) {
case bernoulli:
if( _nclass != 2 /*&& !couldBeBool(_response)*/)
error("_distribution", H2O.technote(2, "Binomial requires the response to be a 2-class categorical"));
break;
case quasibinomial:
if ( !_response.isNumeric() )
error("_distribution", H2O.technote(2, "Quasibinomial requires the response to be numeric."));
if ( _nclass != 2)
error("_distribution", H2O.technote(2, "Quasibinomial requires the response to be binary."));
break;
case modified_huber:
if( _nclass != 2 /*&& !couldBeBool(_response)*/)
error("_distribution", H2O.technote(2, "Modified Huber requires the response to be a 2-class categorical."));
break;
case multinomial:
if (!isClassifier()) error("_distribution", H2O.technote(2, "Multinomial requires a categorical response."));
break;
case huber:
if (isClassifier()) error("_distribution", H2O.technote(2, "Huber requires the response to be numeric."));
break;
case poisson:
if (isClassifier()) error("_distribution", H2O.technote(2, "Poisson requires the response to be numeric."));
break;
case gamma:
if (isClassifier()) error("_distribution", H2O.technote(2, "Gamma requires the response to be numeric."));
break;
case tweedie:
if (isClassifier()) error("_distribution", H2O.technote(2, "Tweedie requires the response to be numeric."));
break;
case gaussian:
if (isClassifier()) error("_distribution", H2O.technote(2, "Gaussian requires the response to be numeric."));
break;
case laplace:
if (isClassifier()) error("_distribution", H2O.technote(2, "Laplace requires the response to be numeric."));
break;
case quantile:
if (isClassifier()) error("_distribution", H2O.technote(2, "Quantile requires the response to be numeric."));
break;
case custom:
if(_parms._custom_distribution_func == null) error("_distribution", H2O.technote(2, "Custom requires custom function loaded."));
break;
case AUTO:
break;
default:
error("_distribution","Invalid distribution: " + _parms._distribution);
}
if( !(0. < _parms._learn_rate && _parms._learn_rate <= 1.0) )
error("_learn_rate", "learn_rate must be between 0 and 1");
if( !(0. < _parms._learn_rate_annealing && _parms._learn_rate_annealing <= 1.0) )
error("_learn_rate_annealing", "learn_rate_annealing must be between 0 and 1");
if( !(0. < _parms._col_sample_rate && _parms._col_sample_rate <= 1.0) )
error("_col_sample_rate", "col_sample_rate must be between 0 and 1");
if (_parms._max_abs_leafnode_pred <= 0)
error("_max_abs_leafnode_pred", "max_abs_leafnode_pred must be larger than 0.");
if (_parms._pred_noise_bandwidth < 0)
error("_pred_noise_bandwidth", "pred_noise_bandwidth must be >= 0.");
if ((_train != null) && (_parms._monotone_constraints != null)) {
TreeUtils.checkMonotoneConstraints(this, _train, _parms._monotone_constraints);
}
if ((_train != null) && (_parms._interaction_constraints != null)) {
if(_parms._categorical_encoding != Model.Parameters.CategoricalEncodingScheme.AUTO && _parms._categorical_encoding != Model.Parameters.CategoricalEncodingScheme.OneHotInternal){
error("_categorical_encoding", "Interaction constraints can be used when the categorical encoding is set to ``AUTO`` (``one_hot_internal`` or ``OneHotInternal``) only.");
}
TreeUtils.checkInteractionConstraints(this, _train, _parms._interaction_constraints);
if (error_count() == 0) {
_ics = _parms.interactionConstraints(_train);
}
}
}
private boolean supportMonotoneConstraints(DistributionFamily distributionFamily){
switch (distributionFamily) {
case gaussian:
case bernoulli:
case tweedie:
case quantile:
return true;
default:
return false;
}
}
// ----------------------
private class GBMDriver extends Driver {
private transient FrameMap frameMap;
private transient long _skippedCnt; // #observations that will be skipped because they have 0 weight or NA label in the training frame
@Override
protected Frame makeValidWorkspace() {
final int nClasses = numClassTrees();
final Vec[] tmp;
if (validWorkspaceCanReuseTrainWorkspace()) {
tmp = new Vec[nClasses];
for (int i = 0; i < nClasses; i++) {
tmp[i] = _train.vec(idx_tree(i));
assert tmp[i].isVolatile();
}
} else {
tmp = _valid.anyVec().makeVolatileDoubles(nClasses);
}
String[] tmpNames = new String[tmp.length];
for (int i = 0; i < tmpNames.length; i++)
tmpNames[i] = "__P_" + i;
return new Frame(tmpNames, tmp);
}
private boolean validWorkspaceCanReuseTrainWorkspace() {
// 1. only possible for CV models
if (!_parms._is_cv_model)
return false;
// 2. and only if training frame and validation frame are identical (except for weights)
// training frame can e.g. be sub/over-sampled for imbalanced problems
// shortcut: if responses are identical => frames must be identical as well (for CV models only!!!)
return _vresponse._key.equals(_response._key);
}
@Override protected boolean doOOBScoring() { return false; }
@Override protected void initializeModelSpecifics() {
frameMap = new FrameMap(GBM.this);
_mtry_per_tree = Math.max(1, (int)(_parms._col_sample_rate_per_tree * _ncols)); //per-tree
assert _parms.useColSampling() || _mtry_per_tree == _ncols;
if (!(1 <= _mtry_per_tree && _mtry_per_tree <= _ncols)) throw new IllegalArgumentException("Computed mtry_per_tree should be in interval <1,"+_ncols+"> but it is " + _mtry_per_tree);
_mtry = Math.max(1, (int)(_parms._col_sample_rate * _parms._col_sample_rate_per_tree * _ncols)); //per-split
assert _parms.useColSampling() || _mtry == _ncols;
if (!(1 <= _mtry && _mtry <= _ncols)) throw new IllegalArgumentException("Computed mtry should be in interval <1,"+_ncols+"> but it is " + _mtry);
// for Bernoulli, we compute the initial value with Newton-Raphson iteration, otherwise it might be NaN here
DistributionFamily distr = _parms._distribution;
_initialPrediction = _nclass > 2 || distr == DistributionFamily.laplace || distr == DistributionFamily.huber || distr == DistributionFamily.quantile ? 0 : getInitialValue();
if(distr == DistributionFamily.quasibinomial){
_model._output._quasibinomialDomains = new VecUtils.CollectDoubleDomain(null,2).doAll(_response).stringDomain(_response.isInt());
}
if (distr == DistributionFamily.bernoulli || distr == DistributionFamily.quasibinomial) {
if (hasOffsetCol())
_initialPrediction = getInitialValueBernoulliOffset(_train);
} else if (distr == DistributionFamily.laplace || distr == DistributionFamily.huber) {
_initialPrediction = getInitialValueQuantile(0.5);
} else if (distr == DistributionFamily.quantile) {
_initialPrediction = getInitialValueQuantile(_parms._quantile_alpha);
}
_model._output._init_f = _initialPrediction; //always write the initial value here (not just for Bernoulli)
if (_model.evalAutoParamsEnabled) {
_model.initActualParamValuesAfterOutputSetup(_nclass, isClassifier());
}
// Set the initial prediction into the tree column 0
if (_initialPrediction != 0.0) {
new FillVecWithConstant(_initialPrediction)
.doAll(vec_tree(_train, 0), _parms._build_tree_one_node); // Only setting tree-column 0
}
// Mark all rows that either have zero weights or the label is NA as decided
// This way we can avoid processing them when building trees
final long zeroWeights = hasWeightCol() ? _weights.length() - _weights.nzCnt() : 0;
if (zeroWeights > 0 || // has some zero weights or
response().naCnt() > 0) { // there are NAs in the response
Vec[] vecs = new Vec[]{_response};
if (hasWeightCol())
vecs = ArrayUtils.append(vecs, _weights);
for (int k = 0; k < _nclass; k++) {
Vec nidsVecK = vec_nids(_train, k);
if (nidsVecK.min() == DECIDED_ROW) {
// classes not present in the training frame are skipped
assert _model._output._distribution[k] == 0;
assert nidsVecK.isConst();
continue;
}
vecs = ArrayUtils.append(vecs, nidsVecK);
}
_skippedCnt = new MarkDecidedRows().doAll(vecs).markedCnt;
assert _skippedCnt >= zeroWeights;
assert _skippedCnt >= response().naCnt();
assert _skippedCnt <= response().naCnt() + zeroWeights;
} else
_skippedCnt = 0;
}
class MarkDecidedRows extends MRTask<MarkDecidedRows> {
int markedCnt;
@Override
public void map(Chunk[] cs) {
final int colStart;
final Chunk y;
final Chunk weights;
if (hasWeightCol()) {
y = cs[0];
weights = cs[1];
colStart = 2;
} else {
y = cs[0];
weights = new C0DChunk(1, cs[0]._len);
colStart = 1;
}
for (int row = 0; row < y._len; row++) {
if (weights.atd(row) == 0 || y.isNA(row)) {
markedCnt++;
for (int c = colStart; c < cs.length; c++) {
cs[c].set(row, DECIDED_ROW);
}
}
}
}
@Override
public void reduce(MarkDecidedRows mrt) {
markedCnt += mrt.markedCnt;
}
@Override
protected boolean modifiesVolatileVecs() {
return true;
}
}
/**
* Helper to compute the initial value for Laplace/Huber/Quantile (incl. optional offset and obs weights)
* @return weighted median of response - offset
*/
private double getInitialValueQuantile(double quantile) {
// obtain y - o
Vec y = hasOffsetCol()
? new ResponseLessOffsetTask(frameMap).doAll(1, Vec.T_NUM, _train).outputFrame().anyVec()
: response();
// Now compute (weighted) quantile of y - o
double res;
QuantileModel qm = null;
Frame tempFrame = null;
try {
tempFrame = new Frame(Key.<Frame>make(H2O.SELF), new String[]{"y"}, new Vec[]{y});
if (hasWeightCol()) tempFrame.add("w", _weights);
DKV.put(tempFrame);
QuantileModel.QuantileParameters parms = new QuantileModel.QuantileParameters();
parms._train = tempFrame._key;
parms._probs = new double[]{quantile};
parms._weights_column = hasWeightCol() ? "w" : null;
Job<QuantileModel> job1 = new Quantile(parms).trainModel();
qm = job1.get();
res = qm._output._quantiles[0][0];
} finally {
if (qm!=null) qm.remove();
if (tempFrame!=null) DKV.remove(tempFrame._key);
}
return res;
}
/**
* Helper to compute the initial value for Bernoulli for offset != 0
*/
private double getInitialValueBernoulliOffset(Frame train) {
LOG.info("Running Newton-Raphson iteration to find the initial value since offsets are specified.");
double delta;
int count = 0;
double tol = 1e-4;
//From R GBM vignette:
//For speed, gbm() does only one step of the Newton-Raphson algorithm
//rather than iterating to convergence. No appreciable loss of accuracy
//since the next boosting iteration will simply correct for the prior iteration's
//inadequacy.
int N = 1; //one step is enough - same as R
double init = 0; //start with initial value of 0 for convergence
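// In symbols, one Newton-Raphson step (matching the NewtonRaphson task below, with
// p_i = linkInv(offset_i + init)):  init_new = init + sum_i w_i*(y_i - p_i) / sum_i w_i*p_i*(1 - p_i)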
do {
double newInit = new NewtonRaphson(frameMap, DistributionFactory.getDistribution(_parms), init).doAll(train).value();
delta = Math.abs(init - newInit);
init = newInit;
LOG.info("Iteration " + (++count) + ": initial value: " + init);
} while (count < N && delta >= tol);
if (delta > tol) LOG.warn("Not fully converged.");
LOG.info("Newton-Raphson iteration ran for " + count + " iteration(s). Final residual: " + delta);
return init;
}
private static final double MIN_LOG_TRUNC = -19;
private static final double MAX_LOG_TRUNC = 19;
private void truncatePreds(final DTree tree, int firstLeafIndex, DistributionFamily dist) {
if (firstLeafIndex==tree._len) return;
ComputeMinMax minMax = new ComputeMinMax(frameMap, firstLeafIndex, tree._len).doAll(_train);
if (LOG.isTraceEnabled()) {
LOG.trace("Number of leaf nodes: " + minMax._mins.length);
LOG.trace("Min: " + Arrays.toString(minMax._mins));
LOG.trace("Max: " + Arrays.toString(minMax._maxs));
}
//loop over leaf nodes only: starting at leaf index
for (int i = 0; i < tree._len - firstLeafIndex; i++) {
final LeafNode node = ((LeafNode) tree.node(firstLeafIndex + i));
int nidx = node.nid();
float nodeMin = minMax._mins[nidx- firstLeafIndex];
float nodeMax = minMax._maxs[nidx- firstLeafIndex];
if (LOG.isTraceEnabled()) LOG.trace("Node: " + nidx + " min/max: " + nodeMin + "/" + nodeMax);
// https://github.com/cran/gbm/blob/master/src/poisson.cpp
// https://github.com/harrysouthworth/gbm/blob/master/src/poisson.cpp
// https://github.com/gbm-developers/gbm/blob/master/src/poisson.cpp
// https://github.com/harrysouthworth/gbm/blob/master/src/gamma.cpp
// https://github.com/gbm-developers/gbm/blob/master/src/gamma.cpp
// https://github.com/harrysouthworth/gbm/blob/master/src/tweedie.cpp
// https://github.com/gbm-developers/gbm/blob/master/src/tweedie.cpp
double val = node._pred;
if (dist == DistributionFamily.gamma || dist == DistributionFamily.tweedie) //only for gamma/tweedie
val += nodeMax;
if (val > MAX_LOG_TRUNC) {
if (LOG.isDebugEnabled()) LOG.debug("Truncating large positive leaf prediction (log): " + node._pred + " to " + (MAX_LOG_TRUNC - nodeMax));
node._pred = (float) (MAX_LOG_TRUNC - nodeMax);
}
val = node._pred;
if (dist == DistributionFamily.gamma || dist == DistributionFamily.tweedie) //only for gamma/tweedie
val += nodeMin;
if (val < MIN_LOG_TRUNC) {
if (LOG.isDebugEnabled()) LOG.debug("Truncating large negative leaf prediction (log): " + node._pred + " to " + (MIN_LOG_TRUNC - nodeMin));
node._pred = (float) (MIN_LOG_TRUNC - nodeMin);
}
if (node._pred < MIN_LOG_TRUNC || node._pred > MAX_LOG_TRUNC) {
LOG.warn("Terminal node prediction outside of allowed interval in log-space: "
+ node._pred + " (should be in " + MIN_LOG_TRUNC + "..." + MAX_LOG_TRUNC + ").");
}
}
}
// --------------------------------------------------------------------------
// Build the next k-trees, which is trying to correct the residual error from
// the prior trees.
@Override protected boolean buildNextKTrees() {
// We're going to build K (nclass) trees - each focused on correcting
// errors for a single class.
final DTree[] ktrees = new DTree[_nclass];
// Define a "working set" of leaf splits, from here to tree._len
int[] leaves = new int[_nclass];
// Get distribution
Distribution distributionImpl = DistributionFactory.getDistribution(_parms);
// Compute predictions and resulting residuals
// ESL2, page 387, Steps 2a, 2b
// fills "Work" columns for all rows (incl. OOB) with the residuals
double huberDelta = Double.NaN;
if (_parms._distribution == DistributionFamily.huber) {
// Jerome Friedman 1999: Greedy Function Approximation: A Gradient Boosting Machine
// https://statweb.stanford.edu/~jhf/ftp/trebst.pdf
// compute absolute diff |y-(f+o)| for all rows
Vec diff = new ComputeAbsDiff(frameMap).doAll(1, (byte)3 /*numeric*/, _train).outputFrame().anyVec();
// compute weighted alpha-quantile of the absolute residual -> this is the delta for the huber loss
huberDelta = MathUtils.computeWeightedQuantile(_weights, diff, _parms._huber_alpha);
distributionImpl.setHuberDelta(huberDelta);
// now compute residuals using the gradient of the huber loss (with a globally adjusted delta)
new StoreResiduals(frameMap, distributionImpl).doAll(_train, _parms._build_tree_one_node);
} else {
// compute predictions and residuals in one shot
new ComputePredAndRes(frameMap, _nclass, _model._output._distribution, distributionImpl)
.doAll(_train, _parms._build_tree_one_node);
}
for (int k = 0; k < _nclass; k++) {
if (LOG.isTraceEnabled() && ktrees[k]!=null) {
LOG.trace("Updated predictions in WORK col for class " + k + ":\n" + new Frame(new String[]{"WORK"},new Vec[]{vec_work(_train, k)}).toTwoDimTable());
}
}
// ----
// ESL2, page 387. Step 2b ii.
// One Big Loop till the ktrees are of proper depth.
// Adds a layer to the trees each pass.
Constraints cs = _parms.constraints(_train);
// Initialize branch interaction constraints
BranchInteractionConstraints bics = null;
if(_parms._interaction_constraints != null) {
bics = _parms.initialInteractionConstraints(_ics);
}
growTrees(ktrees, leaves, _rand, cs, bics);
for (int k = 0; k < _nclass; k++) {
if (LOG.isTraceEnabled() && ktrees[k]!=null) {
LOG.trace("Grew trees. Updated NIDs for class " + k + ":\n" + new Frame(new String[]{"NIDS"},new Vec[]{vec_nids(_train, k)}).toTwoDimTable());
}
}
// ----
// ESL2, page 387. Step 2b iii. Compute the gammas (leaf node predictions === fit best constant), and store them back
// into the tree leaves. Includes learn_rate.
GammaPass gp = new GammaPass(frameMap, ktrees, leaves, distributionImpl, _nclass);
gp.doAll(_train);
if (_parms._distribution == DistributionFamily.laplace) {
fitBestConstantsQuantile(ktrees, leaves[0], 0.5); //special case for Laplace: compute the median for each leaf node and store that as prediction
} else if (_parms._distribution == DistributionFamily.quantile) {
if(cs == null) {
fitBestConstantsQuantile(ktrees, leaves[0], _parms._quantile_alpha); //compute the alpha-quantile for each leaf node and store that as prediction
} else {
fitBestConstants(ktrees, leaves, gp, cs); // compute quantile monotone constraints using precomputed parent prediction
resetQuantileConstants(ktrees, _parms._quantile_alpha, cs);
}
} else if (_parms._distribution == DistributionFamily.huber) {
fitBestConstantsHuber(ktrees, leaves[0], huberDelta); //compute the alpha-quantile for each leaf node and store that as prediction
} else {
fitBestConstants(ktrees, leaves, gp, cs);
}
// Apply a correction for strong mispredictions (otherwise deviance can explode)
if (_parms._distribution == DistributionFamily.gamma ||
_parms._distribution == DistributionFamily.poisson ||
_parms._distribution == DistributionFamily.tweedie) {
assert(_nclass == 1);
truncatePreds(ktrees[0], leaves[0], _parms._distribution);
}
if ((cs != null) && constraintCheckEnabled()) {
_job.update(0, "Checking monotonicity constraints on the final model");
checkConstraints(ktrees, leaves, cs);
}
// ----
// ESL2, page 387. Step 2b iv. Cache the sum of all the trees, plus the
// new tree, in the 'tree' columns. Also, zap the NIDs for next pass.
// Tree <== f(Tree)
// Nids <== 0
new AddTreeContributions(
frameMap, ktrees, _parms._pred_noise_bandwidth, _parms._seed, _parms._ntrees, _model._output._ntrees
).doAll(_train);
// sanity check
for (int k = 0; k < _nclass; k++) {
assert ktrees[k] == null || vec_nids(_train, k).nzCnt() == _skippedCnt;
}
// Grow the model by K-trees
_model._output.addKTrees(ktrees);
// If there is no row/col-sampling and trees are just roots with 0 prediction (==no change) we can stop building
if (!_parms.isStochastic()) {
boolean converged = true;
for (DTree tree : ktrees) {
if (tree == null)
continue;
DTree.Node root = tree.root();
converged = root instanceof LeafNode && ((LeafNode) root)._pred == 0.0f;
if (! converged) {
break;
}
}
if (converged) {
LOG.warn("Model cannot be further improved by building more trees, " +
"stopping with ntrees=" + _model._output._ntrees + ". Setting actual ntrees to the " + _model._output._ntrees+".");
_parms._ntrees = _model._output._ntrees;
return true;
}
}
boolean converged = effective_learning_rate() < 1e-6;
if (converged) {
LOG.warn("Effective learning rate dropped below 1e-6 (" + _parms._learn_rate + " * " + _parms._learn_rate_annealing + "^" + (_model._output._ntrees-1) + ") - stopping the model now.");
}
return converged;
}
/**
* How many trees are actually built for the number of classes the model uses.
* @return number of trees
*/
private int numClassTrees() {
return _nclass == 2 ? 1 : _nclass; // Boolean Optimization (only one tree needed for 2-class problems)
}
/**
* Grow k regression trees (k=1 for regression and binomial, k=N for classification with N classes)
* @param ktrees k trees to grow (must be properly initialized)
* @param leaves workspace to store the leaf node starting index (k-dimensional - one per tree)
* @param rand PRNG for reproducibility
*/
private void growTrees(DTree[] ktrees, int[] leaves, Random rand, Constraints cs, BranchInteractionConstraints bics) {
// Initial set of histograms. All trees; one leaf per tree (the root
// leaf); all columns
DHistogram hcs[][][] = new DHistogram[_nclass][1/*just root leaf*/][_ncols];
// Adjust real bins for the top-levels
int adj_nbins = Math.max(_parms._nbins_top_level,_parms._nbins);
long rseed = rand.nextLong();
// initialize trees
for (int k = 0; k < numClassTrees(); k++) {
// Initially setup as-if an empty-split had just happened
if (_model._output._distribution[k] != 0) {
ktrees[k] = new DTree(_train, _ncols, _mtry, _mtry_per_tree, rseed, _parms);
DHistogram[] hist = DHistogram.initialHist(_train, _ncols, adj_nbins, hcs[k][0], rseed, _parms, getGlobalSplitPointsKeys(), cs, false, _ics);
new UndecidedNode(ktrees[k], DTree.NO_PARENT, hist, cs, bics); // The "root" node
}
}
// Sample - mark the lines by putting 'OUT_OF_BAG' into nid(<klass>) vector
if (_parms.useRowSampling()) {
Sample ss[] = new Sample[_nclass];
for (int k = 0; k < _nclass; k++)
if (ktrees[k] != null)
ss[k] = new Sample(ktrees[k], _parms._sample_rate, _parms._sample_rate_per_class).dfork(null, new Frame(vec_nids(_train, k), _response), _parms._build_tree_one_node);
for (int k = 0; k < _nclass; k++) {
if (ss[k] != null) {
ss[k].getResult();
if (LOG.isTraceEnabled() && ktrees[k]!=null) {
LOG.trace("Sampled OOB rows. NIDS:\n" + new Frame(vec_nids(_train, k)).toTwoDimTable());
}
}
}
}
// ----
// ESL2, page 387. Step 2b ii.
// One Big Loop till the ktrees are of proper depth.
// Adds a layer to the trees each pass.
int depth = 0;
for (; depth < _parms._max_depth; depth++) {
hcs = buildLayer(_train, _parms._nbins, ktrees, leaves, hcs, _parms._build_tree_one_node);
// If we did not make any new splits, then the tree is split-to-death
if (hcs == null) break;
}
// Each tree bottomed-out in a DecidedNode; go 1 more level and insert
// LeafNodes to hold predictions.
for (int k = 0; k < _nclass; k++) {
DTree tree = ktrees[k];
if (tree == null) continue;
int leaf = tree.len();
leaves[k] = leaf; //record the size of the tree before splitting the bottom nodes as the starting index for the leaf node indices
for (int nid = 0; nid < leaf; nid++) {
if (tree.node(nid) instanceof DecidedNode) {
DecidedNode dn = tree.decided(nid);
if (dn._split == null) { // No decision here, no row should have this NID now
if (nid == 0) // Handle the trivial non-splitting tree
new LeafNode(tree, DTree.NO_PARENT, 0);
continue;
}
for (int i = 0; i < dn._nids.length; i++) { //L/R children
int cnid = dn._nids[i];
if (cnid == ScoreBuildHistogram.UNDECIDED_CHILD_NODE_ID || // Bottomed out (predictors or responses known constant)
tree.node(cnid) instanceof UndecidedNode || // Or chopped off for depth
(tree.node(cnid) instanceof DecidedNode && // Or not possible to split
((DecidedNode) tree.node(cnid))._split == null)) {
dn._nids[i] = new LeafNode(tree, nid).nid(); // Mark a leaf here
}
}
}
}
} // -- k-trees are done
}
// Jerome Friedman 1999: Greedy Function Approximation: A Gradient Boosting Machine
// https://statweb.stanford.edu/~jhf/ftp/trebst.pdf
private void fitBestConstantsHuber(DTree[] ktrees, int firstLeafIndex, double huberDelta) {
if (firstLeafIndex == ktrees[0]._len) return; // no splits happened - nothing to do
assert(_nclass==1);
// get diff y-(f+o) and weights and strata (node idx)
Vec diff = new ComputeDiff(frameMap).doAll(1, (byte)3 /*numeric*/, _train).outputFrame().anyVec();
Vec weights = hasWeightCol() ? _train.vecs()[idx_weight()] : null;
Vec strata = vec_nids(_train,0);
// compute median diff for each leaf node
Quantile.StratifiedQuantilesTask sqt = new Quantile.StratifiedQuantilesTask(null, 0.5 /*median of weighted residuals*/, diff, weights, strata, QuantileModel.CombineMethod.INTERPOLATE);
H2O.submitTask(sqt);
sqt.join();
// subtract median(diff) from residuals for all observations of each leaf
DiffMinusMedianDiff hp = new DiffMinusMedianDiff(strata, sqt._quantiles /*median residuals per leaf*/);
Frame tmpFrame1 = new Frame(new String[]{"strata","diff"}, new Vec[]{strata,diff});
hp.doAll(tmpFrame1);
Vec diffMinusMedianDiff = diff;
// for each leaf, compute the mean of Math.signum(resMinusMedianRes) * Math.min(Math.abs(resMinusMedianRes), huberDelta),
// where huberDelta is the alpha-percentile of the residual across all observations
Frame tmpFrame2 = new Frame(_train.vecs());
tmpFrame2.add("resMinusMedianRes", diffMinusMedianDiff);
double[] huberGamma = new HuberLeafMath(frameMap, huberDelta, strata).doAll(tmpFrame2)._huberGamma;
// now assign the median per leaf + the above _huberCorrection[i] to each leaf
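// i.e. per leaf: pred = effective_learning_rate * (median_leaf + gamma_leaf), where
// gamma_leaf = sum_i w_i * sign(r_i - median_leaf) * min(|r_i - median_leaf|, huberDelta) / sum_i w_i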
final DTree tree = ktrees[0];
for (int i = 0; i < sqt._quantiles.length; i++) {
double huber = (sqt._quantiles[i] /*median*/ + huberGamma[i]);
if (Double.isNaN(sqt._quantiles[i])) continue; //no active rows for this NID
double val = effective_learning_rate() * huber;
assert !Double.isNaN(val) && !Double.isInfinite(val);
if (val > _parms._max_abs_leafnode_pred) val = _parms._max_abs_leafnode_pred;
if (val < -_parms._max_abs_leafnode_pred) val = -_parms._max_abs_leafnode_pred;
((LeafNode) tree.node(sqt._nids[i]))._pred = (float) val;
if (LOG.isTraceEnabled()) { LOG.trace("Leaf " + sqt._nids[i] + " has huber value: " + huber); }
}
diffMinusMedianDiff.remove();
}
private double effective_learning_rate() {
return _parms._learn_rate * Math.pow(_parms._learn_rate_annealing, (_model._output._ntrees-1));
}
private void fitBestConstantsQuantile(DTree[] ktrees, int firstLeafIndex, double quantile) {
if (firstLeafIndex == ktrees[0]._len) return; // no splits happened - nothing to do
assert(_nclass==1);
Vec diff = new ComputeDiff(frameMap).doAll(1, (byte)3 /*numeric*/, _train).outputFrame().anyVec();
Vec weights = hasWeightCol() ? _train.vecs()[idx_weight()] : null;
Vec strata = vec_nids(_train,0);
// compute quantile for all leaf nodes
QuantileModel.CombineMethod combine_method = QuantileModel.CombineMethod.INTERPOLATE;
Quantile.StratifiedQuantilesTask sqt = new Quantile.StratifiedQuantilesTask(null, quantile, diff, weights, strata, combine_method);
H2O.submitTask(sqt);
sqt.join();
final DTree tree = ktrees[0];
for (int i = 0; i < sqt._quantiles.length; i++) {
double leafQuantile = sqt._quantiles[i];
if (Double.isNaN(leafQuantile)) continue; //no active rows for this NID
double val = effective_learning_rate() * leafQuantile;
if (val > _parms._max_abs_leafnode_pred) val = _parms._max_abs_leafnode_pred;
if (val < -_parms._max_abs_leafnode_pred) val = -_parms._max_abs_leafnode_pred;
((LeafNode) tree.node(sqt._nids[i]))._pred = (float) val;
}
}
/**
* This method recomputes the resulting leaf node predictions to get more precise predictions while respecting
* the column constraints in subtrees.
* @param ktrees array of all trained trees
* @param quantile quantile alpha parameter
* @param cs constraint object to get information about constraints for each column
*/
private void resetQuantileConstants(DTree[] ktrees, double quantile, Constraints cs) {
Vec diff = new ComputeDiff(frameMap).doAll(1, (byte)3, _train).outputFrame().anyVec();
Vec weights = hasWeightCol() ? _train.vecs()[idx_weight()] : null;
Vec strata = vec_nids(_train,0);
// compute quantile for all leaf nodes
QuantileModel.CombineMethod combine_method = QuantileModel.CombineMethod.INTERPOLATE;
Quantile.StratifiedQuantilesTask sqt = new Quantile.StratifiedQuantilesTask(null, quantile, diff, weights, strata, combine_method);
H2O.submitTask(sqt);
sqt.join();
for (int k = 0; k < _nclass; k++) {
final DTree tree = ktrees[k];
if (tree == null || tree.len() < 2) continue;
float[] mins = new float[tree._len];
int[] min_ids = new int[tree._len];
float[] maxs = new float[tree._len];
int[] max_ids = new int[tree._len];
int dnSize = tree._len - tree._leaves; // calculate the index where the leaves start
rollupMinMaxPreds(tree, tree.root(), mins, min_ids, maxs, max_ids);
for (int i = dnSize; i < tree.len(); i++) {
LeafNode node = (LeafNode)tree.node(i);
int quantileId = i - dnSize;
double leafQuantile = sqt._quantiles[quantileId];
if (Double.isNaN(leafQuantile)) continue; // quantile can be NaN if CV or weights column is enabled
boolean canBeReplaced = true;
DTree.Node tmpNode = tree.node(i);
while(tmpNode.pid() != DTree.NO_PARENT) {
DecidedNode parent = (DecidedNode) tree.node(tmpNode._pid);
int constraint = cs.getColumnConstraint(parent._split._col);
if (parent._split.naSplitDir() == DHistogram.NASplitDir.NAvsREST || constraint == 0) {
tmpNode = parent;
continue;
}
if (constraint > 0) {
if (leafQuantile > mins[parent._nids[0]] || leafQuantile < maxs[parent._nids[1]]) {
canBeReplaced = false;
break;
}
} else if (leafQuantile < maxs[parent._nids[0]] || leafQuantile > mins[parent._nids[1]]) {
canBeReplaced = false;
break;
}
tmpNode = parent;
}
if(canBeReplaced){
node._pred = (float) leafQuantile;
}
}
}
}
private void fitBestConstants(DTree[] ktrees, int[] leafs, GammaPass gp, Constraints cs) {
final boolean useSplitPredictions = cs != null && cs.useBounds();
double m1class = (_nclass > 1 && _parms._distribution == DistributionFamily.multinomial) ||
(_nclass > 2 && _parms._distribution == DistributionFamily.custom) ? (double) (_nclass - 1) / _nclass : 1.0; // K-1/K for multinomial
for (int k = 0; k < _nclass; k++) {
final DTree tree = ktrees[k];
if (tree == null) continue;
if (LOG.isTraceEnabled()) {
for (int i=0;i<ktrees[k]._len-leafs[k];++i)
LOG.trace(ktrees[k].node(leafs[k]+i).toString());
}
for (int i = 0; i < tree._len - leafs[k]; i++) {
LeafNode leafNode = (LeafNode) ktrees[k].node(leafs[k] + i);
final double gamma;
if (useSplitPredictions) {
gamma = gp.gamma(leafNode.getSplitPrediction());
} else {
gamma = gp.gamma(k, i);
}
double gf = effective_learning_rate() * m1class * gamma;
// In the multinomial case, check for very large values (which will get exponentiated later)
// Note that gss can be *zero* while rss is non-zero - happens when some rows in the same
// split are perfectly predicted true, and others perfectly predicted false.
if (_parms._distribution == DistributionFamily.multinomial || (_parms._distribution == DistributionFamily.custom && _nclass > 2)) {
if (gf > 1e4) gf = 1e4f; // Cap prediction, will already overflow during Math.exp(gf)
else if (gf < -1e4) gf = -1e4f;
}
if (Double.isNaN(gf)) gf=0;
else if (Double.isInfinite(gf)) gf=Math.signum(gf)*1e4f;
if (gf > _parms._max_abs_leafnode_pred) gf = _parms._max_abs_leafnode_pred;
if (gf < -_parms._max_abs_leafnode_pred) gf = -_parms._max_abs_leafnode_pred;
leafNode._pred = (float) gf;
}
}
}
private boolean constraintCheckEnabled() {
return Boolean.parseBoolean(getSysProperty("gbm.monotonicity.checkEnabled", "true"));
}
private void checkConstraints(DTree[] ktrees, int[] leafs, Constraints cs) {
for (int k = 0; k < _nclass; k++) {
final DTree tree = ktrees[k];
if (tree == null) continue;
float[] mins = new float[tree._len];
int[] min_ids = new int[tree._len];
float[] maxs = new float[tree._len];
int[] max_ids = new int[tree._len];
rollupMinMaxPreds(tree, tree.root(), mins, min_ids, maxs, max_ids);
for (int i = 0; i < tree._len - leafs.length; i++) {
DTree.Node node = tree.node(i);
if (! (node instanceof DecidedNode))
continue;
DecidedNode dn = ((DecidedNode) node);
if (dn._split == null)
continue;
int constraint = cs.getColumnConstraint(dn._split._col);
if (dn._split.naSplitDir() == DHistogram.NASplitDir.NAvsREST) {
// NAs are not subject to constraints, we don't have to check the monotonicity on "NA vs REST" type of splits
continue;
}
if (constraint > 0) {
if (maxs[dn._nids[0]] > mins[dn._nids[1]]) {
String message = "Monotonicity constraint " + constraint + " violated on column '" + _train.name(dn._split._col) + "' (max(left) > min(right)): " +
maxs[dn._nids[0]] + " > " + mins[dn._nids[1]] +
"\nNode: " + node +
"\nLeft Node (max): " + tree.node(max_ids[dn._nids[0]]) +
"\nRight Node (min): " + tree.node(min_ids[dn._nids[1]]);
throw new IllegalStateException(message);
}
} else if (constraint < 0) {
if (mins[dn._nids[0]] < maxs[dn._nids[1]]) {
String message = "Monotonicity constraint " + constraint + " violated on column '" + _train.name(dn._split._col) + "' (min(left) < max(right)): " +
mins[dn._nids[0]] + " < " + maxs[dn._nids[1]] +
"\nNode: " + node +
"\nLeft Node (min): " + tree.node(min_ids[dn._nids[0]]) +
"\nRight Node (max): " + tree.node(max_ids[dn._nids[1]]);
throw new IllegalStateException(message);
}
}
}
}
}
private void rollupMinMaxPreds(DTree tree, DTree.Node node, float[] mins, int min_ids[], float[] maxs, int[] max_ids) {
if (node instanceof LeafNode) {
mins[node.nid()] = ((LeafNode) node)._pred;
min_ids[node.nid()] = node.nid();
maxs[node.nid()] = ((LeafNode) node)._pred;
max_ids[node.nid()] = node.nid();
return;
}
DecidedNode dn = (DecidedNode) node;
rollupMinMaxPreds(tree, tree.node(dn._nids[0]), mins, min_ids, maxs, max_ids);
rollupMinMaxPreds(tree, tree.node(dn._nids[1]), mins, min_ids, maxs, max_ids);
final int min_id = mins[dn._nids[0]] < mins[dn._nids[1]] ? dn._nids[0] : dn._nids[1];
mins[node.nid()] = mins[min_id];
min_ids[node.nid()] = min_ids[min_id];
final int max_id = maxs[dn._nids[0]] > maxs[dn._nids[1]] ? dn._nids[0] : dn._nids[1];
maxs[node.nid()] = maxs[max_id];
max_ids[node.nid()] = max_ids[max_id];
}
@Override protected GBMModel makeModel(Key<GBMModel> modelKey, GBMModel.GBMParameters parms) {
return new GBMModel(modelKey, parms, new GBMModel.GBMOutput(GBM.this));
}
@Override
protected void doInTrainingCheckpoint() {
try {
String modelFile = _parms._in_training_checkpoints_dir + "/" + _model._key.toString() + ".ntrees_" + _model._output._ntrees;
GBMModel modelClone = _model.clone();
modelClone.setInputParms(_parms);
modelClone._key = Key.make(_model._key + "." + _model._output._ntrees);
modelClone._output = (GBMModel.GBMOutput) _model._output.clone();
modelClone._output.changeModelMetricsKey(modelClone._key);
modelClone.exportBinaryModel(modelFile, true);
} catch (IOException e) {
throw new RuntimeException("Failed to write GBM checkpoint" + _model._key.toString(), e);
}
}
}
//--------------------------------------------------------------------------------------------------------------------
private static class FillVecWithConstant extends MRTask<FillVecWithConstant> {
private double init;
public FillVecWithConstant(double d) {
init = d;
}
@Override
protected boolean modifiesVolatileVecs() {
return true;
}
@Override
public void map(Chunk tree) {
if (tree instanceof C8DVolatileChunk) {
Arrays.fill(((C8DVolatileChunk) tree).getValues(), init);
} else {
for (int i = 0; i < tree._len; i++)
tree.set(i, init);
}
}
}
private static class ResponseLessOffsetTask extends MRTask<ResponseLessOffsetTask> {
private FrameMap fm;
public ResponseLessOffsetTask(FrameMap frameMap) {
fm = frameMap;
}
@Override
public void map(Chunk[] chks, NewChunk[] nc) {
final Chunk resp = chks[fm.responseIndex];
final Chunk offset = chks[fm.offsetIndex];
for (int i = 0; i < chks[0]._len; ++i)
nc[0].addNum(resp.atd(i) - offset.atd(i)); // y - o
}
}
/**
* Newton-Raphson fixpoint iteration to find a self-consistent initial value
*/
private static class NewtonRaphson extends MRTask<NewtonRaphson> {
private FrameMap fm;
private Distribution dist;
private double _init;
private double _numerator;
private double _denominator;
public NewtonRaphson(FrameMap frameMap, Distribution distribution, double initialValue) {
assert frameMap != null && distribution != null;
fm = frameMap;
dist = distribution;
_init = initialValue;
_numerator = 0;
_denominator = 0;
}
public double value() {
return _init + _numerator / _denominator;
}
@Override
public void map(Chunk[] chks) {
Chunk ys = chks[fm.responseIndex];
Chunk offset = chks[fm.offsetIndex];
Chunk weight = fm.weightIndex >= 0 ? chks[fm.weightIndex] : new C0DChunk(1, chks[0]._len);
for (int row = 0; row < ys._len; row++) {
double w = weight.atd(row);
if (w == 0) continue;
if (ys.isNA(row)) continue;
double y = ys.atd(row);
double o = offset.atd(row);
double p = dist.linkInv(o + _init);
_numerator += w * (y - p);
_denominator += w * p * (1. - p);
}
}
@Override
public void reduce(NewtonRaphson mrt) {
_numerator += mrt._numerator;
_denominator += mrt._denominator;
}
}
/**
* Compute Residuals
* Do this for all rows, whether OOB or not
*/
private static class ComputePredAndRes extends MRTask<ComputePredAndRes> {
private FrameMap fm;
private int nclass;
private boolean[] out;
private Distribution dist;
public ComputePredAndRes(FrameMap frameMap, int nClasses, double[] outputDistribution, Distribution distribution) {
fm = frameMap;
nclass = nClasses;
dist = distribution;
out = new boolean[outputDistribution.length];
for (int i = 0; i < out.length; i++) out[i] = (outputDistribution[i] != 0);
}
@Override
public void map(Chunk[] chks) {
Chunk ys = chks[fm.responseIndex];
Chunk offset = fm.offsetIndex >= 0 ? chks[fm.offsetIndex] : new C0DChunk(0, chks[0]._len);
Chunk preds = chks[fm.tree0Index]; // Prior tree sums
C8DVolatileChunk wk = (C8DVolatileChunk) chks[fm.work0Index]; // Place to store residuals
Chunk weights = fm.weightIndex >= 0 ? chks[fm.weightIndex] : new C0DChunk(1, chks[0]._len);
double[] fs = nclass > 1 ? new double[nclass + 1] : null;
for (int row = 0; row < wk._len; row++) {
double weight = weights.atd(row);
if (weight == 0) continue;
if (ys.isNA(row)) continue;
double f = preds.atd(row) + offset.atd(row);
double y = ys.atd(row);
if (LOG.isTraceEnabled()) LOG.trace(f + " vs " + y); //expect that the model predicts very negative values for 0 and very positive values for 1
if ((dist._family == DistributionFamily.multinomial && fs != null) || (dist._family == DistributionFamily.custom && nclass > 2)) {
double sum = score1static(chks, fm.tree0Index, 0.0 /*not used for multiclass*/, fs, row, dist, nclass);
if (Double.isInfinite(sum)) { // Overflow (happens for constant responses)
for (int k = 0; k < nclass; k++) {
wk = (C8DVolatileChunk) chks[fm.work0Index + k];
wk.getValues()[row] = ((float) dist.negHalfGradient(y, (Double.isInfinite(fs[k + 1]) ? 1.0f : 0.0f), k));
}
} else {
for (int k = 0; k < nclass; k++) { // Save as a probability distribution
if (out[k]) {
wk = (C8DVolatileChunk) chks[fm.work0Index + k];
wk.getValues()[row] = ((float) dist.negHalfGradient(y, (float) (fs[k + 1] / sum), k));
}
}
}
} else {
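// store the pseudo-residual: the negative half-gradient of the deviance at f
// (for example y - p for bernoulli, y - f for gaussian)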
wk.getValues()[row] = ((float) dist.negHalfGradient(y, f));
}
}
}
}
private static class ComputeMinMax extends MRTask<ComputeMinMax> {
private FrameMap fm;
int firstLeafIdx;
int _totalNumNodes;
float[] _mins;
float[] _maxs;
public ComputeMinMax(FrameMap frameMap, int firstLeafIndex, int totalNumNodes) {
fm = frameMap;
firstLeafIdx = firstLeafIndex;
_totalNumNodes = totalNumNodes;
}
@Override
public void map(Chunk[] chks) {
int len = _totalNumNodes - firstLeafIdx; // number of leaves
_mins = new float[len];
_maxs = new float[len];
Arrays.fill(_mins, Float.MAX_VALUE);
Arrays.fill(_maxs, -Float.MAX_VALUE);
Chunk ys = chks[fm.responseIndex];
Chunk offset = fm.offsetIndex >= 0 ? chks[fm.offsetIndex] : new C0DChunk(0, chks[0]._len);
Chunk preds = chks[fm.tree0Index]; // Prior tree sums
Chunk nids = chks[fm.nids0Index];
Chunk weights = fm.weightIndex >= 0 ? chks[fm.weightIndex] : new C0DChunk(1, chks[0]._len);
for (int row = 0; row < preds._len; row++) {
if (ys.isNA(row)) continue;
if (weights.atd(row) == 0) continue;
int nid = (int) nids.at8(row);
assert (nid != ScoreBuildHistogram.UNDECIDED_CHILD_NODE_ID);
if (nid < 0) continue; //skip OOB and otherwise skipped rows
float f = (float) (preds.atd(row) + offset.atd(row));
int idx = nid - firstLeafIdx;
_mins[idx] = Math.min(_mins[idx], f);
_maxs[idx] = Math.max(_maxs[idx], f);
}
}
@Override
public void reduce(ComputeMinMax mrt) {
ArrayUtils.reduceMin(_mins, mrt._mins);
ArrayUtils.reduceMax(_maxs, mrt._maxs);
}
}
private static class ComputeDiff extends MRTask<ComputeDiff> {
private FrameMap fm;
public ComputeDiff(FrameMap frameMap) {
fm = frameMap;
}
@Override
public void map(Chunk[] chks, NewChunk[] nc) {
final Chunk y = chks[fm.responseIndex];
final Chunk o = fm.offsetIndex >= 0 ? chks[fm.offsetIndex] : new C0DChunk(0, chks[0]._len);
final Chunk f = chks[fm.tree0Index];
for (int i = 0; i < chks[0].len(); ++i)
nc[0].addNum(y.atd(i) - (f.atd(i) + o.atd(i)));
}
}
private static class ComputeAbsDiff extends MRTask<ComputeAbsDiff> {
private FrameMap fm;
public ComputeAbsDiff(FrameMap frameMap) {
fm = frameMap;
}
@Override
public void map(Chunk[] chks, NewChunk[] nc) {
final Chunk y = chks[fm.responseIndex];
final Chunk o = fm.offsetIndex >= 0 ? chks[fm.offsetIndex] : new C0DChunk(0, chks[0]._len);
final Chunk f = chks[fm.tree0Index];
for (int i = 0; i < chks[0].len(); ++i)
nc[0].addNum(Math.abs(y.atd(i) - (f.atd(i) + o.atd(i))));
}
}
public static class DiffMinusMedianDiff extends MRTask<DiffMinusMedianDiff> {
private final int _strataMin;
private double[] _terminalMedians;
public DiffMinusMedianDiff(Vec strata, double[] terminalMedians) {
_strataMin = (int) strata.min();
_terminalMedians = terminalMedians;
}
@Override
public void map(Chunk[] chks) {
final Chunk strata = chks[0];
final Chunk diff = chks[1];
for (int i = 0; i < chks[0].len(); ++i) {
int nid = (int) strata.atd(i);
diff.set(i, diff.atd(i) - _terminalMedians[nid - _strataMin]);
}
}
}
private static final class HuberLeafMath extends MRTask<HuberLeafMath> {
// INPUT
final FrameMap fm;
final double _huberDelta;
final Vec _strata;
final int _strataMin;
final int _strataMax;
// OUTPUT
double[/*leaves*/] _huberGamma, _wcounts;
public HuberLeafMath(FrameMap frameMap, double huberDelta, Vec strata) {
fm = frameMap;
_huberDelta = huberDelta;
_strata = strata;
_strataMin = (int) _strata.min();
_strataMax = (int) _strata.max();
}
@Override
public void map(Chunk[] cs) {
if (_strata.length() == 0) {
LOG.warn("No Huber math can be done since there's no strata.");
_huberGamma = new double[0];
return;
}
final int nstrata = _strataMax - _strataMin + 1;
LOG.info("Computing Huber math for (up to) " + nstrata + " different strata.");
_huberGamma = new double[nstrata];
_wcounts = new double[nstrata];
Chunk weights = fm.weightIndex >= 0 ? cs[fm.weightIndex] : new C0DChunk(1, cs[0]._len);
Chunk stratum = cs[fm.nids0Index];
Chunk diffMinusMedianDiff = cs[cs.length - 1];
for (int row = 0; row < cs[0]._len; ++row) {
int nidx = (int) stratum.at8(row) - _strataMin; //get terminal node for this row
_huberGamma[nidx] += weights.atd(row) * Math.signum(diffMinusMedianDiff.atd(row)) * Math.min(Math.abs(diffMinusMedianDiff.atd(row)), _huberDelta);
_wcounts[nidx] += weights.atd(row);
}
}
@Override
public void reduce(HuberLeafMath mrt) {
ArrayUtils.add(_huberGamma, mrt._huberGamma);
ArrayUtils.add(_wcounts, mrt._wcounts);
}
@Override
protected void postGlobal() {
for (int i = 0; i < _huberGamma.length; ++i)
_huberGamma[i] /= _wcounts[i];
}
}
private static class StoreResiduals extends MRTask<StoreResiduals> {
private FrameMap fm;
private Distribution dist;
public StoreResiduals(FrameMap frameMap, Distribution distribution) {
fm = frameMap;
dist = distribution;
}
@Override
protected boolean modifiesVolatileVecs() {
return true;
}
@Override
public void map(Chunk[] chks) {
Chunk ys = chks[fm.responseIndex];
Chunk offset = fm.offsetIndex >= 0 ? chks[fm.offsetIndex] : new C0DChunk(0, chks[0]._len);
Chunk preds = chks[fm.tree0Index]; // Prior tree sums
C8DVolatileChunk wk = (C8DVolatileChunk) chks[fm.work0Index]; // Place to store residuals
Chunk weights = fm.weightIndex >= 0 ? chks[fm.weightIndex] : new C0DChunk(1, chks[0]._len);
for (int row = 0; row < wk._len; row++) {
double weight = weights.atd(row);
if (weight == 0) continue;
if (ys.isNA(row)) continue;
double f = preds.atd(row) + offset.atd(row);
double y = ys.atd(row);
wk.getValues()[row] = ((float) dist.negHalfGradient(y, f));
}
}
}
/**
* Set terminal node estimates (gamma)
* ESL2, page 387. Step 2b iii.
* Nids <== f(Nids)
* For classification (bernoulli):
* <pre>{@code gamma_i = sum (w_i * res_i) / sum (w_i*p_i*(1 - p_i)) where p_i = y_i - res_i}</pre>
* For classification (multinomial):
* <pre>{@code gamma_i_k = (nclass-1)/nclass * (sum res_i / sum (|res_i|*(1-|res_i|)))}</pre>
* For regression (gaussian):
* <pre>{@code gamma_i = sum res_i / count(res_i)}</pre>
*/
private static class GammaPass extends MRTask<GammaPass> {
private final FrameMap fm;
private final DTree[] _trees; // Read-only, shared (except at the histograms in the Nodes)
private final int[] _leafs; // Starting index of leaves (per class-tree)
private final Distribution _dist;
private final int _nclass;
private double[/*tree/klass*/][/*tree-relative node-id*/] _num;
private double[/*tree/klass*/][/*tree-relative node-id*/] _denom;
public GammaPass(FrameMap frameMap, DTree[] trees, int[] leafs, Distribution distribution, int nClasses) {
fm = frameMap;
_leafs = leafs;
_trees = trees;
_dist = distribution;
_nclass = nClasses;
}
double gamma(int tree, int nid) {
return gamma(tree, nid, _num[tree][nid]);
}
double gamma(int tree, int nid, double num) {
if (_denom[tree][nid] == 0)
return 0;
double g = num / _denom[tree][nid];
assert !Double.isInfinite(g) && !Double.isNaN(g);
return gamma(g);
}
double gamma(double g) {
if (_dist._family == DistributionFamily.poisson ||
_dist._family == DistributionFamily.gamma ||
_dist._family == DistributionFamily.tweedie) {
return _dist.link(g);
} else {
return g;
}
}
@Override
protected boolean modifiesVolatileVecs() {
return true;
}
@Override
public void map(Chunk[] chks) {
_denom = new double[_nclass][];
_num = new double[_nclass][];
final Chunk resp = chks[fm.responseIndex]; // Response for this frame
// For all tree/klasses
for (int k = 0; k < _nclass; k++) {
final DTree tree = _trees[k];
final int leaf = _leafs[k];
if (tree == null) continue; // Empty class is ignored
assert (tree._len - leaf >= 0);
// A leaf-biased array of all active Tree leaves.
final double[] denom = _denom[k] = new double[tree._len - leaf];
final double[] num = _num[k] = new double[tree._len - leaf];
final C4VolatileChunk nids = (C4VolatileChunk) chks[fm.nids0Index + k]; // Node-ids for this tree/class
int[] nids_vals = nids.getValues();
final Chunk ress = chks[fm.work0Index + k]; // Residuals for this tree/class
final Chunk offset = fm.offsetIndex >= 0 ? chks[fm.offsetIndex] : new C0DChunk(0, chks[0]._len);
final Chunk preds = chks[fm.tree0Index + k];
final Chunk weights = fm.weightIndex >= 0 ? chks[fm.weightIndex] : new C0DChunk(1, chks[0]._len);
// If we have all constant responses, then we do not split even the
// root and the residuals should be zero.
if (tree.root() instanceof LeafNode)
continue;
for (int row = 0; row < nids._len; row++) { // For all rows
double w = weights.atd(row);
if (w == 0)
continue;
double y = resp.atd(row); //response
if (Double.isNaN(y))
continue;
// Compute numerator and denominator of terminal node estimate (gamma)
int nid = (int) nids.at8(row); // Get Node to decide from
final boolean wasOOBRow = ScoreBuildHistogram.isOOBRow(nid); //same for all k
if (wasOOBRow) nid = ScoreBuildHistogram.oob2Nid(nid);
if (nid < 0)
continue;
DecidedNode dn = tree.decided(nid); // Must have a decision point
if (dn._split == null) // Unable to decide?
dn = tree.decided(dn.pid()); // Then take parent's decision
int leafnid = dn.getChildNodeID(chks, row); // Decide down to a leafnode
assert leaf <= leafnid && leafnid < tree._len :
"leaf: " + leaf + " leafnid: " + leafnid + " tree._len: " + tree._len + "\ndn: " + dn;
assert tree.node(leafnid) instanceof LeafNode;
// Note: I can tell which leaf/region I end up in, but I do not care for
// the prediction presented by the tree. For GBM, we compute the
// sum-of-residuals (and sum/abs/mult residuals) for all rows in the
// leaf, and get our prediction from that.
nids_vals[row] = leafnid;
assert !ress.isNA(row);
// OOB rows get placed properly (above), but they don't affect the computed Gamma (below)
            // For Laplace/Huber/Quantile distributions, we need to compute the median of (y-offset-preds == y-f); this is done outside of this method
if (wasOOBRow
|| _dist._family == DistributionFamily.laplace
|| _dist._family == DistributionFamily.huber
|| _dist._family == DistributionFamily.quantile) continue;
double z = ress.atd(row); // residual
double f = preds.atd(row) + offset.atd(row);
int idx = leafnid - leaf;
num[idx] += _dist.gammaNum(w, y, z, f);
denom[idx] += _dist.gammaDenom(w, y, z, f);
}
}
}
@Override
public void reduce(GammaPass gp) {
ArrayUtils.add(_denom, gp._denom);
ArrayUtils.add(_num, gp._num);
}
}
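  // Illustrative note (not part of the original H2O source): after reduce(), each leaf's estimate is
  // gamma(tree, nid) = _num[tree][nid] / _denom[tree][nid], link-transformed for poisson/gamma/tweedie.
  // For the gaussian case this reduces to the javadoc formula above, e.g. a leaf that collected
  // residuals {0.2, -0.1, 0.5} with unit weights gets num = 0.6, denom = 3 and gamma = 0.2.
  // A hypothetical caller (tree index k, leaf node id leafnid, leaf offset leafs[k]) would use:
  //
  //   double leafPrediction = gammaPass.gamma(k, leafnid - leafs[k]);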
private static class AddTreeContributions extends MRTask<AddTreeContributions> {
private FrameMap fm;
private DTree[] _ktrees;
private int _nclass;
private double _pred_noise_bandwidth;
private long _seed;
private int _ntrees1;
private int _ntrees2;
public AddTreeContributions(
FrameMap frameMap, DTree[] ktrees, double predictionNoiseBandwidth, long seed, int nTreesInp, int nTreesOut
) {
fm = frameMap;
_ktrees = ktrees;
_nclass = ktrees.length;
_pred_noise_bandwidth = predictionNoiseBandwidth;
_seed = seed;
_ntrees1 = nTreesInp;
_ntrees2 = nTreesOut;
}
@Override
protected boolean modifiesVolatileVecs() {
return true;
}
@Override
public void map(Chunk[] chks) {
Random rand = RandomUtils.getRNG(_seed);
// For all tree/klasses
for (int k = 0; k < _nclass; k++) {
final DTree tree = _ktrees[k];
if (tree == null) continue;
final C4VolatileChunk nids = (C4VolatileChunk) chks[fm.nids0Index + k];
final int[] nids_vals = nids.getValues();
final C8DVolatileChunk ct = (C8DVolatileChunk) chks[fm.tree0Index + k];
double[] ct_vals = ct.getValues();
final Chunk y = chks[fm.responseIndex];
final Chunk weights = fm.weightIndex >= 0 ? chks[fm.weightIndex] : new C0DChunk(1, chks[0]._len);
long baseseed = (0xDECAF + _seed) * (0xFAAAAAAB + k * _ntrees1 + _ntrees2);
for (int row = 0; row < nids._len; row++) {
int nid = nids_vals[row];
nids_vals[row] = ScoreBuildHistogram.FRESH;
if (nid < 0) {
if (weights.atd(row) == 0 || y.isNA(row))
nids_vals[row] = ScoreBuildHistogram.DECIDED_ROW;
continue;
}
double factor = 1;
if (_pred_noise_bandwidth != 0) {
rand.setSeed(baseseed + nid); //bandwidth is a function of tree number, class and node id (but same for all rows in that node)
factor += rand.nextGaussian() * _pred_noise_bandwidth;
}
// Prediction stored in Leaf is cut to float to be deterministic in reconstructing
// <tree_klazz> fields from tree prediction
ct_vals[row] = ((float) (ct.atd(row) + factor * ((LeafNode) tree.node(nid))._pred));
}
}
}
}
//--------------------------------------------------------------------------------------------------------------------
// Read the 'tree' columns, do model-specific math and put the results in the
// fs[] array, and return the sum. Dividing any fs[] element by the sum
// turns the results into a probability distribution.
@Override protected double score1(Chunk[] chks, double weight, double offset, double[/*nclass*/] fs, int row) {
return score1static(chks, idx_tree(0), offset, fs, row, DistributionFactory.getDistribution(_parms), _nclass);
}
// Read the 'tree' columns, do model-specific math and put the results in the
// fs[] array, and return the sum. Dividing any fs[] element by the sum
// turns the results into a probability distribution.
private static double score1static(Chunk[] chks, int treeIdx, double offset, double[] fs, int row, Distribution dist, int nClasses) {
double f = chks[treeIdx].atd(row) + offset;
double p = dist.linkInv(f);
if (dist._family == DistributionFamily.modified_huber || dist._family == DistributionFamily.bernoulli || dist._family == DistributionFamily.quasibinomial ||
(dist._family == DistributionFamily.custom && nClasses == 2)) {
fs[2] = p;
fs[1] = 1.0 - p;
return 1; // f2 = 1.0 - f1; so f1+f2 = 1.0
} else if (dist._family == DistributionFamily.multinomial ||
(dist._family == DistributionFamily.custom && nClasses > 2)) {
if (nClasses == 2) {
// This optimization assumes the 2nd tree of a 2-class system is the
// inverse of the first. Fill in the missing tree
fs[1] = p;
fs[2] = 1 / p;
return fs[1] + fs[2];
}
// Multinomial loss function; sum(exp(data)). Load tree data
assert (offset == 0);
fs[1] = f;
for (int k = 1; k < nClasses; k++)
fs[k + 1] = chks[treeIdx + k].atd(row);
// Rescale to avoid Infinities; return sum(exp(data))
return hex.genmodel.GenModel.log_rescale(fs);
} else {
return fs[0] = p;
}
}
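  // Illustrative note (not part of the original H2O source): how the return value is meant to be used.
  // For bernoulli-like families fs already holds probabilities (fs[2] = p, fs[1] = 1 - p) and the sum
  // is 1, so no further normalization is needed. For multinomial, log_rescale leaves exp-rescaled
  // per-class values in fs[1..nclass] and returns their sum, so a hypothetical caller can normalize:
  //
  //   double sum = score1static(chks, treeIdx, 0.0, fs, row, dist, nclass);
  //   for (int k = 1; k <= nclass; k++) fs[k] /= sum; // softmax over the per-class tree predictions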
@Override
public PojoWriter makePojoWriter(Model<?, ?, ?> genericModel, MojoModel mojoModel) {
GbmMojoModel gbmMojoModel = (GbmMojoModel) mojoModel;
CompressedTree[][] trees = MojoUtils.extractCompressedTrees(gbmMojoModel);
boolean binomialOpt = MojoUtils.isUsingBinomialOpt(gbmMojoModel, trees);
return new GbmPojoWriter(genericModel, gbmMojoModel.getCategoricalEncoding(), binomialOpt, trees,
gbmMojoModel._init_f, gbmMojoModel._balanceClasses, gbmMojoModel._family,
LinkFunctionFactory.getLinkFunction(gbmMojoModel._link_function));
}
@Override
protected void raiseReproducibilityWarning(String datasetName, int chunks) {
warn("auto_rebalance", "Rebalancing " + datasetName + " dataset into " + chunks + " chunks. This model won't be reproducible on the different hardware configuration.");
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/gbm/GBMModel.java
|
package hex.tree.gbm;
import hex.*;
import hex.genmodel.algos.tree.SharedTreeNode;
import hex.genmodel.algos.tree.SharedTreeSubgraph;
import hex.genmodel.utils.DistributionFamily;
import hex.tree.*;
import hex.util.EffectiveParametersUtils;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.util.Log;
import water.util.TwoDimTable;
import java.util.*;
public class GBMModel extends SharedTreeModelWithContributions<GBMModel, GBMModel.GBMParameters, GBMModel.GBMOutput>
implements Model.StagedPredictions, FeatureInteractionsCollector, FriedmanPopescusHCollector, Model.RowToTreeAssignment {
public static class GBMParameters extends SharedTreeModel.SharedTreeParameters {
public double _learn_rate;
public double _learn_rate_annealing;
public double _col_sample_rate;
public double _max_abs_leafnode_pred;
public double _pred_noise_bandwidth;
public KeyValue[] _monotone_constraints;
public String[][] _interaction_constraints;
public GBMParameters() {
super();
_learn_rate = 0.1;
_learn_rate_annealing = 1.0;
_col_sample_rate = 1.0;
_sample_rate = 1.0;
_ntrees = 50;
_max_depth = 5;
_max_abs_leafnode_pred = Double.MAX_VALUE;
_pred_noise_bandwidth =0;
}
@Override
public boolean useColSampling() {
return super.useColSampling() || _col_sample_rate != 1.0;
}
public String algoName() { return "GBM"; }
public String fullName() { return "Gradient Boosting Machine"; }
public String javaName() { return GBMModel.class.getName(); }
@Override
public boolean forceStrictlyReproducibleHistograms() {
      // if monotone constraints are enabled -> use strictly reproducible histograms (we calculate values that
      // are not subject to the reduced-precision logic in DHistogram; the "float trick" cannot be applied)
return usesMonotoneConstraints();
}
private boolean usesMonotoneConstraints() {
if (areMonotoneConstraintsEmpty())
return emptyConstraints(0) != null;
return true;
}
private boolean areMonotoneConstraintsEmpty() {
return _monotone_constraints == null || _monotone_constraints.length == 0;
}
public Constraints constraints(Frame f) {
if (areMonotoneConstraintsEmpty()) {
return emptyConstraints(f.numCols());
}
int[] cs = new int[f.numCols()];
for (KeyValue spec : _monotone_constraints) {
if (spec.getValue() == 0)
continue;
int col = f.find(spec.getKey());
if (col < 0) {
throw new IllegalStateException("Invalid constraint specification, column '" + spec.getKey() + "' doesn't exist.");
}
cs[col] = spec.getValue() < 0 ? -1 : 1;
}
boolean useBounds = _distribution == DistributionFamily.gaussian ||
_distribution == DistributionFamily.bernoulli ||
_distribution == DistributionFamily.tweedie ||
_distribution == DistributionFamily.quasibinomial ||
_distribution == DistributionFamily.multinomial ||
_distribution == DistributionFamily.quantile;
return new Constraints(cs, DistributionFactory.getDistribution(this), useBounds);
}
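    // Illustrative note (not part of the original H2O source): given a hypothetical spec such as
    // _monotone_constraints = {KeyValue("age", 1), KeyValue("balance", -1)}, constraints(train) sets
    // cs[train.find("age")] = 1 (monotone increasing) and cs[train.find("balance")] = -1 (monotone
    // decreasing); columns without a spec, or with value 0, stay unconstrained (cs[col] == 0).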
    // allows overriding the behavior in tests (e.g. create empty constraints and test execution as if constraints were used)
Constraints emptyConstraints(int nCols) {
return null;
}
public GlobalInteractionConstraints interactionConstraints(Frame frame){
return new GlobalInteractionConstraints(this._interaction_constraints, frame.names());
}
public BranchInteractionConstraints initialInteractionConstraints(GlobalInteractionConstraints ics){
return new BranchInteractionConstraints(ics.getAllAllowedColumnIndices());
}
}
public static class GBMOutput extends SharedTreeModel.SharedTreeOutput {
public String[] _quasibinomialDomains;
boolean _quasibinomial;
int _nclasses;
public int nclasses() {
return _nclasses;
}
public GBMOutput(GBM b) {
super(b);
_quasibinomial = b._parms._distribution == DistributionFamily.quasibinomial;
_nclasses = b.nclasses();
}
@Override
public String[] classNames() {
String [] res = super.classNames();
if(_quasibinomial){
return _quasibinomialDomains;
}
return res;
}
}
public GBMModel(Key<GBMModel> selfKey, GBMParameters parms, GBMOutput output) {
super(selfKey,parms,output);
}
@Override
public void initActualParamValues() {
super.initActualParamValues();
EffectiveParametersUtils.initFoldAssignment(_parms);
EffectiveParametersUtils.initHistogramType(_parms);
EffectiveParametersUtils.initCategoricalEncoding(_parms, Parameters.CategoricalEncodingScheme.Enum);
EffectiveParametersUtils.initCalibrationMethod(_parms);
}
public void initActualParamValuesAfterOutputSetup(int nclasses, boolean isClassifier) {
EffectiveParametersUtils.initStoppingMetric(_parms, isClassifier);
EffectiveParametersUtils.initDistribution(_parms, nclasses);
}
@Override
protected SharedTreeModelWithContributions<GBMModel, GBMParameters, GBMOutput>.ScoreContributionsWithBackgroundTask getScoreContributionsWithBackgroundTask(SharedTreeModel model, Frame fr, Frame backgroundFrame, boolean expand, int[] catOffsets, ContributionsOptions options) {
return new ScoreContributionsWithBackgroundTask(fr._key, backgroundFrame._key, options._outputPerReference, this, expand, catOffsets, options._outputSpace);
}
@Override
protected ScoreContributionsTask getScoreContributionsTask(SharedTreeModel model) {
return new ScoreContributionsTask(this);
}
@Override
protected ScoreContributionsTask getScoreContributionsSoringTask(SharedTreeModel model, ContributionsOptions options) {
return new ScoreContributionsSortingTask(model, options);
}
@Override
public Frame scoreStagedPredictions(Frame frame, Key<Frame> destination_key) {
Frame adaptFrm = new Frame(frame);
adaptTestForTrain(adaptFrm, true, false);
final String[] names = makeAllTreeColumnNames();
final int outputcols = names.length;
return new StagedPredictionsTask(this)
.doAll(outputcols, Vec.T_NUM, adaptFrm)
.outputFrame(destination_key, names, null);
}
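  // Illustrative usage sketch (not part of the original H2O source), assuming a trained GBMModel
  // `model` and a test Frame `test` compatible with the training frame: each output column holds the
  // prediction after one more tree is added, which helps inspect how the ensemble converges.
  //
  //   Frame staged = model.scoreStagedPredictions(test, Key.make("staged_preds"));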
private static class StagedPredictionsTask extends MRTask<StagedPredictionsTask> {
private final Key<GBMModel> _modelKey;
private transient GBMModel _model;
private StagedPredictionsTask(GBMModel model) {
_modelKey = model._key;
}
@Override
protected void setupLocal() {
_model = _modelKey.get();
assert _model != null;
}
@Override
public void map(Chunk chks[], NewChunk[] nc) {
double[] input = new double[chks.length];
int contribOffset = _model._output.nclasses() == 1 ? 0 : 1;
for (int row = 0; row < chks[0]._len; row++) {
for (int i = 0; i < chks.length; i++)
input[i] = chks[i].atd(row);
double[] contribs = new double[contribOffset + _model._output.nclasses()];
double[] preds = new double[contribs.length];
int col = 0;
for (int tidx = 0; tidx < _model._output._treeKeys.length; tidx++) {
Key[] keys = _model._output._treeKeys[tidx];
for (int i = 0; i < keys.length; i++) {
if (keys[i] != null)
contribs[contribOffset + i] += DKV.get(keys[i]).<CompressedTree>get().score(input, _model._output._domains);
preds[contribOffset + i] = contribs[contribOffset + i];
}
_model.score0Probabilities(preds, 0);
_model.score0PostProcessSupervised(preds, input);
for (int i = 0; i < keys.length; i++) {
if (keys[i] != null)
nc[col++].addNum(preds[contribOffset + i]);
}
}
assert (col == nc.length);
}
}
}
@Override
protected final double[] score0Incremental(Score.ScoreIncInfo sii, Chunk[] chks, double offset, int row_in_chunk, double[] tmp, double[] preds) {
assert _output.nfeatures() == tmp.length;
for (int i = 0; i < tmp.length; i++)
tmp[i] = chks[i].atd(row_in_chunk);
if (sii._startTree == 0)
Arrays.fill(preds,0);
else
for (int i = 0; i < sii._workspaceColCnt; i++)
preds[sii._predsAryOffset + i] = chks[sii._workspaceColIdx + i].atd(row_in_chunk);
score0(tmp, preds, offset, sii._startTree, _output._treeKeys.length);
for (int i = 0; i < sii._workspaceColCnt; i++)
chks[sii._workspaceColIdx + i].set(row_in_chunk, preds[sii._predsAryOffset + i]);
score0Probabilities(preds, offset);
score0PostProcessSupervised(preds, tmp);
return preds;
}
/** Bulk scoring API for one row. Chunks are all compatible with the model,
* and expect the last Chunks are for the final distribution and prediction.
* Default method is to just load the data into the tmp array, then call
* subclass scoring logic. */
@Override protected double[] score0(double data[/*ncols*/], double preds[/*nclasses+1*/], double offset, int ntrees) {
super.score0(data, preds, offset, ntrees); // These are f_k(x) in Algorithm 10.4
return score0Probabilities(preds, offset);
}
private double[] score0Probabilities(double preds[/*nclasses+1*/], double offset) {
if (_parms._distribution == DistributionFamily.bernoulli
|| _parms._distribution == DistributionFamily.quasibinomial
|| _parms._distribution == DistributionFamily.modified_huber
        || (_parms._distribution == DistributionFamily.custom && _output.nclasses() == 2)) { // custom distribution could also be binomial
double f = preds[1] + _output._init_f + offset; //Note: class 1 probability stored in preds[1] (since we have only one tree)
preds[2] = DistributionFactory.getDistribution(_parms).linkInv(f);
preds[1] = 1.0 - preds[2];
} else if (_parms._distribution == DistributionFamily.multinomial // Kept the initial prediction for binomial
        || (_parms._distribution == DistributionFamily.custom && _output.nclasses() > 2) ) { // custom distribution could also be multinomial
if (_output.nclasses() == 2) { //1-tree optimization for binomial
preds[1] += _output._init_f + offset; //offset is not yet allowed, but added here to be future-proof
preds[2] = -preds[1];
}
hex.genmodel.GenModel.GBM_rescale(preds);
} else { //Regression
double f = preds[0] + _output._init_f + offset;
preds[0] = DistributionFactory.getDistribution(_parms).linkInv(f);
}
return preds;
}
@Override
protected SharedTreePojoWriter makeTreePojoWriter() {
CompressedForest compressedForest = new CompressedForest(_output._treeKeys, _output._domains);
CompressedForest.LocalCompressedForest localCompressedForest = compressedForest.fetch();
return new GbmPojoWriter(this, localCompressedForest._trees);
}
@Override
public GbmMojoWriter getMojo() {
return new GbmMojoWriter(this);
}
public FeatureInteractions getFeatureInteractions(int maxInteractionDepth, int maxTreeDepth, int maxDeepening) {
FeatureInteractions featureInteractions = new FeatureInteractions();
int nclasses = this._output._nclasses > 2 ? this._output._nclasses : 1;
for (int i = 0; i < this._parms._ntrees; i++) {
for (int j = 0; j < nclasses; j++) {
FeatureInteractions currentTreeFeatureInteractions = new FeatureInteractions();
SharedTreeSubgraph tree = this.getSharedTreeSubgraph(i, j);
List<SharedTreeNode> interactionPath = new ArrayList<>();
Set<String> memo = new HashSet<>();
FeatureInteractions.collectFeatureInteractions(tree.rootNode, interactionPath, 0, 0, 1, 0, 0, currentTreeFeatureInteractions,
memo, maxInteractionDepth, maxTreeDepth, maxDeepening, i, true);
featureInteractions.mergeWith(currentTreeFeatureInteractions);
}
}
if(featureInteractions.isEmpty()){
Log.warn("There is no feature interaction for this model.");
return null;
}
return featureInteractions;
}
@Override
public TwoDimTable[][] getFeatureInteractionsTable(int maxInteractionDepth, int maxTreeDepth, int maxDeepening) {
return FeatureInteractions.getFeatureInteractionsTable(this.getFeatureInteractions(maxInteractionDepth,maxTreeDepth,maxDeepening));
}
@Override
public double getFriedmanPopescusH(Frame frame, String[] vars) {
Frame adaptFrm = removeSpecialNNonNumericColumns(frame);
for(int colId = 0; colId < adaptFrm.numCols(); colId++) {
Vec col = adaptFrm.vec(colId);
if (col.isBad()) {
throw new UnsupportedOperationException(
"Calculating of H statistics error: column " + adaptFrm.name(colId) + " is missing.");
}
if(!col.isNumeric()) {
throw new UnsupportedOperationException(
"Calculating of H statistics error: column " + adaptFrm.name(colId) + " is not numeric.");
}
}
int nclasses = this._output._nclasses > 2 ? this._output._nclasses : 1;
SharedTreeSubgraph[][] sharedTreeSubgraphs = new SharedTreeSubgraph[this._parms._ntrees][nclasses];
for (int i = 0; i < this._parms._ntrees; i++) {
for (int j = 0; j < nclasses; j++) {
sharedTreeSubgraphs[i][j] = this.getSharedTreeSubgraph(i, j);
}
}
return FriedmanPopescusH.h(adaptFrm, vars, this._parms._learn_rate, sharedTreeSubgraphs);
}
@Override
public Frame rowToTreeAssignment(Frame frame, Key<Frame> destination_key, Job<Frame> j) {
Key<CompressedTree>[/*_ntrees*/][/*_nclass*/] treeKeys = _output._treeKeys;
Sample[] ss = new Sample[treeKeys.length];
int[] cons = new int[treeKeys.length];
Vec[] vs = frame.vec(_parms._response_column).makeVolatileInts(cons);
for (int treeId = 0; treeId < treeKeys.length; treeId++) {
Key<CompressedTree> compressedTreeKey = treeKeys[treeId][0]; // Always pick the zero one, multinomial trees use the same subsample
if (compressedTreeKey == null)
continue;
CompressedTree ct = DKV.getGet(compressedTreeKey);
long seed = ct.getSeed();
ss[treeId] = new Sample(seed, _parms._sample_rate, _parms._sample_rate_per_class, 1, 0).dfork(vs[treeId], frame.vec(_parms._response_column));
}
for (int treeId = 0; treeId < treeKeys.length; treeId++) {
ss[treeId].getResult();
}
int outputSize = treeKeys.length + 1;
String[] names = new String[outputSize];
byte[] types = new byte[outputSize];
String[][] domains = new String[outputSize][2];
names[0] = "row_id";
types[0] = Vec.T_NUM;
domains[0] = null;
for (int i = 1; i < outputSize; i++) {
types[i] = Vec.T_CAT;
domains[i] = new String[]{"0", "1"};
names[i] = "tree_" + i;
}
return new MRTask(){
public void map(Chunk[] chk, NewChunk[] nchk) {
for (int row = 0; row < chk[0]._len; row++) {
nchk[0].addNum(row + chk[0].start());
for (int col = 0; col < chk.length; col++) {
nchk[col+1].addNum(chk[col].atd(row));
}
}
}
}.withPostMapAction(JobUpdatePostMap.forJob(j)).doAll(types, vs).outputFrame(destination_key, names, domains);
}
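  // Illustrative usage sketch (not part of the original H2O source), assuming a trained GBMModel
  // `model`, the original training Frame `train` and a Job<Frame> `job`: the output has a "row_id"
  // column plus one categorical "tree_i" column ({"0","1"}) per tree indicating the row-to-tree
  // sample assignment reconstructed from each tree's sampling seed.
  //
  //   Frame assignment = model.rowToTreeAssignment(train, Key.make("row_to_tree"), job);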
@Override
public double score(double[] data) {
double[] pred = score0(data, new double[_output.nclasses() + 1], 0, _output._ntrees);
score0PostProcessSupervised(pred, data);
return pred[0];
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/gbm/GbmMojoWriter.java
|
package hex.tree.gbm;
import hex.Distribution;
import hex.DistributionFactory;
import hex.tree.SharedTreeMojoWriter;
import java.io.IOException;
/**
* MOJO support for GBM model.
*/
public class GbmMojoWriter extends SharedTreeMojoWriter<GBMModel, GBMModel.GBMParameters, GBMModel.GBMOutput> {
@SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler
public GbmMojoWriter() {}
public GbmMojoWriter(GBMModel model) {
super(model);
}
@Override public String mojoVersion() {
return "1.40";
}
@Override
protected void writeModelData() throws IOException {
super.writeModelData();
Distribution dist = DistributionFactory.getDistribution(model._parms);
writekv("distribution", dist._family);
writekv("link_function", dist._linkFunction.linkFunctionType);
writekv("init_f", model._output._init_f);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/gbm/GbmPojoWriter.java
|
package hex.tree.gbm;
import hex.Distribution;
import hex.DistributionFactory;
import hex.LinkFunction;
import hex.Model;
import hex.genmodel.CategoricalEncoding;
import hex.genmodel.utils.DistributionFamily;
import hex.tree.CompressedTree;
import hex.tree.SharedTreePojoWriter;
import water.util.SBPrintStream;
class GbmPojoWriter extends SharedTreePojoWriter {
private final double _init_f;
private final boolean _balance_classes;
private final DistributionFamily _distribution_family;
private final LinkFunction _link_function;
GbmPojoWriter(GBMModel model, CompressedTree[][] trees) {
super(model._key, model._output, model.getGenModelEncoding(), model.binomialOpt(),
trees, model._output._treeStats);
_init_f = model._output._init_f;
_balance_classes = model._parms._balance_classes;
Distribution distribution = DistributionFactory.getDistribution(model._parms);
_distribution_family = distribution._family;
_link_function = distribution._linkFunction;
}
GbmPojoWriter(Model<?, ?, ?> model, CategoricalEncoding encoding,
boolean binomialOpt, CompressedTree[][] trees,
double initF, boolean balanceClasses,
DistributionFamily distributionFamily, LinkFunction linkFunction) {
super(model._key, model._output, encoding, binomialOpt, trees, null);
_init_f = initF;
_balance_classes = balanceClasses;
_distribution_family = distributionFamily;
_link_function = linkFunction;
}
// Note: POJO scoring code doesn't support per-row offsets (the scoring API would need to be changed to pass in offsets)
@Override
protected void toJavaUnifyPreds(SBPrintStream body) {
// Preds are filled in from the trees, but need to be adjusted according to
// the loss function.
if (_distribution_family == DistributionFamily.bernoulli
|| _distribution_family == DistributionFamily.quasibinomial
|| _distribution_family == DistributionFamily.modified_huber
) {
body.ip("preds[2] = preds[1] + ").p(_init_f).p(";").nl();
body.ip("preds[2] = " + _link_function.linkInvString("preds[2]") + ";").nl();
body.ip("preds[1] = 1.0-preds[2];").nl();
if (_balance_classes)
body.ip("hex.genmodel.GenModel.correctProbabilities(preds, PRIOR_CLASS_DISTRIB, MODEL_CLASS_DISTRIB);").nl();
body.ip("preds[0] = hex.genmodel.GenModel.getPrediction(preds, PRIOR_CLASS_DISTRIB, data, " + _output.defaultThreshold() + ");").nl();
return;
}
if (_output.nclasses() == 1) { // Regression
body.ip("preds[0] += ").p(_init_f).p(";").nl();
body.ip("preds[0] = " + _link_function.linkInvString("preds[0]") + ";").nl();
return;
}
if (_output.nclasses() == 2) { // Kept the initial prediction for binomial
body.ip("preds[1] += ").p(_init_f).p(";").nl();
body.ip("preds[2] = - preds[1];").nl();
}
body.ip("hex.genmodel.GenModel.GBM_rescale(preds);").nl();
if (_balance_classes)
body.ip("hex.genmodel.GenModel.correctProbabilities(preds, PRIOR_CLASS_DISTRIB, MODEL_CLASS_DISTRIB);").nl();
body.ip("preds[0] = hex.genmodel.GenModel.getPrediction(preds, PRIOR_CLASS_DISTRIB, data, " + _output.defaultThreshold() + ");").nl();
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isofor/IsolationForest.java
|
package hex.tree.isofor;
import hex.ModelCategory;
import hex.ModelMetricsBinomial;
import hex.ScoreKeeper;
import hex.genmodel.utils.DistributionFamily;
import hex.quantile.Quantile;
import hex.tree.*;
import hex.tree.DTree.DecidedNode;
import hex.tree.DTree.LeafNode;
import hex.tree.DTree.UndecidedNode;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import water.Iced;
import water.Job;
import water.Key;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.PrettyPrint;
import water.util.TwoDimTable;
import java.util.*;
import static water.util.RandomUtils.getRNG;
import static hex.tree.isofor.IsolationForestModel.IsolationForestParameters;
import static hex.tree.isofor.IsolationForestModel.IsolationForestOutput;
/**
* Isolation Forest
*/
public class IsolationForest extends SharedTree<IsolationForestModel, IsolationForestParameters, IsolationForestOutput> {
@Override public ModelCategory[] can_build() {
return new ModelCategory[]{
ModelCategory.AnomalyDetection
};
}
@Override public BuilderVisibility builderVisibility() {
return BuilderVisibility.Stable;
}
// Called from an http request
public IsolationForest(IsolationForestParameters parms ) { super(parms ); init(false); }
public IsolationForest(IsolationForestParameters parms, Key<IsolationForestModel> key) { super(parms, key); init(false); }
public IsolationForest(IsolationForestParameters parms, Job job ) { super(parms, job); init(false); }
public IsolationForest(boolean startup_once) { super(new IsolationForestParameters(), startup_once); }
@Override protected Driver trainModelImpl() { return new IsolationForestDriver(); }
@Override public boolean scoreZeroTrees() { return false; }
@Override public boolean isSupervised() { return false; }
@Override public boolean isResponseOptional() { return true; }
@Override protected ScoreKeeper.ProblemType getProblemType() { return ScoreKeeper.ProblemType.anomaly_detection; }
private transient VarSplits _var_splits;
@Override public void init(boolean expensive) {
super.init(expensive);
// Initialize local variables
if( _parms._mtries < 1 && _parms._mtries != -1 && _parms._mtries != -2 )
error("_mtries", "mtries must be -1 (converted to sqrt(features)) or -2 (All features) or >= 1 but it is " + _parms._mtries);
if( _train != null ) {
int ncols = _train.numCols();
if( _parms._mtries != -1 && _parms._mtries != -2 && !(1 <= _parms._mtries && _parms._mtries <= ncols))
error("_mtries","Computed mtries should be -1 or -2 or in interval [1," + ncols + "] but it is " + _parms._mtries);
}
if (_parms._distribution != DistributionFamily.AUTO && _parms._distribution != DistributionFamily.gaussian) {
throw new IllegalStateException("Isolation Forest doesn't expect the distribution to be specified by the user");
}
_parms._distribution = DistributionFamily.gaussian;
if (_parms._contamination != -1 && (_parms._contamination <= 0 || _parms._contamination > 0.5)) {
error("_contamination", "Contamination parameter needs to be in range (0, 0.5] or undefined (-1); but it is " + _parms._contamination);
}
if (_parms._valid != null) {
if (_parms._response_column == null) {
error("_response_column", "Response column needs to be defined when using a validation frame.");
} else if (expensive && vresponse() == null) {
error("_response_column", "Validation frame is missing response column `" + _parms._response_column + "`.");
}
if (_parms._contamination > 0) {
error("_contamination", "Contamination parameter cannot be used together with a validation frame.");
}
} else {
if (_parms._stopping_metric != ScoreKeeper.StoppingMetric.AUTO && _parms._stopping_metric != ScoreKeeper.StoppingMetric.anomaly_score) {
error("_stopping_metric", "Stopping metric `" + _parms._stopping_metric +
"` can only be used when a labeled validation frame is provided.");
}
}
if (expensive) {
if (vresponse() != null) {
if (!vresponse().isBinary() || vresponse().domain()==null) {
error("_response_column", "The response column of the validation frame needs to have a binary categorical domain (not anomaly/anomaly).");
}
}
if (response() != null) {
error("_training_frame", "Training frame should not have a response column");
}
}
}
@Override
protected void validateRowSampleRate() {
if (_parms._sample_rate == -1) {
if (_parms._sample_size <= 0) {
error("_sample_size", "Sample size needs to be a positive integer number but it is " + _parms._sample_size);
} else if (_train != null && _train.numRows() > 0) {
_parms._sample_rate = _parms._sample_size / (double) _train.numRows();
}
}
}
@Override
protected boolean validateStoppingMetric() {
return false; // disable the default stopping metric validation
}
private void randomResp(final long seed, final int iteration) {
new MRTask() {
@Override public void map(Chunk chks[]) {
Chunk c = chk_work(chks, 0);
final long chunk_seed = seed + (c.start() * (1 + iteration));
for (int i = 0; i < c._len; i++) {
double rnd = getRNG(chunk_seed + i).nextDouble();
chk_work(chks, 0).set(i, rnd);
}
}
}.doAll(_train);
}
@Override
protected DTree.DecidedNode makeDecided(DTree.UndecidedNode udn, DHistogram hs[], Constraints cs) {
return new IFDecidedNode(udn, hs, cs);
}
private class IFDecidedNode extends DTree.DecidedNode {
private IFDecidedNode(DTree.UndecidedNode n, DHistogram[] hs, Constraints cs) {
super(n, hs, cs, null);
}
@Override
public DTree.Split bestCol(DTree.UndecidedNode u, DHistogram hs[], Constraints cs) {
if( hs == null ) return null;
final int maxCols = u._scoreCols == null /* all cols */ ? hs.length : u._scoreCols.length;
List<FindSplits> findSplits = new ArrayList<>();
for (int i=0; i<maxCols; i++) {
int col = u._scoreCols == null ? i : u._scoreCols[i];
if( hs[col]==null || hs[col].nbins() <= 1 ) continue;
findSplits.add(new FindSplits(hs, cs, col, u));
}
Collections.shuffle(findSplits, _rand);
for (FindSplits fs : findSplits) {
DTree.Split s = fs.computeSplit();
if (s != null) {
return s;
}
}
return null;
}
}
@Override
protected void addCustomInfo(IsolationForestOutput out) {
if (_var_splits != null) {
out._var_splits = _var_splits;
out._variable_splits = _var_splits.toTwoDimTable(out.features(), "Variable Splits");
}
if (_parms._contamination > 0) {
assert vresponse() == null; // contamination is not compatible with using validation frame
assert _model.outputAnomalyFlag();
Frame fr = _model.score(_train);
try {
Vec score = fr.vec("score");
assert score != null;
out._defaultThreshold = Quantile.calcQuantile(score, 1 - _parms._contamination);
} finally {
fr.delete();
}
} else if (_model._output._validation_metrics instanceof ModelMetricsBinomial) {
out._defaultThreshold = ((ModelMetricsBinomial) _model._output._validation_metrics)._auc.defaultThreshold();
}
}
// ----------------------
private class IsolationForestDriver extends Driver {
@Override protected boolean doOOBScoring() { return true; }
@Override protected void initializeModelSpecifics() {
_mtry_per_tree = Math.max(1, (int)(_parms._col_sample_rate_per_tree * _ncols));
if (!(1 <= _mtry_per_tree && _mtry_per_tree <= _ncols)) throw new IllegalArgumentException("Computed mtry_per_tree should be in interval <1,"+_ncols+"> but it is " + _mtry_per_tree);
      if(_parms._mtries==-2){ //mtries set to -2 uses all columns in each split regardless of which columns were dropped during training
_mtry = _ncols;
}else if(_parms._mtries==-1) {
_mtry = (isClassifier() ? Math.max((int) Math.sqrt(_ncols), 1) : Math.max(_ncols / 3, 1)); // classification: mtry=sqrt(_ncols), regression: mtry=_ncols/3
}else{
_mtry = _parms._mtries;
}
if (!(1 <= _mtry && _mtry <= _ncols)) {
throw new IllegalArgumentException("Computed mtry should be in interval <1," + _ncols + "> but it is " + _mtry);
}
_initialPrediction = 0;
_var_splits = new VarSplits(_ncols);
if ((_parms._contamination > 0) || (vresponse() != null)) {
_model._output._defaultThreshold = 0.5;
assert _model.outputAnomalyFlag();
}
}
// --------------------------------------------------------------------------
// Build the next random k-trees representing tid-th tree
@Override protected boolean buildNextKTrees() {
// Create a Random response
randomResp(_parms._seed, _model._output._ntrees);
final long rseed = _rand.nextLong();
final DTree tree = new DTree(_train, _ncols, _mtry, _mtry_per_tree, rseed, _parms);
final DTree[] ktrees = {tree};
new Sample(tree, _parms._sample_rate, null)
.dfork(null, new Frame(vec_nids(_train, 0), vec_work(_train, 0)), _parms._build_tree_one_node)
.getResult();
// Assign rows to nodes - fill the "NIDs" column(s)
growTree(rseed, ktrees);
// Reset NIDs
CalculatePaths stats = new CalculatePaths(ktrees[0]).doAll(_train, _parms._build_tree_one_node);
// Grow the model by K-trees
_model._output.addKTrees(ktrees);
_model._output._min_path_length = stats._minPathLength;
_model._output._max_path_length = stats._maxPathLength;
return false; // never stop early
}
// Assumes that the "Work" column are filled with copy of a random generated response
private void growTree(long rseed, final DTree[] ktrees) {
// Initial set of histograms. All trees; one leaf per tree (the root
// leaf); all columns
DHistogram hcs[][][] = new DHistogram[_nclass][1/*just root leaf*/][_ncols];
// Adjust real bins for the top-levels
int adj_nbins = Math.max(_parms._nbins_top_level,_parms._nbins);
// Initially setup as-if an empty-split had just happened
final DTree tree = ktrees[0];
new UndecidedNode(tree, -1, DHistogram.initialHist(_train, _ncols, adj_nbins, hcs[0][0], rseed, _parms, getGlobalSplitPointsKeys(), null, false, null), null, null); // The "root" node
// ----
// One Big Loop till the ktrees are of proper depth.
// Adds a layer to the trees each pass.
final int[] leafs = new int[1];
for(int depth=0 ; depth<_parms._max_depth; depth++ ) {
hcs = buildLayer(_train, _parms._nbins, ktrees, leafs, hcs, _parms._build_tree_one_node);
// If we did not make any new splits, then the tree is split-to-death
if( hcs == null ) break;
}
// Each tree bottomed-out in a DecidedNode; go 1 more level and insert
// LeafNodes to hold predictions.
int leaf = tree.len();
int depths[] = new int[leaf];
for( int nid=0; nid<leaf; nid++ ) {
if( tree.node(nid) instanceof DecidedNode ) {
DecidedNode dn = tree.decided(nid);
if( dn._split == null ) { // No decision here, no row should have this NID now
if( nid==0 ) { // Handle the trivial non-splitting tree
LeafNode ln = new LeafNode(tree, -1, 0);
ln._pred = 0;
}
continue;
}
depths[nid] = dn._pid >= 0 ? depths[dn._pid] + 1 : 0;
for( int i=0; i<dn._nids.length; i++ ) {
int cnid = dn._nids[i];
if( cnid == -1 || // Bottomed out (predictors or responses known constant)
tree.node(cnid) instanceof UndecidedNode || // Or chopped off for depth
(tree.node(cnid) instanceof DecidedNode && // Or not possible to split
((DecidedNode)tree.node(cnid))._split==null) ) {
LeafNode ln = new LeafNode(tree,nid);
ln._pred = depths[nid]; // Set depth as the prediction into the leaf
dn._nids[i] = ln.nid(); // Mark a leaf here
}
}
}
}
updatePerFeatureInfo(tree, depths);
}
private void updatePerFeatureInfo(DTree tree, int[] depths) {
for (int i = 0; i < tree._len; i++) {
DTree.Node n = tree.node(i);
if (! (n instanceof DecidedNode))
continue;
DecidedNode dn = (DecidedNode) n;
DTree.Split split = dn._split;
if (split == null)
continue;
_var_splits.update(split.col(), split, depths[n.nid()]);
}
}
    // Collect and write predictions into leaves.
private class CalculatePaths extends MRTask<CalculatePaths> {
private final DTree _tree;
// OUT
private int _minPathLength = Integer.MAX_VALUE;
private int _maxPathLength = 0;
private CalculatePaths(DTree tree) { _tree = tree; }
@Override public void map(Chunk[] chks) {
final Chunk tree = chk_tree(chks, 0);
final Chunk nids = chk_nids(chks, 0); // Node-ids for this tree/class
final Chunk oobt = chk_oobt(chks);
for (int row = 0; row < nids._len; row++) {
final int rawNid = (int) chk_nids(chks,0).at8(row);
final boolean wasOOBRow = ScoreBuildHistogram.isOOBRow(rawNid);
final int nid = wasOOBRow ? ScoreBuildHistogram.oob2Nid(rawNid) : rawNid;
final int depth = getNodeDepth(chks, row, nid);
if (wasOOBRow) {
double oobcnt = oobt.atd(row) + 1;
oobt.set(row, oobcnt);
}
final int total_len = PathTracker.encodeNewPathLength(tree, row, depth, wasOOBRow);
_maxPathLength = total_len > _maxPathLength ? total_len : _maxPathLength;
_minPathLength = total_len < _minPathLength ? total_len : _minPathLength;
// reset NIds
nids.set(row, 0);
}
}
@Override public void reduce(CalculatePaths mrt) {
_minPathLength = Math.min(_minPathLength, mrt._minPathLength);
_maxPathLength = Math.max(_maxPathLength, mrt._maxPathLength);
}
int getNodeDepth(Chunk[] chks, int row, int nid) {
if (_tree.root() instanceof LeafNode) {
return 0;
} else {
if (_tree.node(nid) instanceof UndecidedNode) // If we bottomed out the tree
nid = _tree.node(nid).pid(); // Then take parent's decision
DecidedNode dn = _tree.decided(nid); // Must have a decision point
if (dn._split == null) // Unable to decide?
dn = _tree.decided(_tree.node(nid).pid()); // Then take parent's decision
int leafnid = dn.getChildNodeID(chks, row); // Decide down to a leafnode
double depth = ((LeafNode) _tree.node(leafnid)).pred();
assert (int) depth == depth;
return (int) depth;
}
}
}
@Override protected IsolationForestModel makeModel(Key modelKey, IsolationForestParameters parms) {
return new IsolationForestModel(modelKey, parms, new IsolationForestOutput(IsolationForest.this));
}
}
@Override protected double score1(Chunk chks[], double weight, double offset, double fs[/*2*/], int row) {
assert weight == 1;
int len = PathTracker.decodeOOBPathLength(chk_tree(chks, 0), row);
fs[1] = len / chk_oobt(chks).atd(row); // average tree path length
fs[0] = _model.normalizePathLength(fs[1] * _model._output._ntrees); // score
return fs[0];
}
protected TwoDimTable createScoringHistoryTable() {
List<String> colHeaders = new ArrayList<>();
List<String> colTypes = new ArrayList<>();
List<String> colFormat = new ArrayList<>();
colHeaders.add("Timestamp"); colTypes.add("string"); colFormat.add("%s");
colHeaders.add("Duration"); colTypes.add("string"); colFormat.add("%s");
colHeaders.add("Number of Trees"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Mean Tree Path Length"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Mean Anomaly Score"); colTypes.add("double"); colFormat.add("%.5f");
if (_parms._custom_metric_func != null) {
colHeaders.add("Training Custom"); colTypes.add("double"); colFormat.add("%.5f");
}
ScoreKeeper[] sks = _model._output._scored_train;
int rows = 0;
for (int i = 0; i < sks.length; i++) {
if (i != 0 && Double.isNaN(sks[i]._anomaly_score)) continue;
rows++;
}
TwoDimTable table = new TwoDimTable(
"Scoring History", null,
new String[rows],
colHeaders.toArray(new String[0]),
colTypes.toArray(new String[0]),
colFormat.toArray(new String[0]),
"");
int row = 0;
for( int i = 0; i<sks.length; i++ ) {
if (i != 0 && Double.isNaN(sks[i]._anomaly_score)) continue;
int col = 0;
DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss");
table.set(row, col++, fmt.print(_model._output._training_time_ms[i]));
table.set(row, col++, PrettyPrint.msecs(_model._output._training_time_ms[i] - _job.start_time(), true));
table.set(row, col++, i);
ScoreKeeper st = sks[i];
table.set(row, col++, st._anomaly_score);
table.set(row, col++, st._anomaly_score_normalized);
if (_parms._custom_metric_func != null) {
table.set(row, col++, st._custom_metric);
}
assert col == colHeaders.size();
row++;
}
return table;
}
@Override
public boolean havePojo() {
return false;
}
@Override
public boolean haveMojo() {
return true;
}
public static class VarSplits extends Iced<VarSplits> {
public final int[] _splitCounts;
public final float[] _aggSplitRatios;
public final long[] _splitDepths;
private VarSplits(int ncols) {
_splitCounts = new int[ncols];
_aggSplitRatios = new float[ncols];
_splitDepths = new long[ncols];
}
void update(int col, DTree.Split split, int depth) {
_aggSplitRatios[col] += Math.abs(split.n0() - split.n1()) / (split.n0() + split.n1());
_splitCounts[col]++;
_splitDepths[col] += depth + 1;
}
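    // Illustrative note (not part of the original H2O source): |n0 - n1| / (n0 + n1) is 0 for a
    // perfectly balanced split and approaches 1 for a fully one-sided split. E.g. a split sending
    // 30 rows left and 10 rows right adds |30 - 10| / 40 = 0.5 to _aggSplitRatios[col], increments
    // _splitCounts[col], and adds depth + 1 to _splitDepths[col].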
public TwoDimTable toTwoDimTable(String[] coef_names, String table_header) {
double[][] dblCellValues = new double[_splitCounts.length][];
for (int i = 0; i < _splitCounts.length; i++) {
dblCellValues[i] = new double[]{_splitCounts[i], _aggSplitRatios[i], _splitDepths[i]};
}
String[] col_headers = {"Count", "Aggregated Split Ratios", "Aggregated Split Depths"};
String[] col_types = {"int", "double", "long"};
String[] col_formats = {"%10d", "%5f", "%10d"};
return new TwoDimTable(table_header, null, coef_names, col_headers, col_types, col_formats,
"Variable", new String[_splitCounts.length][], dblCellValues);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isofor/IsolationForestModel.java
|
package hex.tree.isofor;
import hex.ModelCategory;
import hex.ModelMetrics;
import hex.genmodel.CategoricalEncoding;
import hex.genmodel.utils.ArrayUtils;
import hex.ScoreKeeper;
import hex.genmodel.utils.DistributionFamily;
import hex.tree.SharedTreeModel;
import water.Key;
import water.fvec.Frame;
import water.util.SBPrintStream;
import water.util.TwoDimTable;
public class IsolationForestModel extends SharedTreeModel<IsolationForestModel, IsolationForestModel.IsolationForestParameters, IsolationForestModel.IsolationForestOutput> {
public static class IsolationForestParameters extends SharedTreeModel.SharedTreeParameters {
public String algoName() { return "IsolationForest"; }
public String fullName() { return "Isolation Forest"; }
public String javaName() { return IsolationForestModel.class.getName(); }
public int _mtries;
public long _sample_size;
public double _contamination;
public IsolationForestParameters() {
super();
_mtries = -1;
_sample_size = 256;
_max_depth = 8; // log2(_sample_size)
_sample_rate = -1;
_min_rows = 1;
_min_split_improvement = 0;
_nbins = 2;
_nbins_cats = 2;
// _nbins_top_level = 2;
_histogram_type = HistogramType.Random;
_distribution = DistributionFamily.gaussian;
// IF specific
_contamination = -1; // disabled
}
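    // Note (not part of the original H2O source): the default _max_depth = 8 is log2 of the default
    // _sample_size = 256 (2^8 = 256), i.e. the depth needed to fully isolate a sample of that size
    // in a balanced binary tree, matching the inline comment above.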
@Override
protected double defaultStoppingTolerance() {
return 0.01; // (inherited value 0.001 would be too low for the default criterion anomaly_score)
}
}
public static class IsolationForestOutput extends SharedTreeModel.SharedTreeOutput {
public int _max_path_length;
public int _min_path_length;
public String _response_column;
public String[] _response_domain;
public IsolationForest.VarSplits _var_splits;
public TwoDimTable _variable_splits;
public IsolationForestOutput(IsolationForest b) {
super(b);
if (b.vresponse() != null) {
_response_column = b._parms._response_column;
_response_domain = b.vresponse().domain();
}
}
@Override
public ModelCategory getModelCategory() {
return ModelCategory.AnomalyDetection;
}
@Override
public double defaultThreshold() {
return _defaultThreshold;
}
@Override
public String responseName() {
return _response_column;
}
@Override
public boolean hasResponse() {
return _response_column != null;
}
@Override
public int responseIdx() {
return _names.length;
}
}
public IsolationForestModel(Key<IsolationForestModel> selfKey, IsolationForestParameters parms, IsolationForestOutput output ) {
super(selfKey, parms, output);
}
@Override
public void initActualParamValues() {
super.initActualParamValues();
if (_parms._stopping_metric == ScoreKeeper.StoppingMetric.AUTO){
if (_parms._stopping_rounds == 0) {
_parms._stopping_metric = null;
} else {
_parms._stopping_metric = ScoreKeeper.StoppingMetric.anomaly_score;
}
}
if (_parms._categorical_encoding == Parameters.CategoricalEncodingScheme.AUTO) {
_parms._categorical_encoding = Parameters.CategoricalEncodingScheme.Enum;
}
}
@Override
public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) {
// note: in the context of scoring on a training frame during model building domain will be null
if (domain != null && _output.hasResponse()) {
return new MetricBuilderAnomalySupervised(domain);
} else {
return new ModelMetricsAnomaly.MetricBuilderAnomaly("Isolation Forest Metrics", outputAnomalyFlag());
}
}
@Override
protected String[] makeScoringNames() {
if (outputAnomalyFlag()) {
return new String[]{"predict", "score", "mean_length"};
} else {
return new String[]{"predict", "mean_length"};
}
}
@Override
protected String[][] makeScoringDomains(Frame adaptFrm, boolean computeMetrics, String[] names) {
assert outputAnomalyFlag() ? names.length == 3 : names.length == 2;
String[][] domains = new String[names.length][];
if (outputAnomalyFlag()) {
domains[0] = _output._response_domain != null ? _output._response_domain : new String[]{"0", "1"};
}
return domains;
}
/** Bulk scoring API for one row. Chunks are all compatible with the model,
* and expect the last Chunks are for the final distribution and prediction.
* Default method is to just load the data into the tmp array, then call
* subclass scoring logic. */
@Override protected double[] score0(double[] data, double[] preds, double offset, int ntrees) {
super.score0(data, preds, offset, ntrees);
boolean outputAnomalyFlag = outputAnomalyFlag();
int off = outputAnomalyFlag ? 1 : 0;
if (ntrees >= 1)
preds[off + 1] = preds[0] / ntrees;
preds[off] = normalizePathLength(preds[0]);
if (outputAnomalyFlag)
preds[0] = preds[1] >= _output._defaultThreshold ? 1 : 0;
return preds;
}
final double normalizePathLength(double pathLength) {
return normalizePathLength(pathLength, _output._min_path_length, _output._max_path_length);
}
static double normalizePathLength(double pathLength, int minPathLength, int maxPathLength) {
if (maxPathLength > minPathLength) {
return (maxPathLength - pathLength) / (maxPathLength - minPathLength);
} else {
return 1;
}
}
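  // Illustrative note (not part of the original H2O source): shorter paths map to scores closer to 1
  // (more anomalous). E.g. with minPathLength = 4 and maxPathLength = 20, a path length of 6
  // normalizes to (20 - 6) / (20 - 4) = 0.875, while a path length of 18 normalizes to
  // (20 - 18) / 16 = 0.125. In score0 above, the normalized score lands in preds[off], where off is 1
  // when the anomaly flag is output ({predict, score, mean_length}) and 0 otherwise
  // ({score, mean_length}), matching makeScoringNames().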
@Override
public IsolationForestMojoWriter getMojo() {
return new IsolationForestMojoWriter(this);
}
@Override
public String[] adaptTestForTrain(Frame test, boolean expensive, boolean computeMetrics) {
if (!computeMetrics || _output._response_column == null) {
return super.adaptTestForTrain(test, expensive, computeMetrics);
} else {
return adaptTestForTrain(
test,
_output._origNames,
_output._origDomains,
ArrayUtils.append(_output._names, _output._response_column),
ArrayUtils.append(_output._domains, _output._response_domain),
_parms,
expensive,
true,
_output.interactionBuilder(),
getToEigenVec(),
_toDelete,
false
);
}
}
final boolean outputAnomalyFlag() {
return _output._defaultThreshold >= 0;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isofor/IsolationForestMojoWriter.java
|
package hex.tree.isofor;
import hex.genmodel.CategoricalEncoding;
import hex.tree.SharedTreeMojoWriter;
import java.io.IOException;
import static hex.tree.isofor.IsolationForestModel.IsolationForestParameters;
import static hex.tree.isofor.IsolationForestModel.IsolationForestOutput;
/**
* Mojo definition for Isolation Forest model.
*/
public class IsolationForestMojoWriter extends SharedTreeMojoWriter<IsolationForestModel, IsolationForestParameters, IsolationForestOutput> {
@SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler
public IsolationForestMojoWriter() {}
public IsolationForestMojoWriter(IsolationForestModel model) { super(model); }
@Override public String mojoVersion() {
return "1.40";
}
@Override
protected void writeModelData() throws IOException {
super.writeModelData();
if (model.getGenModelEncoding() != CategoricalEncoding.AUTO) {
throw new IllegalArgumentException("Only default categorical encoding scheme is supported for MOJO");
}
writekv("max_path_length", model._output._max_path_length);
writekv("min_path_length", model._output._min_path_length);
writekv("output_anomaly_flag", model.outputAnomalyFlag());
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isofor/MetricBuilderAnomalySupervised.java
|
package hex.tree.isofor;
import hex.*;
import water.fvec.Frame;
public class MetricBuilderAnomalySupervised extends ModelMetricsBinomial.MetricBuilderBinomial<MetricBuilderAnomalySupervised> {
public MetricBuilderAnomalySupervised(String[] domain) {
super(domain);
}
/**
* Create a ModelMetrics for a given model and frame
* @param m Model
* @param f Frame
* @param frameWithWeights Frame that contains extra columns such as weights (not used by MetricBuilderAnomalySupervised)
* @param preds optional predictions (can be null, not used by MetricBuilderAnomalySupervised)
* @return ModelMetricsBinomial
*/
@Override public ModelMetrics makeModelMetrics(final Model m, final Frame f,
Frame frameWithWeights, final Frame preds) {
final double sigma;
final double mse;
final double logloss;
final AUC2 auc;
if (_wcount > 0) {
sigma = weightedSigma();
mse = _sumsqe / _wcount;
logloss = _logloss / _wcount;
auc = new AUC2(_auc);
} else {
sigma = Double.NaN;
mse = Double.NaN;
logloss = Double.NaN;
auc = AUC2.emptyAUC();
}
ModelMetricsBinomial mm = new ModelMetricsBinomial(m, f, _count, mse, _domain,
sigma, auc, logloss, null, _customMetric);
if (m != null) {
m.addModelMetrics(mm);
}
return mm;
}
@Override
public double[] perRow(double[] ds, float[] yact, double w, double o, Model m) {
adaptPreds(ds);
return super.perRow(ds, yact, w, o, m);
}
private static void adaptPreds(double[] ds) {
ds[2] = Math.min(ds[1], 1.0);
ds[1] = 1 - ds[2];
ds[0] = -1;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isofor/ModelMetricsAnomaly.java
|
package hex.tree.isofor;
import hex.*;
import water.fvec.Frame;
public class ModelMetricsAnomaly extends ModelMetricsUnsupervised implements ScoreKeeper.ScoreKeeperAware {
/**
   * The raw number that an algorithm uses to compute the final anomaly score.
   * E.g. the raw number the Isolation Forest algorithm uses to compute the final anomaly score is the mean path length
   * of the observation (input row) from the root to a leaf.
*/
public final double _mean_score;
/**
   * Mean normalized score should be (but is not necessarily) a number between 0 and 1. By convention, a higher number means a
   * "more anomalous" observation (input row) and a number closer to 0 means a standard (non-anomalous) observation (input row).
   *
   * Always refer to the algorithm's documentation for the proper definition of this number.
   * E.g. the formula for normalizing the Isolation Forest score differs from the formula for Extended Isolation Forest.
*/
public final double _mean_normalized_score;
public ModelMetricsAnomaly(Model model, Frame frame, CustomMetric customMetric,
long nobs, double totalScore, double totalNormScore,
String description) {
super(model, frame, nobs, description, customMetric);
_mean_score = totalScore / nobs;
_mean_normalized_score = totalNormScore / nobs;
}
@Override
public void fillTo(ScoreKeeper sk) {
sk._anomaly_score = _mean_score;
sk._anomaly_score_normalized = _mean_normalized_score;
}
@Override
protected StringBuilder appendToStringMetrics(StringBuilder sb) {
sb.append(" Number of Observations: ").append(_nobs).append("\n");
sb.append(" Mean Score: ").append(_mean_score).append("\n");
sb.append(" Mean Normalized Anomaly Score: ").append(_mean_normalized_score).append("\n");
return sb;
}
public static class MetricBuilderAnomaly extends MetricBuilderUnsupervised<MetricBuilderAnomaly> {
private transient String _description;
private double _total_score = 0;
private double _total_norm_score = 0;
private long _nobs = 0;
public MetricBuilderAnomaly() {
this("", false);
}
public MetricBuilderAnomaly(String description, boolean outputAnomalyFlag) {
_work = new double[outputAnomalyFlag ? 3 : 2];
_description = description;
}
@Override
public double[] perRow(double[] preds, float[] dataRow, Model m) {
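      // preds[0] holds the normalized anomaly score and preds[1] the raw score (e.g. the mean path
      // length); a negative preds[0] marks a row that is skipped for metric accumulation.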
if (preds[0] < 0)
return preds;
_total_norm_score += preds[0];
_total_score += preds[1];
_nobs++;
return preds;
}
@Override
public void reduce(MetricBuilderAnomaly mb) {
_total_score += mb._total_score;
_total_norm_score += mb._total_norm_score;
_nobs += mb._nobs;
super.reduce(mb);
}
@Override
public ModelMetrics makeModelMetrics(Model m, Frame f) {
return m.addModelMetrics(new ModelMetricsAnomaly(m, f, _customMetric, _nobs, _total_score, _total_norm_score, _description));
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isofor/PathTracker.java
|
package hex.tree.isofor;
import water.fvec.Chunk;
/**
 * Helper class - encodes, per observation, the accumulated path lengths separately for trees where the
 * observation was out-of-bag (OOB) and for trees where it was used for tree building.
*/
class PathTracker {
static int encodeNewPathLength(Chunk tree, int row, int depth, boolean wasOOB) {
final long old_len_enc = tree.at8(row);
final long len_enc = addNewPathLength(old_len_enc, depth, wasOOB);
tree.set(row, len_enc);
return decodeTotalPathLength(len_enc);
}
static int decodeOOBPathLength(Chunk tree, int row) {
return decodeOOBPathLength(tree.at8(row));
}
private static int decodeTotalPathLength(long lengthEncoded) {
long total_len = (lengthEncoded >> 31) + (lengthEncoded & 0x7fffffff);
assert total_len == (int) total_len;
return (int) total_len;
}
static int decodeOOBPathLength(long lengthEncoded) {
return (int) (lengthEncoded >> 31);
}
static long addNewPathLength(long oldLengthEncoded, int depth, boolean wasOOB) {
if (wasOOB) {
return oldLengthEncoded + ((long) depth << 31);
} else {
return oldLengthEncoded + depth;
}
}
}
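/**
 * Minimal usage sketch (the class name is hypothetical and the values illustrative only): shows how the
 * encoding packs the in-bag path length into the lower 31 bits and the OOB path length into the upper
 * bits of a single long.
 */
class PathTrackerEncodingSketch {
  public static void main(String[] args) {
    long enc = 0L;
    enc = PathTracker.addNewPathLength(enc, 5, false); // depth 5 collected while the row was in-bag
    enc = PathTracker.addNewPathLength(enc, 7, true);  // depth 7 collected while the row was out-of-bag
    System.out.println(PathTracker.decodeOOBPathLength(enc)); // prints 7
  }
}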
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended/ExtendedIsolationForest.java
|
package hex.tree.isoforextended;
import hex.ModelBuilder;
import hex.ModelCategory;
import hex.ModelMetrics;
import hex.ScoreKeeper;
import hex.tree.isoforextended.isolationtree.CompressedIsolationTree;
import hex.tree.isoforextended.isolationtree.IsolationTree;
import hex.tree.isoforextended.isolationtree.IsolationTreeStats;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import water.DKV;
import water.H2O;
import water.Job;
import water.Key;
import water.exceptions.H2OModelBuilderIllegalArgumentException;
import water.fvec.Frame;
import water.util.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
/**
 * Extended isolation forest implementation. The algorithm comes from the paper https://arxiv.org/pdf/1811.02141.pdf.
*
* @author Adam Valenta
*/
public class ExtendedIsolationForest extends ModelBuilder<ExtendedIsolationForestModel,
ExtendedIsolationForestModel.ExtendedIsolationForestParameters,
ExtendedIsolationForestModel.ExtendedIsolationForestOutput> {
transient private static final Logger LOG = Logger.getLogger(ExtendedIsolationForest.class);
public static final int MAX_NTREES = 100_000;
public static final int MAX_SAMPLE_SIZE = 100_000;
private ExtendedIsolationForestModel _model;
transient Random _rand;
transient IsolationTreeStats isolationTreeStats;
// Called from an http request
public ExtendedIsolationForest(ExtendedIsolationForestModel.ExtendedIsolationForestParameters parms) {
super(parms);
init(false);
}
public ExtendedIsolationForest(ExtendedIsolationForestModel.ExtendedIsolationForestParameters parms, Key<ExtendedIsolationForestModel> key) {
super(parms, key);
init(false);
}
public ExtendedIsolationForest(ExtendedIsolationForestModel.ExtendedIsolationForestParameters parms, Job job) {
super(parms, job);
init(false);
}
public ExtendedIsolationForest(boolean startup_once) {
super(new ExtendedIsolationForestModel.ExtendedIsolationForestParameters(), startup_once);
}
@Override
protected void checkMemoryFootPrint_impl() {
int heightLimit = (int) Math.ceil(MathUtils.log2(_parms._sample_size));
double numInnerNodes = Math.pow(2, heightLimit) - 1;
double numLeafNodes = Math.pow(2, heightLimit);
double sizeOfInnerNode = 2 * _train.numCols() * Double.BYTES;
double sizeOfLeafNode = Integer.BYTES;
long maxMem = H2O.SELF._heartbeat.get_free_mem();
// IsolationTree is sparse for large data, so count only 25% of the full tree
double oneTree = 0.25 * numInnerNodes * sizeOfInnerNode + numLeafNodes * sizeOfLeafNode;
long estimatedMemory = (long) (_parms._ntrees * oneTree);
long estimatedComputingMemory = 5 * estimatedMemory;
if (estimatedComputingMemory > H2O.SELF._heartbeat.get_free_mem() || estimatedComputingMemory < 0 /* long overflow */) {
String msg = "Extended Isolation Forest computation won't fit in the driver node's memory ("
+ PrettyPrint.bytes(estimatedComputingMemory) + " > " + PrettyPrint.bytes(maxMem)
+ ") - try reducing the number of columns and/or the number of trees and/or the sample_size parameter. "
+ "You can disable memory check by setting the attribute " + H2O.OptArgs.SYSTEM_PROP_PREFIX + "debug.noMemoryCheck.";
error("_train", msg);
}
}
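  // Worked example of the estimate above (illustrative numbers): with sample_size = 256 the height
  // limit is ceil(log2(256)) = 8, i.e. 2^8 - 1 = 255 inner nodes and 2^8 = 256 leaves per tree.
  // For 10 predictor columns an inner node takes 2 * 10 * 8 = 160 bytes and a leaf 4 bytes, so
  // oneTree ~= 0.25 * 255 * 160 + 256 * 4 ~= 11.2 KB, 100 trees ~= 1.1 MB, and the 5x computing
  // overhead puts the check at roughly 5.6 MB of required free memory.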
@Override
public void init(boolean expensive) {
super.init(expensive);
if (_parms.train() != null) {
if (expensive) { // because e.g. OneHotExplicit categorical encoding can change the dimension
long extensionLevelMax = _train.numCols() - 1;
if (_parms._extension_level < 0 || _parms._extension_level > extensionLevelMax) {
error("extension_level", "Parameter extension_level must be in interval [0, "
+ extensionLevelMax + "] but it is " + _parms._extension_level);
}
}
long sampleSizeMax = _parms.train().numRows();
if (_parms._sample_size < 2 || _parms._sample_size > MAX_SAMPLE_SIZE || _parms._sample_size > sampleSizeMax) {
error("sample_size","Parameter sample_size must be in interval [2, "
+ MAX_SAMPLE_SIZE + "] but it is " + _parms._sample_size);
}
if(_parms._ntrees < 1 || _parms._ntrees > MAX_NTREES)
error("ntrees", "Parameter ntrees must be in interval [1, "
+ MAX_NTREES + "] but it is " + _parms._ntrees);
}
if (expensive && error_count() == 0) checkMemoryFootPrint();
}
@Override
protected Driver trainModelImpl() {
return new ExtendedIsolationForestDriver();
}
@Override
public ModelCategory[] can_build() {
return new ModelCategory[]{
ModelCategory.AnomalyDetection
};
}
@Override
public boolean isSupervised() {
return false;
}
@Override
public boolean havePojo() {
return false;
}
@Override
public boolean haveMojo() {
return true;
}
private class ExtendedIsolationForestDriver extends Driver {
@Override
public void computeImpl() {
_model = null;
try {
init(true);
if(error_count() > 0) {
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(ExtendedIsolationForest.this);
}
_rand = RandomUtils.getRNG(_parms._seed);
isolationTreeStats = new IsolationTreeStats();
_model = new ExtendedIsolationForestModel(dest(), _parms,
new ExtendedIsolationForestModel.ExtendedIsolationForestOutput(ExtendedIsolationForest.this));
_model.delete_and_lock(_job);
buildIsolationTreeEnsemble();
if (_parms._disable_training_metrics) {
_model._output._model_summary = createModelSummaryTable();
LOG.info(_model.toString());
} // otherwise the summary was already created during the final scoring inside buildIsolationTreeEnsemble()
} finally {
if(_model != null)
_model.unlock(_job);
}
}
private void buildIsolationTreeEnsemble() {
_model._output._iTreeKeys = new Key[_parms._ntrees];
_model._output._scored_train = new ScoreKeeper[_parms._ntrees + 1];
_model._output._scored_train[0] = new ScoreKeeper();
_model._output._training_time_ms = new long[_parms._ntrees + 1];
_model._output._training_time_ms[0] = System.currentTimeMillis();
long timeLastScoreStart = 0;
long timeLastScoreEnd = 0;
long sinceLastScore = 0;
int heightLimit = (int) Math.ceil(MathUtils.log2(_parms._sample_size));
IsolationTree isolationTree = new IsolationTree(heightLimit, _parms._extension_level);
for (int tid = 0; tid < _parms._ntrees; tid++) {
Timer timer = new Timer();
Frame subSample = MRUtils.sampleFrameSmall(_train, _parms._sample_size, _rand);
double[][] subSampleArray = FrameUtils.asDoubles(subSample);
CompressedIsolationTree compressedIsolationTree = isolationTree.buildTree(subSampleArray, _parms._seed + _rand.nextInt(), tid);
if (LOG.isDebugEnabled()) {
isolationTree.logNodesNumRows(Level.DEBUG);
isolationTree.logNodesHeight(Level.DEBUG);
}
_model._output._iTreeKeys[tid] = compressedIsolationTree._key;
DKV.put(compressedIsolationTree);
_job.update(1);
_model.update(_job);
_model._output._training_time_ms[tid + 1] = System.currentTimeMillis();
LOG.info((tid + 1) + ". tree was built in " + timer);
isolationTreeStats.updateBy(isolationTree);
long now = System.currentTimeMillis();
sinceLastScore = now - timeLastScoreStart;
boolean timeToScore = (now-_job.start_time() < _parms._initial_score_interval) || // Score every time for 4 secs
// Throttle scoring to keep the cost sane; limit to a 10% duty cycle & every 4 secs
(sinceLastScore > _parms._score_interval && // Limit scoring updates to every 4sec
(double)(timeLastScoreEnd - timeLastScoreStart)/sinceLastScore < 0.1); //10% duty cycle
boolean manualInterval = _parms._score_tree_interval > 0 && (tid +1) % _parms._score_tree_interval == 0;
boolean finalScoring = _parms._ntrees == (tid + 1);
boolean scored = false;
_model._output._scored_train[tid + 1] = new ScoreKeeper();
if (_parms._score_each_iteration || manualInterval || finalScoring || (timeToScore && _parms._score_tree_interval == 0) && !_parms._disable_training_metrics) {
_model._output._scored_train[tid + 1] = new ScoreKeeper();
timeLastScoreStart = System.currentTimeMillis();
ModelMetrics.MetricBuilder metricsBuilder = new ScoreExtendedIsolationForestTask(_model).doAll(_train).getMetricsBuilder();
ModelMetrics modelMetrics = metricsBuilder.makeModelMetrics(_model, _parms.train(), null, null);
_model._output._training_metrics = modelMetrics;
_model._output._scored_train[tid + 1].fillFrom(modelMetrics);
scored = true;
timeLastScoreEnd = System.currentTimeMillis();
}
final boolean printout = (_parms._score_each_iteration || finalScoring || (sinceLastScore > _parms._score_interval && scored)) && !_parms._disable_training_metrics;
if (printout) {
_model._output._model_summary = createModelSummaryTable();
_model._output._scoring_history = createScoringHistoryTable(tid+1);
LOG.info(_model.toString());
}
}
}
}
public TwoDimTable createModelSummaryTable() {
List<String> colHeaders = new ArrayList<>();
List<String> colTypes = new ArrayList<>();
List<String> colFormat = new ArrayList<>();
colHeaders.add("Number of Trees"); colTypes.add("int"); colFormat.add("%d");
colHeaders.add("Size of Subsample"); colTypes.add("int"); colFormat.add("%d");
colHeaders.add("Extension Level"); colTypes.add("int"); colFormat.add("%d");
colHeaders.add("Seed"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Number of trained trees"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Min. Depth"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Max. Depth"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Mean Depth"); colTypes.add("float"); colFormat.add("%d");
colHeaders.add("Min. Leaves"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Max. Leaves"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Mean Leaves"); colTypes.add("float"); colFormat.add("%d");
colHeaders.add("Min. Isolated Point"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Max. Isolated Point"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Mean Isolated Point"); colTypes.add("float"); colFormat.add("%d");
colHeaders.add("Min. Not Isolated Point"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Max. Not Isolated Point"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Mean Not Isolated Point"); colTypes.add("float"); colFormat.add("%d");
colHeaders.add("Min. Zero Splits"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Max. Zero Splits"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Mean Zero Splits"); colTypes.add("float"); colFormat.add("%d");
final int rows = 1;
TwoDimTable table = new TwoDimTable(
"Model Summary", null,
new String[rows],
colHeaders.toArray(new String[0]),
colTypes.toArray(new String[0]),
colFormat.toArray(new String[0]),
"");
int row = 0;
int col = 0;
table.set(row, col++, _parms._ntrees);
table.set(row, col++, _parms._sample_size);
table.set(row, col++, _parms._extension_level);
table.set(row, col++, _parms._seed);
table.set(row, col++, isolationTreeStats._numTrees);
table.set(row, col++, isolationTreeStats._minDepth);
table.set(row, col++, isolationTreeStats._maxDepth);
table.set(row, col++, isolationTreeStats._meanDepth);
table.set(row, col++, isolationTreeStats._minLeaves);
table.set(row, col++, isolationTreeStats._maxLeaves);
table.set(row, col++, isolationTreeStats._meanLeaves);
table.set(row, col++, isolationTreeStats._minIsolated);
table.set(row, col++, isolationTreeStats._maxIsolated);
table.set(row, col++, isolationTreeStats._meanIsolated);
table.set(row, col++, isolationTreeStats._minNotIsolated);
table.set(row, col++, isolationTreeStats._maxNotIsolated);
table.set(row, col++, isolationTreeStats._meanNotIsolated);
table.set(row, col++, isolationTreeStats._minZeroSplits);
table.set(row, col++, isolationTreeStats._maxZeroSplits);
table.set(row, col, isolationTreeStats._meanZeroSplits);
return table;
}
protected TwoDimTable createScoringHistoryTable(int ntreesTrained) {
List<String> colHeaders = new ArrayList<>();
List<String> colTypes = new ArrayList<>();
List<String> colFormat = new ArrayList<>();
colHeaders.add("Timestamp"); colTypes.add("string"); colFormat.add("%s");
colHeaders.add("Duration"); colTypes.add("string"); colFormat.add("%s");
colHeaders.add("Number of Trees"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Mean Tree Path Length"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Mean Anomaly Score"); colTypes.add("double"); colFormat.add("%.5f");
if (_parms._custom_metric_func != null) {
colHeaders.add("Training Custom"); colTypes.add("double"); colFormat.add("%.5f");
}
ScoreKeeper[] sks = _model._output._scored_train;
int rows = 0;
for (int i = 0; i <= ntreesTrained; i++) {
if (i != 0 && sks[i] != null && Double.isNaN(sks[i]._anomaly_score) || sks[i] == null) continue;
rows++;
}
TwoDimTable table = new TwoDimTable(
"Scoring History", null,
new String[rows],
colHeaders.toArray(new String[0]),
colTypes.toArray(new String[0]),
colFormat.toArray(new String[0]),
"");
int row = 0;
for( int i = 0; i<=ntreesTrained; i++ ) {
if (i != 0 && sks[i] != null && Double.isNaN(sks[i]._anomaly_score) || sks[i] == null) continue;
int col = 0;
DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss");
table.set(row, col++, fmt.print(_model._output._training_time_ms[i]));
table.set(row, col++, PrettyPrint.msecs(_model._output._training_time_ms[i] - _job.start_time(), true));
table.set(row, col++, i);
ScoreKeeper st = sks[i];
table.set(row, col++, st._anomaly_score);
table.set(row, col++, st._anomaly_score_normalized);
if (_parms._custom_metric_func != null) {
table.set(row, col++, st._custom_metric);
}
assert col == colHeaders.size();
row++;
}
return table;
}
}
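/**
 * Minimal usage sketch (hypothetical class and parameter values, for illustration only): builds an
 * Extended Isolation Forest with a fully extended splitting rule, assuming a running H2O cluster and
 * the standard ModelBuilder flow (trainModel().get()).
 */
class ExtendedIsolationForestUsageSketch {
  static ExtendedIsolationForestModel buildExample(Frame train) {
    ExtendedIsolationForestModel.ExtendedIsolationForestParameters p =
        new ExtendedIsolationForestModel.ExtendedIsolationForestParameters();
    p._train = train._key;                    // frame of predictor columns
    p._ntrees = 100;                          // number of isolation trees
    p._sample_size = 256;                     // rows sampled for each tree
    p._extension_level = train.numCols() - 1; // fully extended; 0 would mimic plain Isolation Forest
    p._seed = 42;
    return new ExtendedIsolationForest(p).trainModel().get();
  }
}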
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended/ExtendedIsolationForestModel.java
|
package hex.tree.isoforextended;
import hex.Model;
import hex.ModelCategory;
import hex.ModelMetrics;
import hex.ScoreKeeper;
import hex.tree.isofor.ModelMetricsAnomaly;
import hex.tree.isoforextended.isolationtree.CompressedIsolationTree;
import org.apache.log4j.Logger;
import water.*;
import water.fvec.Frame;
import static hex.genmodel.algos.isoforextended.ExtendedIsolationForestMojoModel.anomalyScore;
/**
*
* @author Adam Valenta
*/
public class ExtendedIsolationForestModel extends Model<ExtendedIsolationForestModel, ExtendedIsolationForestModel.ExtendedIsolationForestParameters,
ExtendedIsolationForestModel.ExtendedIsolationForestOutput> {
private static final Logger LOG = Logger.getLogger(ExtendedIsolationForestModel.class);
public ExtendedIsolationForestModel(Key<ExtendedIsolationForestModel> selfKey, ExtendedIsolationForestParameters parms,
ExtendedIsolationForestOutput output) {
super(selfKey, parms, output);
}
@Override
public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) {
return new ModelMetricsAnomaly.MetricBuilderAnomaly("Extended Isolation Forest Metrics", false);
}
@Override
protected String[] makeScoringNames(){
return new String[]{"anomaly_score", "mean_length"};
}
@Override
protected String[][] makeScoringDomains(Frame adaptFrm, boolean computeMetrics, String[] names) {
assert names.length == 2;
return new String[2][];
}
@Override
protected double[] score0(double[] data, double[] preds) {
assert _output._iTreeKeys != null : "Output has no trees, check if trees are properly set to the output.";
// compute score for given point
double pathLength = 0;
int numberOfTrees = 0;
for (Key<CompressedIsolationTree> iTreeKey : _output._iTreeKeys) {
if (iTreeKey == null) continue;
numberOfTrees++;
CompressedIsolationTree iTree = DKV.getGet(iTreeKey);
double iTreeScore = iTree.computePathLength(data);
pathLength += iTreeScore;
LOG.trace("iTreeScore " + iTreeScore);
}
pathLength = pathLength / numberOfTrees;
LOG.trace("Path length " + pathLength);
double anomalyScore = anomalyScore(pathLength, _output._sample_size);
LOG.trace("Anomaly score " + anomalyScore);
preds[0] = anomalyScore;
preds[1] = pathLength;
return preds;
}
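  // Note on the normalization above: the anomalyScore call applies the standard isolation-forest
  // formula from the paper, s(x, n) = 2^(-E(h(x)) / c(n)), where E(h(x)) is the mean path length over
  // all trees and c(n) is the average path length of an unsuccessful BST search on n samples.
  // Scores close to 1 indicate anomalies; scores well below 0.5 indicate normal points.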
public static class ExtendedIsolationForestParameters extends Model.Parameters {
@Override
public String algoName() {
return "ExtendedIsolationForest";
}
@Override
public String fullName() {
return "Extended Isolation Forest";
}
@Override
public String javaName() {
return ExtendedIsolationForestModel.class.getName();
}
@Override
public long progressUnits() {
return _ntrees;
}
/**
* Number of trees in the forest
*/
public int _ntrees;
/**
* Maximum is N - 1 (N = numCols). Minimum is 0. EIF with extension_level = 0 behaves like Isolation Forest.
*/
public int _extension_level;
/**
* Number of randomly selected rows from original data before each tree build.
*/
public int _sample_size;
/**
* Score every so many trees (no matter what)
*/
public int _score_tree_interval;
/**
* Disable calculating training metrics (expensive on large datasets).
*/
public boolean _disable_training_metrics;
/**
* For _initial_score_interval milliseconds - score each iteration of the algorithm.
*/
public int _initial_score_interval = 4000;
/**
* After each _score_interval milliseconds - run scoring
*
* But limit the scoring time consumption to 10% of whole training time.
*/
public int _score_interval = 4000;
public ExtendedIsolationForestParameters() {
super();
_ntrees = 100;
_sample_size = 256;
_extension_level = 0;
_score_tree_interval = 0;
_disable_training_metrics = true;
}
}
public static class ExtendedIsolationForestOutput extends Model.Output {
public int _ntrees;
public long _sample_size;
public ScoreKeeper[] _scored_train;
public long[] _training_time_ms;
public Key<CompressedIsolationTree>[] _iTreeKeys;
public ExtendedIsolationForestOutput(ExtendedIsolationForest eif) {
super(eif);
_ntrees = eif._parms._ntrees;
_sample_size = eif._parms._sample_size;
}
@Override
public ModelCategory getModelCategory() {
return ModelCategory.AnomalyDetection;
}
}
@Override
protected Futures remove_impl(Futures fs, boolean cascade) {
for (Key<CompressedIsolationTree> iTreeKey : _output._iTreeKeys) {
Keyed.remove(iTreeKey, fs, true);
}
return super.remove_impl(fs, cascade);
}
@Override
protected AutoBuffer writeAll_impl(AutoBuffer ab) {
for (Key<CompressedIsolationTree> iTreeKey : _output._iTreeKeys) {
ab.putKey(iTreeKey);
}
return super.writeAll_impl(ab);
}
@Override
protected Keyed readAll_impl(AutoBuffer ab, Futures fs) {
for (Key<CompressedIsolationTree> iTreeKey : _output._iTreeKeys) {
ab.getKey(iTreeKey, fs);
}
return super.readAll_impl(ab,fs);
}
@Override
public ExtendedIsolationForestMojoWriter getMojo() {
return new ExtendedIsolationForestMojoWriter(this);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended/ExtendedIsolationForestMojoWriter.java
|
package hex.tree.isoforextended;
import hex.ModelMojoWriter;
import hex.tree.isoforextended.isolationtree.CompressedIsolationTree;
import water.DKV;
import java.io.IOException;
public class ExtendedIsolationForestMojoWriter extends ModelMojoWriter<ExtendedIsolationForestModel, ExtendedIsolationForestModel.ExtendedIsolationForestParameters, ExtendedIsolationForestModel.ExtendedIsolationForestOutput> {
@SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler
public ExtendedIsolationForestMojoWriter() {}
public ExtendedIsolationForestMojoWriter(ExtendedIsolationForestModel model) {
super(model);
}
@Override public String mojoVersion() {
return "1.00";
}
@Override
protected void writeModelData() throws IOException {
writekv("ntrees", model._output._ntrees);
writekv("sample_size", model._output._sample_size);
for (int i = 0; i < model._output._ntrees; i++) {
CompressedIsolationTree compressedIsolationTree = DKV.getGet(model._output._iTreeKeys[i]);
writeblob(String.format("trees/t%02d.bin", i), compressedIsolationTree.toBytes());
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended/ScoreExtendedIsolationForest.java
|
package hex.tree.isoforextended;
import hex.tree.isofor.ModelMetricsAnomaly;
import water.MRTask;
import water.fvec.Chunk;
class ScoreExtendedIsolationForestTask extends MRTask<ScoreExtendedIsolationForestTask> {
private ExtendedIsolationForestModel _model;
// output
private ModelMetricsAnomaly.MetricBuilderAnomaly _metricsBuilder;
public ScoreExtendedIsolationForestTask(ExtendedIsolationForestModel _model) {
this._model = _model;
}
@Override
public void map(Chunk[] cs) {
_metricsBuilder = (ModelMetricsAnomaly.MetricBuilderAnomaly) _model.makeMetricBuilder(null);
double [] preds = new double[2];
double [] tmp = new double[cs.length];
for (int row = 0; row < cs[0]._len; row++) {
preds = _model.score0(cs, 0, row, tmp, preds);
_metricsBuilder.perRow(preds, null, _model);
}
}
@Override
public void reduce(ScoreExtendedIsolationForestTask other) {
_metricsBuilder.reduce(other._metricsBuilder);
}
public ModelMetricsAnomaly.MetricBuilderAnomaly getMetricsBuilder() {
return _metricsBuilder;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended/isolationtree/AbstractCompressedNode.java
|
package hex.tree.isoforextended.isolationtree;
import water.AutoBuffer;
import water.Iced;
/**
* Upper class for {@link CompressedNode} and {@link CompressedLeaf} used to access both types from array.
*/
public abstract class AbstractCompressedNode extends Iced<AbstractCompressedNode> {
private final int _height;
public AbstractCompressedNode(int height) {
_height = height;
}
public int getHeight() {
return _height;
}
/**
* Serialize Node to the byte buffer
*/
public abstract void toBytes(AutoBuffer ab);
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended/isolationtree/CompressedIsolationTree.java
|
package hex.tree.isoforextended.isolationtree;
import water.AutoBuffer;
import water.Key;
import water.Keyed;
import water.util.ArrayUtils;
import static hex.genmodel.algos.isoforextended.ExtendedIsolationForestMojoModel.*;
/**
 * IsolationTree structure with better memory performance. Stores only the data that are needed for scoring.
*/
public class CompressedIsolationTree extends Keyed<CompressedIsolationTree> {
private final AbstractCompressedNode[] _nodes;
public CompressedIsolationTree(int heightLimit) {
_key = Key.make("CompressedIsolationTree" + Key.rand());
_nodes = new AbstractCompressedNode[(int) Math.pow(2, heightLimit + 1) - 1];
}
public AbstractCompressedNode[] getNodes() {
return _nodes;
}
private CompressedNode compressedNode(AbstractCompressedNode node) {
assert node instanceof CompressedNode : "AbstractCompressedNode cannot be cast to CompressedNode";
return (CompressedNode) node;
}
private CompressedLeaf compressedLeaf(AbstractCompressedNode node) {
assert node instanceof CompressedLeaf : "AbstractCompressedNode cannot be cast to CompressedLeaf";
return (CompressedLeaf) node;
}
private boolean isLeaf(AbstractCompressedNode node) {
return node instanceof CompressedLeaf;
}
/**
 * Implementation of Algorithm 3 (pathLength) from the paper.
*
* @param row a row of the input data
 * @return the depth of the row in the isolation tree, plus an estimate of the remaining depth in case the heightLimit was hit
*/
public double computePathLength(double[] row) {
int position = 0;
AbstractCompressedNode node = _nodes[0];
while (!isLeaf(node)) {
CompressedNode compressedNode = compressedNode(node);
double mul = ArrayUtils.subAndMul(row, compressedNode.getP(), compressedNode.getN());
if (mul <= 0) {
position = leftChildIndex(position);
} else {
position = rightChildIndex(position);
}
if (position < _nodes.length)
node = _nodes[position];
else
break;
}
return node.getHeight() + averagePathLengthOfUnsuccessfulSearch(compressedLeaf(node).getNumRows());
}
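  /*
   * Note on the traversal above: the nodes are stored in an implicit binary heap, so for a node at
   * index i its children are expected at leftChildIndex(i) = 2*i + 1 and rightChildIndex(i) = 2*i + 2
   * (the same convention IsolationTree uses when building the tree). For example, with heightLimit = 2
   * the array has 7 slots: index 0 is the root, 1-2 its children and 3-6 the possible leaves.
   */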
/**
* The structure of the bytes is:
*
 * sizeOfInternalArrays -> size of the random slope and intercept arrays (both always have the same length)
 * nodeNumber -> index of the node in the array; the byte array always starts with the root and ends with some
 *               leaf. Null nodes are skipped.
 * AbstractCompressedNode -> refer to the implementations for the details of each node's byte layout
*
* |sizeOfInternalArrays|nodeNumber|CompressedNode|nodeNumber|AbstractCompressedNode|....|nodeNumber|CompressedLeaf|
*
* @return CompressedIsolationTree serialized as array of bytes
*/
public byte[] toBytes() {
AutoBuffer ab = new AutoBuffer();
assert _nodes[0] != null : "Tree is empty, there are zero nodes in the tree";
ab.put4(compressedNode(_nodes[0]).getN().length); // size of the internal arrays
for(int i = 0; i < _nodes.length; i++) {
if (_nodes[i] != null) {
ab.put4(i); // node number
_nodes[i].toBytes(ab);
}
}
return ab.bufClose();
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended/isolationtree/CompressedLeaf.java
|
package hex.tree.isoforextended.isolationtree;
import water.AutoBuffer;
import static hex.genmodel.algos.isoforextended.ExtendedIsolationForestMojoModel.LEAF;
/**
 * IsolationTree Leaf Node with better memory performance. Stores only the data that are needed for scoring.
*/
public class CompressedLeaf extends AbstractCompressedNode {
private final int _numRows;
public CompressedLeaf(IsolationTree.Node node) {
this(node.getHeight(), node.getNumRows());
}
public CompressedLeaf(int currentHeight, int numRows) {
super(currentHeight);
_numRows = numRows;
}
public int getNumRows() {
return _numRows;
}
/**
* The structure of the bytes is:
*
* |identifierOfTheNodeType|numRows|
*/
@Override
public void toBytes(AutoBuffer ab) {
ab.put1(LEAF); // identifier of this node type
ab.put4(_numRows);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended/isolationtree/CompressedNode.java
|
package hex.tree.isoforextended.isolationtree;
import water.AutoBuffer;
import java.util.Arrays;
import static hex.genmodel.algos.isoforextended.ExtendedIsolationForestMojoModel.NODE;
/**
 * IsolationTree Node with better memory performance. Stores only the data that are needed for scoring.
 * Naming convention comes from Algorithm 2 (iTree) in the paper.
*/
public class CompressedNode extends AbstractCompressedNode {
/**
* Random slope
*/
private final double[] _n;
/**
* Random intercept point
*/
private final double[] _p;
public CompressedNode(IsolationTree.Node node) {
this(node.getN(), node.getP(), node.getHeight());
}
public CompressedNode(double[] n, double[] p, int currentHeight) {
super(currentHeight);
this._n = n == null ? null : Arrays.copyOf(n, n.length);
this._p = p == null ? null : Arrays.copyOf(p, p.length);
}
public double[] getN() {
return _n;
}
public double[] getP() {
return _p;
}
/**
* The structure of the bytes is:
*
* |identifierOfTheNodeType|nvalues|pvalues|
*/
@Override
public void toBytes(AutoBuffer ab) {
ab.put1(NODE); // identifier of this node type
for (double v : _n) {
ab.put8d(v);
}
for (double v : _p) {
ab.put8d(v);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended/isolationtree/IsolationTree.java
|
package hex.tree.isoforextended.isolationtree;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import water.util.ArrayUtils;
import water.util.RandomUtils;
import java.util.Random;
/**
* IsolationTree class implements Algorithm 2 (iTree)
* Naming convention comes from the Extended Isolation Forest paper.
*
* @author Adam Valenta
*/
public class IsolationTree {
private static final Logger LOG = Logger.getLogger(IsolationTree.class);
private Node[] _nodes;
private final int _heightLimit;
private final int _extensionLevel;
private int _isolatedPoints = 0;
private long _notIsolatedPoints = 0;
private int _zeroSplits = 0;
private int _leaves = 0;
private int _depth = 0;
public IsolationTree(int _heightLimit, int _extensionLevel) {
this._heightLimit = _heightLimit;
this._extensionLevel = _extensionLevel;
}
/**
 * Implementation of Algorithm 2 (iTree) from the paper.
*/
public CompressedIsolationTree buildTree(double[][] data, final long seed, final int treeNum) {
int maxNumNodesInTree = (int) Math.pow(2, _heightLimit + 1) - 1;
_isolatedPoints = 0;
_notIsolatedPoints = 0;
_zeroSplits = 0;
_leaves = 0;
_depth = 0;
this._nodes = new Node[maxNumNodesInTree];
CompressedIsolationTree compressedIsolationTree = new CompressedIsolationTree(_heightLimit);
_nodes[0] = new Node(data, data[0].length, 0);
for (int i = 0; i < _nodes.length; i++) {
LOG.trace((i + 1) + " from " + _nodes.length + " is being prepared on tree " + treeNum);
Node node = _nodes[i];
if (node == null || node._external) {
continue;
}
double[][] nodeData = node._data;
int currentHeight = node._height;
if (node._height >= _heightLimit || nodeData[0].length <= 1) {
node._external = true;
node._numRows = nodeData[0].length;
node._height = currentHeight;
node._data = null; // attempt to inform the Java GC that the data are no longer needed
compressedIsolationTree.getNodes()[i] = new CompressedLeaf(node);
if (nodeData[0].length == 1)
_isolatedPoints++;
if (nodeData[0].length > 1)
_notIsolatedPoints += node._numRows;
_leaves++;
} else {
if (rightChildIndex(i) < _nodes.length) {
currentHeight++;
_depth = currentHeight;
node._p = ArrayUtils.uniformDistFromArray(nodeData, seed + i);
node._n = gaussianVector(
nodeData.length, nodeData.length - _extensionLevel - 1, seed + i);
FilteredData ret = extendedIsolationForestSplit(nodeData, node._p, node._n);
compressedIsolationTree.getNodes()[i] = new CompressedNode(node);
if (ret.left != null) {
_nodes[leftChildIndex(i)] = new Node(ret.left, ret.left[0].length, currentHeight);
compressedIsolationTree.getNodes()[leftChildIndex(i)] = new CompressedNode(_nodes[leftChildIndex(i)]);
} else {
_nodes[leftChildIndex(i)] = new Node(null, 0, currentHeight);
_nodes[leftChildIndex(i)]._external = true;
compressedIsolationTree.getNodes()[leftChildIndex(i)] = new CompressedLeaf(_nodes[leftChildIndex(i)]);
_leaves++;
_zeroSplits++;
}
if (ret.right != null) {
_nodes[rightChildIndex(i)] = new Node(ret.right, ret.right[0].length, currentHeight);
compressedIsolationTree.getNodes()[rightChildIndex(i)] = new CompressedNode(_nodes[rightChildIndex(i)]);
} else {
_nodes[rightChildIndex(i)] = new Node(null, 0, currentHeight);
_nodes[rightChildIndex(i)]._external = true;
compressedIsolationTree.getNodes()[rightChildIndex(i)] = new CompressedLeaf(_nodes[rightChildIndex(i)]);
_leaves++;
_zeroSplits++;
}
} else {
compressedIsolationTree.getNodes()[i] = new CompressedLeaf(node);
_leaves++;
}
node._data = null; // attempt to inform the Java GC that the data are no longer needed
}
}
return compressedIsolationTree;
}
private int leftChildIndex(int i) {
return 2 * i + 1;
}
private int rightChildIndex(int i) {
return 2 * i + 2;
}
/**
 * Helper method. Logs the number of rows in each node of the tree.
*/
public void logNodesNumRows(Level level) {
StringBuilder logMessage = new StringBuilder();
for (int i = 0; i < _nodes.length; i++) {
if (_nodes[i] == null)
logMessage.append(". ");
else
logMessage.append(_nodes[i]._numRows + " ");
}
LOG.log(level, logMessage.toString());
}
/**
 * Helper method. Logs the height (length of the path from the root) of each node in the tree. The root has height 0.
*/
public void logNodesHeight(Level level) {
StringBuilder logMessage = new StringBuilder();
for (int i = 0; i < _nodes.length; i++) {
if (_nodes[i] == null)
logMessage.append(". ");
else
logMessage.append(_nodes[i]._height + " ");
}
LOG.log(level, logMessage.toString());
}
/**
 * IsolationTree Node. Naming convention comes from Algorithm 2 (iTree) in the paper.
 * _data should always be null after the buildTree() method finishes, because only the number of rows
 * is needed for the scoring (evaluation) stage.
*/
public static class Node {
/**
 * Data in this node. After tree construction this should be null, because only _numRows is needed.
*/
private double[][] _data;
/**
* Random slope
*/
private double[] _n;
/**
* Random intercept point
*/
private double[] _p;
private int _height;
private boolean _external = false;
private int _numRows;
public Node(double[][] data, int numRows, int currentHeight) {
this._data = data;
this._numRows = numRows;
this._height = currentHeight;
}
public double[] getN() {
return _n;
}
public double[] getP() {
return _p;
}
public int getHeight() {
return _height;
}
public int getNumRows() {
return _numRows;
}
}
/**
* Compute Extended Isolation Forest split point and filter input data with this split point in the same time.
* <p>
* See Algorithm 2 (iTree) in the paper.
*
* @return Object containing data for Left and Right branch of the tree.
*/
public static FilteredData extendedIsolationForestSplit(double[][] data, double[] p, double[] n) {
double[] res = new double[data[0].length];
int leftLength = 0;
int rightLength = 0;
for (int row = 0; row < data[0].length; row++) {
for (int col = 0; col < data.length; col++) {
res[row] += (data[col][row] - p[col]) * n[col];
}
if (res[row] <= 0) {
leftLength++;
} else {
rightLength++;
}
}
double[][] left = null;
if (leftLength > 0) {
left = new double[data.length][leftLength];
}
double[][] right = null;
if (rightLength > 0) {
right = new double[data.length][rightLength];
}
for (int row = 0, rowLeft = 0, rowRight = 0; row < data[0].length; row++) {
if (res[row] <= 0) {
for (int col = 0; col < data.length; col++) {
left[col][rowLeft] = data[col][row];
}
rowLeft++;
} else {
for (int col = 0; col < data.length; col++) {
right[col][rowRight] = data[col][row];
}
rowRight++;
}
}
return new FilteredData(left, right);
}
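    /*
     * Worked example (illustrative numbers): for column-major data = {{1, 4, 7}, {2, 5, 8}} (two
     * columns, three rows), p = {4, 5} and n = {1, 0}, the projections are
     * res = {(1-4)*1 + (2-5)*0, (4-4)*1 + (5-5)*0, (7-4)*1 + (8-5)*0} = {-3, 0, 3},
     * so rows 0 and 1 (res <= 0) go to the left branch and row 2 goes to the right branch:
     * left = {{1, 4}, {2, 5}}, right = {{7}, {8}}.
     */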
public static class FilteredData {
private final double[][] left;
private final double[][] right;
public FilteredData(double[][] left, double[][] right) {
this.left = left;
this.right = right;
}
public double[][] getLeft() {
return left;
}
public double[][] getRight() {
return right;
}
}
/**
 * Make a new array initialized to random Gaussian N(0,1) values with the given seed.
 * Randomly selected {@code zeroNum} items are set to zero (based on the extension level value).
 *
 * @param n length of the generated vector
 * @param zeroNum number of randomly selected items of the vector to set to zero
 * @return array of Gaussian values in which {@code zeroNum} randomly selected items are zero
*/
public static double[] gaussianVector(int n, int zeroNum, long seed) {
double[] gaussian = ArrayUtils.gaussianVector(n, seed);
Random r = RandomUtils.getRNG(seed);
while (zeroNum > 0) {
int pos = r.nextInt(n);
if (!Double.isNaN(gaussian[pos])) {
gaussian[pos] = Double.NaN;
zeroNum--;
}
}
for (int i = 0; i < gaussian.length; i++) {
if (Double.isNaN(gaussian[i]))
gaussian[i] = 0;
}
return gaussian;
}
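  /*
   * Example (illustrative): gaussianVector(5, 2, 42) returns a length-5 vector of N(0,1) draws in
   * which exactly 2 randomly chosen positions are set to 0. The builder calls this method with
   * zeroNum = nCols - extensionLevel - 1, so extension_level = 0 leaves a single non-zero component
   * (axis-parallel splits) and a fully extended tree (extension_level = nCols - 1) keeps all
   * components non-zero.
   */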
public int getIsolatedPoints() {
return _isolatedPoints;
}
public long getNotIsolatedPoints() {
return _notIsolatedPoints;
}
public int getZeroSplits() {
return _zeroSplits;
}
public int getLeaves() {
return _leaves;
}
public int getDepth() {
return _depth;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/isoforextended/isolationtree/IsolationTreeStats.java
|
package hex.tree.isoforextended.isolationtree;
/**
* Inspired by TreeStats
*/
public class IsolationTreeStats {
public int _minDepth = -1;
public int _maxDepth = -1;
public float _meanDepth;
public int _minLeaves = -1;
public int _maxLeaves = -1;
public float _meanLeaves;
public int _minIsolated = -1;
public int _maxIsolated = -1;
public float _meanIsolated;
public long _minNotIsolated = -1;
public long _maxNotIsolated = -1;
public float _meanNotIsolated;
public int _minZeroSplits = -1;
public int _maxZeroSplits = -1;
public float _meanZeroSplits;
public int _numTrees = 0;
private long _sumDepth = 0;
private long _sumLeaves = 0;
private long _sumIsolated = 0;
private long _sumNotIsolated = 0;
private long _sumZeroSplits = 0;
public void updateBy(IsolationTree tree) {
if (tree == null) return;
if (_minDepth == -1 || _minDepth > tree.getDepth()) _minDepth = tree.getDepth();
if (_maxDepth == -1 || _maxDepth < tree.getDepth()) _maxDepth = tree.getDepth();
if (_minLeaves == -1 || _minLeaves > tree.getLeaves()) _minLeaves = tree.getLeaves();
if (_maxLeaves == -1 || _maxLeaves < tree.getLeaves()) _maxLeaves = tree.getLeaves();
if (_minIsolated == -1 || _minIsolated > tree.getIsolatedPoints()) _minIsolated = tree.getIsolatedPoints();
if (_maxIsolated == -1 || _maxIsolated < tree.getIsolatedPoints()) _maxIsolated = tree.getIsolatedPoints();
if (_minNotIsolated == -1 || _minNotIsolated > tree.getNotIsolatedPoints()) _minNotIsolated = tree.getNotIsolatedPoints();
if (_maxNotIsolated == -1 || _maxNotIsolated < tree.getNotIsolatedPoints()) _maxNotIsolated = tree.getNotIsolatedPoints();
if (_minZeroSplits == -1 || _minZeroSplits > tree.getZeroSplits()) _minZeroSplits = tree.getZeroSplits();
if (_maxZeroSplits == -1 || _maxZeroSplits < tree.getZeroSplits()) _maxZeroSplits = tree.getZeroSplits();
_sumDepth += tree.getDepth();
_sumLeaves += tree.getLeaves();
_sumIsolated += tree.getIsolatedPoints();
_sumNotIsolated += tree.getNotIsolatedPoints();
_sumZeroSplits += tree.getZeroSplits();
_numTrees++;
updateMeans();
}
private void updateMeans() {
_meanDepth = ((float) _sumDepth / _numTrees);
_meanLeaves = ((float) _sumLeaves / _numTrees);
_meanIsolated = ((float) _sumIsolated / _numTrees);
_meanNotIsolated = ((float) _sumNotIsolated / _numTrees);
_meanZeroSplits = ((float) _sumZeroSplits / _numTrees);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/uplift/ChiSquaredDivergence.java
|
package hex.tree.uplift;
public class ChiSquaredDivergence extends EuclideanDistance {
@Override
public double metric(double prCT1, double prCT0) {
return ((prCT1 - prCT0) * (prCT1 - prCT0)) / (prCT0 == 0 ? Divergence.ZERO_TO_DIVIDE : prCT0);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/uplift/Divergence.java
|
package hex.tree.uplift;
import water.Iced;
/**
* Divergence class used to calculate gain to split the node in Uplift trees algorithms.
* Currently only UpliftRandomForest uses this class.
* Source: https://link.springer.com/content/pdf/10.1007/s10115-011-0434-0.pdf page 308
*
*/
public abstract class Divergence extends Iced<Divergence> {
public static double ZERO_TO_DIVIDE = 1e-6;
/**
* Calculate distance divergence metric between two probabilities.
 * @param prCT1 probability in the treatment group
 * @param prCT0 probability in the control group
* @return distance divergence metric
*/
public abstract double metric(double prCT1, double prCT0);
/**
* Calculate distance metric between two probabilities in the node.
* @param prCT1 probability of treatment group
* @param prCT0 probability of control group
* @return distance divergence metric in the node
*/
public double node(double prCT1, double prCT0) {
return metric(prCT1, prCT0) + metric(1 - prCT1, 1 - prCT0);
}
/**
* Calculate gain after split
* @param prL probability of response in left node
* @param prLY1CT1 probability of response = 1 in treatment group in left node
* @param prLY1CT0 probability of response = 1 in control group in left node
* @param prR probability of response in right node
* @param prRY1CT1 probability of response = 1 in treatment group in right node
* @param prRY1CT0 probability of response = 1 in control group in right node
* @return gain after split
*/
public double split( double prL, double prLY1CT1, double prLY1CT0,
double prR, double prRY1CT1, double prRY1CT0) {
double klL = node(prLY1CT1, prLY1CT0);
double klR = node(prRY1CT1, prRY1CT0);
return prL * klL + prR * klR;
}
/**
* Calculate overall gain as divergence between split gain and node gain.
* @param prY1CT1 probability of response = 1 in treatment group before split
 * @param prY1CT0 probability of response = 1 in control group before split
* @param prL probability of response in left node
* @param prLY1CT1 probability of response = 1 in treatment group in left node
* @param prLY1CT0 probability of response = 1 in control group in left node
* @param prR probability of response in right node
* @param prRY1CT1 probability of response = 1 in treatment group in right node
* @param prRY1CT0 probability of response = 1 in control group in right node
* @return overall gain
*/
public double gain(double prY1CT1, double prY1CT0, double prL, double prLY1CT1, double prLY1CT0,
double prR, double prRY1CT1, double prRY1CT0) {
return split(prL, prLY1CT1, prLY1CT0, prR, prRY1CT1, prRY1CT0) - node(prY1CT1, prY1CT0);
}
/**
* Calculate normalization factor to normalize gain.
* @param prCT1 probability of treatment group
* @param prCT0 probability of control group
* @param prLCT1 probability of treatment group in left node
* @param prLCT0 probability of control group in left node
* @return normalization factor
*/
public abstract double norm(double prCT1, double prCT0, double prLCT1, double prLCT0);
/**
* Calculate normalized gain as result value to select best split.
* @param prY1CT1 probability of response = 1 in treatment group before split
 * @param prY1CT0 probability of response = 1 in control group before split
* @param prL probability of response in left node
* @param prLY1CT1 probability of response = 1 in treatment group in left node
* @param prLY1CT0 probability of response = 1 in control group in left node
* @param prR probability of response in right node
* @param prRY1CT1 probability of response = 1 in treatment group in right node
* @param prRY1CT0 probability of response = 1 in control group in right node
* @param prCT1 probability of treatment group
* @param prCT0 probability of control group
* @param prLCT1 probability of treatment group in left node
* @param prLCT0 probability of control group in left node
* @return normalized gain
*/
public double value(double prY1CT1, double prY1CT0, double prL, double prLY1CT1, double prLY1CT0,
double prR, double prRY1CT1, double prRY1CT0, double prCT1, double prCT0,
double prLCT1, double prLCT0) {
return gain(prY1CT1, prY1CT0, prL, prLY1CT1, prLY1CT0, prR, prRY1CT1, prRY1CT0) /
norm(prCT1, prCT0, prLCT1, prLCT0);
}
}
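/**
 * Minimal usage sketch (hypothetical class, illustrative probability values only): computes the
 * normalized gain an uplift tree would assign to one candidate split, here with the KL-based
 * divergence.
 */
class DivergenceUsageSketch {
  public static void main(String[] args) {
    Divergence div = new KLDivergence();
    double gain = div.value(
        0.30, 0.20,       // P(y=1 | treatment), P(y=1 | control) before the split
        0.60, 0.40, 0.25, // P(left), P(y=1 | treatment, left), P(y=1 | control, left)
        0.40, 0.15, 0.12, // P(right), P(y=1 | treatment, right), P(y=1 | control, right)
        0.50, 0.50,       // P(treatment), P(control)
        0.55, 0.45);      // P(treatment | left), P(control | left)
    System.out.println("normalized gain = " + gain);
  }
}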
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/uplift/EuclideanDistance.java
|
package hex.tree.uplift;
public class EuclideanDistance extends Divergence {
@Override
public double metric(double prCT1, double prCT0) {
return (prCT1 - prCT0) * (prCT1 - prCT0);
}
@Override
public double norm(
double prCT1, double prCT0,
double prLCT1, double prLCT0
) {
double nodeCT = node(prLCT1, prLCT0);
double giniCT = 2 * prCT1 * (1 - prCT1);
double giniCT1 = 2 * prLCT1 * (1 - prLCT1);
double giniCT0 = 2 * prLCT0 * (1 - prLCT0);
return giniCT * nodeCT + giniCT1 * prCT1 + giniCT0 * prCT0 + 0.5;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/uplift/KLDivergence.java
|
package hex.tree.uplift;
import static water.util.MathUtils.log2;
public class KLDivergence extends Divergence {
@Override
public double metric(double prCT1, double prCT0) {
return prCT1 * log2(prCT1 / (prCT0 == 0 ? ZERO_TO_DIVIDE : prCT0)); // guard the ratio against division by zero
}
@Override
public double norm(
double prCT1, double prCT0,
double prLCT1, double prLCT0
) {
double klCT = node(prCT1, prCT0);
double entCT = -(prCT1 * log2(prCT1) + prCT0 * log2(prCT0));
double entCT1 = -(prLCT1 * log2(prLCT1) + (1 - prLCT1) * log2((1 - prLCT1)));
double entCT0 = -(prLCT0 * log2(prLCT0) + (1 - prLCT0) * log2((1 - prLCT0)));
return klCT * entCT + prCT1 * entCT1 + prCT0 * entCT0 + 0.5;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/uplift/UpliftDRF.java
|
package hex.tree.uplift;
import hex.*;
import hex.genmodel.MojoModel;
import hex.genmodel.algos.upliftdrf.UpliftDrfMojoModel;
import hex.genmodel.utils.DistributionFamily;
import hex.tree.*;
import org.apache.log4j.Logger;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import water.H2O;
import water.Job;
import water.Key;
import water.MRTask;
import water.fvec.C0DChunk;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.PrettyPrint;
import water.util.TwoDimTable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
public class UpliftDRF extends SharedTree<UpliftDRFModel, UpliftDRFModel.UpliftDRFParameters, UpliftDRFModel.UpliftDRFOutput> {
private static final Logger LOG = Logger.getLogger(UpliftDRF.class);
public enum UpliftMetricType { AUTO, KL, ChiSquared, Euclidean }
@Override
public boolean isUplift() {return true;}
// Called from an http request
public UpliftDRF(hex.tree.uplift.UpliftDRFModel.UpliftDRFParameters parms) {
super(parms);
init(false);
}
public UpliftDRF(hex.tree.uplift.UpliftDRFModel.UpliftDRFParameters parms, Key<UpliftDRFModel> key) {
super(parms, key);
init(false);
}
public UpliftDRF(hex.tree.uplift.UpliftDRFModel.UpliftDRFParameters parms, Job job) {
super(parms, job);
init(false);
}
public UpliftDRF(boolean startup_once) {
super(new hex.tree.uplift.UpliftDRFModel.UpliftDRFParameters(), startup_once);
}
@Override
public boolean haveMojo() {
return true;
}
@Override
public boolean havePojo() {
return false;
}
@Override
public ModelCategory[] can_build() {
return new ModelCategory[]{
ModelCategory.BinomialUplift
};
}
/** Start the DRF training Job on an F/J thread. */
@Override protected Driver trainModelImpl() { return new UpliftDRFDriver(); }
@Override public boolean scoreZeroTrees() { return false; }
/** Initialize the ModelBuilder, validating all arguments and preparing the
* training frame. This call is expected to be overridden in the subclasses
* and each subclass will start with "super.init();". This call is made
* by the front-end whenever the GUI is clicked, and needs to be fast;
* heavy-weight prep needs to wait for the trainModel() call.
*/
@Override public void init(boolean expensive) {
super.init(expensive);
// Initialize local variables
if( _parms._mtries < 1 && _parms._mtries != -1 && _parms._mtries != -2 )
error("_mtries", "mtries must be -1 (converted to sqrt(features)) or -2 (All features) or >= 1 but it is " + _parms._mtries);
if( _train != null ) {
int ncols = _train.numCols();
if( _parms._mtries != -1 && _parms._mtries != -2 && !(1 <= _parms._mtries && _parms._mtries < ncols /*ncols includes the response*/))
error("_mtries","Computed mtries should be -1 or -2 or in interval [1,"+ncols+"[ but it is " + _parms._mtries);
}
if (_parms._sample_rate == 1f && _valid == null)
warn("_sample_rate", "Sample rate is 100% and no validation dataset. There are no out-of-bag data to compute error estimates on the training data!");
if (hasOffsetCol())
error("_offset_column", "Offsets are not yet supported for Uplift DRF.");
if (hasWeightCol())
error("_weight_column", "Weights are not yet supported for Uplift DRF.");
if (hasFoldCol())
error("_fold_column", "Cross-validation is not yet supported for Uplift DRF.");
if (_parms._nfolds > 0)
error("_nfolds", "Cross-validation is not yet supported for Uplift DRF.");
if (_nclass == 1)
error("_distribution", "UpliftDRF currently support binomial classification problems only.");
if (_nclass > 2 || _parms._distribution.equals(DistributionFamily.multinomial))
error("_distribution", "UpliftDRF currently does not support multinomial distribution.");
if (_parms._treatment_column == null)
error("_treatment_column", "The treatment column has to be defined.");
if (_parms._custom_distribution_func != null)
error("_custom_distribution_func", "The custom distribution is not yet supported for Uplift DRF.");
}
// ----------------------
private class UpliftDRFDriver extends Driver {
@Override
protected boolean doOOBScoring() {
return true;
}
@Override protected void initializeModelSpecifics() {
_mtry_per_tree = Math.max(1, (int) (_parms._col_sample_rate_per_tree * _ncols));
if (!(1 <= _mtry_per_tree && _mtry_per_tree <= _ncols))
throw new IllegalArgumentException("Computed mtry_per_tree should be in interval <1," + _ncols + "> but it is " + _mtry_per_tree);
if (_parms._mtries == -2) { //mtries set to -2 would use all columns in each split regardless of what column has been dropped during train
_mtry = _ncols;
} else if (_parms._mtries == -1) {
_mtry = (isClassifier() ? Math.max((int) Math.sqrt(_ncols), 1) : Math.max(_ncols / 3, 1)); // classification: mtry=sqrt(_ncols), regression: mtry=_ncols/3
} else {
_mtry = _parms._mtries;
}
if (!(1 <= _mtry && _mtry <= _ncols)) {
throw new IllegalArgumentException("Computed mtry should be in interval <1," + _ncols + "> but it is " + _mtry);
}
new MRTask() {
@Override public void map(Chunk chks[]) {
Chunk cy = chk_resp(chks);
for (int i = 0; i < cy._len; i++) {
if (cy.isNA(i)) continue;
int cls = (int) cy.at8(i);
chk_work(chks, cls).set(i, 1L);
}
}
}.doAll(_train);
}
// --------------------------------------------------------------------------
// Build the next random k-trees representing tid-th tree
@Override protected boolean buildNextKTrees() {
// We're going to build K (nclass) trees - each focused on correcting
// errors for a single class.
final DTree[] ktrees = new DTree[_nclass];
// Define a "working set" of leaf splits, from leafs[i] to tree._len for each tree i
int[] leafs = new int[_nclass];
// Assign rows to nodes - fill the "NIDs" column(s)
growTrees(ktrees, leafs, _rand);
// Move rows into the final leaf rows - fill "Tree" and OUT_BAG_TREES columns and zap the NIDs column
UpliftCollectPreds cp = new UpliftCollectPreds(ktrees,leafs).doAll(_train,_parms._build_tree_one_node);
// Grow the model by K-trees
_model._output.addKTrees(ktrees);
return false; //never stop early
}
// Assumes that the "Work" column are filled with horizontalized (0/1) class memberships per row (or copy of regression response)
private void growTrees(DTree[] ktrees, int[] leafs, Random rand) {
// Initial set of histograms. All trees; one leaf per tree (the root
// leaf); all columns
DHistogram hcs[][][] = new DHistogram[_nclass][1/*just root leaf*/][_ncols];
// Adjust real bins for the top-levels
int adj_nbins = Math.max(_parms._nbins_top_level,_parms._nbins);
// Use for all k-trees the same seed. NOTE: this is only to make a fair
// view for all k-trees
long rseed = rand.nextLong();
// Initially setup as-if an empty-split had just happened
for (int k = 0; k < _nclass; k++) {
if (_model._output._distribution[k] != 0) { // Ignore missing classes
ktrees[k] = new DTree(_train, _ncols, _mtry, _mtry_per_tree, rseed, _parms);
new DTree.UndecidedNode(ktrees[k], -1, DHistogram.initialHist(_train, _ncols, adj_nbins, hcs[k][0], rseed, _parms, getGlobalSplitPointsKeys(), null, false, null), null, null); // The "root" node
}
}
// Sample - mark the lines by putting 'OUT_OF_BAG' into nid(<klass>) vector
Sample s = new Sample(ktrees[0], _parms._sample_rate, _parms._sample_rate_per_class).dfork(null,new Frame(vec_nids(_train,0),vec_resp(_train)), _parms._build_tree_one_node).getResult();
// ----
// One Big Loop till the ktrees are of proper depth.
// Adds a layer to the trees each pass.
int depth=0;
for( ; depth<_parms._max_depth; depth++ ) {
hcs = buildLayer(_train, _parms._nbins, ktrees[0], leafs, hcs, _parms._build_tree_one_node);
// If we did not make any new splits, then the tree is split-to-death
if( hcs == null ) break;
}
// Each tree bottomed-out in a DecidedNode; go 1 more level and insert
// LeafNodes to hold predictions.
DTree treeTr = ktrees[0];
ktrees[1] = new DTree(ktrees[0]); // make a deep copy of the tree to assign control prediction to the leaves
DTree treeCt = ktrees[1];
int leaf = leafs[0] = treeTr.len();
for (int nid = 0; nid < leaf; nid++) {
if (treeTr.node(nid) instanceof DTree.DecidedNode) { // Should be the same for treatment and control tree
DTree.DecidedNode dnTr = treeTr.decided(nid); // Treatment tree node
DTree.DecidedNode dnCt = treeCt.decided(nid); // Control tree node
if (dnTr._split == null) { // No decision here, no row should have this NID now
if (nid == 0) { // Handle the trivial non-splitting tree
DTree.LeafNode lnTr = new DTree.LeafNode(treeTr, -1, 0);
lnTr._pred = (float) (_model._output._priorClassDist[1]);
DTree.LeafNode lnCt = new DTree.LeafNode(treeCt, -1, 0);
lnCt._pred = (float) (_model._output._priorClassDist[0]);
}
continue;
}
for (int i = 0; i < dnTr._nids.length; i++) {
int cnid = dnTr._nids[i];
if (cnid == -1 || // Bottomed out (predictors or responses known constant)
treeTr.node(cnid) instanceof DTree.UndecidedNode || // Or chopped off for depth
(treeTr.node(cnid) instanceof DTree.DecidedNode && // Or not possible to split
((DTree.DecidedNode) treeTr.node(cnid))._split == null)) {
DTree.LeafNode lnTr = new DTree.LeafNode(treeTr, nid);
lnTr._pred = (float) dnTr.predTreatment(i); // Set prediction into the treatment leaf
dnTr._nids[i] = lnTr.nid(); // Mark a leaf here for treatment
DTree.LeafNode lnCt = new DTree.LeafNode(treeCt, nid);
lnCt._pred = (float) dnCt.predControl(i); // Set prediction into the control leaf
dnCt._nids[i] = lnCt.nid(); // Mark a leaf here for control
}
}
}
}
}
// Collect and write predictions into leaves.
private class UpliftCollectPreds extends MRTask<UpliftCollectPreds> {
/* @IN */ final DTree _trees[]; // Read-only, shared (except at the histograms in the Nodes)
/* @OUT */ double allRows; // number of all OOB rows (sampled by this tree)
UpliftCollectPreds(DTree trees[], int leafs[]) { _trees=trees;}
@Override public void map( Chunk[] chks ) {
final Chunk y = chk_resp(chks); // Response
final Chunk oobt = chk_oobt(chks); // Out-of-bag rows counter over all trees
final Chunk weights = hasWeightCol() ? chk_weight(chks) : new C0DChunk(1, chks[0]._len); // Row weights (all 1 when no weight column is set)
// Iterate over all rows
for( int row=0; row<oobt._len; row++ ) {
double weight = weights.atd(row);
final boolean wasOOBRow = ScoreBuildHistogram.isOOBRow((int)chk_nids(chks,0).at8(row));
//final boolean wasOOBRow = false;
// For all tree (i.e., k-classes)
final Chunk nids = chk_nids(chks, 0); // Node-ids for treatment group class
final Chunk nids1 = chk_nids(chks, 1); // Node-ids for control group class
if (weight!=0) {
final DTree treeT = _trees[0];
final DTree treeC = _trees[1];
if (treeT == null) continue; // Empty class is ignored
int nid = (int) nids.at8(row); // Get Node to decide from
// Update only out-of-bag rows
// This is out-of-bag row - but we would like to track on-the-fly prediction for the row
if (wasOOBRow) {
nid = ScoreBuildHistogram.oob2Nid(nid);
if (treeT.node(nid) instanceof DTree.UndecidedNode) // If we bottomed out the tree
nid = treeT.node(nid).pid(); // Then take parent's decision
int leafnid;
if (treeT.root() instanceof DTree.LeafNode) {
leafnid = 0;
} else {
DTree.DecidedNode dn = treeT.decided(nid); // Must have a decision point
if (dn._split == null) // Unable to decide?
dn = treeT.decided(treeT.node(nid).pid()); // Then take parent's decision
leafnid = dn.getChildNodeID(chks, row); // Decide down to a leafnode
}
// Setup Tree(i) - on the fly prediction of i-tree for row-th row
// - for uplift: cumulative sum of prediction of each tree - has to be normalized by number of trees
final Chunk ct1 = chk_tree(chks, 0); // treatment tree working column holding votes for given row
ct1.set(row, (float) (ct1.atd(row) + ((DTree.LeafNode) treeT.node(leafnid)).pred()));
final Chunk ct0 = chk_tree(chks, 1); // control group tree working column holding votes for given row
ct0.set(row, (float) (ct0.atd(row) + ((DTree.LeafNode) treeC.node(leafnid)).pred()));
}
}
// reset help column for this row and this k-class
nids.set(row, 0);
nids1.set(row, 0);
// For this tree this row is out-of-bag - i.e., a tree voted for this row
if (wasOOBRow) oobt.set(row, oobt.atd(row) + weight); // track number of trees
if (weight != 0) {
if (wasOOBRow && !y.isNA(row)) {
allRows+=weight;
}
}
}
}
@Override public void reduce(UpliftCollectPreds mrt) {
allRows += mrt.allRows;
}
}
@Override protected UpliftDRFModel makeModel( Key modelKey, UpliftDRFModel.UpliftDRFParameters parms) {
return new UpliftDRFModel(modelKey,parms,new UpliftDRFModel.UpliftDRFOutput(UpliftDRF.this));
}
}
/**
* Read the 'tree' columns and do model-specific math. For uplift, fs[1] and fs[2] hold the
* averaged treatment and control response probabilities and fs[0] holds their difference
* (the uplift); the returned sum is not used for normalization here.
*/
@Override protected double score1( Chunk chks[], double weight, double offset, double fs[], int row ) {
double sum = 0;
fs[1] = weight * chk_tree(chks, 0).atd(row) / chk_oobt(chks).atd(row);
fs[2] = weight * chk_tree(chks, 1).atd(row) / chk_oobt(chks).atd(row);
fs[0] = fs[1] - fs[2];
return sum;
}
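// Illustrative sketch (not part of the original source): how the per-row uplift prediction above is
// assembled. If the averaged treatment-tree vote is 0.7 and the control-tree vote is 0.4, the reported
// uplift fs[0] is 0.7 - 0.4 = 0.3. The helper name below is hypothetical and exists only for illustration.
private static double upliftFromVotesSketch(double treatmentVote, double controlVote) {
double[] fs = new double[3];
fs[1] = treatmentVote; // averaged P(response | treatment) over all trees
fs[2] = controlVote;   // averaged P(response | control) over all trees
fs[0] = fs[1] - fs[2]; // uplift = difference of the two probabilities
return fs[0];
}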
protected DHistogram[][][] buildLayer(final Frame fr, final int nbins, final DTree tree, final int leafs[], final DHistogram hcs[][][], boolean build_tree_one_node) {
// Build 1 uplift tree
// Build up the next-generation tree splits from the current histograms.
// Nearly all leaves will split one more level. This loop nest is
// O( #active_splits * #bins * #ncols )
// but is NOT over all the data.
ScoreBuildOneTree sb1t = null;
Vec vecs[] = fr.vecs();
// Build a frame with just a single tree (& work & nid) columns, so the
// nested MRTask ScoreBuildHistogram in ScoreBuildOneTree does not try
// to close other tree's Vecs when run in parallel.
int k = 0;
if( tree != null ) {
int selectedCol = _ncols + 2;
final String[] fr2cols = Arrays.copyOf(fr._names, selectedCol);
final Vec[] fr2vecs = Arrays.copyOf(vecs, selectedCol);
Frame fr2 = new Frame(fr2cols, fr2vecs); //predictors, weights and the actual response
if (isSupervised() && fr2.find(_parms._response_column) == -1) {
fr2.add(_parms._response_column, fr.vec(_parms._response_column));
}
// Add temporary workspace vectors (optional weights are taken over from fr)
int respIdx = fr2.find(_parms._response_column);
int weightIdx = fr2.find(_parms._weights_column);
int treatmentIdx = fr2.find(_parms._treatment_column);
int predsIdx = fr2.numCols(); fr2.add(fr._names[idx_tree(k)],vecs[idx_tree(k)]); //tree predictions
int workIdx = fr2.numCols(); fr2.add(fr._names[idx_work(k)],vecs[idx_work(k)]); //target value to fit (copy of actual response for DRF, residual for GBM)
int nidIdx = fr2.numCols(); fr2.add(fr._names[idx_nids(k)],vecs[idx_nids(k)]); //node indices for tree construction
if (LOG.isTraceEnabled()) LOG.trace("Building a layer for class " + k + ":\n" + fr2.toTwoDimTable());
// Async tree building
// step 1: build histograms
// step 2: split nodes
H2O.submitTask(sb1t = new ScoreBuildOneTree(this,k, nbins, tree, leafs, hcs, fr2, build_tree_one_node, _improvPerVar, _model._parms._distribution,
respIdx, weightIdx, predsIdx, workIdx, nidIdx, treatmentIdx));
}
// Block for all K trees to complete.
boolean did_split=false;
if( sb1t != null ) {
sb1t.join();
if( sb1t._did_split ) did_split=true;
if (LOG.isTraceEnabled()) {
LOG.info("Done with this layer for class " + k + ":\n" + new Frame(
new String[]{"TREE", "WORK", "NIDS"},
new Vec[]{
vecs[idx_tree(k)],
vecs[idx_work(k)],
vecs[idx_nids(k)]
}
).toTwoDimTable());
}
}
// The layer is done.
return did_split ? hcs : null;
}
@Override
protected TwoDimTable createScoringHistoryTable() {
UpliftDRFModel.UpliftDRFOutput out = _model._output;
return createUpliftScoringHistoryTable(out, out._scored_train, out._scored_valid, _job,
out._training_time_ms, _parms._custom_metric_func != null);
}
static TwoDimTable createUpliftScoringHistoryTable(Model.Output _output,
ScoreKeeper[] _scored_train,
ScoreKeeper[] _scored_valid,
Job job, long[] _training_time_ms,
boolean hasCustomMetric) {
List<String> colHeaders = new ArrayList<>();
List<String> colTypes = new ArrayList<>();
List<String> colFormat = new ArrayList<>();
colHeaders.add("Timestamp"); colTypes.add("string"); colFormat.add("%s");
colHeaders.add("Duration"); colTypes.add("string"); colFormat.add("%s");
colHeaders.add("Number of Trees"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Training ATE"); colTypes.add("double"); colFormat.add("%d");
colHeaders.add("Training ATT"); colTypes.add("double"); colFormat.add("%d");
colHeaders.add("Training ATC"); colTypes.add("double"); colFormat.add("%d");
colHeaders.add("Training AUUC nbins"); colTypes.add("int"); colFormat.add("%d");
colHeaders.add("Training AUUC"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Training AUUC normalized"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Training Qini value"); colTypes.add("double"); colFormat.add("%.5f");
if (hasCustomMetric) {
colHeaders.add("Training Custom"); colTypes.add("double"); colFormat.add("%.5f");
}
if (_output._validation_metrics != null) {
colHeaders.add("Validation ATE"); colTypes.add("double"); colFormat.add("%d");
colHeaders.add("Validation ATT"); colTypes.add("double"); colFormat.add("%d");
colHeaders.add("Validation ATC"); colTypes.add("double"); colFormat.add("%d");
colHeaders.add("Validation AUUC nbins"); colTypes.add("int"); colFormat.add("%d");
colHeaders.add("Validation AUUC"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Validation AUUC normalized"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Validation Qini value"); colTypes.add("double"); colFormat.add("%.5f");
if (hasCustomMetric) {
colHeaders.add("Validation Custom"); colTypes.add("double"); colFormat.add("%.5f");
}
}
int rows = 0;
for( int i = 0; i<_scored_train.length; i++ ) {
if (i != 0 && Double.isNaN(_scored_train[i]._AUUC) && (_scored_valid == null || Double.isNaN(_scored_valid[i]._AUUC))) continue;
rows++;
}
TwoDimTable table = new TwoDimTable(
"Scoring History", null,
new String[rows],
colHeaders.toArray(new String[0]),
colTypes.toArray(new String[0]),
colFormat.toArray(new String[0]),
"");
int row = 0;
for( int i = 0; i<_scored_train.length; i++ ) {
if (i != 0 && Double.isNaN(_scored_train[i]._AUUC) && (_scored_valid == null || Double.isNaN(_scored_valid[i]._AUUC))) continue;
int col = 0;
DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss");
table.set(row, col++, fmt.print(_training_time_ms[i]));
table.set(row, col++, PrettyPrint.msecs(_training_time_ms[i] - job.start_time(), true));
table.set(row, col++, i);
ScoreKeeper st = _scored_train[i];
table.set(row, col++, st._ate);
table.set(row, col++, st._att);
table.set(row, col++, st._atc);
table.set(row, col++, st._auuc_nbins);
table.set(row, col++, st._AUUC);
table.set(row, col++, st._auuc_normalized);
table.set(row, col++, st._qini);
if (hasCustomMetric) table.set(row, col++, st._custom_metric);
if (_output._validation_metrics != null) {
st = _scored_valid[i];
table.set(row, col++, st._ate);
table.set(row, col++, st._att);
table.set(row, col++, st._atc);
table.set(row, col++, st._auuc_nbins);
table.set(row, col++, st._AUUC);
table.set(row, col++, st._auuc_normalized);
table.set(row, col++, st._qini);
if (hasCustomMetric) table.set(row, col++, st._custom_metric);
}
row++;
}
return table;
}
@Override
protected void addCustomInfo(UpliftDRFModel.UpliftDRFOutput out) {
if(out._validation_metrics != null){
out._defaultAuucThresholds = ((ModelMetricsBinomialUplift)out._validation_metrics)._auuc._ths;
} else {
out._defaultAuucThresholds = ((ModelMetricsBinomialUplift)out._training_metrics)._auuc._ths;
}
}
@Override
protected UpliftScoreExtension makeScoreExtension() {
return new UpliftScoreExtension();
}
private static class UpliftScoreExtension extends Score.ScoreExtension {
public UpliftScoreExtension() {
}
@Override
protected double getPrediction(double[] cdist) {
return cdist[1] - cdist[2];
}
@Override
protected int[] getResponseComplements(SharedTreeModel<?, ?, ?> m) {
return new int[]{m._output.treatmentIdx()};
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/uplift/UpliftDRFModel.java
|
package hex.tree.uplift;
import hex.*;
import hex.tree.CompressedForest;
import hex.tree.SharedTreeModel;
import hex.tree.SharedTreeModelWithContributions;
import hex.tree.SharedTreePojoWriter;
import hex.util.EffectiveParametersUtils;
import water.Key;
public class UpliftDRFModel extends SharedTreeModel<UpliftDRFModel, UpliftDRFModel.UpliftDRFParameters, UpliftDRFModel.UpliftDRFOutput> {
public static class UpliftDRFParameters extends SharedTreeModel.SharedTreeParameters {
public String algoName() { return "UpliftDRF"; }
public String fullName() { return "Uplift Distributed Random Forest"; }
public String javaName() { return UpliftDRFModel.class.getName(); }
public enum UpliftMetricType { AUTO, KL, ChiSquared, Euclidean }
public UpliftMetricType _uplift_metric = UpliftMetricType.AUTO;
public int _mtries = -2; //number of columns to use per split. default depends on the algorithm and problem (classification/regression)
public UpliftDRFParameters() {
super();
// Set Uplift DRF specific defaults (can differ from SharedTreeModel's defaults)
_max_depth = 20;
_min_rows = 1;
_treatment_column = "treatment";
}
@Override
public long progressUnits() {
return _ntrees*2;
}
}
public static class UpliftDRFOutput extends SharedTreeModelWithContributions.SharedTreeOutput {
public double[] _defaultAuucThresholds; // thresholds for AUUC to calculate metrics
public UpliftDRFOutput( UpliftDRF b) { super(b); }
@Override
public ModelCategory getModelCategory() {
return ModelCategory.BinomialUplift;
}
@Override
public boolean isBinomialClassifier() {
return true;
}
public void setDefaultAuucThresholds(double[] defaultAuucThresholds) {
this._defaultAuucThresholds = defaultAuucThresholds;
}
}
public UpliftDRFModel(Key<UpliftDRFModel> selfKey, UpliftDRFParameters parms, UpliftDRFOutput output ) {
super(selfKey, parms, output);
}
@Override
public void initActualParamValues() {
super.initActualParamValues();
EffectiveParametersUtils.initHistogramType(_parms);
EffectiveParametersUtils.initCategoricalEncoding(_parms, Parameters.CategoricalEncodingScheme.Enum);
EffectiveParametersUtils.initUpliftMetric(_parms);
}
@Override public boolean binomialOpt() { return false; }
/** Bulk scoring API for one row. Chunks are all compatible with the model,
* and expect the last Chunks are for the final distribution and prediction.
* Default method is to just load the data into the tmp array, then call
* subclass scoring logic. */
@Override protected double[] score0(double[] data, double[] preds, double offset, int ntrees) {
super.score0(data, preds, offset, ntrees);
int N = _output._ntrees;
preds[1] /= N;
preds[2] /= N;
preds[0] = preds[1] - preds[2];
return preds;
}
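// Illustrative sketch (not part of the original source): how score0 above normalizes the accumulated votes.
// With ntrees = 50 and accumulated sums preds = {0, 35, 20}, the averaged probabilities become 0.7 and 0.4,
// and the reported uplift preds[0] is 0.3. The helper name is hypothetical.
private static double[] normalizeVotesSketch(double[] preds, int ntrees) {
preds[1] /= ntrees; // averaged P(response | treatment)
preds[2] /= ntrees; // averaged P(response | control)
preds[0] = preds[1] - preds[2]; // uplift
return preds;
}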
@Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) {
return new ModelMetricsBinomialUplift.MetricBuilderBinomialUplift(domain, _output._defaultAuucThresholds);
}
@Override
public UpliftDrfMojoWriter getMojo() {
return new UpliftDrfMojoWriter(this);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/uplift/UpliftDrfMojoWriter.java
|
package hex.tree.uplift;
import hex.tree.SharedTreeMojoWriter;
import java.io.IOException;
public class UpliftDrfMojoWriter extends SharedTreeMojoWriter<UpliftDRFModel, UpliftDRFModel.UpliftDRFParameters, UpliftDRFModel.UpliftDRFOutput> {
@SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler
public UpliftDrfMojoWriter() {}
public UpliftDrfMojoWriter(UpliftDRFModel model) { super(model); }
@Override public String mojoVersion() {
return "1.40";
}
@Override
protected void writeModelData() throws IOException {
super.writeModelData();
writekv("default_auuc_thresholds", model._output._defaultAuucThresholds);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/util/CheckpointUtils.java
|
package hex.util;
import hex.Model;
import hex.ModelBuilder;
import water.Value;
import water.exceptions.H2OIllegalArgumentException;
import water.util.ArrayUtils;
import water.util.PojoUtils;
import java.lang.reflect.Field;
import java.util.Arrays;
public class CheckpointUtils {
/**
* This method takes the actual parameters and validates them against the parameters of the
* requested checkpoint. In case of a problem, it throws an API exception.
*
* @param params model parameters
* @param nonModifiableFields parameters that raise an error if changed
* @param checkpointParameters checkpoint parameters
*/
private static void validateWithCheckpoint(
Model.Parameters params,
String[] nonModifiableFields,
Model.Parameters checkpointParameters
) {
for (Field fAfter : params.getClass().getFields()) {
// only look at non-modifiable fields
if (ArrayUtils.contains(nonModifiableFields, fAfter.getName())) {
for (Field fBefore : checkpointParameters.getClass().getFields()) {
if (fBefore.equals(fAfter)) {
try {
if (!PojoUtils.equals(params, fAfter, checkpointParameters, checkpointParameters.getClass().getField(fAfter.getName()))) {
throw new H2OIllegalArgumentException(fAfter.getName(), "TreeBuilder", "Field " + fAfter.getName() + " cannot be modified if checkpoint is specified!");
}
} catch (NoSuchFieldException e) {
throw new H2OIllegalArgumentException(fAfter.getName(), "TreeBuilder", "Field " + fAfter.getName() + " is not supported by checkpoint!");
}
}
}
}
}
}
private static void validateNTrees(ModelBuilder builder, Model.GetNTrees params, Model.GetNTrees output) {
if (params.getNTrees() < output.getNTrees() + 1) {
builder.error("_ntrees", "If checkpoint is specified then requested ntrees must be higher than " + (output.getNTrees() + 1));
}
}
public static <M extends Model<M, P, O>, P extends Model.Parameters, O extends Model.Output> M getAndValidateCheckpointModel(
ModelBuilder<M, P, O> builder,
String[] nonModifiableFields,
Value cv
) {
M checkpointModel = cv.get();
try {
validateWithCheckpoint(builder._input_parms, nonModifiableFields, checkpointModel._input_parms);
if (builder.isClassifier() != checkpointModel._output.isClassifier())
throw new IllegalArgumentException("Response type must be the same as for the checkpointed model.");
if (!Arrays.equals(builder.train().names(), checkpointModel._output._names)) {
throw new IllegalArgumentException("The columns of the training data must be the same as for the checkpointed model");
}
if (!Arrays.deepEquals(builder.train().domains(), checkpointModel._output._domains)) {
throw new IllegalArgumentException("Categorical factor levels of the training data must be the same as for the checkpointed model");
}
} catch (H2OIllegalArgumentException e) {
builder.error(e.values.get("argument").toString(), e.values.get("value").toString());
}
if (builder._parms instanceof Model.GetNTrees && checkpointModel._output instanceof Model.GetNTrees) {
validateNTrees(builder, (Model.GetNTrees) builder._parms, (Model.GetNTrees) checkpointModel._output);
}
return checkpointModel;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/util/ClusteringUtils.java
|
package hex.util;
import hex.ClusteringModel;
import water.util.TwoDimTable;
public class ClusteringUtils {
static public TwoDimTable createCenterTable(ClusteringModel.ClusteringOutput output, boolean standardized) {
String name = standardized ? "Standardized Cluster Means" : "Cluster Means";
if(output._size == null || output._names == null || output._domains == null || output._centers_raw == null ||
(standardized && output._centers_std_raw == null)) {
TwoDimTable table = new TwoDimTable(name, null, new String[] {"1"}, new String[]{"C1"}, new String[]{"double"},
new String[]{"%f"}, "Centroid");
table.set(0,0,Double.NaN);
return table;
}
String[] rowHeaders = new String[output._size.length];
for(int i = 0; i < rowHeaders.length; i++)
rowHeaders[i] = String.valueOf(i+1);
String[] colTypes = new String[output._names.length];
String[] colFormats = new String[output._names.length];
for (int i = 0; i < output._domains.length; ++i) {
colTypes[i] = output._domains[i] == null ? "double" : "String";
colFormats[i] = output._domains[i] == null ? "%f" : "%s";
}
TwoDimTable table = new TwoDimTable(name, null, rowHeaders, output._names, colTypes, colFormats, "Centroid");
// Internal weights/folds column is included in domain length
int domain_length = output.hasWeights()? output._domains.length - 1 : output._domains.length;
for (int j=0; j < domain_length; ++j) {
boolean string = output._domains[j] != null;
if (string) {
for (int i=0; i<output._centers_raw.length; ++i) {
table.set(i, j, output._domains[j][(int)output._centers_raw[i][j]]);
}
} else {
for (int i=0; i<output._centers_raw.length; ++i) {
table.set(i, j, standardized ? output._centers_std_raw[i][j] : output._centers_raw[i][j]);
}
}
}
return table;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/util/DimensionReductionUtils.java
|
package hex.util;
import hex.DataInfo;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import water.fvec.Frame;
import water.util.ArrayUtils;
import water.util.PrettyPrint;
import water.util.TwoDimTable;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import static java.lang.Math.sqrt;
import static water.util.ArrayUtils.*;
/**
* Created by wendycwong on 2/9/17.
*/
public class DimensionReductionUtils {
/**
* This method will calculate the importance of principal components for PCA/GLRM methods.
*
* @param std_deviation: array of singular values
* @param totVar: sum of squared singular values
* @param vars: array of singular values squared
* @param prop_var: var[i]/totVar for each i
* @param cum_var: cumulative sum of var[i]/totVar from index 0 to index i.
*/
public static void generateIPC(double[] std_deviation, double totVar, double[] vars, double[] prop_var,
double[] cum_var) {
int arrayLen = std_deviation.length;
if (totVar > 0) {
for (int i = 0; i < arrayLen; i++) {
vars[i] = std_deviation[i] * std_deviation[i];
prop_var[i] = vars[i] / totVar;
cum_var[i] = i == 0 ? prop_var[0] : cum_var[i-1] + prop_var[i];
}
}
double lastCum = cum_var[arrayLen-1];
if (lastCum > 1) { // GLRM sometimes screws up the matrix estimation pretty badly
double multF = 1/lastCum;
ArrayUtils.mult(prop_var, multF);
ArrayUtils.mult(cum_var, multF);
ArrayUtils.mult(vars, multF);
ArrayUtils.mult(std_deviation, sqrt(multF));
}
}
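// Illustrative sketch (not part of the original source): a tiny worked example of generateIPC.
// For singular values {2, 1}, totVar = 2*2 + 1*1 = 5, so prop_var = {0.8, 0.2} and cum_var = {0.8, 1.0}.
// The helper name is hypothetical and only demonstrates the expected output.
private static void generateIPCSketch() {
double[] stdDeviation = {2.0, 1.0};
double totVar = 5.0; // sum of squared singular values
double[] vars = new double[2], propVar = new double[2], cumVar = new double[2];
generateIPC(stdDeviation, totVar, vars, propVar, cumVar);
// vars -> {4.0, 1.0}, propVar -> {0.8, 0.2}, cumVar -> {0.8, 1.0}
}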
/**
* Create the scoring history for dimension reduction algorithms like PCA/SVD. We make the following
* assumptions about your scoring history:
* 1. Timestamp: long denoting the time in ms;
* 2. All other fields are double.
*
* The following fields will be generated for you automatically: Duration and Iterations.
*
* @param scoreTable: HashMap containing column headers and arraylist containing the history of values collected.
* @param tableName: title/name of your scoring table
* @param startTime: time your model building job was first started.
* @return: TwoDimTable containing the scoring history.
*/
public static TwoDimTable createScoringHistoryTableDR(LinkedHashMap<String, ArrayList> scoreTable, String tableName,
long startTime) {
List<String> colHeaders = new ArrayList<>();
List<String> colTypes = new ArrayList<>();
List<String> colFormat = new ArrayList<>();
ArrayList<String> otherTableEntries = new ArrayList<String>();
for (String fieldName:scoreTable.keySet()) {
if (fieldName.equals("Timestamp")) {
colHeaders.add("Timestamp"); colTypes.add("string"); colFormat.add("%s");
colHeaders.add("Duration"); colTypes.add("string"); colFormat.add("%s");
colHeaders.add("Iterations"); colTypes.add("long"); colFormat.add("%d");
} else {
otherTableEntries.add(fieldName); colHeaders.add(fieldName); colTypes.add("double"); colFormat.add("%.5f");
}
}
int rows = scoreTable.get("Timestamp").size(); // number of entries of training history
TwoDimTable table = new TwoDimTable(
tableName, null,
new String[rows],
colHeaders.toArray(new String[0]),
colTypes.toArray(new String[0]),
colFormat.toArray(new String[0]),
"");
assert (rows <= table.getRowDim());
for (int row = 0; row < rows; row++) {
int col = 0;
// take care of Timestamp, Duration, Iteration.
DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss");
table.set(row, col++, fmt.print((long) scoreTable.get("Timestamp").get(row)));
table.set(row, col++, PrettyPrint.msecs((long) scoreTable.get("Timestamp").get(row) - startTime, true));
table.set(row, col++, row);
// take care of the extra field
for (int remaining_cols = 0; remaining_cols < otherTableEntries.size(); remaining_cols++) {
table.set(row, col++, (double) scoreTable.get(otherTableEntries.get(remaining_cols)).get(row));
}
}
return table;
}
/**
* This function will transform the eigenvectors calculated for a matrix T(A) to the ones calculated for
* matrix A.
*
* @param dinfo
* @param vEigenIn
* @return transformed eigenvectors
*/
public static double[][] getTransformedEigenvectors(DataInfo dinfo, double[][] vEigenIn) {
Frame tempFrame = new Frame(dinfo._adaptedFrame);
Frame eigFrame = new water.util.ArrayUtils().frame(vEigenIn);
tempFrame.add(eigFrame);
LinearAlgebraUtils.SMulTask stsk = new LinearAlgebraUtils.SMulTask(dinfo, eigFrame.numCols(),
dinfo._numOffsets[dinfo._numOffsets.length - 1]); // will allocate new memory for _atq
double[][] eigenVecs = stsk.doAll(tempFrame)._atq;
if (eigFrame != null) { // delete frame to prevent leaking keys.
eigFrame.delete();
}
// need to normalize eigenvectors after multiplication by transpose(A) so that they have unit norm
double[][] eigenVecsTranspose = transpose(eigenVecs); // transpose will allocate memory
double[] eigenNormsI = new double[eigenVecsTranspose.length];
for (int vecIndex = 0; vecIndex < eigenVecsTranspose.length; vecIndex++) {
eigenNormsI[vecIndex] = 1.0 / l2norm(eigenVecsTranspose[vecIndex]);
}
return transpose(mult(eigenVecsTranspose, eigenNormsI));
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/util/DistributionUtils.java
|
package hex.util;
import hex.genmodel.utils.DistributionFamily;
import hex.glm.GLMModel;
import water.exceptions.H2OIllegalArgumentException;
import static hex.genmodel.utils.DistributionFamily.bernoulli;
import static hex.glm.GLMModel.GLMParameters.Family.*;
public class DistributionUtils {
public static DistributionFamily familyToDistribution(GLMModel.GLMParameters.Family aFamily) {
if (aFamily == GLMModel.GLMParameters.Family.binomial) {
return bernoulli;
}
try {
return Enum.valueOf(DistributionFamily.class, aFamily.toString());
}
catch (IllegalArgumentException e) {
throw new H2OIllegalArgumentException("DistributionFamily not supported for Family: " + aFamily);
}
}
public static GLMModel.GLMParameters.Family distributionToFamily(DistributionFamily distribution) {
if (bernoulli.equals(distribution))
return binomial;
try {
return Enum.valueOf(GLMModel.GLMParameters.Family.class, distribution.toString());
} catch (IllegalArgumentException e) {
throw new H2OIllegalArgumentException("Family not supported for DistributionFamily: " + distribution);
}
}
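// Illustrative sketch (not part of the original source): the two mappings are inverses for the common cases.
// bernoulli <-> binomial are special-cased; other values map by name, and unsupported values raise
// H2OIllegalArgumentException. The helper name is hypothetical.
private static void familyDistributionSketch() {
DistributionFamily d = familyToDistribution(GLMModel.GLMParameters.Family.binomial); // -> bernoulli
GLMModel.GLMParameters.Family f = distributionToFamily(DistributionFamily.gaussian); // -> Family.gaussian
assert d == bernoulli && f == gaussian;
}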
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/util/EffectiveParametersUtils.java
|
package hex.util;
import hex.Model;
import hex.ScoreKeeper;
import hex.genmodel.utils.DistributionFamily;
import hex.tree.CalibrationHelper;
import hex.tree.SharedTreeModel;
import hex.tree.uplift.UpliftDRFModel;
public class EffectiveParametersUtils {
public static void initFoldAssignment(
Model.Parameters params
) {
if (params._fold_assignment == Model.Parameters.FoldAssignmentScheme.AUTO) {
if (params._nfolds > 0 && params._fold_column == null) {
params._fold_assignment = Model.Parameters.FoldAssignmentScheme.Random;
} else {
params._fold_assignment = null;
}
}
}
public static void initHistogramType(
SharedTreeModel.SharedTreeParameters params
) {
if (params._histogram_type == SharedTreeModel.SharedTreeParameters.HistogramType.AUTO) {
params._histogram_type = SharedTreeModel.SharedTreeParameters.HistogramType.UniformAdaptive;
}
}
public static void initStoppingMetric(
Model.Parameters params,
boolean isClassifier
) {
if (params._stopping_metric == ScoreKeeper.StoppingMetric.AUTO) {
if (params._stopping_rounds == 0) {
params._stopping_metric = null;
} else {
if (isClassifier) {
params._stopping_metric = ScoreKeeper.StoppingMetric.logloss;
} else {
params._stopping_metric = ScoreKeeper.StoppingMetric.deviance;
}
}
}
}
public static void initDistribution(
Model.Parameters params,
int nclasses
) {
if (params._distribution == DistributionFamily.AUTO) {
if (nclasses == 1) {
params._distribution = DistributionFamily.gaussian;}
if (nclasses == 2) {
params._distribution = DistributionFamily.bernoulli;}
if (nclasses >= 3) {
params._distribution = DistributionFamily.multinomial;}
}
}
public static void initCategoricalEncoding(
Model.Parameters params,
Model.Parameters.CategoricalEncodingScheme scheme
) {
if (params._categorical_encoding == Model.Parameters.CategoricalEncodingScheme.AUTO) {
params._categorical_encoding = scheme;
}
}
public static void initUpliftMetric(UpliftDRFModel.UpliftDRFParameters params
) {
if (params._uplift_metric == UpliftDRFModel.UpliftDRFParameters.UpliftMetricType.AUTO) {
params._uplift_metric = UpliftDRFModel.UpliftDRFParameters.UpliftMetricType.KL;
}
}
public static void initCalibrationMethod(CalibrationHelper.ParamsWithCalibration params) {
if (params.getCalibrationMethod() == CalibrationHelper.CalibrationMethod.AUTO) {
params.setCalibrationMethod(CalibrationHelper.CalibrationMethod.PlattScaling);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/util/EigenPair.java
|
package hex.util;
public class EigenPair implements Comparable<EigenPair> {
public double eigenvalue;
public double[] eigenvector;
public EigenPair(double eigenvalue, double[] eigenvector) {
this.eigenvalue = eigenvalue;
this.eigenvector = eigenvector;
}
/**
* Compare an eigenPair = (eigenvalue, eigenVector) against otherEigenPair based on respective eigenValues
*/
@Override
public int compareTo(EigenPair otherEigenPair) {
return eigenvalue < otherEigenPair.eigenvalue ? -1 : (eigenvalue > otherEigenPair.eigenvalue ? 1 : 0);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/util/LinearAlgebraUtils.java
|
package hex.util;
import Jama.EigenvalueDecomposition;
import Jama.Matrix;
import Jama.QRDecomposition;
import hex.DataInfo;
import hex.FrameTask;
import hex.Interaction;
import hex.ToEigenVec;
import hex.gam.MatrixFrameUtils.TriDiagonalMatrix;
import hex.gram.Gram;
import jsr166y.ForkJoinTask;
import jsr166y.RecursiveAction;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.Log;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static java.util.Arrays.sort;
import static org.apache.commons.lang.ArrayUtils.reverse;
import static water.util.ArrayUtils.*;
public class LinearAlgebraUtils {
/*
* Forward substitution: Solve Lx = b for x with L = lower triangular matrix, b = real vector
*/
public static double[] forwardSolve(double[][] L, double[] b) {
assert L != null && L.length == b.length; // && L.length == L[0].length, allow true lower triangular matrix
double[] res = new double[b.length];
for(int i = 0; i < b.length; i++) {
res[i] = b[i];
for(int j = 0; j < i; j++)
res[i] -= L[i][j] * res[j];
res[i] /= L[i][i];
}
return res;
}
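// Illustrative sketch (not part of the original source): a tiny worked example of forwardSolve.
// For L = [[2, 0], [1, 3]] and b = [4, 7], forward substitution gives x = [2, 5/3].
// The helper name is hypothetical and only demonstrates the expected behavior.
private static double[] forwardSolveSketch() {
double[][] L = {{2, 0}, {1, 3}};
double[] b = {4, 7};
return forwardSolve(L, b); // x[0] = 4/2 = 2; x[1] = (7 - 1*2)/3 = 5/3
}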
/**
* Given a matrix aMat as a double [][] array, this function will return an array that is the
* square root of the diagonals of aMat. Note that the first index is column and the second index
* is row.
* @param aMat
* @return
*/
public static double[] sqrtDiag(double[][] aMat) {
int matrixSize = aMat.length;
double[] answer = new double[matrixSize];
for (int index=0; index < matrixSize; index++)
answer[index] = Math.sqrt(aMat[index][index]);
return answer;
}
public static double[][] chol2Inv(final double[][] cholR, boolean upperTriag) {
final int matrixSize = cholR.length; // cholR is actually transpose(R) from QR
double[][] cholL = upperTriag?ArrayUtils.transpose(cholR):cholR; // cholL is the lower-triangular factor L = R'
final double[][] inverted = new double[matrixSize][];
RecursiveAction[] ras = new RecursiveAction[matrixSize];
for (int index=0; index<matrixSize; index++) {
final double[] oneColumn = new double[matrixSize];
oneColumn[index] = 1.0;
final int i = index;
ras[i] = new RecursiveAction() {
@Override protected void compute() {
double[] upperColumn = forwardSolve(cholL, oneColumn);
inverted[i] = Arrays.copyOf(upperColumn, matrixSize);
}
};
}
ForkJoinTask.invokeAll(ras);
double[][] cholRNew = upperTriag?cholR:ArrayUtils.transpose(cholR);
for (int index=0; index<matrixSize; index++) {
final double[] oneColumn = new double[matrixSize];
oneColumn[index] = 1.0;
final int i = index;
ras[i] = new RecursiveAction() {
@Override protected void compute() {
double[] lowerColumn = new double[matrixSize];
backwardSolve(cholRNew, inverted[i], lowerColumn);
inverted[i] = Arrays.copyOf(lowerColumn, matrixSize);
}
};
}
ForkJoinTask.invokeAll(ras);
return inverted;
}
/**
* Given the R factor from the QR decomposition X = QR (equivalently the Cholesky factor of X'X), this
* method will return the inverse of transpose(X)*X by solving transpose(R)*R*XTX_inverse = Identity matrix
*
* @param cholR
* @return
*/
public static double[][] chol2Inv(final double[][] cholR) {
return chol2Inv(cholR, true);
}
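// Illustrative sketch (not part of the original source): inverting R'R from its upper-triangular factor R.
// For R = [[2, 1], [0, 2]], R'R = [[4, 2], [2, 5]] and chol2Inv returns its inverse
// [[0.3125, -0.125], [-0.125, 0.25]]. The helper name is hypothetical.
private static double[][] chol2InvSketch() {
double[][] R = {{2, 1}, {0, 2}};
return chol2Inv(R); // approximately {{0.3125, -0.125}, {-0.125, 0.25}}
}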
/***
* Generate the D matrix as a lower-triangular matrix, since it is symmetric and contains only 3 diagonals
* @param hj
* @return
*/
public static double[][] generateTriDiagMatrix(final double[] hj) {
final int matrixSize = hj.length-1; // matrix size is numKnots-2
final double[][] lowDiag = new double[matrixSize][];
RecursiveAction[] ras = new RecursiveAction[matrixSize];
for (int index=0; index<matrixSize; index++) {
final int rowSize = index+1;
final int i = index;
final double hjIndex = hj[index];
final double hjIndexP1 = hj[index+1];
final double oneO3 = 1.0/3.0;
final double oneO6 = 1.0/6.0;
final double[] tempDiag = MemoryManager.malloc8d(rowSize);
ras[i] = new RecursiveAction() {
@Override protected void compute() {
tempDiag[i] = (hjIndex+hjIndexP1)*oneO3;
if (i > 0)
tempDiag[i-1] = hjIndex*oneO6;
lowDiag[i] = Arrays.copyOf(tempDiag, rowSize);
}
};
}
ForkJoinTask.invokeAll(ras);
return lowDiag;
}
/***
* Given a matrix, a QR decomposition is carried out on the matrix as starT = QR. Given Q, an orthogonal basis
* that is the complement of Q is generated, consisting of numBasis vectors.
*
* @param starT: double array on which the QR decomposition is carried out
* @param numBasis: integer denoting the number of basis vectors in the orthogonal complement
* @return
*/
public static double[][] generateOrthogonalComplement(final double[][] orthMat, final double[][] starT, final int numBasis, long seed) {
final int numOrthVec = orthMat[0].length; // number of vectors in original orthogonal basis
final int vecSize = orthMat.length; // size of vector
final double[][] orthMatT = transpose(orthMat);
double[][] orthMatCompT = new double[numBasis][vecSize]; // store transpose of orthogonal complement
double[][] orthMatCompT2 = new double[numBasis][vecSize];
double[][] orthMatCompT3;
double[] innerProd = new double[numOrthVec];
double[] scaleProd = new double[vecSize];
// take the difference between random vectors and qMat
orthMatCompT3 = subtract(generateIdentityMat(vecSize), ArrayUtils.multArrArr(orthMat, orthMatT));
for (int index = 0; index < numBasis; index++) {
System.arraycopy(orthMatCompT3[index], 0, orthMatCompT2[index], 0, vecSize);
}
applyGramSchmit(orthMatCompT2);
for (int index = 0; index < numBasis; index++) {
orthMatCompT[index] = ArrayUtils.gaussianVector(seed + index, orthMatCompT[index]);
genInnerProduct(orthMatT, orthMatCompT[index], innerProd);
for (int basisInd = 0; basisInd < numOrthVec; basisInd++) {
System.arraycopy(orthMatT[basisInd], 0, scaleProd, 0, vecSize);
mult(scaleProd, innerProd[basisInd]);
subtract(orthMatCompT[index], scaleProd, orthMatCompT[index]);
}
}
// go through random vectors with orthogonal vector basis subtracted from it, make them orthogonal to each other
applyGramSchmit(orthMatCompT);
return orthMatCompT;
}
public static double[][] generateIdentityMat(int size) {
double[][] identity = new double[size][size];
for (int index = 0; index < size; index++)
identity[index][index] = 1.0;
return identity;
}
public static double[][] generateQR(final double[][] starT) {
Matrix starTMat = new Matrix(starT); // generate Zcs as in 3.3
QRDecomposition starTMat_qr = new QRDecomposition(starTMat);
return starTMat_qr.getQ().getArray();
}
public static void genInnerProduct(double[][] mat, double[] vector, double[] innerProd) {
int numVec = mat.length;
for (int index = 0; index < numVec; index++) {
innerProd[index] = ArrayUtils.innerProduct(mat[index], vector);
}
}
public static void applyGramSchmit(double[][] matT) {
int numVec = matT.length;
int vecSize = matT[0].length;
double[] innerProd = new double[numVec];
double[] scaleVec = new double[vecSize];
for (int index = 0; index < numVec; index++) {
genInnerProduct(matT, matT[index], innerProd);
for (int indexJ = 0; indexJ < index; indexJ++) { // take the difference between random vectors
System.arraycopy(matT[indexJ], 0, scaleVec, 0, vecSize);
mult(scaleVec, innerProd[indexJ]);
subtract(matT[index], scaleVec, matT[index]);
}
double mag = 1.0/l2norm(matT[index]);
ArrayUtils.mult(matT[index], mag); // make vector to have unit magnitude
}
}
public static double[][] expandLowTrian2Ful(double[][] cholL) {
int numRows = cholL.length;
final double[][] result = new double[numRows][];
RecursiveAction[] ras = new RecursiveAction[numRows];
for (int index = 0; index < numRows; index++) {
final int i = index;
final double[] tempResult = MemoryManager.malloc8d(numRows);
ras[i] = new RecursiveAction() {
@Override protected void compute() {
for (int colIndex = 0; colIndex <= i; colIndex++)
tempResult[colIndex] = cholL[i][colIndex];
result[i] = Arrays.copyOf(tempResult, numRows);
}
};
}
ForkJoinTask.invokeAll(ras);
return result;
}
public static double[][] matrixMultiply(double[][] A, double[][] B ) {
int arow = A[0].length; // number of rows of result
int acol = A.length; // number columns in A
int bcol = B.length; // number of columns of B
final double[][] result = new double[bcol][];
RecursiveAction[] ras = new RecursiveAction[acol];
for (int index = 0; index < acol; index++) {
final int i = index;
final double[] tempResult = new double[arow];
ras[i] = new RecursiveAction() {
@Override protected void compute() {
ArrayUtils.multArrVec(A, B[i], tempResult);
result[i] = Arrays.copyOf(tempResult, arow);
}
};
}
ForkJoinTask.invokeAll(ras);
return ArrayUtils.transpose(result);
}
/**
*
* @param A
* @param B
* @param transposeResult: true will return A*B. Otherwise will return transpose(A*B)
* @return
*/
public static double[][] matrixMultiplyTriagonal(double[][] A, TriDiagonalMatrix B, boolean transposeResult) {
int arow = A.length; // number of rows of result
final int bcol = B._size+2; // number of columns of B, K
final int lastCol = bcol-1; // last column of B
final int secondLastCol = bcol-2; // second to last column
final int kMinus1 = bcol-3; // should be k-2 but we count from 0 and not 1, hence bcol-3 and not bcol-2
final int kMinus2 = bcol-4;
final double[][] result = new double[bcol][];
RecursiveAction[] ras = new RecursiveAction[bcol];
for (int index = 0; index < bcol; index++) { // go through each column of TriDiagonalMatrix B
final int i = index;
final double[] tempResult = new double[arow];
final double[] bColVec = new double[B._size];
ras[i] = new RecursiveAction() { // multiply each column of B with A
@Override protected void compute() {
if (i==0) {
bColVec[0] = B._first_diag[0];
} else if (i==1) {
bColVec[0] = B._second_diag[0];
if (B._first_diag.length > 1)
bColVec[1] = B._first_diag[1];
} else if (i==lastCol) {
bColVec[kMinus1] = B._third_diag[kMinus1];
} else if (i==secondLastCol) {
bColVec[kMinus2] = B._third_diag[kMinus2];
bColVec[kMinus1] =B._second_diag[kMinus1];
} else {
bColVec[i-2] = B._third_diag[i-2];
bColVec[i-1] = B._second_diag[i-1];
bColVec[i] = B._first_diag[i];
}
ArrayUtils.multArrVec(A, bColVec, tempResult);
result[i] = Arrays.copyOf(tempResult, arow);
}
};
}
ForkJoinTask.invokeAll(ras);
return transposeResult?ArrayUtils.transpose(result):result;
}
public static double[] backwardSolve(double[][] L, double[] b, double[] res) {
assert L != null && L.length == L[0].length && L.length == b.length;
if (res==null) // only allocate memory if needed
res = new double[b.length];
int lastIndex = b.length-1;
for (int rowIndex = lastIndex; rowIndex >= 0; rowIndex--) {
res[rowIndex] = b[rowIndex];
for (int colIndex = lastIndex; colIndex > rowIndex; colIndex--) {
res[rowIndex] -= L[rowIndex][colIndex]*res[colIndex];
}
res[rowIndex] /= L[rowIndex][rowIndex];
}
return res;
}
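// Illustrative sketch (not part of the original source): backwardSolve on a 2x2 upper-triangular system.
// For U = [[2, 1], [0, 3]] and b = [5, 6], back substitution yields x = [1.5, 2]. The helper name is hypothetical.
private static double[] backwardSolveSketch() {
double[][] U = {{2, 1}, {0, 3}};
double[] b = {5, 6};
return backwardSolve(U, b, null); // x[1] = 6/3 = 2; x[0] = (5 - 1*2)/2 = 1.5
}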
/*
* Impute missing values and transform numeric value x in col of dinfo._adaptedFrame
*/
private static double modifyNumeric(double x, int col, DataInfo dinfo) {
double y = (Double.isNaN(x) && dinfo._imputeMissing) ? dinfo._numNAFill[col] : x; // Impute missing value
if (dinfo._normSub != null && dinfo._normMul != null) // Transform x if requested
y = (y - dinfo._normSub[col]) * dinfo._normMul[col];
return y;
}
/*
* Return row with categoricals expanded in array tmp
*/
public static double[] expandRow(double[] row, DataInfo dinfo, double[] tmp, boolean modify_numeric) {
// Categorical columns
int cidx;
for(int col = 0; col < dinfo._cats; col++) {
if (Double.isNaN(row[col])) {
if (dinfo._imputeMissing)
cidx = dinfo.catNAFill()[col];
else if (!dinfo._catMissing[col])
continue; // Skip if entry missing and no NA bucket. All indicators will be zero.
else
cidx = dinfo._catOffsets[col+1]-1; // Otherwise, missing value turns into extra (last) factor
} else {
if ((dinfo._catOffsets[col + 1] - dinfo._catOffsets[col]) == 1)
cidx = dinfo.getCategoricalId(col, 0);
else
cidx = dinfo.getCategoricalId(col, (int) row[col]);
}
if (((dinfo._catOffsets[col+1]-dinfo._catOffsets[col]) == 1) && cidx >=0) // binary data here, no column expansion, copy data
tmp[cidx] = row[col];
else if(cidx >= 0) tmp[cidx] = 1;
}
// Numeric columns
int chk_cnt = dinfo._cats;
int exp_cnt = dinfo.numStart();
for(int col = 0; col < dinfo._nums; col++) {
// Only do imputation and transformation if requested
tmp[exp_cnt] = modify_numeric ? modifyNumeric(row[chk_cnt], col, dinfo) : row[chk_cnt];
exp_cnt++; chk_cnt++;
}
return tmp;
}
public static double[][] reshape1DArray(double[] arr, int m, int n) {
double[][] arr2D = new double[m][n];
for (int i = 0; i < m; i++) {
System.arraycopy(arr, i * n, arr2D[i], 0, n);
}
return arr2D;
}
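// Illustrative sketch (not part of the original source): reshape1DArray turns a flat row-major array into m x n.
// reshape1DArray({1, 2, 3, 4, 5, 6}, 2, 3) -> {{1, 2, 3}, {4, 5, 6}}. The helper name is hypothetical.
private static double[][] reshapeSketch() {
return reshape1DArray(new double[]{1, 2, 3, 4, 5, 6}, 2, 3); // {{1, 2, 3}, {4, 5, 6}}
}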
public static EigenPair[] createSortedEigenpairs(double[] eigenvalues, double[][] eigenvectors) {
int count = eigenvalues.length;
EigenPair eigenPairs[] = new EigenPair[count];
for (int i = 0; i < count; i++) {
eigenPairs[i] = new EigenPair(eigenvalues[i], eigenvectors[i]);
}
sort(eigenPairs);
return eigenPairs;
}
public static EigenPair[] createReverseSortedEigenpairs(double[] eigenvalues, double[][] eigenvectors) {
EigenPair[] eigenPairs = createSortedEigenpairs(eigenvalues, eigenvectors);
reverse(eigenPairs);
return eigenPairs;
}
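// Illustrative sketch (not part of the original source): sorting eigenpairs by eigenvalue.
// With eigenvalues {1.0, 3.0, 2.0}, createReverseSortedEigenpairs returns them ordered as {3.0, 2.0, 1.0},
// each eigenvector staying paired with its eigenvalue. The helper name is hypothetical.
private static EigenPair[] sortedEigenpairsSketch() {
double[] eigenvalues = {1.0, 3.0, 2.0};
double[][] eigenvectors = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
return createReverseSortedEigenpairs(eigenvalues, eigenvectors); // descending eigenvalue order
}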
public static double[] extractEigenvaluesFromEigenpairs(EigenPair[] eigenPairs) {
int count = eigenPairs.length;
double[] eigenvalues = new double[count];
for (int i = 0; i < count; i++) {
eigenvalues[i] = eigenPairs[i].eigenvalue;
}
return eigenvalues;
}
public static double[][] extractEigenvectorsFromEigenpairs(EigenPair[] eigenPairs) {
int count = eigenPairs.length;
double[][] eigenvectors = new double[count][];
for (int i = 0; i < count; i++) {
eigenvectors[i] = eigenPairs[i].eigenvector;
}
return eigenvectors;
}
public static class FindMaxIndex extends MRTask<FindMaxIndex> {
public long _maxIndex = -1;
int _colIndex;
double _maxValue;
public FindMaxIndex(int colOfInterest, double maxValue) {
_colIndex = colOfInterest;
_maxValue = maxValue;
}
@Override
public void map(Chunk[] cs) {
int rowLen = cs[0].len();
long startRowIndex = cs[0].start();
for (int rowIndex=0; rowIndex < rowLen; rowIndex++) {
double rowVal = cs[_colIndex].atd(rowIndex);
if (rowVal == _maxValue) {
_maxIndex = startRowIndex+rowIndex;
}
}
}
@Override public void reduce(FindMaxIndex other) {
if (this._maxIndex < 0)
this._maxIndex = other._maxIndex;
else if (this._maxIndex > other._maxIndex)
this._maxIndex = other._maxIndex;
}
}
public static class CopyQtoQMatrix extends MRTask<CopyQtoQMatrix> {
@Override public void map(Chunk[] cs) {
int totColumn = cs.length; // all columns in cs.
int halfColumn = totColumn/2; // start of Q matrix
int totRows = cs[0].len();
for (int rowIndex=0; rowIndex < totRows; rowIndex++) {
for (int colIndex=0; colIndex < halfColumn; colIndex++) {
cs[colIndex].set(rowIndex, cs[colIndex+halfColumn].atd(rowIndex));
}
}
}
}
/**
* Computes B = XY where X is n by k and Y is k by p, saving result in new vecs
* Input: dinfo = X (large frame) with dinfo._adaptedFrame passed to doAll
* yt = Y' = transpose of Y (small matrix)
* Output: XY (large frame) is n by p
*/
public static class BMulTask extends FrameTask<BMulTask> {
final double[][] _yt; // _yt = Y' (transpose of Y)
public BMulTask(Key<Job> jobKey, DataInfo dinfo, double[][] yt) {
super(jobKey, dinfo);
_yt = yt;
}
@Override protected void processRow(long gid, DataInfo.Row row, NewChunk[] outputs) {
for(int p = 0; p < _yt.length; p++) {
double x = row.innerProduct(_yt[p]);
outputs[p].addNum(x);
}
}
}
/**
* Compute B = XY where X is n by k and Y is k by p and they are both stored as Frames. The
* result will be stored in part of X as X|B. Make sure you allocate the correct memory to your X
* frame. In addition, this will only work with numerical columns.
*
* Note that there is a size limitation on the y Frame: its rows must be indexable by integer values only
* (not long); otherwise, the result will be gibberish.
*/
public static class BMulTaskMatrices extends MRTask<BMulTaskMatrices> {
final Frame _y; // frame to store y
final int _nyChunks; // number of chunks of y Frame
final int _yColNum;
public BMulTaskMatrices(Frame y) {
_y = y;
_nyChunks = _y.anyVec().nChunks();
_yColNum = _y.numCols();
}
private void mulResultPerYChunk(Chunk[] xChunk, Chunk[] yChunk) {
int xChunkLen = xChunk[0].len();
int yColLen = yChunk.length;
int yChunkLen = yChunk[0].len();
int resultColOffset = xChunk.length-yColLen; // start of result column in xChunk
int xChunkColOffset = (int) yChunk[0].start();
for (int colIndex=0; colIndex < yColLen; colIndex++) {
int resultColIndex = colIndex+resultColOffset;
for (int rowIndex=0; rowIndex < xChunkLen; rowIndex++) {
double origResult = xChunk[resultColIndex].atd(rowIndex);
for (int interIndex=0; interIndex < yChunkLen; interIndex++) {
origResult += xChunk[interIndex+xChunkColOffset].atd(rowIndex)*yChunk[colIndex].atd(interIndex);
}
xChunk[resultColIndex].set(rowIndex, origResult);
}
}
}
@Override public void map(Chunk[] xChunk) {
Chunk[] ychunk = new Chunk[_y.numCols()];
for (int ychunkInd=0; ychunkInd < _nyChunks; ychunkInd++) {
for (int chkIndex =0 ; chkIndex < _yColNum; chkIndex++) // grab a y chunk
ychunk[chkIndex] = _y.vec(chkIndex).chunkForChunkIdx(ychunkInd);
mulResultPerYChunk(xChunk, ychunk);
}
}
}
/**
* Computes B = XY where X is n by k and Y is k by p, saving result in same frame
* Input: [X,B] (large frame) passed to doAll, where we write to B
* yt = Y' = transpose of Y (small matrix)
* ncolX = number of columns in X
*/
public static class BMulInPlaceTask extends MRTask<BMulInPlaceTask> {
final DataInfo _xinfo; // Info for frame X
final double[][] _yt; // _yt = Y' (transpose of Y)
final int _ncolX; // Number of cols in X
public boolean _originalImplementation = true; // if true will produce xB+b0. If false, just inner product
public BMulInPlaceTask(DataInfo xinfo, double[][] yt, int nColsExp) {
assert yt != null && yt[0].length == nColsExp;
_xinfo = xinfo;
_ncolX = xinfo._adaptedFrame.numCols();
_yt = yt;
}
public BMulInPlaceTask(DataInfo xinfo, double[][] yt, int nColsExp, boolean originalWay) {
assert yt != null && yt[0].length == nColsExp;
_xinfo = xinfo;
_ncolX = xinfo._adaptedFrame.numCols();
_yt = yt;
_originalImplementation = originalWay;
}
@Override public void map(Chunk[] cs) {
assert cs.length == _ncolX + _yt.length;
int lastColInd = _ncolX-1;
// Copy over only X frame chunks
Chunk[] xchk = new Chunk[_ncolX]; // only refer to X part, old part of frame
DataInfo.Row xrow = _xinfo.newDenseRow();
System.arraycopy(cs,0,xchk,0,_ncolX);
double sum;
for(int row = 0; row < cs[0]._len; row++) {
// Extract row of X
_xinfo.extractDenseRow(xchk, row, xrow);
if (xrow.isBad()) continue;
int bidx = _ncolX;
for (double[] ps : _yt ) {
// Inner product of X row with Y column (Y' row)
sum = _originalImplementation?xrow.innerProduct(ps):xrow.innerProduct(ps)-ps[lastColInd];
cs[bidx].set(row, sum); // Save inner product to B, new part of frame
bidx++;
}
assert bidx == cs.length;
}
}
}
/**
* Computes A'Q where A is n by p and Q is n by k
* Input: [A,Q] (large frame) passed to doAll
* Output: atq = A'Q (small matrix) is \tilde{p} by k where \tilde{p} = number of cols in A with categoricals expanded
*/
public static class SMulTask extends MRTask<SMulTask> {
final DataInfo _ainfo; // Info for frame A
final int _ncolA; // Number of cols in A
final int _ncolExp; // Number of cols in A with categoricals expanded
final int _ncolQ; // Number of cols in Q
public double[][] _atq; // Output: A'Q is p_exp by k, where p_exp = number of cols in A with categoricals expanded
public SMulTask(DataInfo ainfo, int ncolQ) {
_ainfo = ainfo;
_ncolA = ainfo._adaptedFrame.numCols();
_ncolExp = numColsExp(ainfo._adaptedFrame,true);
_ncolQ = ncolQ;
}
public SMulTask(DataInfo ainfo, int ncolQ, int ncolExp) {
_ainfo = ainfo;
_ncolA = ainfo._adaptedFrame.numCols();
_ncolExp = ncolExp; // when call from GLRM or PCA
_ncolQ = ncolQ;
}
@Override public void map(Chunk cs[]) {
assert (_ncolA + _ncolQ) == cs.length;
_atq = new double[_ncolExp][_ncolQ]; // not okay to share.
for(int k = _ncolA; k < (_ncolA + _ncolQ); k++) {
// Categorical columns
int cidx;
for(int p = 0; p < _ainfo._cats; p++) {
for(int row = 0; row < cs[0]._len; row++) {
if(cs[p].isNA(row) && _ainfo._skipMissing) continue;
double q = cs[k].atd(row);
double a = cs[p].atd(row);
if (Double.isNaN(a)) {
if (_ainfo._imputeMissing)
cidx = _ainfo.catNAFill()[p];
else if (!_ainfo._catMissing[p])
continue; // Skip if entry missing and no NA bucket. All indicators will be zero.
else
cidx = _ainfo._catOffsets[p+1]-1; // Otherwise, missing value turns into extra (last) factor
} else
cidx = _ainfo.getCategoricalId(p, (int)a);
if(cidx >= 0) _atq[cidx][k-_ncolA] += q; // Ignore categorical levels outside domain
}
}
// Numeric columns
int pnum = 0;
int pexp = _ainfo.numStart();
for(int p = _ainfo._cats; p < _ncolA; p++) {
for(int row = 0; row < cs[0]._len; row++) {
if(cs[p].isNA(row) && _ainfo._skipMissing) continue;
double q = cs[k].atd(row);
double a = cs[p].atd(row);
a = modifyNumeric(a, pnum, _ainfo);
_atq[pexp][k-_ncolA] += q * a;
}
pexp++; pnum++;
}
assert pexp == _atq.length;
}
}
@Override public void reduce(SMulTask other) {
ArrayUtils.add(_atq, other._atq);
}
}
/***
* Compute the Cholesky factor of xx, which stores the lower part of a symmetric square tridiagonal matrix. We assume
* that all the elements are positive. The replacement is done in place, so L is stored back in the input
* xx.
* @param xx
* @return
*/
public static void choleskySymDiagMat(double[][] xx) {
xx[0][0] = Math.sqrt(xx[0][0]);
int rowNumber = xx.length;
for (int row = 1; row < rowNumber; row++) {
// deals with lower diagonal element
int lowerDiag = row-1;
if (lowerDiag > 0) {
int kMinus2 = lowerDiag - 1;
xx[row][lowerDiag] = (xx[row][lowerDiag] - xx[row][kMinus2])/xx[lowerDiag][lowerDiag];
} else {
xx[row][lowerDiag] = xx[row][lowerDiag]/xx[lowerDiag][lowerDiag];
}
// deals with diagonal element
xx[row][row] = Math.sqrt(xx[row][row]-xx[row][lowerDiag]*xx[row][lowerDiag]);
}
}
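// Illustrative sketch (not part of the original source): in-place Cholesky of a symmetric tridiagonal matrix.
// For A = [[4, 2], [2, 5]] stored as its lower part {{4}, {2, 5}}, the routine overwrites it with the factor
// L = {{2}, {1, 2}}, and L*L' indeed reconstructs A. The helper name is hypothetical.
private static double[][] choleskySymDiagMatSketch() {
double[][] xx = {{4}, {2, 5}};
choleskySymDiagMat(xx); // xx becomes {{2}, {1, 2}}
return xx;
}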
/**
* Get R = L' from Cholesky decomposition Y'Y = LL' (same as R from Y = QR)
* @param jobKey Job key for Gram calculation
* @param yinfo DataInfo for Y matrix
* @param transpose Should result be transposed to get L?
* @return L or R matrix from Cholesky of Y Gram
*/
public static double[][] computeR(Key<Job> jobKey, DataInfo yinfo, boolean transpose) {
// Calculate Cholesky of Y Gram to get R' = L matrix
Gram.GramTask gtsk = new Gram.GramTask(jobKey, yinfo); // Gram is Y'Y/n where n = nrow(Y)
gtsk.doAll(yinfo._adaptedFrame);
Gram.Cholesky chol = gtsk._gram.cholesky(null); // If Y'Y = LL' Cholesky, then R = L'
double[][] L = chol.getL();
ArrayUtils.mult(L, Math.sqrt(gtsk._nobs)); // Must scale since Cholesky of Y'Y/n where nobs = nrow(Y)
return transpose ? L : ArrayUtils.transpose(L);
}
/**
* Solve for Q from Y = QR factorization and write into new frame
* @param jobKey Job key for Gram calculation
* @param yinfo DataInfo for Y matrix
* @param ywfrm Input frame [Y,W] where we write into W
* @return sum of squared differences between Q and W, where W is the old matrix in the frame and Q is the computed factorization
*/
public static double computeQ(Key<Job> jobKey, DataInfo yinfo, Frame ywfrm, double[][] xx) {
xx = computeR(jobKey, yinfo, true);
ForwardSolve qrtsk = new ForwardSolve(yinfo, xx);
qrtsk.doAll(ywfrm);
return qrtsk._sse; // \sum (Q_{i,j} - W_{i,j})^2
}
public static double[][] computeQ(Key<Job> jobKey, DataInfo yinfo, Frame ywfrm) {
double[][] xx = computeR(jobKey, yinfo, true);
ForwardSolve qrtsk = new ForwardSolve(yinfo, xx);
qrtsk.doAll(ywfrm);
return xx; // R matrix from the Cholesky of Y'Y (the solved Q factor is written into ywfrm)
}
/**
* Solve for Q from Y = QR factorization and write into Y frame
* @param jobKey Job key for Gram calculation
* @param yinfo DataInfo for Y matrix
*/
public static double[][] computeQInPlace(Key<Job> jobKey, DataInfo yinfo) {
double[][] cholL = computeR(jobKey, yinfo, true);
ForwardSolveInPlace qrtsk = new ForwardSolveInPlace(yinfo, cholL);
qrtsk.doAll(yinfo._adaptedFrame);
return cholL;
}
/**
* Given lower triangular L, solve for Q in QL' = A (LQ' = A') using forward substitution
* Dimensions: A is n by p, Q is n by p, R = L' is p by p
* Input: [A,Q] (large frame) passed to doAll, where we write to Q
*/
public static class ForwardSolve extends MRTask<ForwardSolve> {
final DataInfo _ainfo; // Info for frame A
final int _ncols; // Number of cols in A and in Q
final double[][] _L;
public double _sse; // Output: Sum-of-squared difference between old and new Q
public ForwardSolve(DataInfo ainfo, double[][] L) {
assert L != null && L.length == L[0].length && L.length == ainfo._adaptedFrame.numCols();
_ainfo = ainfo;
_ncols = ainfo._adaptedFrame.numCols();
_L = L;
_sse = 0;
}
@Override public void map(Chunk cs[]) {
assert 2 * _ncols == cs.length;
// Copy over only A frame chunks
Chunk[] achks = new Chunk[_ncols];
System.arraycopy(cs,0,achks,0,_ncols);
for(int row = 0; row < cs[0]._len; row++) {
// 1) Extract single expanded row of A
DataInfo.Row arow = _ainfo.newDenseRow();
_ainfo.extractDenseRow(achks, row, arow);
if (arow.isBad()) continue;
double[] aexp = arow.expandCats();
// 2) Solve for single row of Q using forward substitution
double[] qrow = forwardSolve(_L, aexp);
// 3) Save row of solved values into Q
int i = 0;
for(int d = _ncols; d < 2 * _ncols; d++) {
double qold = cs[d].atd(row);
double diff = qrow[i] - qold;
_sse += diff * diff; // Calculate SSE between Q_new and Q_old
cs[d].set(row, qrow[i++]);
}
assert i == qrow.length;
}
}
}
/**
* Given lower triangular L, solve for Q in QL' = A (LQ' = A') using forward substitution
* Dimensions: A is n by p, Q is n by p, R = L' is p by p
* Input: A (large frame) passed to doAll, where we overwrite each row of A with its row of Q
*/
public static class ForwardSolveInPlace extends MRTask<ForwardSolveInPlace> {
final DataInfo _ainfo; // Info for frame A
final int _ncols; // Number of cols in A
final double[][] _L;
public ForwardSolveInPlace(DataInfo ainfo, double[][] L) {
assert L != null && L.length == L[0].length && L.length == ainfo._adaptedFrame.numCols();
_ainfo = ainfo;
_ncols = ainfo._adaptedFrame.numCols();
_L = L;
}
@Override public void map(Chunk cs[]) {
assert _ncols == cs.length;
// Copy over only A frame chunks
Chunk[] achks = new Chunk[_ncols];
System.arraycopy(cs,0,achks,0,_ncols);
for(int row = 0; row < cs[0]._len; row++) {
// 1) Extract single expanded row of A
DataInfo.Row arow = _ainfo.newDenseRow();
_ainfo.extractDenseRow(achks, row, arow);
if (arow.isBad()) continue;
double[] aexp = arow.expandCats();
// 2) Solve for single row of Q using forward substitution
double[] qrow = forwardSolve(_L, aexp);
assert qrow.length == _ncols;
// 3) Overwrite row of A with row of solved values Q
for(int d = 0; d < _ncols; d++)
cs[d].set(row, qrow[d]);
}
}
}
/** Number of columns with categoricals expanded.
* @return Number of columns with categoricals expanded into indicator columns */
public static int numColsExp(Frame fr, boolean useAllFactorLevels) {
final int uAFL = useAllFactorLevels ? 0 : 1;
int cols = 0;
for( Vec vec : fr.vecs() )
cols += (vec.isCategorical() && vec.domain() != null) ? vec.domain().length - uAFL : 1;
return cols;
}
static double[] multiple(double[] diagYY /*diagonal*/, int nTot, int nVars) {
int ny = diagYY.length;
for (int i = 0; i < ny; i++) {
diagYY[i] *= nTot;
}
double[][] uu = new double[ny][ny];
for (int i = 0; i < ny; i++) {
for (int j = 0; j < ny; j++) {
double yyij = i==j ? diagYY[i] : 0;
uu[i][j] = (yyij - diagYY[i] * diagYY[j] / nTot) / (nVars * Math.sqrt(diagYY[i] * diagYY[j]));
if (Double.isNaN(uu[i][j])) {
uu[i][j] = 0;
}
}
}
EigenvalueDecomposition eigen = new EigenvalueDecomposition(new Matrix(uu));
double[] eigenvalues = eigen.getRealEigenvalues();
double[][] eigenvectors = eigen.getV().getArray();
int maxIndex = ArrayUtils.maxIndex(eigenvalues);
return eigenvectors[maxIndex];
}
static class ProjectOntoEigenVector extends MRTask<ProjectOntoEigenVector> {
ProjectOntoEigenVector(double[] yCoord) { _yCoord = yCoord; }
final double[] _yCoord; //projection
@Override public void map(Chunk[] cs, NewChunk[] nc) {
for (int i=0;i<cs[0]._len;++i) {
if (cs[0].isNA(i)) {
nc[0].addNA();
} else {
int which = (int) cs[0].at8(i);
nc[0].addNum((float)_yCoord[which]); //make it more reproducible by casting to float
}
}
}
}
public static double[] toEigenArray(Vec src) {
Key<Frame> source = Key.make();
Key<Frame> dest = Key.make();
Frame train = new Frame(source, new String[]{"enum"}, new Vec[]{src});
int maxLevels = 1024; // keep eigen projection method reasonably fast
boolean created=false;
if (src.cardinality()>maxLevels) {
DKV.put(train);
created=true;
Log.info("Reducing the cardinality of a categorical column with " + src.cardinality() + " levels to " + maxLevels);
train = Interaction.getInteraction(train._key, train.names(), maxLevels).execImpl(dest).get();
}
DataInfo dinfo = new DataInfo(train, null, 0, true /*_use_all_factor_levels*/, DataInfo.TransformType.NONE,
DataInfo.TransformType.NONE, /* skipMissing */ false, /* imputeMissing */ true,
/* missingBucket */ false, /* weights */ false, /* offset */ false, /* fold */ false, /* intercept */ false);
DKV.put(dinfo);
Gram.GramTask gtsk = new Gram.GramTask(null, dinfo).doAll(dinfo._adaptedFrame);
// round the numbers to float precision to be more reproducible
double[] rounded = new double[gtsk._gram._diag.length];
for (int i = 0; i < rounded.length; ++i)
rounded[i] = (float) gtsk._gram._diag[i];
dinfo.remove();
double [] array = multiple(rounded, (int) gtsk._nobs, 1);
if (created) {
train.remove();
DKV.remove(source);
}
return array;
}
public static Vec toEigen(Vec src) {
Key<Frame> source = Key.make();
Key<Frame> dest = Key.make();
Frame train = new Frame(source, new String[]{"enum"}, new Vec[]{src});
int maxLevels = 1024; // keep eigen projection method reasonably fast
boolean created=false;
if (src.cardinality()>maxLevels) {
DKV.put(train);
created=true;
Log.info("Reducing the cardinality of a categorical column with " + src.cardinality() + " levels to " + maxLevels);
train = Interaction.getInteraction(train._key, train.names(), maxLevels).execImpl(dest).get();
}
Vec v = new ProjectOntoEigenVector(toEigenArray(src)).doAll(1, (byte) 3, train).outputFrame().anyVec();
if (created) {
train.remove();
DKV.remove(source);
}
return v;
}
public static ToEigenVec toEigen = new ToEigenVec() {
@Override public Vec toEigenVec(Vec src) { return toEigen(src); }
};
public static double[] toEigenProjectionArray(Frame _origTrain, Frame _train, boolean expensive) {
if (expensive && _origTrain != null && _origTrain != _train) {
List<Double> projections = new ArrayList<>();
for (int i = 0; i < _origTrain.numCols(); i++) {
Vec currentCol = _origTrain.vec(i);
if (currentCol.isCategorical()) {
double[] actProjection = toEigenArray(currentCol);
for (double v : actProjection) {
projections.add(v);
}
}
}
double[] primitive_projections = new double[projections.size()];
for (int i = 0; i < projections.size(); i++) {
primitive_projections[i] = projections.get(i);
}
return primitive_projections;
}
return null;
}
public static String getMatrixInString(double[][] matrix) {
int dimX = matrix.length;
if (dimX <= 0) {
return "";
}
int dimY = matrix[0].length;
for (int x = 1; x < dimX; x++) {
if (matrix[x].length != dimY) {
return "Stacked matrix!";
}
}
StringBuilder stringOfMatrix = new StringBuilder();
for (int x = 0; x < dimX; x++) {
for (int y = 0; y < dimY; y++) {
if (matrix[x][y] > 0) {
stringOfMatrix.append(' '); // a leading space before a number
}
stringOfMatrix.append(String.format("%.4f\t", matrix[x][y]));
}
stringOfMatrix.append('\n');
}
return stringOfMatrix.toString();
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/word2vec/HBWTree.java
|
package hex.word2vec;
import water.Key;
import water.Keyed;
import java.util.Arrays;
class HBWTree extends Keyed<HBWTree> {
private static final int MAX_CODE_LENGTH = 40;
int[][] _code;
int[][] _point;
public HBWTree() {}
private HBWTree(Key<HBWTree> key, int size) {
super(key);
_code = new int[size][];
_point = new int[size][];
}
static HBWTree buildHuffmanBinaryWordTree(long[] wordCounts) {
final int size = wordCounts.length;
long[] count = new long[size * 2 - 1];
int[] binary = new int[size * 2 - 1];
int[] parent_node = new int[size * 2 - 1];
System.arraycopy(wordCounts, 0, count, 0, size);
Arrays.fill(count, size, size * 2 - 1, (long) 1e15);
// Following algorithm constructs the Huffman tree by adding one node at a time
int min1i, min2i, pos1, pos2;
pos1 = size - 1;
pos2 = size;
for (int i = 0; i < size - 1; i++) {
// First, find two smallest nodes 'min1, min2'
if (pos1 >= 0) {
if (count[pos1] < count[pos2]) {
min1i = pos1;
pos1--;
} else {
min1i = pos2;
pos2++;
}
} else {
min1i = pos2;
pos2++;
}
if (pos1 >= 0) {
if (count[pos1] < count[pos2]) {
min2i = pos1;
pos1--;
} else {
min2i = pos2;
pos2++;
}
} else {
min2i = pos2;
pos2++;
}
count[size + i] = count[min1i] + count[min2i];
parent_node[min1i] = size + i;
parent_node[min2i] = size + i;
binary[min2i] = 1;
}
HBWTree t = new HBWTree(Key.<HBWTree>make(), size);
int[] point = new int[MAX_CODE_LENGTH];
int[] code = new int[MAX_CODE_LENGTH];
// Now assign binary code to each vocabulary word
for (int j = 0; j < size; j++) {
int k = j;
int m = 0;
while (true) {
int val = binary[k];
code[m] = val;
point[m] = k;
m++;
k = parent_node[k];
if (k == 0) break;
}
t._code[j] = new int[m];
t._point[j] = new int[m + 1];
t._point[j][0] = size - 2;
for (int l = 0; l < m; l++) {
t._code[j][m - l - 1] = code[l];
t._point[j][m - l] = point[l] - size;
}
}
return t;
}
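// Illustrative sketch of the construction above (counts are made up): for word counts {5, 2, 1}
// the two rarest words (counts 1 and 2) are merged first into an internal node of count 3, which
// is then merged with the count-5 word; the rarest words therefore get the longest binary codes,
// which is the property hierarchical softmax relies on.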
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/word2vec/Word2Vec.java
|
package hex.word2vec;
import hex.ModelBuilder;
import hex.ModelCategory;
import hex.word2vec.Word2VecModel.*;
import water.Job;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.Log;
import water.util.StringUtils;
import java.util.LinkedList;
import java.util.List;
public class Word2Vec extends ModelBuilder<Word2VecModel,Word2VecModel.Word2VecParameters,Word2VecModel.Word2VecOutput> {
public enum WordModel { SkipGram, CBOW }
public enum NormModel { HSM }
@Override public ModelCategory[] can_build() { return new ModelCategory[]{ ModelCategory.WordEmbedding, }; }
@Override public BuilderVisibility builderVisibility() { return BuilderVisibility.Stable; }
@Override public boolean isSupervised() { return false; }
public Word2Vec(boolean startup_once) {
super(new Word2VecParameters(), startup_once);
}
public Word2Vec(Word2VecModel.Word2VecParameters parms) {
super(parms);
init(false);
}
@Override protected Word2VecDriver trainModelImpl() { return new Word2VecDriver(); }
/** Initialize the ModelBuilder, validating all arguments and preparing the
* training frame. This call is expected to be overridden in the subclasses
* and each subclass will start with "super.init();".
*
* Verify that the first column contains strings. Validate _vec_size, _window_size,
* _sent_sample_rate, _init_learning_rate, and _epochs for values within range.
*/
@Override public void init(boolean expensive) {
super.init(expensive);
if (_parms._train != null) { // Can be called without an existing frame, but when present check for a string col
if (_parms.train().vecs().length == 0 || ! _parms.trainVec().isString())
error("_train", "The first column of the training input frame has to be column of Strings.");
}
if (_parms._vec_size > Word2VecParameters.MAX_VEC_SIZE) error("_vec_size", "Requested vector size of "+_parms._vec_size +" in Word2Vec, exceeds limit of "+Word2VecParameters.MAX_VEC_SIZE+".");
if (_parms._vec_size < 1) error("_vec_size", "Requested vector size of " + _parms._vec_size + " in Word2Vec, is not allowed.");
if (_parms._window_size < 1) error("_window_size", "Non-positive window size not allowed for Word2Vec. Expected value > 0, received " + _parms._window_size);
if (_parms._sent_sample_rate < 0.0) error("_sent_sample_rate", "Negative sentence sample rate not allowed for Word2Vec. Expected a value > 0.0, received " + _parms._sent_sample_rate);
if (_parms._init_learning_rate < 0.0) error("_init_learning_rate", "Negative learning rate not allowed for Word2Vec. Expected a value > 0.0, received " + _parms._init_learning_rate);
if (_parms._epochs < 1) error("_epochs", "Non-positive epoch count not allowed for Word2Vec. Expected value > 0, received " + _parms._epochs);
}
@Override
protected void ignoreBadColumns(int npredictors, boolean expensive) {
// Do not remove String columns - these are the ones we need!
}
@Override
public boolean haveMojo() { return true; }
private class Word2VecDriver extends Driver {
@Override public void computeImpl() {
Word2VecModel model = null;
try {
init(! _parms.isPreTrained()); // expensive == true IFF the model is not pre-trained
// The model to be built
model = new Word2VecModel(_job._result, _parms, new Word2VecOutput(Word2Vec.this));
model.delete_and_lock(_job);
if (_parms.isPreTrained())
convertToModel(_parms._pre_trained.get(), model);
else
trainModel(model);
} finally {
if (model != null) model.unlock(_job);
}
}
private void trainModel(Word2VecModel model) {
Log.info("Word2Vec: Initializing model training.");
Word2VecModelInfo modelInfo = Word2VecModelInfo.createInitialModelInfo(_parms);
// main loop
Log.info("Word2Vec: Starting to train model, " + _parms._epochs + " epochs.");
long tstart = System.currentTimeMillis();
for (int i = 0; i < _parms._epochs; i++) {
long start = System.currentTimeMillis();
WordVectorTrainer trainer = new WordVectorTrainer(_job, modelInfo).doAll(_parms.trainVec());
long stop = System.currentTimeMillis();
long actProcessedWords = trainer._processedWords;
long estProcessedWords = trainer._nodeProcessedWords._val;
if (estProcessedWords < 0.95 * actProcessedWords)
Log.warn("Estimated number processed words " + estProcessedWords +
" is significantly lower than actual number processed words " + actProcessedWords);
trainer.updateModelInfo(modelInfo);
model.update(_job); // Early version of model is visible
double duration = (stop - start) / 1000.0;
Log.info("Epoch " + i + " took " + duration + "s; Words trained/s: " + actProcessedWords / duration);
model._output._epochs=i;
if (stop_requested()) { // always complete at least one iteration so a null model is never returned
break;
}
}
long tstop = System.currentTimeMillis();
Log.info("Total time: " + (tstop - tstart) / 1000.0);
Log.info("Finished training the Word2Vec model.");
model.buildModelOutput(modelInfo);
}
private void convertToModel(Frame preTrained, Word2VecModel model) {
if (_parms._vec_size != preTrained.numCols() - 1) {
throw new IllegalStateException("Frame with pre-trained model doesn't conform to the specified vector length.");
}
WordVectorConverter result = new WordVectorConverter(_job, _parms._vec_size, (int) preTrained.numRows()).doAll(preTrained);
model.buildModelOutput(result._words, result._syn0);
}
}
public static Job<Word2VecModel> fromPretrainedModel(Frame model) {
if (model == null || model.numCols() < 2) {
throw new IllegalArgumentException("Frame representing an external word2vec needs to have at least 2 columns.");
}
if (model.vec(0).get_type() != Vec.T_STR) {
throw new IllegalArgumentException("First column is expected to contain the dictionary words and be represented as String, " +
"instead got " + model.vec(0).get_type_str());
}
List<String> colErrors = new LinkedList<>();
for (int i = 1; i < model.numCols(); i++) {
if (model.vec(i).get_type() != Vec.T_NUM) {
colErrors.add(model.name(i) + " (type " + model.vec(i).get_type_str() + ")");
}
}
if (! colErrors.isEmpty()) {
throw new IllegalArgumentException("All components of word2vec mapping are expected to be numeric. Invalid columns: " +
StringUtils.join(", ", colErrors));
}
Word2VecModel.Word2VecParameters p = new Word2VecModel.Word2VecParameters();
p._vec_size = model.numCols() - 1;
p._pre_trained = model._key;
return new Word2Vec(p).trainModel();
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/word2vec/Word2VecModel.java
|
package hex.word2vec;
import hex.Model;
import hex.ModelCategory;
import hex.ModelMetrics;
import hex.word2vec.Word2VecModel.Word2VecOutput;
import hex.word2vec.Word2VecModel.Word2VecParameters;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.parser.BufferedString;
import water.util.IcedHashMap;
import water.util.IcedHashMapGeneric;
import water.util.IcedLong;
import water.util.RandomUtils;
import java.util.*;
public class Word2VecModel extends Model<Word2VecModel, Word2VecParameters, Word2VecOutput> {
public Word2VecModel(Key<Word2VecModel> selfKey, Word2VecParameters params, Word2VecOutput output) {
super(selfKey, params, output);
assert(Arrays.equals(_key._kb, selfKey._kb));
}
@Override
public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) {
throw H2O.unimpl("No Model Metrics for Word2Vec.");
}
@Override
public double[] score0(Chunk[] cs, int foo, double data[], double preds[]) {
throw H2O.unimpl();
}
@Override
protected double[] score0(double data[], double preds[]) {
throw H2O.unimpl();
}
@Override
public Word2VecMojoWriter getMojo() {
return new Word2VecMojoWriter(this);
}
/**
* Converts this word2vec model to a Frame.
* @return Frame made of columns: Word, V1, .., Vn. Word column holds the vocabulary associated
* with this word2vec model, and columns V1, .., Vn represent the embeddings in the n-dimensional space.
*/
public Frame toFrame() {
Vec zeroVec = null;
try {
zeroVec = Vec.makeZero(_output._words.length);
byte[] types = new byte[1 + _output._vecSize];
Arrays.fill(types, Vec.T_NUM);
types[0] = Vec.T_STR;
String[] colNames = new String[types.length];
colNames[0] = "Word";
for (int i = 1; i < colNames.length; i++)
colNames[i] = "V" + i;
return new ConvertToFrameTask(this).doAll(types, zeroVec).outputFrame(colNames, null);
} finally {
if (zeroVec != null) zeroVec.remove();
}
}
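// Hedged usage sketch (variable names are illustrative): for a model with _vecSize == 3 the call
//   Frame embeddings = w2vModel.toFrame();
// yields columns {"Word", "V1", "V2", "V3"} with one row per vocabulary word.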
private static class ConvertToFrameTask extends MRTask<ConvertToFrameTask> {
private Key<Word2VecModel> _modelKey;
private transient Word2VecModel _model;
public ConvertToFrameTask(Word2VecModel model) { _modelKey = model._key; }
@Override
protected void setupLocal() { _model = DKV.getGet(_modelKey); }
@Override
public void map(Chunk[] cs, NewChunk[] ncs) {
assert cs.length == 1;
assert ncs.length == _model._output._vecSize + 1;
Chunk chk = cs[0];
int wordOffset = (int) chk.start();
int vecPos = _model._output._vecSize * wordOffset;
for (int i = 0; i < chk._len; i++) {
ncs[0].addStr(_model._output._words[wordOffset + i]);
for (int j = 1; j < ncs.length; j++)
ncs[j].addNum(_model._output._vecs[vecPos++]);
}
}
}
/**
* Takes an input string and returns the word vector for that word.
*
* @param target - String of desired word
* @return float array containing the word vector values or null if
* the word isn't present in the vocabulary.
*/
public float[] transform(String target) {
return transform(new BufferedString(target));
}
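// Hedged usage sketch (the word "water" is just an example):
//   float[] vec = model.transform("water"); // null when "water" is not in the vocabulary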
private float[] transform(BufferedString word) {
if (! _output._vocab.containsKey(word))
return null;
int wordIdx = _output._vocab.get(word);
return Arrays.copyOfRange(_output._vecs, wordIdx * _output._vecSize, (wordIdx + 1) * _output._vecSize);
}
public enum AggregateMethod { NONE, AVERAGE }
public Frame transform(Vec wordVec, AggregateMethod aggregateMethod) {
if (wordVec.get_type() != Vec.T_STR) {
throw new IllegalArgumentException("Expected a string vector, got " + wordVec.get_type_str() + " vector.");
}
byte[] types = new byte[_output._vecSize];
Arrays.fill(types, Vec.T_NUM);
MRTask<?> transformTask = aggregateMethod == AggregateMethod.AVERAGE ?
new Word2VecAggregateTask(this) : new Word2VecTransformTask(this);
return transformTask.doAll(types, wordVec).outputFrame(Key.<Frame>make(), null, null);
}
private static class Word2VecTransformTask extends MRTask<Word2VecTransformTask> {
private Word2VecModel _model;
public Word2VecTransformTask(Word2VecModel model) { _model = model; }
@Override
public void map(Chunk[] cs, NewChunk[] ncs) {
assert cs.length == 1;
Chunk chk = cs[0];
BufferedString tmp = new BufferedString();
for (int i = 0; i < chk._len; i++) {
if (chk.isNA(i)) {
for (NewChunk nc : ncs) nc.addNA();
} else {
BufferedString word = chk.atStr(tmp, i);
float[] vs = _model.transform(word);
if (vs == null)
for (NewChunk nc : ncs) nc.addNA();
else
for (int j = 0; j < ncs.length; j++)
ncs[j].addNum(vs[j]);
}
}
}
}
private static class Word2VecAggregateTask extends MRTask<Word2VecAggregateTask> {
private Word2VecModel _model;
public Word2VecAggregateTask(Word2VecModel model) { _model = model; }
@Override
public void map(Chunk[] cs, NewChunk[] ncs) {
assert cs.length == 1;
Chunk chk = cs[0];
// skip words that belong to a sequence started in a previous chunk
int offset = 0;
if (chk.cidx() > 0) { // first chunk doesn't have an offset
int naPos = findNA(chk);
if (naPos < 0)
return; // chunk doesn't contain an end of sequence and should not be processed
offset = naPos + 1;
}
// process this chunk, if the last sequence is not terminated in this chunk, roll-over to the next chunk
float[] aggregated = new float[ncs.length];
int seqLength = 0;
boolean seqOpen = false;
BufferedString tmp = new BufferedString();
chunkLoop: do {
for (int i = offset; i < chk._len; i++) {
if (chk.isNA(i)) {
writeAggregate(seqLength, aggregated, ncs);
Arrays.fill(aggregated, 0.0f);
seqLength = 0;
seqOpen = false;
if (chk != cs[0])
break chunkLoop; // we just closed a sequence that was left open in one of the previous chunks
} else {
BufferedString word = chk.atStr(tmp, i);
float[] vs = _model.transform(word);
if (vs != null) {
for (int j = 0; j < ncs.length; j++)
aggregated[j] += vs[j];
seqLength++;
}
seqOpen = true;
}
}
offset = 0;
} while ((chk = chk.nextChunk()) != null);
// last sequence doesn't have to be terminated by NA
if (seqOpen)
writeAggregate(seqLength, aggregated, ncs);
}
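// Illustrative example of the sequence semantics above: for a string column [w1, w2, NA, w3, w4]
// the task emits one row with the averaged vectors of (w1, w2) followed by one row for (w3, w4);
// words missing from the vocabulary simply do not contribute to the average.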
private void writeAggregate(int seqLength, float[] aggregated, NewChunk[] ncs) {
if (seqLength == 0)
for (NewChunk nc : ncs) nc.addNA();
else
for (int j = 0; j < ncs.length; j++)
ncs[j].addNum(aggregated[j] / seqLength);
}
private int findNA(Chunk chk) {
for (int i = 0; i < chk._len; i++)
if (chk.isNA(i)) return i;
return -1;
}
}
/**
* Find synonyms (i.e. word-vectors with the highest cosine similarity)
*
* @param target String of desired word
* @param cnt Number of synonyms to find
*/
public Map<String, Float> findSynonyms(String target, int cnt) {
float[] vec = transform(target);
if ((vec == null) || (cnt == 0))
return Collections.emptyMap();
int[] synonyms = new int[cnt];
float[] scores = new float[cnt];
int min = 0;
for (int i = 0; i < cnt; i++) {
synonyms[i] = i;
scores[i] = cosineSimilarity(vec, i * vec.length, _output._vecs);
if (scores[i] < scores[min])
min = i;
}
final int vocabSize = _output._vocab.size();
for (int i = cnt; i < vocabSize; i++) {
float score = cosineSimilarity(vec, i * vec.length, _output._vecs);
if ((score <= scores[min]) || (score >= 0.999999))
continue;
synonyms[min] = i;
scores[min] = score;
// find a new min
min = 0;
for (int j = 1; j < cnt; j++)
if (scores[j] < scores[min])
min = j;
}
Map<String, Float> result = new HashMap<>(cnt);
for (int i = 0; i < cnt; i++)
result.put(_output._words[synonyms[i]].toString(), scores[i]);
return result;
}
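// Hedged usage sketch (word and count are illustrative):
//   Map<String, Float> synonyms = model.findSynonyms("water", 5); // 5 nearest words by cosine similarity
// Scores >= 0.999999 are skipped in the scan above, which is intended to keep the query word itself
// out of the result.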
/**
* Basic calculation of cosine similarity
* @param target - a word vector
* @param pos - position in vecs
* @param vecs - learned word vectors
* @return cosine similarity between the two word vectors
*/
private float cosineSimilarity(float[] target, int pos, float[] vecs) {
float dotProd = 0, tsqr = 0, csqr = 0;
for(int i = 0; i < target.length; i++) {
dotProd += target[i] * vecs[pos + i];
tsqr += Math.pow(target[i], 2);
csqr += Math.pow(vecs[pos + i], 2);
}
return (float) (dotProd / (Math.sqrt(tsqr) * Math.sqrt(csqr)));
}
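// Worked example (illustration only): for target = {1, 0} and a stored vector {0.5, 0.5} at pos,
// dotProd = 0.5, |target| = 1 and |stored| = sqrt(0.5), so the similarity is 0.5 / sqrt(0.5) ~= 0.707;
// identical directions give 1.0 and orthogonal vectors give 0.0.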
void buildModelOutput(Word2VecModelInfo modelInfo) {
IcedHashMapGeneric<BufferedString, Integer> vocab = ((Vocabulary) DKV.getGet(modelInfo._vocabKey))._data;
BufferedString[] words = new BufferedString[vocab.size()];
for (BufferedString str : vocab.keySet())
words[vocab.get(str)] = str;
_output._vecSize = _parms._vec_size;
_output._vecs = modelInfo._syn0;
_output._words = words;
_output._vocab = vocab;
}
void buildModelOutput(BufferedString[] words, float[] syn0) {
IcedHashMapGeneric<BufferedString, Integer> vocab = new IcedHashMapGeneric<>();
for (int i = 0; i < words.length; i++)
vocab.put(words[i], i);
_output._vecSize = _parms._vec_size;
_output._vecs = syn0;
_output._words = words;
_output._vocab = vocab;
}
public static class Word2VecParameters extends Model.Parameters {
public String algoName() { return "Word2Vec"; }
public String fullName() { return "Word2Vec"; }
public String javaName() { return Word2VecModel.class.getName(); }
@Override public long progressUnits() {
return isPreTrained() ? _pre_trained.get().anyVec().nChunks() : train().vec(0).nChunks() * _epochs;
}
static final int MAX_VEC_SIZE = 10000;
public Word2Vec.WordModel _word_model = Word2Vec.WordModel.SkipGram;
public Word2Vec.NormModel _norm_model = Word2Vec.NormModel.HSM;
public int _min_word_freq = 5;
public int _vec_size = 100;
public int _window_size = 5;
public int _epochs = 5;
public float _init_learning_rate = 0.025f;
public float _sent_sample_rate = 1e-3f;
public Key<Frame> _pre_trained; // key of a frame that contains a pre-trained word2vec model
boolean isPreTrained() { return _pre_trained != null; }
Vec trainVec() { return train().vec(0); }
}
public static class Word2VecOutput extends Model.Output {
public int _vecSize;
public int _epochs;
public Word2VecOutput(Word2Vec b) { super(b); }
public BufferedString[] _words;
public float[] _vecs;
public IcedHashMapGeneric<BufferedString, Integer> _vocab;
@Override public ModelCategory getModelCategory() {
return ModelCategory.WordEmbedding;
}
}
public static class Word2VecModelInfo extends Iced {
long _vocabWordCount;
long _totalProcessedWords = 0L;
float[] _syn0, _syn1;
Key<HBWTree> _treeKey;
Key<Vocabulary> _vocabKey;
Key<WordCounts> _wordCountsKey;
private Word2VecParameters _parameters;
public final Word2VecParameters getParams() { return _parameters; }
public Word2VecModelInfo() {}
private Word2VecModelInfo(Word2VecParameters params, WordCounts wordCounts) {
_parameters = params;
long vocabWordCount = 0L;
List<Map.Entry<BufferedString, IcedLong>> wordCountList = new ArrayList<>(wordCounts._data.size());
for (Map.Entry<BufferedString, IcedLong> wc : wordCounts._data.entrySet()) {
if (wc.getValue()._val >= _parameters._min_word_freq) {
wordCountList.add(wc);
vocabWordCount += wc.getValue()._val;
}
}
Collections.sort(wordCountList, new Comparator<Map.Entry<BufferedString, IcedLong>>() {
@Override
public int compare(Map.Entry<BufferedString, IcedLong> o1, Map.Entry<BufferedString, IcedLong> o2) {
long x = o1.getValue()._val; long y = o2.getValue()._val;
return (x < y) ? -1 : ((x == y) ? 0 : 1);
}
});
int vocabSize = wordCountList.size();
long[] countAry = new long[vocabSize];
Vocabulary vocab = new Vocabulary(new IcedHashMapGeneric<BufferedString, Integer>());
int idx = 0;
for (Map.Entry<BufferedString, IcedLong> wc : wordCountList) {
countAry[idx] = wc.getValue()._val;
vocab._data.put(wc.getKey(), idx++);
}
HBWTree t = HBWTree.buildHuffmanBinaryWordTree(countAry);
_vocabWordCount = vocabWordCount;
_treeKey = publish(t);
_vocabKey = publish(vocab);
_wordCountsKey = publish(wordCounts);
//initialize weights to random values
Random rand = RandomUtils.getRNG(0xDECAF, 0xDA7A);
_syn1 = MemoryManager.malloc4f(_parameters._vec_size * vocabSize);
_syn0 = MemoryManager.malloc4f(_parameters._vec_size * vocabSize);
for (int i = 0; i < _parameters._vec_size * vocabSize; i++) _syn0[i] = (rand.nextFloat() - 0.5f) / _parameters._vec_size;
}
public static Word2VecModelInfo createInitialModelInfo(Word2VecParameters params) {
Vec v = params.trainVec();
WordCounts wordCounts = new WordCounts(new WordCountTask().doAll(v)._counts);
return new Word2VecModelInfo(params, wordCounts);
}
private static <T extends Keyed<T>> Key<T> publish(T keyed) {
Scope.track_generic(keyed);
DKV.put(keyed);
return keyed._key;
}
}
// wraps Vocabulary map into a Keyed object
public static class Vocabulary extends Keyed<Vocabulary> {
IcedHashMapGeneric<BufferedString, Integer> _data;
Vocabulary(IcedHashMapGeneric<BufferedString, Integer> data) {
super(Key.<Vocabulary>make());
_data = data;
}
}
// wraps Word-Count map into a Keyed object
public static class WordCounts extends Keyed<WordCounts> {
IcedHashMap<BufferedString, IcedLong> _data;
WordCounts(IcedHashMap<BufferedString, IcedLong> data) {
super(Key.<WordCounts>make());
_data = data;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/word2vec/Word2VecMojoWriter.java
|
package hex.word2vec;
import hex.ModelMojoWriter;
import water.MemoryManager;
import water.parser.BufferedString;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
* MOJO serializer for word2vec model.
*/
public class Word2VecMojoWriter extends ModelMojoWriter<Word2VecModel, Word2VecModel.Word2VecParameters, Word2VecModel.Word2VecOutput> {
@SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler
public Word2VecMojoWriter() {}
public Word2VecMojoWriter(Word2VecModel model) {
super(model);
}
@Override public String mojoVersion() {
return "1.00";
}
@Override
protected void writeModelData() throws IOException {
writekv("vec_size", model._parms._vec_size);
writekv("vocab_size", model._output._words.length);
// Vocabulary
startWritingTextFile("vocabulary");
for (BufferedString word : model._output._words) {
writeln(word.toString(), true);
}
finishWritingTextFile();
// Vectors
ByteBuffer bb = ByteBuffer.wrap(MemoryManager.malloc1(model._output._vecs.length * 4));
for (float v : model._output._vecs)
bb.putFloat(v);
writeblob("vectors", bb.array());
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/word2vec/WordCountTask.java
|
package hex.word2vec;
import water.AutoBuffer;
import water.MRTask;
import water.fvec.Chunk;
import water.parser.BufferedString;
import water.util.IcedHashMap;
import water.util.IcedLong;
import java.util.HashMap;
/**
* Reduce a string column of a given Vec to a set of unique words
* and their frequency counts
*
* Currently the map is consolidated on the calling node. Given
* the limited vocabulary size of most languages, the resulting
* map is presumed to fit easily in memory.
*/
public class WordCountTask extends MRTask<WordCountTask> {
// OUT
IcedHashMap<BufferedString, IcedLong> _counts;
WordCountTask() {}
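// Usage as in Word2VecModelInfo.createInitialModelInfo (variable name is illustrative):
//   IcedHashMap<BufferedString, IcedLong> counts = new WordCountTask().doAll(stringVec)._counts;
// where stringVec is a String Vec holding one word per row, with NAs acting as sentence delimiters.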
@Override
public void map(Chunk cs) {
_counts = new IcedHashMap<>();
for (int i = 0; i < cs._len; i++) {
if (cs.isNA(i)) continue;
BufferedString str = cs.atStr(new BufferedString(), i);
IcedLong count = _counts.get(str);
if (count != null)
count._val++;
else
_counts.put(str, new IcedLong(1));
}
}
@Override
public void reduce(WordCountTask other) {
assert _counts != null;
assert other._counts != null;
for (BufferedString str : other._counts.keySet()) {
IcedLong myCount = _counts.get(str);
if (myCount == null)
_counts.put(str, other._counts.get(str));
else
myCount._val += other._counts.get(str)._val;
}
}
public final AutoBuffer write_impl(AutoBuffer ab) {
if( _counts != null )
for (BufferedString key : _counts.keySet())
ab.put2((char)key.length()).putA1(key.getBuffer(), key.getOffset(), key.getOffset() + key.length())
.put8(_counts.get(key)._val);
return ab.put2((char)65535); // End of map marker
}
public final WordCountTask read_impl(AutoBuffer ab) {
_counts = new IcedHashMap<>();
int len;
while ((len = ab.get2()) != 65535) { // Read until end-of-map marker
byte[] bs = ab.getA1(len);
long cnt = ab.get8();
_counts.put(new BufferedString(new String(bs)), new IcedLong(cnt));
}
return this;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/word2vec/WordVectorConverter.java
|
package hex.word2vec;
import water.Job;
import water.MRTask;
import water.MemoryManager;
import water.fvec.Chunk;
import water.parser.BufferedString;
import water.util.ArrayUtils;
public class WordVectorConverter extends MRTask<WordVectorConverter> {
// Job
private final Job<Word2VecModel> _job;
private final int _wordVecSize;
private final int _vocabWordCount;
float[] _syn0;
BufferedString[] _words;
public WordVectorConverter(Job<Word2VecModel> job, int wordVecSize, int vocabWordCount) {
super(null);
_job = job;
_wordVecSize = wordVecSize;
_vocabWordCount = vocabWordCount;
}
@Override
protected void setupLocal() {
_syn0 = MemoryManager.malloc4f(_wordVecSize * _vocabWordCount);
_words = new BufferedString[_vocabWordCount];
}
@Override
public void map(Chunk[] cs) {
int wordPos = (int) cs[0].start();
int pos = _wordVecSize * wordPos;
for (int i = 0; i < cs[0]._len; i++) {
_words[wordPos++] = cs[0].atStr(new BufferedString(), i);
for (int j = 1; j < cs.length; j++)
_syn0[pos++] = (float) cs[j].atd(i);
}
_job.update(1);
}
@Override
public void reduce(WordVectorConverter other) {
if (_syn0 != other._syn0) {
ArrayUtils.add(_syn0, other._syn0);
for (int i = 0; i < _vocabWordCount; i++) {
if (other._words[i] != null)
_words[i] = other._words[i];
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/word2vec/WordVectorTrainer.java
|
package hex.word2vec;
import water.DKV;
import water.Job;
import water.Key;
import water.MRTask;
import water.fvec.Chunk;
import water.parser.BufferedString;
import hex.word2vec.Word2VecModel.*;
import water.util.ArrayUtils;
import water.util.IcedHashMap;
import water.util.IcedHashMapGeneric;
import water.util.IcedLong;
import java.util.Iterator;
public class WordVectorTrainer extends MRTask<WordVectorTrainer> {
private static final int MAX_SENTENCE_LEN = 1000;
private static final int EXP_TABLE_SIZE = 1000;
private static final int MAX_EXP = 6;
private static final float[] _expTable = calcExpTable();
private static final float LEARNING_RATE_MIN_FACTOR = 0.0001F; // learning rate stops decreasing at (initLearningRate * this factor)
// Job
private final Job<Word2VecModel> _job;
// Params
private final Word2Vec.WordModel _wordModel;
private final int _wordVecSize, _windowSize, _epochs;
private final float _initLearningRate;
private final float _sentSampleRate;
private final long _vocabWordCount;
// Model IN
private final Key<Vocabulary> _vocabKey;
private final Key<WordCounts> _wordCountsKey;
private final Key<HBWTree> _treeKey;
private final long _prevTotalProcessedWords;
// Model IN & OUT
// _syn0 represents the matrix of synaptic weights connecting the input layer of the NN to the hidden layer,
// similarly _syn1 corresponds to the weight matrix of the synapses connecting the hidden layer to the output layer
// both matrices are represented in a 1D array, where M[i,j] == array[i * VEC_SIZE + j]
float[] _syn0, _syn1;
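// Worked example of the flattened layout (numbers are illustrative): with _vec_size == 100,
// the weight connecting input word 3 to hidden unit 7 is _syn0[3 * 100 + 7].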
long _processedWords = 0L;
// Node-Local (Shared)
IcedLong _nodeProcessedWords; // mutable long, approximates the total number of words processed by this node
private transient IcedHashMapGeneric<BufferedString, Integer> _vocab;
private transient IcedHashMap<BufferedString, IcedLong> _wordCounts;
private transient int[][] _HBWTCode;
private transient int[][] _HBWTPoint;
private float _curLearningRate;
private long _seed = System.nanoTime();
public WordVectorTrainer(Job<Word2VecModel> job, Word2VecModelInfo input) {
super(null);
_job = job;
_treeKey = input._treeKey;
_vocabKey = input._vocabKey;
_wordCountsKey = input._wordCountsKey;
// Params
_wordModel = input.getParams()._word_model;
_wordVecSize = input.getParams()._vec_size;
_windowSize = input.getParams()._window_size;
_sentSampleRate = input.getParams()._sent_sample_rate;
_epochs = input.getParams()._epochs;
_initLearningRate = input.getParams()._init_learning_rate;
_vocabWordCount = input._vocabWordCount;
_prevTotalProcessedWords = input._totalProcessedWords;
_syn0 = input._syn0;
_syn1 = input._syn1;
_curLearningRate = calcLearningRate(_initLearningRate, _epochs, _prevTotalProcessedWords, _vocabWordCount);
}
@Override
protected void setupLocal() {
_vocab = ((Vocabulary) DKV.getGet(_vocabKey))._data;
_wordCounts = ((WordCounts) DKV.getGet(_wordCountsKey))._data;
HBWTree t = DKV.getGet(_treeKey);
_HBWTCode = t._code;
_HBWTPoint = t._point;
_nodeProcessedWords = new IcedLong(0L);
}
// Precompute the exp() table
private static float[] calcExpTable() {
float[] expTable = new float[EXP_TABLE_SIZE];
for (int i = 0; i < EXP_TABLE_SIZE; i++) {
expTable[i] = (float) Math.exp((i / (float) EXP_TABLE_SIZE * 2 - 1) * MAX_EXP);
expTable[i] = expTable[i] / (expTable[i] + 1); // Precompute f(x) = x / (x + 1)
}
return expTable;
}
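// Worked example (illustration only): index i covers x = (i / (float) EXP_TABLE_SIZE * 2 - 1) * MAX_EXP,
// i.e. x in [-MAX_EXP, MAX_EXP), and the stored value is exp(x) / (exp(x) + 1) = sigmoid(x);
// e.g. i = 500 corresponds to x = 0 and a stored value of 0.5.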
@Override public void map(Chunk chk) {
final int winSize = _windowSize, vecSize = _wordVecSize;
float[] neu1 = new float[vecSize];
float[] neu1e = new float[vecSize];
ChunkSentenceIterator sentIter = new ChunkSentenceIterator(chk);
int wordCount = 0;
while (sentIter.hasNext()) {
int sentLen = sentIter.nextLength();
int[] sentence = sentIter.next();
for (int sentIdx = 0; sentIdx < sentLen; sentIdx++) {
int curWord = sentence[sentIdx];
int bagSize = 0;
if (_wordModel == Word2Vec.WordModel.CBOW) {
for (int j = 0; j < vecSize; j++) neu1[j] = 0;
for (int j = 0; j < vecSize; j++) neu1e[j] = 0;
}
// for each item in the window (except curWord), update neu1 vals
int winSizeMod = cheapRandInt(winSize);
for (int winIdx = winSizeMod; winIdx < winSize * 2 + 1 - winSizeMod; winIdx++) {
if (winIdx != winSize) { // skips curWord in sentence
int winWordSentIdx = sentIdx - winSize + winIdx;
if (winWordSentIdx < 0 || winWordSentIdx >= sentLen) continue;
int winWord = sentence[winWordSentIdx];
if (_wordModel == Word2Vec.WordModel.SkipGram)
skipGram(curWord, winWord, neu1e);
else { // CBOW
for (int j = 0; j < vecSize; j++) neu1[j] += _syn0[j + winWord * vecSize];
bagSize++;
}
}
} // end for each item in the window
if (_wordModel == Word2Vec.WordModel.CBOW && bagSize > 0) {
CBOW(curWord, sentence, sentIdx, sentLen, winSizeMod, bagSize, neu1, neu1e);
}
wordCount++;
// update learning rate
if (wordCount % 10000 == 0) {
_nodeProcessedWords._val += 10000;
long totalProcessedWordsEst = _prevTotalProcessedWords + _nodeProcessedWords._val;
_curLearningRate = calcLearningRate(_initLearningRate, _epochs, totalProcessedWordsEst, _vocabWordCount);
}
} // for each item in the sentence
} // while more sentences
_processedWords = wordCount;
_nodeProcessedWords._val += wordCount % 10000;
_job.update(1);
}
@Override public void reduce(WordVectorTrainer other) {
_processedWords += other._processedWords;
if (_syn0 != other._syn0) { // other task worked on a different syn0
float c = (float) other._processedWords / _processedWords;
ArrayUtils.add(1.0f - c, _syn0, c, other._syn0);
ArrayUtils.add(1.0f - c, _syn1, c, other._syn1);
// for diagnostics only
_nodeProcessedWords._val += other._nodeProcessedWords._val;
}
}
private void skipGram(int curWord, int winWord, float[] neu1e) {
final int vecSize = _wordVecSize;
final int l1 = winWord * vecSize;
for (int i = 0; i < vecSize; i++) neu1e[i] = 0;
hierarchicalSoftmaxSG(curWord, l1, neu1e);
// Learned weights input -> hidden
for (int i = 0; i < vecSize; i++) _syn0[i + l1] += neu1e[i];
}
private void hierarchicalSoftmaxSG(final int targetWord, final int l1, float[] neu1e) {
final int vecSize = _wordVecSize, tWrdCodeLen = _HBWTCode[targetWord].length;
final float alpha = _curLearningRate;
for (int i = 0; i < tWrdCodeLen; i++) {
int l2 = _HBWTPoint[targetWord][i] * vecSize;
float f = 0;
// Propagate hidden -> output (calc sigmoid)
for (int j = 0; j < vecSize; j++) f += _syn0[j + l1] * _syn1[j + l2];
if (f <= -MAX_EXP) continue;
else if (f >= MAX_EXP) continue;
else f = _expTable[(int) ((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
float gradient = (1 - _HBWTCode[targetWord][i] - f) * alpha;
// Propagate errors output -> hidden
for (int j = 0; j < vecSize; j++) neu1e[j] += gradient * _syn1[j + l2];
// Learn weights hidden -> output
for (int j = 0; j < vecSize; j++) _syn1[j + l2] += gradient * _syn0[j + l1];
}
}
private void CBOW(
int curWord,
int[] sentence,
int sentIdx,
int sentLen,
int winSizeMod,
int bagSize,
float[] neu1,
float[] neu1e
) {
int winWordSentIdx, winWord;
final int vecSize = _wordVecSize, winSize = _windowSize;
final int curWinSize = winSize * 2 + 1 - winSizeMod; // same window bounds as used when accumulating neu1 in map()
for (int i = 0; i < vecSize; i++) neu1[i] /= bagSize;
hierarchicalSoftmaxCBOW(curWord, neu1, neu1e);
// hidden -> in
for (int winIdx = winSizeMod; winIdx < curWinSize; winIdx++) {
if (winIdx != winSize) {
winWordSentIdx = sentIdx - winSize + winIdx;
if (winWordSentIdx < 0 || winWordSentIdx >= sentLen) continue;
winWord = sentence[winWordSentIdx];
for (int i = 0; i < vecSize; i++) _syn0[i + winWord * vecSize] += neu1e[i];
}
}
}
private void hierarchicalSoftmaxCBOW(final int targetWord, float[] neu1, float[] neu1e) {
final int vecSize = _wordVecSize, tWrdCodeLen = _HBWTCode[targetWord].length;
final float alpha = _curLearningRate;
float gradient, f = 0;
int l2;
for (int i = 0; i < tWrdCodeLen; i++, f = 0) {
l2 = _HBWTPoint[targetWord][i] * vecSize;
// Propagate hidden -> output (calc sigmoid)
for (int j = 0; j < vecSize; j++) f += neu1[j] * _syn1[j + l2];
if (f <= -MAX_EXP) continue;
else if (f >= MAX_EXP) continue;
else f = _expTable[(int) ((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
gradient = (1 - _HBWTCode[targetWord][i] - f) * alpha;
// Propagate errors output -> hidden
for (int j = 0; j < vecSize; j++) neu1e[j] += gradient * _syn1[j + l2];
// Learn weights hidden -> output
for (int j = 0; j < vecSize; j++) _syn1[j + l2] += gradient * neu1[j];
}
}
/**
* Calculates a new global learning rate for the next round
* of map/reduce calls.
* The learning rate is a coefficient that controls the amount that
* newly learned information affects current learned information.
*/
private static float calcLearningRate(float initLearningRate, int epochs, long totalProcessed, long vocabWordCount) {
float rate = initLearningRate * (1 - totalProcessed / (float) (epochs * vocabWordCount + 1));
if (rate < initLearningRate * LEARNING_RATE_MIN_FACTOR) rate = initLearningRate * LEARNING_RATE_MIN_FACTOR;
return rate;
}
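// Worked example (numbers are illustrative): with initLearningRate = 0.025f, epochs = 5 and
// vocabWordCount = 1,000,000, after totalProcessed = 2,500,000 words the rate is roughly
// 0.025 * (1 - 2.5e6 / 5e6) = 0.0125; it never falls below initLearningRate * LEARNING_RATE_MIN_FACTOR.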
public void updateModelInfo(Word2VecModelInfo modelInfo) {
modelInfo._syn0 = _syn0;
modelInfo._syn1 = _syn1;
modelInfo._totalProcessedWords += _processedWords;
}
/**
* This is cheap and moderate in quality.
*
* @param max - Upper range limit.
* @return int between 0-(max-1).
*/
private int cheapRandInt(int max) {
_seed ^= ( _seed << 21);
_seed ^= ( _seed >>> 35);
_seed ^= ( _seed << 4);
int r = (int) _seed % max;
return r > 0 ? r : -r;
}
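// Note on the generator above: it is a xorshift-style PRNG (shift amounts 21, 35, 4) and the final
// sign flip maps the modulo result into [0, max). A hedged observation: the cast-and-modulo step is
// slightly biased for most values of max, which is acceptable here because it only drives the random
// window shrinking and the sentence sub-sampling.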
private class ChunkSentenceIterator implements Iterator<int[]> {
private Chunk _chk;
private int _pos = 0;
private int _len = -1;
private int[] _sent = new int[MAX_SENTENCE_LEN + 1];
private ChunkSentenceIterator(Chunk chk) { _chk = chk; }
@Override
public boolean hasNext() {
return nextLength() >= 0;
}
private int nextLength() {
if (_len >= 0)
return _len;
if (_pos >= _chk._len)
return -1;
_len = 0;
BufferedString tmp = new BufferedString();
for (; _pos < _chk._len && ! _chk.isNA(_pos) && _len < MAX_SENTENCE_LEN; _pos++) {
BufferedString str = _chk.atStr(tmp, _pos);
if (! _vocab.containsKey(str)) continue; // not in the vocab, skip
if (_sentSampleRate > 0) { // sub-sampling while creating a sentence
long count = _wordCounts.get(str)._val;
float ran = (float) ((Math.sqrt(count / (_sentSampleRate * _vocabWordCount)) + 1) * (_sentSampleRate * _vocabWordCount) / count);
if (ran * 65536 < cheapRandInt(0xFFFF)) continue;
}
_sent[_len++] = _vocab.get(tmp);
}
_sent[_len] = -1;
_pos++;
return _len;
}
@Override
public int[] next() {
if (hasNext()) {
_len = -1;
return _sent;
}
else
return null;
}
@Override
public void remove() { throw new UnsupportedOperationException("Remove is not supported"); } // should never be called
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/api/ModelMetricsAnomalyV3.java
|
package water.api;
import hex.tree.isofor.ModelMetricsAnomaly;
import water.api.schemas3.ModelMetricsBaseV3;
public class ModelMetricsAnomalyV3 extends ModelMetricsBaseV3<ModelMetricsAnomaly, ModelMetricsAnomalyV3> {
@API(help = "Mean Anomaly Score.", direction = API.Direction.OUTPUT)
public double mean_score;
@API(help = "Mean Normalized Anomaly Score.", direction = API.Direction.OUTPUT)
public double mean_normalized_score;
@Override
public ModelMetricsAnomalyV3 fillFromImpl(ModelMetricsAnomaly modelMetrics) {
ModelMetricsAnomalyV3 mma = super.fillFromImpl(modelMetrics);
return mma;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/api/ModelMetricsGLRMV99.java
|
package water.api;
import hex.glrm.ModelMetricsGLRM;
import water.api.schemas3.ModelMetricsBaseV3;
public class ModelMetricsGLRMV99 extends ModelMetricsBaseV3<ModelMetricsGLRM, ModelMetricsGLRMV99> {
@API(help="Sum of Squared Error (Numeric Cols)")
public double numerr;
@API(help="Misclassification Error (Categorical Cols)")
public double caterr;
@API(help="Number of Non-Missing Numeric Values")
public long numcnt;
@API(help="Number of Non-Missing Categorical Values")
public long catcnt;
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/api/ModelMetricsPCAV3.java
|
package water.api;
import hex.pca.ModelMetricsPCA;
import water.api.schemas3.ModelMetricsBaseV3;
public class ModelMetricsPCAV3 extends ModelMetricsBaseV3<ModelMetricsPCA, ModelMetricsPCAV3> {
// Empty since PCA has no model metrics
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/api/ModelMetricsSVDV99.java
|
package water.api;
import hex.svd.SVDModel.ModelMetricsSVD;
import water.api.schemas3.ModelMetricsBaseV3;
public class ModelMetricsSVDV99 extends ModelMetricsBaseV3<ModelMetricsSVD, ModelMetricsSVDV99> {
// Empty since SVD has no model metrics
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/rapids/prims/AstPredictedVsActualByVar.java
|
package water.rapids.prims;
import hex.Model;
import water.MRTask;
import water.fvec.*;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
import water.util.ArrayUtils;
import java.util.Arrays;
public class AstPredictedVsActualByVar extends AstPrimitive<AstPredictedVsActualByVar> {
@Override
public String[] args() {
return new String[]{"model"};
}
@Override
public int nargs() {
return 1 + 4;
} // (predicted.vs.actual.by.var model frame variable predicted)
@Override
public String str() {
return "predicted.vs.actual.by.var";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot[] asts) {
Model<?, ?, ?> model = (Model<?, ?, ?>) stk.track(asts[1].exec(env)).getModel();
if (!model.isSupervised()) {
throw new IllegalArgumentException("Only supervised models are supported for calculating predicted v actual");
}
if (model._output.isMultinomialClassifier()) {
throw new IllegalArgumentException("Multinomial classification models are not supported by predicted v actual");
}
Frame frame = stk.track(asts[2].exec(env)).getFrame();
String variable = stk.track(asts[3].exec(env)).getStr();
if (frame.vec(variable) == null) {
throw new IllegalArgumentException("Frame doesn't contain column '" + variable + "'.");
}
Frame preds = stk.track(asts[4].exec(env)).getFrame();
if (frame.numRows() != preds.numRows()) {
throw new IllegalArgumentException("Input frame and frame of predictions need to have same number of columns.");
}
Vec predicted = preds.vec(0);
Vec actual = frame.vec(model._output.responseName());
Vec weights = frame.vec(model._output.weightsName());
if ((actual.domain() != predicted.domain()) && !Arrays.equals(actual.domain(), predicted.domain())) { // null or equals
throw new IllegalArgumentException("Actual and predicted need to have identical domain.");
}
Vec varVec = frame.vec(variable);
Vec[] vs = new Vec[]{predicted, actual, varVec};
if (weights != null) {
vs = ArrayUtils.append(vs, weights);
}
PredictedVsActualByVar pva = new PredictedVsActualByVar(varVec).doAll(vs);
String[] domainExt = ArrayUtils.append(varVec.domain(), null); // last one for NA
Vec[] resultVecs = new Vec[]{
Vec.makeVec(domainExt, Vec.newKey()),
Vec.makeVec(pva._preds, Vec.newKey()),
Vec.makeVec(pva._acts, Vec.newKey())
};
Frame result = new Frame(new String[]{variable, preds.name(0), "actual"}, resultVecs);
return new ValFrame(result);
}
static class PredictedVsActualByVar extends MRTask<PredictedVsActualByVar> {
private final int _s;
private double[] _preds;
private double[] _acts;
private double[] _weights;
public PredictedVsActualByVar(Vec varVec) {
_s = varVec.domain().length + 1;
}
@Override
public void map(Chunk[] cs) {
_preds = new double[_s];
_acts = new double[_s];
_weights = new double[_s];
Chunk predChunk = cs[0];
Chunk actChunk = cs[1];
Chunk varChunk = cs[2];
Chunk weightChunk = cs.length == 4 ? cs[3] : new C0DChunk(1, predChunk._len);
for (int i = 0; i < actChunk._len; i++) {
if (actChunk.isNA(i) || weightChunk.atd(i) == 0)
continue;
int level = varChunk.isNA(i) ? _s - 1 : (int) varChunk.atd(i);
double weight = weightChunk.atd(i);
_preds[level] += weight * predChunk.atd(i);
_acts[level] += weight * actChunk.atd(i);
_weights[level] += weight;
}
}
@Override
public void reduce(PredictedVsActualByVar mrt) {
_preds = ArrayUtils.add(_preds, mrt._preds);
_acts = ArrayUtils.add(_acts, mrt._acts);
_weights = ArrayUtils.add(_weights, mrt._weights);
}
@Override
protected void postGlobal() {
for (int i = 0; i < _weights.length; i++) {
if (_weights[i] == 0)
continue;
_preds[i] /= _weights[i];
_acts[i] /= _weights[i];
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/rapids/prims/AstSetCalibrationModel.java
|
package water.rapids.prims;
import hex.Model;
import hex.tree.CalibrationHelper;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValStr;
public class AstSetCalibrationModel extends AstPrimitive<AstSetCalibrationModel> {
@Override
public String[] args() {
return new String[]{"model", "calibrationModel"};
}
@Override
public int nargs() {
return 1 + 2;
} // (set.calibration.model model calibrationModel)
@Override
public String str() {
return "set.calibration.model";
}
@Override
public ValStr apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Model<?, ?, ?> model = (Model<?, ?, ?>) stk.track(asts[1].exec(env)).getModel();
if (! (model._output instanceof CalibrationHelper.OutputWithCalibration)) {
throw new IllegalArgumentException("Models of type " + model._parms.algoName() + " don't support calibration.");
}
Model<?, ?, ?> calibrationModel = (Model<?, ?, ?>) stk.track(asts[2].exec(env)).getModel();
try {
model.write_lock();
((CalibrationHelper.OutputWithCalibration) model._output).setCalibrationModel(calibrationModel);
model.update();
} finally {
model.unlock();
}
return new ValStr("OK");
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/rapids/prims
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/rapids/prims/isotonic/AstPoolAdjacentViolators.java
|
package water.rapids.prims.isotonic;
import hex.isotonic.PoolAdjacentViolatorsDriver;
import water.fvec.Frame;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
public class AstPoolAdjacentViolators extends AstPrimitive<AstPoolAdjacentViolators> {
@Override
public String[] args() {
return new String[]{"frame"};
}
@Override
public int nargs() {
return 1 + 1;
} // (isotonic.pav frame )
@Override
public String str() {
return "isotonic.pav";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot[] asts) {
Frame frame = stk.track(asts[1].exec(env)).getFrame();
Frame result = PoolAdjacentViolatorsDriver.runPAV(frame);
return new ValFrame(result);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/rapids/prims
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/rapids/prims/rulefit/AstPredictRule.java
|
package water.rapids.prims.rulefit;
import hex.rulefit.RuleFitModel;
import water.fvec.Frame;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
/**
* Evaluates validity of the given rules on the given data.
*/
public class AstPredictRule extends AstPrimitive<AstPredictRule> {
@Override
public String[] args() {
return new String[]{"model"};
}
@Override
public int nargs() {
return 1 + 3;
} // (rulefit.predict.rules model frame ruleIds)
@Override
public String str() {
return "rulefit.predict.rules";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
RuleFitModel model = (RuleFitModel) stk.track(asts[1].exec(env)).getModel();
Frame frame = stk.track(asts[2].exec(env)).getFrame();
String[] ruleIds = stk.track(asts[3].exec(env)).getStrs();
Frame result = model.predictRules(frame, ruleIds);
return new ValFrame(result);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/rapids/prims
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/rapids/prims/tree/AstTreeUpdateWeights.java
|
package water.rapids.prims.tree;
import hex.Model;
import water.fvec.Frame;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValStr;
/**
* Re-weights auxiliary trees in a TreeModel
*/
public class AstTreeUpdateWeights extends AstPrimitive<AstTreeUpdateWeights> {
@Override
public String[] args() {
return new String[]{"model"};
}
@Override
public int nargs() {
return 1 + 3;
} // (tree.update.weights model frame weightsColumn)
@Override
public String str() {
return "tree.update.weights";
}
@Override
public ValStr apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Model.UpdateAuxTreeWeights model = (Model.UpdateAuxTreeWeights) stk.track(asts[1].exec(env)).getModel();
Frame frame = stk.track(asts[2].exec(env)).getFrame();
String weightsColumn = stk.track(asts[3].exec(env)).getStr();
Model.UpdateAuxTreeWeights.UpdateAuxTreeWeightsReport report = model.updateAuxTreeWeights(frame, weightsColumn);
if (report.hasWarnings()) {
return new ValStr(makeShortWarning(report));
} else
return new ValStr("OK");
}
private static String makeShortWarning(Model.UpdateAuxTreeWeights.UpdateAuxTreeWeightsReport report) {
return "Some of the updated nodes have zero weights " +
"(eg.: tree #" + (report._warn_trees[0] + 1) + ", class #" + (report._warn_classes[0] + 1) + ").";
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/rapids/prims
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/rapids/prims/word2vec/AstWord2VecToFrame.java
|
package water.rapids.prims.word2vec;
import hex.word2vec.Word2VecModel;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
/**
* Converts a word2vec model to a Frame
*/
public class AstWord2VecToFrame extends AstPrimitive<AstWord2VecToFrame> {
@Override
public String[] args() {
return new String[]{"model"};
}
@Override
public int nargs() {
return 1 + 1;
} // (word2vec.to.frame model)
@Override
public String str() {
return "word2vec.to.frame";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Word2VecModel model = (Word2VecModel) stk.track(asts[1].exec(env)).getModel();
return new ValFrame(model.toFrame());
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/water/tools/MojoConvertTool.java
|
package water.tools;
import hex.generic.Generic;
import hex.generic.GenericModel;
import water.ExtensionManager;
import water.H2O;
import water.Paxos;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
/**
* Convenience command line tool for converting H2O MOJO to POJO
*/
public class MojoConvertTool {
private final File _mojo_file;
private final File _pojo_file;
public MojoConvertTool(File mojoFile, File pojoFile) {
_mojo_file = mojoFile;
_pojo_file = pojoFile;
}
void convert() throws IOException {
GenericModel mojo = Generic.importMojoModel(_mojo_file.getAbsolutePath(), true);
String pojo = mojo.toJava(false, true);
Path pojoPath = Paths.get(_pojo_file.toURI());
Files.write(pojoPath, pojo.getBytes(StandardCharsets.UTF_8));
}
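// Hedged usage sketch (file names are illustrative), matching the usage string printed by mainInternal:
//   java -cp h2o.jar water.tools.MojoConvertTool gbm_mojo.zip GbmPojo.java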
public static void main(String[] args) throws IOException {
try {
mainInternal(args);
}
catch (IllegalArgumentException e) {
System.err.println(e.getMessage());
System.exit(1);
}
}
public static void mainInternal(String[] args) throws IOException {
if (args.length < 2 || args[0] == null || args[1] == null) {
throw new IllegalArgumentException("java -cp h2o.jar " + MojoConvertTool.class.getName() + " source_mojo.zip target_pojo.java");
}
File mojoFile = new File(args[0]);
if (!mojoFile.exists() || !mojoFile.isFile()) {
throw new IllegalArgumentException("Specified MOJO file (" + mojoFile.getAbsolutePath() + ") doesn't exist!");
}
File pojoFile = new File(args[1]);
if (pojoFile.isDirectory() || (pojoFile.getParentFile() != null && !pojoFile.getParentFile().isDirectory())) {
throw new IllegalArgumentException("Invalid target POJO file (" + pojoFile.getAbsolutePath() + ")! Please specify a file in an existing directory.");
}
System.out.println();
System.out.println("Starting local H2O instance to facilitate MOJO to POJO conversion.");
System.out.println();
H2O.main(new String[]{"-disable_web", "-ip", "localhost", "-disable_net"});
ExtensionManager.getInstance().registerRestApiExtensions();
H2O.waitForCloudSize(1, 60_000);
Paxos.lockCloud("H2O is started in a single node configuration.");
System.out.println();
System.out.println("Converting " + mojoFile + " to " + pojoFile + "...");
new MojoConvertTool(mojoFile, pojoFile).convert();
System.out.println("DONE");
}
}
|
0
|
java-sources/ai/h2o/h2o-app/3.46.0.7
|
java-sources/ai/h2o/h2o-app/3.46.0.7/water/H2OApp.java
|
package water;
public class H2OApp extends H2OStarter {
public static int BAD_JAVA_VERSION_RETURN_CODE = 3;
public static void main(String[] args) {
if (H2O.checkUnsupportedJava(args))
System.exit(BAD_JAVA_VERSION_RETURN_CODE);
start(args, System.getProperty("user.dir"));
}
@SuppressWarnings("unused")
public static void main2(String relativeResourcePath) {
start(new String[0], relativeResourcePath);
}
}
|
0
|
java-sources/ai/h2o/h2o-app/3.46.0.7
|
java-sources/ai/h2o/h2o-app/3.46.0.7/water/H2OClientApp.java
|
package water;
/**
* Simple client application wrapper.
*
* CAUTION:
* This is used by Sparkling Water and other environments where an H2O client node is needed.
* A client node is a node that can launch and monitor work, but doesn't do any work.
* Don't use this unless you know what you are doing. You probably really just want to use
* H2OApp directly.
*/
public class H2OClientApp {
public static void main(String[] args) {
// Prepend "-client" parameter.
String[] args2 = new String[args.length + 1];
args2[0] = "-client";
int i = 1;
for (String s : args) {
args2[i] = s;
i++;
}
// Call regular H2OApp.
H2OApp.main(args2);
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/Algo.java
|
package ai.h2o.automl;
import water.ExtensionManager;
import water.H2O;
import static water.util.OSUtils.isLinux;
// if we need to make the Algo list dynamic, we should just turn this enum into a class...
// implementation of AutoML.algo can be safely removed once we get rid of this interface: current purpose
// is to keep backward compatibility with {@link AutoML.algo}
public enum Algo implements IAlgo {
GLM,
DRF,
GBM,
DeepLearning,
StackedEnsemble,
XGBoost() {
private static final String DISTRIBUTED_XGBOOST_ENABLED = H2O.OptArgs.SYSTEM_PROP_PREFIX + "automl.xgboost.multinode.enabled";
@Override
public boolean enabled() {
// on single node, XGBoost is enabled by default if the extension is enabled.
// on multinode, the same condition applies, but only on Linux by default: needs to be activated explicitly for other platforms.
boolean enabledOnMultinode = Boolean.parseBoolean(System.getProperty(DISTRIBUTED_XGBOOST_ENABLED, isLinux() ? "true" : "false"));
return ExtensionManager.getInstance().isCoreExtensionEnabled(this.name()) && (H2O.CLOUD.size() == 1 || enabledOnMultinode);
}
},
;
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/AutoML.java
|
package ai.h2o.automl;
import ai.h2o.automl.AutoMLBuildSpec.AutoMLBuildModels;
import ai.h2o.automl.AutoMLBuildSpec.AutoMLInput;
import ai.h2o.automl.AutoMLBuildSpec.AutoMLStoppingCriteria;
import ai.h2o.automl.StepResultState.ResultStatus;
import ai.h2o.automl.WorkAllocations.Work;
import ai.h2o.automl.events.EventLog;
import ai.h2o.automl.events.EventLogEntry;
import ai.h2o.automl.events.EventLogEntry.Stage;
import ai.h2o.automl.leaderboard.ModelGroup;
import ai.h2o.automl.leaderboard.ModelProvider;
import ai.h2o.automl.leaderboard.ModelStep;
import ai.h2o.automl.preprocessing.PreprocessingStep;
import hex.Model;
import hex.ScoreKeeper.StoppingMetric;
import hex.genmodel.utils.DistributionFamily;
import hex.leaderboard.*;
import hex.splitframe.ShuffleSplitFrame;
import water.*;
import water.automl.api.schemas3.AutoMLV99;
import water.exceptions.H2OAutoMLException;
import water.exceptions.H2OIllegalArgumentException;
import water.fvec.Frame;
import water.fvec.Vec;
import water.logging.Logger;
import water.logging.LoggerFactory;
import water.nbhm.NonBlockingHashMap;
import water.util.*;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static ai.h2o.automl.AutoMLBuildSpec.AutoMLStoppingCriteria.AUTO_STOPPING_TOLERANCE;
/**
* H2O AutoML
*
* AutoML is used for automating the machine learning workflow, which includes automatic training and
* tuning of many models within a user-specified time-limit. Stacked Ensembles will be automatically
* trained on collections of individual models to produce highly predictive ensemble models which, in most cases,
* will be the top performing models in the AutoML Leaderboard.
*/
public final class AutoML extends Lockable<AutoML> implements TimedH2ORunnable {
public enum Constraint {
MODEL_COUNT,
TIMEOUT,
FAILURE_COUNT,
}
public static final Comparator<AutoML> byStartTime = Comparator.comparing(a -> a._startTime);
public static final String keySeparator = "@@";
private static final int DEFAULT_MAX_CONSECUTIVE_MODEL_FAILURES = 10;
private static final boolean verifyImmutability = true; // check that trainingFrame hasn't been messed with
private static final ThreadLocal<SimpleDateFormat> timestampFormatForKeys = ThreadLocal.withInitial(() -> new SimpleDateFormat("yyyyMMdd_HHmmss"));
private static final Logger log = LoggerFactory.getLogger(AutoML.class);
private static LeaderboardExtensionsProvider createLeaderboardExtensionProvider(AutoML automl) {
final Key<AutoML> amlKey = automl._key;
return new LeaderboardExtensionsProvider() {
@Override
public LeaderboardCell[] createExtensions(Model model) {
final AutoML aml = amlKey.get();
ModelingStep step = aml.session().getModelingStep(model.getKey());
return new LeaderboardCell[] {
new TrainingTime(model),
new ScoringTimePerRow(model, aml.getLeaderboardFrame() == null ? aml.getTrainingFrame() : aml.getLeaderboardFrame()),
// new ModelSize(model._key)
new AlgoName(model),
new ModelProvider(model, step),
new ModelStep(model, step),
new ModelGroup(model, step),
};
}
};
}
/**
* Instantiate an AutoML object and start it running. Progress can be tracked via its job().
*
   * @param buildSpec the complete specification (input data, constraints, algos) of the AutoML run to start.
* @return a new running AutoML instance.
*/
public static AutoML startAutoML(AutoMLBuildSpec buildSpec) {
AutoML aml = new AutoML(buildSpec);
aml.submit();
return aml;
}
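  /**
   * Minimal usage sketch for {@link #startAutoML(AutoMLBuildSpec)}: the frame key and column name below
   * are hypothetical placeholders, and the stopping criteria are arbitrary illustrative values.
   */
  @SuppressWarnings("unused")
  private static AutoML exampleStartAutoML() {
    AutoMLBuildSpec spec = new AutoMLBuildSpec();
    spec.input_spec.training_frame = Key.make("train.hex"); // assumes a frame already parsed under this key
    spec.input_spec.response_column = "response";           // hypothetical response column name
    spec.build_control.stopping_criteria.set_max_models(10);        // stop after 10 models...
    spec.build_control.stopping_criteria.set_max_runtime_secs(600); // ...or after 10 minutes, whichever comes first
    AutoML aml = startAutoML(spec);
    aml.get(); // block until the AutoML job completes
    return aml;
  }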
static AutoML startAutoML(AutoMLBuildSpec buildSpec, boolean testMode) {
AutoML aml = new AutoML(buildSpec);
aml._testMode = testMode;
aml.submit();
return aml;
}
@Override
public Class<AutoMLV99.AutoMLKeyV3> makeSchema() {
return AutoMLV99.AutoMLKeyV3.class;
}
private AutoMLBuildSpec _buildSpec; // all parameters for doing this AutoML build
private Frame _origTrainingFrame; // untouched original training frame
public AutoMLBuildSpec getBuildSpec() { return _buildSpec; }
public Frame getTrainingFrame() { return _trainingFrame; }
public Frame getValidationFrame() { return _validationFrame; }
public Frame getBlendingFrame() { return _blendingFrame; }
public Frame getLeaderboardFrame() { return _leaderboardFrame; }
public Vec getResponseColumn() { return _responseColumn; }
public Vec getFoldColumn() { return _foldColumn; }
public Vec getWeightsColumn() { return _weightsColumn; }
public DistributionFamily getDistributionFamily() {
return _distributionFamily;
}
public double[] getClassDistribution() {
if (_classDistribution == null)
_classDistribution = (new MRUtils.ClassDist(_responseColumn)).doAll(_responseColumn).dist();
return _classDistribution;
}
public StepDefinition[] getActualModelingSteps() { return _actualModelingSteps; }
Frame _trainingFrame; // required training frame: can add and remove Vecs, but not mutate Vec data in place.
Frame _validationFrame; // optional validation frame; the training_frame is split automatically if it's not specified.
Frame _blendingFrame; // optional blending frame for SE (usually if xval is disabled).
Frame _leaderboardFrame; // optional test frame used for leaderboard scoring; if not specified, leaderboard will use xval metrics.
Vec _responseColumn;
Vec _foldColumn;
Vec _weightsColumn;
DistributionFamily _distributionFamily;
private double[] _classDistribution;
Date _startTime;
Countdown _runCountdown;
Job<AutoML> _job; // the Job object for the build of this AutoML.
WorkAllocations _workAllocations;
StepDefinition[] _actualModelingSteps; // the output definition, listing only the steps that were actually used
int _maxConsecutiveModelFailures = DEFAULT_MAX_CONSECUTIVE_MODEL_FAILURES;
AtomicInteger _consecutiveModelFailures = new AtomicInteger();
AtomicLong _incrementalSeed = new AtomicLong();
private String _runId;
private ModelingStepsExecutor _modelingStepsExecutor;
private AutoMLSession _session;
private Leaderboard _leaderboard;
private EventLog _eventLog;
// check that we haven't messed up the original Frame
private Vec[] _originalTrainingFrameVecs;
private String[] _originalTrainingFrameNames;
private long[] _originalTrainingFrameChecksums;
private transient NonBlockingHashMap<Key, String> _trackedKeys = new NonBlockingHashMap<>();
private transient ModelingStep[] _executionPlan;
private transient PreprocessingStep[] _preprocessing;
transient StepResultState[] _stepsResults;
private boolean _useAutoBlending;
private boolean _testMode; // when on, internal states are kept for inspection
/**
* DO NOT USE explicitly: for schema/reflection only.
*/
public AutoML() {
super(null);
}
public AutoML(AutoMLBuildSpec buildSpec) {
this(new Date(), buildSpec);
}
public AutoML(Key<AutoML> key, AutoMLBuildSpec buildSpec) {
this(key, new Date(), buildSpec);
}
/**
   * @deprecated use {@link #AutoML(AutoMLBuildSpec)} instead
*/
@Deprecated
public AutoML(Date startTime, AutoMLBuildSpec buildSpec) {
this(null, startTime, buildSpec);
}
/**
   * @deprecated use {@link #AutoML(Key, AutoMLBuildSpec)} instead
*/
@Deprecated
public AutoML(Key<AutoML> key, Date startTime, AutoMLBuildSpec buildSpec) {
super(key == null ? buildSpec.makeKey() : key);
try {
_startTime = startTime;
_session = AutoMLSession.getInstance(_key.toString());
_eventLog = EventLog.getOrMake(_key);
eventLog().info(Stage.Workflow, "Project: "+buildSpec.project());
validateBuildSpec(buildSpec);
_buildSpec = buildSpec;
// now that buildSpec is validated, we can assign it: all future logic can now safely access parameters through _buildSpec.
_runId = _buildSpec.instanceId();
_runCountdown = Countdown.fromSeconds(_buildSpec.build_control.stopping_criteria.max_runtime_secs());
_incrementalSeed.set(_buildSpec.build_control.stopping_criteria.seed());
prepareData();
initLeaderboard();
initPreprocessing();
_modelingStepsExecutor = new ModelingStepsExecutor(_leaderboard, _eventLog, _runCountdown);
} catch (Exception e) {
delete(); //cleanup potentially leaked keys
throw e;
}
}
/**
   * Validates all buildSpec parameters, dynamically providing reasonable defaults or cleaning parameters if necessary.
*
* Ideally, validation should be fast as we should be able to call it in the future
* directly from client (e.g. Flow) to validate parameters before starting the AutoML run.
* That's also the reason why validate methods should not modify data,
* only possibly read them to validate parameters that may depend on data.
*
* In the future, we may also reuse ModelBuilder.ValidationMessage to return all validation results at once to the client (cf. ModelBuilder).
*
* @param buildSpec all the AutoML parameters to validate.
*/
private void validateBuildSpec(AutoMLBuildSpec buildSpec) {
validateInput(buildSpec.input_spec);
validateModelValidation(buildSpec);
validateModelBuilding(buildSpec.build_models);
validateEarlyStopping(buildSpec.build_control.stopping_criteria, buildSpec.input_spec);
validateReproducibility(buildSpec);
}
private void validateInput(AutoMLInput input) {
if (DKV.getGet(input.training_frame) == null)
throw new H2OIllegalArgumentException("No training data has been specified, either as a path or a key, or it is not available anymore.");
final Frame trainingFrame = DKV.getGet(input.training_frame);
final Frame validationFrame = DKV.getGet(input.validation_frame);
final Frame blendingFrame = DKV.getGet(input.blending_frame);
final Frame leaderboardFrame = DKV.getGet(input.leaderboard_frame);
Map<String, Frame> compatibleFrames = new LinkedHashMap<String, Frame>(){{
put("training", trainingFrame);
put("validation", validationFrame);
put("blending", blendingFrame);
put("leaderboard", leaderboardFrame);
}};
for (Map.Entry<String, Frame> entry : compatibleFrames.entrySet()) {
Frame frame = entry.getValue();
if (frame != null && frame.find(input.response_column) < 0) {
throw new H2OIllegalArgumentException("Response column '"+input.response_column+"' is not in the "+entry.getKey()+" frame.");
}
}
if (input.fold_column != null && trainingFrame.find(input.fold_column) < 0) {
throw new H2OIllegalArgumentException("Fold column '"+input.fold_column+"' is not in the training frame.");
}
if (input.weights_column != null && trainingFrame.find(input.weights_column) < 0) {
throw new H2OIllegalArgumentException("Weights column '"+input.weights_column+"' is not in the training frame.");
}
if (input.ignored_columns != null) {
List<String> ignoredColumns = new ArrayList<>(Arrays.asList(input.ignored_columns));
Map<String, String> doNotIgnore = new LinkedHashMap<String, String>(){{
put("response_column", input.response_column);
put("fold_column", input.fold_column);
put("weights_column", input.weights_column);
}};
for (Map.Entry<String, String> entry: doNotIgnore.entrySet()) {
if (entry.getValue() != null && ignoredColumns.contains(entry.getValue())) {
eventLog().info(Stage.Validation,
"Removing "+entry.getKey()+" '"+entry.getValue()+"' from list of ignored columns.");
ignoredColumns.remove(entry.getValue());
}
}
input.ignored_columns = ignoredColumns.toArray(new String[0]);
}
}
private void validateModelValidation(AutoMLBuildSpec buildSpec) {
if (buildSpec.input_spec.fold_column != null) {
eventLog().warn(Stage.Validation, "Fold column " + buildSpec.input_spec.fold_column + " will be used for cross-validation. nfolds parameter will be ignored.");
buildSpec.build_control.nfolds = 0;
} else if (buildSpec.build_control.nfolds == -1) {
Frame trainingFrame = DKV.getGet(buildSpec.input_spec.training_frame);
long nrows = trainingFrame.numRows();
long ncols = trainingFrame.numCols() - (buildSpec.getNonPredictors().length +
(buildSpec.input_spec.ignored_columns != null ? buildSpec.input_spec.ignored_columns.length : 0));
double max_runtime = buildSpec.build_control.stopping_criteria.max_runtime_secs();
long nthreads = Stream.of(H2O.CLOUD.members())
.mapToInt((h2o) -> h2o._heartbeat._nthreads)
.sum();
boolean use_blending = ((ncols * nrows) / (max_runtime * nthreads)) > 2064;
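      // Worked example of the heuristic above (illustrative numbers): 1,000,000 rows x 20 predictor columns
      // with a 3600s budget on 8 total threads gives 20,000,000 / 28,800 ~= 694 <= 2064, so cross-validation
      // is kept; ten times as many rows gives ~6,944 > 2064, so blending is preferred instead.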
if (max_runtime > 0 && use_blending &&
!(buildSpec.build_control.keep_cross_validation_predictions ||
buildSpec.build_control.keep_cross_validation_models ||
buildSpec.build_control.keep_cross_validation_fold_assignment)) {
_useAutoBlending = true;
buildSpec.build_control.nfolds = 0;
eventLog().info(Stage.Validation, "Blending will be used.");
} else {
buildSpec.build_control.nfolds = 5;
eventLog().info(Stage.Validation, "5-fold cross-validation will be used.");
}
} else if (buildSpec.build_control.nfolds <= 1) {
eventLog().info(Stage.Validation, "Cross-validation disabled by user: no fold column nor nfolds > 1.");
buildSpec.build_control.nfolds = 0;
}
if ((buildSpec.build_control.nfolds > 0 || buildSpec.input_spec.fold_column != null)
&& DKV.getGet(buildSpec.input_spec.validation_frame) != null) {
eventLog().warn(Stage.Validation, "User specified a validation frame with cross-validation still enabled."
+ " Please note that the models will still be validated using cross-validation only,"
+ " the validation frame will be used to provide purely informative validation metrics on the trained models.");
}
if (Arrays.asList(
DistributionFamily.fractionalbinomial,
DistributionFamily.quasibinomial,
DistributionFamily.ordinal
).contains(buildSpec.build_control.distribution)) {
throw new H2OIllegalArgumentException("Distribution \"" + buildSpec.build_control.distribution.name() + "\" is not supported in AutoML!");
}
}
private void validateModelBuilding(AutoMLBuildModels modelBuilding) {
if (modelBuilding.exclude_algos != null && modelBuilding.include_algos != null) {
throw new H2OIllegalArgumentException("Parameters `exclude_algos` and `include_algos` are mutually exclusive: please use only one of them if necessary.");
}
if (modelBuilding.exploitation_ratio > 1) {
throw new H2OIllegalArgumentException("`exploitation_ratio` must be between 0 and 1.");
}
}
private void validateEarlyStopping(AutoMLStoppingCriteria stoppingCriteria, AutoMLInput input) {
if (stoppingCriteria.max_models() <= 0 && stoppingCriteria.max_runtime_secs() <= 0) {
stoppingCriteria.set_max_runtime_secs(3600);
eventLog().info(Stage.Validation, "User didn't set any runtime constraints (max runtime or max models), using default 1h time limit");
}
Frame refFrame = DKV.getGet(input.training_frame);
if (stoppingCriteria.stopping_tolerance() == AUTO_STOPPING_TOLERANCE) {
stoppingCriteria.set_default_stopping_tolerance_for_frame(refFrame);
eventLog().info(Stage.Validation, "Setting stopping tolerance adaptively based on the training frame: "+stoppingCriteria.stopping_tolerance());
} else {
eventLog().info(Stage.Validation, "Stopping tolerance set by the user: "+stoppingCriteria.stopping_tolerance());
double defaultTolerance = AutoMLStoppingCriteria.default_stopping_tolerance_for_frame(refFrame);
if (stoppingCriteria.stopping_tolerance() < 0.7 * defaultTolerance){
eventLog().warn(Stage.Validation, "Stopping tolerance set by the user is < 70% of the recommended default of "+defaultTolerance+", so models may take a long time to converge or may not converge at all.");
}
}
}
private void validateReproducibility(AutoMLBuildSpec buildSpec) {
eventLog().info(Stage.Validation, "Build control seed: " + buildSpec.build_control.stopping_criteria.seed() +
(buildSpec.build_control.stopping_criteria.seed() == -1 ? " (random)" : ""));
}
private void initLeaderboard() {
String sortMetric = _buildSpec.input_spec.sort_metric;
sortMetric = sortMetric == null || StoppingMetric.AUTO.name().equalsIgnoreCase(sortMetric) ? null : sortMetric.toLowerCase();
if ("deviance".equalsIgnoreCase(sortMetric)) {
sortMetric = "mean_residual_deviance"; //compatibility with names used in leaderboard
}
_leaderboard = Leaderboard.getInstance(_key.toString(), eventLog().asLogger(Stage.ModelTraining), _leaderboardFrame, sortMetric, Leaderboard.ScoreData.auto);
if (null != _leaderboard) {
eventLog().warn(Stage.Workflow, "New models will be added to existing leaderboard "+_key.toString()
+" (leaderboard frame="+(_leaderboardFrame == null ? null : _leaderboardFrame._key)+") with already "+_leaderboard.getModelKeys().length+" models.");
} else {
_leaderboard = Leaderboard.getOrMake(_key.toString(), eventLog().asLogger(Stage.ModelTraining), _leaderboardFrame, sortMetric, Leaderboard.ScoreData.auto);
}
_leaderboard.setExtensionsProvider(createLeaderboardExtensionProvider(this));
}
private void initPreprocessing() {
_preprocessing = _buildSpec.build_models.preprocessing == null
? null
: Arrays.stream(_buildSpec.build_models.preprocessing)
.map(def -> def.newPreprocessingStep(this))
.toArray(PreprocessingStep[]::new);
}
PreprocessingStep[] getPreprocessing() {
return _preprocessing;
}
ModelingStep[] getExecutionPlan() {
if (_executionPlan == null) {
_executionPlan = session().getModelingStepsRegistry().getOrderedSteps(selectModelingPlan(null), this);
}
return _executionPlan;
}
StepDefinition[] selectModelingPlan(StepDefinition[] plan) {
if (_buildSpec.build_models.modeling_plan == null) {
// as soon as user specifies max_models, consider that user expects reproducibility.
_buildSpec.build_models.modeling_plan = plan != null ? plan
: _buildSpec.build_control.stopping_criteria.max_models() > 0 ? ModelingPlans.REPRODUCIBLE
: ModelingPlans.defaultPlan();
}
return _buildSpec.build_models.modeling_plan;
}
void planWork() {
Set<IAlgo> skippedAlgos = new HashSet<>();
if (_buildSpec.build_models.exclude_algos != null) {
skippedAlgos.addAll(Arrays.asList(_buildSpec.build_models.exclude_algos));
} else if (_buildSpec.build_models.include_algos != null) {
skippedAlgos.addAll(Arrays.asList(Algo.values()));
skippedAlgos.removeAll(Arrays.asList(_buildSpec.build_models.include_algos));
}
for (Algo algo : Algo.values()) {
if (!skippedAlgos.contains(algo) && !algo.enabled()) {
boolean isMultinode = H2O.CLOUD.size() > 1;
_eventLog.warn(Stage.Workflow,
isMultinode ? "AutoML: "+algo.name()+" is not available in multi-node cluster; skipping it."
+ " See http://docs.h2o.ai/h2o/latest-stable/h2o-docs/automl.html#experimental-features for details."
: "AutoML: "+algo.name()+" is not available; skipping it."
);
skippedAlgos.add(algo);
}
}
WorkAllocations workAllocations = new WorkAllocations();
for (ModelingStep step: getExecutionPlan()) {
workAllocations.allocate(step.makeWork());
}
for (IAlgo skippedAlgo : skippedAlgos) {
eventLog().info(Stage.Workflow, "Disabling Algo: "+skippedAlgo+" as requested by the user.");
workAllocations.remove(skippedAlgo);
}
eventLog().debug(Stage.Workflow, "Defined work allocations: "+workAllocations);
distributeExplorationVsExploitationWork(workAllocations);
eventLog().debug(Stage.Workflow, "Actual work allocations: "+workAllocations);
workAllocations.freeze();
_workAllocations = workAllocations;
}
private void distributeExplorationVsExploitationWork(WorkAllocations allocations) {
if (_buildSpec.build_models.exploitation_ratio < 0) return;
int sumExploration = allocations.remainingWork(ModelingStep.isExplorationWork);
int sumExploitation = allocations.remainingWork(ModelingStep.isExploitationWork);
double explorationRatio = 1 - _buildSpec.build_models.exploitation_ratio;
int newTotal = (int)Math.round(sumExploration / explorationRatio);
int newSumExploration = sumExploration; // keeping the same weight for exploration steps (principle of less surprise).
int newSumExploitation = newTotal - newSumExploration;
for (Work work : allocations.getAllocations(ModelingStep.isExplorationWork)) {
work._weight = (int)Math.round((double)work._weight * newSumExploration/sumExploration);
}
for (Work work : allocations.getAllocations(ModelingStep.isExploitationWork)) {
work._weight = (int)Math.round((double)work._weight * newSumExploitation/sumExploitation);
}
}
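  // Worked example for the redistribution above (illustrative numbers): with exploitation_ratio = 0.1,
  // sumExploration = 90 and sumExploitation = 30, explorationRatio = 0.9 and newTotal = round(90 / 0.9) = 100,
  // so exploration steps keep their total weight of 90 while exploitation weights are rescaled from 30 down to 10.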
/**
* Creates a job for the current AutoML instance and submits it to the task runner.
* Calling this on an already running AutoML instance has no effect.
*/
public void submit() {
if (_job == null || !_job.isRunning()) {
planWork();
H2OJob<AutoML> j = new H2OJob<>(this, _key, _runCountdown.remainingTime());
_job = j._job;
eventLog().info(Stage.Workflow, "AutoML job created: " + EventLogEntry.dateTimeFormat.get().format(_startTime))
.setNamedValue("creation_epoch", _startTime, EventLogEntry.epochFormat.get());
j.start(_workAllocations.remainingWork());
DKV.put(this);
}
}
@Override
public void run() {
_modelingStepsExecutor.start();
eventLog().info(Stage.Workflow, "AutoML build started: " + EventLogEntry.dateTimeFormat.get().format(_runCountdown.start_time()))
.setNamedValue("start_epoch", _runCountdown.start_time(), EventLogEntry.epochFormat.get());
try {
learn();
} finally {
stop();
}
}
@Override
public void stop() {
if (null == _modelingStepsExecutor) return; // already stopped
_modelingStepsExecutor.stop();
eventLog().info(Stage.Workflow, "AutoML build stopped: " + EventLogEntry.dateTimeFormat.get().format(_runCountdown.stop_time()))
.setNamedValue("stop_epoch", _runCountdown.stop_time(), EventLogEntry.epochFormat.get());
eventLog().info(Stage.Workflow, "AutoML build done: built " + _modelingStepsExecutor.modelCount() + " models");
eventLog().info(Stage.Workflow, "AutoML duration: "+ PrettyPrint.msecs(_runCountdown.duration(), true))
.setNamedValue("duration_secs", Math.round(_runCountdown.duration() / 1000.));
log.info("AutoML run summary:");
for (EventLogEntry event : eventLog()._events)
log.info(event.toString());
if (0 < leaderboard().getModelKeys().length) {
log.info(leaderboard().toLogString());
} else {
long max_runtime_secs = (long)_buildSpec.build_control.stopping_criteria.max_runtime_secs();
eventLog().warn(Stage.Workflow, "Empty leaderboard.\n"
+"AutoML was not able to build any model within a max runtime constraint of "+max_runtime_secs+" seconds, "
+"you may want to increase this value before retrying.");
}
session().detach();
possiblyVerifyImmutability();
if (!_buildSpec.build_control.keep_cross_validation_predictions) {
cleanUpModelsCVPreds();
}
}
/**
* Holds until AutoML's job is completed, if a job exists.
*/
public void get() {
if (_job != null) _job.get();
}
public Job<AutoML> job() {
if (null == _job) return null;
return DKV.getGet(_job._key);
}
public Model leader() {
return leaderboard() == null ? null : _leaderboard.getLeader();
}
public AutoMLSession session() {
_session = _session == null ? null : _session._key.get();
if (_session != null) _session.attach(this, false);
return _session;
}
public Leaderboard leaderboard() {
return _leaderboard == null ? null : (_leaderboard = _leaderboard._key.get());
}
public EventLog eventLog() {
return _eventLog == null ? null : (_eventLog = _eventLog._key.get());
}
public String projectName() {
return _buildSpec == null ? null : _buildSpec.project();
}
public long timeRemainingMs() {
return _runCountdown.remainingTime();
}
public int remainingModels() {
if (_buildSpec.build_control.stopping_criteria.max_models() == 0)
return Integer.MAX_VALUE;
return _buildSpec.build_control.stopping_criteria.max_models() - _modelingStepsExecutor.modelCount();
}
@Override
public boolean keepRunning() {
return !_runCountdown.timedOut() && remainingModels() > 0;
}
public boolean isCVEnabled() {
return _buildSpec.build_control.nfolds > 0 || _buildSpec.input_spec.fold_column != null;
}
//***************** Data Preparation Section *****************//
private void optionallySplitTrainingDataset() {
    // If cross-validation is disabled and the validation (or blending) frame is missing,
    // then we need to create them by splitting the original training set;
    // the leaderboard frame falls back to the validation frame when it was not provided either.
if (!isCVEnabled()) {
double[] splitRatios = null;
double validationRatio = null == _validationFrame ? 0.1 : 0;
double blendingRatio = (_useAutoBlending && null == _blendingFrame) ? 0.2 : 0;
if (validationRatio + blendingRatio > 0) {
splitRatios = new double[]{
1 - (validationRatio + blendingRatio),
validationRatio,
blendingRatio
};
        ArrayList<String> frames = new ArrayList<>();
if (null == _validationFrame) frames.add("validation");
if (null == _blendingFrame && _useAutoBlending) frames.add("blending");
String framesStr = String.join(", ", frames);
String ratioStr = Arrays.stream(splitRatios)
.mapToObj(d -> Integer.toString((int) (d * 100)))
.collect(Collectors.joining("/"));
eventLog().info(Stage.DataImport, "Since cross-validation is disabled, and " + framesStr + " frame(s) were not provided, " +
"automatically split the training data into training, " + framesStr + " frame(s) in the ratio " + ratioStr + ".");
}
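      // Worked example: with cross-validation disabled, no validation frame provided and auto-blending on,
      // splitRatios becomes {0.7, 0.1, 0.2} and the event log reports a "70/10/20" split (illustrative case).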
if (splitRatios != null) {
Key[] keys = new Key[] {
Key.make(_runId+"_training_"+ _origTrainingFrame._key),
Key.make(_runId+"_validation_"+ _origTrainingFrame._key),
Key.make(_runId+"_blending_"+ _origTrainingFrame._key),
};
Frame[] splits = ShuffleSplitFrame.shuffleSplitFrame(
_origTrainingFrame,
keys,
splitRatios,
_buildSpec.build_control.stopping_criteria.seed()
);
_trainingFrame = splits[0];
if (_validationFrame == null && splits[1].numRows() > 0) {
_validationFrame = splits[1];
} else {
splits[1].delete();
}
if (_blendingFrame == null && splits[2].numRows() > 0) {
_blendingFrame = splits[2];
} else {
splits[2].delete();
}
}
if (_leaderboardFrame == null)
_leaderboardFrame = _validationFrame;
}
}
private DistributionFamily inferDistribution(Vec response) {
int numOfDomains = response.domain() == null ? 0 : response.domain().length;
if (_buildSpec.build_control.distribution == DistributionFamily.AUTO) {
if (numOfDomains == 0)
return DistributionFamily.gaussian;
if (numOfDomains == 2)
return DistributionFamily.bernoulli;
if (numOfDomains > 2)
return DistributionFamily.multinomial;
throw new RuntimeException("Number of classes is equal to 1.");
} else {
DistributionFamily distribution = _buildSpec.build_control.distribution;
if (numOfDomains > 2) {
if (!Arrays.asList(
DistributionFamily.multinomial,
DistributionFamily.ordinal,
DistributionFamily.custom
).contains(distribution)) {
throw new H2OAutoMLException("Wrong distribution specified! Number of classes of response is greater than 2." +
" Possible distribution values: \"multinomial\"," +
/*" \"ordinal\"," + */ // Currently unsupported in AutoML
" \"custom\".");
}
} else if (numOfDomains == 2) {
if (!Arrays.asList(
DistributionFamily.bernoulli,
DistributionFamily.quasibinomial,
DistributionFamily.fractionalbinomial,
DistributionFamily.custom
).contains(distribution)) {
throw new H2OAutoMLException("Wrong distribution specified! Number of classes of response is 2." +
" Possible distribution values: \"bernoulli\"," +
/*" \"quasibinomial\", \"fractionalbinomial\"," + */ // Currently unsupported in AutoML
" \"custom\".");
}
} else {
if (!Arrays.asList(
DistributionFamily.gaussian,
DistributionFamily.poisson,
DistributionFamily.negativebinomial,
DistributionFamily.gamma,
DistributionFamily.laplace,
DistributionFamily.quantile,
DistributionFamily.huber,
DistributionFamily.tweedie,
DistributionFamily.custom
).contains(distribution)) {
throw new H2OAutoMLException("Wrong distribution specified! Response type suggests a regression task." +
" Possible distribution values: \"gaussian\", \"poisson\", \"negativebinomial\", \"gamma\", " +
"\"laplace\", \"quantile\", \"huber\", \"tweedie\", \"custom\".");
}
}
return distribution;
}
}
private void prepareData() {
final AutoMLInput input = _buildSpec.input_spec;
_origTrainingFrame = DKV.getGet(input.training_frame);
_validationFrame = DKV.getGet(input.validation_frame);
_blendingFrame = DKV.getGet(input.blending_frame);
_leaderboardFrame = DKV.getGet(input.leaderboard_frame);
optionallySplitTrainingDataset();
if (null == _trainingFrame) {
      // when nfolds>0, let trainingFrame be the original frame,
      // but clone it to keep an internal ref just in case the original ref gets deleted from the client side
      // (can occur in some corner cases with Python GC, for example if the frame gets out of scope during an AutoML rerun)
_trainingFrame = new Frame(_origTrainingFrame);
_trainingFrame._key = Key.make(_runId+"_training_" + _origTrainingFrame._key);
DKV.put(_trainingFrame);
}
_responseColumn = _trainingFrame.vec(input.response_column);
_foldColumn = _trainingFrame.vec(input.fold_column);
_weightsColumn = _trainingFrame.vec(input.weights_column);
_distributionFamily = inferDistribution(_responseColumn);
eventLog().info(Stage.DataImport,
"training frame: "+_trainingFrame.toString().replace("\n", " ")+" checksum: "+_trainingFrame.checksum());
if (null != _validationFrame) {
eventLog().info(Stage.DataImport,
"validation frame: "+_validationFrame.toString().replace("\n", " ")+" checksum: "+_validationFrame.checksum());
} else {
eventLog().info(Stage.DataImport, "validation frame: NULL");
}
if (null != _leaderboardFrame) {
eventLog().info(Stage.DataImport,
"leaderboard frame: "+_leaderboardFrame.toString().replace("\n", " ")+" checksum: "+_leaderboardFrame.checksum());
} else {
eventLog().info(Stage.DataImport, "leaderboard frame: NULL");
}
if (null != _blendingFrame) {
this.eventLog().info(Stage.DataImport,
"blending frame: "+_blendingFrame.toString().replace("\n", " ")+" checksum: "+_blendingFrame.checksum());
} else {
this.eventLog().info(Stage.DataImport, "blending frame: NULL");
}
eventLog().info(Stage.DataImport, "response column: "+input.response_column);
eventLog().info(Stage.DataImport, "fold column: "+_foldColumn);
eventLog().info(Stage.DataImport, "weights column: "+_weightsColumn);
if (verifyImmutability) {
// check that we haven't messed up the original Frame
_originalTrainingFrameVecs = _origTrainingFrame.vecs().clone();
_originalTrainingFrameNames = _origTrainingFrame.names().clone();
_originalTrainingFrameChecksums = new long[_originalTrainingFrameVecs.length];
for (int i = 0; i < _originalTrainingFrameVecs.length; i++)
_originalTrainingFrameChecksums[i] = _originalTrainingFrameVecs[i].checksum();
}
}
//***************** Training Jobs *****************//
private void learn() {
List<ModelingStep> completed = new ArrayList<>();
if (_preprocessing != null) {
for (PreprocessingStep preprocessingStep : _preprocessing) preprocessingStep.prepare();
}
for (ModelingStep step : getExecutionPlan()) {
if (!exceededSearchLimits(step)) {
StepResultState state = _modelingStepsExecutor.submit(step, job());
log.info("AutoML step returned with state: "+state.toString());
if (_testMode) _stepsResults = ArrayUtils.append(_stepsResults, state);
if (state.is(ResultStatus.success)) {
_consecutiveModelFailures.set(0);
completed.add(step);
} else if (state.is(ResultStatus.failed)) {
if (!step.ignores(Constraint.FAILURE_COUNT)
&& _consecutiveModelFailures.incrementAndGet() >= _maxConsecutiveModelFailures) {
throw new H2OAutoMLException("Aborting AutoML after too many consecutive model failures", state.error());
}
if (state.error() instanceof H2OAutoMLException) { // if a step throws this exception, this will immediately abort the entire AutoML run.
throw (H2OAutoMLException) state.error();
}
}
}
}
if (_preprocessing != null) {
for (PreprocessingStep preprocessingStep : _preprocessing) preprocessingStep.dispose();
}
_actualModelingSteps = session().getModelingStepsRegistry().createDefinitionPlanFromSteps(completed.toArray(new ModelingStep[0]));
eventLog().info(Stage.Workflow, "Actual modeling steps: "+Arrays.toString(_actualModelingSteps));
}
public Key makeKey(String algoName, String type, boolean with_counter) {
List<String> tokens = new ArrayList<>();
tokens.add(algoName);
if (!StringUtils.isNullOrEmpty(type)) tokens.add(type);
if (with_counter) tokens.add(Integer.toString(session().nextModelCounter(algoName, type)));
tokens.add(_runId);
return Key.make(String.join("_", tokens));
}
public void trackKeys(Key... keys) {
String whereFrom = Arrays.toString(Thread.currentThread().getStackTrace());
for (Key key : keys) _trackedKeys.put(key, whereFrom);
}
private boolean exceededSearchLimits(ModelingStep step) {
if (_job.stop_requested()) {
eventLog().debug(EventLogEntry.Stage.ModelTraining, "AutoML: job cancelled; skipping "+step._description);
return true;
}
if (!step.ignores(Constraint.TIMEOUT) && _runCountdown.timedOut()) {
eventLog().debug(EventLogEntry.Stage.ModelTraining, "AutoML: out of time; skipping "+step._description);
return true;
}
if (!step.ignores(Constraint.MODEL_COUNT) && remainingModels() <= 0) {
eventLog().debug(EventLogEntry.Stage.ModelTraining, "AutoML: hit the max_models limit; skipping "+step._description);
return true;
}
return false;
}
//***************** Clean Up + other utility functions *****************//
/**
* Delete the AutoML-related objects, including the grids and models that it built if cascade=true
*/
@Override
protected Futures remove_impl(Futures fs, boolean cascade) {
Key<Job> jobKey = _job == null ? null : _job._key;
log.debug("Cleaning up AutoML "+jobKey);
if (_buildSpec != null) {
      // If the Frame was made here (e.g. the buildspec contained a path), then it will be deleted
if (_buildSpec.input_spec.training_frame == null && _origTrainingFrame != null) {
_origTrainingFrame.delete(jobKey, fs, true);
}
if (_buildSpec.input_spec.validation_frame == null && _validationFrame != null) {
_validationFrame.delete(jobKey, fs, true);
}
}
if (_trainingFrame != null && _origTrainingFrame != null)
Frame.deleteTempFrameAndItsNonSharedVecs(_trainingFrame, _origTrainingFrame);
if (leaderboard() != null) leaderboard().remove(fs, cascade);
if (eventLog() != null) eventLog().remove(fs, cascade);
if (session() != null) session().remove(fs, cascade);
if (cascade && _preprocessing != null) {
for (PreprocessingStep preprocessingStep : _preprocessing) {
preprocessingStep.remove();
}
}
for (Key key : _trackedKeys.keySet()) Keyed.remove(key, fs, true);
return super.remove_impl(fs, cascade);
}
private boolean possiblyVerifyImmutability() {
boolean warning = false;
if (verifyImmutability) {
// check that we haven't messed up the original Frame
eventLog().debug(Stage.Workflow, "Verifying training frame immutability. . .");
Vec[] vecsRightNow = _origTrainingFrame.vecs();
String[] namesRightNow = _origTrainingFrame.names();
if (_originalTrainingFrameVecs.length != vecsRightNow.length) {
log.warn("Training frame vec count has changed from: " +
_originalTrainingFrameVecs.length + " to: " + vecsRightNow.length);
warning = true;
}
if (_originalTrainingFrameNames.length != namesRightNow.length) {
log.warn("Training frame vec count has changed from: " +
_originalTrainingFrameNames.length + " to: " + namesRightNow.length);
warning = true;
}
for (int i = 0; i < _originalTrainingFrameVecs.length; i++) {
if (!_originalTrainingFrameVecs[i].equals(vecsRightNow[i])) {
log.warn("Training frame vec number " + i + " has changed keys. Was: " +
_originalTrainingFrameVecs[i] + " , now: " + vecsRightNow[i]);
warning = true;
}
if (!_originalTrainingFrameNames[i].equals(namesRightNow[i])) {
log.warn("Training frame vec number " + i + " has changed names. Was: " +
_originalTrainingFrameNames[i] + " , now: " + namesRightNow[i]);
warning = true;
}
if (_originalTrainingFrameChecksums[i] != vecsRightNow[i].checksum()) {
log.warn("Training frame vec number " + i + " has changed checksum. Was: " +
_originalTrainingFrameChecksums[i] + " , now: " + vecsRightNow[i].checksum());
warning = true;
}
}
if (warning)
eventLog().warn(Stage.Workflow, "Training frame was mutated! This indicates a bug in the AutoML software.");
else
eventLog().debug(Stage.Workflow, "Training frame was not mutated (as expected).");
} else {
eventLog().debug(Stage.Workflow, "Not verifying training frame immutability. . . This is turned off for efficiency.");
}
return warning;
}
private void cleanUpModelsCVPreds() {
log.info("Cleaning up all CV Predictions for AutoML");
for (Model model : leaderboard().getModels()) {
model.deleteCrossValidationPreds();
}
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/AutoMLBuildSpec.java
|
package ai.h2o.automl;
import ai.h2o.automl.preprocessing.PreprocessingStepDefinition;
import hex.Model;
import hex.ScoreKeeper.StoppingMetric;
import hex.genmodel.utils.DistributionFamily;
import hex.grid.HyperSpaceSearchCriteria;
import water.H2O;
import water.Iced;
import water.Key;
import water.exceptions.H2OIllegalValueException;
import water.fvec.Frame;
import water.util.*;
import water.util.PojoUtils.FieldNaming;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Stream;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
/**
* Parameters which specify the build (or extension) of an AutoML build job.
*/
public class AutoMLBuildSpec extends Iced {
private static final ThreadLocal<DateFormat> instanceTimeStampFormat = ThreadLocal.withInitial(() -> new SimpleDateFormat("yyyyMMdd_Hmmss"));
private final static AtomicInteger amlInstanceCounter = new AtomicInteger();
/**
* The specification of overall build parameters for the AutoML process.
*/
public static final class AutoMLBuildControl extends Iced {
public final AutoMLStoppingCriteria stopping_criteria;
/**
* Identifier for models that should be grouped together in the leaderboard (e.g., "airlines" and "iris").
*/
public String project_name = null;
// Pass through to all algorithms
public boolean balance_classes = false;
public float[] class_sampling_factors;
public float max_after_balance_size = 5.0f;
public int nfolds = -1;
public DistributionFamily distribution = DistributionFamily.AUTO;
public String custom_distribution_func;
public double tweedie_power = 1.5;
public double quantile_alpha = 0.5;
public double huber_alpha = 0.9;
public String custom_metric_func;
public boolean keep_cross_validation_predictions = false;
public boolean keep_cross_validation_models = false;
public boolean keep_cross_validation_fold_assignment = false;
public String export_checkpoints_dir = null;
public AutoMLBuildControl() {
stopping_criteria = new AutoMLStoppingCriteria();
}
}
public static final class AutoMLStoppingCriteria extends Iced {
public static final int AUTO_STOPPING_TOLERANCE = -1;
public static double default_stopping_tolerance_for_frame(Frame frame) {
return HyperSpaceSearchCriteria.RandomDiscreteValueSearchCriteria.default_stopping_tolerance_for_frame(frame);
}
private final HyperSpaceSearchCriteria.RandomDiscreteValueSearchCriteria _searchCriteria = new HyperSpaceSearchCriteria.RandomDiscreteValueSearchCriteria();
private double _max_runtime_secs_per_model = 0;
public AutoMLStoppingCriteria() {
// reasonable defaults:
set_max_models(0); // no limit
set_max_runtime_secs(0); // no limit
set_max_runtime_secs_per_model(0); // no limit
set_stopping_rounds(3);
set_stopping_tolerance(AUTO_STOPPING_TOLERANCE);
set_stopping_metric(StoppingMetric.AUTO);
}
public double max_runtime_secs_per_model() {
return _max_runtime_secs_per_model;
}
public void set_max_runtime_secs_per_model(double max_runtime_secs_per_model) {
_max_runtime_secs_per_model = max_runtime_secs_per_model;
}
public long seed() {
return _searchCriteria.seed();
}
public int max_models() {
return _searchCriteria.max_models();
}
public double max_runtime_secs() {
return _searchCriteria.max_runtime_secs();
}
public int stopping_rounds() {
return _searchCriteria.stopping_rounds();
}
public StoppingMetric stopping_metric() {
return _searchCriteria.stopping_metric();
}
public double stopping_tolerance() {
return _searchCriteria.stopping_tolerance();
}
public void set_seed(long seed) {
_searchCriteria.set_seed(seed);
}
public void set_max_models(int max_models) {
_searchCriteria.set_max_models(max_models);
}
public void set_max_runtime_secs(double max_runtime_secs) {
_searchCriteria.set_max_runtime_secs(max_runtime_secs);
}
public void set_stopping_rounds(int stopping_rounds) {
_searchCriteria.set_stopping_rounds(stopping_rounds);
}
public void set_stopping_metric(StoppingMetric stopping_metric) {
_searchCriteria.set_stopping_metric(stopping_metric);
}
public void set_stopping_tolerance(double stopping_tolerance) {
_searchCriteria.set_stopping_tolerance(stopping_tolerance);
}
public void set_default_stopping_tolerance_for_frame(Frame frame) {
_searchCriteria.set_default_stopping_tolerance_for_frame(frame);
}
public HyperSpaceSearchCriteria.RandomDiscreteValueSearchCriteria getSearchCriteria() {
return _searchCriteria;
}
}
/**
* The specification of the datasets to be used for the AutoML process.
* The user can specify a directory path, a file path (including HDFS, s3 or the like),
* or the ID of an already-parsed Frame in the H2O cluster. Paths are processed
* as usual in H2O.
*/
public static final class AutoMLInput extends Iced {
public Key<Frame> training_frame;
public Key<Frame> validation_frame;
public Key<Frame> blending_frame;
public Key<Frame> leaderboard_frame;
public String response_column;
public String fold_column;
public String weights_column;
public String[] ignored_columns;
public String sort_metric = StoppingMetric.AUTO.name();
}
/**
* The specification of the parameters for building models for a single algo (e.g., GBM), including base model parameters and hyperparameter search.
*/
public static final class AutoMLBuildModels extends Iced {
public Algo[] exclude_algos;
public Algo[] include_algos;
public StepDefinition[] modeling_plan;
public double exploitation_ratio = -1;
public AutoMLCustomParameters algo_parameters = new AutoMLCustomParameters();
public PreprocessingStepDefinition[] preprocessing;
}
public static final class AutoMLCustomParameters extends Iced {
// convenient property to allow us to modify our model (and later grids) definitions
// and benchmark them without having to rebuild the backend for each change.
static final String ALGO_PARAMS_ALL_ENABLED = H2O.OptArgs.SYSTEM_PROP_PREFIX + "automl.algo_parameters.all.enabled";
// let's limit the list of allowed custom parameters by default for now: we can always decide to open this later.
private static final String[] ALLOWED_PARAMETERS = {
"monotone_constraints",
// "ntrees",
};
private static final String ROOT_PARAM = "algo_parameters";
public static final class AutoMLCustomParameter<V> extends Iced {
private AutoMLCustomParameter(String name, V value) {
_name = name;
_value = value;
}
private AutoMLCustomParameter(IAlgo algo, String name, V value) {
_algo = algo;
_name = name;
_value = value;
}
private IAlgo _algo;
private String _name;
private V _value;
}
public static final class Builder {
private final transient List<AutoMLCustomParameter> _anyAlgoParams = new ArrayList<>();
private final transient List<AutoMLCustomParameter> _specificAlgoParams = new ArrayList<>();
public <V> Builder add(String param, V value) {
assertParameterAllowed(param);
_anyAlgoParams.add(new AutoMLCustomParameter<>(param, value));
return this;
}
public <V> Builder add(IAlgo algo, String param, V value) {
assertParameterAllowed(param);
_specificAlgoParams.add(new AutoMLCustomParameter<>(algo, param, value));
return this;
}
/**
* Builder is necessary here as the custom parameters must be applied in a certain order,
* and we can't assume that the consumer of this API will add them in the right order.
* @return a new AutoMLCustomParameters instance with custom parameters properly assigned.
*/
public AutoMLCustomParameters build() {
AutoMLCustomParameters instance = new AutoMLCustomParameters();
// apply "all" scope first, then algo-specific ones.
for (AutoMLCustomParameter param : _anyAlgoParams) {
if (!instance.addParameter(param._name, param._value))
throw new H2OIllegalValueException(param._name, ROOT_PARAM, param._value);
}
for (AutoMLCustomParameter param : _specificAlgoParams) {
if (!instance.addParameter(param._algo, param._name, param._value))
throw new H2OIllegalValueException(param._name, ROOT_PARAM, param._value);
}
return instance;
}
private void assertParameterAllowed(String param) {
if (!Boolean.parseBoolean(System.getProperty(ALGO_PARAMS_ALL_ENABLED, "false"))
&& !ArrayUtils.contains(ALLOWED_PARAMETERS, param))
throw new H2OIllegalValueException(ROOT_PARAM, param);
}
}
public static Builder create() {
return new Builder();
}
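    // Minimal usage sketch of the builder above ("monotone_constraints" is one of the ALLOWED_PARAMETERS;
    // the constraint values are hypothetical). Parameters added for all algos are applied first, then the
    // algo-specific overrides, as enforced by build():
    //   AutoMLCustomParameters custom = AutoMLCustomParameters.create()
    //       .add("monotone_constraints", constraints)
    //       .add(Algo.GBM, "monotone_constraints", gbmConstraints)
    //       .build();
    //   buildSpec.build_models.algo_parameters = custom;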
private final IcedHashMap<String, String[]> _algoParameterNames = new IcedHashMap<>(); // stores the parameters names overridden, by algo name
private final IcedHashMap<String, Model.Parameters> _algoParameters = new IcedHashMap<>(); //stores the parameters values, by algo name
public boolean hasCustomParams(IAlgo algo) {
return _algoParameterNames.get(algo.name()) != null;
}
public boolean hasCustomParam(IAlgo algo, String param) {
return ArrayUtils.contains(_algoParameterNames.get(algo.name()), param);
}
public void applyCustomParameters(IAlgo algo, Model.Parameters destParams) {
if (hasCustomParams(algo)) {
String[] paramNames = getCustomParameterNames(algo);
String[] onlyParamNames = Stream.of(paramNames).map(p -> "_"+p).toArray(String[]::new);
PojoUtils.copyProperties(destParams, getCustomizedDefaults(algo), FieldNaming.CONSISTENT, null, onlyParamNames);
}
}
String[] getCustomParameterNames(IAlgo algo) {
return _algoParameterNames.get(algo.name());
}
Model.Parameters getCustomizedDefaults(IAlgo algo) {
if (!_algoParameters.containsKey(algo.name())) {
Model.Parameters defaults = defaultParameters(algo);
if (defaults != null) _algoParameters.put(algo.name(), defaults);
}
return _algoParameters.get(algo.name());
}
private Model.Parameters defaultParameters(IAlgo algo) {
return algo.enabled() ? ModelingStepsRegistry.defaultParameters(algo.name()) : null;
}
private void addParameterName(IAlgo algo, String param) {
if (!_algoParameterNames.containsKey(algo.name())) {
_algoParameterNames.put(algo.name(), new String[] {param});
} else {
String[] names = _algoParameterNames.get(algo.name());
if (!ArrayUtils.contains(names, param)) {
_algoParameterNames.put(algo.name(), ArrayUtils.append(names, param));
}
}
}
private <V> boolean addParameter(String param, V value) {
boolean added = false;
for (Algo algo : Algo.values()) {
added |= addParameter(algo, param, value);
}
return added;
}
private <V> boolean addParameter(IAlgo algo, String param, V value) {
Model.Parameters customParams = getCustomizedDefaults(algo);
try {
if (customParams != null
&& (setField(customParams, param, value, FieldNaming.DEST_HAS_UNDERSCORES)
|| setField(customParams, param, value, FieldNaming.CONSISTENT))) {
addParameterName(algo, param);
return true;
} else {
Log.debug("Could not set custom param " + param + " for algo " + algo);
return false;
}
} catch (IllegalArgumentException iae) {
throw new H2OIllegalValueException(param, ROOT_PARAM, value);
}
}
private <D, V> boolean setField(D dest, String fieldName, V value, FieldNaming naming) {
try {
PojoUtils.setField(dest, fieldName, value, naming);
return true;
} catch (IllegalArgumentException iae) {
// propagate exception iff the value was wrong (conversion issue), ignore if the field doesn't exist.
try {
PojoUtils.getFieldValue(dest, fieldName, naming);
} catch (IllegalArgumentException ignored){
return false;
}
throw iae;
}
}
}
public final AutoMLBuildControl build_control = new AutoMLBuildControl();
public final AutoMLInput input_spec = new AutoMLInput();
public final AutoMLBuildModels build_models = new AutoMLBuildModels();
private String instanceId;
public String project() {
if (build_control.project_name == null) {
build_control.project_name = instanceId();
}
return build_control.project_name;
}
public String instanceId() {
if (instanceId == null) {
instanceId = "AutoML_"+amlInstanceCounter.incrementAndGet()+"_"+ instanceTimeStampFormat.get().format(new Date());
}
return instanceId;
}
public Key<AutoML> makeKey() {
// if user offers a different response column,
// the new models will be added to a new Leaderboard, without removing the previous one.
// otherwise, the new models will be added to the existing leaderboard.
return Key.make(project() + AutoML.keySeparator + StringUtils.sanitizeIdentifier(input_spec.response_column));
}
public String[] getNonPredictors() {
return Arrays.stream(new String[]{input_spec.weights_column, input_spec.fold_column, input_spec.response_column})
.filter(Objects::nonNull)
.toArray(String[]::new);
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/AutoMLSession.java
|
package ai.h2o.automl;
import water.*;
import water.nbhm.NonBlockingHashMap;
import water.util.IcedHashMap;
import water.util.IcedHashSet;
import java.util.concurrent.atomic.AtomicInteger;
public class AutoMLSession extends Lockable<AutoMLSession> {
private static Key<AutoMLSession> makeKey(String projectName) {
return Key.make("AutoMLSession_"+projectName);
}
public static AutoMLSession getInstance(String projectName) {
AutoMLSession session = DKV.getGet(makeKey(projectName));
if (session == null) {
session = new AutoMLSession(projectName);
DKV.put(session);
}
return session;
}
private final String _projectName;
private final ModelingStepsRegistry _modelingStepsRegistry;
private IcedHashSet<Key<Keyed>> _resumableKeys = new IcedHashSet();
private IcedHashMap<Key, String[]> _keySources = new IcedHashMap<>();
private NonBlockingHashMap<String, AtomicInteger> _modelCounters = new NonBlockingHashMap<>();
private transient NonBlockingHashMap<String, ModelingSteps> _availableStepsByProviderName = new NonBlockingHashMap<>();
private transient AutoML _aml;
AutoMLSession(String projectName) {
super(makeKey(projectName));
_projectName = projectName;
_modelingStepsRegistry = new ModelingStepsRegistry();
}
public ModelingStepsRegistry getModelingStepsRegistry() {
return _modelingStepsRegistry;
}
void attach(AutoML aml, boolean resume) {
assert _projectName.equals(aml._key.toString()): "AutoMLSession can only be attached to an AutoML instance from project '"+_projectName+"', but got: "+aml._key;
if (_aml == null) {
_aml = aml;
if (!resume) _availableStepsByProviderName.clear();
}
}
void detach() {
for (ModelingSteps steps : _availableStepsByProviderName.values()) steps.cleanup();
_aml = null;
DKV.put(this);
}
public ModelingStep getModelingStep(Key key) {
if (!_keySources.containsKey(key)) return null;
String[] identifiers = _keySources.get(key);
assert identifiers.length > 1;
return getModelingStep(identifiers[0], identifiers[1]);
}
public ModelingStep getModelingStep(String providerName, String id) {
ModelingSteps steps = getModelingSteps(providerName);
return steps == null ? null : steps.getStep(id).orElse(null);
}
ModelingSteps getModelingSteps(String providerName) {
if (!_availableStepsByProviderName.containsKey(providerName)) {
ModelingStepsProvider provider = _modelingStepsRegistry.stepsByName.get(providerName);
if (provider == null) {
throw new IllegalArgumentException("Missing provider for modeling steps '"+providerName+"'");
}
ModelingSteps steps = provider.newInstance(_aml);
if (steps != null) _availableStepsByProviderName.put(providerName, steps);
}
return _availableStepsByProviderName.get(providerName);
}
public void registerKeySource(Key key, ModelingStep step) {
if (key != null && !_keySources.containsKey(key))
atomicUpdate(() -> _keySources.put(key, new String[]{step.getProvider(), step.getId()}));
}
public void addResumableKey(Key key) {
atomicUpdate(() -> _resumableKeys.add(key));
}
public Key[] getResumableKeys(String providerName, String id) {
ModelingStep step = getModelingStep(providerName, id);
if (step == null) return new Key[0];
return _resumableKeys.stream()
.filter(k -> step.equals(getModelingStep(k)))
.toArray(Key[]::new);
}
public int nextModelCounter(String algoName, String type) {
String key = algoName+"_"+type;
if (!_modelCounters.containsKey(key)) {
synchronized (_modelCounters) {
if (!_modelCounters.containsKey(key))
atomicUpdate(() -> _modelCounters.put(key, new AtomicInteger(0)));
}
}
AtomicInteger c = new AtomicInteger();
atomicUpdate(() -> c.set(_modelCounters.get(key).incrementAndGet()));
return c.get();
}
@Override
protected Futures remove_impl(Futures fs, boolean cascade) {
_resumableKeys.clear();
_keySources.clear();
_availableStepsByProviderName.clear();
return super.remove_impl(fs, cascade);
}
private void atomicUpdate(Runnable update) {
// atomic updates are unnecessary for now:
    // even though the session can be shared by multiple AutoML instances when there are reruns of the same project,
    // only one instance at a time uses the session, so we don't need to update the DKV on each modification.
// AutoMLUtils.atomicUpdate(this, update, null);
update.run();
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/H2OJob.java
|
package ai.h2o.automl;
import water.H2O;
import water.Job;
import water.Key;
import water.Keyed;
public class H2OJob<T extends Keyed & H2ORunnable> {
protected final T _target;
protected Key<Job> _jobKey;
Job<T> _job;
public H2OJob(T runnable, long max_runtime_msecs) {
this(runnable, Key.make(), max_runtime_msecs);
}
public H2OJob(T runnable, Key<T> key, long max_runtime_msecs) {
_target = runnable;
_job = new Job<>(key, _target.getClass().getName(), _target.getClass().getSimpleName() + " build");
_jobKey = _job._key;
_job._max_runtime_msecs = max_runtime_msecs;
}
public Job<T> start() {
return this.start(1);
}
public Job<T> start(int work) {
return _job.start(new H2O.H2OCountedCompleter() {
@Override public void compute2() {
_target.run();
tryComplete();
}
}, work);
}
public void stop() { _jobKey.get().stop(); }
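  // Usage sketch (MyRunnable is a hypothetical class extending Keyed and implementing H2ORunnable):
  //   H2OJob<MyRunnable> wrapper = new H2OJob<>(myRunnable, 60_000 /* max runtime in ms */);
  //   Job<MyRunnable> job = wrapper.start();
  //   job.get(); // block until the wrapped run() completes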
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/H2ORunnable.java
|
package ai.h2o.automl;
/**
*
* The <code>H2ORunnable</code> interface should be implemented by any class whose
* instances are intended to be submitted to the H2O forkjoin pool via
 * <code>H2O.submitTask</code>. The class must define no-argument methods called
 * <code>run</code> and <code>stop</code>.
*
*/
public interface H2ORunnable {
void run();
void stop();
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/IAlgo.java
|
package ai.h2o.automl;
import java.io.Serializable;
public interface IAlgo extends Serializable {
String name();
default String urlName() { return name().toLowerCase(); }
default boolean enabled() { return true; }
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/ModelParametersProvider.java
|
package ai.h2o.automl;
import hex.Model.Parameters;
public interface ModelParametersProvider<P extends Parameters> {
P newDefaultParameters();
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/ModelSelectionStrategies.java
|
package ai.h2o.automl;
import hex.Model;
import hex.leaderboard.Leaderboard;
import org.apache.log4j.Logger;
import water.Key;
import water.util.ArrayUtils;
import java.util.Arrays;
import java.util.function.Predicate;
import java.util.function.Supplier;
public final class ModelSelectionStrategies {
private static final Logger LOG = Logger.getLogger(ModelSelectionStrategies.class);
public static abstract class LeaderboardBasedSelectionStrategy<M extends Model> implements ModelSelectionStrategy<M> {
final Supplier<LeaderboardHolder> _leaderboardSupplier;
public LeaderboardBasedSelectionStrategy(Supplier<LeaderboardHolder> leaderboardSupplier) {
_leaderboardSupplier = leaderboardSupplier;
}
LeaderboardHolder makeSelectionLeaderboard() {
return _leaderboardSupplier.get();
}
}
public static class KeepBestN<M extends Model> extends LeaderboardBasedSelectionStrategy<M>{
private final int _N;
public KeepBestN(int N, Supplier<LeaderboardHolder> leaderboardSupplier) {
super(leaderboardSupplier);
_N = N;
}
@Override
@SuppressWarnings("unchecked")
public Selection<M> select(Key<M>[] originalModels, Key<M>[] newModels) {
LeaderboardHolder lbHolder = makeSelectionLeaderboard();
Leaderboard tmpLeaderboard = lbHolder.get();
tmpLeaderboard.addModels((Key<Model>[]) originalModels);
tmpLeaderboard.addModels((Key<Model>[]) newModels);
if (LOG.isDebugEnabled()) LOG.debug(tmpLeaderboard.toLogString());
Key<Model>[] sortedKeys = tmpLeaderboard.getModelKeys();
Key<Model>[] bestN = ArrayUtils.subarray(sortedKeys, 0, Math.min(sortedKeys.length, _N));
Key<M>[] toAdd = Arrays.stream(bestN).filter(k -> !ArrayUtils.contains(originalModels, k)).toArray(Key[]::new);
Key<M>[] toRemove = Arrays.stream(originalModels).filter(k -> !ArrayUtils.contains(bestN, k)).toArray(Key[]::new);
Selection selection = new Selection<>(toAdd, toRemove);
lbHolder.cleanup();
return selection;
}
}
public static class KeepBestConstantSize<M extends Model> extends LeaderboardBasedSelectionStrategy<M> {
public KeepBestConstantSize(Supplier<LeaderboardHolder> leaderboardSupplier) {
super(leaderboardSupplier);
}
@Override
public Selection<M> select(Key<M>[] originalModels, Key<M>[] newModels) {
return new KeepBestN<M>(originalModels.length, _leaderboardSupplier).select(originalModels, newModels);
}
}
public static class KeepBestNFromSubgroup<M extends Model> extends LeaderboardBasedSelectionStrategy<M> {
private final Predicate<Key<M>> _criterion;
private final int _N;
public KeepBestNFromSubgroup(int N, Predicate<Key<M>> criterion, Supplier<LeaderboardHolder> leaderboardSupplier) {
super(leaderboardSupplier);
_criterion = criterion;
_N = N;
}
@Override
public Selection<M> select(Key<M>[] originalModels, Key<M>[] newModels) {
Key<M>[] originalModelsSubgroup = Arrays.stream(originalModels).filter(_criterion).toArray(Key[]::new);
Key<M>[] newModelsSubGroup = Arrays.stream(newModels).filter(_criterion).toArray(Key[]::new);
return new KeepBestN<M>(_N, _leaderboardSupplier).select(originalModelsSubgroup, newModelsSubGroup);
}
}
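  // Usage sketch (keys and leaderboard supplier are hypothetical): keep the 3 best models overall after a
  // retraining round, where `holder` wraps a temporary Leaderboard used only for this selection:
  //   ModelSelectionStrategy.Selection<Model> sel = new KeepBestN<Model>(3, () -> holder).select(originalKeys, newKeys);
  //   // sel._add lists the new models to keep, sel._remove the original models to drop.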
public interface LeaderboardHolder {
Leaderboard get();
default void cleanup() {};
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/ModelSelectionStrategy.java
|
package ai.h2o.automl;
import hex.Model;
import water.Key;
@FunctionalInterface
public interface ModelSelectionStrategy<M extends Model>{
class Selection<M extends Model> {
final Key<M>[] _add; //models that should be added to the original population
final Key<M>[] _remove; //models that should be removed from the original population
public Selection(Key<M>[] add, Key<M>[] remove) {
_add = add;
_remove = remove;
}
}
Selection<M> select(Key<M>[] originalModels, Key<M>[] newModels);
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/ModelingPlans.java
|
package ai.h2o.automl;
import ai.h2o.automl.StepDefinition.Step;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import static ai.h2o.automl.ModelingStep.GridStep.DEFAULT_GRID_GROUP;
import static ai.h2o.automl.ModelingStep.GridStep.DEFAULT_GRID_TRAINING_WEIGHT;
import static ai.h2o.automl.ModelingStep.ModelStep.DEFAULT_MODEL_GROUP;
import static ai.h2o.automl.ModelingStep.ModelStep.DEFAULT_MODEL_TRAINING_WEIGHT;
final class ModelingPlans {
/**
   * Plan reflecting the behaviour of H2O AutoML prior to v3.34 as closely as possible.
*
* Keeping it mainly for reference and for tests.
*/
final static StepDefinition[] ONE_LAYERED = {
new StepDefinition(Algo.XGBoost.name(), StepDefinition.Alias.defaults),
new StepDefinition(Algo.GLM.name(), StepDefinition.Alias.defaults),
new StepDefinition(Algo.DRF.name(), "def_1"),
new StepDefinition(Algo.GBM.name(), StepDefinition.Alias.defaults),
new StepDefinition(Algo.DeepLearning.name(), StepDefinition.Alias.defaults),
new StepDefinition(Algo.DRF.name(), "XRT"),
new StepDefinition(Algo.XGBoost.name(),
new Step("grid_1", DEFAULT_MODEL_GROUP, 3*DEFAULT_GRID_TRAINING_WEIGHT)),
new StepDefinition(Algo.GBM.name(),
new Step("grid_1", DEFAULT_MODEL_GROUP, 2*DEFAULT_GRID_TRAINING_WEIGHT)),
new StepDefinition(Algo.DeepLearning.name(),
new Step("grid_1", DEFAULT_MODEL_GROUP, DEFAULT_GRID_TRAINING_WEIGHT/2),
new Step("grid_2", DEFAULT_MODEL_GROUP, DEFAULT_GRID_TRAINING_WEIGHT/2),
new Step("grid_3", DEFAULT_MODEL_GROUP, DEFAULT_GRID_TRAINING_WEIGHT/2)),
new StepDefinition(Algo.GBM.name(),
new Step("lr_annealing", DEFAULT_MODEL_GROUP, DEFAULT_MODEL_TRAINING_WEIGHT)),
new StepDefinition(Algo.XGBoost.name(),
new Step("lr_search", DEFAULT_MODEL_GROUP, DEFAULT_GRID_TRAINING_WEIGHT)),
new StepDefinition(Algo.StackedEnsemble.name(),
new Step("best_of_family", DEFAULT_MODEL_GROUP, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("all", DEFAULT_MODEL_GROUP, DEFAULT_MODEL_TRAINING_WEIGHT)),
};
/**
* A simple improvement on the one-layered version using mainly default settings:
* <ul>
* <li>the first layer attempts to train all the base models.</li>
* <li>if this layer completes, a second layer trains all the grids.</li>
   *    <li>an optional 3rd layer is trained if the exploitation ratio is enabled.</li>
   *    <li>2 SEs are trained at the end of each layer.</li>
* </ul>
*
   * Keeping this as an example of a simple plan, mainly used for tests.
*/
final static StepDefinition[] TWO_LAYERED = {
new StepDefinition(Algo.XGBoost.name(), StepDefinition.Alias.defaults),
new StepDefinition(Algo.GLM.name(), StepDefinition.Alias.defaults),
new StepDefinition(Algo.DRF.name(), StepDefinition.Alias.defaults),
new StepDefinition(Algo.GBM.name(), StepDefinition.Alias.defaults),
new StepDefinition(Algo.DeepLearning.name(), StepDefinition.Alias.defaults),
new StepDefinition(Algo.XGBoost.name(), StepDefinition.Alias.grids),
new StepDefinition(Algo.GBM.name(), StepDefinition.Alias.grids),
new StepDefinition(Algo.DeepLearning.name(), StepDefinition.Alias.grids),
new StepDefinition(Algo.GBM.name(),
new Step("lr_annealing", DEFAULT_GRID_GROUP+1, Step.DEFAULT_WEIGHT)),
new StepDefinition(Algo.XGBoost.name(),
new Step("lr_search", DEFAULT_GRID_GROUP+1, Step.DEFAULT_WEIGHT)),
new StepDefinition(Algo.StackedEnsemble.name(), StepDefinition.Alias.defaults),
};
/**
   * A multi-layered plan:
* <ol>
* <li>a short first layer with only 3 base models to be able to produce at least a few decent models
   *     on larger datasets if the time budget is small,
* followed by an SE of those models.</li>
* <li>another layer with more base models if all the models in the first layer were able to converge
   *     + SEs for that layer</li>
* <li>another layer with the remaining base models + SEs</li>
* <li>another layer with the usually fast and best performing grids + SEs</li>
* <li>another layer with the remaining grids + SEs</li>
* <li>another layer doing a learning_rate search on the best GBM+XGB and adding some optional SEs</li>
* <li>another layer with more optional SEs</li>
* <li>a final layer resuming the best 2 grids so far, this time running without grid early stopping,
* and followed by 2 final SEs</li>
* </ol>
*
*/
final static StepDefinition[] TEN_LAYERED = {
// order of step definitions and steps defines the order of steps in the same priority group.
new StepDefinition(Algo.XGBoost.name(),
new Step("def_2", 1, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("def_1", 2, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("def_3", 3, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("grid_1", 4, 3*DEFAULT_GRID_TRAINING_WEIGHT),
new Step("lr_search", 6, DEFAULT_GRID_TRAINING_WEIGHT)
),
new StepDefinition(Algo.GLM.name(),
new Step("def_1", 1, DEFAULT_MODEL_TRAINING_WEIGHT)
),
new StepDefinition(Algo.DRF.name(),
new Step("def_1", 2, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("XRT", 3, DEFAULT_MODEL_TRAINING_WEIGHT)
),
new StepDefinition(Algo.GBM.name(),
new Step("def_5", 1, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("def_2", 2, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("def_3", 2, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("def_4", 2, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("def_1", 3, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("grid_1", 4, 2*DEFAULT_GRID_TRAINING_WEIGHT),
new Step("lr_annealing", 6, DEFAULT_MODEL_TRAINING_WEIGHT)
),
new StepDefinition(Algo.DeepLearning.name(),
new Step("def_1", 3, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("grid_1", 4, DEFAULT_GRID_TRAINING_WEIGHT),
new Step("grid_2", 5, DEFAULT_GRID_TRAINING_WEIGHT),
new Step("grid_3", 5, DEFAULT_GRID_TRAINING_WEIGHT)
),
new StepDefinition("completion",
new Step("resume_best_grids", 10, 2*DEFAULT_GRID_TRAINING_WEIGHT)
),
// generates BoF and All SE for each group, but we prefer to customize instances and weights below
// new StepDefinition(Algo.StackedEnsemble.name(), StepDefinition.Alias.defaults),
new StepDefinition(Algo.StackedEnsemble.name(), Stream.of(new Step[][] {
            IntStream.rangeClosed(1, 5).mapToObj(group -> // BoF SEs are fast to train, so only half the default budget is allocated to them.
new Step("best_of_family_"+group, group, DEFAULT_MODEL_TRAINING_WEIGHT/2)
).toArray(Step[]::new),
            IntStream.rangeClosed(2, 5).mapToObj(group -> // starts at 2 as we don't need an "all" SE for the first group.
new Step("all_"+group, group, DEFAULT_MODEL_TRAINING_WEIGHT)
).toArray(Step[]::new),
{
new Step("monotonic", 6, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("best_of_family_gbm", 6, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("all_gbm", 7, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("best_of_family_xglm", 8, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("all_xglm", 8, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("best_of_family", 10, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("best_N", 10, DEFAULT_MODEL_TRAINING_WEIGHT),
}
}).flatMap(Stream::of).toArray(Step[]::new)),
};
final static StepDefinition[] REPRODUCIBLE = {
// order of step definitions and steps defines the order of steps in the same priority group.
new StepDefinition(Algo.XGBoost.name(),
new Step("def_2", 1, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("def_1", 2, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("def_3", 3, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("grid_1", 4, 3*DEFAULT_GRID_TRAINING_WEIGHT),
new Step("lr_search", 7, DEFAULT_GRID_TRAINING_WEIGHT)
),
new StepDefinition(Algo.GLM.name(),
new Step("def_1", 1, DEFAULT_MODEL_TRAINING_WEIGHT)
),
new StepDefinition(Algo.DRF.name(),
new Step("def_1", 2, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("XRT", 3, DEFAULT_MODEL_TRAINING_WEIGHT)
),
new StepDefinition(Algo.GBM.name(),
new Step("def_5", 1, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("def_2", 2, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("def_3", 2, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("def_4", 2, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("def_1", 3, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("grid_1", 4, 2*DEFAULT_GRID_TRAINING_WEIGHT),
new Step("lr_annealing", 7, DEFAULT_MODEL_TRAINING_WEIGHT)
),
new StepDefinition(Algo.DeepLearning.name(),
new Step("def_1", 3, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("grid_1", 4, DEFAULT_GRID_TRAINING_WEIGHT),
new Step("grid_2", 5, DEFAULT_GRID_TRAINING_WEIGHT),
new Step("grid_3", 5, DEFAULT_GRID_TRAINING_WEIGHT)
),
new StepDefinition("completion",
new Step("resume_best_grids", 6, 2*DEFAULT_GRID_TRAINING_WEIGHT)
),
new StepDefinition(Algo.StackedEnsemble.name(),
new Step("monotonic", 9, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("best_of_family_xglm", 10, DEFAULT_MODEL_TRAINING_WEIGHT),
new Step("all_xglm", 10, DEFAULT_MODEL_TRAINING_WEIGHT)
)
};
public static StepDefinition[] defaultPlan() {
return TEN_LAYERED;
}
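  // Illustrative sketch (comment only, not part of the original sources): a custom plan is built
  // the same way as the plans above. The combination below is a hypothetical GBM-only plan that
  // reuses step ids appearing in this class.
  //
  //   static final StepDefinition[] GBM_ONLY = {
  //       new StepDefinition(Algo.GBM.name(), StepDefinition.Alias.defaults),
  //       new StepDefinition(Algo.GBM.name(), StepDefinition.Alias.grids),
  //       new StepDefinition(Algo.StackedEnsemble.name(), "best_of_family"),
  //   };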
private ModelingPlans() {}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/ModelingStep.java
|
package ai.h2o.automl;
import ai.h2o.automl.AutoMLBuildSpec.AutoMLCustomParameters;
import ai.h2o.automl.ModelSelectionStrategies.LeaderboardHolder;
import ai.h2o.automl.ModelSelectionStrategy.Selection;
import ai.h2o.automl.StepResultState.ResultStatus;
import ai.h2o.automl.WorkAllocations.JobType;
import ai.h2o.automl.WorkAllocations.Work;
import ai.h2o.automl.events.EventLog;
import ai.h2o.automl.events.EventLogEntry;
import ai.h2o.automl.events.EventLogEntry.Stage;
import ai.h2o.automl.preprocessing.PreprocessingConfig;
import ai.h2o.automl.preprocessing.PreprocessingStep;
import hex.Model;
import hex.Model.Parameters.FoldAssignmentScheme;
import hex.ModelBuilder;
import hex.ModelContainer;
import hex.ScoreKeeper.StoppingMetric;
import hex.genmodel.utils.DistributionFamily;
import hex.grid.Grid;
import hex.grid.GridSearch;
import hex.grid.HyperSpaceSearchCriteria;
import hex.grid.HyperSpaceSearchCriteria.RandomDiscreteValueSearchCriteria;
import hex.grid.HyperSpaceWalker;
import hex.leaderboard.Leaderboard;
import jsr166y.CountedCompleter;
import org.apache.commons.lang.builder.ToStringBuilder;
import water.*;
import water.exceptions.H2OIllegalArgumentException;
import water.util.ArrayUtils;
import water.util.Countdown;
import water.util.EnumUtils;
import water.util.Log;
import java.util.*;
import java.util.function.Consumer;
import java.util.function.Predicate;
/**
* Parent class defining common properties and common logic for actual {@link AutoML} training steps.
*/
public abstract class ModelingStep<M extends Model> extends Iced<ModelingStep> {
protected enum SeedPolicy {
/** No seed will be used (= random). */
None,
/** The global AutoML seed will be used. */
Global,
/** The seed is incremented for each model, starting from the global seed if there is one. */
Incremental
}
static Predicate<Work> isDefaultModel = w -> w._type == JobType.ModelBuild;
static Predicate<Work> isExplorationWork = w -> w._type == JobType.ModelBuild || w._type == JobType.HyperparamSearch;
static Predicate<Work> isExploitationWork = w -> w._type == JobType.Selection;
protected <MP extends Model.Parameters> Job<Grid> startSearch(
final Key<Grid> resultKey,
final MP baseParams,
final Map<String, Object[]> hyperParams,
final HyperSpaceSearchCriteria searchCriteria)
{
assert resultKey != null;
assert baseParams != null;
assert hyperParams.size() > 0;
assert searchCriteria != null;
applyPreprocessing(baseParams);
aml().eventLog().info(Stage.ModelTraining, "AutoML: starting "+resultKey+" hyperparameter search")
.setNamedValue("start_"+_provider+"_"+_id, new Date(), EventLogEntry.epochFormat.get());
return GridSearch.create(
resultKey,
HyperSpaceWalker.BaseWalker.WalkerFactory.create(
baseParams,
hyperParams,
new GridSearch.SimpleParametersBuilderFactory<>(),
searchCriteria
))
.withParallelism(GridSearch.SEQUENTIAL_MODEL_BUILDING)
.withMaxConsecutiveFailures(aml()._maxConsecutiveModelFailures)
.start();
}
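  // Illustrative sketch (comment only, not part of the original sources): a caller of startSearch
  // typically provides a hyperparameter map keyed by parameter field names. The key, parameter
  // names and values below are hypothetical and only show the expected shape of the arguments.
  //
  //   Map<String, Object[]> hyperParams = new HashMap<>();
  //   hyperParams.put("_ntrees", new Object[]{50, 100, 200});
  //   hyperParams.put("_max_depth", new Object[]{5, 10, 15});
  //   Key<Grid> gridKey = Key.make("my_grid");
  //   Job<Grid> search = startSearch(gridKey, baseParams, hyperParams, searchCriteria);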
@SuppressWarnings("unchecked")
protected <MP extends Model.Parameters> Job<M> startModel(
final Key<M> resultKey,
final MP params
) {
assert resultKey != null;
assert params != null;
Job<M> job = new Job<>(resultKey, ModelBuilder.javaName(_algo.urlName()), _description);
applyPreprocessing(params);
ModelBuilder builder = ModelBuilder.make(_algo.urlName(), job, (Key<Model>) resultKey);
builder._parms = params;
aml().eventLog().info(Stage.ModelTraining, "AutoML: starting "+resultKey+" model training")
.setNamedValue("start_"+_provider+"_"+_id, new Date(), EventLogEntry.epochFormat.get());
builder.init(false); // validate parameters
if (builder._messages.length > 0) {
for (ModelBuilder.ValidationMessage vm : builder._messages) {
if (vm.log_level() == Log.WARN) {
aml().eventLog().warn(Stage.ModelTraining, vm.field()+" param, "+vm.message());
} else if (vm.log_level() == Log.ERRR) {
aml().eventLog().error(Stage.ModelTraining, vm.field()+" param, "+vm.message());
}
}
}
return builder.trainModelOnH2ONode();
}
private boolean validParameters(Model.Parameters parms, String[] fields) {
try {
Model.Parameters params = parms.clone();
      // some algos check whether the distribution has the proper _nclass(es), so we need to set the training frame, response, etc.
setCommonModelBuilderParams(params);
ModelBuilder mb = ModelBuilder.make(params);
mb.init(false);
return Arrays.stream(fields)
.allMatch((field) ->
mb.getMessagesByFieldAndSeverity(field, Log.ERRR).length == 0);
} catch (H2OIllegalArgumentException e) {
return false;
}
}
protected void setDistributionParameters(Model.Parameters parms) {
switch (aml().getDistributionFamily()) {
case custom:
parms._custom_distribution_func = aml().getBuildSpec().build_control.custom_distribution_func;
break;
case huber:
parms._huber_alpha = aml().getBuildSpec().build_control.huber_alpha;
break;
case tweedie:
parms._tweedie_power = aml().getBuildSpec().build_control.tweedie_power;
break;
case quantile:
parms._quantile_alpha = aml().getBuildSpec().build_control.quantile_alpha;
break;
}
try {
parms.setDistributionFamily(aml().getDistributionFamily());
} catch (H2OIllegalArgumentException e) {
parms.setDistributionFamily(DistributionFamily.AUTO);
}
if (!validParameters(parms, new String[]{"_distribution", "_family"}))
parms.setDistributionFamily(DistributionFamily.AUTO);
if (!aml().getDistributionFamily().equals(parms.getDistributionFamily())) {
aml().eventLog().info(Stage.ModelTraining,"Algo " + parms.algoName() +
" doesn't support " + _aml.getDistributionFamily().name() + " distribution. Using AUTO distribution instead.");
}
}
private final transient AutoML _aml;
protected final IAlgo _algo;
protected final String _provider;
protected final String _id;
protected int _weight;
protected int _priorityGroup;
  protected AutoML.Constraint[] _ignoredConstraints = new AutoML.Constraint[0]; // the constraints (max_models/max_runtime) that this step is allowed to ignore
protected String _description;
protected Work _work;
private final transient List<Consumer<Job>> _onDone = new ArrayList<>();
StepDefinition _fromDef;
transient final Predicate<Work> _isSamePriorityGroup = w -> w._priorityGroup == _priorityGroup;
protected ModelingStep(String provider, IAlgo algo, String id, int priorityGroup, int weight, AutoML autoML) {
assert priorityGroup >= 0;
_provider = provider;
_algo = algo;
_id = id;
_priorityGroup = priorityGroup;
_weight = weight;
_aml = autoML;
_description = provider+" "+id;
}
/**
* Each provider (usually one class) defining a collection of steps must have a unique name.
* @return the name of the provider (usually simply the name of an algo) defining this step.
*/
public String getProvider() {
return _provider;
}
/**
* @return the step identifier: should be unique inside its provider.
*/
public String getId() {
return _id;
}
/**
* @return a string that identifies the step uniquely among all steps defined by all providers.
*/
public String getGlobalId() {
return _provider+":"+_id;
}
public IAlgo getAlgo() {
return _algo;
}
public int getWeight() {
return _weight;
}
public int getPriorityGroup() {
return _priorityGroup;
}
public boolean isResumable() {
return false;
}
public boolean ignores(AutoML.Constraint constraint) {
return ArrayUtils.contains(_ignoredConstraints, constraint);
}
public boolean limitModelTrainingTime() {
// if max_models is used, then the global time limit should have no impact on model training budget due to reproducibility concerns.
return !ignores(AutoML.Constraint.TIMEOUT) && aml().getBuildSpec().build_control.stopping_criteria.max_models() == 0;
}
/**
* @return true iff we can call {@link #run()} on this modeling step to start a new job.
*/
public boolean canRun() {
Work work = getAllocatedWork();
return work != null && work._weight > 0;
}
/**
* Execute this modeling step, returning the job associated to it if any.
   * @return the {@link Job} started by this step, or null if no job was started.
*/
public Job run() {
Job job = startJob();
if (job != null && job._result != null) {
register(job._result);
if (isResumable()) aml().session().addResumableKey(job._result);
}
return job;
}
/**
* @return an {@link Iterator} for the potential sub-steps provided by this modeling step.
*/
public Iterator<? extends ModelingStep> iterateSubSteps() {
return Collections.emptyIterator();
}
/**
   * @param id the sub-step identifier.
   * @return an {@link Optional} holding the sub-step with the given identifier, or an empty Optional if there is none.
*/
protected Optional<? extends ModelingStep> getSubStep(String id) {
return Optional.empty();
}
protected abstract JobType getJobType();
/**
* Starts a new {@link Job} as part of this step.
* @return the newly started job.
*/
protected abstract Job startJob();
protected void onDone(Job job) {
for (Consumer<Job> exec : _onDone) {
exec.accept(job);
}
_onDone.clear();
}
protected void register(Key key) {
aml().session().registerKeySource(key, this);
}
protected AutoML aml() {
return _aml;
}
/**
* @return the total work allocated for this step.
*/
protected Work getAllocatedWork() {
if (_work == null) {
_work = getWorkAllocations().getAllocation(_id, _algo);
}
return _work;
}
/**
* Creates the {@link Work} instance representing the total work handled by this step.
   * @return the newly created {@link Work} instance.
*/
protected Work makeWork() {
return new Work(getId(), getAlgo(), getJobType(), getPriorityGroup(), getWeight());
}
protected Key makeKey(String name, boolean withCounter) {
return aml().makeKey(name, null, withCounter);
}
protected WorkAllocations getWorkAllocations() {
return aml()._workAllocations;
}
/**
* @return the models trained until now, sorted by the default leaderboard metric.
*/
protected Model[] getTrainedModels() {
return aml().leaderboard().getModels();
}
protected Key<Model>[] getTrainedModelsKeys() {
return aml().leaderboard().getModelKeys();
}
protected boolean isCVEnabled() {
return aml().isCVEnabled();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ModelingStep<?> that = (ModelingStep<?>) o;
return _provider.equals(that._provider) && _id.equals(that._id);
}
@Override
public int hashCode() {
return Objects.hash(_provider, _id);
}
/**
* Assign common parameters to the model params before building the model or set of models.
* This includes:
* <ul>
* <li>data-related parameters: frame/columns parameters, class distribution.</li>
   *  <li>cross-validation parameters.</li>
   *  <li>memory optimization: whether certain objects built during training should be kept after training or not.</li>
* <li>model management: checkpoints.</li>
* </ul>
* @param params the model parameters to which the common parameters will be added.
*/
protected void setCommonModelBuilderParams(Model.Parameters params) {
params._train = aml()._trainingFrame._key;
if (null != aml()._validationFrame)
params._valid = aml()._validationFrame._key;
AutoMLBuildSpec buildSpec = aml().getBuildSpec();
params._response_column = buildSpec.input_spec.response_column;
params._ignored_columns = buildSpec.input_spec.ignored_columns;
setCrossValidationParams(params);
setWeightingParams(params);
setClassBalancingParams(params);
params._custom_metric_func = buildSpec.build_control.custom_metric_func;
params._keep_cross_validation_models = buildSpec.build_control.keep_cross_validation_models;
params._keep_cross_validation_fold_assignment = buildSpec.build_control.nfolds != 0 && buildSpec.build_control.keep_cross_validation_fold_assignment;
params._export_checkpoints_dir = buildSpec.build_control.export_checkpoints_dir;
/** Using _main_model_time_budget_factor to determine if and how we should restrict the time for the main model.
* Value 0 means do not use time constraint for the main model.
* More details in {@link ModelBuilder#setMaxRuntimeSecsForMainModel()}.
*/
params._main_model_time_budget_factor = 2;
}
protected void setCrossValidationParams(Model.Parameters params) {
AutoMLBuildSpec buildSpec = aml().getBuildSpec();
params._keep_cross_validation_predictions = aml().getBlendingFrame() == null || buildSpec.build_control.keep_cross_validation_predictions;
params._fold_column = buildSpec.input_spec.fold_column;
if (buildSpec.input_spec.fold_column == null) {
params._nfolds = buildSpec.build_control.nfolds;
if (buildSpec.build_control.nfolds > 1) {
// TODO: below allow the user to specify this (vs Modulo)
params._fold_assignment = FoldAssignmentScheme.Modulo;
}
}
}
protected void setWeightingParams(Model.Parameters params) {
AutoMLBuildSpec buildSpec = aml().getBuildSpec();
params._weights_column = buildSpec.input_spec.weights_column;
}
protected void setClassBalancingParams(Model.Parameters params) {
AutoMLBuildSpec buildSpec = aml().getBuildSpec();
if (buildSpec.build_control.balance_classes) {
params._balance_classes = buildSpec.build_control.balance_classes;
params._class_sampling_factors = buildSpec.build_control.class_sampling_factors;
params._max_after_balance_size = buildSpec.build_control.max_after_balance_size;
}
}
protected void setCustomParams(Model.Parameters params) {
AutoMLCustomParameters customParams = aml().getBuildSpec().build_models.algo_parameters;
if (customParams == null) return;
customParams.applyCustomParameters(_algo, params);
}
protected void applyPreprocessing(Model.Parameters params) {
if (aml().getPreprocessing() == null) return;
for (PreprocessingStep preprocessingStep : aml().getPreprocessing()) {
PreprocessingStep.Completer complete = preprocessingStep.apply(params, getPreprocessingConfig());
_onDone.add(j -> complete.run());
}
}
protected PreprocessingConfig getPreprocessingConfig() {
return new PreprocessingConfig();
}
/**
* Configures early-stopping for the model or set of models to be built.
*
* @param parms the model parameters to which the stopping criteria will be added.
* @param defaults the default parameters for the corresponding {@link ModelBuilder}.
*/
protected void setStoppingCriteria(Model.Parameters parms, Model.Parameters defaults) {
// If the caller hasn't set ModelBuilder stopping criteria, set it from our global criteria.
AutoMLBuildSpec buildSpec = aml().getBuildSpec();
//FIXME: Do we really need to compare with defaults before setting the buildSpec value instead?
    // This can create subtle bugs: e.g. if a dev wanted to enforce a stopping criterion for a specific algo/model,
    // they wouldn't be able to enforce the default value, as it would always be overridden by buildSpec.
// We should instead provide hooks and ensure that properties are always set in the following order:
// 1. defaults, 2. user defined, 3. internal logic/algo specific based on the previous state (esp. handling of AUTO properties).
if (parms._stopping_metric == defaults._stopping_metric)
parms._stopping_metric = buildSpec.build_control.stopping_criteria.stopping_metric();
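      // Note (comment not in the original sources): a response cardinality of -1 is assumed here to
      // denote a numeric (regression) target, hence deviance; classification targets fall back to logloss.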
if (parms._stopping_metric == StoppingMetric.AUTO) {
parms._stopping_metric = aml().getResponseColumn().cardinality() == -1 ? StoppingMetric.deviance : StoppingMetric.logloss;
}
if (parms._stopping_rounds == defaults._stopping_rounds)
parms._stopping_rounds = buildSpec.build_control.stopping_criteria.stopping_rounds();
if (parms._stopping_tolerance == defaults._stopping_tolerance)
parms._stopping_tolerance = buildSpec.build_control.stopping_criteria.stopping_tolerance();
}
/**
   * @param parms the model parameters to which the seed will be assigned.
* @param defaults the default parameters for the corresponding {@link ModelBuilder}.
* @param seedPolicy the policy defining how the seed will be assigned to the model parameters.
*/
protected void setSeed(Model.Parameters parms, Model.Parameters defaults, SeedPolicy seedPolicy) {
AutoMLBuildSpec buildSpec = aml().getBuildSpec();
// Don't use the same exact seed so that, e.g., if we build two GBMs they don't do the same row and column sampling.
if (parms._seed == defaults._seed) {
switch (seedPolicy) {
case Global:
parms._seed = buildSpec.build_control.stopping_criteria.seed();
break;
case Incremental:
parms._seed = _aml._incrementalSeed.get() == defaults._seed ? defaults._seed : _aml._incrementalSeed.getAndIncrement();
break;
default:
break;
}
}
}
protected void initTimeConstraints(Model.Parameters parms, double upperLimit) {
AutoMLBuildSpec buildSpec = aml().getBuildSpec();
if (parms._max_runtime_secs == 0) {
double maxPerModel = buildSpec.build_control.stopping_criteria.max_runtime_secs_per_model();
parms._max_runtime_secs = upperLimit <= 0 ? maxPerModel : Math.min(maxPerModel, upperLimit);
}
}
private String getSortMetric() {
//ensures that the sort metric is always updated according to the defaults set by leaderboard
Leaderboard leaderboard = aml().leaderboard();
return leaderboard == null ? null : leaderboard.getSortMetric();
}
private static StoppingMetric metricValueOf(String name) {
if (name == null) return StoppingMetric.AUTO;
switch (name) {
case "mean_residual_deviance": return StoppingMetric.deviance;
default:
try {
return EnumUtils.valueOf(StoppingMetric.class, name);
} catch (IllegalArgumentException ignored) { }
return StoppingMetric.AUTO;
}
}
/**
* Step designed to build a single/default model.
*/
public static abstract class ModelStep<M extends Model> extends ModelingStep<M> {
public static final int DEFAULT_MODEL_TRAINING_WEIGHT = 10;
public static final int DEFAULT_MODEL_GROUP = 1;
public ModelStep(String provider, IAlgo algo, String id, AutoML autoML) {
this(provider, algo, id, DEFAULT_MODEL_GROUP, DEFAULT_MODEL_TRAINING_WEIGHT, autoML);
}
public ModelStep(String provider, IAlgo algo, String id, int priorityGroup, int weight, AutoML autoML) {
super(provider, algo, id, priorityGroup, weight, autoML);
}
@Override
protected JobType getJobType() {
return JobType.ModelBuild;
}
public abstract Model.Parameters prepareModelParameters();
@Override
protected Job<M> startJob() {
return trainModel(prepareModelParameters());
}
protected Job<M> trainModel(Model.Parameters parms) {
return trainModel(null, parms);
}
/**
* @param key (optional) model key.
* @param parms the model builder params.
     * @return the started model-training job.
*/
protected Job<M> trainModel(Key<M> key, Model.Parameters parms) {
String algoName = ModelBuilder.algoName(_algo.urlName());
if (null == key) key = makeKey(algoName, true);
Model.Parameters defaults = ModelBuilder.make(_algo.urlName(), null, null)._parms;
initTimeConstraints(parms, 0);
setCommonModelBuilderParams(parms);
setSeed(parms, defaults, SeedPolicy.Incremental);
setStoppingCriteria(parms, defaults);
setCustomParams(parms);
setDistributionParameters(parms);
// override model's max_runtime_secs to ensure that the total max_runtime doesn't exceed expectations
if (limitModelTrainingTime()) {
Work work = getAllocatedWork();
// double maxAssignedTimeSecs = aml().timeRemainingMs() / 1e3; // legacy
// double maxAssignedTimeSecs = aml().timeRemainingMs() * getWorkAllocations().remainingWorkRatio(work) / 1e3; //including default models in the distribution of the time budget.
// double maxAssignedTimeSecs = aml().timeRemainingMs() * getWorkAllocations().remainingWorkRatio(work, isDefaultModel) / 1e3; //PUBDEV-7595
double maxAssignedTimeSecs = aml().timeRemainingMs() * getWorkAllocations().remainingWorkRatio(work, _isSamePriorityGroup) / 1e3; // Models from a priority group + SEs
parms._max_runtime_secs = parms._max_runtime_secs == 0 ? maxAssignedTimeSecs
: Math.min(parms._max_runtime_secs, maxAssignedTimeSecs);
} else {
parms._max_runtime_secs = 0;
}
Log.debug("Training model: " + algoName + ", time remaining (ms): " + aml().timeRemainingMs());
aml().eventLog().debug(Stage.ModelTraining, parms._max_runtime_secs == 0
? "No time limitation for "+key
: "Time assigned for "+key+": "+parms._max_runtime_secs+"s");
return startModel(key, parms);
}
}
/**
* Step designed to build multiple models using a (random) grid search.
*/
public static abstract class GridStep<M extends Model> extends ModelingStep<M> {
public static final int DEFAULT_GRID_TRAINING_WEIGHT = 30;
public static final int DEFAULT_GRID_GROUP = 2;
protected static final int GRID_STOPPING_ROUND_FACTOR = 2;
public GridStep(String provider, IAlgo algo, String id, AutoML autoML) {
this(provider, algo, id, DEFAULT_GRID_GROUP, DEFAULT_GRID_TRAINING_WEIGHT, autoML);
}
public GridStep(String provider, IAlgo algo, String id, int priorityGroup, int weight, AutoML autoML) {
super(provider, algo, id, priorityGroup, weight, autoML);
}
@Override
protected JobType getJobType() {
return JobType.HyperparamSearch;
}
@Override
public boolean isResumable() {
return true;
}
public abstract Model.Parameters prepareModelParameters();
public abstract Map<String, Object[]> prepareSearchParameters();
@Override
protected Job<Grid> startJob() {
return hyperparameterSearch(prepareModelParameters(), prepareSearchParameters());
}
@Override
@SuppressWarnings("unchecked")
protected Key<Grid> makeKey(String name, boolean withCounter) {
return aml().makeKey(name, "grid", withCounter);
}
protected Job<Grid> hyperparameterSearch(Model.Parameters baseParms, Map<String, Object[]> searchParms) {
return hyperparameterSearch(null, baseParms, searchParms);
}
/**
* @param key optional grid key
* @param baseParms ModelBuilder parameter values that are common across all models in the search.
     * @param searchParms the hyperparameter search space.
* @return the started hyperparameter search job.
*/
protected Job<Grid> hyperparameterSearch(Key<Grid> key, Model.Parameters baseParms, Map<String, Object[]> searchParms) {
Model.Parameters defaults;
try {
defaults = baseParms.getClass().newInstance();
} catch (Exception e) {
aml().eventLog().error(Stage.ModelTraining, "Internal error doing hyperparameter search");
throw new H2OIllegalArgumentException("Hyperparameter search can't create a new instance of Model.Parameters subclass: " + baseParms.getClass());
}
initTimeConstraints(baseParms, 0);
setCommonModelBuilderParams(baseParms);
// grid seed is provided later through the searchCriteria
setStoppingCriteria(baseParms, defaults);
setCustomParams(baseParms);
setDistributionParameters(baseParms);
AutoMLBuildSpec buildSpec = aml().getBuildSpec();
RandomDiscreteValueSearchCriteria searchCriteria = (RandomDiscreteValueSearchCriteria) buildSpec.build_control.stopping_criteria.getSearchCriteria().clone();
setSearchCriteria(searchCriteria, baseParms);
if (null == key) key = makeKey(_provider, true);
aml().trackKeys(key);
Log.debug("Hyperparameter search: " + _provider + ", time remaining (ms): " + aml().timeRemainingMs());
aml().eventLog().debug(Stage.ModelTraining, searchCriteria.max_runtime_secs() == 0
? "No time limitation for " + key
: "Time assigned for " + key + ": " + searchCriteria.max_runtime_secs() + "s");
return startSearch(
key,
baseParms,
searchParms,
searchCriteria
);
}
protected void setSearchCriteria(RandomDiscreteValueSearchCriteria searchCriteria, Model.Parameters baseParms) {
Work work = getAllocatedWork();
      // for the time limit, this is allocated in proportion to the entire work budget.
double maxAssignedTimeSecs = limitModelTrainingTime()
? aml().timeRemainingMs() * getWorkAllocations().remainingWorkRatio(work, _isSamePriorityGroup) / 1e3
: 0;
// SE predicate can be removed if/when we decide to include SEs in the max_models limit
      // for the model limit, this is not allocated in the same proportion as for time,
      // as the exploitation phase is not supposed to "add" models but rather to replace some with better ones;
      // instead, the allocation is done in proportion to the entire exploration budget.
int maxAssignedModels = (int) Math.ceil(aml().remainingModels() * getWorkAllocations().remainingWorkRatio(work, isExplorationWork.and(w -> w._algo != Algo.StackedEnsemble)));
searchCriteria.set_max_runtime_secs(searchCriteria.max_runtime_secs() == 0
? maxAssignedTimeSecs
: Math.min(searchCriteria.max_runtime_secs(), maxAssignedTimeSecs));
searchCriteria.set_max_models(searchCriteria.max_models() == 0
? maxAssignedModels
: Math.min(searchCriteria.max_models(), maxAssignedModels));
searchCriteria.set_stopping_rounds(baseParms._stopping_rounds * GRID_STOPPING_ROUND_FACTOR);
}
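    // Illustrative arithmetic (comment only, not part of the original sources), assuming 600s of
    // AutoML time remaining and a remaining-work ratio of 0.25 for this priority group:
    //   maxAssignedTimeSecs = 600_000 ms * 0.25 / 1e3 = 150 s
    // so a search whose max_runtime_secs is still unset (0) would be capped at 150 seconds.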
}
/**
   * Step designed to optionally train some models and then make a selection,
   * adding and/or removing models to/from the current leaderboard.
*/
public static abstract class SelectionStep<M extends Model> extends ModelingStep<M> {
public static final int DEFAULT_SELECTION_TRAINING_WEIGHT = 20;
public static final int DEFAULT_SELECTION_GROUP = 3;
public SelectionStep(String provider, IAlgo algo, String id, AutoML autoML) {
this(provider, algo, id, DEFAULT_SELECTION_GROUP, DEFAULT_SELECTION_TRAINING_WEIGHT, autoML);
}
public SelectionStep(String provider, IAlgo algo, String id, int priorityGroup, int weight, AutoML autoML) {
super(provider, algo, id, priorityGroup, weight, autoML);
}
@Override
protected JobType getJobType() {
return JobType.Selection;
}
@Override
@SuppressWarnings("unchecked")
protected Key<Models> makeKey(String name, boolean withCounter) {
return aml().makeKey(name, "selection", withCounter);
}
private LeaderboardHolder makeLeaderboard(String name, EventLog eventLog) {
Leaderboard amlLeaderboard = aml().leaderboard();
EventLog tmpEventLog = eventLog == null ? EventLog.getOrMake(Key.make(name)) : eventLog;
Leaderboard tmpLeaderboard = Leaderboard.getOrMake(
name,
tmpEventLog.asLogger(Stage.ModelTraining),
amlLeaderboard.leaderboardFrame(),
amlLeaderboard.getSortMetric()
);
return new LeaderboardHolder() {
@Override
public Leaderboard get() {
return tmpLeaderboard;
}
@Override
public void cleanup() {
//by default, just empty the leaderboard and remove the container without touching anything model-related.
tmpLeaderboard.removeModels(tmpLeaderboard.getModelKeys(), false);
tmpLeaderboard.remove(false);
if (eventLog == null) {
tmpEventLog.remove();
}
}
};
}
protected LeaderboardHolder makeTmpLeaderboard(String name) {
return makeLeaderboard("tmp_"+name, null);
}
@Override
protected Job<Models> startJob() {
Key<Model>[] trainedModelKeys = getTrainedModelsKeys();
Key<Models> key = makeKey(_provider+"_"+_id, false);
aml().trackKeys(key);
Job<Models> job = new Job<>(key, Models.class.getName(), _description);
Work work = getAllocatedWork();
double maxAssignedTimeSecs = limitModelTrainingTime()
? aml().timeRemainingMs() * getWorkAllocations().remainingWorkRatio(work) / 1e3
: 0;
aml().eventLog().debug(Stage.ModelTraining, maxAssignedTimeSecs == 0
? "No time limitation for "+key
: "Time assigned for "+key+": "+maxAssignedTimeSecs+"s");
return job.start(new H2O.H2OCountedCompleter() {
final Models result = new Models(key, Model.class, job);
final Key<Models> selectionKey = Key.make(key+"_select");
final EventLog selectionEventLog = EventLog.getOrMake(selectionKey);
// EventLog selectionEventLog = aml().eventLog();
final LeaderboardHolder selectionLeaderboard = makeLeaderboard(selectionKey.toString(), selectionEventLog);
{
result.delete_and_lock(job);
}
@Override
public void compute2() {
Countdown countdown = Countdown.fromSeconds(maxAssignedTimeSecs);
Selection selection = null;
try {
ModelingStepsExecutor localExecutor = new ModelingStepsExecutor(selectionLeaderboard.get(), selectionEventLog, countdown);
localExecutor.start();
Job<Models> innerTraining = startTraining(selectionKey, maxAssignedTimeSecs);
StepResultState state = localExecutor.monitor(innerTraining, SelectionStep.this, job);
if (state.is(ResultStatus.success)) {
Log.debug("Selection leaderboard "+selectionLeaderboard.get()._key, selectionLeaderboard.get().toLogString());
selection = getSelectionStrategy().select(trainedModelKeys, selectionLeaderboard.get().getModelKeys());
Leaderboard lb = aml().leaderboard();
Log.debug("Selection result for job "+key, ToStringBuilder.reflectionToString(selection));
              lb.removeModels(selection._remove, false); // don't delete the models from DKV immediately: if one were part of a grid, deleting it would prevent the grid from being resumed.
aml().trackKeys(selection._remove);
lb.addModels(selection._add);
} else if (state.is(ResultStatus.failed)) {
throw (RuntimeException)state.error();
} else if (state.is(ResultStatus.cancelled)) {
throw new Job.JobCancelledException(innerTraining);
}
} finally {
result.unlock(job);
if (selection != null) {
result.addModels(selection._add);
}
}
tryComplete();
}
@Override
public void onCompletion(CountedCompleter caller) {
          Keyed.remove(selectionKey, new Futures(), false); // don't cascade: tmp models removal is done using the logic below.
selectionLeaderboard.get().removeModels(trainedModelKeys, false); // if original models were added to selection leaderboard, just remove them.
selectionLeaderboard.get().removeModels( // for newly trained models, fully remove those that don't appear in the result container.
Arrays.stream(selectionLeaderboard.get().getModelKeys()).filter(k -> !ArrayUtils.contains(result.getModelKeys(), k)).toArray(Key[]::new),
true
);
selectionLeaderboard.cleanup();
if (!aml().eventLog()._key.equals(selectionEventLog._key)) selectionEventLog.remove();
super.onCompletion(caller);
}
@Override
public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) {
result.unlock(job._key, false);
Keyed.remove(selectionKey);
selectionLeaderboard.get().remove();
if (!aml().eventLog()._key.equals(selectionEventLog._key)) selectionEventLog.remove();
return super.onExceptionalCompletion(ex, caller);
}
}, work._weight, maxAssignedTimeSecs);
}
protected abstract Job<Models> startTraining(Key<Models> result, double maxRuntimeSecs);
protected abstract ModelSelectionStrategy getSelectionStrategy();
protected Job<Models> asModelsJob(Job job, Key<Models> result){
Job<Models> jModels = new Job<>(result, Models.class.getName(), job._description); // can use the same result key as original job, as it is dropped once its result is read
return jModels.start(new H2O.H2OCountedCompleter() {
final Models models = new Models(result, Model.class, jModels);
{
models.delete_and_lock(jModels);
}
@Override
public void compute2() {
ModelingStepsExecutor.ensureStopRequestPropagated(job, jModels, ModelingStepsExecutor.DEFAULT_POLLING_INTERVAL_IN_MILLIS);
Keyed res = job.get();
models.unlock(jModels);
if (res instanceof Model) {
models.addModel(res.getKey());
} else if (res instanceof ModelContainer) {
models.addModels(((ModelContainer) res).getModelKeys());
res.remove(false);
} else if (res == null && jModels.stop_requested()) {
// Do nothing - stop was requested before we managed to train any model
} else {
throw new H2OIllegalArgumentException("Can only convert jobs producing a single Model or ModelContainer.");
}
tryComplete();
}
}, job._work, job._max_runtime_msecs);
}
}
/**
   * Step designed to dynamically choose what to train next (one model or another, a grid, or anything else),
   * based on the current AutoML workflow history.
*/
public static abstract class DynamicStep<M extends Model> extends ModelingStep<M> {
public static final int DEFAULT_DYNAMIC_TRAINING_WEIGHT = 20;
public static final int DEFAULT_DYNAMIC_GROUP = 100;
public static class VirtualAlgo implements IAlgo {
public VirtualAlgo() {}
@Override
public String name() {
return "virtual";
}
}
private transient Collection<ModelingStep> _subSteps;
public DynamicStep(String provider, String id, AutoML autoML) {
this(provider, id, DEFAULT_DYNAMIC_GROUP, DEFAULT_DYNAMIC_TRAINING_WEIGHT, autoML);
}
public DynamicStep(String provider, String id, int priorityGroup, int weight, AutoML autoML) {
super(provider, new VirtualAlgo(), id, priorityGroup, weight, autoML);
}
@Override
public boolean canRun() {
// this step is designed to delegate its work to sub-steps by default,
// so the parent step itself has nothing to run.
return false;
}
@Override
protected Job<M> startJob() {
// see comment in canRun().
return null;
}
@Override
protected JobType getJobType() {
return JobType.Dynamic;
}
@Override
@SuppressWarnings("unchecked")
protected Key<Models> makeKey(String name, boolean withCounter) {
return aml().makeKey(name, "decision", withCounter);
}
private void initSubSteps() {
if (_subSteps == null) {
_subSteps = prepareModelingSteps();
}
}
@Override
public Iterator<? extends ModelingStep> iterateSubSteps() {
initSubSteps();
return _subSteps.iterator();
}
@Override
protected Optional<? extends ModelingStep> getSubStep(String id) {
initSubSteps();
return _subSteps.stream()
.filter(step -> step._id.equals(id))
.findFirst();
}
protected abstract Collection<ModelingStep> prepareModelingSteps();
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/ModelingSteps.java
|
package ai.h2o.automl;
import ai.h2o.automl.StepDefinition.Alias;
import ai.h2o.automl.StepDefinition.Step;
import water.Iced;
import water.util.ArrayUtils;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.stream.Stream;
public abstract class ModelingSteps extends Iced<ModelingSteps> {
private transient AutoML _aml;
public ModelingSteps(AutoML autoML) {
_aml = autoML;
}
protected AutoML aml() {
return _aml;
}
public Optional<ModelingStep> getStep(String id) {
return Stream.of(getAllSteps())
.map(step -> step._id.equals(id) ? Optional.of(step)
: (Optional<ModelingStep>)step.getSubStep(id))
.filter(Optional::isPresent)
.map(Optional::get)
.findFirst();
}
protected ModelingStep[] getSteps(Step[] steps) {
List<ModelingStep> tSteps = new ArrayList<>();
for (Step step : steps) {
getStep(step._id).ifPresent(tStep -> {
if (step._weight != Step.DEFAULT_WEIGHT) {
tStep._weight = step._weight; // override default weight
}
if (step._group!= Step.DEFAULT_GROUP) {
tStep._priorityGroup = step._group; // override default priority
}
tSteps.add(tStep);
});
}
return tSteps.toArray(new ModelingStep[0]);
}
protected ModelingStep[] getSteps(Alias alias) {
switch (alias) {
case all:
return getAllSteps();
case defaults:
return getDefaultModels();
case grids:
return getGrids();
case optionals:
case exploitation: // old misleading alias, kept for backwards compatibility
return getOptionals();
default:
return new ModelingStep[0];
}
}
protected ModelingStep[] getAllSteps() {
ModelingStep[] all = new ModelingStep[0]; // create a fresh array to avoid type issues in arraycopy
all = ArrayUtils.append(all, getDefaultModels());
all = ArrayUtils.append(all, getGrids());
all = ArrayUtils.append(all, getOptionals());
return all;
}
/**
* @return the list of all single model steps that should be executed by default when this provider is active.
*/
protected ModelingStep[] getDefaultModels() { return new ModelingStep[0]; }
/**
* @return the list of all grid steps that should be executed by default when this provider is active.
*/
protected ModelingStep[] getGrids() { return new ModelingStep[0]; }
/**
* @return the list of all steps that should be executed on-demand, i.e. requested by their id.
*/
protected ModelingStep[] getOptionals() { return new ModelingStep[0]; }
public abstract String getProvider();
protected void cleanup() {}
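  // Illustrative sketch (comment only, not part of the original sources): a provider-specific
  // subclass usually overrides the collection methods above. The class name and step contents
  // below are hypothetical.
  //
  //   public class MyAlgoSteps extends ModelingSteps {
  //       public MyAlgoSteps(AutoML autoML) { super(autoML); }
  //       public String getProvider() { return "MyAlgo"; }
  //       protected ModelingStep[] getDefaultModels() { return new ModelingStep[]{ /* def_1, ... */ }; }
  //       protected ModelingStep[] getGrids() { return new ModelingStep[]{ /* grid_1, ... */ }; }
  //   }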
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/ModelingStepsExecutor.java
|
package ai.h2o.automl;
import ai.h2o.automl.AutoML.Constraint;
import ai.h2o.automl.StepResultState.ResultStatus;
import ai.h2o.automl.WorkAllocations.JobType;
import ai.h2o.automl.WorkAllocations.Work;
import ai.h2o.automl.events.EventLog;
import ai.h2o.automl.events.EventLogEntry.Stage;
import hex.Model;
import hex.ModelContainer;
import hex.leaderboard.Leaderboard;
import water.Iced;
import water.Job;
import water.Key;
import water.util.Countdown;
import water.util.Log;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Class responsible for starting all the {@link ModelingStep}s and monitoring their associated {@link Job},
* i.e. polling jobs and adding their result model(s) to the {@link Leaderboard}.
*/
class ModelingStepsExecutor extends Iced<ModelingStepsExecutor> {
static final int DEFAULT_POLLING_INTERVAL_IN_MILLIS = 1000;
static final StepResultState.Resolution DEFAULT_STATE_RESOLUTION_STRATEGY = StepResultState.Resolution.optimistic;
static void ensureStopRequestPropagated(Job job, Job parentJob, int pollingIntervalInMillis) {
if (job == null || parentJob == null) return;
while (job.isRunning()) {
if (parentJob.stop_requested()) {
job.stop();
}
job.blockingWaitForDone(pollingIntervalInMillis);
}
}
final Key<EventLog> _eventLogKey;
final Key<Leaderboard> _leaderboardKey;
final Countdown _runCountdown;
private int _pollingIntervalInMillis;
private StepResultState.Resolution _stateResolutionStrategy;
private transient List<Job> _jobs; // subjobs
private final AtomicInteger _modelCount = new AtomicInteger();
ModelingStepsExecutor(Leaderboard leaderboard, EventLog eventLog, Countdown runCountdown) {
_leaderboardKey = leaderboard._key;
_eventLogKey = eventLog._key;
_runCountdown = runCountdown;
}
void setPollingInterval(int millis) {
assert millis > 0;
_pollingIntervalInMillis = millis;
}
void setStateResolutionStrategy(StepResultState.Resolution strategy) {
assert strategy != null;
_stateResolutionStrategy = strategy;
}
int modelCount() {
return _modelCount.get();
}
void start() {
start(DEFAULT_POLLING_INTERVAL_IN_MILLIS, DEFAULT_STATE_RESOLUTION_STRATEGY);
}
void start(int pollingIntervalInMillis, StepResultState.Resolution strategy) {
setPollingInterval(pollingIntervalInMillis);
setStateResolutionStrategy(strategy);
_jobs = new ArrayList<>();
_modelCount.set(0);
_runCountdown.start();
}
void stop() {
_runCountdown.stop();
if (_jobs == null) return; // already stopped
for (Job j : _jobs) j.stop();
for (Job j : _jobs) j.get(); // Hold until they all completely stop.
_jobs = null;
}
@SuppressWarnings("unchecked")
StepResultState submit(ModelingStep step, Job parentJob) {
StepResultState resultState = new StepResultState(step.getGlobalId());
for (Iterator<ModelingStep> it = step.iterateSubSteps(); it.hasNext(); ) {
resultState.addState(submit(it.next(), parentJob));
}
if (step.canRun()) {
Job job = null;
try {
job = step.run();
if (job == null) {
resultState.addState(skip(step, parentJob));
} else {
resultState.addState(monitor(job, step, parentJob));
}
} catch (Exception e) {
resultState.addState(new StepResultState(step.getGlobalId(), e));
} finally {
step.onDone(job);
}
} else {
resultState.addState(new StepResultState(step.getGlobalId(), ResultStatus.skipped));
if (step.getAllocatedWork() != null) {
step.getAllocatedWork().consume();
}
}
resultState.resolveState(_stateResolutionStrategy);
return resultState;
}
private StepResultState skip(ModelingStep step, Job parentJob) {
if (null != parentJob) {
String desc = step._description;
Work work = step.getAllocatedWork();
parentJob.update(work.consume(), "SKIPPED: "+desc);
      Log.info("AutoML: skipping "+desc);
}
return new StepResultState(step.getGlobalId(), ResultStatus.skipped);
}
StepResultState monitor(Job job, ModelingStep step, Job parentJob) {
EventLog eventLog = eventLog();
String jobDescription = job._result == null ? job._description : job._result+" ["+job._description+"]";
eventLog.debug(Stage.ModelTraining, jobDescription + " started");
_jobs.add(job);
boolean ignoreTimeout = step.ignores(Constraint.TIMEOUT);
Work work = step.getAllocatedWork();
long lastWorkedSoFar = 0;
long lastTotalModelsBuilt = 0;
try {
while (job.isRunning()) {
if (parentJob != null) {
if (parentJob.stop_requested()) {
eventLog.debug(Stage.ModelTraining, "AutoML job cancelled; skipping "+jobDescription);
job.stop();
}
if (!ignoreTimeout && _runCountdown.timedOut()) {
eventLog.debug(Stage.ModelTraining, "AutoML: out of time; skipping "+jobDescription);
job.stop();
}
}
long workedSoFar = Math.round(job.progress() * work._weight);
if (parentJob != null) {
parentJob.update(Math.round(workedSoFar - lastWorkedSoFar), jobDescription);
}
if (JobType.HyperparamSearch == work._type || JobType.Selection == work._type) {
ModelContainer<?> container = (ModelContainer) job._result.get();
int totalModelsBuilt = container == null ? 0 : container.getModelCount();
if (totalModelsBuilt > lastTotalModelsBuilt) {
eventLog.debug(Stage.ModelTraining, "Built: "+totalModelsBuilt+" models for "+work._type+" : "+jobDescription);
this.addModels(container, step);
lastTotalModelsBuilt = totalModelsBuilt;
}
}
job.blockingWaitForDone(_pollingIntervalInMillis);
lastWorkedSoFar = workedSoFar;
}
if (job.isCrashed()) {
eventLog.error(Stage.ModelTraining, jobDescription+" failed: "+job.ex());
return new StepResultState(step.getGlobalId(), job.ex());
} else if (job.get() == null) {
eventLog.info(Stage.ModelTraining, jobDescription+" cancelled");
return new StepResultState(step.getGlobalId(), ResultStatus.cancelled);
} else {
// pick up any stragglers:
if (JobType.HyperparamSearch == work._type || JobType.Selection == work._type) {
eventLog.debug(Stage.ModelTraining, jobDescription+" complete");
ModelContainer<?> container = (ModelContainer) job.get();
int totalModelsBuilt = container.getModelCount();
if (totalModelsBuilt > lastTotalModelsBuilt) {
eventLog.debug(Stage.ModelTraining, "Built: "+totalModelsBuilt+" models for "+work._type+" : "+jobDescription);
this.addModels(container, step);
}
} else if (JobType.ModelBuild == work._type) {
eventLog.debug(Stage.ModelTraining, jobDescription+" complete");
this.addModel((Model) job.get(), step);
}
return new StepResultState(step.getGlobalId(), ResultStatus.success);
}
} finally {
// add remaining work
if (parentJob != null) {
parentJob.update(work._weight - lastWorkedSoFar);
}
work.consume();
_jobs.remove(job);
}
}
private void addModels(final ModelContainer container, ModelingStep step) {
for (Key<Model> key : container.getModelKeys()) step.register(key);
Leaderboard leaderboard = leaderboard();
int before = leaderboard.getModelCount();
leaderboard.addModels(container.getModelKeys());
int after = leaderboard.getModelCount();
_modelCount.addAndGet(after - before);
}
private void addModel(final Model model, ModelingStep step) {
step.register(model._key);
Leaderboard leaderboard = leaderboard();
int before = leaderboard.getModelCount();
leaderboard.addModel(model._key);
int after = leaderboard.getModelCount();
if (!step.ignores(Constraint.MODEL_COUNT))
_modelCount.addAndGet(after - before);
}
private EventLog eventLog() {
return _eventLogKey.get();
}
private Leaderboard leaderboard() {
return Leaderboard.getInstance(_leaderboardKey, eventLog().asLogger(Stage.ModelTraining));
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/ModelingStepsProvider.java
|
package ai.h2o.automl;
/**
* A simple class used by service discovery to register new {@link ModelingSteps} implementations.
*/
public interface ModelingStepsProvider<T extends ModelingSteps> {
/**
* @return the name of this provider: must be unique among all registered providers.
*/
String getName();
/**
* Creates an instance of {@link ModelingSteps} associated to this provider's name,
* or returns null to fully skip this provider.
*
* @param aml the {@link AutoML} instance needed to build the {@link ModelingSteps}
* @return an instance of {@link ModelingSteps} listing all the various AutoML steps executable with this provider name.
*/
T newInstance(AutoML aml);
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/ModelingStepsRegistry.java
|
package ai.h2o.automl;
import ai.h2o.automl.events.EventLogEntry.Stage;
import ai.h2o.automl.StepDefinition.Alias;
import ai.h2o.automl.StepDefinition.Step;
import hex.Model;
import water.Iced;
import water.nbhm.NonBlockingHashMap;
import water.util.ArrayUtils;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* The registry responsible for loading all {@link ModelingStepsProvider} using service discovery,
* and providing the list of {@link ModelingStep} to execute.
*/
public class ModelingStepsRegistry extends Iced<ModelingStepsRegistry> {
static final NonBlockingHashMap<String, ModelingStepsProvider> stepsByName = new NonBlockingHashMap<>();
static final NonBlockingHashMap<String, ModelParametersProvider> parametersByName = new NonBlockingHashMap<>();
static {
ServiceLoader<ModelingStepsProvider> trainingStepsProviders = ServiceLoader.load(ModelingStepsProvider.class);
for (ModelingStepsProvider provider : trainingStepsProviders) {
registerProvider(provider);
}
}
public static void registerProvider(ModelingStepsProvider provider) {
stepsByName.put(provider.getName(), provider);
    if (provider instanceof ModelParametersProvider) { // mainly for providers hardcoded in this module, which is why the ModelingStepsProvider instance can be reused here
parametersByName.put(provider.getName(), (ModelParametersProvider)provider);
}
}
public static Model.Parameters defaultParameters(String provider) {
if (parametersByName.containsKey(provider)) {
return parametersByName.get(provider).newDefaultParameters();
}
return null;
}
/**
* @param aml the AutoML instance responsible to execute the {@link ModelingStep}s.
* @return the list of {@link ModelingStep}s to execute according to the given modeling plan.
*/
public ModelingStep[] getOrderedSteps(StepDefinition[] modelingPlan, AutoML aml) {
modelingPlan = aml.selectModelingPlan(modelingPlan);
aml.eventLog().info(Stage.Workflow, "Loading execution steps: "+Arrays.toString(modelingPlan));
List<ModelingStep> orderedSteps = new ArrayList<>();
for (StepDefinition def : modelingPlan) {
ModelingSteps steps = aml.session().getModelingSteps(def._name);
if (steps == null) continue;
ModelingStep[] toAdd;
if (def._alias != null) {
toAdd = steps.getSteps(def._alias);
} else if (def._steps != null) {
toAdd = steps.getSteps(def._steps);
if (toAdd.length < def._steps.length) {
List<String> toAddIds = Stream.of(toAdd).map(s -> s._id).collect(Collectors.toList());
Stream.of(def._steps)
.filter(s -> !toAddIds.contains(s._id))
.forEach(s -> aml.eventLog().warn(Stage.Workflow,
"Step '"+s._id+"' not defined in provider '"+def._name+"': skipping it."));
}
} else { // if name, but no alias or steps, put them all by default (support for simple syntax)
toAdd = steps.getSteps(Alias.all);
}
if (toAdd != null) {
for (ModelingStep ts : toAdd) {
ts._fromDef = def;
}
orderedSteps.addAll(Arrays.asList(toAdd));
}
}
return orderedSteps.stream()
            .filter(step -> step.getPriorityGroup() > 0 && step.getWeight() != 0) // negative weights can be used to register a step that is only meant to be loaded later by a dynamic step.
.sorted(Comparator.comparingInt(step -> step._priorityGroup))
.toArray(ModelingStep[]::new);
}
public StepDefinition[] createDefinitionPlanFromSteps(ModelingStep[] steps) {
List<StepDefinition> definitions = new ArrayList<>();
for (ModelingStep step : steps) {
Step stepDesc = new Step(step._id, step._priorityGroup, step._weight);
if (definitions.size() > 0) {
StepDefinition lastDef = definitions.get(definitions.size() - 1);
if (lastDef._name.equals(step._fromDef._name)) {
lastDef._steps = ArrayUtils.append(lastDef._steps, stepDesc);
continue;
}
}
definitions.add(new StepDefinition(step._fromDef._name, stepDesc));
}
return definitions.toArray(new StepDefinition[0]);
}
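  // Illustrative round-trip (comment only, not part of the original sources): adjacent steps that
  // share the same originating definition are folded back together. With hypothetical groups and
  // weights, an ordered list of steps such as [GBM:def_1 (2g, 10w), GBM:grid_1 (4g, 60w), GLM:def_1 (1g, 10w)]
  // would produce definitions whose toString() reads {GBM : [def_1 (2g, 10w), grid_1 (4g, 60w)]}, {GLM : [def_1 (1g, 10w)]}.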
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/Models.java
|
package ai.h2o.automl;
import hex.Model;
import hex.ModelContainer;
import water.*;
import water.api.schemas3.KeyV3;
import water.automl.api.schemas3.SchemaExtensions;
import water.util.ArrayUtils;
import java.lang.reflect.Array;
import java.lang.reflect.Modifier;
import java.util.Arrays;
public class Models<M extends Model> extends Lockable<Models<M>> implements ModelContainer<M> {
private final int _type_id;
private final Job _job;
private Key<M>[] _modelKeys = new Key[0];
public Models(Key<Models<M>> key, Class<M> clz) {
this(key, clz, null);
}
public Models(Key<Models<M>> key, Class<M> clz, Job job) {
super(key);
_type_id = (clz != null && !Modifier.isAbstract(clz.getModifiers())) ? TypeMap.getIcedId(clz.getName()) : -1;
_job = job;
}
@Override
public Key<M>[] getModelKeys() {
return _modelKeys.clone();
}
@Override
@SuppressWarnings("unchecked")
public M[] getModels() {
Arrays.stream(_modelKeys).forEach(DKV::prefetch);
Class<M> clz = (Class<M>)(_type_id >= 0 ? TypeMap.theFreezable(_type_id).getClass(): Model.class);
return Arrays.stream(_modelKeys)
.map(k -> k == null ? null : k.get())
.toArray(l -> (M[])Array.newInstance(clz, l));
}
@Override
public int getModelCount() {
return _modelKeys.length;
}
public void addModel(Key<M> key) {
addModels(new Key[]{key});
}
public void addModels(Key<M>[] keys) {
write_lock(_job);
_modelKeys = ArrayUtils.append(_modelKeys, keys);
update(_job);
unlock(_job);
}
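  // Illustrative sketch (comment only, not part of the original sources): typical lifecycle of this
  // container, mirroring its use in SelectionStep; the key, job and model key names are hypothetical.
  //
  //   Models<Model> models = new Models<>(Key.make("my_models"), Model.class, job);
  //   models.delete_and_lock(job);       // publish the (empty) container and lock it
  //   // ... train models ...
  //   models.unlock(job);
  //   models.addModel(trainedModelKey);  // addModels() re-acquires the write lock internally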
@Override
protected Futures remove_impl(final Futures fs, boolean cascade) {
if (cascade) {
for (Key<M> k : _modelKeys)
Keyed.remove(k, fs, true);
}
_modelKeys = new Key[0];
return super.remove_impl(fs, cascade);
}
@Override
public Class<SchemaExtensions.ModelsKeyV3> makeSchema() {
return SchemaExtensions.ModelsKeyV3.class;
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/StepDefinition.java
|
package ai.h2o.automl;
import water.Iced;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
/**
* Defines a step or a list of steps to be executed.
 * The step implementations are provided by instances of {@link ModelingStepsProvider}.
*/
public class StepDefinition extends Iced<StepDefinition> {
public enum Alias { all, defaults, grids, exploitation, optionals }
public static class Step extends Iced<Step> {
public static final int DEFAULT_GROUP = -1; // the step will use the default priority group defined by the ModelingStep.
public static final int DEFAULT_WEIGHT = -1; // the step will use the default weight defined by the ModelingStep.
/**
* The id of the step (must be unique per step provider).
*/
String _id;
int _group = DEFAULT_GROUP;
/**
* The relative weight for the given step.
* The higher the weight, the larger the share of the total runtime offered to this step in a time-constrained context.
* For hyperparameter searches, the weight may also affect the number of models trained in a model-count-constrained context.
*/
int _weight = DEFAULT_WEIGHT; // share of time dedicated
public Step() { /* for autofill from schema */ }
public Step(String _id) {
this._id = _id;
}
public Step(String id, int group, int weight) {
assert group == DEFAULT_GROUP || group >= 0: "non-default group must be >= 0";
assert weight == DEFAULT_WEIGHT || weight >= 0: "non-default weight must be >= 0";
this._id = id;
this._group = group;
this._weight = weight;
}
public String getId() {
return _id;
}
public int getGroup() {
return _group;
}
public int getWeight() {
return _weight;
}
@Override
public String toString() {
String s = _id;
if (_group > DEFAULT_GROUP || _weight > DEFAULT_WEIGHT) {
s += " (";
String sep = "";
if (_group > DEFAULT_GROUP) {
s += (sep+ _group +"g");
sep = ", ";
}
if (_weight > DEFAULT_WEIGHT) {
s += (sep+_weight+"w");
sep = ", ";
}
s += ")";
}
return s;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Step step = (Step) o;
return _id.equals(step._id)
&& _group == step._group
&& _weight == step._weight;
}
@Override
public int hashCode() {
return Objects.hash(_id, _group, _weight);
}
}
/**
* The name of the step provider ({@link ModelingStepsProvider}): this is usually also the name of the algorithm.
*/
String _name;
/**
* An alias representing a predefined list of steps to be executed.
*/
Alias _alias;
/**
* The list of steps to be executed.
*/
Step[] _steps;
public StepDefinition() { /* for autofill from schema */ }
public StepDefinition(String name) {
this(name, Alias.all);
}
public StepDefinition(String name, Alias alias) {
_name = name;
_alias = alias;
}
public StepDefinition(String name, String... ids) {
_name = name;
_steps = new Step[ids.length];
for (int i=0; i<ids.length; i++) _steps[i] = new Step(ids[i]);
}
public StepDefinition(String name, Step... steps) {
_name = name;
_steps = steps;
}
public String getName() {
return _name;
}
public Alias getAlias() {
return _alias;
}
public List<Step> getSteps() {
return _steps == null ? Collections.emptyList() : Collections.unmodifiableList(Arrays.asList(_steps));
}
@Override
public String toString() {
return "{"+_name+" : "+(_steps == null ? _alias : Arrays.toString(_steps))+"}";
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
StepDefinition that = (StepDefinition) o;
return _name.equals(that._name)
&& _alias == that._alias
&& Arrays.equals(_steps, that._steps);
}
@Override
public int hashCode() {
int result = Objects.hash(_name, _alias);
result = 31 * result + Arrays.hashCode(_steps);
return result;
}
}
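/*
 * Illustrative sketch (not part of the original source): the snippet below shows the different
 * ways a StepDefinition can be built, matching the constructors above. The provider names and
 * step ids mirror ids used elsewhere in this module but are assumptions for this example.
 */
class StepDefinitionUsageExample {
    static StepDefinition[] buildExamplePlan() {
        return new StepDefinition[] {
                new StepDefinition("GLM"),                                  // simple syntax: all steps of the provider
                new StepDefinition("GBM", StepDefinition.Alias.defaults),   // alias syntax: only the default models
                new StepDefinition("GBM", "def_1", "grid_1"),               // explicit ids, default group/weight
                new StepDefinition("StackedEnsemble",
                        new StepDefinition.Step("best_of_family", 2, 10))   // explicit id + priority group + weight
        };
    }
}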
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/StepResultState.java
|
package ai.h2o.automl;
import water.util.IcedHashMap;
import java.util.Collections;
import java.util.Map;
import static ai.h2o.automl.StepResultState.ResultStatus.*;
final class StepResultState {
enum ResultStatus {
skipped,
cancelled,
failed,
success,
}
enum Resolution {
sameAsMain, // resolves to the same state as the main step (ignoring other sub-step states).
optimistic, // success if any success, otherwise cancelled if any cancelled, otherwise failed if any failure, otherwise skipped.
pessimistic, // failed if any failure, otherwise cancelled if any cancelled, otherwise success if any success, otherwise skipped.
}
private final String _id;
private final IcedHashMap<String, StepResultState> _subStates = new IcedHashMap<>();
private ResultStatus _status;
private Throwable _error;
StepResultState(String id) {
this(id, (ResultStatus) null);
}
StepResultState(String id, ResultStatus status) {
this(id, status, null);
}
StepResultState(String id, Throwable error) {
this(id, failed, error);
}
private StepResultState(String id, ResultStatus status, Throwable error) {
_id = id;
_status = status;
_error = error;
}
void setStatus(ResultStatus status) {
assert _status == null;
_status = status;
}
void setStatus(Throwable error) {
setStatus(failed);
_error = error;
}
void addState(StepResultState state) {
_subStates.put(state.id(), state);
}
boolean is(ResultStatus status) {
return _status==status;
}
String id() {
return _id;
}
ResultStatus status() {
return _status;
}
Throwable error() {
return _error;
}
StepResultState subState(String id) {
return _subStates.get(id);
}
Map<String, StepResultState> subStates() {
return Collections.unmodifiableMap(_subStates);
}
void resolveState(Resolution strategy) {
if (_status != null) return;
if (_subStates.size() == 0) {
setStatus(skipped);
} else if (_subStates.size() == 1 && _subStates.containsKey(id())) {
StepResultState state = subState(id());
_status = state.status();
_error = state.error();
_subStates.clear();
_subStates.putAll(state.subStates());
} else {
switch (strategy) {
case sameAsMain:
StepResultState state = subState(id());
if (state != null) {
_status = state.status();
_error = state.error();
} else {
_status = cancelled;
}
break;
case optimistic:
if (_subStates.values().stream().anyMatch(s -> s.is(success)))
_status = success;
else if (_subStates.values().stream().anyMatch(s -> s.is(cancelled)))
_status = cancelled;
else if (_subStates.values().stream().anyMatch(s -> s.is(failed)))
_subStates.values().stream().filter(s -> s.is(failed)).limit(1).findFirst().ifPresent(s -> setStatus(s.error()));
else _status = skipped;
break;
case pessimistic:
if (_subStates.values().stream().anyMatch(s -> s.is(failed)))
_subStates.values().stream().filter(s -> s.is(failed)).limit(1).findFirst().ifPresent(s -> setStatus(s.error()));
else if (_subStates.values().stream().anyMatch(s -> s.is(cancelled)))
_status = cancelled;
else if (_subStates.values().stream().anyMatch(s -> s.is(success)))
_status = success;
else _status = skipped;
break;
}
}
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("StepResultState{");
sb.append("_id='").append(_id).append('\'');
sb.append(", _status=").append(_status);
if (_error != null) sb.append(", _error=").append(_error);
if (_subStates.size() > 0) sb.append(", _subStates=").append(_subStates);
sb.append('}');
return sb.toString();
}
}
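/*
 * Illustrative sketch (not part of the original source): shows how a composite step state could be
 * resolved from its sub-step states using the pessimistic strategy. The ids are hypothetical.
 */
class StepResultStateUsageExample {
    static StepResultState.ResultStatus resolveExample() {
        StepResultState parent = new StepResultState("GBM:grid_1");
        parent.addState(new StepResultState("GBM:grid_1 (model 1)", StepResultState.ResultStatus.success));
        parent.addState(new StepResultState("GBM:grid_1 (model 2)", StepResultState.ResultStatus.cancelled));
        // pessimistic: any failure wins, then cancelled, then success, then skipped -> cancelled here
        parent.resolveState(StepResultState.Resolution.pessimistic);
        return parent.status();
    }
}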
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/TimedH2ORunnable.java
|
package ai.h2o.automl;
public interface TimedH2ORunnable extends H2ORunnable {
boolean keepRunning();
long timeRemainingMs();
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/WorkAllocations.java
|
package ai.h2o.automl;
import water.Iced;
import water.util.ArrayUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Stream;
public class WorkAllocations extends Iced<WorkAllocations> {
public enum JobType {
Unknown,
ModelBuild,
HyperparamSearch,
Selection,
Dynamic,
}
public static class Work extends Iced<Work> {
String _id;
IAlgo _algo;
JobType _type;
int _priorityGroup;
int _weight;
Work(String id, IAlgo algo, JobType type, int priorityGroup, int weight) {
this._algo = algo;
this._type = type;
this._id = id;
this._priorityGroup = priorityGroup;
this._weight = weight;
}
public int consume() {
int consumed = _weight;
_weight = 0;
return consumed;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("Work{")
.append(_id).append(", ")
.append(_algo.name()).append(", ")
.append(_type).append(", ")
.append("group=").append(_priorityGroup).append(", ")
.append("weight=").append(_weight)
.append('}');
return sb.toString();
}
}
private boolean frozen;
private Work[] allocations = new Work[0];
WorkAllocations allocate(Work work) {
if (frozen) throw new IllegalStateException("Can not allocate new work.");
allocations = ArrayUtils.append(allocations, work);
return this;
}
WorkAllocations freeze() {
frozen = true;
return this;
}
void remove(IAlgo algo) {
if (frozen) throw new IllegalStateException("Can not modify allocations.");
List<Work> filtered = new ArrayList<>(allocations.length);
for (Work alloc : allocations) {
if (!algo.name().equals(alloc._algo.name())) {
filtered.add(alloc);
}
}
allocations = filtered.toArray(new Work[0]);
}
public Work getAllocation(String id, IAlgo algo) {
for (Work alloc : allocations) {
if (alloc._algo.name().equals(algo.name()) && alloc._id.equals(id)) return alloc;
}
return null;
}
public Work[] getAllocations(Predicate<Work> predicate) {
return Stream.of(allocations)
.filter(predicate)
.toArray(Work[]::new);
}
private int sum(Work[] workItems) {
int tot = 0;
for (Work item : workItems) {
if (item._weight > 0)
tot += item._weight;
}
return tot;
}
int remainingWork() {
return sum(allocations);
}
int remainingWork(Predicate<Work> predicate) {
return sum(getAllocations(predicate));
}
float remainingWorkRatio(Work work) {
return (float) work._weight / remainingWork();
}
float remainingWorkRatio(Work work, Predicate<Work> predicate) {
return (float) work._weight / remainingWork(predicate);
}
@Override
public String toString() {
return Arrays.toString(allocations);
}
}
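/*
 * Illustrative sketch (not part of the original source): shows how work can be allocated and how the
 * remaining-work ratio is derived. It assumes that ai.h2o.automl.Algo implements IAlgo, as suggested
 * by its use throughout the modeling steps; the ids, groups and weights are hypothetical.
 */
class WorkAllocationsUsageExample {
    static float exampleRatio() {
        WorkAllocations allocations = new WorkAllocations();
        WorkAllocations.Work gbmWork = new WorkAllocations.Work("def_1", Algo.GBM, WorkAllocations.JobType.ModelBuild, 1, 10);
        WorkAllocations.Work drfWork = new WorkAllocations.Work("def_1", Algo.DRF, WorkAllocations.JobType.ModelBuild, 1, 30);
        allocations.allocate(gbmWork)
                   .allocate(drfWork)
                   .freeze();
        // 10 / (10 + 30) = 0.25 of the remaining work budget belongs to the GBM step
        return allocations.remainingWorkRatio(gbmWork);
    }
}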
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/events/EventLog.java
|
package ai.h2o.automl.events;
import ai.h2o.automl.events.EventLogEntry.Stage;
import water.DKV;
import water.Futures;
import water.Key;
import water.Keyed;
import water.logging.Logger;
import water.logging.LoggerFactory;
import water.logging.LoggingLevel;
import water.util.TwoDimTable;
import java.io.Serializable;
import java.util.function.Predicate;
import java.util.stream.Stream;
/**
* EventLog instances store significant events occurring during an AutoML run.
* Events are formatted with the intent of being rendered on the client side.
*/
public class EventLog extends Keyed<EventLog> {
private static final Logger log = LoggerFactory.getLogger(EventLog.class);
public final Key _runner_id;
public EventLogEntry[] _events;
public EventLog(Key runKey) {
_runner_id = runKey;
_key = Key.make(idForRun(runKey));
_events = new EventLogEntry[0];
}
public static EventLog getOrMake(Key runKey) {
EventLog eventLog = DKV.getGet(Key.make(idForRun(runKey)));
if (null == eventLog) {
eventLog = new EventLog(runKey);
}
DKV.put(eventLog);
return eventLog;
}
private static String idForRun(Key runKey) {
if (null == runKey)
return "Events_dummy";
return "Events_" + runKey;
}
/** Add a Debug EventLogEntry and log. */
public <V extends Serializable> EventLogEntry<V> debug(Stage stage, String message) {
log.debug(stage+": "+message);
return addEvent(LoggingLevel.DEBUG, stage, message);
}
/** Add a Info EventLogEntry and log. */
public <V extends Serializable> EventLogEntry<V> info(Stage stage, String message) {
log.info(stage+": "+message);
return addEvent(LoggingLevel.INFO, stage, message);
}
/** Add a Warn EventLogEntry and log. */
public <V extends Serializable> EventLogEntry<V> warn(Stage stage, String message) {
log.warn(stage+": "+message);
return addEvent(LoggingLevel.WARN, stage, message);
}
/** Add an Error EventLogEntry and log. */
public <V extends Serializable> EventLogEntry<V> error(Stage stage, String message) {
log.error(stage+": "+message);
return addEvent(LoggingLevel.ERROR, stage, message);
}
/** Add an EventLogEntry, but don't log. */
public <V extends Serializable> EventLogEntry<V> addEvent(LoggingLevel level, Stage stage, String message) {
EventLogEntry<V> entry = new EventLogEntry<>(_runner_id, level, stage, message);
addEvent(entry);
return entry;
}
/** Add an EventLogEntry, but don't log. */
public void addEvent(EventLogEntry event) {
EventLogEntry[] oldEvents = _events;
EventLogEntry[] newEvents = new EventLogEntry[_events.length + 1];
System.arraycopy(oldEvents, 0, newEvents, 0, oldEvents.length);
newEvents[oldEvents.length] = event;
_events = newEvents;
} // addEvent
/**
* Delete object and its dependencies from DKV, including models.
*/
@Override
protected Futures remove_impl(Futures fs, boolean cascade) {
_events = new EventLogEntry[0];
return super.remove_impl(fs, cascade);
}
public TwoDimTable toTwoDimTable() {
return toTwoDimTable(null);
}
public TwoDimTable toTwoDimTable(Predicate<EventLogEntry> predicate) {
String name = _runner_id == null ? "(new)" : _runner_id.toString();
return toTwoDimTable("Event Log for:" + name, predicate);
}
public TwoDimTable toTwoDimTable(String tableHeader, Predicate<EventLogEntry> predicate) {
final EventLogEntry[] events = predicate == null
? _events.clone()
: Stream.of(_events.clone())
.filter(predicate)
.toArray(EventLogEntry[]::new);
TwoDimTable table = EventLogEntry.makeTwoDimTable(tableHeader, events.length);
for (int i = 0; i < events.length; i++)
events[i].addTwoDimTableRow(table, i);
return table;
}
@Override
public String toString() {
return this.toTwoDimTable().toString();
}
public Logger asLogger(Stage stage) {
final EventLog el = this;
return new Logger() {
@Override
public void trace(String message) {
el.debug(stage, message);
}
@Override
public void debug(String message) {
el.debug(stage, message);
}
@Override
public void info(String message) {
el.info(stage, message);
}
@Override
public void warn(String message) {
el.warn(stage, message);
}
@Override
public void error(String message) {
el.error(stage, message);
}
@Override
public void fatal(String message) {
el.error(stage, message);
}
@Override
public boolean isTraceEnabled() {
return true;
}
@Override
public boolean isDebugEnabled() {
return true;
}
@Override
public boolean isInfoEnabled() {
return true;
}
@Override
public boolean isWarnEnabled() {
return true;
}
@Override
public boolean isErrorEnabled() {
return true;
}
@Override
public boolean isFatalEnabled() {
return true;
}
};
}
}
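/*
 * Illustrative sketch (not part of the original source): shows the typical lifecycle of an EventLog —
 * create (or fetch) it for a run key, add a few entries, then render them as a table. The run key
 * name and messages are hypothetical, and a running H2O cluster is assumed for DKV access.
 */
class EventLogUsageExample {
    static TwoDimTable exampleTable() {
        EventLog eventLog = EventLog.getOrMake(Key.make("AutoML_example_run"));
        eventLog.info(Stage.Workflow, "AutoML build started");
        eventLog.warn(Stage.ModelTraining, "Time budget nearly exhausted");
        // only keep the warning entries when rendering the table
        return eventLog.toTwoDimTable(entry -> entry.getLevel() == LoggingLevel.WARN);
    }
}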
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/events/EventLogEntry.java
|
package ai.h2o.automl.events;
import ai.h2o.automl.AutoML;
import water.Iced;
import water.Key;
import water.logging.LoggingLevel;
import water.util.TwoDimTable;
import java.io.Serializable;
import java.text.FieldPosition;
import java.text.Format;
import java.text.ParsePosition;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Objects;
public class EventLogEntry<V extends Serializable> extends Iced {
public enum Stage {
Validation,
Workflow,
DataImport,
FeatureAnalysis,
FeatureReduction,
FeatureCreation,
ModelTraining,
ModelSelection,
}
static TwoDimTable makeTwoDimTable(String tableHeader, int length) {
String[] rowHeaders = new String[length];
for (int i = 0; i < length; i++) rowHeaders[i] = "" + i;
return new TwoDimTable(
tableHeader,
"Actions taken and discoveries made by AutoML",
rowHeaders,
EventLogEntry.colHeaders,
EventLogEntry.colTypes,
EventLogEntry.colFormats,
"#"
);
}
static String nowStr() {
return dateTimeFormat.get().format(new Date());
}
static abstract class SimpleFormat<T> extends Format {
@Override
public StringBuffer format(Object obj, StringBuffer toAppendTo, FieldPosition pos) {
pos.setBeginIndex(0);
pos.setEndIndex(0);
format((T)obj, toAppendTo);
return toAppendTo;
}
public abstract StringBuffer format(T t, StringBuffer toAppendTo);
@Override
public Object parseObject(String source, ParsePosition pos) {
return null;
}
}
public static final ThreadLocal<Format> epochFormat = ThreadLocal.withInitial(() -> new SimpleFormat<Date>() {
@Override
public StringBuffer format(Date date, StringBuffer toAppendTo) {
long epoch = Math.round(date.getTime() / 1e3);
toAppendTo.append(epoch);
return toAppendTo;
}
});
// uses local timezone
public static final ThreadLocal<SimpleDateFormat> dateTimeISOFormat = ThreadLocal.withInitial(() -> new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS"));
// uses local timezone
public static final ThreadLocal<SimpleDateFormat> dateTimeFormat = ThreadLocal.withInitial(() -> new SimpleDateFormat("yyyy.MM.dd HH:mm:ss.S"));
// uses local timezone
public static final ThreadLocal<SimpleDateFormat> timeFormat = ThreadLocal.withInitial(() ->new SimpleDateFormat("HH:mm:ss.S"));
private static final String[] colHeaders = {
"timestamp",
"level",
"stage",
"message",
"name",
"value",
};
private static final String[] colTypes= {
"string",
"string",
"string",
"string",
"string",
"string",
};
private static final String[] colFormats= {
"%s",
"%s",
"%s",
"%s",
"%s",
"%s",
};
private static <E extends Enum<E>> int longest(Class<E> enu) {
int longest = -1;
for (E v : enu.getEnumConstants())
longest = Math.max(longest, v.name().length());
return longest;
}
private final int longestLevel = longest(LoggingLevel.class); // for formatting
private final int longestStage = longest(Stage.class); // for formatting
private Key<AutoML> _automlKey;
private long _timestamp;
private LoggingLevel _level;
private Stage _stage;
private String _message;
private String _name;
private V _value;
private Format _valueFormatter;
public Key<AutoML> getAutomlKey() { return _automlKey; }
public long getTimestamp() {
return _timestamp;
}
public LoggingLevel getLevel() {
return _level;
}
public Stage getStage() {
return _stage;
}
public String getMessage() {
return _message;
}
public String getName() {
return _name;
}
public V getValue() {
return _value;
}
public Format getValueFormatter() {
return _valueFormatter;
}
public EventLogEntry(Key<AutoML> automlKey, LoggingLevel level, Stage stage, String message) {
_automlKey = automlKey;
_timestamp = System.currentTimeMillis();
_level = level;
_stage = stage;
_message = message;
}
public void setNamedValue(String name, V value) {
setNamedValue(name, value, null);
}
public void setNamedValue(String name, V value, Format formatter) {
_name = name;
_value = value;
_valueFormatter = formatter;
}
void addTwoDimTableRow(TwoDimTable table, int row) {
int col = 0;
table.set(row, col++, timeFormat.get().format(new Date(_timestamp)));
table.set(row, col++, _level);
table.set(row, col++, _stage);
table.set(row, col++, _message);
table.set(row, col++, _name);
table.set(row, col++, _valueFormatter == null ? _value : _valueFormatter.format(_value));
}
@Override
public String toString() {
return String.format("%-12s %-"+longestLevel+"s %-"+longestStage+"s %s %s %s",
timeFormat.get().format(new Date(_timestamp)),
_level,
_stage,
Objects.toString(_message, ""),
Objects.toString(_name, ""),
_valueFormatter == null ? Objects.toString(_value, "") : _valueFormatter.format(_value)
);
}
}
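/*
 * Illustrative sketch (not part of the original source): shows how a named value with a custom
 * formatter can be attached to an entry; the epochFormat defined above renders the Date as epoch
 * seconds. The entry name and message are hypothetical.
 */
class EventLogEntryUsageExample {
    static String exampleRow() {
        EventLogEntry<Date> entry = new EventLogEntry<>(null, LoggingLevel.INFO, EventLogEntry.Stage.Workflow, "AutoML build stopped");
        entry.setNamedValue("stop_time", new Date(), EventLogEntry.epochFormat.get());
        // toString() renders the timestamp, level, stage, message and the formatted named value
        return entry.toString();
    }
}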
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/leaderboard/ModelGroup.java
|
package ai.h2o.automl.leaderboard;
import ai.h2o.automl.ModelingStep;
import hex.Model;
import hex.leaderboard.LeaderboardCell;
import hex.leaderboard.LeaderboardColumn;
import water.Iced;
import water.Key;
public class ModelGroup extends Iced<ModelGroup> implements LeaderboardCell<Integer, ModelGroup> {
public static final LeaderboardColumn COLUMN = new LeaderboardColumn("group", "int", "%s", true);
final Key<Model> _modelId;
final int _priorityGroup;
public ModelGroup(Model model, ModelingStep step) {
_modelId = model._key;
_priorityGroup = step == null ? -1 : step.getPriorityGroup();
}
@Override
public LeaderboardColumn getColumn() {
return COLUMN;
}
@Override
public Key<Model> getModelId() {
return _modelId;
}
@Override
public Integer getValue() {
return _priorityGroup;
}
@Override
public void setValue(Integer value) {
throw new UnsupportedOperationException();
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/leaderboard/ModelProvider.java
|
package ai.h2o.automl.leaderboard;
import ai.h2o.automl.ModelingStep;
import hex.Model;
import hex.leaderboard.LeaderboardCell;
import hex.leaderboard.LeaderboardColumn;
import water.Iced;
import water.Key;
public class ModelProvider extends Iced<ModelProvider> implements LeaderboardCell<String, ModelProvider> {
public static final LeaderboardColumn COLUMN = new LeaderboardColumn("provider", "string", "%s", true);
final Key<Model> _modelId;
final String _provider;
public ModelProvider(Model model, ModelingStep step) {
_modelId = model._key;
_provider = step == null ? "" : step.getProvider();
}
@Override
public LeaderboardColumn getColumn() {
return COLUMN;
}
@Override
public Key<Model> getModelId() {
return _modelId;
}
@Override
public String getValue() {
return _provider;
}
@Override
public void setValue(String value) {
throw new UnsupportedOperationException();
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/leaderboard/ModelSize.java
|
package ai.h2o.automl.leaderboard;
import hex.Model;
import hex.leaderboard.LeaderboardCell;
import hex.leaderboard.LeaderboardColumn;
import water.Iced;
import water.Key;
/**
* A cell computing lazily the size of a model.
*/
public class ModelSize extends Iced<ModelSize> implements LeaderboardCell<Long, ModelSize> {
public static final LeaderboardColumn COLUMN = new LeaderboardColumn("model_size_bytes", "long", "%s");
private final Key<Model> _modelId;
private Long _model_size;
public ModelSize(Key<Model> modelId) {
_modelId = modelId;
}
@Override
public LeaderboardColumn getColumn() {
return COLUMN;
}
@Override
public Key<Model> getModelId() {
return _modelId;
}
@Override
public Long getValue() {
return _model_size;
}
@Override
public void setValue(Long value) {
_model_size = value;
}
@Override
public boolean isNA() {
return getValue() == null || getValue() < 0;
}
@Override
public Long fetch() {
if (getValue() == null) {
try {
// PUBDEV-7124:
// Model model = _modelId.get();
// export binary model to temp folder
// read size
// delete saved model
} catch (Exception e) {
setValue(-1L);
}
}
return getValue();
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/leaderboard/ModelStep.java
|
package ai.h2o.automl.leaderboard;
import ai.h2o.automl.ModelingStep;
import hex.Model;
import hex.leaderboard.LeaderboardCell;
import hex.leaderboard.LeaderboardColumn;
import water.Iced;
import water.Key;
public class ModelStep extends Iced<ModelStep> implements LeaderboardCell<String, ModelStep> {
public static final LeaderboardColumn COLUMN = new LeaderboardColumn("step", "string", "%s", true);
final Key<Model> _modelId;
final String _stepId;
public ModelStep(Model model, ModelingStep step) {
_modelId = model._key;
_stepId = step == null ? "" : step.getId();
}
@Override
public LeaderboardColumn getColumn() {
return COLUMN;
}
@Override
public Key<Model> getModelId() {
return _modelId;
}
@Override
public String getValue() {
return _stepId;
}
@Override
public void setValue(String value) {
throw new UnsupportedOperationException();
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/modeling/CompletionStepsProvider.java
|
package ai.h2o.automl.modeling;
import ai.h2o.automl.*;
import ai.h2o.automl.ModelingStep.DynamicStep;
import hex.Model;
import hex.grid.Grid;
import hex.grid.HyperSpaceSearchCriteria.RandomDiscreteValueSearchCriteria;
import hex.leaderboard.Leaderboard;
import water.Job;
import water.Key;
import java.util.*;
import java.util.stream.Collectors;
public class CompletionStepsProvider implements ModelingStepsProvider<CompletionStepsProvider.CompletionSteps> {
public static class CompletionSteps extends ModelingSteps {
static final String NAME = "completion";
static class ResumingGridStep extends ModelingStep.GridStep {
private transient GridStep _step;
public ResumingGridStep(GridStep step, int priorityGroup, int weight, AutoML aml) {
super(NAME, step.getAlgo(), step.getProvider()+"_"+step.getId(), priorityGroup, weight, aml);
_work = makeWork();
_step = step;
}
@Override
public boolean canRun() {
return _step != null && _weight > 0;
}
@Override
public Model.Parameters prepareModelParameters() {
return _step.prepareModelParameters();
}
@Override
public Map<String, Object[]> prepareSearchParameters() {
return _step.prepareSearchParameters();
}
@Override
protected void setSearchCriteria(RandomDiscreteValueSearchCriteria searchCriteria, Model.Parameters baseParms) {
super.setSearchCriteria(searchCriteria, baseParms);
searchCriteria.set_stopping_rounds(0);
}
@Override
@SuppressWarnings("unchecked")
protected Job<Grid> startJob() {
Key<Grid>[] resumedGrid = aml().session().getResumableKeys(_step.getProvider(), _step.getId());
if (resumedGrid.length == 0) return null;
return hyperparameterSearch(resumedGrid[0], prepareModelParameters(), prepareSearchParameters());
}
}
static class ResumeBestNGridsStep extends DynamicStep<Model> {
private final int _nGrids;
public ResumeBestNGridsStep(String id, int nGrids, AutoML autoML) {
super(NAME, id, autoML);
_nGrids = nGrids;
}
private List<ModelingStep> sortModelingStepByPerf() {
Map<ModelingStep, List<Double>> scoresBySource = new HashMap<>();
Model[] models = getTrainedModels();
double[] metrics = aml().leaderboard().getSortMetricValues();
if (metrics == null) return Collections.emptyList();
for (int i = 0; i < models.length; i++) {
ModelingStep source = aml().session().getModelingStep(models[i]._key);
if (!scoresBySource.containsKey(source)) {
scoresBySource.put(source, new ArrayList<>());
}
scoresBySource.get(source).add(metrics[i]);
}
Comparator<Map.Entry<ModelingStep, Double>> metricsComparator = Map.Entry.comparingByValue();
if (!Leaderboard.isLossFunction(aml().leaderboard().getSortMetric())) metricsComparator = metricsComparator.reversed();
return scoresBySource.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
e -> e.getValue().stream().mapToDouble(Double::doubleValue).average().orElse(-1)
))
.entrySet().stream().sorted(metricsComparator)
.filter(e -> e.getValue() >= 0)
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
@Override
protected Collection<ModelingStep> prepareModelingSteps() {
List<ModelingStep> bestStep = sortModelingStepByPerf();
return bestStep.stream()
.filter(ModelingStep::isResumable)
.filter(GridStep.class::isInstance)
// .map(s -> aml().getModelingStep(s.getProvider(), s.getId()+"_resume"))
// .filter(Objects::nonNull)
.limit(_nGrids)
.map(s -> new ResumingGridStep((GridStep)s, _priorityGroup, _weight/_nGrids, aml()))
.collect(Collectors.toList());
}
}
private final ModelingStep[] optionals = new ModelingStep[] {
new ResumeBestNGridsStep("resume_best_grids", 2, aml())
};
public CompletionSteps(AutoML autoML) {
super(autoML);
}
@Override
public String getProvider() {
return NAME;
}
@Override
protected ModelingStep[] getOptionals() {
return optionals;
}
}
@Override
public String getName() {
return CompletionSteps.NAME;
}
@Override
public CompletionSteps newInstance(AutoML aml) {
return new CompletionSteps(aml);
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/modeling/DRFStepsProvider.java
|
package ai.h2o.automl.modeling;
import ai.h2o.automl.*;
import hex.tree.SharedTreeModel.SharedTreeParameters.HistogramType;
import hex.tree.drf.DRFModel;
import hex.tree.drf.DRFModel.DRFParameters;
import water.Job;
import water.Key;
public class DRFStepsProvider
implements ModelingStepsProvider<DRFStepsProvider.DRFSteps>
, ModelParametersProvider<DRFParameters> {
public static class DRFSteps extends ModelingSteps {
static final String NAME = Algo.DRF.name();
static abstract class DRFModelStep extends ModelingStep.ModelStep<DRFModel> {
DRFModelStep(String id, AutoML autoML) {
super(NAME, Algo.DRF, id, autoML);
}
public DRFParameters prepareModelParameters() {
DRFParameters params = new DRFParameters();
params._score_tree_interval = 5;
return params;
}
}
private final ModelingStep[] defaults = new DRFModelStep[] {
new DRFModelStep("def_1", aml()) {},
new DRFModelStep("XRT", aml()) {
{ _description = _description+" (Extremely Randomized Trees)"; }
@Override
public DRFParameters prepareModelParameters() {
DRFParameters params = super.prepareModelParameters();
params._histogram_type = HistogramType.Random;
return params;
}
@Override
protected Job<DRFModel> startJob() {
Key<DRFModel> key = makeKey("XRT", true);
return trainModel(key, prepareModelParameters());
}
},
};
public DRFSteps(AutoML autoML) {
super(autoML);
}
@Override
public String getProvider() {
return NAME;
}
@Override
protected ModelingStep[] getDefaultModels() {
return defaults;
}
}
@Override
public String getName() {
return DRFSteps.NAME;
}
@Override
public DRFSteps newInstance(AutoML aml) {
return new DRFSteps(aml);
}
@Override
public DRFParameters newDefaultParameters() {
return new DRFParameters();
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/modeling/DeepLearningStepsProvider.java
|
package ai.h2o.automl.modeling;
import ai.h2o.automl.*;
import ai.h2o.automl.preprocessing.PreprocessingConfig;
import ai.h2o.automl.preprocessing.TargetEncoding;
import hex.deeplearning.DeepLearningModel;
import hex.deeplearning.DeepLearningModel.DeepLearningParameters;
import java.util.HashMap;
import java.util.Map;
public class DeepLearningStepsProvider
implements ModelingStepsProvider<DeepLearningStepsProvider.DeepLearningSteps>
, ModelParametersProvider<DeepLearningParameters> {
public static class DeepLearningSteps extends ModelingSteps {
static final String NAME = Algo.DeepLearning.name();
static abstract class DeepLearningModelStep extends ModelingStep.ModelStep<DeepLearningModel> {
public DeepLearningModelStep(String id, AutoML autoML) {
super(NAME, Algo.DeepLearning, id, autoML);
}
@Override
protected PreprocessingConfig getPreprocessingConfig() {
//TE useless for DNN
PreprocessingConfig config = super.getPreprocessingConfig();
config.put(TargetEncoding.CONFIG_PREPARE_CV_ONLY, aml().isCVEnabled());
return config;
}
}
static abstract class DeepLearningGridStep extends ModelingStep.GridStep<DeepLearningModel> {
DeepLearningGridStep(String id, AutoML autoML) {
super(NAME, Algo.DeepLearning, id, autoML);
}
public DeepLearningParameters prepareModelParameters() {
DeepLearningParameters params = new DeepLearningParameters();
params._epochs = 10000; // early stopping takes care of epochs - no need to tune!
params._adaptive_rate = true;
params._activation = DeepLearningParameters.Activation.RectifierWithDropout;
return params;
}
@Override
protected PreprocessingConfig getPreprocessingConfig() {
//TE useless for DNN
PreprocessingConfig config = super.getPreprocessingConfig();
config.put(TargetEncoding.CONFIG_PREPARE_CV_ONLY, aml().isCVEnabled());
return config;
}
public Map<String, Object[]> prepareSearchParameters() {
Map<String, Object[]> searchParams = new HashMap<>();
searchParams.put("_rho", new Double[] { 0.9, 0.95, 0.99 });
searchParams.put("_epsilon", new Double[] { 1e-6, 1e-7, 1e-8, 1e-9 });
searchParams.put("_input_dropout_ratio", new Double[] { 0.0, 0.05, 0.1, 0.15, 0.2 });
return searchParams;
}
}
private final ModelingStep[] defaults = new DeepLearningModelStep[] {
new DeepLearningModelStep("def_1", aml()) {
@Override
public DeepLearningParameters prepareModelParameters() {
DeepLearningParameters params = new DeepLearningParameters(); // don't use common params for default DL
params._hidden = new int[]{ 10, 10, 10 };
return params;
}
},
};
private final ModelingStep[] grids = new DeepLearningGridStep[] {
new DeepLearningGridStep("grid_1", aml()) {
@Override
public Map<String, Object[]> prepareSearchParameters() {
Map<String, Object[]> searchParams = super.prepareSearchParameters();
searchParams.put("_hidden", new Integer[][] {
{ 20 },
{ 50 },
{ 100 }
});
searchParams.put("_hidden_dropout_ratios", new Double[][] {
{ 0.0 },
{ 0.1 },
{ 0.2 },
{ 0.3 },
{ 0.4 },
{ 0.5 }
});
return searchParams;
}
},
new DeepLearningGridStep("grid_2", aml()) {
@Override
public Map<String, Object[]> prepareSearchParameters() {
Map<String, Object[]> searchParams = super.prepareSearchParameters();
searchParams.put("_hidden", new Integer[][] {
{ 20, 20 },
{ 50, 50 },
{ 100, 100 }
});
searchParams.put("_hidden_dropout_ratios", new Double[][] {
{ 0.0, 0.0 },
{ 0.1, 0.1 },
{ 0.2, 0.2 },
{ 0.3, 0.3 },
{ 0.4, 0.4 },
{ 0.5, 0.5 }
});
return searchParams;
}
},
new DeepLearningGridStep("grid_3", aml()) {
@Override
public Map<String, Object[]> prepareSearchParameters() {
Map<String, Object[]> searchParams = super.prepareSearchParameters();
searchParams.put("_hidden", new Integer[][] {
{ 20, 20, 20 },
{ 50, 50, 50 },
{ 100, 100, 100 }
});
searchParams.put("_hidden_dropout_ratios", new Double[][] {
{ 0.0, 0.0, 0.0 },
{ 0.1, 0.1, 0.1 },
{ 0.2, 0.2, 0.2 },
{ 0.3, 0.3, 0.3 },
{ 0.4, 0.4, 0.4 },
{ 0.5, 0.5, 0.5 }
});
return searchParams;
}
},
};
public DeepLearningSteps(AutoML autoML) {
super(autoML);
}
@Override
public String getProvider() {
return NAME;
}
@Override
protected ModelingStep[] getDefaultModels() {
return defaults;
}
@Override
protected ModelingStep[] getGrids() {
return grids;
}
}
@Override
public String getName() {
return DeepLearningSteps.NAME;
}
@Override
public DeepLearningSteps newInstance(AutoML aml) {
return new DeepLearningSteps(aml);
}
@Override
public DeepLearningParameters newDefaultParameters() {
return new DeepLearningParameters();
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/modeling/GBMStepsProvider.java
|
package ai.h2o.automl.modeling;
import ai.h2o.automl.*;
import ai.h2o.automl.ModelSelectionStrategies.KeepBestN;
import ai.h2o.automl.events.EventLogEntry;
import hex.Model;
import hex.tree.SharedTreeModel;
import hex.tree.gbm.GBMModel;
import hex.tree.gbm.GBMModel.GBMParameters;
import water.Job;
import water.Key;
import java.util.*;
public class GBMStepsProvider
implements ModelingStepsProvider<GBMStepsProvider.GBMSteps>
, ModelParametersProvider<GBMParameters> {
public static class GBMSteps extends ModelingSteps {
static final String NAME = Algo.GBM.name();
static GBMParameters prepareModelParameters() {
GBMParameters params = new GBMParameters();
params._score_tree_interval = 5;
params._histogram_type = SharedTreeModel.SharedTreeParameters.HistogramType.AUTO;
return params;
}
static abstract class GBMModelStep extends ModelingStep.ModelStep<GBMModel> {
GBMModelStep(String id, AutoML autoML) {
super(NAME, Algo.GBM, id, autoML);
}
public GBMParameters prepareModelParameters() {
GBMParameters params = GBMSteps.prepareModelParameters();
params._ntrees = 10000;
params._sample_rate = 0.8;
params._col_sample_rate = 0.8;
params._col_sample_rate_per_tree = 0.8;
return params;
}
}
static abstract class GBMGridStep extends ModelingStep.GridStep<GBMModel> {
public GBMGridStep(String id, AutoML autoML) {
super(NAME, Algo.GBM, id, autoML);
}
public GBMParameters prepareModelParameters() {
GBMParameters params = GBMSteps.prepareModelParameters();
params._ntrees = 10000;
return params;
}
}
static abstract class GBMExploitationStep extends ModelingStep.SelectionStep<GBMModel> {
protected GBMModel getBestGBM() {
for (Model model : getTrainedModels()) {
if (model instanceof GBMModel) {
return (GBMModel) model;
}
}
return null;
}
@Override
public boolean canRun() {
return super.canRun() && getBestGBM() != null;
}
public GBMExploitationStep(String id, AutoML autoML) {
super(NAME, Algo.GBM, id, autoML);
if (autoML.getBuildSpec().build_models.exploitation_ratio > 0)
_ignoredConstraints = new AutoML.Constraint[] { AutoML.Constraint.MODEL_COUNT };
}
}
private final ModelingStep[] defaults = new GBMModelStep[] {
new GBMModelStep("def_1", aml()) {
@Override
public GBMParameters prepareModelParameters() {
GBMParameters params = super.prepareModelParameters();
params._max_depth = 6;
params._min_rows = 1;
return params;
}
},
new GBMModelStep("def_2", aml()) {
@Override
public GBMParameters prepareModelParameters() {
GBMParameters params = super.prepareModelParameters();
params._max_depth = 7;
params._min_rows = 10;
return params;
}
},
new GBMModelStep("def_3", aml()) {
@Override
public GBMParameters prepareModelParameters() {
GBMParameters params = super.prepareModelParameters();
params._max_depth = 8;
params._min_rows = 10;
return params;
}
},
new GBMModelStep("def_4", aml()) {
@Override
public GBMParameters prepareModelParameters() {
GBMParameters params = super.prepareModelParameters();
params._max_depth = 10;
params._min_rows = 10;
return params;
}
},
new GBMModelStep("def_5", aml()) {
@Override
public GBMParameters prepareModelParameters() {
GBMParameters params = super.prepareModelParameters();
params._max_depth = 15;
params._min_rows = 100;
return params;
}
},
};
static class DefaultGBMGridStep extends GBMGridStep {
public DefaultGBMGridStep(String id, AutoML autoML) {
super(id, autoML);
}
@Override
public Map<String, Object[]> prepareSearchParameters() {
Map<String, Object[]> searchParams = new HashMap<>();
searchParams.put("_max_depth", new Integer[]{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17});
searchParams.put("_min_rows", new Integer[]{1, 5, 10, 15, 30, 100});
// searchParams.put("_learn_rate", new Double[]{0.001, 0.005, 0.008, 0.01, 0.05, 0.08, 0.1, 0.5, 0.8});
searchParams.put("_sample_rate", new Double[]{0.50, 0.60, 0.70, 0.80, 0.90, 1.00});
searchParams.put("_col_sample_rate", new Double[]{ 0.4, 0.7, 1.0});
searchParams.put("_col_sample_rate_per_tree", new Double[]{ 0.4, 0.7, 1.0});
searchParams.put("_min_split_improvement", new Double[]{1e-4, 1e-5});
return searchParams;
}
}
private final ModelingStep[] grids = new GBMGridStep[] {
new DefaultGBMGridStep("grid_1", aml()),
/*
new DefaultGBMGridStep("grid_1_resume", aml()) {
@Override
protected void setSearchCriteria(RandomDiscreteValueSearchCriteria searchCriteria, Model.Parameters baseParms) {
super.setSearchCriteria(searchCriteria, baseParms);
searchCriteria.set_stopping_rounds(0);
}
@Override
@SuppressWarnings("unchecked")
protected Job<Grid> startJob() {
Key<Grid>[] resumedGrid = aml().getResumableKeys(_provider, "grid_1");
if (resumedGrid.length == 0) return null;
return hyperparameterSearch(resumedGrid[0], prepareModelParameters(), prepareSearchParameters());
}
}
*/
};
private final ModelingStep[] exploitation = new ModelingStep[] {
new GBMExploitationStep("lr_annealing", aml()) {
Key<Models> resultKey = null;
@Override
protected Job<Models> startTraining(Key result, double maxRuntimeSecs) {
resultKey = result;
GBMModel bestGBM = getBestGBM();
aml().eventLog().info(EventLogEntry.Stage.ModelSelection, "Retraining best GBM with learning rate annealing: "+bestGBM._key);
GBMParameters params = (GBMParameters) bestGBM._input_parms.clone();
params._max_runtime_secs = 0; // reset max runtime
params._learn_rate_annealing = 0.99;
initTimeConstraints(params, maxRuntimeSecs);
setStoppingCriteria(params, new GBMParameters());
return asModelsJob(startModel(Key.make(result+"_model"), params), result);
}
@Override
protected ModelSelectionStrategy getSelectionStrategy() {
return (originalModels, newModels) ->
new KeepBestN<>(1, () -> makeTmpLeaderboard(Objects.toString(resultKey, _provider+"_"+_id)))
.select(new Key[] { getBestGBM()._key }, newModels);
}
}
};
public GBMSteps(AutoML autoML) {
super(autoML);
}
@Override
public String getProvider() {
return NAME;
}
@Override
protected ModelingStep[] getDefaultModels() {
return defaults;
}
@Override
protected ModelingStep[] getGrids() {
return grids;
}
@Override
protected ModelingStep[] getOptionals() {
return exploitation;
}
}
@Override
public String getName() {
return GBMSteps.NAME;
}
@Override
public GBMSteps newInstance(AutoML aml) {
return new GBMSteps(aml);
}
@Override
public GBMParameters newDefaultParameters() {
return new GBMParameters();
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/modeling/GLMStepsProvider.java
|
package ai.h2o.automl.modeling;
import ai.h2o.automl.*;
import ai.h2o.automl.preprocessing.PreprocessingConfig;
import ai.h2o.automl.preprocessing.TargetEncoding;
import hex.Model;
import hex.genmodel.utils.DistributionFamily;
import hex.glm.GLMModel;
import hex.glm.GLMModel.GLMParameters;
public class GLMStepsProvider
implements ModelingStepsProvider<GLMStepsProvider.GLMSteps>
, ModelParametersProvider<GLMParameters> {
public static class GLMSteps extends ModelingSteps {
static final String NAME = Algo.GLM.name();
static abstract class GLMModelStep extends ModelingStep.ModelStep<GLMModel> {
GLMModelStep(String id, AutoML autoML) {
super(NAME, Algo.GLM, id, autoML);
}
@Override
protected void setStoppingCriteria(Model.Parameters parms, Model.Parameters defaults) {
// disabled as we're using lambda search
}
public GLMParameters prepareModelParameters() {
GLMParameters params = new GLMParameters();
params._lambda_search = true;
return params;
}
@Override
protected PreprocessingConfig getPreprocessingConfig() {
// GLM (the exception as usual) doesn't support target encoding if CV is enabled,
// because it initializes its lambdas + other params before CV (preventing changes in the train frame during CV).
PreprocessingConfig config = super.getPreprocessingConfig();
config.put(TargetEncoding.CONFIG_PREPARE_CV_ONLY, aml().isCVEnabled());
return config;
}
}
private final ModelingStep[] defaults = new GLMModelStep[] {
new GLMModelStep("def_1", aml()) {
@Override
public GLMParameters prepareModelParameters() {
GLMParameters params = super.prepareModelParameters();
params._alpha = new double[] {0.0, 0.2, 0.4, 0.6, 0.8, 1.0};
params._missing_values_handling = GLMParameters.MissingValuesHandling.MeanImputation;
return params;
}
},
};
public GLMSteps(AutoML autoML) {
super(autoML);
}
@Override
public String getProvider() {
return NAME;
}
@Override
protected ModelingStep[] getDefaultModels() {
return defaults;
}
}
@Override
public String getName() {
return GLMSteps.NAME;
}
@Override
public GLMSteps newInstance(AutoML aml) {
return new GLMSteps(aml);
}
@Override
public GLMParameters newDefaultParameters() {
return new GLMParameters();
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/modeling/StackedEnsembleStepsProvider.java
|
package ai.h2o.automl.modeling;
import ai.h2o.automl.*;
import ai.h2o.automl.WorkAllocations.Work;
import ai.h2o.automl.events.EventLogEntry;
import ai.h2o.automl.preprocessing.PreprocessingConfig;
import ai.h2o.automl.preprocessing.TargetEncoding;
import hex.KeyValue;
import hex.Model;
import hex.ensemble.Metalearner;
import hex.ensemble.StackedEnsembleModel;
import hex.ensemble.StackedEnsembleModel.StackedEnsembleParameters;
import hex.glm.GLMModel;
import water.DKV;
import water.Job;
import water.Key;
import water.util.PojoUtils;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
public class StackedEnsembleStepsProvider
implements ModelingStepsProvider<StackedEnsembleStepsProvider.StackedEnsembleSteps>
, ModelParametersProvider<StackedEnsembleParameters> {
public static class StackedEnsembleSteps extends ModelingSteps {
@Override
protected void cleanup() {
super.cleanup();
Arrays.stream(aml().leaderboard().getModels())
.filter(model -> model instanceof StackedEnsembleModel)
.forEach(model -> ((StackedEnsembleModel) model).deleteBaseModelPredictions());
}
static final String NAME = Algo.StackedEnsemble.name();
static abstract class StackedEnsembleModelStep extends ModelingStep.ModelStep<StackedEnsembleModel> {
protected final Metalearner.Algorithm _metalearnerAlgo;
StackedEnsembleModelStep(String id, Metalearner.Algorithm algo, int priorityGroup, int weight, AutoML autoML) {
super(NAME, Algo.StackedEnsemble, id, priorityGroup, weight, autoML);
_metalearnerAlgo = algo;
_ignoredConstraints = new AutoML.Constraint[] {
AutoML.Constraint.MODEL_COUNT, // do not include SEs in model count (current contract: max_models = max_base_models).
AutoML.Constraint.FAILURE_COUNT // do not increment failures on SEs (several issues can occur with SEs during reruns, we should still add the error to event log, but not fail AutoML).
};
}
@Override
protected void setCrossValidationParams(Model.Parameters params) {
//added in the stack: we could probably move this here.
}
@Override
protected void setWeightingParams(Model.Parameters params) {
//Disabled: StackedEnsemble doesn't support weights in score0?
}
@Override
protected void setClassBalancingParams(Model.Parameters params) {
//Disabled
}
@Override
protected PreprocessingConfig getPreprocessingConfig() {
// SE should not have TE applied; the base models already handle it.
PreprocessingConfig config = super.getPreprocessingConfig();
config.put(TargetEncoding.CONFIG_ENABLED, false);
return config;
}
@Override
@SuppressWarnings("unchecked")
public boolean canRun() {
Key<Model>[] keys = getBaseModels();
Work seWork = getAllocatedWork();
if (!super.canRun()) {
aml().job().update(0, "Skipping this StackedEnsemble");
aml().eventLog().info(EventLogEntry.Stage.ModelTraining, String.format("Skipping StackedEnsemble '%s' due to the exclude_algos option or it is already trained.", _id));
return false;
} else if (keys.length == 0) {
aml().job().update(seWork.consume(), "No base models; skipping this StackedEnsemble");
aml().eventLog().info(EventLogEntry.Stage.ModelTraining, String.format("No base models, due to timeouts or the exclude_algos option. Skipping StackedEnsemble '%s'.", _id));
return false;
} else if (keys.length == 1) {
aml().job().update(seWork.consume(), "Only one base model; skipping this StackedEnsemble");
aml().eventLog().info(EventLogEntry.Stage.ModelTraining, String.format("Skipping StackedEnsemble '%s' since there is only one model to stack", _id));
return false;
} else if (!isCVEnabled() && aml().getBlendingFrame() == null) {
aml().job().update(seWork.consume(), "Cross-validation disabled by the user and no blending frame provided; Skipping this StackedEnsemble");
aml().eventLog().info(EventLogEntry.Stage.ModelTraining, String.format("Cross-validation is disabled by the user and no blending frame was provided; skipping StackedEnsemble '%s'.", _id));
return false;
}
return !hasDoppelganger(keys);
}
@SuppressWarnings("unchecked")
protected boolean hasDoppelganger(Key<Model>[] baseModelsKeys) {
Key<StackedEnsembleModel>[] seModels = Arrays
.stream(getTrainedModelsKeys())
.filter(k -> isStackedEnsemble(k))
.toArray(Key[]::new);
Set<Key> keySet = new HashSet<>(Arrays.asList(baseModelsKeys));
for (Key<StackedEnsembleModel> seKey: seModels) {
StackedEnsembleModelStep seStep = (StackedEnsembleModelStep)aml().session().getModelingStep(seKey);
if (seStep._metalearnerAlgo != _metalearnerAlgo) continue;
final StackedEnsembleParameters seParams = seKey.get()._parms;
final Key[] seBaseModels = seParams._base_models;
if (seBaseModels.length != baseModelsKeys.length) continue;
if (keySet.equals(new HashSet<>(Arrays.asList(seBaseModels))))
return true; // We already have a SE with the same base models
}
return false;
}
protected abstract Key<Model>[] getBaseModels();
protected String getModelType(Key<Model> key) {
String keyStr = key.toString();
return keyStr.substring(0, keyStr.indexOf('_'));
}
protected boolean isStackedEnsemble(Key<Model> key) {
ModelingStep step = aml().session().getModelingStep(key);
return step != null && step.getAlgo() == Algo.StackedEnsemble;
}
@Override
public StackedEnsembleParameters prepareModelParameters() {
StackedEnsembleParameters params = new StackedEnsembleParameters();
params._valid = (aml().getValidationFrame() == null ? null : aml().getValidationFrame()._key);
params._blending = (aml().getBlendingFrame() == null ? null : aml().getBlendingFrame()._key);
params._keep_levelone_frame = true; //TODO Why is this true? Can be optionally turned off
return params;
}
protected void setMetalearnerParameters(StackedEnsembleParameters params) {
AutoMLBuildSpec buildSpec = aml().getBuildSpec();
params._metalearner_fold_column = buildSpec.input_spec.fold_column;
params._metalearner_nfolds = buildSpec.build_control.nfolds;
params.initMetalearnerParams(_metalearnerAlgo);
params._metalearner_parameters._keep_cross_validation_models = buildSpec.build_control.keep_cross_validation_models;
params._metalearner_parameters._keep_cross_validation_predictions = buildSpec.build_control.keep_cross_validation_predictions;
}
Job<StackedEnsembleModel> stack(String modelName, Key<Model>[] baseModels, boolean isLast) {
StackedEnsembleParameters params = prepareModelParameters();
params._base_models = baseModels;
params._keep_base_model_predictions = !isLast; //avoids recomputing some base predictions for each SE
setMetalearnerParameters(params);
if (_metalearnerAlgo == Metalearner.Algorithm.AUTO) setAutoMetalearnerSEParameters(params);
return stack(modelName, params);
}
Job<StackedEnsembleModel> stack(String modelName, StackedEnsembleParameters stackedEnsembleParameters) {
Key<StackedEnsembleModel> modelKey = makeKey(modelName, true);
return trainModel(modelKey, stackedEnsembleParameters);
}
protected void setAutoMetalearnerSEParameters(StackedEnsembleParameters stackedEnsembleParameters) {
// add custom alpha in GLM metalearner
GLMModel.GLMParameters metalearnerParams = (GLMModel.GLMParameters)stackedEnsembleParameters._metalearner_parameters;
metalearnerParams._alpha = new double[]{0.5, 1.0};
if (aml().getResponseColumn().isCategorical()) {
// Add logit transform
stackedEnsembleParameters._metalearner_transform = StackedEnsembleParameters.MetalearnerTransform.Logit;
}
}
}
static class BestOfFamilySEModelStep extends StackedEnsembleModelStep {
public BestOfFamilySEModelStep(String id, int priorityGroup, AutoML autoML) {
this(id, Metalearner.Algorithm.AUTO, priorityGroup, autoML);
}
public BestOfFamilySEModelStep(String id, Metalearner.Algorithm algo, int priorityGroup, AutoML autoML) {
this(id, algo, priorityGroup, DEFAULT_MODEL_TRAINING_WEIGHT, autoML);
}
public BestOfFamilySEModelStep(String id, Metalearner.Algorithm algo, int priorityGroup, int weight, AutoML autoML) {
super((id == null ? "best_of_family_"+algo.name() : id), algo, priorityGroup, weight, autoML);
_description = _description+" (built with "+algo.name()+" metalearner, using top model from each algorithm type)";
}
@Override
@SuppressWarnings("unchecked")
protected Key<Model>[] getBaseModels() {
// Set aside List<Model> for best models per model type. Meaning best GLM, GBM, DRF, XRT, and DL (5 models).
// This will give another ensemble that is smaller than the original which takes all models into consideration.
List<Key<Model>> bestModelsOfEachType = new ArrayList<>();
Set<String> typesOfGatheredModels = new HashSet<>();
for (Key<Model> key : getTrainedModelsKeys()) {
// trained models are sorted (taken from leaderboard), so we only need to pick the first of each type (excluding other StackedEnsembles)
String type = getModelType(key);
if (isStackedEnsemble(key) || typesOfGatheredModels.contains(type)) continue;
typesOfGatheredModels.add(type);
bestModelsOfEachType.add(key);
}
return bestModelsOfEachType.toArray(new Key[0]);
}
@Override
protected Job<StackedEnsembleModel> startJob() {
return stack(_provider+"_BestOfFamily", getBaseModels(), false);
}
}
static class BestNModelsSEModelStep extends StackedEnsembleModelStep {
private final int _N;
public BestNModelsSEModelStep(String id, int N, int priorityGroup, AutoML autoML) {
this(id, Metalearner.Algorithm.AUTO, N, priorityGroup, DEFAULT_MODEL_TRAINING_WEIGHT, autoML);
}
public BestNModelsSEModelStep(String id, Metalearner.Algorithm algo, int N, int priorityGroup, int weight, AutoML autoML) {
super((id == null ? "best_"+N+"_"+algo.name() : id), algo, priorityGroup, weight, autoML);
_N = N;
_description = _description+" (built with "+algo.name()+" metalearner, using best "+N+" non-SE models)";
}
@Override
@SuppressWarnings("unchecked")
protected Key<Model>[] getBaseModels() {
return Stream.of(getTrainedModelsKeys())
.filter(k -> !isStackedEnsemble(k))
.limit(_N)
.toArray(Key[]::new);
}
@Override
protected Job<StackedEnsembleModel> startJob() {
return stack(_provider+"_Best"+_N, getBaseModels(), false);
}
}
static class AllSEModelStep extends StackedEnsembleModelStep {
public AllSEModelStep(String id, int priorityGroup, AutoML autoML) {
this(id, Metalearner.Algorithm.AUTO, priorityGroup, autoML);
}
public AllSEModelStep(String id, Metalearner.Algorithm algo, int priorityGroup, AutoML autoML) {
this(id, algo, priorityGroup, DEFAULT_MODEL_TRAINING_WEIGHT, autoML);
}
public AllSEModelStep(String id, Metalearner.Algorithm algo, int priorityGroup, int weight, AutoML autoML) {
super((id == null ? "all_"+algo.name() : id), algo, priorityGroup, weight, autoML);
_description = _description+" (built with "+algo.name()+" metalearner, using all AutoML models)";
}
@Override
@SuppressWarnings("unchecked")
protected Key<Model>[] getBaseModels() {
return Stream.of(getTrainedModelsKeys())
.filter(k -> !isStackedEnsemble(k))
.toArray(Key[]::new);
}
@Override
protected Job<StackedEnsembleModel> startJob() {
return stack(_provider+"_AllModels", getBaseModels(), false);
}
}
static class MonotonicSEModelStep extends StackedEnsembleModelStep {
public MonotonicSEModelStep(String id, int priorityGroup, AutoML autoML) {
this(id, Metalearner.Algorithm.AUTO, priorityGroup, DEFAULT_MODEL_TRAINING_WEIGHT, autoML);
}
public MonotonicSEModelStep(String id, Metalearner.Algorithm algo, int priorityGroup, int weight, AutoML autoML) {
super((id == null ? "monotonic" : id), algo, priorityGroup, weight, autoML);
_description = _description+" (built with "+algo.name()+" metalearner, using monotonically constrained AutoML models)";
}
boolean hasMonotoneConstrains(Key<Model> modelKey) {
Model model = DKV.getGet(modelKey);
try {
KeyValue[] mc = (KeyValue[]) PojoUtils.getFieldValue(
model._parms, "_monotone_constraints",
PojoUtils.FieldNaming.CONSISTENT);
return mc != null && mc.length > 0;
} catch (IllegalArgumentException e) {
return false;
}
}
@Override
public boolean canRun() {
boolean canRun = super.canRun();
if (!canRun) return false;
int monotoneModels=0;
for (Key<Model> modelKey: getTrainedModelsKeys()) {
if (hasMonotoneConstrains(modelKey))
monotoneModels++;
if (monotoneModels >= 2)
return true;
}
if (monotoneModels == 1) {
aml().job().update(getAllocatedWork().consume(),
"Only one monotonic base model; skipping this StackedEnsemble");
aml().eventLog().info(EventLogEntry.Stage.ModelTraining,
String.format("Skipping StackedEnsemble '%s' since there is only one monotonic model to stack", _id));
} else {
aml().job().update(getAllocatedWork().consume(),
"No monotonic base model; skipping this StackedEnsemble");
aml().eventLog().info(EventLogEntry.Stage.ModelTraining,
String.format("Skipping StackedEnsemble '%s' since there is no monotonic model to stack", _id));
}
return false;
}
@Override
@SuppressWarnings("unchecked")
protected Key<Model>[] getBaseModels() {
return Stream.of(getTrainedModelsKeys())
.filter(k -> !isStackedEnsemble(k) && hasMonotoneConstrains(k))
.toArray(Key[]::new);
}
@Override
protected Job<StackedEnsembleModel> startJob() {
return stack(_provider + "_Monotonic", getBaseModels(), false);
}
}
private final ModelingStep[] defaults;
private final ModelingStep[] optionals;
{
// we're going to cheat a bit: ModelingSteps needs to be instantiated by the AutoML instance
// to convert each StepDefinition into one or more ModelingStep(s)
// so at that time, we have access to the entire modeling plan
// and we can dynamically generate the modeling steps that we're going to need.
StepDefinition[] modelingPlan = aml().getBuildSpec().build_models.modeling_plan;
if (Stream.of(modelingPlan).noneMatch(sd -> sd.getName().equals(NAME))) {
defaults = new ModelingStep[0];
optionals = new ModelingStep[0];
} else {
List<StackedEnsembleModelStep> defaultSeSteps = new ArrayList<>();
// starting to generate the SE for each "base" group
// i.e. for each group with algo steps.
Set<String> defaultAlgoProviders = Stream.of(Algo.values())
.filter(a -> a != Algo.StackedEnsemble)
.map(Algo::name)
.collect(Collectors.toSet());
int[] baseAlgoGroups = Stream.of(modelingPlan)
.filter(sd -> defaultAlgoProviders.contains(sd.getName()))
.flatMapToInt(sd ->
sd.getAlias() == StepDefinition.Alias.defaults ? IntStream.of(ModelingStep.ModelStep.DEFAULT_MODEL_GROUP)
: sd.getAlias() == StepDefinition.Alias.grids ? IntStream.of(ModelingStep.GridStep.DEFAULT_GRID_GROUP)
: sd.getAlias() == StepDefinition.Alias.all ? IntStream.of(ModelingStep.ModelStep.DEFAULT_MODEL_GROUP, ModelingStep.GridStep.DEFAULT_GRID_GROUP)
: sd.getSteps().stream().flatMapToInt(s -> s.getGroup() == StepDefinition.Step.DEFAULT_GROUP
? IntStream.of(ModelingStep.ModelStep.DEFAULT_MODEL_GROUP, ModelingStep.GridStep.DEFAULT_GRID_GROUP)
: IntStream.of(s.getGroup())))
.distinct().sorted().toArray();
for (int group : baseAlgoGroups) {
defaultSeSteps.add(new BestOfFamilySEModelStep("best_of_family_" + group, group, aml()));
defaultSeSteps.add(new AllSEModelStep("all_" + group, group, aml())); // groups <=0 are ignored.
}
defaults = defaultSeSteps.toArray(new ModelingStep[0]);
// now all the additional SEs are available as optionals (usually requested by id).
int maxBaseGroup = IntStream.of(baseAlgoGroups).max().orElse(0);
List<StackedEnsembleModelStep> optionalSeSteps = new ArrayList<>();
if (maxBaseGroup > 0) {
int optionalGroup = maxBaseGroup+1;
optionalSeSteps.add(new MonotonicSEModelStep("monotonic", optionalGroup, aml()));
optionalSeSteps.add(new BestOfFamilySEModelStep("best_of_family", optionalGroup, aml()));
optionalSeSteps.add(new AllSEModelStep("all", optionalGroup, aml()));
if (Algo.XGBoost.enabled()) {
optionalSeSteps.add(new BestOfFamilySEModelStep("best_of_family_xgboost", Metalearner.Algorithm.xgboost, optionalGroup, aml()));
optionalSeSteps.add(new AllSEModelStep("all_xgboost", Metalearner.Algorithm.xgboost, optionalGroup, aml()));
}
optionalSeSteps.add(new BestOfFamilySEModelStep("best_of_family_gbm", Metalearner.Algorithm.gbm, optionalGroup, aml()));
optionalSeSteps.add(new AllSEModelStep("all_gbm", Metalearner.Algorithm.gbm, optionalGroup, aml()));
optionalSeSteps.add(new BestOfFamilySEModelStep("best_of_family_xglm", optionalGroup, aml()) {
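                // never treated as a doppelganger: the lambda-search GLM metalearner configured below makes this SE
                // distinct from the plain best-of-family ensembles even when the base models are identical.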
@Override
protected boolean hasDoppelganger(Key<Model>[] baseModelsKeys) {
return false;
}
@Override
protected void setMetalearnerParameters(StackedEnsembleParameters params) {
super.setMetalearnerParameters(params);
GLMModel.GLMParameters metalearnerParams = (GLMModel.GLMParameters) params._metalearner_parameters;
metalearnerParams._lambda_search = true;
}
});
optionalSeSteps.add(new AllSEModelStep("all_xglm", optionalGroup, aml()) {
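                // treated as a doppelganger (and skipped) when every base model has a distinct model type,
                // since the resulting ensemble would then stack the same models as the best-of-family SE.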
@Override
protected boolean hasDoppelganger(Key<Model>[] baseModelsKeys) {
Set<String> modelTypes = new HashSet<>();
for (Key<Model> key : baseModelsKeys) {
String modelType = getModelType(key);
if (modelTypes.contains(modelType)) return false;
modelTypes.add(modelType);
}
return true;
}
@Override
protected void setMetalearnerParameters(StackedEnsembleParameters params) {
super.setMetalearnerParameters(params);
GLMModel.GLMParameters metalearnerParams = (GLMModel.GLMParameters) params._metalearner_parameters;
metalearnerParams._lambda_search = true;
}
});
// optionalSeSteps.add(new BestNModelsSEModelStep("best_20", 20, optionalGroup, aml()));
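            // cap the number of base models for the "best_N" SE: up to 1000 for binary/regression problems,
            // scaled down with the response cardinality for multiclass problems (never below 100).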
int card = aml().getResponseColumn().cardinality();
int maxModels = card <= 2 ? 1_000 : Math.max(100, 1_000 / (card - 1));
optionalSeSteps.add(new BestNModelsSEModelStep("best_N", maxModels, optionalGroup, aml()));
}
optionals = optionalSeSteps.toArray(new ModelingStep[0]);
}
}
public StackedEnsembleSteps(AutoML autoML) {
super(autoML);
}
@Override
public String getProvider() {
return NAME;
}
@Override
protected ModelingStep[] getDefaultModels() {
return defaults;
}
@Override
protected ModelingStep[] getOptionals() {
return optionals;
}
}
@Override
public String getName() {
return StackedEnsembleSteps.NAME;
}
@Override
public StackedEnsembleSteps newInstance(AutoML aml) {
return new StackedEnsembleSteps(aml);
}
@Override
public StackedEnsembleParameters newDefaultParameters() {
return new StackedEnsembleParameters();
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/modeling/XGBoostSteps.java
|
package ai.h2o.automl.modeling;
import ai.h2o.automl.*;
import ai.h2o.automl.ModelSelectionStrategies.KeepBestN;
import ai.h2o.automl.events.EventLogEntry;
import hex.Model;
import hex.genmodel.utils.DistributionFamily;
import hex.grid.GridSearch;
import hex.grid.HyperSpaceSearchCriteria.SequentialSearchCriteria;
import hex.grid.HyperSpaceSearchCriteria.StoppingCriteria;
import hex.grid.SequentialWalker;
import hex.tree.xgboost.XGBoostModel;
import hex.tree.xgboost.XGBoostModel.XGBoostParameters;
import water.Job;
import water.Key;
import java.util.*;
import java.util.stream.IntStream;
public class XGBoostSteps extends ModelingSteps {
static final String NAME = Algo.XGBoost.name();
static XGBoostParameters prepareModelParameters(AutoML aml, boolean emulateLightGBM) {
XGBoostParameters params = new XGBoostParameters();
if (emulateLightGBM) {
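            // approximate LightGBM's behaviour: histogram-based splits with leaf-wise (loss-guided) tree growth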
params._tree_method = XGBoostParameters.TreeMethod.hist;
params._grow_policy = XGBoostParameters.GrowPolicy.lossguide;
}
params._score_tree_interval = 5;
params._ntrees = 10000;
// params._min_split_improvement = 0.01f;
return params;
}
static abstract class XGBoostModelStep extends ModelingStep.ModelStep<XGBoostModel> {
boolean _emulateLightGBM;
XGBoostModelStep(String id, AutoML autoML, boolean emulateLightGBM) {
super(NAME, Algo.XGBoost, id, autoML);
_emulateLightGBM = emulateLightGBM;
}
public XGBoostParameters prepareModelParameters() {
XGBoostParameters params = XGBoostSteps.prepareModelParameters(aml(), _emulateLightGBM);
if (aml().getBuildSpec().build_control.balance_classes && aml().getDistributionFamily().equals(DistributionFamily.bernoulli)) {
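                // for imbalanced binary problems, weight the positive class by the negative/positive class ratio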
double[] dist = aml().getClassDistribution();
params._scale_pos_weight = (float) (dist[0] / dist[1]);
}
return params;
}
}
static abstract class XGBoostGridStep extends ModelingStep.GridStep<XGBoostModel> {
boolean _emulateLightGBM;
public XGBoostGridStep(String id, AutoML autoML, boolean emulateLightGBM) {
super(NAME, Algo.XGBoost, id, autoML);
_emulateLightGBM = emulateLightGBM;
}
public XGBoostParameters prepareModelParameters() {
return XGBoostSteps.prepareModelParameters(aml(), _emulateLightGBM);
}
}
static abstract class XGBoostExploitationStep extends ModelingStep.SelectionStep<XGBoostModel> {
boolean _emulateLightGBM;
protected XGBoostModel getBestXGB() {
return getBestXGBs(1).get(0);
}
protected List<XGBoostModel> getBestXGBs(int topN) {
List<XGBoostModel> xgbs = new ArrayList<>();
for (Model model : getTrainedModels()) {
if (model instanceof XGBoostModel) {
xgbs.add((XGBoostModel) model);
}
if (xgbs.size() == topN) break;
}
return xgbs;
}
@Override
public boolean canRun() {
return super.canRun() && getBestXGBs(1).size() > 0;
}
public XGBoostExploitationStep(String id, AutoML autoML, boolean emulateLightGBM) {
super(NAME, Algo.XGBoost, id, autoML);
_emulateLightGBM = emulateLightGBM;
if (autoML.getBuildSpec().build_models.exploitation_ratio > 0)
_ignoredConstraints = new AutoML.Constraint[] { AutoML.Constraint.MODEL_COUNT };
}
}
private final ModelingStep[] defaults = new XGBoostModelStep[] {
new XGBoostModelStep("def_1", aml(), false) {
@Override
public XGBoostParameters prepareModelParameters() {
//XGB 1 (medium depth)
XGBoostParameters params = super.prepareModelParameters();
params._max_depth = 10;
params._min_rows = 5;
params._sample_rate = 0.6;
params._col_sample_rate = 0.8;
params._col_sample_rate_per_tree = 0.8;
if (_emulateLightGBM) {
params._max_leaves = 1 << params._max_depth;
params._max_depth = params._max_depth * 2;
}
return params;
}
},
new XGBoostModelStep("def_2", aml(), false) {
@Override
public XGBoostParameters prepareModelParameters() {
//XGB 2 (deep)
XGBoostParameters params = super.prepareModelParameters();
params._max_depth = 15;
params._min_rows = 10;
params._sample_rate = 0.6;
params._col_sample_rate = 0.8;
params._col_sample_rate_per_tree = 0.8;
if (_emulateLightGBM) {
params._max_leaves = 1 << params._max_depth;
params._max_depth = params._max_depth * 2;
}
return params;
}
},
new XGBoostModelStep("def_3", aml(), false) {
@Override
public XGBoostParameters prepareModelParameters() {
//XGB 3 (shallow)
XGBoostParameters params = super.prepareModelParameters();
params._max_depth = 5;
params._min_rows = 3;
params._sample_rate = 0.8;
params._col_sample_rate = 0.8;
params._col_sample_rate_per_tree = 0.8;
if (_emulateLightGBM) {
params._max_leaves = 1 << params._max_depth;
params._max_depth = params._max_depth * 2;
}
return params;
}
},
};
static class DefaultXGBoostGridStep extends XGBoostGridStep {
public DefaultXGBoostGridStep(String id, AutoML autoML) {
super(id, autoML, false);
}
@Override
public XGBoostParameters prepareModelParameters() {
XGBoostParameters params = super.prepareModelParameters();
// Reset scale pos weight so we can grid search the parameter
params._scale_pos_weight = (new XGBoostParameters())._scale_pos_weight;
return params;
}
@Override
public Map<String, Object[]> prepareSearchParameters() {
Map<String, Object[]> searchParams = new HashMap<>();
// searchParams.put("_ntrees", new Integer[]{100, 1000, 10000}); // = _n_estimators
if (_emulateLightGBM) {
searchParams.put("_max_leaves", new Integer[]{1 << 5, 1 << 10, 1 << 15, 1 << 20});
searchParams.put("_max_depth", new Integer[]{10, 20, 50});
} else {
searchParams.put("_max_depth", new Integer[]{3, 6, 9, 12, 15});
if (aml().getWeightsColumn() == null || aml().getWeightsColumn().isInt()) {
searchParams.put("_min_rows", new Double[]{1.0, 3.0, 5.0, 10.0, 15.0, 20.0}); // = _min_child_weight
} else {
searchParams.put("_min_rows", new Double[]{0.01, 0.1, 1.0, 3.0, 5.0, 10.0, 15.0, 20.0}); // = _min_child_weight
}
}
searchParams.put("_sample_rate", new Double[]{0.6, 0.8, 1.0}); // = _subsample
searchParams.put("_col_sample_rate", new Double[]{0.6, 0.8, 1.0}); // = _colsample_bylevel"
searchParams.put("_col_sample_rate_per_tree", new Double[]{0.7, 0.8, 0.9, 1.0}); // = _colsample_bytree: start higher to always use at least about 40% of columns
// searchParams.put("_min_split_improvement", new Float[]{0.01f, 0.05f, 0.1f, 0.5f, 1f, 5f, 10f, 50f}); // = _gamma
// searchParams.put("_tree_method", new XGBoostParameters.TreeMethod[]{XGBoostParameters.TreeMethod.auto});
searchParams.put("_booster", new XGBoostParameters.Booster[]{ // include gblinear? cf. https://github.com/h2oai/h2o-3/issues/8381
                    XGBoostParameters.Booster.gbtree, // default; listed twice so the RGS samples it more often (note that some combinations may then be trained multiple times).
XGBoostParameters.Booster.gbtree,
XGBoostParameters.Booster.dart
});
searchParams.put("_reg_lambda", new Float[]{0.001f, 0.01f, 0.1f, 1f, 10f, 100f});
searchParams.put("_reg_alpha", new Float[]{0.001f, 0.01f, 0.1f, 0.5f, 1f});
if (aml().getBuildSpec().build_control.balance_classes && aml().getDistributionFamily().equals(DistributionFamily.bernoulli)) {
double[] dist = aml().getClassDistribution();
final float negPosRatio = (float)(dist[0] / dist[1]);
final float imbalanceRatio = negPosRatio < 1 ? 1 / negPosRatio : negPosRatio;
searchParams.put("_scale_pos_weight", new Float[]{1.f, negPosRatio});
searchParams.put("_max_delta_step", new Float[]{0f, Math.min(5f, imbalanceRatio / 2), Math.min(10f, imbalanceRatio)});
}
return searchParams;
}
}
static class XGBoostGBLinearGridStep extends XGBoostGridStep {
public XGBoostGBLinearGridStep(String id, AutoML autoML) {
super(id, autoML, false);
}
@Override
public XGBoostParameters prepareModelParameters() {
return XGBoostSteps.prepareModelParameters(aml(), false);
}
@Override
public Map<String, Object[]> prepareSearchParameters() {
Map<String, Object[]> searchParams = new HashMap<>();
/*
// not supported/exposed in our xgboost yet
if (aml().getBuildSpec().build_control.isReproducible()) {
searchParams.put("_updater", new String[] {"coord_descent"});
searchParams.put("_feature_selector", new String[] {"cyclic", "greedy"}); // TODO: check if others are deterministic
} else {
searchParams.put("_updater", new String[] {"shotgun", "coord_descent"});
searchParams.put("_feature_selector", new String[] {"cyclic", "shuffle", "random", "greedy", "thrifty"});
}
int ncols = aml().getTrainingFrame().numCols() - (aml().getBuildSpec().getNonPredictors().length +
(aml().getBuildSpec().input_spec.ignored_columns != null ? aml().getBuildSpec().input_spec.ignored_columns.length : 0));
searchParams.put("_top_k", IntStream.range(0, ncols-1).boxed().toArray(Integer[]::new));
*/
searchParams.put("_booster", new XGBoostParameters.Booster[]{ XGBoostParameters.Booster.gblinear });
searchParams.put("_reg_lambda", new Float[]{0.001f, 0.01f, 0.1f, 1f, 10f, 100f});
searchParams.put("_reg_alpha", new Float[]{0.001f, 0.01f, 0.1f, 0.5f, 1f});
return searchParams;
}
}
private final ModelingStep[] grids = new XGBoostGridStep[] {
new DefaultXGBoostGridStep("grid_1", aml()),
new XGBoostGBLinearGridStep("grid_gblinear", aml()),
/*
new DefaultXGBoostGridStep("grid_1_resume", aml()) {
@Override
protected void setSearchCriteria(RandomDiscreteValueSearchCriteria searchCriteria, Model.Parameters baseParms) {
super.setSearchCriteria(searchCriteria, baseParms);
searchCriteria.set_stopping_rounds(0);
}
@Override
@SuppressWarnings("unchecked")
protected Job<Grid> startJob() {
Key<Grid>[] resumedGrid = aml().getResumableKeys(_provider, "grid_1");
if (resumedGrid.length == 0) return null;
return hyperparameterSearch(resumedGrid[0], prepareModelParameters(), prepareSearchParameters());
}
}
*/
};
private final ModelingStep[] exploitation = new ModelingStep[] {
new XGBoostExploitationStep("lr_annealing", aml(), false) {
Key<Models> resultKey = null;
@Override
protected Job<Models> startTraining(Key result, double maxRuntimeSecs) {
resultKey = result;
XGBoostModel bestXGB = getBestXGB();
aml().eventLog().info(EventLogEntry.Stage.ModelSelection, "Retraining best XGBoost with learning rate annealing: "+bestXGB._key);
XGBoostParameters params = (XGBoostParameters) bestXGB._input_parms.clone();
params._max_runtime_secs = 0; // reset max runtime
params._learn_rate_annealing = 0.99;
initTimeConstraints(params, maxRuntimeSecs);
setStoppingCriteria(params, new XGBoostParameters());
return asModelsJob(startModel(Key.make(result+"_model"), params), result);
}
@Override
protected ModelSelectionStrategy getSelectionStrategy() {
return (originalModels, newModels) ->
new KeepBestN<>(1, () -> makeTmpLeaderboard(Objects.toString(resultKey, _provider+"_"+_id)))
.select(new Key[] { getBestXGB()._key }, newModels);
}
},
new XGBoostExploitationStep("lr_search", aml(), false) {
Key resultKey = null;
@Override
protected ModelSelectionStrategy getSelectionStrategy() {
return (originalModels, newModels) ->
new KeepBestN<>(1, () -> makeTmpLeaderboard(Objects.toString(resultKey, _provider+"_"+_id)))
.select(new Key[] { getBestXGB()._key }, newModels);
}
@Override
protected Job<Models> startTraining(Key result, double maxRuntimeSecs) {
resultKey = result;
XGBoostModel bestXGB = getBestXGBs(1).get(0);
aml().eventLog().info(EventLogEntry.Stage.ModelSelection, "Applying learning rate search on best XGBoost: "+bestXGB._key);
XGBoostParameters params = (XGBoostParameters) bestXGB._input_parms.clone();
XGBoostParameters defaults = new XGBoostParameters();
params._max_runtime_secs = 0; // reset max runtime
initTimeConstraints(params, 0); // ensure we have a max runtime per model in the grid
setStoppingCriteria(params, defaults); // keep the same seed as the bestXGB
        // keep stopping_rounds fixed, but increase score_tree_interval as the learn rate is lowered
int sti = params._score_tree_interval;
Object[][] hyperParams = new Object[][] {
new Object[] {"_learn_rate", "_score_tree_interval"},
new Object[] { 0.5 , sti },
new Object[] { 0.2 , 2*sti },
new Object[] { 0.1 , 3*sti },
new Object[] { 0.05 , 4*sti },
new Object[] { 0.02 , 5*sti },
new Object[] { 0.01 , 6*sti },
new Object[] { 0.005 , 7*sti },
new Object[] { 0.002 , 8*sti },
new Object[] { 0.001 , 9*sti },
new Object[] { 0.0005, 10*sti },
};
/*
Object[][] hyperParams = new Object[][] {
new Object[] {"_learn_rate", "_score_tree_interval"},
new Object[] { 0.5 , sti },
new Object[] { 0.2 , (1<<1)*sti },
new Object[] { 0.1 , (1<<2)*sti },
new Object[] { 0.05 , (1<<3)*sti },
new Object[] { 0.02 , (1<<4)*sti },
new Object[] { 0.01 , (1<<5)*sti },
new Object[] { 0.005 , (1<<6)*sti },
new Object[] { 0.002 , (1<<7)*sti },
new Object[] { 0.001 , (1<<8)*sti },
new Object[] { 0.0005, (1<<9)*sti },
};
*/
aml().eventLog().info(EventLogEntry.Stage.ModelTraining, "AutoML: starting "+resultKey+" model training")
.setNamedValue("start_"+_provider+"_"+_id, new Date(), EventLogEntry.epochFormat.get());
return asModelsJob(GridSearch.startGridSearch(
Key.make(result+"_grid"),
new SequentialWalker<>(
params,
hyperParams,
new GridSearch.SimpleParametersBuilderFactory<>(),
new SequentialSearchCriteria(StoppingCriteria.create()
.maxRuntimeSecs((int)maxRuntimeSecs)
.stoppingMetric(params._stopping_metric)
.stoppingRounds(3) // enforcing this as we define the sequence and it is quite small.
.stoppingTolerance(params._stopping_tolerance)
.build())
),
GridSearch.SEQUENTIAL_MODEL_BUILDING
), result);
}
}
};
public XGBoostSteps(AutoML autoML) {
super(autoML);
}
@Override
public String getProvider() {
return NAME;
}
@Override
protected ModelingStep[] getDefaultModels() {
return defaults;
}
@Override
protected ModelingStep[] getGrids() {
return grids;
}
@Override
protected ModelingStep[] getOptionals() {
return exploitation;
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/modeling/XGBoostStepsProvider.java
|
package ai.h2o.automl.modeling;
import ai.h2o.automl.*;
import hex.Model;
import hex.ModelBuilder;
import water.util.Log;
/**
* This class is decoupled from the XGBoostSteps implementation to avoid having to load XGBoost classes
* when the extension is not available.
*/
public class XGBoostStepsProvider implements ModelingStepsProvider<XGBoostSteps>, ModelParametersProvider<Model.Parameters> {
@Override
public String getName() {
return XGBoostSteps.NAME;
}
@Override
public XGBoostSteps newInstance(AutoML aml) {
return Algo.XGBoost.enabled() ? new XGBoostSteps(aml) : null;
}
@Override
public Model.Parameters newDefaultParameters() {
return Algo.XGBoost.enabled() ? ModelBuilder.make(getName(), null, null)._parms : null;
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/preprocessing/PreprocessingConfig.java
|
package ai.h2o.automl.preprocessing;
import java.util.HashMap;
public class PreprocessingConfig extends HashMap<String, Object> {
boolean get(String key, boolean defaultValue) {
return (boolean) getOrDefault(key, defaultValue);
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/preprocessing/PreprocessingStep.java
|
package ai.h2o.automl.preprocessing;
import ai.h2o.automl.ModelingStep;
import hex.Model;
public interface PreprocessingStep<T> {
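    // Typical lifecycle (illustrative sketch, not part of the original interface contract):
    //   PreprocessingStep step = new TargetEncoding(aml);   // e.g. the TargetEncoding implementation below
    //   step.prepare();                                     // once, before the first model is trained
    //   Completer completer = step.apply(params, config);   // right before each model training
    //   /* ... train the model ... */
    //   completer.run();                                    // clean up per-model artifacts
    //   step.dispose();                                     // at the end of the AutoML session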
interface Completer extends Runnable {}
String getType();
/**
     * Preprocessing steps are prepared by default before the AutoML session starts training the first model.
*/
void prepare();
    /**
     * Applies this preprocessing step to the model parameters right before model training starts.
     * @param params the model parameters to be modified in place.
     * @param config the preprocessing configuration controlling how (or whether) this step is applied.
     * @return a function used to "complete" the preprocessing step: it is called by default at the end of the job creating model(s) from the given params.
     * This can mean, for example, cleaning up the temporary artifacts that may have been created to apply the preprocessing step.
     */
Completer apply(Model.Parameters params, PreprocessingConfig config);
/**
     * Preprocessing steps are disposed of by default at the end of the AutoML training session.
     * Note that disposing here doesn't mean being removed from the system;
     * the goal is mainly to release resources that are no longer needed for the current AutoML run.
*/
void dispose();
/**
     * Completely removes this preprocessing step and its artifacts from the system.
*/
void remove();
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/preprocessing/PreprocessingStepDefinition.java
|
package ai.h2o.automl.preprocessing;
import ai.h2o.automl.AutoML;
import water.Iced;
public class PreprocessingStepDefinition extends Iced<PreprocessingStepDefinition> {
public enum Type {
TargetEncoding
}
Type _type;
public PreprocessingStepDefinition() { /* for reflection */ }
public PreprocessingStepDefinition(Type type) {
_type = type;
}
public PreprocessingStep newPreprocessingStep(AutoML aml) {
switch (_type) {
case TargetEncoding:
return new TargetEncoding(aml);
default:
throw new IllegalStateException();
}
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/ai/h2o/automl/preprocessing/TargetEncoding.java
|
package ai.h2o.automl.preprocessing;
import ai.h2o.automl.AutoML;
import ai.h2o.automl.AutoMLBuildSpec.AutoMLBuildControl;
import ai.h2o.automl.AutoMLBuildSpec.AutoMLInput;
import ai.h2o.automl.events.EventLogEntry.Stage;
import ai.h2o.targetencoding.TargetEncoder;
import ai.h2o.targetencoding.TargetEncoderModel;
import ai.h2o.targetencoding.TargetEncoderModel.DataLeakageHandlingStrategy;
import ai.h2o.targetencoding.TargetEncoderModel.TargetEncoderParameters;
import ai.h2o.targetencoding.TargetEncoderPreprocessor;
import hex.Model;
import hex.Model.Parameters.FoldAssignmentScheme;
import hex.ModelPreprocessor;
import water.DKV;
import water.Key;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.ast.prims.advmath.AstKFold;
import water.util.ArrayUtils;
import java.util.*;
import java.util.function.Predicate;
public class TargetEncoding implements PreprocessingStep {
public static String CONFIG_ENABLED = "target_encoding_enabled";
public static String CONFIG_PREPARE_CV_ONLY = "target_encoding_prepare_cv_only";
static String TE_FOLD_COLUMN_SUFFIX = "_te_fold";
private static final Completer NOOP = () -> {};
private AutoML _aml;
private TargetEncoderPreprocessor _tePreprocessor;
private TargetEncoderModel _teModel;
private final List<Completer> _disposables = new ArrayList<>();
private TargetEncoderParameters _defaultParams;
    private boolean _encodeAllColumns = false; // if true, bypass all restrictions on column selection.
    private int _columnCardinalityThreshold = 25; // the minimum cardinality for a column to be TE encoded.
public TargetEncoding(AutoML aml) {
_aml = aml;
}
@Override
public String getType() {
return PreprocessingStepDefinition.Type.TargetEncoding.name();
}
@Override
public void prepare() {
AutoMLInput amlInput = _aml.getBuildSpec().input_spec;
AutoMLBuildControl amlBuild = _aml.getBuildSpec().build_control;
Frame amlTrain = _aml.getTrainingFrame();
TargetEncoderParameters params = (TargetEncoderParameters) getDefaultParams().clone();
params._train = amlTrain._key;
params._response_column = amlInput.response_column;
params._seed = amlBuild.stopping_criteria.seed();
Set<String> teColumns = selectColumnsToEncode(amlTrain, params);
if (teColumns.isEmpty()) return;
_aml.eventLog().warn(Stage.FeatureCreation,
"Target Encoding integration in AutoML is in an experimental stage, the models obtained with this feature can not yet be downloaded as MOJO for production.");
if (_aml.isCVEnabled()) {
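            // with cross-validation enabled, use KFold leakage handling; if no fold column was provided,
            // generate a Modulo fold column so the TE model and the downstream models can share the same folds.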
params._data_leakage_handling = DataLeakageHandlingStrategy.KFold;
params._fold_column = amlInput.fold_column;
if (params._fold_column == null) {
//generate fold column
Frame train = new Frame(params.train());
Vec foldColumn = createFoldColumn(
params.train(),
FoldAssignmentScheme.Modulo,
amlBuild.nfolds,
params._response_column,
params._seed
);
DKV.put(foldColumn);
params._fold_column = params._response_column+TE_FOLD_COLUMN_SUFFIX;
train.add(params._fold_column, foldColumn);
register(train, params._train.toString(), true);
params._train = train._key;
_disposables.add(() -> {
foldColumn.remove();
DKV.remove(train._key);
});
}
}
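        // ignore every column that is neither selected for target encoding nor a non-predictor column (response, weights, fold, ...)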
String[] keep = params.getNonPredictors();
params._ignored_columns = Arrays.stream(amlTrain.names())
.filter(col -> !teColumns.contains(col) && !ArrayUtils.contains(keep, col))
.toArray(String[]::new);
TargetEncoder te = new TargetEncoder(params, _aml.makeKey(getType(), null, false));
_teModel = te.trainModel().get();
_tePreprocessor = new TargetEncoderPreprocessor(_teModel);
}
@Override
public Completer apply(Model.Parameters params, PreprocessingConfig config) {
if (_tePreprocessor == null || !config.get(CONFIG_ENABLED, true)) return NOOP;
if (!config.get(CONFIG_PREPARE_CV_ONLY, false))
params._preprocessors = (Key<ModelPreprocessor>[])ArrayUtils.append(params._preprocessors, _tePreprocessor._key);
Frame train = new Frame(params.train());
String foldColumn = _teModel._parms._fold_column;
boolean addFoldColumn = foldColumn != null && train.find(foldColumn) < 0;
if (addFoldColumn) {
train.add(foldColumn, _teModel._parms._train.get().vec(foldColumn));
register(train, params._train.toString(), true);
params._train = train._key;
params._fold_column = foldColumn;
params._nfolds = 0; // to avoid confusion or errors
params._fold_assignment = FoldAssignmentScheme.AUTO; // to avoid confusion or errors
}
return () -> {
//revert train changes
if (addFoldColumn) {
DKV.remove(train._key);
}
};
}
@Override
public void dispose() {
for (Completer disposable : _disposables) disposable.run();
}
@Override
public void remove() {
if (_tePreprocessor != null) {
_tePreprocessor.remove(true);
_tePreprocessor = null;
_teModel = null;
}
}
public void setDefaultParams(TargetEncoderParameters defaultParams) {
_defaultParams = defaultParams;
}
public void setEncodeAllColumns(boolean encodeAllColumns) {
_encodeAllColumns = encodeAllColumns;
}
public void setColumnCardinalityThreshold(int threshold) {
_columnCardinalityThreshold = threshold;
}
private TargetEncoderParameters getDefaultParams() {
if (_defaultParams != null) return _defaultParams;
_defaultParams = new TargetEncoderParameters();
_defaultParams._keep_original_categorical_columns = false;
_defaultParams._blending = true;
_defaultParams._inflection_point = 5;
_defaultParams._smoothing = 10;
_defaultParams._noise = 0;
return _defaultParams;
}
private Set<String> selectColumnsToEncode(Frame fr, TargetEncoderParameters params) {
final Set<String> encode = new HashSet<>();
if (_encodeAllColumns) {
encode.addAll(Arrays.asList(fr.names()));
} else {
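            // heuristic: keep columns whose cardinality reaches the threshold and, when blending is enabled,
            // whose average category size (rows / cardinality) stays above the inflection point.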
Predicate<Vec> cardinalityLargeEnough = v -> v.cardinality() >= _columnCardinalityThreshold;
Predicate<Vec> cardinalityNotTooLarge = params._blending
? v -> (double) fr.numRows() / v.cardinality() > params._inflection_point
: v -> true;
for (int i = 0; i < fr.names().length; i++) {
Vec v = fr.vec(i);
if (cardinalityLargeEnough.test(v) && cardinalityNotTooLarge.test(v))
encode.add(fr.name(i));
}
}
AutoMLInput amlInput = _aml.getBuildSpec().input_spec;
List<String> nonPredictors = Arrays.asList(
amlInput.weights_column,
amlInput.fold_column,
amlInput.response_column
);
encode.removeAll(nonPredictors);
return encode;
}
TargetEncoderPreprocessor getTEPreprocessor() {
return _tePreprocessor;
}
TargetEncoderModel getTEModel() {
return _teModel;
}
private static void register(Frame fr, String keyPrefix, boolean force) {
Key<Frame> key = fr._key;
if (key == null || force)
fr._key = keyPrefix == null ? Key.make() : Key.make(keyPrefix+"_"+Key.rand());
if (force) DKV.remove(key);
DKV.put(fr);
}
public static Vec createFoldColumn(Frame fr,
FoldAssignmentScheme fold_assignment,
int nfolds,
String responseColumn,
long seed) {
Vec foldColumn;
switch (fold_assignment) {
default:
case AUTO:
case Random:
foldColumn = AstKFold.kfoldColumn(fr.anyVec().makeZero(), nfolds, seed);
break;
case Modulo:
foldColumn = AstKFold.moduloKfoldColumn(fr.anyVec().makeZero(), nfolds);
break;
case Stratified:
foldColumn = AstKFold.stratifiedKFoldColumn(fr.vec(responseColumn), nfolds, seed);
break;
}
return foldColumn;
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/water
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/water/automl/RegisterRestApi.java
|
package water.automl;
import water.api.AbstractRegister;
import water.api.RestApiContext;
import water.automl.api.AutoMLBuilderHandler;
import water.automl.api.AutoMLHandler;
import water.automl.api.LeaderboardsHandler;
public class RegisterRestApi extends AbstractRegister {
@Override
public void registerEndPoints(RestApiContext context) {
context.registerEndpoint("automl_build",
"POST /99/AutoMLBuilder", AutoMLBuilderHandler.class, "build",
"Start an AutoML build process.");
context.registerEndpoint("automl",
"GET /99/AutoML/{automl_id}", AutoMLHandler.class, "fetch",
"Fetch the specified AutoML object.");
context.registerEndpoint("leaderboards",
"GET /99/Leaderboards", LeaderboardsHandler.class, "list",
"Return all the AutoML leaderboards.");
context.registerEndpoint("leaderboard",
"GET /99/Leaderboards/{project_name}", LeaderboardsHandler.class, "fetch",
"Return the AutoML leaderboard for the given project.");
}
@Override
public String getName() {
return "AutoML";
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/water/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/water/automl/api/AutoMLBuilderHandler.java
|
package water.automl.api;
import ai.h2o.automl.AutoML;
import ai.h2o.automl.AutoMLBuildSpec;
import water.api.Handler;
import water.api.schemas3.JobV3;
import water.automl.api.schemas3.AutoMLBuildSpecV99;
public class AutoMLBuilderHandler extends Handler {
@SuppressWarnings("unused") // called through reflection by RequestServer
public AutoMLBuildSpecV99 build(int version, AutoMLBuildSpecV99 schema) {
AutoMLBuildSpec buildSpec = schema.createAndFillImpl();
AutoML aml = AutoML.startAutoML(buildSpec);
// update schema with changes during validation
schema.fillFromImpl(buildSpec);
schema.job = new JobV3().fillFromImpl(aml.job());
return schema;
}
}
|
0
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/water/automl
|
java-sources/ai/h2o/h2o-automl/3.46.0.7/water/automl/api/AutoMLHandler.java
|
package water.automl.api;
import ai.h2o.automl.AutoML;
import water.*;
import water.api.Handler;
import water.automl.api.schemas3.AutoMLV99;
import water.exceptions.H2OKeyNotFoundArgumentException;
import java.util.stream.Stream;
public class AutoMLHandler extends Handler {
@SuppressWarnings("unused") // called through reflection by RequestServer
/** Return an AutoML object by ID. */
public AutoMLV99 fetch(int version, AutoMLV99 autoMLV99) {
AutoML autoML = DKV.getGet(autoMLV99.automl_id.name);
if (autoML == null) {
AutoML[] amls = fetchAllForProject(autoMLV99.automl_id.name);
if (amls.length > 0) {
autoML = Stream.of(amls).max(AutoML.byStartTime).get();
}
}
return autoMLV99.fillFromImpl(autoML);
}
private static AutoML[] fetchAllForProject(final String project_name) {
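        // scan the global key store for AutoML instances whose key starts with the given project name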
final Key[] automlKeys = KeySnapshot.globalSnapshot().filter(new KeySnapshot.KVFilter() {
@Override
public boolean filter(KeySnapshot.KeyInfo k) {
return Value.isSubclassOf(k._type, AutoML.class) && k._key.toString().startsWith(project_name+AutoML.keySeparator);
}
}).keys();
AutoML[] amls = new AutoML[automlKeys.length];
for (int i = 0; i < automlKeys.length; i++) {
AutoML aml = getFromDKV(automlKeys[i]);
amls[i] = aml;
}
return amls;
}
private static AutoML getFromDKV(Key key) {
Value v = DKV.get(key);
if (v == null)
throw new H2OKeyNotFoundArgumentException(key.toString());
return (AutoML) v.get();
}
}
|